| content (string, lengths 0 to 1.05M) | origin (2 classes) | type (2 classes) |
|---|---|---|
from datetime import date
def run_example():
march_2020_15 = date(year=2020, month=3, day=15)
print("march_2020_15.toordinal():", march_2020_15.toordinal())
print("march_2020_15.isocalendar():", march_2020_15.isocalendar())
if __name__ == "__main__":
run_example()
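# For reference: toordinal() counts days since 0001-01-01 (which is day 1), and
# isocalendar() returns (ISO year, ISO week number, ISO weekday) with Monday == 1;
# the round trip date.fromordinal(d.toordinal()) == d always holds.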
|
nilq/baby-python
|
python
|
from typing import List

class Solution:
    # In-place Kadane's algorithm: nums[i] becomes the best sum of a subarray ending at i.
    def XXX(self, nums: List[int]) -> int:
        length = len(nums)
        if length <= 1:
            return nums[0]  # the problem guarantees nums is non-empty
        for i in range(1, length):
            sum_ = nums[i-1] + nums[i]
            if sum_ > nums[i]:
                nums[i] = sum_
        return max(nums)
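# A minimal usage sketch (the method name XXX comes from the scraped problem page):
if __name__ == "__main__":
    print(Solution().XXX([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6, from the subarray [4, -1, 2, 1]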
|
nilq/baby-python
|
python
|
import itertools
# Have the function ArrayAdditionI(arr) take the array of numbers stored in arr and return the string true if any combination of numbers in the array
# (excluding the largest number) can be added up to equal the largest number in the array, otherwise return the string false.
# For example: if arr contains [4, 6, 23, 10, 1, 3] the output should return true because 4 + 6 + 10 + 3 = 23.
# The array will not be empty, will not contain all the same elements, and may contain negative numbers.
def ArrayAdditionI(arr):
    # find and remove the largest value; we test whether the rest can sum to it
    _max = max(arr)
    arr.remove(_max)
    # enumerate every combination of the remaining numbers, of every length
    for i in range(len(arr)+1):
        for cb in itertools.combinations(arr, i):
            if sum(cb) == _max:
                return "true"
    return "false"
test = [3,5,-1,8,12]
print(ArrayAdditionI(test))
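# The worked example from the spec: 4 + 6 + 10 + 3 == 23, so this prints "true".
print(ArrayAdditionI([4, 6, 23, 10, 1, 3]))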
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from aliyun.api.rest import *
from aliyun.api.base import FileItem
|
nilq/baby-python
|
python
|
total_pf = {{packing_fraction}}
poly_coeff = {{polynomial_triso}}
|
nilq/baby-python
|
python
|
# Declare Variables
name = input() # Seller's name
salary = float(input()) # Seller's salary
sales = float(input()) # Total sales made by the seller in the month
# Calculate salary with bonus
total = salary + (sales * .15)
# Show result
print("Total = R$ {:.2f}".format(total))
|
nilq/baby-python
|
python
|
from contextlib import contextmanager
import sys
@contextmanager
def stdout_translator(stream):
old_stdout = sys.stdout
sys.stdout = stream
try:
yield
finally:
sys.stdout = old_stdout
def read_translation(stream):
    out = stream.getvalue()
    outs = out.split('\n')
    # enumerate rather than outs.index(item), which returns the wrong index for duplicate lines
    for i, item in enumerate(outs):
        if i + 1 != len(outs):  # skip the trailing empty string from the final newline
            if 'coverage' in item:
                item = item.replace('coverage', 'covfefe')
            else:
                item += ' covfefe'
            print(item)
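# A minimal usage sketch (assumes an io.StringIO buffer as the capture stream):
if __name__ == '__main__':
    import io
    buffer = io.StringIO()
    with stdout_translator(buffer):
        print("coverage report")
        print("all tests passed")
    read_translation(buffer)  # prints "covfefe report" then "all tests passed covfefe"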
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import os
import redis
import json
from flask import Flask, render_template, redirect, request, url_for, make_response
#r = redis.Redis(host='123.12.148.95', port='15379', password='ABCDEFG1231LQ4L')
if 'VCAP_SERVICES' in os.environ:
VCAP_SERVICES = json.loads(os.environ['VCAP_SERVICES'])
CREDENTIALS = VCAP_SERVICES["rediscloud"][0]["credentials"]
r = redis.Redis(host=CREDENTIALS["hostname"], port=CREDENTIALS["port"], password=CREDENTIALS["password"])
else:
r = redis.Redis(host='127.0.0.1', port='6379')
app = Flask(__name__)
@app.route('/')
def survey():
resp = make_response(render_template('survey.html'))
return resp
@app.route('/suthankyou.html', methods=['POST'])
def suthankyou():
## This is how you grab the contents from the form
f = request.form['feedback']
    ## Now you can do something with the variable "f"
print ("The feedback received was:")
print (f)
resp = """
<h3> - THANKS FOR TAKING THE SURVEY - </h3>
<a href="/"><h3>Back to main menu</h3></a>
"""
return resp
if __name__ == "__main__":
app.run(debug=False, host='0.0.0.0', \
port=int(os.getenv('PORT', '5000')), threaded=True)
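# Hypothetical smoke test once the server is running (route and field name taken from the code above):
#   curl -X POST -d 'feedback=great' http://localhost:5000/suthankyou.html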
|
nilq/baby-python
|
python
|
import battlecode as bc
import sys
import traceback
import time
import pathFinding
#TODO: remove random and use intelligent pathing
import random
totalTime = 0
start = time.time()
#build my environment
gc = bc.GameController()
directions = list(bc.Direction)
#get the starting map
myMap = gc.starting_map(gc.planet())
#get my team name
my_team = gc.team()
#get the details of the orbit
orbit = gc.orbit_pattern()
#TODO: research, currently only gets the first level of rockets
gc.queue_research(bc.UnitType.Rocket)
#count my starting units, and find out where the enemy spawned
enemyx = 0
enemyy = 0
friendlyx = 0
friendlyy = 0
myStartingUnits = 0
#TODO:account for starting off world
for unit in myMap.initial_units:
if unit.team != my_team:
enemyLocation = unit.location
enemyx = enemyLocation.map_location().x
enemyy = enemyLocation.map_location().y
continue
if unit.team == my_team:
myStartingUnits += 1
friendlyx = unit.location.map_location().x
friendlyy = unit.location.map_location().y
continue
#processes the map into an int field
thisMap = pathFinding.pathPlanetMap(myMap)
resourcesMap = pathFinding.miningMap(thisMap,myMap)
#enemyx,enemyy is the starting location of (at least one) of the enemy's bots
#I am making the assumption that they stay near there
#start = time.time()
#if we are mars, figure out 1 safe landing spot for each wholly blocked off zone
#and send it to earth
#TODO: a 50*50 map with a full grid of 1*1 accessible squares may exceed the num of team array slots, should cap at ~10
if gc.planet() == bc.Planet.Mars:
print("we on mars")
landingZones = pathFinding.landingZone(thisMap)
for zone in range(0,len(landingZones)):
gc.write_team_array(zone*2,landingZones[zone][0])
gc.write_team_array(zone*2+1,landingZones[zone][1])
if gc.planet() == bc.Planet.Earth:
landingZones = []
#TODO:map testing
#TODO: generalize map again, multiple destinations(one for each enemy bot, store the targets so i can recalculate the field every x turns?
myMap = pathFinding.pathMap(thisMap, enemyx, enemyy)
#reverseMap = pathFinding.pathMap(myMap, friendlyx, friendlyy)
#end = time.time()
#print("did the map thing in:")
#print(end-start)
#print(myMap.initial_units)
#unit counters init
numFactories = 0
numRockets = 0
numWorkers = 0
numKnights = 0
numRangers = 0
numMages = 0
numHealers = 0
factoryCount = 0
rocketCount = 0
workerCount = myStartingUnits
knightCount = 0
rangerCount = 0
mageCount = 0
healerCount = 0
end = time.time()
totalTime+= end-start
#logic for each unit type
def factoryLogic():
    #TODO: build order/ratios etc.
if gc.can_produce_robot(unit.id, bc.UnitType.Ranger) and numRangers < (5*numHealers+5):#make this a ratio
gc.produce_robot(unit.id, bc.UnitType.Ranger)
if gc.can_produce_robot(unit.id, bc.UnitType.Healer) and numRangers *5 > numHealers:
gc.produce_robot(unit.id, bc.UnitType.Healer)
if len(unit.structure_garrison()) > 0:
myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y)
for d in myDirections:
if gc.can_unload(unit.id, d):
gc.unload(unit.id, d)
return
def workerLogic():
#If i am on a map
if unit.location.is_on_map():#TODO: testing rockets and maps things, remove False
#get valid directions around me
myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y)
#find out what else is near me
nearby = gc.sense_nearby_units(unit.location.map_location(), 50)
nearbyWorkers = 0
for other in nearby:
if gc.can_build(unit.id, other.id):#if its something I can build, then I should
gc.build(unit.id, other.id)
continue
if other.unit_type == unit.unit_type and other.team == unit.team:#note, this unit shows up here, so +1
                nearbyWorkers +=1#we count the number of other workers we can see
if other.unit_type == bc.UnitType.Rocket and other.team == unit.team:
print(len(other.structure_garrison()))
if len(other.structure_garrison()) == 0:
#distanceTo = unit.location.map_location().distance_squared_to(other.location.map_location())
#print(distanceTo)
if gc.can_load(other.id, unit.id):
gc.load(other.id, unit.id)
else:
me = unit.location.map_location()
them = other.location.map_location()
directionToThem = me.direction_to(them)
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, directionToThem):
gc.move_robot(unit.id, directionToThem)
        if numWorkers < 5:#if there aren't enough, we build more workers
            for d in reversed(myDirections):#we want to build the worker as far from the enemy as possible without moving
if gc.can_replicate(unit.id, d):
gc.replicate(unit.id, d)
#TODO:factories on again
"""
        if numFactories < 5:#if there aren't many factories reporting in
if gc.karbonite() > bc.UnitType.Factory.blueprint_cost():#can we afford it
for d in myDirections:#furthest from the enemy again
if gc.can_blueprint(unit.id, bc.UnitType.Factory, d):#if the direction is valid for building
print("built factory")
gc.blueprint(unit.id, bc.UnitType.Factory, d)
"""
#if numFactories > 3 and numWorkers > 5:
if numWorkers > 5:
if gc.karbonite() > bc.UnitType.Rocket.blueprint_cost() and gc.research_info().get_level(bc.UnitType.Rocket) > 0:
for d in reversed(myDirections):
if gc.can_blueprint(unit.id, bc.UnitType.Rocket, d):
gc.blueprint(unit.id, bc.UnitType.Rocket, d)
        #next we want to harvest all the karbonite, we also want to track if we have harvested any
#TODO: harvest and/or move at all
haveHarvested = 0
for direction in myDirections:
if gc.can_harvest(unit.id, direction):
haveHarvested = 1
#print("found dirt")
gc.harvest(unit.id, direction)
        #TODO: spread out to make sure we harvest all the karbonite on the map
if haveHarvested == 0:
#print("no dirt")
for d in reversed(myDirections):
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, d):
#print(d)
gc.move_robot(unit.id, d)
        #basically do a fill: if I can't see another worker, make one, gather any karbonite I can see, then move slowly out from my corner
"""
#TODO: be picky about building placement
if unit.location.is_on_map(): # and unit.location.is_on_planet(bc.Planet.Earth):
nearby = gc.sense_nearby_units(unit.location.map_location(), 2)
for other in nearby:
if gc.can_build(unit.id, other.id):
gc.build(unit.id, other.id)
continue
if gc.can_load(other.id, unit.id):
gc.load(other.id, unit.id)
else:
if numRockets < 1:
if gc.karbonite() > bc.UnitType.Rocket.blueprint_cost() and gc.can_blueprint(unit.id, bc.UnitType.Rocket, d) and gc.research_info().get_level(bc.UnitType.Rocket) > 0:
#numRockets+=1#because we just built one, saves us making many at a time#makes numRockets local, breaks functionality
print("built rocket")
gc.blueprint(unit.id, bc.UnitType.Rocket, d)
if numFactories < 5:
if gc.karbonite() > bc.UnitType.Factory.blueprint_cost() and gc.can_blueprint(unit.id, bc.UnitType.Factory, d):
print("built factory")
gc.blueprint(unit.id, bc.UnitType.Factory, d)
"""
return
def rocketLogic():
if unit.location.is_on_planet(bc.Planet.Mars):
myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y)
for d in myDirections:
if gc.can_unload(unit.id, d):
gc.unload(unit.id, d)
elif unit.location.is_on_planet(bc.Planet.Earth):
#TODO:wait until has someone in before launch
garrison = len(unit.structure_garrison())
#print("waitin on friends")
if garrison > 0:
if len(landingZones)>0:
myx = landingZones[0][0]
myy = landingZones[0][1]
print("im going where im told")
else:
myx = unit.location.map_location().x
myy = unit.location.map_location().y
print("we lazy")
destination = bc.MapLocation(bc.Planet.Mars, myx, myy)
print("we takin off boys")
#TODO:make sure destination is a valid landing zone, currently keeps x,y from earth
if gc.can_launch_rocket(unit.id, destination):
del landingZones[0]
gc.launch_rocket(unit.id, destination)
return
def knightLogic():
#TODO: movement and attack logic
if unit.location.is_on_map():
nearby = gc.sense_nearby_units(unit.location.map_location(), unit.vision_range)
myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y)
knightsNearby = 0
for other in nearby:
if other.unit_type == unit.unit_type and other.team == unit.team:
knightsNearby+=1
if other.team != unit.team and gc.is_attack_ready(unit.id) and gc.can_attack(unit.id, other.id):
gc.attack(unit.id, other.id)
if other.team != unit.team:
me = unit.location.map_location()
them = other.location.map_location()
directionToThem = me.direction_to(them)
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, directionToThem):
gc.move_robot(unit.id, directionToThem)
#print(myDirections)
for d in myDirections:
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, d):
#print(d)
gc.move_robot(unit.id, d)
return
def rangerLogic():
#TODO: movement and attack logic
#print("i'm alive")
#TODO: dont move into my minimum range
if unit.location.is_on_map():
nearby = gc.sense_nearby_units(unit.location.map_location(), unit.vision_range)
myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y)
rangersNearby = 0
for other in nearby:
if other.unit_type == unit.unit_type and other.team == unit.team:
rangersNearby+=1
if other.team != unit.team and gc.is_attack_ready(unit.id) and gc.can_attack(unit.id, other.id):
gc.attack(unit.id, other.id)
if other.team != unit.team:
distanceTo = unit.location.map_location().distance_squared_to(other.location.map_location())
myRange = unit.attack_range()
if distanceTo < myRange:
#move away
for d in reversed(myDirections):
if gc.is_move_ready(unit.id) and gc.can_move(unit.id,d):
gc.move_robot(unit.id,d)
else:
me = unit.location.map_location()
them = other.location.map_location()
directionToThem = me.direction_to(them)
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, directionToThem):
gc.move_robot(unit.id, directionToThem)
#outside range, inside view range, move closer
#print(myDirections)
for d in myDirections:
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, d):
#print(d)
gc.move_robot(unit.id, d)
#since I have moved, check again if there is anything to shoot
for other in nearby:
if other.team != unit.team and gc.is_attack_ready(unit.id) and gc.can_attack(unit.id, other.id):
gc.attack(unit.id, other.id)
#TODO: wait for friends
#TODO: once i dont have enemies, full map search
#if there are 3? other rangers nearme, then move toward target
return
def mageLogic():
#TODO: movement and attack logic
if unit.location.is_on_map():
nearby = gc.sense_nearby_units(unit.location.map_location(), unit.vision_range)
myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y)
magesNearby = 0
for other in nearby:
if other.unit_type == unit.unit_type and other.team == unit.team:
magesNearby+=1
if other.team != unit.team and gc.is_attack_ready(unit.id) and gc.can_attack(unit.id, other.id):
gc.attack(unit.id, other.id)
if other.team != unit.team:
distanceTo = unit.location.map_location().distance_squared_to(other.location.map_location())
myRange = unit.attack_range()
if distanceTo < myRange:
#move away
for d in reversed(myDirections):
if gc.is_move_ready(unit.id) and gc.can_move(unit.id,d):
gc.move_robot(unit.id,d)
else:
me = unit.location.map_location()
them = other.location.map_location()
directionToThem = me.direction_to(them)
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, directionToThem):
gc.move_robot(unit.id, directionToThem)
#outside range, inside view range, move closer
#print(myDirections)
for d in myDirections:
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, d):
#print(d)
gc.move_robot(unit.id, d)
return
def healerLogic():
#TODO: movement and heal logic
if unit.location.is_on_map():
nearby = gc.sense_nearby_units(unit.location.map_location(), unit.vision_range)
for other in nearby:#find the nearest ranger and follow them
if other.unit_type == bc.UnitType.Ranger:
me = unit.location.map_location()
them = other.location.map_location()
directionToThem = me.direction_to(them)
if gc.is_move_ready(unit.id) and gc.can_move(unit.id, directionToThem):
gc.move_robot(unit.id, directionToThem)
return
#turn loop
while True:
try:
start = time.time()
#TODO:testing communications delay and potential offloading work to mars
#communications delay is 50
if gc.planet() == bc.Planet.Earth and gc.round() == 52:
commArray = gc.get_team_array(bc.Planet.Mars)
for i in range(0,10,2):
x=commArray[i]
y=commArray[i+1]
landingZones.append([x,y])
#print("Recieved:", gc.round())
#print(landingZones)
"""
if gc.planet() == bc.Planet.Mars:
index = 0
value = 1
gc.write_team_array(index,value)
"""
        #print(gc.karbonite())#proves karbonite is shared across planets
#unit counters
numFactories = factoryCount
numWorkers = workerCount
numRockets = rocketCount
numKnights = knightCount
numRangers = rangerCount
numMages = mageCount
numHealers = healerCount
factoryCount = 0
rocketCount = 0
workerCount = 0
knightCount = 0
rangerCount = 0
mageCount = 0
healerCount = 0
#turn logic goes here,
        #we separate into a function for each unit type,
#and count the number of each unit we have
#so we can have build ratios and limits
for unit in gc.my_units():
if unit.unit_type == bc.UnitType.Factory:
factoryCount+=1
factoryLogic()
continue
if unit.unit_type == bc.UnitType.Rocket:
rocketCount+=1
rocketLogic()
continue
if unit.unit_type == bc.UnitType.Worker:
if unit.location.is_on_map():
workerCount+=1
workerLogic()
continue
if unit.unit_type == bc.UnitType.Knight:
knightCount+=1
knightLogic()
continue
if unit.unit_type == bc.UnitType.Ranger:
rangerCount+=1
rangerLogic()
continue
if unit.unit_type == bc.UnitType.Mage:
mageCount+=1
mageLogic()
continue
if unit.unit_type == bc.UnitType.Healer:
healerCount+=1
healerLogic()
continue
#TODO: remove time keeping
end = time.time()
totalTime+= end-start
#print(totalTime)
except Exception as e:
print('Error:', e)
# use this to show where the error was
traceback.print_exc()
# send the actions we've performed, and wait for our next turn.
gc.next_turn()
# these lines are not strictly necessary, but it helps make the logs make more sense.
# it forces everything we've written this turn to be written to the manager.
sys.stdout.flush()
sys.stderr.flush()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import numpy as np # For efficient utilization of array
import cv2 # Computer vision library
import os # used for writing CLI commands and for controlling the vlc media player
import vlc_ctrl
import time
import pandas as pd
import subprocess
import tkinter as tk
import math
from tkinter import filedialog as fd
from tkinter import messagebox
from tkinter import Canvas
from tkinter import *
from PIL import Image, ImageTk
root = tk.Tk()
root.configure(background="#426999")
load = Image.open("bg.png")
render = ImageTk.PhotoImage(load)
img = Label(image=render)
img.image = render
img.place(x=0, y=0)
root.title('Vision Based Media Player')
def write_slogan():
global filename
filename = fd.askopenfilename()
def play():
cap = cv2.VideoCapture(0)
try:
os.system("vlc-ctrl play -p "+filename)
# Frontal face classifier is imported here
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#LOADING HAND CASCADE
hand_cascaderr = cv2.CascadeClassifier('Hand_haar_cascade.xml')
hand_cascade = cv2.CascadeClassifier('hand.xml')
count = 0
# Flag is used to pause and play the video [ if flag is 1 then the video plays else it doesn't ]
Pauseflag = 0
try:
while True:
                ret, img = cap.read() # For capturing the frame
blur = cv2.GaussianBlur(img,(5,5),0) # BLURRING IMAGE TO SMOOTHEN EDGES
grayc = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hands = hand_cascade.detectMultiScale(grayc, 1.5, 2)
contour = hands
contour = np.array(contour)
if count==0:
if len(contour)==2:
cv2.putText(img=img, text='Your engine started', org=(int(100 / 2 - 20), int(100 / 2)),
fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1,
color=(0, 255, 0))
for (x, y, w, h) in hands:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
if count>0:
if len(contour)>=2:
pass
elif len(contour)==1:
subprocess.Popen(['vlc-ctrl', 'volume', '-0.1'])
elif len(contour)==0:
pass
count+=1
grayh = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY) # BGR -> GRAY CONVERSION
retval2,thresh1 = cv2.threshold(grayh,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) # THRESHOLDING IMAGE
                hand = hand_cascaderr.detectMultiScale(thresh1, 1.3, 5) # DETECTING HAND IN THE THRESHOLDED IMAGE
mask = np.zeros(thresh1.shape, dtype = "uint8") # CREATING MASK
for (x,y,w,h) in hand: # MARKING THE DETECTED ROI
cv2.rectangle(img,(x,y),(x+w,y+h), (122,122,0), 2)
cv2.rectangle(mask, (x,y),(x+w,y+h),255,-1)
img2 = cv2.bitwise_and(thresh1, mask)
final = cv2.GaussianBlur(img2,(7,7),0)
contours, hierarchy = cv2.findContours(final, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, 0, (255,255,0), 3)
cv2.drawContours(final, contours, 0, (255,255,0), 3)
if len(contours) > 0:
cnt=contours[0]
hull = cv2.convexHull(cnt, returnPoints=False)
# finding convexity defects
defects = cv2.convexityDefects(cnt, hull)
count_defects = 0
# applying Cosine Rule to find angle for all defects (between fingers)
# with angle > 90 degrees and ignore defect
if not (defects is None):
for i in range(defects.shape[0]):
p,q,r,s = defects[i,0]
finger1 = tuple(cnt[p][0])
finger2 = tuple(cnt[q][0])
dip = tuple(cnt[r][0])
# find length of all sides of triangle
a = math.sqrt((finger2[0] - finger1[0])**2 + (finger2[1] - finger1[1])**2)
b = math.sqrt((dip[0] - finger1[0])**2 + (dip[1] - finger1[1])**2)
c = math.sqrt((finger2[0] - dip[0])**2 + (finger2[1] - dip[1])**2)
# apply cosine rule here
angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57.29
# ignore angles > 90 and highlight rest with red dots
if angle <= 90:
count_defects += 1
# define actions required
if count_defects == 1:
print("2")
subprocess.Popen(['vlc-ctrl', 'volume', '+10%'])
#cv2.putText(img,"THIS IS 2", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
elif count_defects == 2:
print("3")
subprocess.Popen(['vlc-ctrl', 'volume', '+10%'])
#cv2.putText(img, "THIS IS 3", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
elif count_defects == 3:
print("4")
subprocess.Popen(['vlc-ctrl', 'volume', '+10%'])
#cv2.putText(img,"This is 4", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
elif count_defects == 4:
print("5")
subprocess.Popen(['vlc-ctrl', 'volume', '+10%'])
#cv2.putText(img,"THIS IS 5", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
# face detection section
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# Gets the x and y coordinates of the face as well the width and height of the face if detected
for (x, y, w, h) in faces:
print ("Face is facing front")
os.system("vlc-ctrl play")
time.sleep(0.2)
                    Pauseflag = 1 # Face is detected hence play the video continuously
if Pauseflag == 0: # Face is not facing front hence pause the video
print ("Face is not facing front")
ti=time.asctime()
m=ti[14:16]
s=ti[17:19]
mi=int(m)
si=int(s)
print(mi,si)
os.system("vlc-ctrl pause")
                    if mi == 59:
                        co = 0  # wrap the minute counter past the hour
                    else:
                        co = mi + 1
                    cs = si
                    # suspend the machine once the clock reaches the target time
                    if mi == co and si == cs:
                        os.system("systemctl suspend")
Pauseflag = 0
except KeyboardInterrupt:
print ("Closing the application!!! [Interrupted]")
cap.release()
except:
messagebox.showerror("warning", "upload the video")
def fun():
messagebox.showinfo("Instructoions", "step1 : upload the video \n \nstep2 : Click the play Button \n\n step3 : If face fronts the camera then video will play else it will pause \n \nstep4 : Closed fist will decrease the volume opened hand will increase the volume")
tk.Entry(root, width = 100).grid(row=0, column=0)
tk.Button(root, text = "Upload",command=write_slogan, height = 2, width=8,fg = "black",activeforeground = "white",activebackground = "black").grid(row=1, column=0, pady = (40,50))
tk.Button(root, text = "How to use",command=fun).grid(row=4, column=0, pady = (180,50))
tk.Button(root, text = "play",command=play).grid(row=2, column=0, pady = (180,50))
tk.Entry(root, width = 100).grid(row=5, column=0)
root.mainloop()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import logging
from dku_model_accessor.constants import DkuModelAccessorConstants
from dku_model_accessor.preprocessing import Preprocessor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
logger = logging.getLogger(__name__)
class SurrogateModel(object):
"""
In case the chosen saved model uses a non-tree based algorithm (and thus does not have feature importance), we fit this surrogate model
on top of the prediction of the former one to be able to retrieve the feature importance information.
"""
def __init__(self, prediction_type):
self.check(prediction_type)
self.feature_names = None
self.target = None
self.prediction_type = prediction_type
# TODO should we define some params of RF to avoid long computation ?
if prediction_type == DkuModelAccessorConstants.CLASSIFICATION_TYPE:
self.clf = RandomForestClassifier(random_state=1407)
else:
self.clf = RandomForestRegressor(random_state=1407)
def check(self, prediction_type):
if prediction_type not in [DkuModelAccessorConstants.CLASSIFICATION_TYPE,
DkuModelAccessorConstants.REGRRSSION_TYPE]:
raise ValueError('Prediction type must either be CLASSIFICATION or REGRESSION.')
def get_features(self):
return self.feature_names
def fit(self, df, target):
preprocessor = Preprocessor(df, target)
train, test = preprocessor.get_processed_train_test()
train_X = train.drop(target, axis=1)
train_Y = train[target]
self.clf.fit(train_X, train_Y)
self.feature_names = train_X.columns
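# A hypothetical usage sketch (assumes a pandas DataFrame whose 'prediction' column holds
# the original saved model's outputs, and that Preprocessor yields a ready-to-fit train split):
#
#   surrogate = SurrogateModel(DkuModelAccessorConstants.CLASSIFICATION_TYPE)
#   surrogate.fit(df, target='prediction')
#   importances = dict(zip(surrogate.get_features(), surrogate.clf.feature_importances_))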
|
nilq/baby-python
|
python
|
import yaml
try:
# use faster C loader if available
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
# follows similar logic to cwrap, ignores !inc, and just looks for [[]]
def parse(filename):
with open(filename, 'r') as file:
declaration_lines = []
declarations = []
in_declaration = False
for line in file.readlines():
line = line.rstrip()
if line == '[[':
declaration_lines = []
in_declaration = True
elif line == ']]':
in_declaration = False
declaration = yaml.load('\n'.join(declaration_lines), Loader=Loader)
declarations.append(declaration)
elif in_declaration:
declaration_lines.append(line)
return declarations
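# A minimal usage sketch: declarations sit between bare '[[' and ']]' marker lines
# (the file name here is hypothetical).
if __name__ == '__main__':
    with open('example_decls.yaml', 'w') as f:
        f.write('[[\nname: add\nargs: [x, y]\n]]\n')
    print(parse('example_decls.yaml'))  # -> [{'name': 'add', 'args': ['x', 'y']}]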
|
nilq/baby-python
|
python
|
import re
class Graph:
def __init__(self, nodes, numWorkers=5):
self.graph = {}
for asciiCode in range(65, 91):
self.graph[chr(asciiCode)] = []
# populate link nodes
for node in nodes:
if node.pre in self.graph:
self.graph[node.pre].append(node.post)
# sort link nodes into descending alphabetical order
for key,val in self.graph.items():
val.sort()
# visited nodes - initially empty
self.visitedNodes = []
# available nodes - initially those nodes with no predecessors
self.initialiseAvailableNodes()
# PART 2 - add workers
self.workers = [Worker(workerId+1) for workerId in range(0,numWorkers)]
def outputVisitedNodes(self):
output = ''
for node in self.visitedNodes:
output = f'{output}{node}'
return output
def initialiseAvailableNodes(self):
self.availableNodes = []
for node in self.graph:
predecessors = self.predecessors(node)
if len(predecessors) == 0:
self.availableNodes.append(node)
self.availableNodes.sort()
# list all the predecessors of given node i.e. all nodes that link to given node
def predecessors(self, node):
predecessors = []
for key, val in self.graph.items():
if node in val:
predecessors.append(key)
predecessors.sort()
return predecessors
def predecessorsAllVisited(self, node):
        # predecessors all visited if all preceding nodes are in visited nodes
allVisited = True
predecessors = self.predecessors(node)
for predecessor in predecessors:
if predecessor not in self.visitedNodes:
allVisited = False
break
return allVisited
def updateAvailableNodes(self, node):
# update available nodes to:
# 1. Include all successor nodes of given node
# 2. Remove given node
        # Available nodes must not contain duplicates and must always be sorted in alphabetical order
newAvailableNodes = self.graph[node]
for newAvailableNode in newAvailableNodes:
if not newAvailableNode in self.availableNodes:
self.availableNodes.append(newAvailableNode)
if node in self.availableNodes:
self.availableNodes.remove(node)
self.availableNodes.sort()
def stepOrder(self):
# while there are available nodes:
# check each available node in order.
# First node where all predecessors have been visited should be added to visited nodes
        # Available nodes are then updated to include all successors of the just-visited node (no duplicates allowed) and to remove the just-visited node
# Note: Available nodes must remain in alphabetical order
# Break and repeat
self.visitedNodes = []
self.initialiseAvailableNodes()
while len(self.availableNodes) > 0:
for node in self.availableNodes:
if self.predecessorsAllVisited(node):
self.visitedNodes.append(node)
self.updateAvailableNodes(node)
break
def starters(self, currentTime):
# get all available nodes and workers
# assign available nodes to available workers
availableWorkers = [worker for worker in self.workers if worker.available()]
availableNodesWithPre = [node for node in self.availableNodes if self.predecessorsAllVisited(node)]
availableWorkerIndex = len(availableWorkers) - 1
for currNode in availableNodesWithPre:
if availableWorkerIndex >= 0:
avWorker = availableWorkers[availableWorkerIndex]
avWorker.workingOn = currNode
avWorker.finishTime = currentTime + (ord(currNode) - ord('A') + 1) + 60
self.availableNodes.remove(currNode)
availableWorkerIndex -= 1
def finishers(self, currentTime):
# any workers finishing at currentTime?
for worker in self.workers:
if worker.finishTime == currentTime:
node = worker.workingOn
worker.workingOn = None
worker.finishTime = None
self.visitedNodes.append(node)
self.updateAvailableNodes(node)
def workersAllAvailable(self):
return len([worker for worker in self.workers if worker.available()]) == len(self.workers)
def timeToCompleteSteps(self):
# Part 2
currentTime = 1
self.visitedNodes = []
self.initialiseAvailableNodes()
complete = False
while not complete:
self.finishers(currentTime)
# check if complete
if len(self.availableNodes) == 0:
complete = self.workersAllAvailable()
if not complete:
self.starters(currentTime)
currentTime += 1
return currentTime-1
class Worker:
def __init__(self,workerId):
self.workerId = workerId
self.workingOn = None
self.finishTime = None
def available(self):
        return self.workingOn is None
def unavailable(self):
return not self.available()
class Node:
def __init__(self, pre, post):
self.pre = pre
self.post = post
def processFile(filename):
with open(filename, "r") as input:
nodes = [Node(line.strip()[5], line.strip()[-12]) for line in input]
return nodes
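# Input lines look like "Step C must be finished before step A can begin.";
# processFile plucks the two step letters out by fixed position (index 5 and -12).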
# solve puzzle
nodes = processFile("day7.txt")
graph = Graph(nodes)
# Part 1 - Work out order in which steps should be completed
graph.stepOrder()
print(f'Step Order: {graph.outputVisitedNodes()}')
# Part 2 - Time to complete all steps
# 5 workers available each step takes 60 seconds plus number of seconds corresponding to its letter A=1 (61), B=2 (62), .. Z=26 (86)
# Available steps can begin simultaneously but where multiple steps are available they must still begin alphabetically
time = graph.timeToCompleteSteps()
print(f'Time to complete steps: {time} seconds. Step order: {graph.outputVisitedNodes()}')
|
nilq/baby-python
|
python
|
# sql/expression.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Defines the public namespace for SQL expression constructs.
"""
from ._dml_constructors import delete as delete
from ._dml_constructors import insert as insert
from ._dml_constructors import update as update
from ._elements_constructors import all_ as all_
from ._elements_constructors import and_ as and_
from ._elements_constructors import any_ as any_
from ._elements_constructors import asc as asc
from ._elements_constructors import between as between
from ._elements_constructors import bindparam as bindparam
from ._elements_constructors import case as case
from ._elements_constructors import cast as cast
from ._elements_constructors import collate as collate
from ._elements_constructors import column as column
from ._elements_constructors import desc as desc
from ._elements_constructors import distinct as distinct
from ._elements_constructors import extract as extract
from ._elements_constructors import false as false
from ._elements_constructors import funcfilter as funcfilter
from ._elements_constructors import label as label
from ._elements_constructors import not_ as not_
from ._elements_constructors import null as null
from ._elements_constructors import nulls_first as nulls_first
from ._elements_constructors import nulls_last as nulls_last
from ._elements_constructors import or_ as or_
from ._elements_constructors import outparam as outparam
from ._elements_constructors import over as over
from ._elements_constructors import text as text
from ._elements_constructors import true as true
from ._elements_constructors import tuple_ as tuple_
from ._elements_constructors import type_coerce as type_coerce
from ._elements_constructors import typing as typing
from ._elements_constructors import within_group as within_group
from ._selectable_constructors import alias as alias
from ._selectable_constructors import cte as cte
from ._selectable_constructors import except_ as except_
from ._selectable_constructors import except_all as except_all
from ._selectable_constructors import exists as exists
from ._selectable_constructors import intersect as intersect
from ._selectable_constructors import intersect_all as intersect_all
from ._selectable_constructors import join as join
from ._selectable_constructors import lateral as lateral
from ._selectable_constructors import outerjoin as outerjoin
from ._selectable_constructors import select as select
from ._selectable_constructors import table as table
from ._selectable_constructors import tablesample as tablesample
from ._selectable_constructors import union as union
from ._selectable_constructors import union_all as union_all
from ._selectable_constructors import values as values
from .base import _from_objects as _from_objects
from .base import _select_iterables as _select_iterables
from .base import ColumnCollection as ColumnCollection
from .base import Executable as Executable
from .cache_key import CacheKey as CacheKey
from .dml import Delete as Delete
from .dml import Insert as Insert
from .dml import Update as Update
from .dml import UpdateBase as UpdateBase
from .dml import ValuesBase as ValuesBase
from .elements import _truncated_label as _truncated_label
from .elements import BinaryExpression as BinaryExpression
from .elements import BindParameter as BindParameter
from .elements import BooleanClauseList as BooleanClauseList
from .elements import Case as Case
from .elements import Cast as Cast
from .elements import ClauseElement as ClauseElement
from .elements import ClauseList as ClauseList
from .elements import CollectionAggregate as CollectionAggregate
from .elements import ColumnClause as ColumnClause
from .elements import ColumnElement as ColumnElement
from .elements import Extract as Extract
from .elements import False_ as False_
from .elements import FunctionFilter as FunctionFilter
from .elements import Grouping as Grouping
from .elements import Label as Label
from .elements import literal as literal
from .elements import literal_column as literal_column
from .elements import Null as Null
from .elements import Over as Over
from .elements import quoted_name as quoted_name
from .elements import ReleaseSavepointClause as ReleaseSavepointClause
from .elements import RollbackToSavepointClause as RollbackToSavepointClause
from .elements import SavepointClause as SavepointClause
from .elements import TextClause as TextClause
from .elements import True_ as True_
from .elements import Tuple as Tuple
from .elements import TypeClause as TypeClause
from .elements import TypeCoerce as TypeCoerce
from .elements import UnaryExpression as UnaryExpression
from .elements import WithinGroup as WithinGroup
from .functions import func as func
from .functions import Function as Function
from .functions import FunctionElement as FunctionElement
from .functions import modifier as modifier
from .lambdas import lambda_stmt as lambda_stmt
from .lambdas import LambdaElement as LambdaElement
from .lambdas import StatementLambdaElement as StatementLambdaElement
from .operators import ColumnOperators as ColumnOperators
from .operators import custom_op as custom_op
from .operators import Operators as Operators
from .selectable import Alias as Alias
from .selectable import AliasedReturnsRows as AliasedReturnsRows
from .selectable import CompoundSelect as CompoundSelect
from .selectable import CTE as CTE
from .selectable import Exists as Exists
from .selectable import FromClause as FromClause
from .selectable import FromGrouping as FromGrouping
from .selectable import GenerativeSelect as GenerativeSelect
from .selectable import HasCTE as HasCTE
from .selectable import HasPrefixes as HasPrefixes
from .selectable import HasSuffixes as HasSuffixes
from .selectable import Join as Join
from .selectable import LABEL_STYLE_DEFAULT as LABEL_STYLE_DEFAULT
from .selectable import (
LABEL_STYLE_DISAMBIGUATE_ONLY as LABEL_STYLE_DISAMBIGUATE_ONLY,
)
from .selectable import LABEL_STYLE_NONE as LABEL_STYLE_NONE
from .selectable import (
LABEL_STYLE_TABLENAME_PLUS_COL as LABEL_STYLE_TABLENAME_PLUS_COL,
)
from .selectable import Lateral as Lateral
from .selectable import ReturnsRows as ReturnsRows
from .selectable import ScalarSelect as ScalarSelect
from .selectable import Select as Select
from .selectable import Selectable as Selectable
from .selectable import SelectBase as SelectBase
from .selectable import Subquery as Subquery
from .selectable import TableClause as TableClause
from .selectable import TableSample as TableSample
from .selectable import TableValuedAlias as TableValuedAlias
from .selectable import TextAsFrom as TextAsFrom
from .selectable import TextualSelect as TextualSelect
from .selectable import Values as Values
from .visitors import Visitable as Visitable
nullsfirst = nulls_first
nullslast = nulls_last
|
nilq/baby-python
|
python
|
import argparse
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.utils.data as data
from PIL import Image, ImageFile
from torchvision import transforms
from tqdm import tqdm
from template import imagenet_templates
import fast_stylenet
from sampler import InfiniteSamplerWrapper
import clip
import torch.nn.functional as F
from torchvision.utils import save_image
from torchvision.transforms.functional import adjust_contrast
cudnn.benchmark = True
Image.MAX_IMAGE_PIXELS = None
ImageFile.LOAD_TRUNCATED_IMAGES = True
import time
def test_transform():
transform_list = [
transforms.Resize(size=(512, 512)),
transforms.ToTensor()
]
return transforms.Compose(transform_list)
def hr_transform():
transform_list = [
transforms.ToTensor()
]
return transforms.Compose(transform_list)
class FlatFolderDataset(data.Dataset):
def __init__(self, root, transform):
super(FlatFolderDataset, self).__init__()
self.root = root
self.paths = list(Path(self.root).glob('*'))
self.transform = transform
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(str(path)).convert('RGB')
img = self.transform(img)
return img
def __len__(self):
return len(self.paths)
def name(self):
return 'FlatFolderDataset'
parser = argparse.ArgumentParser()
parser.add_argument('--test_dir', type=str, default ='./test_set')
parser.add_argument('--hr_dir', type=str)
parser.add_argument('--vgg', type=str, default='models/vgg_normalised.pth')
# training options
parser.add_argument('--n_threads', type=int, default=16)
parser.add_argument('--num_test', type=int, default=16)
parser.add_argument('--decoder', type=str, default='./experiments/clip_decoder_pencil.pth.tar')
args = parser.parse_args()
device = torch.device('cuda')
decoder = fast_stylenet.decoder
vgg = fast_stylenet.vgg
vgg.load_state_dict(torch.load(args.vgg))
vgg = nn.Sequential(*list(vgg.children())[:31])
decoder.load_state_dict(torch.load(args.decoder))
network = fast_stylenet.Net(vgg, decoder)  # Net is assumed to live in fast_stylenet; the original referenced an unimported "net" module
network.eval()
network.to(device)
test_tf = test_transform()
test_dataset = FlatFolderDataset(args.test_dir, test_tf)
test_iter = iter(data.DataLoader(
test_dataset, batch_size=args.num_test,
num_workers=args.n_threads))
test_images1 = next(test_iter)
test_images1 = test_images1.cuda()
if args.hr_dir is not None:
hr_tf = hr_transform()
hr_dataset = FlatFolderDataset(args.hr_dir, hr_tf)
hr_iter = iter(data.DataLoader(
hr_dataset, batch_size=1,
num_workers=args.n_threads))
hr_images = next(hr_iter)
hr_images = hr_images.cuda()
with torch.no_grad():
    _, test_out1 = network(test_images1)
test_out1 = adjust_contrast(test_out1,1.5)
output_test = torch.cat([test_images1,test_out1],dim=0)
output_name = './output_test/test.png'
save_image(output_test, str(output_name),nrow=test_out1.size(0),normalize=True,scale_each=True)
if args.hr_dir is not None:
_, test_out = network(hr_images)
test_out = adjust_contrast(test_out,1.5)
output_name = './output_test/hr.png'
save_image(test_out, str(output_name),nrow=test_out.size(0),normalize=True,scale_each=True)
|
nilq/baby-python
|
python
|
# import os
# import json
#
# target_dirs = [ 'home_1', 'home_2', 'home_3', 'real_v0', 'real_v1', 'real_v2', 'real_v3', 'human_label_kobeF2', 'victor_1']
# target_file = './data/'
# for target_dir in target_dirs:
# target_file += target_dir + '_'
# target_file += 'output.json'
#
# output_images = {}
# output_annotations = {}
#
# for idx, target_dir in enumerate(target_dirs):
# target_json = os.path.join('./data', target_dir, 'annotations', 'output.json')
# labels = json.load(open(target_json))
# if idx == 0:
# output_images = labels['images']
# output_annotations = labels['annotations']
# for i in range(len(output_images)):
# output_images[i]['file_name'] = os.path.join(target_dir, 'images', output_images[i]['file_name'])
# output_images[i]['id'] = int(output_images[i]['id'])
# for i in range(len(output_annotations)):
# output_annotations[i]['image_id'] = int(output_annotations[i]['image_id'])
# print(len(output_images))
# print(len(output_annotations))
# else:
# temp_images = labels['images']
# temp_annotations = labels['annotations']
# for i in range(len(temp_images)):
# temp_images[i]['file_name'] = os.path.join(target_dir, 'images', temp_images[i]['file_name'])
# temp_images[i]['id'] = int(temp_images[i]['id']) + len(output_images)
# for i in range(len(temp_annotations)):
# temp_annotations[i]['image_id'] = int(temp_annotations[i]['image_id']) + len(output_images)
# temp_annotations[i]['id'] = len(output_images) + i
# # temp_annotations[i]['id'] = int(temp_annotations[i]['id']) + len(output_annotations)
#
# output_images.extend(temp_images)
# output_annotations.extend(temp_annotations)
# print(len(output_images))
# print(len(output_annotations))
# output_json = {
# 'images': output_images,
# 'annotations': output_annotations
# }
#
# with open(target_file, 'w') as f:
# json.dump(output_json, f)
import os
import json
import datetime
import numpy as np
IsPrivacy = True
if IsPrivacy:
Privacyname = 'images_privacy'
else:
Privacyname = 'images'
target_dirs = ['real_v0', 'real_v1', 'real_v2', 'real_v3', 'home_1', 'home_2', 'home_3', 'human_label_kobeF2', 'Virtual_V7', 'Virtual_V7_2', 'Virtual_V7_3', 'Virtual_V8_1', 'Virtual_victor_v1']
target_file = './data/'
target_file = target_file + Privacyname + '_'
for target_dir in target_dirs:
target_file += target_dir + '_'
target_file += 'keypoint.json'
output_images = {}
output_annotations = {}
INFO = {
"description": "Dataset",
"url": "",
"version": "0.1.0",
"year": 2019,
"contributor": "",
"date_created": datetime.datetime.utcnow().isoformat(' ')
}
LICENSES = [
{
"id": 1,
"name": "",
"url": ""
}
]
CATEGORIES = [
{
'id': 1,
'name': 'human',
'supercategory': 'human',
'keypoints': ["nose", "left_eye", "right_eye", "left_ear", "right_ear", "left_shoulder", "right_shoulder",
"left_elbow", "right_elbow", "left_wrist", "right_wrist", "left_hip", "right_hip", "left_knee",
"right_knee", "left_ankle", "right_ankle"],
'skeleton': [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9],
[8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]],
}
]
temp_id = 0
anotation_id = 0
for idx, target_dir in enumerate(target_dirs):
target_json = os.path.join('./data', target_dir, 'annotations', 'output.json')
labels = json.load(open(target_json))
if idx == 0:
max_id = 0
output_images = labels['images']
output_annotations = labels['annotations']
for i in range(len(output_images)):
output_images[i]['file_name'] = os.path.join(target_dir, Privacyname, output_images[i]['file_name'])
output_images[i]['id'] = int(output_images[i]['id'])
if output_images[i]['id'] > max_id:
max_id = output_images[i]['id']
for i in range(len(output_annotations)):
output_annotations[i]['image_id'] = int(output_annotations[i]['image_id'])
output_annotations[i]['id'] = '{}'.format(anotation_id)
anotation_id = anotation_id + 1
temp_id += max_id
else:
max_id = 0
temp_images = labels['images']
temp_annotations = labels['annotations']
for i in range(len(temp_images)):
temp_images[i]['file_name'] = os.path.join(target_dir, Privacyname, temp_images[i]['file_name'])
temp_images[i]['id'] = int(temp_images[i]['id']) + temp_id
if temp_images[i]['id'] > max_id:
max_id = temp_images[i]['id']
for i in range(len(temp_annotations)):
temp_annotations[i]['image_id'] = int(temp_annotations[i]['image_id']) + temp_id
temp_annotations[i]['id'] = '{}'.format(anotation_id)
anotation_id = anotation_id + 1
# temp_annotations[i]['id'] = int(temp_annotations[i]['id']) + len(output_annotations)
output_images.extend(temp_images)
output_annotations.extend(temp_annotations)
temp_id += max_id
# check id is unique
image_ids = []
annotation_ids = []
for i in range(len(output_images)):
image_ids.append(output_images[i]['id'])
for i in range(len(output_annotations)):
annotation_ids.append(output_annotations[i]['id'])
image_ids = np.array(image_ids)
annotation_ids = np.array(annotation_ids)
image_unique = len(image_ids) == len(np.unique(image_ids))
annotation_unique = len(annotation_ids) == len(np.unique(annotation_ids))
if image_unique:
    print('image_id is unique!')
if annotation_unique:
    print('annotation_id is unique!')
# only write the merged annotations when both id spaces are collision-free
unique = image_unique and annotation_unique
# save file
output_json = {
'info': INFO,
'licenses': LICENSES,
'categories': CATEGORIES,
'images': output_images,
'annotations': output_annotations
}
if unique:
with open(target_file, 'w') as f:
json.dump(output_json, f)
print('save annotation!')
|
nilq/baby-python
|
python
|
# ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from abc import ABCMeta
from numbers import Integral, Real
from sys import version_info as sys_version_info
# TODO(wickman) Since the io package is available in 2.6.x, use that instead of
# cStringIO/StringIO
try:
# CPython 2.x
from cStringIO import StringIO
except ImportError:
try:
# Python 2.x
from StringIO import StringIO
  except ImportError:
# Python 3.x
from io import StringIO
from io import BytesIO
AbstractClass = ABCMeta('AbstractClass', (object,), {})
PY2 = sys_version_info[0] == 2
PY3 = sys_version_info[0] == 3
StringIO = StringIO
BytesIO = BytesIO if PY3 else StringIO
integer = (Integral,)
real = (Real,)
numeric = integer + real
string = (str,) if PY3 else (str, unicode)
bytes = (bytes,)
if PY2:
def to_bytes(st):
if isinstance(st, unicode):
return st.encode('utf-8')
else:
return str(st)
else:
def to_bytes(st):
return st.encode('utf-8')
if PY3:
def exec_function(ast, globals_map):
locals_map = globals_map
exec(ast, globals_map, locals_map)
return locals_map
else:
eval(compile(
"""
def exec_function(ast, globals_map):
locals_map = globals_map
exec ast in globals_map, locals_map
return locals_map
""", "<exec_function>", "exec"))
if PY3:
from contextlib import contextmanager, ExitStack
@contextmanager
def nested(*context_managers):
enters = []
with ExitStack() as stack:
for manager in context_managers:
enters.append(stack.enter_context(manager))
yield tuple(enters)
else:
from contextlib import nested
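# A minimal usage sketch of nested() on either Python version (file names hypothetical):
#
#   with nested(open('a.txt'), open('b.txt')) as (fa, fb):
#     print(fa.read(), fb.read())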
__all__ = (
'AbstractClass',
'BytesIO',
'PY2',
'PY3',
'StringIO',
'bytes',
'exec_function',
'nested',
'string',
'to_bytes',
)
|
nilq/baby-python
|
python
|
__author__ = 'Kalyan'
from placeholders import *
# For most of these tests use the interpreter to fill up the blanks.
# type(object) -> returns the object's type.
def test_numbers_types():
assert "int" == type(7).__name__
assert "float" == type(7.5).__name__
assert "long" == type(10L).__name__
def test_numbers_int_arithmetic_operations():
assert 30 == 10 + 20
assert 200 == 10 * 20
assert 32 == 2 ** 5
assert -10 == 10 - 20
assert 2 == 7/3
def test_numbers_string_to_int():
"""hint: execute print int.__doc__ in python console
to find out what int(..) does"""
assert 255== int("FF", 16)
assert 63== int("77", 8)
def test_numbers_int_to_string():
assert "012" == oct(10)
assert "0x64" == hex(100)
assert "0b11111111"== bin(255)
def test_numbers_long():
"""Long is not the long in c"""
assert 1606938044258990275541962092341162602522202993782792835301376L == 2 ** 200
# Being comfortable with number bases mentally is important, and it is routinely asked in interviews as a quick test
# of a candidate.
#
# Replace the __ with the correct string representation by working it out on paper (don't use any code or console).
#
# Read the following links:
# http://courses.cs.vt.edu/~cs1104/number_conversion/convexp.html
# https://docs.python.org/2/library/functions.html#int
def test_numbers_base():
assert 255 == int("11111111", 2)
assert 254 == int("FE", 16)
assert 121 == int("232", 7)
assert 675 == int("pp", 26)
three_things_i_learnt = """
-base conversions
-length function
-difference between type and instanceof
"""
|
nilq/baby-python
|
python
|
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import bpy
from mathutils import Vector
class AMTH_NODE_OT_AddTemplateVignette(bpy.types.Operator):
bl_idname = "node.template_add_vignette"
bl_label = "Add Vignette"
bl_description = "Add a vignette effect"
bl_options = {"REGISTER", "UNDO"}
@classmethod
def poll(cls, context):
space = context.space_data
return space.type == "NODE_EDITOR" \
and space.node_tree is not None \
and space.tree_type == "CompositorNodeTree"
    # based on the scene-setup script from master nazgul
def _setupNodes(self, context):
scene = context.scene
space = context.space_data
tree = scene.node_tree
has_act = True if tree.nodes.active else False
bpy.ops.node.select_all(action="DESELECT")
ellipse = tree.nodes.new(type="CompositorNodeEllipseMask")
ellipse.width = 0.8
ellipse.height = 0.4
blur = tree.nodes.new(type="CompositorNodeBlur")
blur.use_relative = True
blur.factor_x = 30
blur.factor_y = 50
ramp = tree.nodes.new(type="CompositorNodeValToRGB")
ramp.color_ramp.interpolation = "B_SPLINE"
ramp.color_ramp.elements[1].color = (0.6, 0.6, 0.6, 1)
overlay = tree.nodes.new(type="CompositorNodeMixRGB")
overlay.blend_type = "OVERLAY"
overlay.inputs[0].default_value = 0.8
overlay.inputs[1].default_value = (0.5, 0.5, 0.5, 1)
tree.links.new(ellipse.outputs["Mask"], blur.inputs["Image"])
tree.links.new(blur.outputs["Image"], ramp.inputs[0])
tree.links.new(ramp.outputs["Image"], overlay.inputs[2])
if has_act:
tree.links.new(tree.nodes.active.outputs[0], overlay.inputs[1])
if has_act:
overlay.location = tree.nodes.active.location
overlay.location += Vector((350.0, 0.0))
else:
overlay.location += Vector(
(space.cursor_location[0], space.cursor_location[1]))
ellipse.location = overlay.location
ellipse.location += Vector((-715.0, -400))
ellipse.inputs[0].hide = True
ellipse.inputs[1].hide = True
blur.location = ellipse.location
blur.location += Vector((300.0, 0.0))
blur.inputs["Size"].hide = True
ramp.location = blur.location
ramp.location += Vector((175.0, 0))
ramp.outputs["Alpha"].hide = True
for node in (ellipse, blur, ramp, overlay):
node.select = True
node.show_preview = False
bpy.ops.node.join()
frame = ellipse.parent
frame.label = "Vignette"
frame.use_custom_color = True
frame.color = (0.1, 0.1, 0.1)
overlay.parent = None
overlay.label = "Vignette Overlay"
def execute(self, context):
self._setupNodes(context)
return {"FINISHED"}
|
nilq/baby-python
|
python
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training Script for STFTGAN on a waveform dataset.
Follows the same setup as SpecPhaseGAN, but
generates STFTs instead of Magnitude and Instantaneous
Frequency.
"""
import os
import tensorflow as tf
from audio_synthesis.structures import spec_gan
from audio_synthesis.models import wgan
from audio_synthesis.datasets import waveform_dataset
from audio_synthesis.utils import waveform_save_helper as save_helper
from audio_synthesis.utils import spectral
# Setup Parameters
D_UPDATES_PER_G = 5
Z_DIM = 64
BATCH_SIZE = 64
EPOCHS = 1800
SAMPLING_RATE = 16000
FFT_FRAME_LENGTH = 512
FFT_FRAME_STEP = 128
Z_IN_SHAPE = [4, 8, 1024]
SPECTOGRAM_IMAGE_SHAPE = [-1, 128, 256, 2]
CHECKPOINT_DIR = '_results/representation_study/SpeechMNIST/STFTGAN_HR/training_checkpoints/'
RESULT_DIR = '_results/representation_study/SpeechMNIST/STFTGAN_HR/audio/'
DATASET_PATH = 'data/SpeechMNIST_1850.npz'
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
print('Num GPUs Available: ', len(tf.config.experimental.list_physical_devices('GPU')))
raw_dataset = waveform_dataset.get_stft_dataset(
DATASET_PATH, frame_length=FFT_FRAME_LENGTH, frame_step=FFT_FRAME_STEP
)
generator = spec_gan.Generator(channels=2, in_shape=Z_IN_SHAPE)
discriminator = spec_gan.Discriminator(input_shape=SPECTOGRAM_IMAGE_SHAPE)
generator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9)
get_waveform = lambda stft:\
spectral.stft_2_waveform(
stft, FFT_FRAME_LENGTH, FFT_FRAME_STEP
)[0]
save_examples = lambda epoch, real, generated:\
save_helper.save_wav_data(
epoch, real, generated, SAMPLING_RATE, RESULT_DIR, get_waveform
)
stft_gan_model = wgan.WGAN(
raw_dataset, generator, [discriminator], Z_DIM,
generator_optimizer, discriminator_optimizer, discriminator_training_ratio=D_UPDATES_PER_G,
batch_size=BATCH_SIZE, epochs=EPOCHS, checkpoint_dir=CHECKPOINT_DIR,
fn_save_examples=save_examples
)
stft_gan_model.restore('ckpt-100', 1000)
stft_gan_model.train()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# File to explore the difference between the error function relying on Hoeffding's bound and the one relying on the
# bound of Maurer and Pontil.
import os
import sys
import configparser
import numpy as np
directory = os.path.dirname(os.path.dirname(os.path.expanduser(__file__)))
sys.path.append(directory)
path_config = configparser.ConfigParser()
path_config.read(os.path.join(directory, 'paths.ini'))
spibb_path = path_config['PATHS']['spibb_path']
sys.path.append(spibb_path)
from wet_chicken_discrete.dynamics import WetChicken
from wet_chicken_discrete.baseline_policy import WetChickenBaselinePolicy
from batch_rl_algorithms.soft_spibb import ApproxSoftSPIBB
import spibb_utils
if __name__ == '__main__':
nb_iterations = 50
    # seed = 1602421836  # seed used in the original experiment, overridden below
    seed = 1
np.random.seed(seed)
log = True
# ratio = 0.9
epsilon = 0.1
delta = 1
gamma = 0.95
length = 5
width = 5
max_turbulence = 3.5
max_velocity = 3
nb_states = length * width
nb_actions = 5
learning_rate = 0.5
max_nb_it = 10 ** 5
epsilon_baseline = 0.1
order_epsilon = np.inf
order_learning_rate = 3
episodic = False
results = []
wet_chicken = WetChicken(length=length, width=width, max_turbulence=max_turbulence,
max_velocity=max_velocity)
pi_baseline = WetChickenBaselinePolicy(env=wet_chicken, gamma=gamma, method='heuristic',
order_epsilon=order_epsilon, learning_rate=learning_rate,
max_nb_it=max_nb_it, epsilon=epsilon_baseline,
order_learning_rate=order_learning_rate)
pi_b = pi_baseline.pi
P = wet_chicken.get_transition_function()
R = wet_chicken.get_reward_function()
r_reshaped = spibb_utils.get_reward_model(P, R)
length_trajectory = 10000
trajectory = spibb_utils.generate_batch_wet_chicken(length_trajectory, wet_chicken, pi_b)
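    # Fit Approx-Soft-SPIBB twice on the same batch, once per error function, to compare the bounds.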
approx_soft_spibb = ApproxSoftSPIBB(pi_b=pi_b, gamma=gamma, nb_states=nb_states, nb_actions=nb_actions,
data=trajectory, R=R, delta=delta, epsilon=epsilon,
error_kind='hoeffding', episodic=episodic, checks=False)
e_hoeffding = np.nan_to_num(approx_soft_spibb.errors, nan=0, posinf=0)
approx_soft_spibb = ApproxSoftSPIBB(pi_b=pi_b, gamma=gamma, nb_states=nb_states, nb_actions=nb_actions,
data=trajectory, R=R, delta=delta, epsilon=epsilon,
error_kind='mpeb', episodic=episodic, checks=False, g_max=40)
e_mpeb = np.nan_to_num(approx_soft_spibb.errors, nan=0, posinf=0)
print(f'L1 distance (interpreted as long vector instead of matrix) : {np.sum(np.abs(e_hoeffding - e_mpeb))}')
# count_state_action = approx_soft_spibb.count_state_action
|
nilq/baby-python
|
python
|
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, ConvLSTM2D
from keras.layers import Activation, Dropout, Flatten, Dense, LeakyReLU
from keras.layers import LSTM, TimeDistributed, Lambda, BatchNormalization
from keras import optimizers
from keras import backend as K
import tensorflow as tf
from matplotlib import pyplot as plt
from IPython.display import clear_output
img_width, img_height = 4101, 247
train_data_dir = '/training'
validation_data_dir = 'validation'
multiplier = 1
num_classes = 9
nb_train_samples = multiplier*num_classes*70
nb_validation_samples = multiplier*num_classes*20
epochs = 50
batch_size = 10
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
class PlotLearning(keras.callbacks.Callback):
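    # Keras callback that live-plots training/validation loss and accuracy after every epoch.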
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.acc = []
self.val_acc = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
self.acc.append(logs.get('categorical_accuracy'))
self.val_acc.append(logs.get('val_categorical_accuracy'))
self.i += 1
clear_output(wait=True)
color1 = 'tab:red'
color2 = 'tab:blue'
fig, ax1 = plt.subplots(figsize=(10, 6))
ax1.set_xlabel('Epoch',size=24)
ax1.set_ylabel('Loss',color=color1,size=24)
ax1.plot(self.x, self.losses, label="tr_loss",color=color1,linestyle='dashed')
ax1.plot(self.x, self.val_losses, label="val_loss",color=color1)
ax1.tick_params(axis='x', labelsize = 16)
        ax1.tick_params(axis='y', labelcolor=color1, labelsize = 14)
ax1.legend(loc='center right',fontsize=16,bbox_to_anchor=(0.4, 1.1),ncol = 2)
ax2 = ax1.twinx()
ax2.set_ylabel('Accuracy',color=color2,size=24)
ax2.plot(self.x, self.acc, label="tr_accuracy",color=color2,linestyle='dashed')
ax2.plot(self.x, self.val_acc, label="val_accuracy",color=color2)
        ax2.tick_params(axis='y', labelcolor=color2, labelsize = 16)
ax2.legend(loc='center right',fontsize=16, bbox_to_anchor=(1.1, 1.1),ncol = 2)
fig.tight_layout()
        plt.show()
plot_losses = PlotLearning()
model = Sequential()
#CNN:
model.add(Conv2D(8, (3, 3), input_shape=input_shape))
model.add(LeakyReLU(alpha=0.01))
model.add(MaxPooling2D(pool_size=(2, 2), padding = 'same'))
model.add(Dropout(0.5))
model.add(Conv2D(16, (3, 3), padding = 'same'))
model.add(LeakyReLU(alpha=0.01))
model.add(MaxPooling2D(pool_size=(2, 2), padding = 'same'))
model.add(Dropout(0.5))
model.add(Conv2D(32, (3, 3), padding = 'same'))
model.add(LeakyReLU(alpha=0.01))
model.add(MaxPooling2D(pool_size=(2, 2), padding = 'same'))
model.add(Dropout(0.5))
model.add(Flatten())
#MLP:
model.add(Dense(128))
model.add(LeakyReLU(alpha=0.01))
model.add(Dropout(0.5))
model.add(Dense(16))
model.add(LeakyReLU(alpha=0.01))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=1e-6, amsgrad=False)
# pass the configured optimizer object; the string 'adam' would silently use default settings
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['categorical_accuracy'])
model.summary()
train_datagen = ImageDataGenerator(rescale = 1. / 255)
test_datagen = ImageDataGenerator(rescale = 1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir, target_size=(img_width, img_height),
batch_size=batch_size, color_mode='rgb', class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir, target_size=(img_width, img_height),
batch_size=batch_size, color_mode='rgb', class_mode='categorical')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
callbacks=[plot_losses],
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
model_json = model.to_json()
with open("conv_lstm_model.json", "w") as json_file:
json_file.write(model_json)
model.save("predictor.h5")
print("Saved conv_lstm_model to disk")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from nseta.analytics.model import *
from nseta.common.history import historicaldata
from nseta.common.log import tracelog, default_logger
from nseta.plots.plots import *
from nseta.cli.inputs import *
from nseta.archives.archiver import *
import click
from datetime import datetime
__all__ = ['create_cdl_model']
@click.command(help='Create candlestick model. Plot uncovered patterns')
@click.option('--symbol', '-S', help='Security code')
@click.option('--start', '-s', help='Start date in yyyy-mm-dd format')
@click.option('--end', '-e', help='End date in yyyy-mm-dd format')
@click.option('--file', '-o', 'file_name', help='Output file name. Default is {symbol}.csv')
@click.option('--steps/--no-steps', default=False, help='--steps for saving intermediate steps in output file')
@click.option('--clear', '-c', default=False, is_flag=True, help='Clears the cached data for the given options.')
@click.option('--format', '-f', default='csv', type=click.Choice(['csv', 'pkl']),
help='Output format, pkl - to save as Pickel and csv - to save as csv')
@tracelog
def create_cdl_model(symbol, start, end, file_name, steps, clear, format):
if not validate_inputs(start, end, symbol):
print_help_msg(create_cdl_model)
return
sd = datetime.strptime(start, '%Y-%m-%d').date()
ed = datetime.strptime(end, '%Y-%m-%d').date()
try:
if clear:
arch = archiver()
arch.clearcache(response_type=ResponseType.History, force_clear=False)
historyinstance = historicaldata()
df = historyinstance.daily_ohlc_history(symbol, sd, ed, type=ResponseType.History)
df = df.sort_values(by='Date',ascending=True)
df.set_index('Date', inplace=True)
df = model_candlestick(df, steps)
click.echo('\n{}\n'.format(df.to_string(index=False)))
except Exception as e:
default_logger().debug(e, exc_info=True)
click.secho('Failed to create candlestick model', fg='red', nl=True)
return
except SystemExit:
pass
if not file_name:
file_name = symbol + '.' + format
if format == 'csv':
df.to_csv(file_name)
else:
df.to_pickle(file_name)
default_logger().debug('Model saved to: {}'.format(file_name))
default_logger().debug('Candlestick pattern model plot saved to: {}'.format(symbol +'_candles.html'))
click.secho('Model saved to: {}'.format(file_name), fg='green', nl=True)
try:
plot_candlestick(df, symbol, 'Candlestick Pattern Model Recognition for ' + symbol)
click.secho('Candlestick pattern model plot saved to: {}'.format(symbol +'_candles.html'), fg='green', nl=True)
except Exception as e:
default_logger().debug(e, exc_info=True)
click.secho('Failed to plot candlestick pattern for the model', fg='red', nl=True)
return
except SystemExit:
pass
|
nilq/baby-python
|
python
|
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('127.0.0.1', 50007))
s.listen(1)
while True:
conn, addr = s.accept()
with conn:
while True:
data = conn.recv(1024)
if not data:
break
                print('data: {}, addr: {}'.format(data, addr))
                conn.sendall(b'Received: ' + data)
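# A minimal client sketch for manual testing (run in another shell while the server is
# listening); the address and message below are illustrative only:
# import socket
# with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
#     c.connect(('127.0.0.1', 50007))
#     c.sendall(b'hello')
#     print(c.recv(1024))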
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# ganben: MontyLemmatiser port of montylingua 2.1
import re
# think about import
class MontyLemmatiser:
#
# original implt read `.mdf` file as db
# u obviously need a bigger db file
xtag_morph_zhcn_corpus = '' # add a real source
exceptions_source = '' # file or db, see LEMMAEXCEPTION.MDF
regular_any = []
regular_verb = []
regular_noun = []
regular_adj = []
regular_operators = [] # operator/operands concept
# regular can be default option
irregular_re_any = []
irregular_re_verb = []
# irregular can be tf model/db
    irregular_nouns = []
# additional irregular nouns
def __init__(self):
#
        # placeholder: the original implementation loaded tokenizer rule files here
        filename_str = [[] for _ in range(6)]
        (self.regular_any, self.regular_verb, self.regular_noun,
         self.irregular_re_any, self.irregular_re_verbs,
         self.irregular_re_nouns) = [
            [[re.compile('^' + rule[0].lower() + '$')] + rule[1:]
             for rule in tokenizers]
            for tokenizers in filename_str]
        return
    def lemmatise_untagged_sentence(self, untagged):
        # not implemented in this port
        pass
    def lemmatise_tagged_sentence(self, tagged):
        # not implemented in this port
        pass
    def lemmatise_word(self, word, pos=""):
        # not implemented in this port
        pass
    def verify_lemmatiser(self):
        # not implemented in this port
        pass
def make_verification_dictionary(self):
#
return
def fix_case(self, word1, word2):
#
return
def _re_match_helper(self, re_kb, word):
#
return
def find_irregular_morph(self,word,pos=""):
a1=self._re_match_helper
groupnames1=self.find_irregular_morph
cron_dictr=a1(self.irregular_re_any,word)
return
|
nilq/baby-python
|
python
|
import logging
from typing import List, Tuple, Dict
import psycopg2
from src.tools.utils import read_config
class Postgres:
def __init__(self, config: Dict = None):
self.connection = None
self.cursor = None
self.connect(config)
def connect(self, config: Dict = None) -> None:
config = config or read_config()
pg_con_params = config.get('postgresql')
assert pg_con_params
self.connection = psycopg2.connect(**pg_con_params)
logging.info("Database connect established")
self.cursor = self.connection.cursor()
def execute(self, req: str) -> None:
self.cursor.execute(req)
def fetch_data(self) -> List[Tuple]:
return self.cursor.fetchall()
def commit(self) -> None:
self.connection.commit()
def exec_file(self, filepath: str):
with open(filepath, 'r') as f:
self.cursor.execute(f.read())
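# Usage sketch, assuming read_config() returns a dict with a 'postgresql' section
# holding psycopg2 connection parameters (host, port, user, password, dbname):
# db = Postgres()
# db.execute('SELECT 1')
# print(db.fetch_data())
# db.commit()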
|
nilq/baby-python
|
python
|
import os
from typing import Any, Dict, Literal
import wandb
from wicker.core.config import get_config
from wicker.core.definitions import DatasetID
from wicker.core.storage import S3PathFactory
def version_dataset(
dataset_name: str,
dataset_version: str,
entity: str,
metadata: Dict[str, Any],
dataset_backend: Literal["s3"] = "s3",
) -> None:
"""
Version the dataset on Weights and Biases using the config parameters defined in wickerconfig.json.
Args:
dataset_name: The name of the dataset to be versioned
dataset_version: The version of the dataset to be versioned
entity: Who the run will belong to
metadata: The metadata to be logged as an artifact, enforces dataclass for metadata documentation
dataset_backend: The backend where the dataset is stored, currently only supports s3
"""
# needs to first acquire and set wandb creds
# WANDB_API_KEY, WANDB_BASE_URL
# _set_wandb_credentials()
# needs to init the wandb run, this is going to be a 'data' run
dataset_run = wandb.init(project="dataset_curation", name=f"{dataset_name}_{dataset_version}", entity=entity)
# grab the uri of the dataset to be versioned
dataset_uri = _identify_s3_url_for_dataset_version(dataset_name, dataset_version, dataset_backend)
# establish the artifact and save the dir/s3_url to the artifact
data_artifact = wandb.Artifact(f"{dataset_name}_{dataset_version}", type="dataset")
data_artifact.add_reference(dataset_uri, name="dataset")
# save metadata dict to the artifact
data_artifact.metadata["version"] = dataset_version
data_artifact.metadata["s3_uri"] = dataset_uri
for key, value in metadata.items():
data_artifact.metadata[key] = value
# save the artifact to the run
dataset_run.log_artifact(data_artifact) # type: ignore
dataset_run.finish() # type: ignore
def _set_wandb_credentials() -> None:
"""
Acquire the weights and biases credentials and load them into the environment.
    This loads the variables into the environment as env variables for WandB to use;
    the function overrides previously set wandb env variables with the ones specified in
    the wicker config, if they exist.
"""
# load the config
config = get_config()
# if the keys are present in the config add them to the env
wandb_config = config.wandb_config
for field in wandb_config.__dataclass_fields__: # type: ignore
attr = wandb_config.__getattribute__(field)
if attr is not None:
os.environ[str(field).upper()] = attr
else:
if str(field).upper() not in os.environ:
raise EnvironmentError(
f"Cannot use W&B without setting {str(field.upper())}. "
f"Specify in either ENV or through wicker config file."
)
def _identify_s3_url_for_dataset_version(
dataset_name: str,
dataset_version: str,
dataset_backend: Literal["s3"] = "s3",
) -> str:
"""
Identify and return the s3 url for the dataset and version specified in the backend.
Args:
dataset_name: name of the dataset to retrieve url
dataset_version: version of the dataset to retrieve url
dataset_backend: backend of the dataset to retrieve url
Returns:
The url pointing to the dataset on storage
"""
schema_path = ""
if dataset_backend == "s3":
# needs to do the parsing work to fetch the correct s3 uri
schema_path = S3PathFactory().get_dataset_assets_path(DatasetID(name=dataset_name, version=dataset_version))
return schema_path
|
nilq/baby-python
|
python
|
from .munger import * # noqa
from .munger_link_only import * # noqa
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# encoding: utf-8
"""
Test _extend_kb_with_fixed_labels from core
"""
import pyqms
import sys
import unittest
TESTS = [
# {
# 'in' : {
# 'params' : {
# 'molecules' : ['KLEINERTEST'],
# 'charges' : [2, ],
# 'fixed_labels' : {
# 'R' : ['C(-6) 13C(6) N(-4) 15N(4)']
# },
# }
# },
# 'out' : {
# 'formated_molecule' : ['KLEINER0TEST'],
# }
# },
{
"in": {
"params": {
"molecules": ["KLEINERTEST"],
"charges": [2],
"fixed_labels": {
"R": ["", "C(-6) 13C(6) N(-4) 15N(4)"],
"K": ["", "C(-6) 13C(6) N(-4) 15N(4)"],
},
}
},
"out": {
"formated_molecule": sorted(
["K0LEINER0TEST", "K1LEINER0TEST", "K1LEINER1TEST", "K0LEINER1TEST"]
)
},
},
{
"in": {
"params": {
"molecules": ["KLEINERTEST"],
"charges": [2],
"fixed_labels": {
"R": ["", "C(-6) 13C(6) N(-4) 15N(4)"],
"K": ["", "C(-6) 13C(6) N(-4) 15N(4)"],
},
"params": {"SILAC_AAS_LOCKED_IN_EXPERIMENT": ["K", "R"]},
}
},
"out": {"formated_molecule": sorted(["K0LEINER0TEST", "K1LEINER1TEST"])},
},
{
"in": {
"params": {
"molecules": ["KLEINERTEST"],
"charges": [2],
"fixed_labels": {
"R": ["", "C(-6) 13C(6) N(-4) 15N(4)"],
"K": ["", "C(-6) 13C(6) N(-4) 15N(4)"],
"I": ["FOCK"],
},
"params": {"SILAC_AAS_LOCKED_IN_EXPERIMENT": ["K", "R"]},
}
},
"out": {"formated_molecule": sorted(["K0LEI0NER0TEST", "K1LEI0NER1TEST"])},
},
{
"in": {
"params": {
"molecules": ["KLEINERTEST"],
"charges": [2],
"fixed_labels": {
"R": ["", "C(-6) 13C(6) N(-4) 15N(4)"],
"K": ["", "C(-6) 13C(6) N(-4) 15N(4)"],
"I": ["FOCK", ""],
},
"params": {"SILAC_AAS_LOCKED_IN_EXPERIMENT": ["K", "R"]},
}
},
"out": {
"formated_molecule": sorted(
["K0LEI0NER0TEST", "K1LEI0NER1TEST", "K0LEI1NER0TEST", "K1LEI1NER1TEST"]
)
},
},
{
"in": {
"params": {
"molecules": ["KLEINERTEST"],
"charges": [2],
"fixed_labels": {
"R": ["", "C(-6) 13C(6) N(-4) 15N(4)"],
"K": ["", "C(-6) 13C(6) N(-4) 15N(4)"],
"I": ["FOCK", ""],
"L": ["Noo", "Way"],
},
"params": {"SILAC_AAS_LOCKED_IN_EXPERIMENT": ["K", "R"]},
}
},
"out": {
"formated_molecule": sorted(
[
"K0L0EI0NER0TEST",
"K1L0EI0NER1TEST",
"K0L0EI1NER0TEST",
"K1L0EI1NER1TEST",
"K0L1EI0NER0TEST",
"K1L1EI0NER1TEST",
"K0L1EI1NER0TEST",
"K1L1EI1NER1TEST",
]
)
},
},
]
# 2 isotope element (N,nitrogen)
CRASH_TESTS = {
"in": {
"params": {
"molecules": ["KLEINERTEST"],
"charges": [2],
"fixed_labels": {
# non existing aa
"U": ["C(-6) 13C(6) N(-4) 15N(4)"]
},
}
},
"out": {
# 'formated_molecule' : ['KLEINER0TEST'],
},
}
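# Each case feeds its params into pyqms.IsotopologueLibrary and compares the generated
# fixed-label molecule variations against the expected 'formated_molecule' list.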
def extend_kb_with_fixed_labels_test():
for test_id, test_dict in enumerate(TESTS):
yield _extend_kb_with_fixed_labels, test_id, test_dict
def _extend_kb_with_fixed_labels(test_id, test_dict):
lib_1 = pyqms.IsotopologueLibrary(**test_dict["in"]["params"])
print(lib_1.lookup["molecule fixed label variations"])
formula_1 = list(lib_1.keys())[0]
# __oOo__
lookup_key = test_dict["in"]["params"]["molecules"][0]
for label_percentile in lib_1[formula_1]["env"].keys():
assert (
sorted(list(lib_1.lookup["molecule fixed label variations"][lookup_key]))
== test_dict["out"]["formated_molecule"]
)
class TestResults(unittest.TestCase):
def setUp(self):
pass
def crash_test(self):
"""
Check if a key error is raised when using a non existent amino acid
"""
with self.assertRaises(SystemExit) as system_exit_check:
pyqms.IsotopologueLibrary(**CRASH_TESTS["in"]["params"])
self.assertEqual(system_exit_check.exception.code, 1)
if __name__ == "__main__":
pass
|
nilq/baby-python
|
python
|
'''
EXERCISE 015: Car Rental
Write a program that asks for the number of km driven by a rented car
and the number of days it was rented for. Compute the price to pay,
knowing that the car costs R$ 60 per day plus R$ 0.15 per km driven.
'''
def menu_inicial():
print('='*15,'By Portela','='*15,'\n')
    print('-'*15,'Car Rental','-'*15)
def valor_pago():
t = (d * 60) + (r * 0.15)
    print(f'The total to pay is R${t:.2f}.')
def lin():
print('-'*29)
from time import sleep
def fim():
for contagem in range(0,1):
        print('Exiting...')
sleep(6)
    print('Thank you very much, come back soon.')
if __name__=='__main__':
menu_inicial()
n = 'N'
while n == 'N':
lin()
        d = float(input('How many days was the car rented? '))
lin()
lin()
        r = float(input('How many km were driven? '))
lin()
valor_pago()
lin()
        n = str(input('Do you want to exit the program? [Y/N] ')).upper()
lin()
fim()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# coding: utf8
"""
Day 5: Alchemical Reduction part 2
https://adventofcode.com/2018/day/5
"""
from string import ascii_lowercase
def reactPolymer(polymer):
pats = []
pats += [c + c.upper() for c in ascii_lowercase]
pats += [c.upper() + c for c in ascii_lowercase]
reactedPolymer = polymer
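    # Repeatedly delete adjacent opposite-case pairs until the polymer stops changing.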
while True:
for pat in pats:
reactedPolymer = reactedPolymer.replace(pat, '')
if polymer == reactedPolymer:
break
else:
polymer = reactedPolymer
return reactedPolymer
def main():
with open('day05input.txt') as f:
line = f.readline()
line = line.strip()
polymers = [line] * 26
for i, c in enumerate(ascii_lowercase):
polymers[i] = polymers[i].replace(c, '').replace(c.upper(), '')
print(min([len(reactPolymer(x)) for x in polymers]))
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from pathlib import Path
import os
import random
import json
import itertools
import copy
import torch
from torch.utils.data import Dataset, DataLoader, BatchSampler, RandomSampler, \
SequentialSampler
from torchvision import transforms
import numpy as np
import cv2
import PIL
import scipy.io
import glob
from . import utils
default_data_dir = Path(__file__).resolve().parent.parent / "data"
# Set default paths
if "DReye" not in os.environ:
os.environ["DReye_DATA_DIR"] = str(default_data_dir / "New_DReye")
if "DADA2000_DATA_DIR" not in os.environ:
os.environ["DADA2000_DATA_DIR"] = str(default_data_dir / "DADA")
if "DT16_DATA_DIR" not in os.environ:
os.environ["DT16_DATA_DIR"] = str(default_data_dir / "DT16")
if "BDDA_DATA_DIR" not in os.environ:
os.environ["BDDA_DATA_DIR"] = str(default_data_dir / "BDDA")
config_path = Path(__file__).resolve().parent / "cache"
# os.environ["DADA2000_DATA_DIR"] = "/media/acl/7A4A85A74A85612D/01_Driver_Gaze/TASED_Net_DADA/data"
def get_dataloader(src='DHF1K'):
if src in ('MIT1003',):
return ImgSizeDataLoader
return DataLoader
class ImgSizeBatchSampler:
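    # Batch sampler that groups sample indices by output size so every batch
    # contains images of a single spatial resolution.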
def __init__(self, dataset, batch_size=1, shuffle=False, drop_last=False):
assert(isinstance(dataset, MIT1003Dataset))
self.batch_size = batch_size
self.shuffle = shuffle
self.drop_last = drop_last
out_size_array = [
dataset.size_dict[img_idx]['out_size']
for img_idx in dataset.samples]
self.out_size_set = sorted(list(set(out_size_array)))
self.sample_idx_dict = {
out_size: [] for out_size in self.out_size_set}
for sample_idx, img_idx in enumerate(dataset.samples):
self.sample_idx_dict[dataset.size_dict[img_idx]['out_size']].append(
sample_idx)
self.len = 0
self.n_batches_dict = {}
for out_size, sample_idx_array in self.sample_idx_dict.items():
this_n_batches = len(sample_idx_array) // self.batch_size
self.len += this_n_batches
self.n_batches_dict[out_size] = this_n_batches
def __iter__(self):
batch_array = list(itertools.chain.from_iterable(
[out_size for _ in range(n_batches)]
for out_size, n_batches in self.n_batches_dict.items()))
if not self.shuffle:
random.seed(27)
random.shuffle(batch_array)
this_sample_idx_dict = copy.deepcopy(self.sample_idx_dict)
for sample_idx_array in this_sample_idx_dict.values():
random.shuffle(sample_idx_array)
for out_size in batch_array:
this_indices = this_sample_idx_dict[out_size][:self.batch_size]
del this_sample_idx_dict[out_size][:self.batch_size]
yield this_indices
def __len__(self):
return self.len
class ImgSizeDataLoader(DataLoader):
def __init__(self, dataset, batch_size=1, shuffle=False, drop_last=False,
**kwargs):
if batch_size == 1:
if shuffle:
sampler = RandomSampler(dataset)
else:
sampler = SequentialSampler(dataset)
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
else:
batch_sampler = ImgSizeBatchSampler(
dataset, batch_size=batch_size, shuffle=shuffle,
drop_last=drop_last)
super().__init__(dataset, batch_sampler=batch_sampler, **kwargs)
def get_optimal_out_size(img_size):
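    # Search output sizes (multiples of 32, with 100-120 feature-map cells) whose
    # aspect ratio best matches the input image.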
ar = img_size[0] / img_size[1]
min_prod = 100
max_prod = 120
ar_array = []
size_array = []
for n1 in range(7, 14):
for n2 in range(7, 14):
if min_prod <= n1 * n2 <= max_prod:
this_ar = n1 / n2
this_ar_ratio = min((ar, this_ar)) / max((ar, this_ar))
ar_array.append(this_ar_ratio)
size_array.append((n1, n2))
max_ar_ratio_idx = np.argmax(np.array(ar_array)).item()
bn_size = size_array[max_ar_ratio_idx]
out_size = tuple(r * 32 for r in bn_size)
return out_size
class FolderVideoDataset(Dataset):
def __init__(self, images_path, frame_modulo=None, source=None):
self.images_path = images_path
self.frame_modulo = frame_modulo or 5
self.preproc_cfg = {
'rgb_mean': (0.485, 0.456, 0.406),
'rgb_std': (0.229, 0.224, 0.225),
}
frame_files = sorted(list(images_path.glob("*")))
frame_files = [file for file in frame_files
if file.suffix in ('.png', '.jpg', '.jpeg')]
self.frame_files = frame_files
self.vid_nr_array = [0]
self.n_images_dict = {0: len(frame_files)}
img = cv2.imread(str(frame_files[0]))
img_size = tuple(img.shape[:2])
self.target_size_dict = {0: img_size}
if source == 'DHF1K' and img_size == (360, 640):
self.out_size = (224, 384)
elif source == 'Hollywood':
self.out_size = (224, 416)
elif source == 'UCFSports':
self.out_size = (256, 384)
else:
self.out_size = get_optimal_out_size(img_size)
def load_frame(self, f_nr):
frame_file = self.frame_files[f_nr - 1]
frame = cv2.imread(str(frame_file))
if frame is None:
raise FileNotFoundError(frame_file)
frame = np.ascontiguousarray(frame[:, :, ::-1])
return frame
def preprocess_sequence(self, frame_seq):
transformations = []
transformations.append(transforms.ToPILImage())
transformations.append(transforms.Resize(
self.out_size, interpolation=PIL.Image.LANCZOS))
transformations.append(transforms.ToTensor())
if 'rgb_mean' in self.preproc_cfg:
transformations.append(
transforms.Normalize(
self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std']))
processing = transforms.Compose(transformations)
tensor = [processing(img) for img in frame_seq]
tensor = torch.stack(tensor)
return tensor
def get_data(self, vid_nr, start):
n_images = self.n_images_dict[vid_nr]
frame_nrs = list(range(start, n_images + 1, self.frame_modulo))
frame_seq = [self.load_frame(f_nr) for f_nr in frame_nrs]
frame_seq = self.preprocess_sequence(frame_seq)
target_size = self.target_size_dict[vid_nr]
return frame_nrs, frame_seq, target_size
def __len__(self):
        return len(self.vid_nr_array)
def __getitem__(self, item):
        return self.get_data(item, 1)  # frame numbering is 1-based in load_frame
class FolderImageDataset(Dataset):
def __init__(self, images_path):
self.images_path = images_path
self.frame_modulo = 1
self.preproc_cfg = {
'rgb_mean': (0.485, 0.456, 0.406),
'rgb_std': (0.229, 0.224, 0.225),
}
image_files = sorted(list(images_path.glob("*")))
image_files = [file for file in image_files
if file.suffix in ('.png', '.jpg', '.jpeg')]
self.image_files = image_files
self.n_images_dict = {
img_idx: 1 for img_idx in range(len(self.image_files))}
self.target_size_dict = {}
self.out_size_dict = {}
for img_idx, file in enumerate(image_files):
img = cv2.imread(str(file))
img_size = tuple(img.shape[:2])
self.target_size_dict[img_idx] = img_size
self.out_size_dict[img_idx] = get_optimal_out_size(img_size)
def load_image(self, img_idx):
image_file = self.image_files[img_idx]
image = cv2.imread(str(image_file))
if image is None:
raise FileNotFoundError(image_file)
image = np.ascontiguousarray(image[:, :, ::-1])
return image
def preprocess(self, img, out_size):
transformations = [
transforms.ToPILImage(),
transforms.Resize(
out_size, interpolation=PIL.Image.LANCZOS),
transforms.ToTensor(),
]
if 'rgb_mean' in self.preproc_cfg:
transformations.append(
transforms.Normalize(
self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std']))
processing = transforms.Compose(transformations)
tensor = processing(img)
return tensor
def get_data(self, img_idx):
file = self.image_files[img_idx]
img = cv2.imread(str(file))
assert (img is not None)
img = np.ascontiguousarray(img[:, :, ::-1])
out_size = self.out_size_dict[img_idx]
img = self.preprocess(img, out_size)
return [1], img, self.target_size_dict[img_idx]
def __len__(self):
return len(self.image_files)
def __getitem__(self, item):
        return self.get_data(item)
###
class DReyeDataset(Dataset, utils.KwConfigClass):
img_channels = 1
n_train_val_videos = 405 # 570
test_vid_nrs = (406, 780) #1110
frame_rate = 24 # note video 25fps and modify frame_modulo=4
source = 'DReye'
dynamic = True
def __init__(self,
seq_len=12,
frame_modulo=4,
max_seq_len=1e6,
preproc_cfg=None,
out_size=(224, 384), phase='train', target_size=(360, 640),
debug=False, val_size=27, n_x_val=3, x_val_step=2,
x_val_seed=0, seq_per_vid=1, subset=None, verbose=1,
n_images_file='DReye_n_images.dat', seq_per_vid_val=2,
sal_offset=None):
self.phase = phase
self.train = phase == 'train'
if not self.train:
preproc_cfg = {}
elif preproc_cfg is None:
preproc_cfg = {}
preproc_cfg.update({
'rgb_mean': (0.485, 0.456, 0.406),
'rgb_std': (0.229, 0.224, 0.225),
})
self.preproc_cfg = preproc_cfg
self.out_size = out_size
self.debug = debug
self.val_size = val_size
self.n_x_val = n_x_val
self.x_val_step = x_val_step
self.x_val_seed = x_val_seed
self.seq_len = seq_len
self.seq_per_vid = seq_per_vid
self.seq_per_vid_val = seq_per_vid_val
self.frame_modulo = frame_modulo
self.clip_len = seq_len * frame_modulo
self.subset = subset
self.verbose = verbose
self.n_images_file = n_images_file
self.target_size = target_size
self.sal_offset = sal_offset
self.max_seq_len = max_seq_len
self._dir = None
self._n_images_dict = None
self.vid_nr_array = None
# Evaluation
if phase in ('eval', 'test'):
self.seq_len = int(1e6)
if self.phase in ('test',):
self.vid_nr_array = list(range(
self.test_vid_nrs[0], self.test_vid_nrs[1] + 1))
self.samples, self.target_size_dict = self.prepare_samples()
return
# Cross-validation split
n_videos = self.n_train_val_videos
assert(self.val_size <= n_videos // self.n_x_val)
assert(self.x_val_step < self.n_x_val)
vid_nr_array = np.arange(1, n_videos + 1)
if self.x_val_seed > 0:
np.random.seed(self.x_val_seed)
np.random.shuffle(vid_nr_array)
val_start = (len(vid_nr_array) - self.val_size) //\
(self.n_x_val - 1) * self.x_val_step
vid_nr_array = vid_nr_array.tolist()
if not self.train:
self.vid_nr_array =\
vid_nr_array[val_start:val_start + self.val_size]
else:
del vid_nr_array[val_start:val_start + self.val_size]
self.vid_nr_array = vid_nr_array
if self.subset is not None:
self.vid_nr_array =\
self.vid_nr_array[:int(len(self.vid_nr_array) * self.subset)]
self.samples, self.target_size_dict = self.prepare_samples()
@property
def n_images_dict(self):
if self._n_images_dict is None:
with open(config_path.parent / self.n_images_file, 'r') as f:
self._n_images_dict = {
idx + 1: int(line) for idx, line in enumerate(f)
if idx + 1 in self.vid_nr_array}
return self._n_images_dict
@property
def dir(self):
if self._dir is None:
self._dir = Path(os.environ["DReye_DATA_DIR"])
return self._dir
@property
def n_samples(self):
return len(self.vid_nr_array)
def __len__(self):
return len(self.samples)
def prepare_samples(self):
samples = []
too_short = 0
too_long = 0
for vid_nr, n_images in self.n_images_dict.items():
if self.phase in ('eval', 'test'):
samples += [
(vid_nr, offset + 1) for offset in range(self.frame_modulo)]
continue
            # Skip videos whose frame count is too small or too large
if n_images < self.clip_len:
too_short += 1
continue
if n_images // self.frame_modulo > self.max_seq_len:
too_long += 1
continue
#
if self.phase == 'train':
samples += [(vid_nr, None)] * self.seq_per_vid
continue
elif self.phase == 'valid':
x = n_images // (self.seq_per_vid_val * 2) - self.clip_len // 2
start = max(1, x)
end = min(n_images - self.clip_len, n_images - x)
samples += [
(vid_nr, int(start)) for start in
np.linspace(start, end, self.seq_per_vid_val)]
continue
        # Print basic information about the loaded dataset
if self.phase not in ('eval', 'test') and self.n_images_dict:
n_loaded = len(self.n_images_dict) - too_short - too_long
print(f"{n_loaded} videos loaded "
f"({n_loaded / len(self.n_images_dict) * 100:.1f}%)")
print(f"{too_short} videos are too short "
f"({too_short / len(self.n_images_dict) * 100:.1f}%)")
print(f"{too_long} videos are too long "
f"({too_long / len(self.n_images_dict) * 100:.1f}%)")
target_size_dict = {
vid_nr: self.target_size for vid_nr in self.n_images_dict.keys()}
return samples, target_size_dict
def get_frame_nrs(self, vid_nr, start):
n_images = self.n_images_dict[vid_nr]
if self.phase in ('eval', 'test'):
return list(range(start, n_images + 1, self.frame_modulo))
return list(range(start, start + self.clip_len, self.frame_modulo))
def get_data_file(self, vid_nr, f_nr, dkey):
if dkey == 'frame':
folder = 'images'
elif dkey == 'sal':
folder = 'new_maps'
elif dkey == 'fix':
folder = 'fixation'
else:
raise ValueError(f'Unknown data key {dkey}')
###
img_path = str(self.dir / f'{vid_nr:04d}' / folder/ f'{f_nr:04d}.png')
return img_path
def load_data(self, vid_nr, f_nr, dkey):
read_flag = None if dkey == 'frame' else cv2.IMREAD_GRAYSCALE
data_file = self.get_data_file(vid_nr, f_nr, dkey)
if read_flag is not None:
data = cv2.imread(str(data_file), read_flag)
else:
data = cv2.imread(str(data_file))
if data is None:
raise FileNotFoundError(data_file)
if dkey == 'frame':
data = np.ascontiguousarray(data[:, :, ::-1])
if dkey == 'sal' and self.train and self.sal_offset is not None:
data += self.sal_offset
data[0, 0] = 0
return data
def preprocess_sequence(self, frame_seq, dkey, vid_nr):
transformations = []
if dkey == 'frame':
transformations.append(transforms.ToPILImage())
transformations.append(transforms.Resize(
self.out_size, interpolation=PIL.Image.LANCZOS))
transformations.append(transforms.ToTensor())
if dkey == 'frame' and 'rgb_mean' in self.preproc_cfg:
transformations.append(
transforms.Normalize(
self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std']))
elif dkey == 'sal':
transformations.append(transforms.Lambda(utils.normalize_tensor))
# elif dkey == 'fix':
# transformations.append(
# transforms.Lambda(lambda fix: torch.gt(fix, 0.5)))
##!
processing = transforms.Compose(transformations)
tensor = [processing(img) for img in frame_seq]
tensor = torch.stack(tensor)
return tensor
def get_seq(self, vid_nr, frame_nrs, dkey):
data_seq = [self.load_data(vid_nr, f_nr, dkey) for f_nr in frame_nrs]
return self.preprocess_sequence(data_seq, dkey, vid_nr)
def get_data(self, vid_nr, start):
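        # start is None during training: sample a random clip start within the video.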
if start is None:
max_start = self.n_images_dict[vid_nr] - self.clip_len + 1
if max_start == 1:
start = max_start
else:
start = np.random.randint(1, max_start)
frame_nrs = self.get_frame_nrs(vid_nr, start)
frame_seq = self.get_seq(vid_nr, frame_nrs, 'frame')
target_size = self.target_size_dict[vid_nr]
# if self.phase == 'test' and self.source in ('DReye',):
# return frame_nrs, frame_seq, target_size
sal_seq = self.get_seq(vid_nr, frame_nrs, 'sal')
fix_seq = torch.full(self.target_size, 0, dtype=torch.bool)
# fix used for nss aucj and aucs
# fix_seq = self.get_seq(vid_nr, frame_nrs, 'fix')
        # Substitute sal_seq for fix_seq
return frame_nrs, frame_seq, sal_seq, fix_seq, target_size
def __getitem__(self, item):
vid_nr, start = self.samples[item]
data = self.get_data(vid_nr, start)
return data
class DADA2000Dataset(Dataset, utils.KwConfigClass):
img_channels = 1
n_train_val_videos = 797
test_vid_nrs = (798, 1013)
frame_rate = 30
    source = 'DADA2000'
dynamic = True
def __init__(self,
seq_len=12,
frame_modulo=5,
max_seq_len=1e6,
preproc_cfg=None,
out_size=(224, 538), phase='train', target_size=(224, 538),
debug=False, val_size=100, n_x_val=3, x_val_step=2,
x_val_seed=0, seq_per_vid=1, subset=None, verbose=1,
n_images_file='DADA_n_images.dat', seq_per_vid_val=2,
sal_offset=None):
self.phase = phase
self.train = phase == 'train'
if not self.train:
preproc_cfg = {}
elif preproc_cfg is None:
preproc_cfg = {}
preproc_cfg.update({
'rgb_mean': (0.485, 0.456, 0.406),
'rgb_std': (0.229, 0.224, 0.225),
})
self.preproc_cfg = preproc_cfg
self.out_size = out_size
self.debug = debug
self.val_size = val_size
self.n_x_val = n_x_val
self.x_val_step = x_val_step
self.x_val_seed = x_val_seed
self.seq_len = seq_len
self.seq_per_vid = seq_per_vid
self.seq_per_vid_val = seq_per_vid_val
self.frame_modulo = frame_modulo
self.clip_len = seq_len * frame_modulo
self.subset = subset
self.verbose = verbose
self.n_images_file = n_images_file
self.target_size = target_size
self.sal_offset = sal_offset
self.max_seq_len = max_seq_len
self._dir = None
self._n_images_dict = None
self.vid_nr_array = None
# Evaluation
if phase in ('eval', 'test'):
self.seq_len = int(1e6)
if self.phase in ('test',):
self.vid_nr_array = list(range(
self.test_vid_nrs[0], self.test_vid_nrs[1] + 1))
self.samples, self.target_size_dict = self.prepare_samples()
return
# Cross-validation split
n_videos = self.n_train_val_videos
assert(self.val_size <= n_videos // self.n_x_val)
assert(self.x_val_step < self.n_x_val)
vid_nr_array = np.arange(1, n_videos + 1)
if self.x_val_seed > 0:
np.random.seed(self.x_val_seed)
np.random.shuffle(vid_nr_array)
val_start = (len(vid_nr_array) - self.val_size) //\
(self.n_x_val - 1) * self.x_val_step
vid_nr_array = vid_nr_array.tolist()
if not self.train:
self.vid_nr_array =\
vid_nr_array[val_start:val_start + self.val_size]
else:
del vid_nr_array[val_start:val_start + self.val_size]
self.vid_nr_array = vid_nr_array
if self.subset is not None:
self.vid_nr_array =\
self.vid_nr_array[:int(len(self.vid_nr_array) * self.subset)]
self.samples, self.target_size_dict = self.prepare_samples()
@property
def n_images_dict(self):
if self._n_images_dict is None:
with open(config_path.parent / self.n_images_file, 'r') as f:
self._n_images_dict = {
idx + 1: int(line) for idx, line in enumerate(f)
if idx + 1 in self.vid_nr_array}
return self._n_images_dict
@property
def dir(self):
if self._dir is None:
self._dir = Path(os.environ["DADA2000_DATA_DIR"])
return self._dir
@property
def n_samples(self):
return len(self.vid_nr_array)
def __len__(self):
return len(self.samples)
def prepare_samples(self):
samples = []
too_short = 0
too_long = 0
for vid_nr, n_images in self.n_images_dict.items():
if self.phase in ('eval', 'test'):
samples += [
(vid_nr, offset + 1) for offset in range(self.frame_modulo)]
continue
            # Skip videos whose frame count is too small or too large
if n_images < self.clip_len:
too_short += 1
continue
if n_images // self.frame_modulo > self.max_seq_len:
too_long += 1
continue
#
if self.phase == 'train':
samples += [(vid_nr, None)] * self.seq_per_vid
continue
elif self.phase == 'valid':
x = n_images // (self.seq_per_vid_val * 2) - self.clip_len // 2
start = max(1, x)
end = min(n_images - self.clip_len, n_images - x)
samples += [
(vid_nr, int(start)) for start in
np.linspace(start, end, self.seq_per_vid_val)]
continue
        # Print basic information about the loaded dataset
if self.phase not in ('eval', 'test') and self.n_images_dict:
n_loaded = len(self.n_images_dict) - too_short - too_long
print(f"{n_loaded} videos loaded "
f"({n_loaded / len(self.n_images_dict) * 100:.1f}%)")
print(f"{too_short} videos are too short "
f"({too_short / len(self.n_images_dict) * 100:.1f}%)")
print(f"{too_long} videos are too long "
f"({too_long / len(self.n_images_dict) * 100:.1f}%)")
target_size_dict = {
vid_nr: self.target_size for vid_nr in self.n_images_dict.keys()}
return samples, target_size_dict
def get_frame_nrs(self, vid_nr, start):
n_images = self.n_images_dict[vid_nr]
if self.phase in ('eval', 'test'):
return list(range(start, n_images + 1, self.frame_modulo))
return list(range(start, start + self.clip_len, self.frame_modulo))
def get_data_file(self, vid_nr, f_nr, dkey):
if dkey == 'frame':
folder = 'images'
elif dkey == 'sal':
folder = 'maps'
elif dkey == 'fix':
folder = 'fixation'
else:
raise ValueError(f'Unknown data key {dkey}')
###
img_path = str(self.dir / f'{vid_nr:04d}' / folder/ f'{f_nr:04d}.png')
return img_path
def load_data(self, vid_nr, f_nr, dkey):
read_flag = None if dkey == 'frame' else cv2.IMREAD_GRAYSCALE
data_file = self.get_data_file(vid_nr, f_nr, dkey)
if read_flag is not None:
data = cv2.imread(str(data_file), read_flag)
else:
data = cv2.imread(str(data_file))
if data is None:
raise FileNotFoundError(data_file)
if dkey == 'frame':
data = np.ascontiguousarray(data[:, :, ::-1])
if dkey == 'sal' and self.train and self.sal_offset is not None:
data += self.sal_offset
data[0, 0] = 0
return data
def preprocess_sequence(self, frame_seq, dkey, vid_nr):
transformations = []
if dkey == 'frame':
transformations.append(transforms.ToPILImage())
transformations.append(transforms.Resize(
self.out_size, interpolation=PIL.Image.LANCZOS))
transformations.append(transforms.ToTensor())
if dkey == 'frame' and 'rgb_mean' in self.preproc_cfg:
transformations.append(
transforms.Normalize(
self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std']))
elif dkey == 'sal':
transformations.append(transforms.ToPILImage())
transformations.append(transforms.Resize(
self.out_size, interpolation=PIL.Image.LANCZOS))
transformations.append(transforms.ToTensor())
transformations.append(transforms.Lambda(utils.normalize_tensor))
# elif dkey == 'fix':
# transformations.append(
# transforms.Lambda(lambda fix: torch.gt(fix, 0.5)))
##!
processing = transforms.Compose(transformations)
tensor = [processing(img) for img in frame_seq]
tensor = torch.stack(tensor)
return tensor
def get_seq(self, vid_nr, frame_nrs, dkey):
data_seq = [self.load_data(vid_nr, f_nr, dkey) for f_nr in frame_nrs]
return self.preprocess_sequence(data_seq, dkey, vid_nr)
def get_data(self, vid_nr, start):
if start is None:
max_start = self.n_images_dict[vid_nr] - self.clip_len + 1
if max_start == 1:
start = max_start
else:
start = np.random.randint(1, max_start)
frame_nrs = self.get_frame_nrs(vid_nr, start)
frame_seq = self.get_seq(vid_nr, frame_nrs, 'frame')
target_size = self.target_size_dict[vid_nr]
# if self.phase == 'test' and self.source in ('DADA2000',):
# return frame_nrs, frame_seq, target_size
sal_seq = self.get_seq(vid_nr, frame_nrs, 'sal')
fix_seq = torch.full(self.target_size, 0, dtype=torch.bool)
# fix used for nss aucj and aucs
# fix_seq = self.get_seq(vid_nr, frame_nrs, 'fix')
        # Substitute sal_seq for fix_seq
return frame_nrs, frame_seq, sal_seq, fix_seq, target_size
def __getitem__(self, item):
vid_nr, start = self.samples[item]
data = self.get_data(vid_nr, start)
return data
class DT16Dataset(Dataset, utils.KwConfigClass):
img_channels = 1
n_train_val_videos = 115
test_vid_nrs = (115, 153) #1110
frame_rate = 24
source = 'DT16'
dynamic = True
def __init__(self,
seq_len=12,
frame_modulo=4,
max_seq_len=1e6,
preproc_cfg=None,
out_size=(224, 384), phase='train', target_size=(360, 640),
debug=False, val_size=19, n_x_val=3, x_val_step=2,
x_val_seed=0, seq_per_vid=1, subset=None, verbose=1,
n_images_file='DT16_n_images.dat', seq_per_vid_val=2,
sal_offset=None):
self.phase = phase
self.train = phase == 'train'
if not self.train:
preproc_cfg = {}
elif preproc_cfg is None:
preproc_cfg = {}
preproc_cfg.update({
'rgb_mean': (0.485, 0.456, 0.406),
'rgb_std': (0.229, 0.224, 0.225),
})
self.preproc_cfg = preproc_cfg
self.out_size = out_size
self.debug = debug
self.val_size = val_size
self.n_x_val = n_x_val
self.x_val_step = x_val_step
self.x_val_seed = x_val_seed
self.seq_len = seq_len
self.seq_per_vid = seq_per_vid
self.seq_per_vid_val = seq_per_vid_val
self.frame_modulo = frame_modulo
self.clip_len = seq_len * frame_modulo
self.subset = subset
self.verbose = verbose
self.n_images_file = n_images_file
self.target_size = target_size
self.sal_offset = sal_offset
self.max_seq_len = max_seq_len
self._dir = None
self._n_images_dict = None
self.vid_nr_array = None
# Evaluation
if phase in ('eval', 'test'):
self.seq_len = int(1e6)
if self.phase in ('test',):
self.vid_nr_array = list(range(
self.test_vid_nrs[0], self.test_vid_nrs[1] + 1))
self.samples, self.target_size_dict = self.prepare_samples()
return
# Cross-validation split
n_videos = self.n_train_val_videos
assert(self.val_size <= n_videos // self.n_x_val)
assert(self.x_val_step < self.n_x_val)
vid_nr_array = np.arange(1, n_videos + 1)
if self.x_val_seed > 0:
np.random.seed(self.x_val_seed)
np.random.shuffle(vid_nr_array)
val_start = (len(vid_nr_array) - self.val_size) //\
(self.n_x_val - 1) * self.x_val_step
vid_nr_array = vid_nr_array.tolist()
if not self.train:
self.vid_nr_array =\
vid_nr_array[val_start:val_start + self.val_size]
else:
del vid_nr_array[val_start:val_start + self.val_size]
self.vid_nr_array = vid_nr_array
if self.subset is not None:
self.vid_nr_array =\
self.vid_nr_array[:int(len(self.vid_nr_array) * self.subset)]
self.samples, self.target_size_dict = self.prepare_samples()
@property
def n_images_dict(self):
if self._n_images_dict is None:
with open(config_path.parent / self.n_images_file, 'r') as f:
self._n_images_dict = {
idx + 1: int(line) for idx, line in enumerate(f)
if idx + 1 in self.vid_nr_array}
return self._n_images_dict
@property
def dir(self):
if self._dir is None:
self._dir = Path(os.environ["DT16_DATA_DIR"])
return self._dir
@property
def n_samples(self):
return len(self.vid_nr_array)
def __len__(self):
return len(self.samples)
def prepare_samples(self):
samples = []
too_short = 0
too_long = 0
for vid_nr, n_images in self.n_images_dict.items():
if self.phase in ('eval', 'test'):
samples += [
(vid_nr, offset + 1) for offset in range(self.frame_modulo)]
continue
            # Skip videos whose frame count is too small or too large
if n_images < self.clip_len:
too_short += 1
continue
if n_images // self.frame_modulo > self.max_seq_len:
too_long += 1
continue
#
if self.phase == 'train':
samples += [(vid_nr, None)] * self.seq_per_vid
continue
elif self.phase == 'valid':
x = n_images // (self.seq_per_vid_val * 2) - self.clip_len // 2
start = max(1, x)
end = min(n_images - self.clip_len, n_images - x)
samples += [
(vid_nr, int(start)) for start in
np.linspace(start, end, self.seq_per_vid_val)]
continue
        # Print basic information about the loaded dataset
if self.phase not in ('eval', 'test') and self.n_images_dict:
n_loaded = len(self.n_images_dict) - too_short - too_long
print(f"{n_loaded} videos loaded "
f"({n_loaded / len(self.n_images_dict) * 100:.1f}%)")
print(f"{too_short} videos are too short "
f"({too_short / len(self.n_images_dict) * 100:.1f}%)")
print(f"{too_long} videos are too long "
f"({too_long / len(self.n_images_dict) * 100:.1f}%)")
target_size_dict = {
vid_nr: self.target_size for vid_nr in self.n_images_dict.keys()}
return samples, target_size_dict
def get_frame_nrs(self, vid_nr, start):
n_images = self.n_images_dict[vid_nr]
if self.phase in ('eval', 'test'):
return list(range(start, n_images + 1, self.frame_modulo))
return list(range(start, start + self.clip_len, self.frame_modulo))
def get_data_file(self, vid_nr, f_nr, dkey):
if dkey == 'frame':
folder = 'images'
elif dkey == 'sal':
folder = 'maps'
elif dkey == 'fix':
folder = 'fixation'
else:
raise ValueError(f'Unknown data key {dkey}')
###
img_path = str(self.dir / f'{vid_nr:04d}' / folder/ f'{f_nr:04d}.png')
return img_path
def load_data(self, vid_nr, f_nr, dkey):
read_flag = None if dkey == 'frame' else cv2.IMREAD_GRAYSCALE
data_file = self.get_data_file(vid_nr, f_nr, dkey)
if read_flag is not None:
data = cv2.imread(str(data_file), read_flag)
else:
data = cv2.imread(str(data_file))
if data is None:
raise FileNotFoundError(data_file)
if dkey == 'frame':
data = np.ascontiguousarray(data[:, :, ::-1])
if dkey == 'sal' and self.train and self.sal_offset is not None:
data += self.sal_offset
data[0, 0] = 0
return data
def preprocess_sequence(self, frame_seq, dkey, vid_nr):
transformations = []
if dkey == 'frame':
transformations.append(transforms.ToPILImage())
transformations.append(transforms.Resize(
self.out_size, interpolation=PIL.Image.LANCZOS))
transformations.append(transforms.ToTensor())
if dkey == 'frame' and 'rgb_mean' in self.preproc_cfg:
transformations.append(
transforms.Normalize(
self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std']))
elif dkey == 'sal':
transformations.append(transforms.Lambda(utils.normalize_tensor))
# elif dkey == 'fix':
# transformations.append(
# transforms.Lambda(lambda fix: torch.gt(fix, 0.5)))
##!
processing = transforms.Compose(transformations)
tensor = [processing(img) for img in frame_seq]
tensor = torch.stack(tensor)
return tensor
def get_seq(self, vid_nr, frame_nrs, dkey):
data_seq = [self.load_data(vid_nr, f_nr, dkey) for f_nr in frame_nrs]
return self.preprocess_sequence(data_seq, dkey, vid_nr)
def get_data(self, vid_nr, start):
if start is None:
max_start = self.n_images_dict[vid_nr] - self.clip_len + 1
if max_start == 1:
start = max_start
else:
start = np.random.randint(1, max_start)
# print('vid_nr:', vid_nr, '\t start:', start)
frame_nrs = self.get_frame_nrs(vid_nr, start)
frame_seq = self.get_seq(vid_nr, frame_nrs, 'frame')
target_size = self.target_size_dict[vid_nr]
# if self.phase == 'test' and self.source in ('DReye',):
# return frame_nrs, frame_seq, target_size
sal_seq = self.get_seq(vid_nr, frame_nrs, 'sal')
fix_seq = torch.full(self.target_size, 0, dtype=torch.bool)
# fix used for nss aucj and aucs
# fix_seq = self.get_seq(vid_nr, frame_nrs, 'fix')
        # Substitute sal_seq for fix_seq
return frame_nrs, frame_seq, sal_seq, fix_seq, target_size
def __getitem__(self, item):
vid_nr, start = self.samples[item]
data = self.get_data(vid_nr, start)
return data
class BDDADataset(Dataset, utils.KwConfigClass):
img_channels = 1
n_train_val_videos = 926
test_vid_nrs = (1127, 1429) #1110
frame_rate = 30
source = 'BDDA'
dynamic = True
def __init__(self,
seq_len=12,
frame_modulo=5,
max_seq_len=1e6,
preproc_cfg=None,
out_size=(224, 384), phase='train', target_size=(360, 640),
debug=False, val_size=200, n_x_val=3, x_val_step=2,
x_val_seed=0, seq_per_vid=1, subset=None, verbose=1,
n_images_file='BDDA_n_images.dat', seq_per_vid_val=2,
sal_offset=None):
self.phase = phase
self.train = phase == 'train'
if not self.train:
preproc_cfg = {}
elif preproc_cfg is None:
preproc_cfg = {}
preproc_cfg.update({
'rgb_mean': (0.485, 0.456, 0.406),
'rgb_std': (0.229, 0.224, 0.225),
})
self.preproc_cfg = preproc_cfg
self.out_size = out_size
self.debug = debug
self.val_size = val_size
self.n_x_val = n_x_val
self.x_val_step = x_val_step
self.x_val_seed = x_val_seed
self.seq_len = seq_len
self.seq_per_vid = seq_per_vid
self.seq_per_vid_val = seq_per_vid_val
self.frame_modulo = frame_modulo
self.clip_len = seq_len * frame_modulo
self.subset = subset
self.verbose = verbose
self.n_images_file = n_images_file
self.target_size = target_size
self.sal_offset = sal_offset
self.max_seq_len = max_seq_len
self._dir = None
self._n_images_dict = None
self.vid_nr_array = None
# Evaluation
if phase in ('eval', 'test'):
self.seq_len = int(1e6)
if self.phase in ('test',):
self.vid_nr_array = list(range(
self.test_vid_nrs[0], self.test_vid_nrs[1] + 1))
self.samples, self.target_size_dict = self.prepare_samples()
return
# Cross-validation split
n_videos = self.n_train_val_videos
assert(self.val_size <= n_videos // self.n_x_val)
assert(self.x_val_step < self.n_x_val)
vid_nr_array = np.arange(1, n_videos + 1)
if self.x_val_seed > 0:
np.random.seed(self.x_val_seed)
np.random.shuffle(vid_nr_array)
val_start = (len(vid_nr_array) - self.val_size) //\
(self.n_x_val - 1) * self.x_val_step
vid_nr_array = vid_nr_array.tolist()
if not self.train:
self.vid_nr_array =\
vid_nr_array[val_start:val_start + self.val_size]
else:
del vid_nr_array[val_start:val_start + self.val_size]
self.vid_nr_array = vid_nr_array
if self.subset is not None:
self.vid_nr_array =\
self.vid_nr_array[:int(len(self.vid_nr_array) * self.subset)]
self.samples, self.target_size_dict = self.prepare_samples()
@property
def n_images_dict(self):
if self._n_images_dict is None:
with open(config_path.parent / self.n_images_file, 'r') as f:
self._n_images_dict = {
idx + 1: int(line) for idx, line in enumerate(f)
if idx + 1 in self.vid_nr_array}
return self._n_images_dict
@property
def dir(self):
if self._dir is None:
self._dir = Path(os.environ["BDDA_DATA_DIR"])
return self._dir
@property
def n_samples(self):
return len(self.vid_nr_array)
def __len__(self):
return len(self.samples)
def prepare_samples(self):
samples = []
too_short = 0
too_long = 0
for vid_nr, n_images in self.n_images_dict.items():
if self.phase in ('eval', 'test'):
samples += [
(vid_nr, offset + 1) for offset in range(self.frame_modulo)]
continue
            # Skip videos whose frame count is too small or too large
if n_images < self.clip_len:
too_short += 1
continue
if n_images // self.frame_modulo > self.max_seq_len:
too_long += 1
continue
#
if self.phase == 'train':
samples += [(vid_nr, None)] * self.seq_per_vid
continue
elif self.phase == 'valid':
x = n_images // (self.seq_per_vid_val * 2) - self.clip_len // 2
start = max(1, x)
end = min(n_images - self.clip_len, n_images - x)
samples += [
(vid_nr, int(start)) for start in
np.linspace(start, end, self.seq_per_vid_val)]
continue
        # Print basic information about the loaded dataset
if self.phase not in ('eval', 'test') and self.n_images_dict:
n_loaded = len(self.n_images_dict) - too_short - too_long
print(f"{n_loaded} videos loaded "
f"({n_loaded / len(self.n_images_dict) * 100:.1f}%)")
print(f"{too_short} videos are too short "
f"({too_short / len(self.n_images_dict) * 100:.1f}%)")
print(f"{too_long} videos are too long "
f"({too_long / len(self.n_images_dict) * 100:.1f}%)")
target_size_dict = {
vid_nr: self.target_size for vid_nr in self.n_images_dict.keys()}
return samples, target_size_dict
def get_frame_nrs(self, vid_nr, start):
n_images = self.n_images_dict[vid_nr]
if self.phase in ('eval', 'test'):
return list(range(start, n_images + 1, self.frame_modulo))
return list(range(start, start + self.clip_len, self.frame_modulo))
def get_data_file(self, vid_nr, f_nr, dkey):
if dkey == 'frame':
folder = 'images'
elif dkey == 'sal':
folder = 'new_maps'
elif dkey == 'fix':
folder = 'fixation'
else:
raise ValueError(f'Unknown data key {dkey}')
###
img_path = str(self.dir / f'{vid_nr:04d}' / folder/ f'{f_nr:04d}.png')
return img_path
def load_data(self, vid_nr, f_nr, dkey):
read_flag = None if dkey == 'frame' else cv2.IMREAD_GRAYSCALE
data_file = self.get_data_file(vid_nr, f_nr, dkey)
if read_flag is not None:
data = cv2.imread(str(data_file), read_flag)
else:
data = cv2.imread(str(data_file))
if data is None:
raise FileNotFoundError(data_file)
if dkey == 'frame':
data = np.ascontiguousarray(data[:, :, ::-1])
if dkey == 'sal' and self.train and self.sal_offset is not None:
data += self.sal_offset
data[0, 0] = 0
return data
def preprocess_sequence(self, frame_seq, dkey, vid_nr):
transformations = []
if dkey == 'frame':
transformations.append(transforms.ToPILImage())
transformations.append(transforms.Resize(
self.out_size, interpolation=PIL.Image.LANCZOS))
transformations.append(transforms.ToTensor())
if dkey == 'frame' and 'rgb_mean' in self.preproc_cfg:
transformations.append(
transforms.Normalize(
self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std']))
elif dkey == 'sal':
transformations.append(transforms.Lambda(utils.normalize_tensor))
# elif dkey == 'fix':
# transformations.append(
# transforms.Lambda(lambda fix: torch.gt(fix, 0.5)))
##!
processing = transforms.Compose(transformations)
tensor = [processing(img) for img in frame_seq]
tensor = torch.stack(tensor)
return tensor
def get_seq(self, vid_nr, frame_nrs, dkey):
data_seq = [self.load_data(vid_nr, f_nr, dkey) for f_nr in frame_nrs]
return self.preprocess_sequence(data_seq, dkey, vid_nr)
def get_data(self, vid_nr, start):
if start is None:
max_start = self.n_images_dict[vid_nr] - self.clip_len + 1
if max_start == 1:
start = max_start
else:
start = np.random.randint(1, max_start)
frame_nrs = self.get_frame_nrs(vid_nr, start)
frame_seq = self.get_seq(vid_nr, frame_nrs, 'frame')
target_size = self.target_size_dict[vid_nr]
# if self.phase == 'test' and self.source in ('DReye',):
# return frame_nrs, frame_seq, target_size
sal_seq = self.get_seq(vid_nr, frame_nrs, 'sal')
fix_seq = torch.full(self.target_size, 0, dtype=torch.bool)
# fix used for nss aucj and aucs
# fix_seq = self.get_seq(vid_nr, frame_nrs, 'fix')
        # Substitute sal_seq for fix_seq
return frame_nrs, frame_seq, sal_seq, fix_seq, target_size
def __getitem__(self, item):
vid_nr, start = self.samples[item]
data = self.get_data(vid_nr, start)
return data
|
nilq/baby-python
|
python
|
import numpy as np
from scipy import signal
def SY_PeriodVital(x):
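    # Spectral statistics of the differenced signal restricted to the 1-6 Hz band:
    # mean/max power, peak frequency, and the log power ratio Pstat = log(Pmax / Pmean).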
f1 = 1
f2 = 6
z = np.diff(x)
[F, t, p] = signal.spectrogram(z,fs = 60)
f = np.logical_and(F >= f1,F <= f2)
p = p[f]
F = F[f]
Pmean = np.mean(p)
Pmax = np.max(p)
ff = np.argmax(p)
if ff >= len(F):
Pf = np.nan
else:
Pf = F[ff]
Pr = Pmax / Pmean
Pstat = np.log(Pr)
return {'Pstat':Pstat,'Pmax':Pmax,'Pmean':Pmean,'Pf':Pf}
|
nilq/baby-python
|
python
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
import math
from typing import Optional
from typing import Tuple
import paddle
import paddle.nn.functional as F
from paddle import nn
class ResidualBlock(nn.Layer):
"""Residual block module in WaveNet."""
def __init__(
self,
kernel_size: int=3,
residual_channels: int=64,
gate_channels: int=128,
skip_channels: int=64,
aux_channels: int=80,
global_channels: int=-1,
dropout_rate: float=0.0,
dilation: int=1,
bias: bool=True,
scale_residual: bool=False, ):
"""Initialize ResidualBlock module.
Args:
kernel_size (int): Kernel size of dilation convolution layer.
            residual_channels (int): Number of channels for residual connection.
            gate_channels (int): Number of channels for the gated activation.
            skip_channels (int): Number of channels for skip connection.
            aux_channels (int): Number of local conditioning channels.
            global_channels (int): Number of global conditioning channels (-1 to disable).
            dropout_rate (float): Dropout probability.
dilation (int): Dilation factor.
bias (bool): Whether to add bias parameter in convolution layers.
scale_residual (bool): Whether to scale the residual outputs.
"""
super().__init__()
self.dropout_rate = dropout_rate
self.residual_channels = residual_channels
self.skip_channels = skip_channels
self.scale_residual = scale_residual
# check
        assert (
            kernel_size - 1) % 2 == 0, "Even kernel sizes are not supported."
assert gate_channels % 2 == 0
# dilation conv
padding = (kernel_size - 1) // 2 * dilation
self.conv = nn.Conv1D(
residual_channels,
gate_channels,
kernel_size,
padding=padding,
dilation=dilation,
bias_attr=bias, )
# local conditioning
if aux_channels > 0:
self.conv1x1_aux = nn.Conv1D(
aux_channels, gate_channels, kernel_size=1, bias_attr=False)
else:
self.conv1x1_aux = None
# global conditioning
if global_channels > 0:
self.conv1x1_glo = nn.Conv1D(
global_channels, gate_channels, kernel_size=1, bias_attr=False)
else:
self.conv1x1_glo = None
# conv output is split into two groups
gate_out_channels = gate_channels // 2
# NOTE: concat two convs into a single conv for the efficiency
# (integrate res 1x1 + skip 1x1 convs)
self.conv1x1_out = nn.Conv1D(
gate_out_channels,
residual_channels + skip_channels,
kernel_size=1,
bias_attr=bias)
def forward(
self,
x: paddle.Tensor,
x_mask: Optional[paddle.Tensor]=None,
c: Optional[paddle.Tensor]=None,
g: Optional[paddle.Tensor]=None,
) -> Tuple[paddle.Tensor, paddle.Tensor]:
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, residual_channels, T).
x_mask Optional[paddle.Tensor]: Mask tensor (B, 1, T).
c (Optional[Tensor]): Local conditioning tensor (B, aux_channels, T).
g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).
Returns:
Tensor: Output tensor for residual connection (B, residual_channels, T).
Tensor: Output tensor for skip connection (B, skip_channels, T).
"""
residual = x
x = F.dropout(x, p=self.dropout_rate, training=self.training)
x = self.conv(x)
# split into two part for gated activation
splitdim = 1
xa, xb = paddle.split(x, 2, axis=splitdim)
# local conditioning
if c is not None:
c = self.conv1x1_aux(c)
ca, cb = paddle.split(c, 2, axis=splitdim)
xa, xb = xa + ca, xb + cb
# global conditioning
if g is not None:
g = self.conv1x1_glo(g)
ga, gb = paddle.split(g, 2, axis=splitdim)
xa, xb = xa + ga, xb + gb
x = paddle.tanh(xa) * F.sigmoid(xb)
# residual + skip 1x1 conv
x = self.conv1x1_out(x)
if x_mask is not None:
x = x * x_mask
# split integrated conv results
x, s = paddle.split(
x, [self.residual_channels, self.skip_channels], axis=1)
# for residual connection
x = x + residual
if self.scale_residual:
x = x * math.sqrt(0.5)
return x, s
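# Minimal usage sketch (added for illustration, not part of the original
# module): build a ResidualBlock and run one forward pass with random tensors
# whose shapes follow the docstrings above.
if __name__ == "__main__":
    block = ResidualBlock(residual_channels=64, gate_channels=128,
                          skip_channels=64, aux_channels=80)
    x = paddle.randn([2, 64, 100])  # (B, residual_channels, T)
    c = paddle.randn([2, 80, 100])  # (B, aux_channels, T)
    res, skip = block(x, c=c)
    print(res.shape, skip.shape)  # [2, 64, 100] [2, 64, 100]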
|
nilq/baby-python
|
python
|
import random
from collections import deque
from mesh.generic.nodeConfig import NodeConfig
from mesh.generic.formationClock import FormationClock
from mesh.generic.nodeState import NodeState, LinkStatus
from mesh.generic.cmdDict import CmdDict
class NodeParams():
def __init__(self, configFile=[], config=[]):
if configFile:
self.config = NodeConfig(configFile)
elif config:
self.config = config
# Configuration update holder
self.newConfig = None
# Mesh status
self.restartTime = None
self.restartRequested = False
self.restartConfirmed = False
self.setupParams()
def setupParams(self):
self.configConfirmed = False
#self.commStartTime = None
#self.cmdRelayBuffer = []
self.cmdHistory = deque(maxlen=100) # FIFO list of last commands received
self.cmdResponse = dict()
# Initialize node status
self.initNodeStatus()
# Formation clock
self.clock = FormationClock()
def initNodeStatus(self):
# Node status
self.nodeStatus = [NodeState(node+1) for node in range(self.config.maxNumNodes)]
# Comm link status
self.linkStatus = [[LinkStatus.NoLink for i in range(self.config.maxNumNodes)] for j in range(self.config.maxNumNodes)]
def get_cmdCounter(self):
#if self.commStartTime: # time-based counter
# return int((self.clock.getTime() - self.commStartTime)*1000)
#else: # random counter
cmdCounter = random.randint(1, 65536)
# Add counter value to history
self.cmdHistory.append(cmdCounter)
return cmdCounter
def loadConfig(self, newConfig, hashValue):
'''Verify and queue new configuration for loading.'''
# Convert from protobuf to json
jsonConfig = NodeConfig.fromProtoBuf(newConfig)
jsonConfig['node']['nodeId'] = self.config.nodeId # Don't overwrite node id via update
# Create, verify, and store new configuration
newConfig = NodeConfig(configData=jsonConfig)
if (newConfig.calculateHash() == hashValue and newConfig.loadSuccess): # configuration verified
#self.newConfig = newConfig
return [True, newConfig]
else:
#self.newConfig = None
return [False, None]
def updateConfig(self):
retValue = False
if (self.newConfig and self.newConfig.loadSuccess): # load pending configuration update
print("Node " + str(self.config.nodeId) + ": Updating to new configuration")
self.config = self.newConfig
retValue = True
self.newConfig = None
return retValue
def updateStatus(self):
"""Update status information."""
self.nodeStatus[self.config.nodeId-1].status = 0
if (self.configConfirmed == True):
self.nodeStatus[self.config.nodeId-1].status += 64 # bit 6
def checkNodeLinks(self):
"""Checks status of links to other nodes."""
thisNode = self.config.nodeId - 1
for i in range(self.config.maxNumNodes):
# Check for direct link
if (self.nodeStatus[i].present and (self.clock.getTime() - self.nodeStatus[i].lastMsgRcvdTime) < self.config.commConfig['linkTimeout']):
self.linkStatus[thisNode][i] = LinkStatus.GoodLink
# Check for indirect link
elif (self.nodeStatus[i].updating == True): # state data is updating, so at least an indirect link
self.linkStatus[thisNode][i] = LinkStatus.IndirectLink
else: # no link
self.linkStatus[thisNode][i] = LinkStatus.NoLink
def addCmdResponse(self, cmdCounter, cmdResponse, sourceId):
if (cmdCounter in self.cmdResponse): # update existing responses
self.cmdResponse[cmdCounter][sourceId] = cmdResponse
else: # add new command response
self.cmdResponse[cmdCounter] = dict()
self.cmdResponse[cmdCounter][sourceId] = cmdResponse
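# Minimal usage sketch (illustrative; 'nodeConfig.json' is a hypothetical
# config path, not part of this module):
# params = NodeParams(configFile='nodeConfig.json')
# counter = params.get_cmdCounter()
# params.addCmdResponse(counter, cmdResponse=1, sourceId=2)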
|
nilq/baby-python
|
python
|
"""
Reference :
https://github.com/mattalcock/blog/blob/master/2012/12/5/python-spell-checker.rst
"""
import re
import collections
class SpellCorrect:
def __init__(self,
text=None,
files=[],
initialize=True):
self.NWORDS = collections.defaultdict(lambda: 1)
self.alphabet = 'abcdefghijklmnopqrstuvwxyz'
if initialize:
self.initialize(text, files)
def initialize(self, text, files):
for f in files:
self.train(self.words(open(f, encoding='utf-8').read()))
if isinstance(text, str):
self.train(self.words(text))
def words(self, text):
return re.findall('[a-z0-9]+', text.lower())
def train(self, features):
for f in features:
self.NWORDS[f] += 1
def edits1(self, word):
s = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in s if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in s if len(b) > 1]
replaces = [a + c + b[1:] for a, b in s for c in self.alphabet if b]
inserts = [a + c + b for a, b in s for c in self.alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(self, word):
return set(e2
for e1 in self.edits1(word)
for e2 in self.edits1(e1)
if e2 in self.NWORDS)
def known(self, words):
return set(w for w in words
if w in self.NWORDS)
def correct(self, word):
candidates = self.known([word]) or\
self.known(self.edits1(word)) or\
self.known_edits2(word) or\
[word]
return max(candidates, key=self.NWORDS.get)
    def sentence_correct(self, sentence, joined=True, ignore_case=True):
        # NOTE: with ignore_case=True the sentence is lower-cased first, so the
        # isupper() check below (meant to leave acronyms alone) never fires
        if ignore_case:
            sentence = sentence.lower()
if joined:
sentence = sentence.split()
sent = [word.lower()
if word.isupper()
else self.correct(word.lower())
for word in sentence]
return " ".join(sent)
|
nilq/baby-python
|
python
|
ERROR_CODES = {
0: "EFW_SUCCESS",# = 0,
1: "EFW_ERROR_INVALID_INDEX",#,
3: "EFW_ERROR_INVALID_ID",#,
4: "EFW_ERROR_INVALID_VALUE",#,
5: "EFW_ERROR_REMOVED",#, //failed to find the filter wheel, maybe the filter wheel has been removed
6: "EFW_ERROR_MOVING",#,//filter wheel is moving
7: "EFW_ERROR_ERROR_STATE",#,//filter wheel is in error state
8: "EFW_ERROR_GENERAL_ERROR",#,//other error
9: "EFW_ERROR_NOT_SUPPORTED",#,
10: "EFW_ERROR_CLOSED",#,
-1: "EFW_ERROR_END",# = -1
}
class EFWError(IOError):
def __init__(self, errno):
self.errno = errno
    def __str__(self):
        # .get() keeps __str__ from raising a KeyError on unknown error codes
        return f'EFWError {self.errno}: {ERROR_CODES.get(self.errno, "UNKNOWN")}'
@classmethod
def from_errno(cls, errno):
return cls(errno)
def check_error(errno):
if errno != 0:
raise EFWError.from_errno(errno)
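# Minimal usage sketch (illustrative):
if __name__ == "__main__":
    check_error(0)      # success, no exception
    try:
        check_error(6)
    except EFWError as e:
        print(e)        # EFWError 6: EFW_ERROR_MOVING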
|
nilq/baby-python
|
python
|
from django.http import Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from seaserv import get_repo, is_passwd_set
from winguhub.utils import check_and_get_org_by_repo, check_and_get_org_by_group
def sys_staff_required(func):
"""
Decorator for views that checks the user is system staff.
"""
def _decorated(request, *args, **kwargs):
if request.user.is_staff:
return func(request, *args, **kwargs)
raise Http404
return _decorated
# def ctx_switch_required(func):
# """
# Decorator for views to change navigation bar automatically that render
# same template when both in org context and personal context.
# """
# def _decorated(request, *args, **kwargs):
# if not request.cloud_mode:
# # no need to switch context when `CLOUD_MODE` is false
# request.user.org = None
# request.base_template = 'myhome_base.html'
# return func(request, *args, **kwargs)
# repo_id = kwargs.get('repo_id', '')
# group_id = kwargs.get('group_id', '')
# if repo_id and group_id:
# return func(request, *args, **kwargs)
# if not repo_id and not group_id:
# return func(request, *args, **kwargs)
# user = request.user.username
# if repo_id:
# org, base_template = check_and_get_org_by_repo(repo_id, user)
# if group_id:
# org, base_template = check_and_get_org_by_group(int(group_id), user)
# if org:
# request.user.org = org._dict
# else:
# request.user.org = None
# request.base_template = base_template
# return func(request, *args, **kwargs)
# return _decorated
def repo_passwd_set_required(func):
"""
Decorator for views to redirect user to repo decryption page if repo is
encrypt and password is not set by user.
"""
def _decorated(request, *args, **kwargs):
repo_id = kwargs.get('repo_id', None)
if not repo_id:
            raise Exception('Repo id is not found in url.')
repo = get_repo(repo_id)
if not repo:
raise Http404
username = request.user.username
if repo.encrypted and not is_passwd_set(repo_id, username):
            # Redirect user to the decrypt-repo page.
return render_to_response('decrypt_repo_form.html', {
'repo': repo,
'next': request.get_full_path(),
}, context_instance=RequestContext(request))
return func(request, *args, **kwargs)
return _decorated
|
nilq/baby-python
|
python
|
from dataclasses import dataclass
@dataclass
class CheckpointCallback:
_target_: str = "pytorch_lightning.callbacks.ModelCheckpoint"
monitor: str = "loss/Validation"
save_top_k: int = 1
save_last: bool = True
mode: str = "min"
verbose: bool = False
dirpath: str = "./logs/checkpoints/" # use relative path, so it can be adjusted by hydra
filename: str = "{epoch:02d}"
@dataclass
class GPUMonitur:
_target_: str = "pytorch_lightning.callbacks.DeviceStatsMonitor"
@dataclass
class EarlyStoppingCallback:
_target_: str = "pytorch_lightning.callbacks.EarlyStopping"
monitor: str = "Accuracy/Validation"
min_delta: float = 0.00
patience: int = 20
verbose: bool = True
mode: str = "max"
@dataclass
class LRMonitor:
_target_: str = "pytorch_lightning.callbacks.lr_monitor.LearningRateMonitor"
logging_interval: str = "step"
|
nilq/baby-python
|
python
|
import redis
from twisted.python import log
def open_redis(config):
global redis_pool, redis_info
host = config.get("redis", "host")
port = int(config.get("redis", "port"))
socket = config.get("redis", "socket")
redis_info = ( host, port, socket )
if socket != "":
redis_pool = redis.ConnectionPool(
connection_class = redis.connection.UnixDomainSocketConnection,
path = socket
)
else:
redis_pool = redis.ConnectionPool(
host = host,
port = port,
db = 0
)
def get_redis():
global redis_pool
return redis.StrictRedis(connection_pool = redis_pool)
def get_redis_pubsub():
global redis_info
host, port, socket = redis_info
if socket != "":
conn = redis.StrictRedis(
connection_class = redis.connection.UnixDomainSocketConnection,
path = socket
)
else:
conn = redis.StrictRedis(
host = host,
port = port,
db = 0
)
return conn.pubsub()
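# Minimal usage sketch (illustrative; assumes a configparser-style object with
# a [redis] section, matching the config.get("redis", ...) calls above):
# import configparser
# config = configparser.ConfigParser()
# config.read_dict({"redis": {"host": "localhost", "port": "6379", "socket": ""}})
# open_redis(config)
# get_redis().set("greeting", "hello")
# print(get_redis().get("greeting"))  # b'hello'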
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine data model (schema) definition for Quiz."""
# Python imports
import base64
import logging
import md5
import operator
import os
import random  # needed by QuestionModel.dump_to_dict
import re
import time
# AppEngine imports
from google.appengine.ext import db
from google.appengine.api import memcache
class QuizBaseModel(db.Model):
"""Base class for quiz models."""
class QuizTrunkModel(QuizBaseModel):
"""Maintains trunk for quiz model.
Attributes:
    head: Maintains the head of a quiz.
"""
head = db.StringProperty()
class QuizRevisionModel(QuizBaseModel):
"""Maintains list of revisions for a quiz.
  Quiz trunk associated with the revision is made the parent of the model.
Attributes:
quiz_id: Id (key) for particular version of the quiz.
time_stamp: Time_stamp for a new revision.
commit_message: Commit message associated with new version.
"""
quiz_id = db.StringProperty()
time_stamp = db.DateTimeProperty(auto_now=True)
  commit_message = db.StringProperty(default='Committing a new version')
class QuizPropertyModel(QuizBaseModel):
"""Defines various properties for a quiz.
Attributes:
    shuffle_questions: If set, questions are presented in random order.
    min_options: Minimum number of options to be presented.
    max_options: Maximum number of options to be presented.
    min_questions: Minimum number of questions required to complete the quiz.
      Used to track the progress.
    repeat_questions: If set, questions are repeated.
    repeat_wrongly_answered_questions: If set, wrongly answered questions are
      repeated.
"""
shuffle_questions = db.BooleanProperty(default=True)
min_options = db.IntegerProperty(default=2)
max_options = db.IntegerProperty(default=10) # 0 implies all
min_questions = db.IntegerProperty(default=0) # 0 implies all
repeat_questions = db.BooleanProperty(default=False)
repeat_wrongly_answered_questions = db.BooleanProperty(default=False)
class QuizModel(QuizBaseModel):
"""Represents a quiz.
Attributes:
difficulty_level: Difficulty level for the quiz (range 0-10).
    quiz_property: Reference to the property associated with the quiz.
    title: Title of the quiz.
    tags: Tags associated with the quiz.
    trunk: Reference to the trunk associated with the quiz.
    introduction: Introduction text shown on the quiz start page.
"""
# implicit id
difficulty_level = db.RatingProperty(default=5)
quiz_property = db.ReferenceProperty(QuizPropertyModel)
title = db.StringProperty()
tags = db.ListProperty(db.Category)
trunk = db.ReferenceProperty(QuizTrunkModel)
introduction = db.StringProperty()
class ChoiceModel(QuizBaseModel):
"""Represents a choice/option provided to user for a question model.
Attributes:
body: Body of the choice.
message: Message to be displayed when choice is selected.
May act like a hint.
is_correct: If the choice selected is correct.
"""
# implicit id
body = db.TextProperty()
message = db.StringProperty()
is_correct = db.BooleanProperty(default=False)
def dump_to_dict(self):
"""Dumps choice to a dictionary for passing around as JSON object."""
data_dict = {'body': self.body,
'id': str(self.key())}
return data_dict
class QuestionModel(QuizBaseModel):
"""Represents a question.
Attributes:
    body: Text associated with the question.
    choices: List of possible choices.
    shuffle_choices: If set, choices are randomly shuffled.
    hints: Ordered list of progressive hints.
"""
# implicit id
body = db.TextProperty()
choices = db.ListProperty(db.Key)
shuffle_choices = db.BooleanProperty(default=True)
hints = db.StringListProperty()
def dump_to_dict(self):
"""Dumps the question model to a dictionary for passing
around as JSON object."""
data_dict = {'id': str(self.key()),
'body': self.body,
'hints': self.hints,
'choices': [db.get(el).dump_to_dict() for el in self.choices]
}
    if self.shuffle_choices and data_dict['choices']:
      # random.shuffle shuffles in place and returns None, so do not
      # assign its result back to data_dict['choices']
      random.shuffle(data_dict['choices'])
return data_dict
class QuizQuestionListModel(QuizBaseModel):
"""Maintains a list of question with its quiz id.
  This is necessary because questions may be shared between different quizzes.
Attributes:
quiz: Reference to quiz object.
    question: Reference to the question object associated with the quiz.
time_stamp: Time stamp.
"""
quiz = db.ReferenceProperty(QuizModel)
question = db.ReferenceProperty(QuestionModel)
time_stamp = db.DateTimeProperty(auto_now_add=True)
class ResponseModel(QuizBaseModel):
"""Stores response data required for producing next question.
Attributes:
session_id: Session Identifier.
answered_correctly: Set if the response resulted in correct answer.
question: Reference to question being answered.
quiz: Reference to associated quiz.
quiz_trunk: Reference to associated quiz trunk.
time_stamp: Time stamp of the response
attempts: Number of attempts so far, useful for scoring.
"""
session_id = db.StringProperty(required=True)
  answered_correctly = db.BooleanProperty()
question = db.ReferenceProperty(QuestionModel)
quiz = db.ReferenceProperty(QuizModel)
quiz_trunk = db.ReferenceProperty(QuizTrunkModel)
time_stamp = db.DateTimeProperty(auto_now=True)
attempts = db.IntegerProperty(default=0)
class QuizScoreModel(QuizBaseModel):
"""Stores progress status associated with a quiz and session.
Both score and progress are out of 100.
Attributes:
session_id: Session Identifier.
quiz: Reference to associated quiz.
quiz_trunk: Reference to associated quiz trunk.
score: Current score.
progress: Current progress status
questions_attempted: Number of questions attempted so far.
"""
quiz_trunk = db.ReferenceProperty(QuizTrunkModel)
session_id = db.StringProperty(required=True)
quiz = db.ReferenceProperty(QuizModel)
score = db.FloatProperty(default=0.0)
progress = db.FloatProperty(default=0.0)
questions_attempted = db.IntegerProperty(default=0)
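# Minimal usage sketch (illustrative; requires an App Engine runtime/datastore):
# prop = QuizPropertyModel(min_questions=5)
# prop.put()
# quiz = QuizModel(title='Sample quiz', quiz_property=prop,
#                  introduction='A short demo quiz')
# quiz.put()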
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standard set of plugins."""
import base64
import datetime
import os
import sys
import netaddr
from oslo_config import cfg
from oslo_utils import units
import six
from ironic_inspector.common.i18n import _, _LC, _LE, _LI, _LW
from ironic_inspector import conf
from ironic_inspector.plugins import base
from ironic_inspector import utils
CONF = cfg.CONF
LOG = utils.getProcessingLogger('ironic_inspector.plugins.standard')
class RootDiskSelectionHook(base.ProcessingHook):
"""Smarter root disk selection using Ironic root device hints.
This hook must always go before SchedulerHook, otherwise root_disk field
might not be updated.
"""
def before_update(self, introspection_data, node_info, **kwargs):
"""Detect root disk from root device hints and IPA inventory."""
hints = node_info.node().properties.get('root_device')
if not hints:
LOG.debug('Root device hints are not provided',
node_info=node_info, data=introspection_data)
return
inventory = introspection_data.get('inventory')
if not inventory:
raise utils.Error(
_('Root device selection requires ironic-python-agent '
'as an inspection ramdisk'),
node_info=node_info, data=introspection_data)
disks = inventory.get('disks', [])
if not disks:
raise utils.Error(_('No disks found'),
node_info=node_info, data=introspection_data)
for disk in disks:
properties = disk.copy()
# Root device hints are in GiB, data from IPA is in bytes
properties['size'] //= units.Gi
for name, value in hints.items():
actual = properties.get(name)
if actual != value:
LOG.debug('Disk %(disk)s does not satisfy hint '
'%(name)s=%(value)s, actual value is %(actual)s',
{'disk': disk.get('name'), 'name': name,
'value': value, 'actual': actual},
node_info=node_info, data=introspection_data)
break
else:
LOG.debug('Disk %(disk)s of size %(size)s satisfies '
'root device hints',
{'disk': disk.get('name'), 'size': disk['size']},
node_info=node_info, data=introspection_data)
introspection_data['root_disk'] = disk
return
raise utils.Error(_('No disks satisfied root device hints'),
node_info=node_info, data=introspection_data)
class SchedulerHook(base.ProcessingHook):
"""Nova scheduler required properties."""
KEYS = ('cpus', 'cpu_arch', 'memory_mb', 'local_gb')
def before_update(self, introspection_data, node_info, **kwargs):
"""Update node with scheduler properties."""
inventory = introspection_data.get('inventory')
errors = []
root_disk = introspection_data.get('root_disk')
if root_disk:
introspection_data['local_gb'] = root_disk['size'] // units.Gi
if CONF.processing.disk_partitioning_spacing:
introspection_data['local_gb'] -= 1
elif inventory:
errors.append(_('root disk is not supplied by the ramdisk and '
'root_disk_selection hook is not enabled'))
if inventory:
try:
introspection_data['cpus'] = int(inventory['cpu']['count'])
introspection_data['cpu_arch'] = six.text_type(
inventory['cpu']['architecture'])
except (KeyError, ValueError, TypeError):
errors.append(_('malformed or missing CPU information: %s') %
inventory.get('cpu'))
try:
introspection_data['memory_mb'] = int(
inventory['memory']['physical_mb'])
except (KeyError, ValueError, TypeError):
errors.append(_('malformed or missing memory information: %s; '
'introspection requires physical memory size '
'from dmidecode') %
inventory.get('memory'))
else:
LOG.warning(_LW('No inventory provided: using old bash ramdisk '
'is deprecated, please switch to '
'ironic-python-agent'),
node_info=node_info, data=introspection_data)
missing = [key for key in self.KEYS
if not introspection_data.get(key)]
if missing:
raise utils.Error(
_('The following required parameters are missing: %s') %
missing,
node_info=node_info, data=introspection_data)
if errors:
            raise utils.Error(_('The following problems were encountered: %s') %
                              '; '.join(errors),
                              node_info=node_info, data=introspection_data)
LOG.info(_LI('Discovered data: CPUs: %(cpus)s %(cpu_arch)s, '
'memory %(memory_mb)s MiB, disk %(local_gb)s GiB'),
{key: introspection_data.get(key) for key in self.KEYS},
node_info=node_info, data=introspection_data)
overwrite = CONF.processing.overwrite_existing
properties = {key: str(introspection_data[key])
for key in self.KEYS if overwrite or
not node_info.node().properties.get(key)}
node_info.update_properties(**properties)
class ValidateInterfacesHook(base.ProcessingHook):
"""Hook to validate network interfaces."""
def __init__(self):
if CONF.processing.add_ports not in conf.VALID_ADD_PORTS_VALUES:
LOG.critical(_LC('Accepted values for [processing]add_ports are '
'%(valid)s, got %(actual)s'),
{'valid': conf.VALID_ADD_PORTS_VALUES,
'actual': CONF.processing.add_ports})
sys.exit(1)
if CONF.processing.keep_ports not in conf.VALID_KEEP_PORTS_VALUES:
LOG.critical(_LC('Accepted values for [processing]keep_ports are '
'%(valid)s, got %(actual)s'),
{'valid': conf.VALID_KEEP_PORTS_VALUES,
'actual': CONF.processing.keep_ports})
sys.exit(1)
def _get_interfaces(self, data=None):
"""Convert inventory to a dict with interfaces.
:return: dict interface name -> dict with keys 'mac' and 'ip'
"""
result = {}
inventory = data.get('inventory', {})
if inventory:
for iface in inventory.get('interfaces', ()):
name = iface.get('name')
mac = iface.get('mac_address')
ip = iface.get('ipv4_address')
if not name:
LOG.error(_LE('Malformed interface record: %s'),
iface, data=data)
continue
LOG.debug('Found interface %(name)s with MAC "%(mac)s" and '
'IP address "%(ip)s"',
{'name': name, 'mac': mac, 'ip': ip}, data=data)
result[name] = {'ip': ip, 'mac': mac}
else:
LOG.warning(_LW('No inventory provided: using old bash ramdisk '
'is deprecated, please switch to '
'ironic-python-agent'), data=data)
result = data.get('interfaces')
return result
def _validate_interfaces(self, interfaces, data=None):
"""Validate interfaces on correctness and suitability.
:return: dict interface name -> dict with keys 'mac' and 'ip'
"""
if not interfaces:
raise utils.Error(_('No interfaces supplied by the ramdisk'),
data=data)
pxe_mac = utils.get_pxe_mac(data)
if not pxe_mac and CONF.processing.add_ports == 'pxe':
LOG.warning(_LW('No boot interface provided in the introspection '
'data, will add all ports with IP addresses'))
result = {}
for name, iface in interfaces.items():
mac = iface.get('mac')
ip = iface.get('ip')
if not mac:
LOG.debug('Skipping interface %s without link information',
name, data=data)
continue
if not utils.is_valid_mac(mac):
LOG.warning(_LW('MAC %(mac)s for interface %(name)s is not '
'valid, skipping'),
{'mac': mac, 'name': name},
data=data)
continue
mac = mac.lower()
if name == 'lo' or (ip and netaddr.IPAddress(ip).is_loopback()):
LOG.debug('Skipping local interface %s', name, data=data)
continue
if (CONF.processing.add_ports == 'pxe' and pxe_mac
and mac != pxe_mac):
LOG.debug('Skipping interface %s as it was not PXE booting',
name, data=data)
continue
elif CONF.processing.add_ports != 'all' and not ip:
LOG.debug('Skipping interface %s as it did not have '
'an IP address assigned during the ramdisk run',
name, data=data)
continue
result[name] = {'ip': ip, 'mac': mac.lower()}
if not result:
raise utils.Error(_('No suitable interfaces found in %s') %
interfaces, data=data)
return result
def before_processing(self, introspection_data, **kwargs):
"""Validate information about network interfaces."""
bmc_address = utils.get_ipmi_address_from_data(introspection_data)
if bmc_address:
introspection_data['ipmi_address'] = bmc_address
else:
LOG.debug('No BMC address provided in introspection data, '
'assuming virtual environment', data=introspection_data)
all_interfaces = self._get_interfaces(introspection_data)
interfaces = self._validate_interfaces(all_interfaces,
introspection_data)
LOG.info(_LI('Using network interface(s): %s'),
', '.join('%s %s' % (name, items)
for (name, items) in interfaces.items()),
data=introspection_data)
introspection_data['all_interfaces'] = all_interfaces
introspection_data['interfaces'] = interfaces
valid_macs = [iface['mac'] for iface in interfaces.values()]
introspection_data['macs'] = valid_macs
def before_update(self, introspection_data, node_info, **kwargs):
"""Drop ports that are not present in the data."""
if CONF.processing.keep_ports == 'present':
expected_macs = {
iface['mac']
for iface in introspection_data['all_interfaces'].values()
}
elif CONF.processing.keep_ports == 'added':
expected_macs = set(introspection_data['macs'])
else:
return
# list is required as we modify underlying dict
for port in list(node_info.ports().values()):
if port.address not in expected_macs:
LOG.info(_LI("Deleting port %(port)s as its MAC %(mac)s is "
"not in expected MAC list %(expected)s"),
{'port': port.uuid,
'mac': port.address,
'expected': list(sorted(expected_macs))},
node_info=node_info, data=introspection_data)
node_info.delete_port(port)
class RamdiskErrorHook(base.ProcessingHook):
"""Hook to process error send from the ramdisk."""
DATETIME_FORMAT = '%Y.%m.%d_%H.%M.%S_%f'
def before_processing(self, introspection_data, **kwargs):
error = introspection_data.get('error')
logs = introspection_data.get('logs')
if error or CONF.processing.always_store_ramdisk_logs:
if logs:
self._store_logs(logs, introspection_data)
else:
LOG.debug('No logs received from the ramdisk',
data=introspection_data)
if error:
raise utils.Error(_('Ramdisk reported error: %s') % error,
data=introspection_data)
def _store_logs(self, logs, introspection_data):
if not CONF.processing.ramdisk_logs_dir:
LOG.warning(
_LW('Failed to store logs received from the ramdisk '
'because ramdisk_logs_dir configuration option '
'is not set'),
data=introspection_data)
return
if not os.path.exists(CONF.processing.ramdisk_logs_dir):
os.makedirs(CONF.processing.ramdisk_logs_dir)
time_fmt = datetime.datetime.utcnow().strftime(self.DATETIME_FORMAT)
bmc_address = introspection_data.get('ipmi_address', 'unknown')
file_name = 'bmc_%s_%s' % (bmc_address, time_fmt)
with open(os.path.join(CONF.processing.ramdisk_logs_dir, file_name),
'wb') as fp:
fp.write(base64.b64decode(logs))
LOG.info(_LI('Ramdisk logs stored in file %s'), file_name,
data=introspection_data)
|
nilq/baby-python
|
python
|
def content_length_check(content, allow_short=False):
    maxlen = 40000
    if len(content) > maxlen:
        raise Exception('content too long {}/{}'.format(len(content), maxlen))
    if len(content) == 0 or (len(content) < 2 and not allow_short):
        raise Exception('content too short')
def title_length_check(title):
    if len(title) > 140:
        raise Exception('title too long')
    if len(title) < 2:
        raise Exception('title too short')
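# Minimal usage sketch (illustrative):
if __name__ == "__main__":
    content_length_check("hello world")       # passes silently
    title_length_check("A reasonable title")  # passes silently
    try:
        title_length_check("x")
    except Exception as e:
        print(e)  # title too short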
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 14 17:36:13 2019
@author: Mangifera
"""
import seaborn as sns
import pandas as pd
from scipy import stats
def is_it_random(filename):
with open(filename, "r") as text_file:
demon = text_file.read()
demon = [int(x) for x in demon.split('\n')]
occurrence = {}
for i in demon:
if i in occurrence:
occurrence[i] += 1
else:
occurrence[i] = 1
return occurrence
def make_df(filename_ctrl, filename_sample):
# occurrence_ctrl = is_it_random(filename_ctrl)
# occurrences_ctrl = pd.DataFrame.from_dict(occurrence_ctrl, orient = "index", columns=['rolls_ctrl'])
# occurrences_ctrl = occurrences_ctrl.reset_index()
# occurrences_ctrl = occurrences_ctrl.rename(index=str, columns={"index": "die_side"})
occurrence_samp = is_it_random(filename_sample)
occurrences_samp = pd.DataFrame.from_dict(occurrence_samp, orient = "index", columns=['rolls_samp'])
occurrences_samp = occurrences_samp.reset_index()
occurrences_samp = occurrences_samp.rename(index=str, columns={"index": "die_side"})
# occurrences = pd.merge(occurrences_ctrl, occurrences_samp, on='die_side')
max_die_no = max(occurrences_samp['die_side'])
total_rolls = sum(occurrence_samp.values())
uniform_prediction = total_rolls/max_die_no
occurrences = occurrences_samp.set_index("die_side")
occurrences['uniform_dist'] = pd.Series(uniform_prediction, index=occurrences.index)
sns.set(style="whitegrid")
ax = sns.barplot(x=occurrences.index, y="rolls_samp", data=occurrences)
chi2 = stats.chi2_contingency(occurrences)
chi_square_stat = chi2[0]
p_value = chi2[1]
degrees_of_freedom = chi2[2]
print (f"chi_square_stat: {chi_square_stat}, p-value: {p_value}, degrees_of_freedom: {degrees_of_freedom}")
filename_sample = "actual_data_yeenoghu.txt"
filename_ctrl = "yeenoghu_my_pc.txt"
z = make_df(filename_ctrl, filename_sample)
|
nilq/baby-python
|
python
|
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect
from sqlalchemy import func, desc
from matplotlib.ticker import NullFormatter
import matplotlib.dates as mdates
from datetime import datetime, timedelta
import seaborn as sns
from flask import Flask, jsonify
import datetime as dt
engine = create_engine("sqlite:///belly_button_biodiversity.sqlite", echo=False)
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
Otu = Base.classes.otu
Samples = Base.classes.samples
Samples_MD = Base.classes.samples_metadata
session = Session(engine)
def get_sample_names():
samples_query = session.query(Samples)
samples_df = pd.read_sql(samples_query.statement, samples_query.session.bind)
return list(samples_df.columns[1:])
def otu_descriptions():
otu_query = session.query(Otu)
otu_df = pd.read_sql(otu_query.statement, otu_query.session.bind)
return list(otu_df['lowest_taxonomic_unit_found'].values)
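# Minimal usage sketch (illustrative; runs against the sqlite database
# reflected above):
if __name__ == "__main__":
    print(get_sample_names()[:5])
    print(otu_descriptions()[:5])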
|
nilq/baby-python
|
python
|
import sys
import socket
import threading
class Server:
def __init__(self, hostname='localhost', port=8080):
self.host = hostname
self.port = port
self.clients = []
        # create a TCP socket
        self.socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
        # bind the socket to the server address and port
        self.socket.bind((self.host, self.port))
        # listen for incoming client connections
        self.socket.listen(10)
        # make the socket non-blocking
        self.socket.setblocking(False)
        # create the threads that accept and process connections
self.create_threads()
        # main thread
while True:
message = input('=> ')
if message == 'exit':
                # close the connection
self.socket.close()
sys.exit()
def create_threads(self):
        '''
        Creates the threads that accept and process connections.
        '''
accept_connection_thread = threading.Thread(target=self.accept_connection)
process_connection_thread = threading.Thread(target=self.process_connection)
accept_connection_thread.daemon = True
accept_connection_thread.start()
process_connection_thread.daemon = True
process_connection_thread.start()
def message_to_all(self, message, client):
        '''
        Sends a message to all connected clients.
        '''
for _client in self.clients:
try:
if _client != client:
_client.send(message)
except:
self.clients.remove(_client)
def accept_connection(self):
        '''
        Accepts client connections and stores them.
        '''
while True:
try:
connection, address = self.socket.accept()
connection.setblocking(False)
self.clients.append(connection)
except:
pass
def process_connection(self):
        '''
        Iterates over the client list to detect incoming messages.
        '''
while True:
if len(self.clients) > 0:
for client in self.clients:
try:
data = client.recv(1024)
if data:
self.message_to_all(data, client)
except:
pass
def main():
if len(sys.argv) == 3:
hostname = str(sys.argv[1])
port = int(sys.argv[2])
server = Server(hostname, port)
elif len(sys.argv) == 1:
server = Server()
else:
        print('You must provide the server address and port')
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from Server.models.business.ListenThread import ListenThread
listenThread = ListenThread()
listenThread.main_execution()
|
nilq/baby-python
|
python
|
'''
Description of how to produce the metadata file.
'''
input_filter = None
treename = 'deepntuplizer/tree'
reweight_events = -1
reweight_bins = [list(range(200, 2051, 50)), [-10000, 10000]]
metadata_events = 1000000
selection = '''jet_tightId \
&& ( !label_H_cc )'''
# && ( (sample_isQCD && fj_isQCD) || (!sample_isQCD && !fj_isQCD)) \
var_groups = {
# 'group_name': ( ('regex1', 'regex2', ...), list_length )
'fjvars': (('fj_sdmass',), None),
}
var_blacklist = [
'fj_gen_pt',
'fj_gen_eta',
]
var_no_transform_branches = [
'fj_labelJMAR', 'fjJMAR_gen_pt', 'fjJMAR_gen_eta', 'fjJMAR_gen_pdgid',
'fj_label',
'fj_isQCD', 'fj_isTop', 'fj_isW', 'fj_isZ', 'fj_isH',
'npv',
'n_pfcands', 'n_tracks', 'n_sv',
'fj_pt', 'fj_eta', 'fj_phi', 'fj_mass',
'fj_n_sdsubjets',
'fjPuppi_tau21', 'fjPuppi_tau32', 'fjPuppi_corrsdmass',
'fj_doubleb', 'pfCombinedInclusiveSecondaryVertexV2BJetTags',
"fj_tau21",
"fj_tau32",
"fj_sdmass",
"fj_sdsj1_pt",
"fj_sdsj1_eta",
"fj_sdsj1_phi",
"fj_sdsj1_mass",
"fj_sdsj1_csv",
"fj_sdsj1_ptD",
"fj_sdsj1_axis1",
"fj_sdsj1_axis2",
"fj_sdsj1_mult",
"fj_sdsj2_pt",
"fj_sdsj2_eta",
"fj_sdsj2_phi",
"fj_sdsj2_mass",
"fj_sdsj2_csv",
"fj_sdsj2_ptD",
"fj_sdsj2_axis1",
"fj_sdsj2_axis2",
"fj_sdsj2_mult",
"fj_ptDR",
"fj_relptdiff",
"fj_sdn2",
'fj_z_ratio',
'fj_trackSipdSig_3',
'fj_trackSipdSig_2',
'fj_trackSipdSig_1',
'fj_trackSipdSig_0',
'fj_trackSipdSig_1_0',
'fj_trackSipdSig_0_0',
'fj_trackSipdSig_1_1',
'fj_trackSipdSig_0_1',
'fj_trackSip2dSigAboveCharm_0',
'fj_trackSip2dSigAboveBottom_0',
'fj_trackSip2dSigAboveBottom_1',
'fj_tau1_trackEtaRel_0',
'fj_tau1_trackEtaRel_1',
'fj_tau1_trackEtaRel_2',
'fj_tau0_trackEtaRel_0',
'fj_tau0_trackEtaRel_1',
'fj_tau0_trackEtaRel_2',
'fj_tau_vertexMass_0',
'fj_tau_vertexEnergyRatio_0',
'fj_tau_vertexDeltaR_0',
'fj_tau_flightDistance2dSig_0',
'fj_tau_vertexMass_1',
'fj_tau_vertexEnergyRatio_1',
'fj_tau_flightDistance2dSig_1',
'fj_jetNTracks',
'fj_nSV',
]
# label_list = ['fj_isQCD', 'fj_isTop', 'fj_isW', 'fj_isZ', 'fj_isH']
label_list = ['label_Top_bcq', 'label_Top_bqq', 'label_Top_bc', 'label_Top_bq',
'label_W_cq', 'label_W_qq',
'label_Z_bb', 'label_Z_cc', 'label_Z_qq',
'label_H_bb', 'label_H_qqqq',
'label_QCD_bb', 'label_QCD_cc', 'label_QCD_b', 'label_QCD_c', 'label_QCD_others',
]
reweight_var = ['fj_pt', 'fj_sdmass']
reweight_classes = ['fj_isTop', 'fj_isW', 'fj_isZ', 'fj_isH', 'fj_isQCD']
reweight_method = 'flat'
var_img = None
var_pos = None
n_pixels = None
img_ranges = None
|
nilq/baby-python
|
python
|
from argparse import ArgumentTypeError
import numpy as np
from PIL import Image
from convolution_functions import apply_filter, filters
debug_mode = False
"""
Seznam pouzitelnych funkci pro tento program na upravu obrazku.
Pro pridani fuknce ji napiste zde, a pridejte do action_dict (seznam pouzitelnych fci)
a pote ji udelejte CLI callable v Main pres add_argument.
"""
def read_image(file_name: str) -> np.array:
"""
pomocna funkce na nacteni obrazku
:param file_name: cesta k souboru
:return: numpy array, pripravene na upravy pomoci nasich funkcni
"""
return np.asarray(Image.open(file_name), dtype=np.int32)
def save_image(array, file_path):
"""
pomocna funkce na ulozeni obrazku, sama prevede pole z int32 na unit8 a ulozi
:param array:
:param file_path:
:return:
"""
out = array.astype("uint8")
Image.fromarray(out).save(file_path)
def percentage(val):
"""
Vlastni datovy typ pro argparse, pouze kontroluje zda uzivatel zadal cislo vetsi nez nula
:param val: vstup z argparse
:return: int v rozmezi 0 - 100 (bez upravy)
"""
try:
n = int(val)
if 0 <= n:
return n
else:
msg = "Cislo nemuze byt mensi nez nula"
raise ArgumentTypeError(msg)
except ValueError:
msg = 'Zadaný vstup se nepodařilo převést na číslo!'
raise ArgumentTypeError(msg)
"""
image edit functions
"""
def do_rotate(np_image, args=None):
out = np.rot90(np_image)
if debug_mode:
print("a do_rotate")
return out
def do_mirror(np_image, args=None):
assert np_image.ndim > 1
out = np_image[::, ::-1]
if debug_mode:
print("a do_mirror")
return out
def do_inverse(np_image, args=None):
"""
funkce inverze barev (z cerne se stane bila apod).
:param np_image: numpy obrazek co chceme upravit
:param args: Neni zde potreba, pouze pro kompabilitu
:return: upraaveny obrazek v Numpy array
"""
if len(np_image.shape) > 2:
out = np.abs(np_image[::, ::, 0:min(np_image.shape[2], 3)] - 255)
else:
out = np.abs(np_image - 255)
if debug_mode:
print("a do_inverse")
return out
def do_bw(np_image, args=None):
"""
funkce do prevodu sedi, pouzivame ITU-R 601-2 luma vzorec.
:param np_image: numpy obrazek co chceme upravit
:param args: Neni zde potreba, pouze pro kompabilitu
:return: upraaveny obrazek v Numpy array
"""
if np_image.ndim is not 3: # obrazek je uz v grayscale, takze neni treba ho opakovat
print("Jiz ve stupni sedi, redudantni --bw")
return np_image
result_red = (np_image[::, ::, 0] * 0.299)
result_green = (np_image[::, ::, 1] * 0.587)
result_blue = (np_image[::, ::, 2] * 0.114)
final = (result_red + result_green + result_blue)
if debug_mode:
print("a do_bw")
return final
def do_lighten(np_image, args):
"""
funkce ktera zesvetla vsechny pixely o dane procento
:param np_image: numpy obrazek co chceme upravit
:param args: Bere z argparseru lighten value
:return: upraaveny obrazek v Numpy array
"""
if args is None:
raise ValueError
value = args.lighten.pop(0)
# vime ze 100% = 1, 50% = 0.5, proto prenasobime a pricteme 1 abychom obrazek omylem neztmavili
percentil_value = (value * 0.01) + 1
if len(np_image.shape) > 2:
out = np.minimum(np_image[::, ::, 0:min(np_image.shape[2], 3)] * percentil_value, 255)
else:
out = np.minimum(np_image * percentil_value, 255)
if debug_mode:
print("a do_lighten")
return out
def do_darken(np_image, args):
"""
funkce ktera ztmavi vsechny pixely o dane procento
:param np_image: numpy obrazek co chceme upravit
:param args: Bere z argparseru lighten value
:return: upraaveny obrazek v Numpy array
"""
if args is None:
raise ValueError
value = args.darken.pop(0)
if len(np_image.shape) > 2:
out = np_image[::, ::, 0:min(np_image.shape[2], 3)] * (value * 0.01)
else:
out = (np_image * (value * 0.01))
if debug_mode:
print("a do_darken")
return out
def do_sharpen(np_image, args=None):
"""
funkce zostreni, zavola konvolucni metodu s danym filtrem a vrati vysledek
:param np_image: numpy obrazek co chceme upravit
:param args: Neni zde potreba, pouze pro kompabilitu
:return: upraaveny obrazek v Numpy array
"""
out = apply_filter(np_image, filters["Sharpening"])
if debug_mode:
print("a do_sharpen")
return out
def do_blur_3x3(np_image, args=None):
"""
funkce rozmazani, zavola konvolucni metodu s danym filtrem a vrati vysledek
:param np_image: numpy obrazek co chceme upravit
:param args: Neni zde potreba, pouze pro kompabilitu
:return: upraaveny obrazek v Numpy array
"""
out = apply_filter(np_image, filters['Gaussian blur 3x3 (approx)'])
if debug_mode:
print("a do_blur_3x3")
return out
def do_blur_5x5(np_image, args=None):
"""
funkce rozmazani s vetsim zaberem okolim, zavola konvolucni metodu s danym filtrem a vrati vysledek
:param np_image: numpy obrazek co chceme upravit
:param args: Neni zde potreba, pouze pro kompabilitu
:return: upraaveny obrazek v Numpy array
"""
out = apply_filter(np_image, filters['Gaussian blur 5x5 (approx)'])
if debug_mode:
print("a do_blur_5x5")
return out
def do_edge_detection(np_image, args=None):
"""
funkce detekce hran, zavola konvolucni metodu s danym filtrem a vrati vysledek
:param np_image: numpy obrazek co chceme upravit
:param args: Neni zde potreba, pouze pro kompabilitu
:return: upraaveny obrazek v Numpy array
"""
out = apply_filter(np_image, filters['Edge detection'])
if debug_mode:
print("a do_edge_detection")
return out
def do_embossing(np_image, args=None):
"""
funkce vyrazeni, zavola konvolucni metodu s danym filtrem a vrati vysledek
:param np_image: numpy obrazek co chceme upravit
:param args: Neni zde potreba, pouze pro kompabilitu
:return: upraaveny obrazek v Numpy array
"""
out = apply_filter(np_image, filters['Embossing'])
if debug_mode:
print("a do_embossing")
return out
"""
Slovník (Dictionary) všech možných úprav obrázku, slouží pro parsování argparse a tohoto programu
pro přidání nové fce je třeba jí napsat do funcions.py a poté jí přidat sem
"""
action_dict = {
"--rotate": do_rotate,
"--mirror": do_mirror,
"--inverse": do_inverse,
"--bw": do_bw,
"--lighten": do_lighten,
"--darken": do_darken,
"--sharpen": do_sharpen,
"--blur_3x3": do_blur_3x3,
"--blur_5x5": do_blur_5x5,
"--edge_detection": do_edge_detection,
"--embossing": do_embossing
}
|
nilq/baby-python
|
python
|
"""Tests for the auth providers."""
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.1 on 2020-05-07 07:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('PropelRapp', '0009_auto_20200506_0627'),
]
operations = [
migrations.AddField(
model_name='menu',
name='is_deleted',
field=models.CharField(choices=[('N', 'NO'), ('Y', 'YES')], default='N', max_length=1),
),
migrations.AddField(
model_name='role',
name='is_deleted',
field=models.CharField(choices=[('N', 'NO'), ('Y', 'YES')], default='N', max_length=1),
),
migrations.AddField(
model_name='submenu',
name='is_deleted',
field=models.CharField(choices=[('N', 'NO'), ('Y', 'YES')], default='N', max_length=1),
),
]
|
nilq/baby-python
|
python
|
''' Text Media Matching interface '''
from summarization.text_media_matching.text_media_matching_helper import \
TextMediaMatchingHelper
from summarization.text_media_matching.text_media_matching_preprocessor import \
TextMediaMatchingPreprocessor # noqa
class TextMediaMatcher:
'''Class to integrate the TextMediaMatching utilities'''
def __init__(self, text_contents, media_contents,
distance_metric_type="absolute-difference"):
self.text_contents = text_contents
self.media_contents = media_contents
self.distance_metric_type = distance_metric_type
def _get_matched_and_unmatched_contents(self):
if len(self.text_contents) == 0 or len(self.media_contents) == 0:
return {
"matched_contents": [],
"unused_contents": self.text_contents if len(
self.text_contents) != 0 else self.media_contents,
"unused_content_type": "text" if len(
self.text_contents) != 0 else "media"}
preprocessor = TextMediaMatchingPreprocessor(
self.text_contents,
self.media_contents
)
preprocessed_contents_dict = preprocessor.get_formatted_content()
text_for_matching = preprocessed_contents_dict["sentences"]
media_for_matching = preprocessed_contents_dict["media"]
unused_contents \
= preprocessed_contents_dict["content_unused_for_matching"]
unused_content_type = preprocessed_contents_dict["unused_content_type"]
matcher = TextMediaMatchingHelper(
text_for_matching, media_for_matching, self.distance_metric_type)
matched_contents = matcher.get_text_media_matching()
return {
"matched_contents": matched_contents,
"unused_contents": unused_contents,
"unused_content_type": unused_content_type
}
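# Minimal usage sketch (illustrative; the lists are placeholders for the
# project's text/media content objects):
# matcher = TextMediaMatcher(text_contents=[], media_contents=[media_item])
# result = matcher._get_matched_and_unmatched_contents()
# # -> {"matched_contents": [], "unused_contents": [media_item],
# #     "unused_content_type": "media"}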
|
nilq/baby-python
|
python
|
"""
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
*Created with Breaking Point build : EB 9.10v9.10.110.25 -- ENGINEERING BUILD"""
import requests
import json
import pprint
import base64
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
import ssl
import logging
bps_api_log = logging.getLogger(__name__)
requests.packages.urllib3.disable_warnings()
pp = pprint.PrettyPrinter(indent=1).pprint
class TlsAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block):
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block)
### this BPS REST API wrapper is generated for version: 9.10.110.25
class BPS(object):
def __init__(self, host, user, password):
self.host = host
self.user = user
self.password = password
self.sessionId = None
self.session = requests.Session()
self.session.mount('https://', TlsAdapter())
self.evasionProfile = DataModelProxy(wrapper=self, name='evasionProfile')
self.reports = DataModelProxy(wrapper=self, name='reports')
self.capture = DataModelProxy(wrapper=self, name='capture')
self.network = DataModelProxy(wrapper=self, name='network')
self.topology = DataModelProxy(wrapper=self, name='topology')
self.superflow = DataModelProxy(wrapper=self, name='superflow')
self.testmodel = DataModelProxy(wrapper=self, name='testmodel')
self.administration = DataModelProxy(wrapper=self, name='administration')
self.results = DataModelProxy(wrapper=self, name='results')
self.statistics = DataModelProxy(wrapper=self, name='statistics')
self.appProfile = DataModelProxy(wrapper=self, name='appProfile')
self.strikes = DataModelProxy(wrapper=self, name='strikes')
self.loadProfile = DataModelProxy(wrapper=self, name='loadProfile')
self.strikeList = DataModelProxy(wrapper=self, name='strikeList')
def disablePrints(self,disable=True):
if disable:
log=bps_api_log.parent
log.setLevel(logging.CRITICAL)
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.getLogger("urllib3").setLevel(logging.CRITICAL)
else:
log=bps_api_log.parent
log.setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
### connect to the system
def __connect(self):
r = self.session.post(url='https://' + self.host + '/bps/api/v1/auth/session', data=json.dumps({'username': self.user, 'password': self.password}), headers={'content-type': 'application/json'}, verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
self.sessionId = r.json().get('sessionId')
self.session.headers['sessionId'] = r.json().get('sessionId')
self.session.headers['X-API-KEY'] = r.json().get('apiKey')
bps_api_log.info('Successfully connected to %s.' % self.host)
else:
raise Exception('Failed connecting to %s: (%s, %s)' % (self.host, r.status_code, r.content))
### disconnect from the system
def __disconnect(self):
r = self.session.delete(url='https://' + self.host + '/bps/api/v1/auth/session', verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 204):
self.sessionId = None
if 'sessionId' in self.session.headers:
del self.session.headers['sessionId']
del self.session.headers['X-API-KEY']
bps_api_log.info('Successfully disconnected from %s.' % self.host)
else:
raise Exception('Failed disconnecting from %s: (%s, %s)' % (self.host, r.status_code, r.content))
### login into the bps system
def login(self):
self.__connect()
r = self.session.post(url='https://' + self.host + '/bps/api/v2/core/auth/login', data=json.dumps({'username': self.user, 'password': self.password, 'sessionId': self.sessionId}), headers={'content-type': 'application/json'}, verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
bps_api_log.info('Login successful.\nWelcome %s. \nYour session id is %s' % (self.user, self.sessionId))
else:
raise Exception('Login failed.\ncode:%s, content:%s' % (r.status_code, r.content))
### logout from the bps system
def logout(self):
r = self.session.post(url='https://' + self.host + '/bps/api/v2/core/auth/logout', data=json.dumps({'username': self.user, 'password': self.password, 'sessionId': self.sessionId}), headers={'content-type': 'application/json'}, verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code == 200):
bps_api_log.info('Logout successful. \nBye %s.' % self.user)
self.__disconnect()
else:
raise Exception('Logout failed: (%s, %s)' % (r.status_code, r.content))
### Get from data model
def _get(self, path, responseDepth=None, **kwargs):
requestUrl = 'https://%s/bps/api/v2/core%s%s' % (self.host, path, '?responseDepth=%s' % responseDepth if responseDepth else '')
for key, value in kwargs.items():
requestUrl = requestUrl + "&%s=%s" % (key, value)
headers = {'content-type': 'application/json'}
r = self.session.get(url=requestUrl, headers=headers, verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code in [200, 204]):
return json.loads(r.content) if jsonContent else r.content
raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content})
    ### Patch the data model
def _patch(self, path, value):
r = self.session.patch(url='https://' + self.host + '/bps/api/v2/core/' + path, headers={'content-type': 'application/json'}, data=json.dumps(value), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code != 204):
raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content})
    ### Put into the data model
def _put(self, path, value):
r = self.session.put(url='https://' + self.host + '/bps/api/v2/core/' + path, headers={'content-type': 'application/json'}, data=json.dumps(value), verify=False)
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code != 204):
raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content})
    ### Delete from the data model
def _delete(self, path):
requestUrl = 'https://' + self.host + '/bps/api/v2/core/'+ path
headers = {'content-type': 'application/json'}
r = self.session.delete(url=requestUrl, headers=headers, verify=False)
if(r.status_code == 400):
methodCall = '%s'%path.replace('/', '.').replace('.operations', '')
            content_message = r.content.decode('utf-8', 'replace') + ' Execute: help(<BPS session name>%s) for more information about the method.'%methodCall
raise Exception({'status_code': r.status_code, 'content': content_message})
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code in [200, 204]):
return json.loads(r.content) if jsonContent else r.content
raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content})
### OPTIONS request
def _options(self, path):
r = self.session.options('https://' + self.host + '/bps/api/v2/core/'+ path)
if(r.status_code == 400):
methodCall = '%s'%path.replace('/', '.').replace('.operations', '')
            content_message = r.content.decode('utf-8', 'replace') + ' Execute: help(<BPS session name>%s) for more information about the method.'%methodCall
raise Exception({'status_code': r.status_code, 'content': content_message})
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code in [200]):
return json.loads(r.content) if jsonContent else r.content
raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content})
### generic post operation
def _post(self, path, **kwargs):
requestUrl = 'https://' + self.host + '/bps/api/v2/core/' + path
r = self.session.post(url=requestUrl, headers={'content-type': 'application/json'}, data=json.dumps(kwargs), verify=False)
if(r.status_code == 400):
methodCall = '%s'%path.replace('/', '.').replace('.operations', '')
            content_message = r.content.decode('utf-8', 'replace') + ' Execute: help(<BPS session name>%s) for more information about the method.'%methodCall
raise Exception({'status_code': r.status_code, 'content': content_message})
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code in [200, 204, 202]):
return json.loads(r.content) if jsonContent else r.content
raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content})
### generic import operation
def _import(self, path, filename, **kwargs):
requestUrl = 'https://' + self.host + '/bps/api/v2/core/' + path
files = {'file': (kwargs['name'], open(filename, 'rb'), 'application/xml')}
r = self.session.post(url=requestUrl, files=files, data={'fileInfo':str(kwargs)}, verify=False)
if(r.status_code == 400):
methodCall = '%s'%path.replace('/', '.').replace('.operations', '')
            content_message = r.content.decode('utf-8', 'replace') + ' Execute: help(<BPS session name>%s) for more information about the method.'%methodCall
raise Exception({'status_code': r.status_code, 'content': content_message})
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if(r.status_code in [200, 204]):
return json.loads(r.content) if jsonContent else r.content
raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content})
    ### generic export operation
def _export(self, path, **kwargs):
requestUrl = 'https://' + self.host + '/bps/api/v2/core/' + path
r = self.session.post(url=requestUrl, headers={'content-type': 'application/json'}, data=json.dumps(kwargs), verify=False)
if(r.status_code == 400):
methodCall = '%s'%path.replace('/', '.').replace('.operations', '')
            content_message = r.content.decode('utf-8', 'replace') + ' Execute: help(<BPS session name>%s) for more information about the method.'%methodCall
raise Exception({'status_code': r.status_code, 'content': content_message})
jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'['))
if r.status_code in [200, 204]:
get_url = 'https://' + self.host + r.content.decode('utf-8', errors='replace')
get_req = self.session.get(url=get_url, verify=False)
with open(kwargs['filepath'], 'wb') as fd:
for chunk in get_req.iter_content(chunk_size=1024):
fd.write(chunk)
get_req.close()
return {'status_code': r.status_code, 'content': 'success'}
else:
raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content})
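### Example (hypothetical session name and path): _export first POSTs the
### export request, then downloads the URL returned in the response body and
### streams it to the local file given in kwargs['filepath']:
###   bps._export('reports/operations/exportReport',
###               filepath='/tmp/report.pdf', runid=42, reportType='PDF')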
### null
@staticmethod
def _topology_operations_unreserve(self, unreservation):
"""
:param unreservation (list):
list of object with fields
slot (number):
port (number):
"""
return self._wrapper._post('/topology/operations/unreserve', **{'unreservation': unreservation})
### Deletes a given Evasion Profile from the database.
@staticmethod
def _evasionProfile_operations_delete(self, name):
"""
Deletes a given Evasion Profile from the database.
:param name (string): The name of the profile to delete.
"""
return self._wrapper._post('/evasionProfile/operations/delete', **{'name': name})
### Clones a component in the current working Test Model
@staticmethod
def _testmodel_operations_clone(self, template, type, active):
"""
Clones a component in the current working Test Model
:param template (string): The ID of the test component to clone.
:param type (string): Component Type: appsim, sessionsender, ...
:param active (bool): Set the component enabled (the default) or disabled
"""
return self._wrapper._post('/testmodel/operations/clone', **{'template': template, 'type': type, 'active': active})
### null
@staticmethod
def _loadProfile_operations_load(self, template):
"""
:param template (string):
"""
return self._wrapper._post('/loadProfile/operations/load', **{'template': template})
### Sets the card mode of a board.
@staticmethod
def _topology_operations_setCardMode(self, board, mode):
"""
Sets the card mode of a board.
:param board (number): Slot ID.
:param mode (number): The new mode: 10(BPS-L23), 7(BPS L4-7), 3(IxLoad),
11(BPS QT L2-3), 12(BPS QT L4-7)
"""
return self._wrapper._post('/topology/operations/setCardMode', **{'board': board, 'mode': mode})
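### Example (hypothetical slot id, assuming the data-model tree exposes this
### as topology.operations.setCardMode): switching slot 1 to BPS L4-7 mode:
###   bps.topology.operations.setCardMode(board=1, mode=7)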
### Sets the card speed of a board
@staticmethod
def _topology_operations_setCardSpeed(self, board, speed):
"""
Sets the card speed of a board
:param board (number): Slot ID.
:param speed (number): The new speed.(the int value for 1G is 1000, 10G(10000), 40G(40000))
"""
return self._wrapper._post('/topology/operations/setCardSpeed', **{'board': board, 'speed': speed})
### Sets the card fanout of a board
@staticmethod
def _topology_operations_setCardFanout(self, board, fanid):
"""
Sets the card fanout of a board
:param board (number): Slot ID.
:param fanid (number): The fan type represented by an integer id.
For CloudStorm: 0(100G), 1(40G), 2(25G), 3(10G), 4(50G).
For PerfectStorm 40G: 0(40G), 1(10G).
For PerfectStorm 100G: 0(100G), 1(40G), 2(10G)
"""
return self._wrapper._post('/topology/operations/setCardFanout', **{'board': board, 'fanid': fanid})
### Enables/Disables the performance acceleration for a BPS VE blade.
@staticmethod
def _topology_operations_setPerfAcc(self, board, perfacc):
"""
Enables/Disables the performance acceleration for a BPS VE blade.
:param board (number): Slot ID.
:param perfacc (bool): Boolean value: 'True' to enable the performance Acceleration and 'False' otherwise.
"""
return self._wrapper._post('/topology/operations/setPerfAcc', **{'board': board, 'perfacc': perfacc})
### Deletes a given Application Profile from the database.
@staticmethod
def _appProfile_operations_delete(self, name):
"""
Deletes a given Application Profile from the database.
:param name (string): The name of the Application Profiles.
"""
return self._wrapper._post('/appProfile/operations/delete', **{'name': name})
### Saves the current working Test Model under specified name.
@staticmethod
def _evasionProfile_operations_saveAs(self, name, force):
"""
Saves the current working Test Model under specified name.
:param name (string): The new name given for the current working Evasion Profile
:param force (bool): Force to save the working Evasion Profile using a new name.
"""
return self._wrapper._post('/evasionProfile/operations/saveAs', **{'name': name, 'force': force})
### Saves the working Test Model using the current name. No need to configure. The current name is used.
@staticmethod
def _evasionProfile_operations_save(self, name=None, force=True):
"""
Saves the working Test Model using the current name. No need to configure. The current name is used.
:param name (string): This argument should be empty for saving the profile using its actual name.
:param force (bool): Force to save the working profile with the same name.
"""
return self._wrapper._post('/evasionProfile/operations/save', **{'name': name, 'force': force})
### Imports a test model, given as a file. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
@staticmethod
def _testmodel_operations_importModel(self, name, filename, force):
"""
Imports a test model, given as a file. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
:param name (string): The name of the object being imported
:param filename (string): The file containing the object
:param force (bool): Force to import the file; the object having the same name will be replaced.
"""
return self._wrapper._import('/testmodel/operations/importModel', **{'name': name, 'filename': filename, 'force': force})
### Imports an application profile, given as a file. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
@staticmethod
def _appProfile_operations_importAppProfile(self, name, filename, force):
"""
Imports an application profile, given as a file. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
:param name (string): The name of the object being imported
:param filename (string): The file containing the object
:param force (bool): Force to import the file; the object having the same name will be replaced.
"""
return self._wrapper._import('/appProfile/operations/importAppProfile', **{'name': name, 'filename': filename, 'force': force})
### Imports a network neighborhood model, given as a file. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
@staticmethod
def _network_operations_importNetwork(self, name, filename, force):
"""
Imports a network neighborhood model, given as a file. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
:param name (string): The name of the object being imported
:param filename (string): The file containing the object
:param force (bool): Force to import the file and replace the object having the same name.
"""
return self._wrapper._import('/network/operations/importNetwork', **{'name': name, 'filename': filename, 'force': force})
### null
@staticmethod
def _superflow_operations_search(self, searchString, limit, sort, sortorder):
"""
:param searchString (string): Search Super Flow name matching the string given.
:param limit (string): The limit of rows to return
:param sort (string): Parameter to sort by.
:param sortorder (string): The sort order (ascending/descending)
"""
return self._wrapper._post('/superflow/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder})
### Adds a new test component to the current working test model
@staticmethod
def _testmodel_operations_add(self, name, component, type, active):
"""
Adds a new test component to the current working test model
:param name (string): Component Name
:param component (string): Component template, preset.
:param type (string): Component Type: appsim, sessionsender, ...
:param active (bool): Set the component enabled (the default) or disabled
"""
return self._wrapper._post('/testmodel/operations/add', **{'name': name, 'component': component, 'type': type, 'active': active})
### Add a host to the current working Superflow
@staticmethod
def _superflow_operations_addHost(self, hostParams, force):
"""
Add a host to the current working Superflow
:param hostParams (object):
object of object with fields
name (string): The host name.
hostname (string): The NickName of the host.
iface (string): The traffic direction. Values can be: 'origin' (meaning client) and 'target' (meaning server)
:param force (bool): Force the host to be added.
"""
return self._wrapper._post('/superflow/operations/addHost', **{'hostParams': hostParams, 'force': force})
### Stops the test run.
@staticmethod
def _testmodel_operations_stopRun(self, runid):
"""
Stops the test run.
:param runid (number): Test RUN ID
"""
return self._wrapper._post('/testmodel/operations/stopRun', **{'runid': runid})
### Stops the test run.
@staticmethod
def _topology_operations_stopRun(self, runid):
"""
Stops the test run.
:param runid (number): Test RUN ID
"""
return self._wrapper._post('/topology/operations/stopRun', **{'runid': runid})
### null
@staticmethod
def _superflow_actions_operations_getActionChoices(self, id):
"""
:param id (number): the flow id
"""
return self._wrapper._post('/superflow/actions/' + self._name + '/operations/getActionChoices', **{'id': id})
### Recompute percentages in the current working Application Profile
@staticmethod
def _appProfile_operations_recompute(self):
"""
Recompute percentages in the current working Application Profile
"""
return self._wrapper._post('/appProfile/operations/recompute', **{})
### null
@staticmethod
def _evasionProfile_operations_search(self, searchString, limit, sort, sortorder):
"""
:param searchString (string): Search evasion profile name matching the string given.
:param limit (string): The limit of rows to return
:param sort (string): Parameter to sort by. (name/createdBy ...)
:param sortorder (string): The sort order (ascending/descending)
:return results (list):
list of object with fields
name (string):
label (string):
createdBy (string):
revision (number):
description (string):
"""
return self._wrapper._post('/evasionProfile/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder})
### Searches a strike inside all BPS strike database.To list all the available strikes, leave the arguments empty.
@staticmethod
def _strikes_operations_search(self, searchString='', limit=10, sort='name', sortorder='ascending', offset=0):
"""
Searches a strike inside all BPS strike database.To list all the available strikes, leave the arguments empty.
:param searchString (string): The string used as a criterion to search a strike by. Examples: 'strike_name', 'year:2019', 'path:strikes/xml..'
:param limit (number): The limit of rows to return. Use empty string or empty box to get all the available strikes.
:param sort (string): Parameter to sort by.
:param sortorder (string): The sort order (ascending/descending)
:param offset (number): The offset to begin from. Default is 0.
:return results (list):
list of object with fields
id (string):
protocol (string):
category (string):
direction (string):
keyword (string):
name (string):
path (string):
variants (number):
severity (string):
reference (string):
fileSize (string):
fileExtension (string):
year (string):
"""
return self._wrapper._post('/strikes/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder, 'offset': offset})
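### Example (assuming the data-model tree exposes this as
### strikes.operations.search): listing the first 5 strikes from 2019:
###   bps.strikes.operations.search(searchString='year:2019', limit=5)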
### Loads an existing network config by name.
@staticmethod
def _network_operations_load(self, template):
"""
Loads an existing network config by name.
:param template (string): The name of the network neighborhood template
"""
return self._wrapper._post('/network/operations/load', **{'template': template})
### Creates a new Network Neighborhood configuration with no name. The template value must remain empty.
@staticmethod
def _network_operations_new(self, template=None):
"""
Creates a new Network Neighborhood configuration with no name. The template value must remain empty.
:param template (string): The name of the template. In this case it will be empty. No need to configure.
"""
return self._wrapper._post('/network/operations/new', **{'template': template})
### Removes a flow from the current working SuperFlow.
@staticmethod
def _superflow_operations_removeFlow(self, id):
"""
Removes a flow from the current working SuperFlow.
:param id (number): The flow ID.
"""
return self._wrapper._post('/superflow/operations/removeFlow', **{'id': id})
### Lists all the component presets names.
@staticmethod
def _testmodel_component_operations_getComponentPresetNames(self, type='None'):
"""
Lists all the component presets names.
:param type (string): The Component type.
All the component types are listed under the node testComponentTypesDescription.
If this argument is not set, all the presets will be listed.
:return result (list):
list of object with fields
id (string):
label (string):
type (string):
description (string):
"""
return self._wrapper._post('/testmodel/component/' + self._name + '/operations/getComponentPresetNames', **{'type': type})
### Adds a list of strikes to the current working Strike List.([{id: 'b/b/v/f'}, {id: 'aa/f/h'}])
@staticmethod
def _strikeList_operations_add(self, strike):
"""
Adds a list of strikes to the current working Strike List.([{id: 'b/b/v/f'}, {id: 'aa/f/h'}])
:param strike (list): The list of strikes to add.
list of object with fields
id (string): Strike path.
"""
return self._wrapper._post('/strikeList/operations/add', **{'strike': strike})
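### Example (strike paths taken from the docstring above, exposure via the
### data-model tree assumed):
###   bps.strikeList.operations.add([{'id': 'b/b/v/f'}, {'id': 'aa/f/h'}])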
### null
@staticmethod
def _superflow_flows_operations_getFlowChoices(self, id, name):
"""
:param id (number): The flow id.
:param name (string): The flow type/name.
:return result (list):
"""
return self._wrapper._post('/superflow/flows/' + self._name + '/operations/getFlowChoices', **{'id': id, 'name': name})
### Runs a Test.
@staticmethod
def _testmodel_operations_run(self, modelname, group, allowMalware=False):
"""
Runs a Test.
:param modelname (string): Test Name to run
:param group (number): Group to run
:param allowMalware (bool): Enable this option to allow malware in test.
"""
return self._wrapper._post('/testmodel/operations/run', **{'modelname': modelname, 'group': group, 'allowMalware': allowMalware})
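### Example (hypothetical test name, exposure via the data-model tree
### assumed): starting a saved test on group 1:
###   result = bps.testmodel.operations.run(modelname='MyTest', group=1)
### The response carries the test run id consumed by stopRun and
### realTimeStats; its exact shape depends on the BPS release.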
### Runs a Test.
@staticmethod
def _topology_operations_run(self, modelname, group, allowMalware=False):
"""
Runs a Test.
:param modelname (string): Test Name to run
:param group (number): Group to run
:param allowMalware (bool): Enable this option to allow malware in test.
"""
return self._wrapper._post('/topology/operations/run', **{'modelname': modelname, 'group': group, 'allowMalware': allowMalware})
### Deletes a Test Report from the database.
@staticmethod
def _reports_operations_delete(self, runid):
"""
Deletes a Test Report from the database.
:param runid (number): The test run id that generated the report you want to delete.
"""
return self._wrapper._post('/reports/operations/delete', **{'runid': runid})
### Create a new custom Load Profile.
@staticmethod
def _loadProfile_operations_createNewCustom(self, loadProfile):
"""
Create a new custom Load Profile.
:param loadProfile (string): The name of the load profile object to create.
"""
return self._wrapper._post('/loadProfile/operations/createNewCustom', **{'loadProfile': loadProfile})
### Saves the current working Test Model under specified name.
@staticmethod
def _testmodel_operations_saveAs(self, name, force):
"""
Saves the current working Test Model under specified name.
:param name (string): The new name given for the current working Test Model
:param force (bool): Force to save the working Test Model using a new name.
"""
return self._wrapper._post('/testmodel/operations/saveAs', **{'name': name, 'force': force})
### Saves the working Test Model using the current name. No need to configure. The current name is used.
@staticmethod
def _testmodel_operations_save(self, name=None, force=True):
"""
Saves the working Test Model using the current name. No need to configure. The current name is used.
:param name (string): This argument should be left empty; the current name is used.
:param force (bool): Force to save the working Test Model with the same name.
"""
return self._wrapper._post('/testmodel/operations/save', **{'name': name, 'force': force})
### Deletes a given Test Model from the database.
@staticmethod
def _testmodel_operations_delete(self, name):
"""
Deletes a given Test Model from the database.
:param name (string): The name of the Test Model.
"""
return self._wrapper._post('/testmodel/operations/delete', **{'name': name})
### Load an existing Application Profile and sets it as the current one.
@staticmethod
def _appProfile_operations_load(self, template):
"""
Load an existing Application Profile and sets it as the current one.
:param template (string): The name of the template application profile
"""
return self._wrapper._post('/appProfile/operations/load', **{'template': template})
### Creates a new Application Profile.
@staticmethod
def _appProfile_operations_new(self, template=None):
"""
Creates a new Application Profile.
:param template (string): This argument must remain unset. Do not set any value for it.
"""
return self._wrapper._post('/appProfile/operations/new', **{'template': template})
### Saves the current working Strike List and gives it a new name.
@staticmethod
def _strikeList_operations_saveAs(self, name, force):
"""
Saves the current working Strike List and gives it a new name.
:param name (string): The new name given for the current working Strike List
:param force (bool): Force to save the working Strike List using the given name.
"""
return self._wrapper._post('/strikeList/operations/saveAs', **{'name': name, 'force': force})
### Saves the current working Strike List using the current name
@staticmethod
def _strikeList_operations_save(self, name=None, force=True):
"""
Saves the current working Strike List using the current name
:param name (string): The name of the template. Default is empty.
:param force (bool): Force to save the working Strike List with the same name.
"""
return self._wrapper._post('/strikeList/operations/save', **{'name': name, 'force': force})
### null
@staticmethod
def _testmodel_operations_search(self, searchString, limit, sort, sortorder):
"""
:param searchString (string): Search test name matching the string given.
:param limit (string): The limit of rows to return
:param sort (string): Parameter to sort by: 'createdOn'/'timestamp'/'bandwidth'/'result'/'lastrunby'/'createdBy'/'interfaces'/'testLabType'
:param sortorder (string): The sort order: ascending/descending
:return results (list):
list of object with fields
name (string):
label (string):
createdBy (string):
network (string):
duration (number):
description (string):
"""
return self._wrapper._post('/testmodel/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder})
### Adds a list of SuperFlow to the current working Application Profile. ([{'superflow':'adadad', 'weight':'20'},{..}])
@staticmethod
def _appProfile_operations_add(self, add):
"""
Adds a list of SuperFlow to the current working Application Profile. ([{'superflow':'adadad', 'weight':'20'},{..}])
:param add (list):
list of object with fields
superflow (string): The name of the super flow
weight (string): The weight of the super flow
"""
return self._wrapper._post('/appProfile/operations/add', **{'add': add})
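### Example (Super Flow names hypothetical, exposure via the data-model tree
### assumed): adding two weighted Super Flows to the working profile:
###   bps.appProfile.operations.add([{'superflow': 'HTTP', 'weight': '50'},
###                                  {'superflow': 'DNS', 'weight': '50'}])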
### Sets a User Preference.
@staticmethod
def _administration_userSettings_operations_changeUserSetting(self, name, value):
"""
Sets a User Preference.
:param name (string): The setting name.
:param value (string): The new value for setting.
"""
return self._wrapper._post('/administration/userSettings/' + self._name + '/operations/changeUserSetting', **{'name': name, 'value': value})
### Imports an ATI License file (.lic) on a hardware platform. This operation is NOT recommended to be used on BPS Virtual platforms.
@staticmethod
def _administration_atiLicensing_operations_importAtiLicense(self, filename, name):
"""
Imports an ATI License file (.lic) on a hardware platform. This operation is NOT recommended to be used on BPS Virtual platforms.
:param filename (string): import file path
:param name (string): the name of the license file
"""
return self._wrapper._import('/administration/atiLicensing/operations/importAtiLicense', **{'filename': filename, 'name': name})
### null
@staticmethod
def _strikeList_operations_search(self, searchString='', limit=10, sort='name', sortorder='ascending'):
"""
:param searchString (string): Search strike list name matching the string given.
:param limit (number): The limit of rows to return
:param sort (string): Parameter to sort by. Default is by name.
:param sortorder (string): The sort order (ascending/descending). Default is ascending.
"""
return self._wrapper._post('/strikeList/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder})
### Deletes a given Network Neighborhood Config from the database.
@staticmethod
def _network_operations_delete(self, name):
"""
Deletes a given Network Neighborhood Config from the database.
:param name (string): The name of the Network Neighborhood Config.
"""
return self._wrapper._post('/network/operations/delete', **{'name': name})
### Removes a SuperFlow from the current working Application Profile.
@staticmethod
def _appProfile_operations_remove(self, superflow):
"""
Removes a SuperFlow from the current working Application Profile.
:param superflow (string): The name of the super flow.
"""
return self._wrapper._post('/appProfile/operations/remove', **{'superflow': superflow})
### Returns stats series for a given component group stat output for a given timestamp
@staticmethod
def _results_operations_getHistoricalSeries(self, runid, componentid, dataindex, group):
"""
Returns stats series for a given component group stat output for a given timestamp
:param runid (number): The test identifier
:param componentid (string): The component identifier. Each component has an id, which can be obtained by loading the test and checking its components info
:param dataindex (number): The table index, equivalent to the timestamp.
:param group (string): The data group or one of the BPS component main groups. The group name can be obtained by executing the operation 'getGroups' from the results node.
:return results (list):
list of object with fields
name (string):
content (string):
datasetvals (string):
"""
return self._wrapper._post('/results/' + self._name + '/operations/getHistoricalSeries', **{'runid': runid, 'componentid': componentid, 'dataindex': dataindex, 'group': group})
### Returns main groups of statistics for a single BPS Test Component. These groups can be used then in requesting statistics values from the history of a test run.
@staticmethod
def _results_operations_getGroups(self, name, dynamicEnums=True, includeOutputs=True):
"""
Returns main groups of statistics for a single BPS Test Component. These groups can be used then in requesting statistics values from the history of a test run.
:param name (string): BPS Component name. This argument is actually the component type, which can be obtained from the 'statistics' table
:param dynamicEnums (bool):
:param includeOutputs (bool):
:return results (object):
object of object with fields
name (string):
label (string):
groups (object):
"""
return self._wrapper._post('/results/' + self._name + '/operations/getGroups', **{'name': name, 'dynamicEnums': dynamicEnums, 'includeOutputs': includeOutputs})
### Adds an action to the current working SuperFlow
@staticmethod
def _superflow_operations_addAction(self, flowid, type, actionid, source):
"""
Adds an action to the current working SuperFlow
:param flowid (number): The flow id.
:param type (string): The type of the action definition.
:param actionid (number): The new action id.
:param source (string): The action source.
"""
return self._wrapper._post('/superflow/operations/addAction', **{'flowid': flowid, 'type': type, 'actionid': actionid, 'source': source})
### Exports a wanted test model by giving its name or its test run id. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
@staticmethod
def _testmodel_operations_exportModel(self, name, attachments, filepath, runid=None):
"""
Exports a wanted test model by giving its name or its test run id. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
:param name (string): The name of the test model to be exported.
:param attachments (bool): True if object attachments are needed.
:param filepath (string): The local path where to save the exported object.
:param runid (number): Test RUN ID
"""
return self._wrapper._export('/testmodel/operations/exportModel', **{'name': name, 'attachments': attachments, 'filepath': filepath, 'runid': runid})
### Load an existing test model template.
@staticmethod
def _testmodel_operations_load(self, template):
"""
Load an existing test model template.
:param template (string): The name of the template testmodel
"""
return self._wrapper._post('/testmodel/operations/load', **{'template': template})
### Creates a new Test Model
@staticmethod
def _testmodel_operations_new(self, template=None):
"""
Creates a new Test Model
:param template (string): The name of the template. In this case it will be empty.
"""
return self._wrapper._post('/testmodel/operations/new', **{'template': template})
### Saves the current working Application Profiles and gives it a new name.
@staticmethod
def _superflow_operations_saveAs(self, name, force):
"""
Saves the current working Application Profiles and gives it a new name.
:param name (string): The new name given for the current working Super Flow
:param force (bool): Force to save the working Super Flow using the given name.
"""
return self._wrapper._post('/superflow/operations/saveAs', **{'name': name, 'force': force})
### Saves the working Super Flow using the current name
@staticmethod
def _superflow_operations_save(self, name=None, force=True):
"""
Saves the working Super Flow using the current name
:param name (string): This argument should be left empty; the current name is used.
:param force (bool): Force to save the working Super Flow with the same name.
"""
return self._wrapper._post('/superflow/operations/save', **{'name': name, 'force': force})
### Exports an Application profile and all of its dependencies. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
@staticmethod
def _appProfile_operations_exportAppProfile(self, name, attachments, filepath):
"""
Exports an Application profile and all of its dependencies. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
:param name (string): The name of the application profile to be exported.
:param attachments (bool): True if object attachments are needed.
:param filepath (string): The local path where to save the exported object.
"""
return self._wrapper._export('/appProfile/operations/exportAppProfile', **{'name': name, 'attachments': attachments, 'filepath': filepath})
### Exports the Strike List identified by its name and all of its dependencies. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
@staticmethod
def _strikeList_operations_exportStrikeList(self, name, filepath):
"""
Exports the Strike List identified by its name and all of its dependencies. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
:param name (string): The name of the strike list to be exported.
:param filepath (string): The local path where to save the exported object. The file should have .bap extension
"""
return self._wrapper._export('/strikeList/operations/exportStrikeList', **{'name': name, 'filepath': filepath})
### null
@staticmethod
def _administration_operations_logs(self, error=False, messages=False, web=False, all=False, audit=False, info=False, system=False, lines=20, drop=0):
"""
:param error (bool):
:param messages (bool):
:param web (bool):
:param all (bool):
:param audit (bool):
:param info (bool):
:param system (bool):
:param lines (number): number lines to return
:param drop (number): number lines to drop
"""
return self._wrapper._post('/administration/operations/logs', **{'error': error, 'messages': messages, 'web': web, 'all': all, 'audit': audit, 'info': info, 'system': system, 'lines': lines, 'drop': drop})
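### Example (exposure via the data-model tree assumed): tailing the last 50
### system log lines:
###   bps.administration.operations.logs(system=True, lines=50)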
### null
@staticmethod
def _reports_operations_search(self, searchString, limit, sort, sortorder):
"""
:param searchString (string): Search test name matching the string given.
:param limit (string): The limit of rows to return
:param sort (string): Parameter to sort by: 'name'/'endTime'/'duration'/'result'/'startTime'/'iteration'/'network'/'dut'/'user'/'size'
:param sortorder (string): The sort order: ascending/descending
"""
return self._wrapper._post('/reports/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder})
### Load an existing Super Flow and sets it as the current one.
@staticmethod
def _superflow_operations_load(self, template):
"""
Load an existing Super Flow and sets it as the current one.
:param template (string): The name of the existing Super Flow template
"""
return self._wrapper._post('/superflow/operations/load', **{'template': template})
### Creates a new Super Flow.
@staticmethod
def _superflow_operations_new(self, template=None):
"""
Creates a new Super Flow.
:param template (string): The name of the template. In this case it will be empty.
"""
return self._wrapper._post('/superflow/operations/new', **{'template': template})
### Deletes a given Strike List from the database.
@staticmethod
def _strikeList_operations_delete(self, name):
"""
Deletes a given Strike List from the database.
:param name (string): The name of the Strike List to be deleted.
"""
return self._wrapper._post('/strikeList/operations/delete', **{'name': name})
### Gives abbreviated information about all Canned Flow Names.
@staticmethod
def _superflow_flows_operations_getCannedFlows(self):
"""
Gives abbreviated information about all Canned Flow Names.
:return results (list):
list of object with fields
name (string):
label (string):
"""
return self._wrapper._post('/superflow/flows/' + self._name + '/operations/getCannedFlows', **{})
### Deletes a given Super Flow from the database.
@staticmethod
def _superflow_operations_delete(self, name):
"""
Deletes a given Super Flow from the database.
:param name (string): The name of the Super Flow.
"""
return self._wrapper._post('/superflow/operations/delete', **{'name': name})
### null
@staticmethod
def _results_operations_getHistoricalResultSize(self, runid, componentid, group):
"""
:param runid (number): The test run id
:param componentid (string): The component identifier
:param group (string): The data group or one of the BPS component main groups. The group name can be obtained by executing the operation 'getGroups' from the results node
:return result (string):
"""
return self._wrapper._post('/results/' + self._name + '/operations/getHistoricalResultSize', **{'runid': runid, 'componentid': componentid, 'group': group})
### Adds a note to given port.
@staticmethod
def _topology_operations_addPortNote(self, interface, note):
"""
Adds a note to given port.
:param interface (object): Slot and Port ID.
object of object with fields
slot (number):
port (number):
:param note (string): Note info.
"""
return self._wrapper._post('/topology/operations/addPortNote', **{'interface': interface, 'note': note})
### Search Networks.
@staticmethod
def _network_operations_search(self, searchString, userid, clazz, sortorder, sort, limit, offset):
"""
Search Networks.
:param searchString (string): Search networks matching the string given.
:param userid (string): The owner to search for
:param clazz (string): The 'class' of the object (usually 'canned' or 'custom')
:param sortorder (string): The order in which to sort: ascending/descending
:param sort (string): Parameter to sort by: 'name'/'class'/'createdBy'/'interfaces'/'timestamp'
:param limit (number): The limit of network elements to return
:param offset (number): The offset to begin from.
:return results (list):
list of object with fields
name (string):
label (string):
createdBy (string):
revision (number):
description (string):
"""
return self._wrapper._post('/network/operations/search', **{'searchString': searchString, 'userid': userid, 'clazz': clazz, 'sortorder': sortorder, 'sort': sort, 'limit': limit, 'offset': offset})
### Retrieves the real time statistics for the running test, by giving the run id.
@staticmethod
def _testmodel_operations_realTimeStats(self, runid, rtsgroup, numSeconds, numDataPoints=1):
"""
Retrieves the real time statistics for the running test, by giving the run id.
:param runid (number): Test RUN ID
:param rtsgroup (string): Real Time Stats group name. Values for this can be obtained from the 'statistics' node, inside 'statNames' of each component, at the 'realtime Group' key/column. Examples: l7STats, all, bpslite, summary, clientStats etc.
:param numSeconds (number): The number of seconds. If negative, means from the end
:param numDataPoints (number): The number of data points, the default is 1.
:return result (object):
object of object with fields
testStuck (bool):
time (number):
progress (number):
values (string):
"""
return self._wrapper._post('/testmodel/operations/realTimeStats', **{'runid': runid, 'rtsgroup': rtsgroup, 'numSeconds': numSeconds, 'numDataPoints': numDataPoints})
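### Example (hypothetical run id, exposure via the data-model tree assumed):
### polling the summary group once per call:
###   stats = bps.testmodel.operations.realTimeStats(runid=42,
###                                                  rtsgroup='summary',
###                                                  numSeconds=-1)
### Per the docstring, a negative numSeconds reads from the end of the series.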
### Imports a capture file to the system. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
@staticmethod
def _capture_operations_importCapture(self, name, filename, force):
"""
Imports a capture file to the system. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
:param name (string): The name of the capture being imported
:param filename (string): The file containing the capture object
:param force (bool): Force to import the file; the object having the same name will be replaced.
"""
return self._wrapper._import('/capture/operations/importCapture', **{'name': name, 'filename': filename, 'force': force})
### Reboots the card. Only available for PerfectStorm and CloudStorm cards.
@staticmethod
def _topology_operations_reboot(self, board):
"""
Reboots the card. Only available for PerfectStorm and CloudStorm cards.
:param board (number):
"""
return self._wrapper._post('/topology/operations/reboot', **{'board': board})
### Saves the current working Application Profiles and gives it a new name.
@staticmethod
def _appProfile_operations_saveAs(self, name, force):
"""
Saves the current working Application Profiles and gives it a new name.
:param name (string): The new name given for the current working Application Profile
:param force (bool): Force to save the working Application Profile using the given name.
"""
return self._wrapper._post('/appProfile/operations/saveAs', **{'name': name, 'force': force})
### Saves the current working application profile using the current name. No need to use any parameter.
@staticmethod
def _appProfile_operations_save(self, name=None, force=True):
"""
Saves the current working application profile using the current name. No need to use any parameter.
:param name (string): The name of the template. No need to configure. The current name is used.
:param force (bool): Force to save the working Application Profile with the same name. No need to configure. The default is used.
"""
return self._wrapper._post('/appProfile/operations/save', **{'name': name, 'force': force})
### Get information about an action in the current working Superflow, retrieving also the choices for each action setting.
@staticmethod
def _superflow_actions_operations_getActionInfo(self, id):
"""
Get information about an action in the current working Superflow, retrieving also the choices for each action setting.
:param id (number): The action id
:return result (list):
list of object with fields
label (string):
name (string):
description (string):
choice (object):
"""
return self._wrapper._post('/superflow/actions/' + self._name + '/operations/getActionInfo', **{'id': id})
### null
@staticmethod
def _topology_operations_reserve(self, reservation, force=False):
"""
:param reservation (list):
list of object with fields
group (number):
slot (number):
port (number):
capture (bool):
:param force (bool):
"""
return self._wrapper._post('/topology/operations/reserve', **{'reservation': reservation, 'force': force})
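### Example (hypothetical slot/port, exposure via the data-model tree
### assumed): reserving port 0 of slot 1 for group 1 without capture:
###   bps.topology.operations.reserve([{'group': 1, 'slot': 1, 'port': 0,
###                                     'capture': False}], force=False)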
### Removes an action from the current working SuperFlow.
@staticmethod
def _superflow_operations_removeAction(self, id):
"""
Removes an action from the current working SuperFlow.
:param id (number): The action ID.
"""
return self._wrapper._post('/superflow/operations/removeAction', **{'id': id})
### Adds a flow to the current working SuperFlow
@staticmethod
def _superflow_operations_addFlow(self, flowParams):
"""
Adds a flow to the current working SuperFlow
:param flowParams (object): The flow object to add.
object of object with fields
name (string): The name of the flow
from (string): Traffic initiator.
to (string): Traffic responder.
"""
return self._wrapper._post('/superflow/operations/addFlow', **{'flowParams': flowParams})
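### Example (hypothetical flow and host names, exposure via the data-model
### tree assumed): adding a flow between the client and server hosts:
###   bps.superflow.operations.addFlow({'name': 'http', 'from': 'Client',
###                                     'to': 'Server'})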
### Imports a list of strikes residing in a file.
@staticmethod
def _strikeList_operations_importStrikeList(self, name, filename, force):
"""
Imports a list of strikes residing in a file.
:param name (string): The name of the object being imported
:param filename (string): The file containing the object to be imported.
:param force (bool): Force to import the file; the object having the same name will be replaced.
"""
return self._wrapper._import('/strikeList/operations/importStrikeList', **{'name': name, 'filename': filename, 'force': force})
### null
@staticmethod
def _network_operations_list(self, userid, clazz, sortorder, sort, limit, offset):
"""
:param userid (string):
:param clazz (string):
:param sortorder (string):
:param sort (string):
:param limit (number):
:param offset (number):
:return returnArg (list):
list of object with fields
name (string):
type (string):
author (string):
createdOn (string):
"""
return self._wrapper._post('/network/operations/list', **{'userid': userid, 'clazz': clazz, 'sortorder': sortorder, 'sort': sort, 'limit': limit, 'offset': offset})
### Exports everything including test models, network configurations and others from the system. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
@staticmethod
def _administration_operations_exportAllTests(self, filepath):
"""
Exports everything including test models, network configurations and others from the system. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
:param filepath (string): The local path where to save the compressed file with all the models. The path must contain the file name and extension (.tar.gz): '/d/c/f/AllTests.tar.gz'
"""
return self._wrapper._export('/administration/operations/exportAllTests', **{'filepath': filepath})
### Retrieves all the security options
@staticmethod
def _evasionProfile_StrikeOptions_operations_getStrikeOptions(self):
"""
Retrieves all the security options
:return result (list):
"""
return self._wrapper._post('/evasionProfile/StrikeOptions/operations/getStrikeOptions', **{})
### Saves the working network config and gives it a new name.
@staticmethod
def _network_operations_saveAs(self, name, regenerateOldStyle=True, force=False):
"""
Saves the working network config and gives it a new name.
:param name (string): The new name given for the current working network config
:param regenerateOldStyle (bool): Force the changes made on the loaded network configuration to be applied, regenerating the network from the old-style one.
:param force (bool): Force to save the network config. It replaces a pre-existing config having the same name.
"""
return self._wrapper._post('/network/operations/saveAs', **{'name': name, 'regenerateOldStyle': regenerateOldStyle, 'force': force})
### Save the current working network config.
@staticmethod
def _network_operations_save(self, name=None, regenerateOldStyle=True, force=True):
"""
Save the current working network config.
:param name (string): The new name given for the current working network config. No need to configure. The current name is used.
:param regenerateOldStyle (bool): No need to configure. The default is used.
:param force (bool): No need to configure. The default is used.
"""
return self._wrapper._post('/network/operations/save', **{'name': name, 'regenerateOldStyle': regenerateOldStyle, 'force': force})
### null
@staticmethod
def _appProfile_operations_search(self, searchString, limit, sort, sortorder):
"""
:param searchString (string): Search application profile name matching the string given.
:param limit (string): The limit of rows to return
:param sort (string): Parameter to sort by.
:param sortorder (string): The sort order (ascending/descending)
:return results (list):
list of object with fields
name (string):
label (string):
createdBy (string):
revision (number):
description (string):
"""
return self._wrapper._post('/appProfile/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder})
### Removes a strike from the current working Strike List.([{id: 'bb/c/d'}, {id: 'aa/f/g'}])
@staticmethod
def _strikeList_operations_remove(self, strike):
"""
Removes a strike from the current working Strike List.([{id: 'bb/c/d'}, {id: 'aa/f/g'}])
:param strike (list): The list of strike ids to remove. The strike id is in fact its path.
list of object with fields
id (string):
"""
return self._wrapper._post('/strikeList/operations/remove', **{'strike': strike})
### Load an existing Evasion Profile and sets it as the current one.
@staticmethod
def _evasionProfile_operations_load(self, template):
"""
Load an existing Evasion Profile and sets it as the current one.
:param template (string): The name of an Evasion profile template.
"""
return self._wrapper._post('/evasionProfile/operations/load', **{'template': template})
### Creates a new Evasion Profile.
@staticmethod
def _evasionProfile_operations_new(self, template=None):
"""
Creates a new Evasion Profile.
:param template (string): The name should be empty to create a new object.
"""
return self._wrapper._post('/evasionProfile/operations/new', **{'template': template})
### Removes a component from the current working Test Model.
@staticmethod
def _testmodel_operations_remove(self, id):
"""
Removes a component from the current working Test Model.
:param id (string): The component id.
"""
return self._wrapper._post('/testmodel/operations/remove', **{'id': id})
### Exports the result report of a test, identified by its run id, and all of its dependencies. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
@staticmethod
def _reports_operations_exportReport(self, filepath, runid, reportType, sectionIds='', dataType='ALL'):
"""
Exports the result report of a test, identified by its run id, and all of its dependencies. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
:param filepath (string): The local path where to export the report, including the report name.
:param runid (number): Test RUN ID
:param reportType (string): Report file format to be exported in.
:param sectionIds (string): Chapter Ids. A single chapter or several, a sub-chapter or several, or the entire report can be extracted: (sectionIds='6' / sectionIds='5,6,7' / sectionIds='7.4,8.5.2,8.6.3.1' / sectionIds='' (to export the entire report))
:param dataType (string): Report content data type to export. Default value is 'all data'. For tabular only use 'TABLE' and for graphs only use 'CHARTS'.
"""
return self._wrapper._export('/reports/operations/exportReport', **{'filepath': filepath, 'runid': runid, 'reportType': reportType, 'sectionIds': sectionIds, 'dataType': dataType})
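### Example (paths hypothetical, section ids taken from the docstring above):
### exporting only chapters 5-7 of run 42 as PDF:
###   bps.reports.operations.exportReport(filepath='/tmp/r.pdf', runid=42,
###                                       reportType='PDF', sectionIds='5,6,7')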
### Exports a port capture from a test run. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
@staticmethod
def _topology_operations_exportCapture(self, filepath, args):
"""
Exports a port capture from a test run. This operation cannot be executed from the RESTApi Browser; it needs to be executed from a remote system through a REST call.
:param filepath (string): The local path where to save the exported object.
:param args (object): Export filters. Possible values for 'dir' (direction) are 'tx', 'rx', 'both'; for 'sizetype' and 'starttype' (units for size and start) they are 'megabytes' or 'frames'
object of object with fields
port (number): Port number
slot (number): Slot number
dir (string): Capturing direction (rx, tx, both)
size (number): The size of the capture to be exported.
start (number): Start at point.
sizetype (string): The size unit: megabytes or frames.
starttype (string): The start unit: megabytes or frames.
"""
return self._wrapper._export('/topology/operations/exportCapture', **{'filepath': filepath, 'args': args})
### Returns the report Table of Contents using the test run id.
@staticmethod
def _reports_operations_getReportContents(self, runid, getTableOfContents=True):
"""
Returns the report Table of Contents using the test run id.
:param runid (number): The test run id.
:param getTableOfContents (bool): Boolean value defaulting to 'True'. To obtain the Table of Contents this value should remain 'True'.
:return results (list):
list of object with fields
Section Name (string):
Section ID (string):
"""
return self._wrapper._post('/reports/operations/getReportContents', **{'runid': runid, 'getTableOfContents': getTableOfContents})
### Returns the section of a report
@staticmethod
def _reports_operations_getReportTable(self, runid, sectionId):
"""
Returns the section of a report
:param runid (number): The test run id.
:param sectionId (string): The section id of the table desired to extract.
:return results (object):
"""
return self._wrapper._post('/reports/operations/getReportTable', **{'runid': runid, 'sectionId': sectionId})
### null
@staticmethod
def _loadProfile_operations_save(self):
return self._wrapper._post('/loadProfile/operations/save', **{})
### Save the active editing LoadProfile under specified name
@staticmethod
def _loadProfile_operations_saveAs(self, name):
"""
Save the active editing LoadProfile under specified name
:param name (string):
"""
return self._wrapper._post('/loadProfile/operations/saveAs', **{'name': name})
### Deletes a specified load profile from the database.
@staticmethod
def _loadProfile_operations_delete(self, name):
"""
Deletes a specified load profile from the database.
:param name (string): The name of the loadProfile object to delete.
"""
return self._wrapper._post('/loadProfile/operations/delete', **{'name': name})
### null
@staticmethod
def _capture_operations_search(self, searchString, limit, sort, sortorder):
"""
:param searchString (string): Search capture name matching the string given.
:param limit (string): The limit of rows to return
:param sort (string): Parameter to sort by.
:param sortorder (string): The sort order (ascending/descending)
:return results (list):
list of object with fields
name (string):
totalPackets (string):
duration (string):
ipv4Packets (string):
ipv6Packets (string):
avgPacketSize (string):
udpPackets (string):
contentType (string):
pcapFilesize (string):
tcpPackets (string):
avgFlowLength (string):
"""
return self._wrapper._post('/capture/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder})
### Load an existing Strike List and sets it as the current one.
@staticmethod
def _strikeList_operations_load(self, template):
"""
Load an existing Strike List and sets it as the current one.
:param template (string): The name of the Strike List template
"""
return self._wrapper._post('/strikeList/operations/load', **{'template': template})
### Creates a new Strike List.
@staticmethod
def _strikeList_operations_new(self, template=None):
"""
Creates a new Strike List.
:param template (string): The name of the template. In this case it will be empty.
"""
return self._wrapper._post('/strikeList/operations/new', **{'template': template})
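### The metaclass below embeds the REST data model as a nested dict; it
### appears to drive the attribute tree (e.g. strikeList.operations.new)
### that maps onto the /bps/api/v2/core/ paths used by the helpers above.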
class DataModelMeta(type):
_dataModel = {
'evasionProfile': {
'lockedBy': {
},
'createdBy': {
},
'author': {
},
'name': {
},
'description': {
},
'label': {
},
'StrikeOptions': {
'TCP': {
'DuplicateBadSyn': {
},
'DuplicateBadChecksum': {
},
'SneakAckHandshake': {
},
'AcknowledgeAllSegments': {
},
'DuplicateBadSeq': {
},
'SkipHandshake': {
},
'SourcePort': {
},
'MaxSegmentSize': {
},
'DestinationPort': {
},
'DuplicateBadReset': {
},
'DestinationPortType': {
},
'DuplicateLastSegment': {
},
'DuplicateNullFlags': {
},
'SegmentOrder': {
},
'SourcePortType': {
}
},
'JAVASCRIPT': {
'Obfuscate': {
},
'Encoding': {
}
},
'FTP': {
'PadCommandWhitespace': {
},
'Username': {
},
'FTPEvasionLevel': {
},
'AuthenticationType': {
},
'Password': {
}
},
'IPv6': {
'TC': {
}
},
'DCERPC': {
'MultiContextBindHead': {
},
'MultiContextBind': {
},
'MultiContextBindTail': {
},
'MaxFragmentSize': {
},
'UseObjectID': {
}
},
'RTF': {
'FictitiousCW': {
},
'ASCII_Escaping': {
},
'MixedCase': {
},
'WhiteSpace': {
}
},
'POP3': {
'PadCommandWhitespace': {
},
'Username': {
},
'POP3UseProxyMode': {
},
'AuthenticationType': {
},
'Password': {
}
},
'Variations': {
'Subset': {
},
'Shuffle': {
},
'VariantTesting': {
},
'Limit': {
},
'TestType': {
}
},
'OLE': {
'RefragmentData': {
}
},
'HTML': {
'HTMLUnicodeUTF8EncodingMode': {
},
'HTMLUnicodeUTF8EncodingSize': {
},
'HTMLUnicodeEncoding': {
},
'HTMLUnicodeUTF7EncodingMode': {
}
},
'EMAIL': {
'EnvelopeType': {
},
'ShuffleHeaders': {
},
'To': {
},
'From': {
}
},
'Global': {
'FalsePositives': {
},
'IOTimeout': {
},
'AllowDeprecated': {
},
'BehaviorOnTimeout': {
},
'MaxTimeoutPerStrike': {
},
'CachePoisoning': {
}
},
'MS_Exchange_Ports': {
'SystemAttendant': {
}
},
'PDF': {
'HexEncodeNames': {
},
'ShortFilterNames': {
},
'RandomizeDictKeyOrder': {
},
'Version': {
},
'PreHeaderData': {
}
},
'SNMP': {
'CommunityString': {
}
},
'COMMAND': {
'PadCommandWhitespace': {
},
'PadPathSlashes': {
},
'Malicious': {
}
},
'ICMP': {
'DoEcho': {
}
},
'UDP': {
'DestinationPortType': {
},
'SourcePort': {
},
'SourcePortType': {
},
'DestinationPort': {
}
},
'IP': {
'ReadWriteWindowSize': {
},
'RFC3128FakePort': {
},
'FragEvasion': {
},
'RFC3128': {
},
'TTL': {
},
'MaxReadSize': {
},
'RFC3514': {
},
'FragPolicy': {
},
'MaxFragSize': {
},
'FragOrder': {
},
'TOS': {
},
'IPEvasionsOnBothSides': {
},
'MaxWriteSize': {
}
},
'SMB': {
'Username': {
},
'RandomPipeOffset': {
},
'MaxReadSize': {
},
'MaxWriteSize': {
},
'AuthenticationType': {
},
'Password': {
}
},
'IMAP4': {
'Username': {
},
'IMAPUseProxyMode': {
},
'AuthenticationType': {
},
'Password': {
}
},
'HTTP': {
'ClientChunkedTransferSize': {
},
'EncodeUnicodeBareByte': {
},
'VirtualHostname': {
},
'EncodeUnicodePercentU': {
},
'GetParameterRandomPrepend': {
},
'EncodeSecondNibbleHex': {
},
'EncodeUnicodeInvalid': {
},
'ServerChunkedTransferSize': {
},
'VersionRandomizeCase': {
},
'URIRandomizeCase': {
},
'AuthenticationType': {
},
'ServerCompression': {
},
'VirtualHostnameType': {
},
'URIPrependAltSpaces': {
},
'URIPrependAltSpacesSize': {
},
'EncodeFirstNibbleHex': {
},
'MethodRandomInvalid': {
},
'VersionRandomInvalid': {
},
'ServerChunkedTransfer': {
},
'EncodeDoublePercentHex': {
},
'URIAppendAltSpacesSize': {
},
'EncodeHexRandom': {
},
'DirectorySelfReference': {
},
'EndRequestFakeHTTPHeader': {
},
'EncodeUnicodeAll': {
},
'EncodeUnicodeRandom': {
},
'Base64EncodePOSTData': {
},
'IgnoreHeaders': {
},
'RequestFullURL': {
},
'HTTPTransportMethods': {
},
'Password': {
},
'MethodRandomizeCase': {
},
'MethodURISpaces': {
},
'ShuffleHeaders': {
},
'DirectoryFakeRelative': {
},
'URIAppendAltSpaces': {
},
'MethodURITabs': {
},
'RequireLeadingSlash': {
},
'EncodeDoubleNibbleHex': {
},
'ForwardToBackSlashes': {
},
'PadHTTPPost': {
},
'MethodURINull': {
},
'Username': {
},
'VersionUse0_9': {
},
'EncodeHexAll': {
},
'PostParameterRandomPrepend': {
},
'ClientChunkedTransfer': {
},
'HTTPServerProfile': {
}
},
'SELF': {
'ApplicationPings': {
},
'TraversalVirtualDirectory': {
},
'AppSimUseNewTuple': {
},
'StartingFuzzerOffset': {
},
'URI': {
},
'FileTransferRandCase': {
},
'UnicodeTraversalWindowsDirectory': {
},
'AREA-ID': {
},
'AppSimAppProfile': {
},
'Repetitions': {
},
'FileTransferExtension': {
},
'Password': {
},
'AppSimSmartflow': {
},
'HTMLPadding': {
},
'MaximumIterations': {
},
'FileTransferFile': {
},
'AS-ID': {
},
'AppSimSuperflow': {
},
'EndingFuzzerOffset': {
},
'ReportCLSIDs': {
},
'DelaySeconds': {
},
'Username': {
},
'UnicodeTraversalVirtualDirectory': {
},
'TraversalWindowsDirectory': {
},
'FileTransferName': {
},
'MaximumRuntime': {
},
'ROUTER-ID': {
},
'TraversalRequestFilename': {
}
},
'SHELLCODE': {
'RandomNops': {
}
},
'SSL': {
'ClientCertificateFile': {
},
'EnableOnAllTCP': {
},
'SecurityProtocol': {
},
'DestPortOverride': {
},
'ServerCertificateFile': {
},
'ServerKeyFile': {
},
'EnableOnAllHTTP': {
},
'ClientKeyFile': {
},
'Cipher': {
},
'DisableDefaultStrikeSSL': {
}
},
'SUNRPC': {
'OneFragmentMultipleTCPSegmentsCount': {
},
'RPCFragmentTCPSegmentDistribution': {
},
'TCPFragmentSize': {
},
'NullCredentialPadding': {
}
},
'FILETRANSFER': {
'SmtpEncoding': {
},
'CompressionMethod': {
},
'FtpTransferMethod': {
},
'TransportProtocol': {
},
'Imap4Encoding': {
},
'Pop3Encoding': {
}
},
'UNIX': {
'PadCommandWhitespace': {
},
'PadPathSlashes': {
}
},
'SMTP': {
'SMTPUseProxyMode': {
},
'PadCommandWhitespace': {
},
'ShuffleHeaders': {
}
},
'Ethernet': {
'MTU': {
}
},
'MALWARE': {
'FilenameInsertEnvVar': {
},
'SmtpEncoding': {
},
'CompressionMethod': {
},
'FtpTransferMethod': {
},
'TransportProtocol': {
},
'Imap4Encoding': {
},
'Pop3Encoding': {
}
},
'SIP': {
'EnvelopeType': {
},
'CompactHeaders': {
},
'PadHeadersWhitespace': {
},
'RandomizeCase': {
},
'ShuffleHeaders': {
},
'To': {
},
'From': {
},
'PadHeadersLineBreak': {
}
},
'operations': {
'getStrikeOptions': [{
'name': {
},
'description': {
},
'realtimeGroup': {
},
'label': {
},
'units': {
},
'choice': [{
'name': {
},
'description': {
},
'label': {
}
}]
}]
}
},
'createdOn': {
},
'contentType': {
},
'revision': {
},
'operations': {
'delete': [{
}],
'saveAs': [{
}],
'save': [{
}],
'search': [{
}],
'load': [{
}],
'new': [{
}]
}
},
'reports': {
'endtime': {
},
'starttime': {
},
'label': {
},
'testname': {
},
'network': {
},
'duration': {
},
'result': {
},
'size': {
},
'isPartOfResiliency': {
},
'name': {
},
'iteration': {
},
'testid': {
},
'user': {
},
'operations': {
'delete': [{
}],
'search': [{
}],
'exportReport': [{
}],
'getReportContents': [{
}],
'getReportTable': [{
}]
}
},
'capture': {
'pcapFilesize': {
},
'avgPacketSize': {
},
'author': {
},
'udpPackets': {
},
'description': {
},
'label': {
},
'createdOn': {
},
'name': {
},
'revision': {
},
'duration': {
},
'ipv4Packets': {
},
'ipv6Packets': {
},
'lockedBy': {
},
'tcpPackets': {
},
'createdBy': {
},
'avgFlowLength': {
},
'totalPackets': {
},
'contentType': {
},
'operations': {
'importCapture': [{
}],
'search': [{
}]
}
},
'network': {
'lockedBy': {
},
'createdBy': {
},
'author': {
},
'name': {
},
'interfaceCount': {
},
'description': {
},
'label': {
},
'networkModel': {
'enodeb': [{
'dns': {
},
'plmn': {
},
'psn': {
},
'psn_netmask': {
},
'sctp_over_udp': {
},
'enodebs': [{
'mme_ip_address': {
},
'enodebCount': {
},
'ip_address': {
}
}],
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'id': {
},
'sctp_sport': {
}
}],
'ip_router': [{
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'id': {
},
'ip_address': {
}
}],
'ip6_router': [{
'hosts_ip_alloc_container': {
},
'gateway_ip_address': {
},
'default_container': {
},
'id': {
},
'ip_address': {
},
'prefix_length': {
}
}],
'ue_info': [{
'imsi_base': {
},
'secret_key_step': {
},
'count': {
},
'operator_variant': {
},
'secret_key': {
},
'imei_base': {
},
'msisdn_base': {
},
'maxmbps_per_ue': {
},
'mobility_session_infos': [{
'id': {
},
'value': {
}
}],
'id': {
}
}],
'ip_ldap_server': [{
'auth_timeout': {
},
'ldap_username_start_tag': {
},
'ldap_user_min': {
},
'ldap_user_count': {
},
'authentication_rate': {
},
'ldap_password_start_tag': {
},
'ldap_user_max': {
},
'id': {
},
'ldap_server_address': {
},
'dn_fixed_val': {
}
}],
'mme_sgw_pgw6': [{
'ue_info': {
},
'max_sessions': {
},
'lease_address': {
},
'dns': {
},
'plmn': {
},
'ip_address': {
},
'sgw_advertised_sgw': {
},
'sgw_advertised_pgw': {
},
'lease_address_v6': {
},
'gateway_ip_address': {
},
'default_container': {
},
'id': {
},
'prefix_length': {
}
}],
'mobility_session_info': [{
'password': {
},
'bearers': [{
'qci_label': {
}
}],
'id': {
},
'access_point_name': {
},
'username': {
},
'initiated_dedicated_bearers': {
}
}],
'ggsn6': [{
'lease_address': {
},
'count': {
},
'dns': {
},
'ggsn_advertised_control_ip_address': {
},
'ip_address': {
},
'ggsn_advertised_data_ip_address': {
},
'lease_address_v6': {
},
'gateway_ip_address': {
},
'default_container': {
},
'id': {
},
'prefix_length': {
}
}],
'ip_external_hosts': [{
'proxy': {
},
'count': {
},
'id': {
},
'ip_address': {
},
'behind_snapt': {
},
'tags': {
}
}],
'ip_static_hosts': [{
'mpls_list': [{
'id': {
},
'value': {
}
}],
'ip_selection_type': {
},
'count': {
},
'dns': {
},
'psn': {
},
'psn_netmask': {
},
'ip_address': {
},
'tags': {
},
'proxy': {
},
'maxmbps_per_host': {
},
'gateway_ip_address': {
},
'netmask': {
},
'ldap': {
},
'default_container': {
},
'id': {
},
'dns_proxy': {
},
'behind_snapt': {
},
'enable_stats': {
}
}],
'ggsn': [{
'lease_address': {
},
'count': {
},
'dns': {
},
'ggsn_advertised_control_ip_address': {
},
'ip_address': {
},
'ggsn_advertised_data_ip_address': {
},
'lease_address_v6': {
},
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'id': {
}
}],
'interface': [{
'ignore_pause_frames': {
},
'duplicate_mac_address': {
},
'description': {
},
'packet_filter': {
'not_dest_port': {
},
'not_src_ip': {
},
'filter': {
},
'src_ip': {
},
'src_port': {
},
'vlan': {
},
'not_vlan': {
},
'dest_ip': {
},
'not_dest_ip': {
},
'dest_port': {
},
'not_src_port': {
}
},
'impairments': {
'drop': {
},
'corrupt_lt64': {
},
'rate': {
},
'corrupt_lt256': {
},
'corrupt_rand': {
},
'corrupt_chksum': {
},
'corrupt_gt256': {
},
'frack': {
}
},
'mtu': {
},
'vlan_key': {
},
'number': {
},
'use_vnic_mac_address': {
},
'mac_address': {
},
'id': {
}
}],
'ds_lite_b4': [{
'aftr_addr': {
},
'count': {
},
'ip_address': {
},
'host_ip_base_addr': {
},
'ipv6_addr_alloc_mode': {
},
'gateway_ip_address': {
},
'default_container': {
},
'aftr_count': {
},
'hosts_ip_increment': {
},
'id': {
},
'prefix_length': {
},
'host_ip_addr_alloc_mode': {
}
}],
'ue': [{
'allocation_rate': {
},
'mobility_interval_ms': {
},
'ue_info': {
},
'dns': {
},
'mobility_action': {
},
'tags': {
},
'proxy': {
},
'default_container': {
},
'mobility_with_traffic': {
},
'id': {
},
'behind_snapt': {
},
'request_ipv6': {
},
'enable_stats': {
}
}],
'ip_dns_proxy': [{
'dns_proxy_ip_count': {
},
'dns_proxy_src_ip_base': {
},
'id': {
},
'dns_proxy_ip_base': {
},
'dns_proxy_src_ip_count': {
}
}],
'enodeb_mme_sgw6': [{
'dns': {
},
'plmn': {
},
'ip_allocation_mode': {
},
'mme_ip_address': {
},
'pgw_ip_address': {
},
'ue_address': {
},
'gateway_ip_address': {
},
'default_container': {
},
'id': {
},
'prefix_length': {
}
}],
'ip6_dns_proxy': [{
'dns_proxy_ip_count': {
},
'dns_proxy_src_ip_base': {
},
'id': {
},
'dns_proxy_ip_base': {
},
'dns_proxy_src_ip_count': {
}
}],
'vlan': [{
'tpid': {
},
'duplicate_mac_address': {
},
'description': {
},
'mtu': {
},
'outer_vlan': {
},
'inner_vlan': {
},
'mac_address': {
},
'default_container': {
},
'id': {
}
}],
'mme_sgw_pgw': [{
'ue_info': {
},
'max_sessions': {
},
'lease_address': {
},
'dns': {
},
'plmn': {
},
'ip_address': {
},
'sgw_advertised_sgw': {
},
'sgw_advertised_pgw': {
},
'lease_address_v6': {
},
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'id': {
}
}],
'ds_lite_aftr': [{
'count': {
},
'ip_address': {
},
'ipv6_addr_alloc_mode': {
},
'gateway_ip_address': {
},
'default_container': {
},
'b4_count': {
},
'b4_ip_address': {
},
'id': {
},
'prefix_length': {
}
}],
'ipsec_router': [{
'gateway_ip_address': {
},
'netmask': {
},
'ipsec': {
},
'default_container': {
},
'id': {
},
'ip_address': {
},
'ike_peer_ip_address': {
}
}],
'dhcpv6c_req_opts_cfg': [{
'dhcpv6v_req_preference': {
},
'dhcpv6v_req_dns_list': {
},
'dhcpv6v_req_dns_resolvers': {
},
'dhcpv6v_req_server_id': {
},
'id': {
}
}],
'sgsn': [{
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'ggsn_ip_address': {
},
'id': {
},
'ip_address': {
}
}],
'path_advanced': [{
'destination_port_count': {
},
'destination_port_base': {
},
'source_port_base': {
},
'tags': {
},
'enable_external_file': {
},
'source_container': {
},
'source_port_algorithm': {
},
'tuple_limit': {
},
'file': {
},
'destination_port_algorithm': {
},
'destination_container': {
},
'source_port_count': {
},
'xor_bits': {
},
'stream_group': {
},
'id': {
}
}],
'path_basic': [{
'source_container': {
},
'destination_container': {
},
'id': {
}
}],
'enodeb_mme6': [{
'dns': {
},
'plmn': {
},
'ip_allocation_mode': {
},
'enodebs': [{
'gateway_ip_address': {
},
'default_container': {
},
'enodebCount': {
},
'ip_address': {
},
'prefix_length': {
}
}],
'mme_ip_address': {
},
'pgw_ip_address': {
},
'ue_address': {
},
'gateway_ip_address': {
},
'default_container': {
},
'sgw_ip_address': {
},
'id': {
},
'prefix_length': {
}
}],
'pgw': [{
'max_sessions': {
},
'lease_address': {
},
'dns': {
},
'plmn': {
},
'ip_address': {
},
'lease_address_v6': {
},
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'id': {
}
}],
'pgw6': [{
'max_sessions': {
},
'lease_address': {
},
'dns': {
},
'plmn': {
},
'ip_address': {
},
'lease_address_v6': {
},
'gateway_ip_address': {
},
'default_container': {
},
'id': {
},
'prefix_length': {
}
}],
'sgsn6': [{
'gateway_ip_address': {
},
'default_container': {
},
'ggsn_ip_address': {
},
'id': {
},
'ip_address': {
},
'prefix_length': {
}
}],
'ip6_static_hosts': [{
'mpls_list': [{
'id': {
},
'value': {
}
}],
'ip_alloc_container': {
},
'ip_selection_type': {
},
'count': {
},
'dns': {
},
'ip_address': {
},
'tags': {
},
'proxy': {
},
'maxmbps_per_host': {
},
'gateway_ip_address': {
},
'default_container': {
},
'id': {
},
'host_ipv6_addr_alloc_mode': {
},
'prefix_length': {
},
'dns_proxy': {
},
'behind_snapt': {
},
'enable_stats': {
}
}],
'plmn': [{
'mnc': {
},
'description': {
},
'id': {
},
'mcc': {
}
}],
'enodeb_mme_sgw': [{
'dns': {
},
'plmn': {
},
'ip_allocation_mode': {
},
'mme_ip_address': {
},
'pgw_ip_address': {
},
'ue_address': {
},
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'id': {
}
}],
'sgw_pgw': [{
'max_sessions': {
},
'lease_address': {
},
'dns': {
},
'plmn': {
},
'ip_address': {
},
'sgw_advertised_sgw': {
},
'sgw_advertised_pgw': {
},
'lease_address_v6': {
},
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'id': {
}
}],
'ip6_dhcp_server': [{
'ia_type': {
},
'pool_size': {
},
'ip_address': {
},
'pool_prefix_length': {
},
'offer_lifetime': {
},
'max_lease_time': {
},
'gateway_ip_address': {
},
'default_container': {
},
'pool_base_address': {
},
'default_lease_time': {
},
'pool_dns_address1': {
},
'id': {
},
'prefix_length': {
},
'pool_dns_address2': {
}
}],
'enodeb6': [{
'dns': {
},
'plmn': {
},
'sctp_over_udp': {
},
'enodebs': [{
'mme_ip_address': {
},
'enodebCount': {
},
'ip_address': {
}
}],
'gateway_ip_address': {
},
'default_container': {
},
'id': {
},
'prefix_length': {
},
'sctp_sport': {
}
}],
'slaac_cfg': [{
'use_rand_addr': {
},
'enable_dad': {
},
'id': {
},
'stateless_dhcpv6c_cfg': {
},
'fallback_ip_address': {
}
}],
'ip6_external_hosts': [{
'proxy': {
},
'count': {
},
'id': {
},
'ip_address': {
},
'behind_snapt': {
},
'tags': {
}
}],
'ip_dns_config': [{
'dns_domain': {
},
'id': {
},
'dns_server_address': {
}
}],
'dhcpv6c_tout_and_retr_cfg': [{
'dhcp6c_inforeq_attempts': {
},
'dhcp6c_initial_rebind_tout': {
},
'dhcp6c_sol_attempts': {
},
'dhcp6c_max_rebind_tout': {
},
'dhcp6c_release_attempts': {
},
'dhcp6c_initial_release_tout': {
},
'dhcp6c_req_attempts': {
},
'dhcp6c_max_req_tout': {
},
'dhcp6c_max_renew_tout': {
},
'dhcp6c_max_sol_tout': {
},
'dhcp6c_initial_req_tout': {
},
'dhcp6c_max_inforeq_tout': {
},
'dhcp6c_initial_sol_tout': {
},
'dhcp6c_initial_renew_tout': {
},
'dhcp6c_initial_inforeq_tout': {
},
'id': {
}
}],
'ip_dhcp_server': [{
'lease_address': {
},
'count': {
},
'dns': {
},
'ip_address': {
},
'gateway_ip_address': {
},
'netmask': {
},
'lease_time': {
},
'default_container': {
},
'id': {
},
'accept_local_requests_only': {
}
}],
'ip6_dns_config': [{
'dns_domain': {
},
'id': {
},
'dns_server_address': {
}
}],
'sgw_pgw6': [{
'max_sessions': {
},
'lease_address': {
},
'dns': {
},
'plmn': {
},
'ip_address': {
},
'sgw_advertised_sgw': {
},
'sgw_advertised_pgw': {
},
'lease_address_v6': {
},
'gateway_ip_address': {
},
'default_container': {
},
'id': {
},
'prefix_length': {
}
}],
'mpls_settings': [{
'mpls_tags': [{
'mpls_ttl': {
},
'mpls_label': {
},
'mpls_exp': {
}
}],
'id': {
}
}],
'ipsec_config': [{
'ike_dh': {
},
'ipsec_lifetime': {
},
'ike_pfs': {
},
'ike_mode': {
},
'ike_1to1': {
},
'nat_traversal': {
},
'xauth_username': {
},
'ike_encr_alg': {
},
'psk': {
},
'dpd_enabled': {
},
'dpd_timeout': {
},
'init_rate': {
},
'setup_timeout': {
},
'esp_encr_alg': {
},
'ike_lifetime': {
},
'ike_version': {
},
'id': {
},
'left_id': {
},
'ike_prf_alg': {
},
'esp_auth_alg': {
},
'dpd_delay': {
},
'xauth_password': {
},
'initial_contact': {
},
'debug_log': {
},
'wildcard_tsr': {
},
'rekey_margin': {
},
'ike_auth_alg': {
},
'right_id': {
},
'max_outstanding': {
},
'retrans_interval': {
},
'enable_xauth': {
}
}],
'dhcpv6c_cfg': [{
'dhcp6c_max_outstanding': {
},
'dhcp6c_duid_type': {
},
'dhcp6c_ia_type': {
},
'dhcp6c_req_opts_config': {
},
'dhcp6c_tout_and_retr_config': {
},
'dhcp6c_renew_timer': {
},
'dhcp6c_ia_t2': {
},
'id': {
},
'dhcp6c_ia_t1': {
},
'dhcp6c_initial_srate': {
}
}],
'sixrd_ce': [{
'sixrd_prefix': {
},
'count': {
},
'dns': {
},
'sixrd_prefix_length': {
},
'ip_address': {
},
'tags': {
},
'br_ip_address': {
},
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'hosts_per_ce': {
},
'ip4_mask_length': {
},
'id': {
},
'enable_stats': {
}
}],
'ip_dhcp_hosts': [{
'allocation_rate': {
},
'count': {
},
'tags': {
},
'proxy': {
},
'ldap': {
},
'default_container': {
},
'accept_local_offers_only': {
},
'id': {
},
'behind_snapt': {
},
'dns_proxy': {
},
'enable_stats': {
}
}],
'enodeb_mme': [{
'dns': {
},
'plmn': {
},
'ip_allocation_mode': {
},
'enodebs': [{
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'enodebCount': {
},
'ip_address': {
}
}],
'mme_ip_address': {
},
'pgw_ip_address': {
},
'ue_address': {
},
'gateway_ip_address': {
},
'netmask': {
},
'default_container': {
},
'sgw_ip_address': {
},
'id': {
}
}]
},
'createdOn': {
},
'contentType': {
},
'revision': {
},
'operations': {
'importNetwork': [{
}],
'load': [{
}],
'new': [{
}],
'delete': [{
}],
'search': [{
}],
'list': [{
}],
'saveAs': [{
}],
'save': [{
}]
}
},
'topology': {
'ixoslicensed': {
},
'ixos': {
},
'runningTest': [{
'phase': {
},
'timeRemaining': {
},
'runtime': {
},
'label': {
},
'completed': {
},
'initProgress': {
},
'result': {
},
'port': [{
}],
'capturing': {
},
'progress': {
},
'testid': {
},
'state': {
},
'user': {
},
'currentTest': {
}
}],
'model': {
},
'slot': [{
'port': [{
'owner': {
},
'number': {
},
'note': {
},
'exportProgress': {
},
'reservedBy': {
},
'capturing': {
},
'model': {
},
'id': {
},
'group': {
},
'link': {
},
'state': {
},
'speed': {
}
}],
'mode': {
},
'model': {
},
'state': {
},
'id': {
},
'serialNumber': {
}
}],
'serialNumber': {
},
'operations': {
'unreserve': [{
}],
'setCardMode': [{
}],
'setCardSpeed': [{
}],
'setCardFanout': [{
}],
'setPerfAcc': [{
}],
'stopRun': [{
}],
'run': [{
}],
'addPortNote': [{
}],
'reboot': [{
}],
'reserve': [{
}],
'exportCapture': [{
}]
}
},
'superflow': {
'percentFlows': {
},
'seed': {
},
'hosts': [{
'iface': {
},
'hostname': {
},
'ip': {
'type': {
}
},
'id': {
}
}],
'author': {
},
'estimate_bytes': {
},
'estimate_flows': {
},
'weight': {
},
'description': {
},
'label': {
},
'createdOn': {
},
'revision': {
},
'lockedBy': {
},
'flows': [{
'singleNP': {
},
'name': {
},
'from': {
},
'label': {
},
'id': {
},
'to': {
},
'params': {
},
'flowcount': {
},
'operations': {
'getFlowChoices': [{
'lockedBy': {
},
'createdBy': {
},
'author': {
},
'description': {
},
'label': {
},
'createdOn': {
},
'contentType': {
},
'revision': {
}
}],
'getCannedFlows': [{
}]
}
}],
'generated': {
},
'createdBy': {
},
'percentBandwidth': {
},
'name': {
},
'actions': [{
'flowlabel': {
},
'gotoBlock': {
},
'exflows': {
},
'matchBlock': {
},
'id': {
},
'source': {
},
'label': {
},
'type': {
},
'params': {
},
'flowid': {
},
'actionInfo': [{
'name': {
},
'description': {
},
'realtimeGroup': {
},
'label': {
},
'units': {
},
'choice': [{
'name': {
},
'description': {
},
'label': {
}
}]
}],
'operations': {
'getActionChoices': [{
}],
'getActionInfo': [{
'name': {
},
'description': {
},
'realtimeGroup': {
},
'label': {
},
'units': {
},
'choice': [{
'name': {
},
'description': {
},
'label': {
}
}]
}]
}
}],
'contentType': {
},
'operations': {
'search': [{
}],
'addHost': [{
}],
'removeFlow': [{
}],
'addAction': [{
}],
'saveAs': [{
}],
'save': [{
}],
'load': [{
}],
'new': [{
}],
'delete': [{
}],
'removeAction': [{
}],
'addFlow': [{
}]
}
},
'testmodel': {
'lastrunby': {
},
'summaryInfo': {
'totalSubnets': {
},
'totalMacAddresses': {
},
'totalUniqueStrikes': {
},
'totalUniqueSuperflows': {
},
'requiredMTU': {
}
},
'author': {
},
'lastrun': {
},
'description': {
},
'label': {
},
'sharedComponentSettings': {
'maximumConcurrentFlows': {
'current': {
},
'original': {
},
'content': {
}
},
'totalAttacks': {
'current': {
},
'original': {
},
'content': {
}
},
'totalBandwidth': {
'current': {
},
'original': {
},
'content': {
}
},
'maxFlowCreationRate': {
'current': {
},
'original': {
},
'content': {
}
},
'totalAddresses': {
'current': {
},
'original': {
},
'content': {
}
},
'samplePeriod': {
'current': {
},
'original': {
},
'content': {
}
}
},
'createdOn': {
},
'network': {
},
'revision': {
},
'duration': {
},
'result': {
},
'component': [{
'author': {
},
'originalPreset': {
},
'active': {
},
'originalPresetLabel': {
},
'description': {
},
'label': {
},
'type': {
},
'@type:liveappsim': {
'app': {
'removeUnknownTcpUdp': {
},
'replace_streams': {
},
'removeUnknownSSL': {
},
'streamsPerSuperflow': {
},
'removedns': {
},
'fidelity': {
}
},
'tcp': {
'disable_ack_piggyback': {
},
'delay_acks': {
},
'mss': {
},
'raw_flags': {
},
'psh_every_segment': {
},
'ecn': {
},
'tcp_window_scale': {
},
'initial_receive_window': {
},
'reset_at_end': {
},
'dynamic_receive_window_size': {
},
'tcp_connect_delay_ms': {
},
'aging_time_data_type': {
},
'tcp_4_way_close': {
},
'shutdown_data': {
},
'tcp_icw': {
},
'tcp_keepalive_timer': {
},
'aging_time': {
},
'add_timestamps': {
},
'retries': {
},
'handshake_data': {
},
'ack_every_n': {
},
'syn_data_padding': {
},
'retry_quantum_ms': {
},
'delay_acks_ms': {
}
},
'inflateDeflate': {
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'unlimited': {
},
'scope': {
},
'type': {
}
},
'sessions': {
'openFast': {
},
'closeFast': {
},
'max': {
},
'allocationOverride': {
},
'targetPerSecond': {
},
'target': {
},
'targetMatches': {
},
'maxPerSecond': {
},
'engine': {
},
'statDetail': {
},
'emphasis': {
},
'maxActive': {
}
},
'loadprofile': {
'name': {
},
'label': {
}
},
'ip': {
'tos': {
},
'ttl': {
}
},
'ip6': {
'flowlabel': {
},
'traffic_class': {
},
'hop_limit': {
}
},
'srcPortDist': {
'min': {
},
'max': {
},
'type': {
}
},
'tputscalefactor': {
},
'rampUpProfile': {
'min': {
},
'max': {
},
'increment': {
},
'interval': {
},
'type': {
}
},
'concurrencyscalefactor': {
},
'delayStart': {
},
'rampDist': {
'upBehavior': {
},
'down': {
},
'steadyBehavior': {
},
'downBehavior': {
},
'up': {
},
'synRetryMode': {
},
'steady': {
}
},
'sfratescalefactor': {
},
'liveProfile': {
}
},
'@type:layer3advanced': {
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'rate': {
},
'increment': {
},
'type': {
},
'ramptype': {
}
},
'bidirectional': {
},
'enableTCP': {
},
'slowStart': {
},
'Templates': {
'TemplateType': {
}
},
'slowStartFps': {
},
'duration': {
'disable_nd_probes': {
},
'durationTime': {
},
'durationFrames': {
}
},
'enablePerStreamStats': {
},
'tuple_gen_seed': {
},
'payload': {
'data': {
},
'type': {
},
'dataWidth': {
}
},
'advancedUDP': {
'lengthVal': {
},
'lengthField': {
},
'checksumVal': {
},
'checksumField': {
}
},
'delayStart': {
},
'payloadAdvanced': {
'udfMode': {
},
'udfLength': {
},
'udfDataWidth': {
},
'udfOffset': {
}
},
'sizeDist': {
'increment': {
},
'type': {
},
'min': {
},
'rate': {
},
'mixlen2': {
},
'mixweight6': {
},
'mixlen1': {
},
'mixweight7': {
},
'mixlen4': {
},
'mixweight4': {
},
'mixlen3': {
},
'mixweight5': {
},
'mixlen6': {
},
'mixlen5': {
},
'mixlen8': {
},
'mixweight8': {
},
'mixlen7': {
},
'mixweight9': {
},
'mixlen9': {
},
'mixweight2': {
},
'max': {
},
'mixweight3': {
},
'mixweight1': {
},
'mixlen10': {
},
'mixweight10': {
},
'unit': {
}
},
'advancedIPv4': {
'lengthVal': {
},
'optionHeaderField': {
},
'optionHeaderData': {
},
'lengthField': {
},
'checksumVal': {
},
'tos': {
},
'checksumField': {
},
'ttl': {
}
},
'advancedIPv6': {
'flowLabel': {
},
'lengthVal': {
},
'extensionHeaderField': {
},
'lengthField': {
},
'nextHeader': {
},
'trafficClass': {
},
'extensionHeaderData': {
},
'hopLimit': {
}
}
},
'@type:appsim': {
'app': {
'replace_streams': {
},
'streamsPerSuperflow': {
},
'removedns': {
},
'fidelity': {
}
},
'tcp': {
'disable_ack_piggyback': {
},
'delay_acks': {
},
'mss': {
},
'raw_flags': {
},
'psh_every_segment': {
},
'ecn': {
},
'tcp_window_scale': {
},
'initial_receive_window': {
},
'reset_at_end': {
},
'dynamic_receive_window_size': {
},
'tcp_connect_delay_ms': {
},
'aging_time_data_type': {
},
'tcp_4_way_close': {
},
'shutdown_data': {
},
'tcp_icw': {
},
'tcp_keepalive_timer': {
},
'aging_time': {
},
'add_timestamps': {
},
'retries': {
},
'handshake_data': {
},
'ack_every_n': {
},
'syn_data_padding': {
},
'retry_quantum_ms': {
},
'delay_acks_ms': {
}
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'unlimited': {
},
'scope': {
},
'type': {
}
},
'sessions': {
'openFast': {
},
'closeFast': {
},
'max': {
},
'allocationOverride': {
},
'targetPerSecond': {
},
'target': {
},
'targetMatches': {
},
'maxPerSecond': {
},
'engine': {
},
'statDetail': {
},
'emphasis': {
},
'maxActive': {
}
},
'loadprofile': {
'name': {
},
'label': {
}
},
'profile': {
},
'ip': {
'tos': {
},
'ttl': {
}
},
'experimental': {
'tcpSegmentsBurst': {
},
'unify_l4_bufs': {
}
},
'ssl': {
'ssl_client_keylog': {
},
'sslReuseType': {
},
'server_record_len': {
},
'client_record_len': {
},
'ssl_keylog_max_entries': {
}
},
'ip6': {
'flowlabel': {
},
'traffic_class': {
},
'hop_limit': {
}
},
'srcPortDist': {
'min': {
},
'max': {
},
'type': {
}
},
'rampUpProfile': {
'min': {
},
'max': {
},
'increment': {
},
'interval': {
},
'type': {
}
},
'delayStart': {
},
'rampDist': {
'upBehavior': {
},
'down': {
},
'steadyBehavior': {
},
'downBehavior': {
},
'up': {
},
'synRetryMode': {
},
'steady': {
}
}
},
'@type:security_all': {
'maxConcurrAttacks': {
},
'attackRetries': {
},
'maxPacketsPerSecond': {
},
'attackPlan': {
},
'randomSeed': {
},
'delayStart': {
},
'attackProfile': {
},
'attackPlanIterations': {
},
'attackPlanIterationDelay': {
},
'maxAttacksPerSecond': {
}
},
'@type:security_np': {
'attackRetries': {
},
'sessions': {
'max': {
},
'maxPerSecond': {
}
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'unlimited': {
},
'scope': {
},
'type': {
}
},
'attackPlan': {
},
'randomSeed': {
},
'delayStart': {
},
'attackProfile': {
},
'attackPlanIterations': {
},
'attackPlanIterationDelay': {
}
},
'@type:layer3': {
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'rate': {
},
'increment': {
},
'type': {
},
'ramptype': {
}
},
'bidirectional': {
},
'randomizeIP': {
},
'enableTCP': {
},
'slowStart': {
},
'Templates': {
'TemplateType': {
}
},
'srcPort': {
},
'slowStartFps': {
},
'duration': {
'disable_nd_probes': {
},
'durationTime': {
},
'durationFrames': {
}
},
'udpSrcPortMode': {
},
'dstPort': {
},
'payload': {
'data': {
},
'type': {
},
'dataWidth': {
}
},
'syncIP': {
},
'addrGenMode': {
},
'maxStreams': {
},
'dstPortMask': {
},
'udpDstPortMode': {
},
'advancedUDP': {
'lengthVal': {
},
'lengthField': {
},
'checksumVal': {
},
'checksumField': {
}
},
'delayStart': {
},
'payloadAdvanced': {
'udfMode': {
},
'udfLength': {
},
'udfDataWidth': {
},
'udfOffset': {
}
},
'sizeDist': {
'increment': {
},
'type': {
},
'min': {
},
'rate': {
},
'mixlen2': {
},
'mixweight6': {
},
'mixlen1': {
},
'mixweight7': {
},
'mixlen4': {
},
'mixweight4': {
},
'mixlen3': {
},
'mixweight5': {
},
'mixlen6': {
},
'mixlen5': {
},
'mixlen8': {
},
'mixweight8': {
},
'mixlen7': {
},
'mixweight9': {
},
'mixlen9': {
},
'mixweight2': {
},
'max': {
},
'mixweight3': {
},
'mixweight1': {
},
'mixlen10': {
},
'mixweight10': {
},
'unit': {
}
},
'advancedIPv4': {
'lengthVal': {
},
'optionHeaderField': {
},
'optionHeaderData': {
},
'lengthField': {
},
'checksumVal': {
},
'tos': {
},
'checksumField': {
},
'ttl': {
}
},
'srcPortMask': {
},
'advancedIPv6': {
'flowLabel': {
},
'lengthVal': {
},
'extensionHeaderField': {
},
'lengthField': {
},
'nextHeader': {
},
'trafficClass': {
},
'extensionHeaderData': {
},
'hopLimit': {
}
}
},
'@type:layer4': {
'tcp': {
'disable_ack_piggyback': {
},
'delay_acks': {
},
'mss': {
},
'raw_flags': {
},
'psh_every_segment': {
},
'ecn': {
},
'tcp_window_scale': {
},
'initial_receive_window': {
},
'reset_at_end': {
},
'dynamic_receive_window_size': {
},
'tcp_connect_delay_ms': {
},
'aging_time_data_type': {
},
'tcp_4_way_close': {
},
'shutdown_data': {
},
'tcp_icw': {
},
'tcp_keepalive_timer': {
},
'aging_time': {
},
'add_timestamps': {
},
'retries': {
},
'handshake_data': {
},
'ack_every_n': {
},
'syn_data_padding': {
},
'retry_quantum_ms': {
},
'delay_acks_ms': {
}
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'unlimited': {
},
'scope': {
},
'type': {
}
},
'sessions': {
'openFast': {
},
'closeFast': {
},
'max': {
},
'allocationOverride': {
},
'targetPerSecond': {
},
'target': {
},
'targetMatches': {
},
'maxPerSecond': {
},
'engine': {
},
'statDetail': {
},
'emphasis': {
},
'maxActive': {
}
},
'loadprofile': {
'name': {
},
'label': {
}
},
'ip': {
'tos': {
},
'ttl': {
}
},
'ip6': {
'flowlabel': {
},
'traffic_class': {
},
'hop_limit': {
}
},
'srcPortDist': {
'min': {
},
'max': {
},
'type': {
}
},
'rampUpProfile': {
'min': {
},
'max': {
},
'increment': {
},
'interval': {
},
'type': {
}
},
'delayStart': {
},
'payload': {
'add_timestamp': {
},
'data': {
},
'http_type': {
},
'transport': {
},
'type': {
}
},
'rampDist': {
'upBehavior': {
},
'down': {
},
'steadyBehavior': {
},
'downBehavior': {
},
'up': {
},
'synRetryMode': {
},
'steady': {
}
},
'packetsPerSession': {
},
'payloadSizeDist': {
'min': {
},
'max': {
},
'type': {
}
},
'dstPortDist': {
'min': {
},
'max': {
},
'type': {
}
}
},
'@type:playback': {
'tcp': {
'disable_ack_piggyback': {
},
'delay_acks': {
},
'mss': {
},
'raw_flags': {
},
'psh_every_segment': {
},
'ecn': {
},
'tcp_window_scale': {
},
'initial_receive_window': {
},
'reset_at_end': {
},
'dynamic_receive_window_size': {
},
'tcp_connect_delay_ms': {
},
'aging_time_data_type': {
},
'tcp_4_way_close': {
},
'shutdown_data': {
},
'tcp_icw': {
},
'tcp_keepalive_timer': {
},
'aging_time': {
},
'add_timestamps': {
},
'retries': {
},
'handshake_data': {
},
'ack_every_n': {
},
'syn_data_padding': {
},
'retry_quantum_ms': {
},
'delay_acks_ms': {
}
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'unlimited': {
},
'scope': {
},
'type': {
}
},
'sessions': {
'openFast': {
},
'closeFast': {
},
'max': {
},
'allocationOverride': {
},
'targetPerSecond': {
},
'target': {
},
'targetMatches': {
},
'maxPerSecond': {
},
'engine': {
},
'statDetail': {
},
'emphasis': {
},
'maxActive': {
}
},
'loadprofile': {
'name': {
},
'label': {
}
},
'ip': {
'tos': {
},
'ttl': {
}
},
'modification': {
'startpacket': {
},
'originalport': {
},
'newport': {
},
'replay': {
},
'bpfstring': {
},
'single': {
},
'loopcount': {
},
'endpacket': {
},
'independentflows': {
},
'serveripinjection': {
}
},
'ip6': {
'flowlabel': {
},
'traffic_class': {
},
'hop_limit': {
}
},
'srcPortDist': {
'min': {
},
'max': {
},
'type': {
}
},
'rampUpProfile': {
'min': {
},
'max': {
},
'increment': {
},
'interval': {
},
'type': {
}
},
'delayStart': {
},
'file': {
},
'rampDist': {
'upBehavior': {
},
'down': {
},
'steadyBehavior': {
},
'downBehavior': {
},
'up': {
},
'synRetryMode': {
},
'steady': {
}
},
'behavior': {
}
},
'@type:layer2': {
'bidirectional': {
},
'maxStreams': {
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'rate': {
},
'increment': {
},
'type': {
},
'ramptype': {
}
},
'advanced': {
'ethTypeField': {
},
'ethTypeVal': {
}
},
'slowStart': {
},
'slowStartFps': {
},
'duration': {
'disable_nd_probes': {
},
'durationTime': {
},
'durationFrames': {
}
},
'delayStart': {
},
'payloadAdvanced': {
'udfMode': {
},
'udfLength': {
},
'udfDataWidth': {
},
'udfOffset': {
}
},
'sizeDist': {
'increment': {
},
'type': {
},
'min': {
},
'rate': {
},
'mixlen2': {
},
'mixweight6': {
},
'mixlen1': {
},
'mixweight7': {
},
'mixlen4': {
},
'mixweight4': {
},
'mixlen3': {
},
'mixweight5': {
},
'mixlen6': {
},
'mixlen5': {
},
'mixlen8': {
},
'mixweight8': {
},
'mixlen7': {
},
'mixweight9': {
},
'mixlen9': {
},
'mixweight2': {
},
'max': {
},
'mixweight3': {
},
'mixweight1': {
},
'mixlen10': {
},
'mixweight10': {
},
'unit': {
}
},
'payload': {
'data': {
},
'type': {
},
'dataWidth': {
}
}
},
'@type:stackscrambler': {
'tcp': {
'disable_ack_piggyback': {
},
'delay_acks': {
},
'mss': {
},
'raw_flags': {
},
'psh_every_segment': {
},
'ecn': {
},
'tcp_window_scale': {
},
'initial_receive_window': {
},
'reset_at_end': {
},
'dynamic_receive_window_size': {
},
'tcp_connect_delay_ms': {
},
'aging_time_data_type': {
},
'tcp_4_way_close': {
},
'shutdown_data': {
},
'tcp_icw': {
},
'tcp_keepalive_timer': {
},
'aging_time': {
},
'add_timestamps': {
},
'retries': {
},
'handshake_data': {
},
'ack_every_n': {
},
'syn_data_padding': {
},
'retry_quantum_ms': {
},
'delay_acks_ms': {
}
},
'scrambleOptions': {
'maxCorruptions': {
},
'badIPFlags': {
},
'badIPFragOffset': {
},
'badIPLength': {
},
'badUrgentPointer': {
},
'badIPFlowLabel': {
},
'badEthType': {
},
'badTCPOptions': {
},
'badGTPNext': {
},
'handshakeTCP': {
},
'badIPChecksum': {
},
'badSCTPLength': {
},
'badTCPFlags': {
},
'badICMPType': {
},
'badIPTTL': {
},
'badIPProtocol': {
},
'badSCTPFlags': {
},
'badGTPFlags': {
},
'badIPVersion': {
},
'badL4HeaderLength': {
},
'badL4Checksum': {
},
'badIPOptions': {
},
'badSCTPType': {
},
'badSCTPChecksum': {
},
'badGTPNpdu': {
},
'badICMPCode': {
},
'badSCTPVerificationTag': {
},
'badIPTOS': {
},
'badIPTotalLength': {
},
'badGTPLen': {
},
'badGTPType': {
},
'badGTPSeqno': {
}
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'unlimited': {
},
'scope': {
},
'type': {
}
},
'sessions': {
'openFast': {
},
'closeFast': {
},
'max': {
},
'allocationOverride': {
},
'targetPerSecond': {
},
'target': {
},
'targetMatches': {
},
'maxPerSecond': {
},
'engine': {
},
'statDetail': {
},
'emphasis': {
},
'maxActive': {
}
},
'loadprofile': {
'name': {
},
'label': {
}
},
'ip': {
'tos': {
},
'ttl': {
}
},
'ip6': {
'flowlabel': {
},
'traffic_class': {
},
'hop_limit': {
}
},
'prng': {
'seed': {
},
'offset': {
}
},
'srcPortDist': {
'min': {
},
'max': {
},
'type': {
}
},
'rampUpProfile': {
'min': {
},
'max': {
},
'increment': {
},
'interval': {
},
'type': {
}
},
'delayStart': {
},
'payload': {
'data': {
},
'transport': {
},
'type': {
}
},
'rampDist': {
'upBehavior': {
},
'down': {
},
'steadyBehavior': {
},
'downBehavior': {
},
'up': {
},
'synRetryMode': {
},
'steady': {
}
},
'payloadSizeDist': {
'min': {
},
'max': {
},
'type': {
}
},
'dstPortDist': {
'min': {
},
'max': {
},
'type': {
}
}
},
'@type:clientsim': {
'app': {
'replace_streams': {
},
'streamsPerSuperflow': {
},
'removedns': {
},
'fidelity': {
}
},
'tcp': {
'disable_ack_piggyback': {
},
'delay_acks': {
},
'mss': {
},
'raw_flags': {
},
'psh_every_segment': {
},
'ecn': {
},
'tcp_window_scale': {
},
'initial_receive_window': {
},
'reset_at_end': {
},
'dynamic_receive_window_size': {
},
'tcp_connect_delay_ms': {
},
'aging_time_data_type': {
},
'tcp_4_way_close': {
},
'shutdown_data': {
},
'tcp_icw': {
},
'tcp_keepalive_timer': {
},
'aging_time': {
},
'add_timestamps': {
},
'retries': {
},
'handshake_data': {
},
'ack_every_n': {
},
'syn_data_padding': {
},
'retry_quantum_ms': {
},
'delay_acks_ms': {
}
},
'rateDist': {
'unit': {
},
'min': {
},
'max': {
},
'unlimited': {
},
'scope': {
},
'type': {
}
},
'sessions': {
'openFast': {
},
'closeFast': {
},
'max': {
},
'allocationOverride': {
},
'targetPerSecond': {
},
'target': {
},
'targetMatches': {
},
'maxPerSecond': {
},
'engine': {
},
'statDetail': {
},
'emphasis': {
},
'maxActive': {
}
},
'loadprofile': {
'name': {
},
'label': {
}
},
'ip': {
'tos': {
},
'ttl': {
}
},
'ssl': {
'ssl_client_keylog': {
},
'sslReuseType': {
},
'server_record_len': {
},
'client_record_len': {
},
'ssl_keylog_max_entries': {
}
},
'ip6': {
'flowlabel': {
},
'traffic_class': {
},
'hop_limit': {
}
},
'srcPortDist': {
'min': {
},
'max': {
},
'type': {
}
},
'rampUpProfile': {
'min': {
},
'max': {
},
'increment': {
},
'interval': {
},
'type': {
}
},
'delayStart': {
},
'rampDist': {
'upBehavior': {
},
'down': {
},
'steadyBehavior': {
},
'downBehavior': {
},
'up': {
},
'synRetryMode': {
},
'steady': {
}
},
'superflow': {
}
},
'createdOn': {
},
'tags': [{
'id': {
},
'type': {
},
'domainId': {
'name': {
},
'iface': {
},
'external': {
}
}
}],
'revision': {
},
'lockedBy': {
},
'createdBy': {
},
'reportResults': {
},
'timeline': {
'timesegment': [{
'label': {
},
'size': {
},
'type': {
}
}]
},
'id': {
},
'contentType': {
},
'operations': {
'getComponentPresetNames': [{
}]
}
}],
'lockedBy': {
},
'createdBy': {
},
'name': {
},
'contentType': {
},
'testComponentTypesDescription': [{
'template': {
},
'name': {
},
'description': {
},
'label': {
},
'type': {
}
}],
'operations': {
'clone': [{
}],
'importModel': [{
}],
'add': [{
}],
'stopRun': [{
}],
'run': [{
}],
'saveAs': [{
}],
'save': [{
}],
'delete': [{
}],
'search': [{
}],
'exportModel': [{
}],
'load': [{
}],
'new': [{
}],
'realTimeStats': [{
}],
'remove': [{
}]
}
},
'administration': {
'atiLicensing': {
'license': [{
'expires': {
},
'issuedBy': {
},
'name': {
},
'boardserialno': {
},
'issued': {
},
'serialno': {
}
}],
'operations': {
'importAtiLicense': [{
}]
}
},
'systemSettings': {
'strikepackUpdate': {
'password': {
},
'interval': {
},
'check': {
},
'username': {
}
},
'author': {
},
'description': {
},
'label': {
},
'guardrailSettings': {
'enableStrictMode': {
},
'testStop': {
},
'testStatusWarning': {
},
'stopOnLinkdown': {
},
'testStartPrevention': {
}
},
'createdOn': {
},
'revision': {
},
'vacuumSettings': {
'vacuumWindowHigh': {
},
'autoVacuum': {
},
'vacuumWindowLow': {
},
'vacuumWindowTZ': {
}
},
'lockedBy': {
},
'createdBy': {
},
'softwareUpdate': {
'password': {
},
'interval': {
},
'check': {
},
'username': {
}
},
'contentType': {
}
},
'userSettings': [{
'name': {
},
'content': {
},
'operations': {
'changeUserSetting': [{
}]
}
}],
'operations': {
'logs': [{
}],
'exportAllTests': [{
}]
}
},
'results': [{
'name': {
},
'content': {
},
'datasetvals': {
},
'operations': {
'getHistoricalSeries': [{
}],
'getGroups': [{
'lockedBy': {
},
'createdBy': {
},
'author': {
},
'description': {
},
'label': {
},
'createdOn': {
},
'contentType': {
},
'revision': {
}
}],
'getHistoricalResultSize': [{
}]
}
}],
'statistics': {
'component': [{
'statNames': [{
'name': {
},
'description': {
},
'realtimeGroup': {
},
'label': {
},
'units': {
},
'choice': [{
'name': {
},
'description': {
},
'label': {
}
}]
}],
'type': {
},
'label': {
}
}]
},
'appProfile': {
'weightType': {
},
'lockedBy': {
},
'createdBy': {
},
'author': {
},
'name': {
},
'superflow': [{
'percentFlows': {
},
'seed': {
},
'author': {
},
'estimate_bytes': {
},
'estimate_flows': {
},
'weight': {
},
'description': {
},
'label': {
},
'createdOn': {
},
'revision': {
},
'lockedBy': {
},
'generated': {
},
'createdBy': {
},
'percentBandwidth': {
},
'name': {
},
'contentType': {
}
}],
'description': {
},
'label': {
},
'createdOn': {
},
'contentType': {
},
'revision': {
},
'operations': {
'delete': [{
}],
'importAppProfile': [{
}],
'recompute': [{
}],
'load': [{
}],
'new': [{
}],
'add': [{
}],
'remove': [{
}],
'exportAppProfile': [{
}],
'saveAs': [{
}],
'save': [{
}],
'search': [{
}]
}
},
'strikes': {
'severity': {
},
'year': {
},
'variants': {
},
'reference': [{
'label': {
},
'type': {
},
'value': {
}
}],
'path': {
},
'protocol': {
},
'fileSize': {
},
'fileExtension': {
},
'name': {
},
'id': {
},
'category': {
},
'keyword': [{
'name': {
}
}],
'direction': {
},
'operations': {
'search': [{
}]
}
},
'loadProfile': {
'phase': [{
'duration': {
},
'phaseId': {
},
'type': {
},
'sessions.max': {
},
'sessions.maxPerSecond': {
},
'rateDist.unit': {
},
'rateDist.min': {
},
'rampDist.steadyBehavior': {
},
'rateDist.type': {
},
'rateDist.scope': {
}
}],
'author': {
},
'regen': {
},
'description': {
},
'label': {
},
'createdOn': {
},
'summaryData': {
'deviceType': {
},
'unknownUdpAppNames': {
},
'unknownSslSuperflowName': {
},
'magicNumber': {
},
'downloadBytesSum': {
},
'version': {
},
'phaseDuration': {
},
'unknownTcpAppNames': {
},
'uploadBytesSum': {
},
'summaryName': {
},
'basisOfRegeneration': {
},
'activeFlowsSum': {
},
'miniSlotDuration': {
},
'unknownSslAppNames': {
},
'dynamicSuperflowName': {
},
'appStat': [{
}],
'startTime': {
},
'endTime': {
},
'dynamicAppNames': {
}
},
'revision': {
},
'lockedBy': {
},
'createdBy': {
},
'name': {
},
'contentType': {
},
'presets': [{
'phase': [{
'duration': {
},
'phaseId': {
},
'type': {
},
'sessions.max': {
},
'sessions.maxPerSecond': {
},
'rateDist.unit': {
},
'rateDist.min': {
},
'rampDist.steadyBehavior': {
},
'rateDist.type': {
},
'rateDist.scope': {
}
}],
'author': {
},
'regen': {
},
'description': {
},
'label': {
},
'createdOn': {
},
'summaryData': {
'deviceType': {
},
'unknownUdpAppNames': {
},
'unknownSslSuperflowName': {
},
'magicNumber': {
},
'downloadBytesSum': {
},
'version': {
},
'phaseDuration': {
},
'unknownTcpAppNames': {
},
'uploadBytesSum': {
},
'summaryName': {
},
'basisOfRegeneration': {
},
'activeFlowsSum': {
},
'miniSlotDuration': {
},
'unknownSslAppNames': {
},
'dynamicSuperflowName': {
},
'appStat': [{
}],
'startTime': {
},
'endTime': {
},
'dynamicAppNames': {
}
},
'revision': {
},
'lockedBy': {
},
'createdBy': {
},
'name': {
},
'contentType': {
}
}],
'operations': {
'load': [{
}],
'createNewCustom': [{
}],
'save': [{
}],
'saveAs': [{
}],
'delete': [{
}]
}
},
'strikeList': {
'author': {
},
'description': {
},
'label': {
},
'queryString': {
},
'createdOn': {
},
'revision': {
},
'lockedBy': {
},
'createdBy': {
},
'name': {
},
'contentType': {
},
'numStrikes': {
},
'strikes': [{
'severity': {
},
'year': {
},
'variants': {
},
'reference': [{
'label': {
},
'type': {
},
'value': {
}
}],
'path': {
},
'protocol': {
},
'fileSize': {
},
'fileExtension': {
},
'name': {
},
'id': {
},
'category': {
},
'keyword': [{
'name': {
}
}],
'direction': {
}
}],
'operations': {
'add': [{
}],
'saveAs': [{
}],
'save': [{
}],
'search': [{
}],
'exportStrikeList': [{
}],
'delete': [{
}],
'importStrikeList': [{
}],
'remove': [{
}],
'load': [{
}],
'new': [{
}]
}
}
}
@staticmethod
def _get_from_model(path):
model_data = DataModelMeta._dataModel
model_path = ""
for path_part in path.split('/'):
if len(path_part) == 0: continue
if isinstance(model_data, list):
model_data = model_data[0]
continue
if path_part not in model_data: return (None, None)
model_data = model_data[path_part]
model_path = model_path + "/" + path_part
return (model_path, model_data)
@staticmethod
def _decorate_model_object_operations(data_model, data_model_path, obj):
if 'operations' not in data_model:
return
for operation in data_model['operations']:
if obj.__full_path__().replace("/", "") == '':
continue
method_name = data_model_path.replace("/", "_") + '_operations_' + operation
setattr(obj, operation, obj._wrapper.__getattribute__(method_name).__get__(obj))
setattr(getattr(obj, operation).__func__, '__name__', operation)
@staticmethod
def _decorate_model_object(obj):
obj_name = obj._name
(data_model_path, data_model) = DataModelMeta._get_from_model(obj.__data_model_path__())
if data_model is None:
return obj
if isinstance(data_model, list):
setattr(obj, '_getitem_', lambda x: DataModelProxy(wrapper=obj._wrapper, name=str(x), path=obj.__full_path__(), model_path=obj.__data_model_path__()))
if data_model_path.endswith(obj_name):
DataModelMeta._decorate_model_object_operations(data_model[0], data_model_path, obj)
return obj
else:
data_model = data_model[0]
DataModelMeta._decorate_model_object_operations(data_model, data_model_path, obj)
for key in data_model:
if key.startswith("@") or key == 'operations':
continue
setattr(obj, key, DataModelProxy(wrapper=obj._wrapper, name=key, path=obj.__full_path__(), model_path=obj.__data_model_path__()))
if obj_name not in data_model:
for key in data_model:
if not key.startswith("@") or ":" not in key:
continue
[fieldName, fieldValue] = key.split(":")
fieldName = fieldName.replace("@", "")
try:
if obj.__cached_get__(fieldName) != fieldValue:
continue
                except Exception:
continue
for extField in data_model[key]:
ext_path = obj.__full_path__()
ext_dm_path = obj.__data_model_path__() + "/" + key
setattr(obj, extField, DataModelProxy(wrapper=obj._wrapper, name=extField, path=ext_path, model_path=ext_dm_path))
return obj
def __call__(cls, *args, **kwds):
return DataModelMeta._decorate_model_object(type.__call__(cls, *args, **kwds))
class DataModelProxy(object, metaclass=DataModelMeta):
def __init__(self, wrapper, name, path='', model_path=None):
self.__cache = {}
self._wrapper = wrapper
self._name = name
self._path = path
if model_path is None:
self._model_path = self._path
else:
self._model_path = model_path
def __full_path__(self):
return '%s/%s' % (self._path, self._name)
def __data_model_path__(self):
return '%s/%s' % (self._model_path, self._name)
def __url__(self):
return 'https://%s/bps/api/v2/core%s' % (self._wrapper.host, self.__full_path__())
def __repr__(self):
return 'proxy object for \'%s\' ' % (self.__url__())
def __getitem__(self, item):
if type(item) == int: item = '{%s}'%item
return self._getitem_(item)
def get(self, responseDepth=None, **kwargs):
return self._wrapper._get(self._path+'/'+self._name, responseDepth, **kwargs)
def __cached_get__(self, field):
if field not in self.__cache: self.__cache[field] = self._wrapper._get(self.__data_model_path__()+"/"+field)
return self.__cache[field]
def patch(self, value):
return self._wrapper._patch(self._path+'/'+self._name, value)
def set(self, value):
return self.patch(value)
def put(self, value):
return self._wrapper._put(self._path+'/'+self._name, value)
def delete(self):
return self._wrapper._delete(self._path+'/'+self._name)
def help(self):
doc_data = self._wrapper._options(self._path+'/'+self._name)
if doc_data and 'custom' in doc_data:
doc_data = doc_data['custom']
if doc_data and 'description' in doc_data:
bps_api_log.info(doc_data['description'])
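# Usage sketch (not part of the original module): how a caller might traverse
# the data model through DataModelProxy attribute access. The wrapper object,
# host and credentials below are hypothetical stand-ins for whatever supplies
# the _get/_patch/_options HTTP helpers.
#
#   wrapper = BPSRestWrapper(host='10.0.0.1', user='admin', password='admin')  # hypothetical
#   root = DataModelProxy(wrapper=wrapper, name='')
#   root.testmodel.get()          # GET https://<host>/bps/api/v2/core/testmodel
#   root.testmodel.load('demo')   # 'load' is bound on from the model's 'operations' entries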
|
nilq/baby-python
|
python
|
from module import foo, bar
from module import foo, \
bar, \
baz
from module import (foo, bar)
from module import (foo,
bar,
baz)
|
nilq/baby-python
|
python
|
from jsonrpcserver.sentinels import Sentinel
def test_Sentinel():
assert repr(Sentinel("foo")) == "<foo>"
|
nilq/baby-python
|
python
|
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from django.core.management.base import BaseCommand
from reviews_manager.models import ReviewsComparison
import logging
logger = logging.getLogger('promort_commands')
class Command(BaseCommand):
help = 'check third reviewer\'s worklist and fix it if necessary'
def add_arguments(self, parser):
parser.add_argument('--keep_reviews', action='store_true',
help='Keep reviews performed by third reviewer even if not necessary')
def _get_review_comparisons(self):
return ReviewsComparison.objects.filter(positive_match=False, positive_quality_control=True)
def _delete_review(self, clinical_annotation):
if len(clinical_annotation.steps.all()) == 0:
clinical_annotation.delete()
logger.info('Clinical annotation %s deleted', clinical_annotation.label)
def _delete_gs_review_step(self, clinical_annotation_step):
fr_ann = clinical_annotation_step.focus_region_annotations.all()
logger.info('Deleting %d focus region annotations', len(fr_ann))
fr_ann.delete()
c_ann = clinical_annotation_step.core_annotations.all()
logger.info('Deleting %d core annotations', len(c_ann))
c_ann.delete()
s_ann = clinical_annotation_step.slice_annotations.all()
logger.info('Deleting %d slice annotations', len(s_ann))
s_ann.delete()
c_ann = clinical_annotation_step.clinical_annotation
clinical_annotation_step.delete()
logger.info('Clinical annotation step %s deleted', clinical_annotation_step.label)
self._delete_review(c_ann)
def _check_and_fix(self, rc_object, keep_review):
if not rc_object.review_1.rois_review_step.is_positive():
logger.info('### ReviewComparison object %d --- NEED TO FIX! ###', rc_object.id)
if rc_object.review_3 is not None and not keep_review:
r3_obj = rc_object.review_3
logger.info('-- Clearing reviews step %s --', r3_obj.label)
# unlink to prevent delete protection error
rc_object.review_3 = None
rc_object.save()
# delete clinical annotation step
self._delete_gs_review_step(r3_obj)
rc_object.positive_match = True
logger.info('Setting RC object positive_match to True')
rc_object.save()
def handle(self, *args, **opts):
logger.info('Collecting ReviewsComparison objects')
r_comp = self._get_review_comparisons()
logger.info('Retrieved %d objects', len(r_comp))
for rc in r_comp:
self._check_and_fix(rc, opts['keep_reviews'])
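# Invocation sketch: Django resolves this command from its module filename
# under <app>/management/commands/, e.g.
#   python manage.py <module_name> [--keep_reviews]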
|
nilq/baby-python
|
python
|
import numpy as np  # needed for np.newaxis below
from numpy import zeros
from sklearn.tree import _tree
def _interpret_tree(tree, X, n_labels):
# Tree preprocessing allowing down-top search
parents = [-1 for _ in range(tree.node_count)]
to_pursue = [0]
while len(to_pursue):
node_i = to_pursue.pop()
child_l = tree.children_left[node_i]
if child_l != _tree.TREE_LEAF:
parents[child_l] = node_i
child_r = tree.children_right[node_i]
parents[child_r] = node_i
to_pursue.append(child_l)
to_pursue.append(child_r)
# Values normalization -> probas
values = tree.value.squeeze(axis=1)
values /= values.sum(axis=1)[:, np.newaxis]
n_features = len(X[0])
f_contribs = [ zeros( (1, n_labels) ) for _ in range(n_features) ]
biases = zeros( (1, n_labels) )
f_indices = list(tree.feature)
# For each sample to test, we check in which leaf it lands
leaves = tree.apply(X)
leaves_value = {}
for leaf in leaves:
if leaf not in leaves_value:
l_contribs = [ zeros( (1, n_labels) ) for _ in range(n_features) ]
cur_node = leaf
while cur_node != -1:
par_node = parents[cur_node]
if par_node >= 0:
resp_feature = f_indices[par_node]
l_contribs[resp_feature] += (values[cur_node] - values[par_node])
cur_node = par_node
leaves_value[leaf] = l_contribs, values[leaf]
l_contribs, l_bias = leaves_value[leaf]
f_contribs = [f_i + c_i for f_i, c_i in zip(f_contribs, l_contribs) ]
biases += l_bias
f_contribs = [i/len(X) for i in f_contribs]
biases /= len(X)
return f_contribs, biases
def interpret_forest(forest, X, n_labels):
f_contribs = [ zeros( (1, n_labels) ) for _ in range(len(X[0])) ]
f_biases = 0
for tree in map(lambda x: x.tree_, forest.estimators_):
t_contribs, t_biases = _interpret_tree(tree, X, n_labels)
f_contribs = [x + y/forest.n_estimators for x, y in zip(f_contribs, t_contribs)]
f_biases += t_biases/forest.n_estimators
return f_contribs, f_biases
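if __name__ == "__main__":
    # Minimal usage sketch, assuming scikit-learn is available (iris has 3 classes).
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier

    data = load_iris()
    forest = RandomForestClassifier(n_estimators=10, random_state=0)
    forest.fit(data.data, data.target)
    # The low-level tree.apply() used by _interpret_tree expects float32 input.
    X = data.data[:5].astype(np.float32)
    contribs, biases = interpret_forest(forest, X, n_labels=3)
    # contribs[i] is the averaged (1, n_labels) contribution of feature i;
    # biases holds the averaged (1, n_labels) leaf probabilities.
    print(biases)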
|
nilq/baby-python
|
python
|
from behavioral.interpreter.logic.tokens.token_type import TokenType
class Token:
def __init__(self, token_type: TokenType, text: str) -> None:
self.type = token_type
self.text = text
def __repr__(self) -> str:
return f"Token '{self.type.name}' with value '{self.text}'"
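# Example (assumes TokenType defines a NUMBER member; illustrative only):
#
#   token = Token(TokenType.NUMBER, "42")
#   print(token)  # Token 'NUMBER' with value '42'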
|
nilq/baby-python
|
python
|
import pytest
from .fixtures import *
@pytest.mark.parametrize(["num_partitions", "rows"], [(7, 30), (3, 125), (27, 36)])
def test_update_table_keeps_partition_structure(num_partitions, rows, store):
fixtures = UpdateFixtures(rows)
original_df = fixtures.make_df()
update_df = fixtures.generate_update_values()
partition_size = get_partition_size(original_df, num_partitions)
table = store.select_table(TABLE_NAME)
table.write(original_df, partition_size=partition_size)
partition_names = table._partition_data.keys()
partition_data = table._partition_data.read()
# Act
table.update(update_df)
# Assert
    _assert_that_partitions_are_the_same(table, partition_names, partition_data)
def _assert_that_partitions_are_the_same(table, partition_names, partition_data):
# Check that partitions keep the same structure after update
df = table.read_arrow(TABLE_NAME)
index = df['index']
for partition, partition_name in zip(index.chunks, partition_names):
metadata = partition_data[partition_name]
index_start = partition[0].as_py()
index_end = partition[-1].as_py()
num_rows = len(partition)
assert index_start == metadata['min']
assert index_end == metadata['max']
assert num_rows == metadata['num_rows']
def test_update_table(store):
# Arrange
fixtures = UpdateFixtures()
original_df = fixtures.make_df()
update_df = fixtures.generate_update_values()
expected = fixtures.update_table(update_df)
partition_size = get_partition_size(original_df, NUMBER_OF_PARTITIONS)
table = store.select_table(TABLE_NAME)
table.write(original_df, partition_size=partition_size)
# Act
table.update(update_df)
# Assert
df = store.read_pandas(TABLE_NAME)
assert df.equals(expected)
assert not df.equals(original_df)
@pytest.mark.parametrize(["index", "rows"],
[(None, [10, 13, 14, 21]),
(hardcoded_string_index, ["row00010", "row00013",
"row00014", "row00021"]),
(hardcoded_datetime_index, ["2021-01-01", "2021-01-16",
"2021-01-07"])
]
)
def test_update_table_with_pandas_series(index, rows, store):
# Arrange
fixtures = UpdateFixtures(index=index, update_rows=rows, update_cols=['c0'])
original_df = fixtures.make_df()
    update_series = fixtures.generate_update_values(cols=1, as_series=True)
expected = fixtures.update_table(update_series)
table = store.select_table(TABLE_NAME)
table.write(original_df)
# Act
table.update(update_series)
# Assert
df = store.read_pandas(TABLE_NAME)
assert df.equals(expected)
assert not df.equals(original_df)
class UpdateFixtures:
def __init__(self, rows=30, index=None, update_rows=(10, 13, 14, 21),
update_cols=('c2', 'c0')):
self.rows = rows
self.index = index
self.update_rows = update_rows
self.update_cols = update_cols
def make_df(self, cols=5):
self.df = make_table(index=self.index, rows=self.rows, cols=cols, astype="pandas")
self.df.index.name = 'index'
return self.df
def generate_update_values(self, cols=5, as_series=False):
update_values = make_table(index=self.index, rows=self.rows, cols=cols, astype='pandas')
update_values = update_values.loc[self.update_rows, self.update_cols]
if as_series:
update_values = update_values.squeeze()
return update_values
def update_table(self, values):
expected = self.df.copy()
expected.loc[self.update_rows, self.update_cols] = values
return expected
def _wrong_index_dtype():
df = make_table(sorted_datetime_index, astype="pandas")
return df
def _wrong_index_values():
df = make_table(astype="pandas")
df = df.head(5)
df.index = [2, 5, 7, 10, 459]
return df
def _duplicate_index_values():
df = make_table(astype="pandas")
df = df.head(5)
df.index = [2, 5, 7, 10, 10]
return df
def _wrong_column_dtype():
df = make_table(sorted_string_index, cols=1, astype="pandas")
df = df.reset_index()
df.columns = ['c1', 'c2']
df = df.head(5)
return df
def _wrong_column_names():
df = make_table(cols=2, astype="pandas")
df = df.head(5)
df.columns = ['c1', 'non-existant_column']
return df
def _duplicate_column_names():
df = make_table(cols=2, astype="pandas")
df = df.head(5)
df.columns = ['c2', 'c2']
return df
@pytest.mark.parametrize(
("update_df", "exception"),
[
(_wrong_index_dtype(), TypeError),
(_wrong_index_values(), ValueError),
(_duplicate_index_values(), IndexError),
(_wrong_column_dtype(), TypeError),
(_wrong_column_names(), IndexError),
(_duplicate_column_names(), IndexError),
],
ids=[
"_wrong_index_dtype",
"_wrong_index_values",
"_duplicate_index_values",
"_wrong_column_dtype",
"_wrong_column_names",
"_duplicate_column_names",
],
)
def test_can_update_table(update_df, exception, store):
# Arrange
original_df = make_table(cols=5, astype='pandas')
store.write_table(TABLE_NAME, original_df)
table = store.select_table(TABLE_NAME)
# Act
with pytest.raises(exception) as e:
table.update(update_df)
# Assert
assert isinstance(e.type(), exception)
|
nilq/baby-python
|
python
|
"""Ghana specific form helpers."""
from django.forms.fields import Select
from .gh_regions import REGIONS
class GHRegionSelect(Select):
"""
A Select widget with option to select a region from
list of all regions of Ghana.
"""
def __init__(self, attrs=None):
super().__init__(attrs, choices=REGIONS)
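# Usage sketch (the form and field names are illustrative):
#
#   from django import forms
#
#   class AddressForm(forms.Form):
#       region = forms.CharField(widget=GHRegionSelect())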
|
nilq/baby-python
|
python
|
from django.conf import settings
def pytest_configure():
settings.configure(INSTALLED_APPS=["geoipdb_loader"])
|
nilq/baby-python
|
python
|
import datetime
from typing import Any, Optional
from googleapiclient.discovery import build
from jarvis.plugins.auth.google_auth import GoogleAuth
from .config import GoogleCalendar as GoogleCalendarConfig  # alias: the class below shadows the imported name
class GoogleCalendar:
def __init__(self, calendar_id: Optional[str] = None) -> None:
        self.calendars: dict = GoogleCalendarConfig.calendars
self.calendar_service: Any = build('calendar', 'v3', credentials=GoogleAuth().creds)
self.current_calendar: dict = {calendar_id: self.calendars[calendar_id]} if calendar_id is not None else self.calendars
self.events: dict = {}
    def list_events(self, min_time: Optional[str] = None, max_results: Optional[int] = 10) -> None:
        """Calendar API List Events
        """
        # Compute the default at call time; a value in the signature would be
        # evaluated once at import and frozen.
        if min_time is None:
            min_time = datetime.datetime.utcnow().isoformat() + 'Z'
for index, cal in self.current_calendar.items():
events_result = self.calendar_service.events().list(
calendarId=cal,
timeMin=min_time,
maxResults=max_results, singleEvents=True,
orderBy='startTime').execute()
tmp_events = events_result.get('items', [])
            self.events[index] = tmp_events
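# Usage sketch (assumes 'primary' is a key of the configured calendars dict
# and that GoogleAuth can resolve valid credentials):
#
#   cal = GoogleCalendar(calendar_id='primary')
#   cal.list_events(max_results=5)
#   print(cal.events)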
|
nilq/baby-python
|
python
|
import sys
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
# TODO: fix it
sys.path.append("./")
from calculus_of_variations import MultidimensionalSolver
from web_interface.utils import (
dash_multidimensional_answer,
dash_multidimensional_problem,
get_argparse,
)
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(
[
dcc.Markdown("# Multidimensional problem"),
dcc.Markdown("### Input"),
html.Div(
[
dcc.Markdown("Enter **L**:"),
dcc.Input(id="L", value="x1_diff ** 2 + x2_diff ** 2", type="text"),
]
),
html.Br(),
html.Div(
[dcc.Markdown("Enter **t0**:"), dcc.Input(id="t0", value="0", type="text")]
),
html.Br(),
html.Div(
[dcc.Markdown("Enter **t1**:"), dcc.Input(id="t1", value="1", type="text")]
),
html.Br(),
html.Div(
[
dcc.Markdown("Enter **x1_0**:"),
dcc.Input(id="x1_0", value="0", type="text"),
]
),
html.Br(),
html.Div(
[
dcc.Markdown("Enter **x1_1**:"),
dcc.Input(id="x1_1", value="1", type="text"),
]
),
html.Br(),
html.Div(
[
dcc.Markdown("Enter **x2_0**:"),
dcc.Input(id="x2_0", value="0", type="text"),
]
),
html.Br(),
html.Div(
[
dcc.Markdown("Enter **x2_1**:"),
dcc.Input(id="x2_1", value="1", type="text"),
]
),
html.Br(),
html.Button("solve", id="solve"),
html.Br(),
html.Div(id="input"),
]
)
@app.callback(
Output(component_id="input", component_property="children"),
[Input("solve", "n_clicks")],
[
State("L", "value"),
State("t0", "value"),
State("t1", "value"),
State("x1_0", "value"),
State("x1_1", "value"),
State("x2_0", "value"),
State("x2_1", "value"),
],
)
def update_output(
n_clicks, L: str, t0: str, t1: str, x1_0: str, x1_1: str, x2_0: str, x2_1: str
):
# click "solve"
if n_clicks is None:
return
try:
solver = MultidimensionalSolver(
L=L, t0=t0, t1=t1, x1_0=x1_0, x1_1=x1_1, x2_0=x2_0, x2_1=x2_1
)
solver.solve()
    except Exception:
to_return = html.Div(dcc.Markdown("### Something went wrong :("))
else:
to_return = html.Div(
[
dcc.Markdown("### Problem"),
dash_multidimensional_problem(solver=solver),
dcc.Markdown("### Answer"),
dash_multidimensional_answer(solver=solver),
]
)
return to_return
if __name__ == "__main__":
# argparse
parser = get_argparse()
args = parser.parse_args()
# run server
app.run_server(host=args.host, port=args.port, debug=args.debug)
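# Launch sketch (flag names assumed from the args.host/args.port/args.debug
# usage above; see web_interface.utils.get_argparse for the actual parser):
#   python <this_module>.py --host 127.0.0.1 --port 8050 --debug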
|
nilq/baby-python
|
python
|
from datetime import date
from nose.tools import eq_
from nose.plugins.attrib import attr
from allmychanges.crawler import (
_filter_changelog_files,
_extract_version, _parse_item,
_extract_date)
from allmychanges.utils import get_markup_type, get_change_type
from allmychanges.downloaders.utils import normalize_url
def test_changelog_finder():
in_ = [
'./release.sh',
'./HISTORY.rst',
'./docs/RELEASE_NOTES.TXT',
'./docs/releases.rst',
'./kiva/agg/freetype2/docs/release',
'./seed/commands/release.py',
'./doc/source/manual/AppReleaseNotes.rst',
'./src/robotide/application/releasenotes.py',
'./scripts/make-release.py',
'./pypi_release.sh',
'./doc/release.rst',
'./release-process.txt',
'./docs/release_notes/v0.9.15.rst',
'./release.sh',
'./.travis-release-requirements.txt',
'./mkrelease.sh',
'./README.rst',
]
out = [
'./HISTORY.rst',
'./docs/RELEASE_NOTES.TXT',
'./docs/releases.rst',
'./doc/source/manual/AppReleaseNotes.rst',
'./doc/release.rst',
'./release-process.txt',
'./docs/release_notes/v0.9.15.rst',
'./.travis-release-requirements.txt',
'./README.rst',
]
eq_(out, list(_filter_changelog_files(in_)))
def test_extract_version():
def check(v, text=None):
if text:
eq_(v, _extract_version(text))
else:
eq_(v, _extract_version(v))
eq_(v, _extract_version('v' + v))
check(v, '{0} (2013-09-24)'.format(v))
check(v, '{0} (2013.09.24)'.format(v))
check(v, '**{0} (2014-05-16)**'.format(v))
check(v, '**{0} (2014.05.16)**'.format(v))
eq_(v, _extract_version('New version {0}'.format(v)))
eq_(v, _extract_version('New version v{0}'.format(v)))
eq_(v, _extract_version('2015-03-12 {0}'.format(v)))
eq_(v, _extract_version('2015-03-12 v{0}'.format(v)))
eq_(v, _extract_version('2015-03-12 ({0})'.format(v)))
eq_(v, _extract_version('2015-03-12 (v{0})'.format(v)))
# from https://app-updates.agilebits.com/product_history/OPI4
check('5.3.BETA-22')
# from http://spark.apache.org/releases/spark-release-1-3-0.html
check(None, 'Upgrading to Spark 1.3')
# https://archive.apache.org/dist/kafka/0.8.0/RELEASE_NOTES.html
check('0.8.0', u'dist/kafka/0.8.0/RELEASE_NOTES.html')
# https://github.com/numpy/numpy/tree/master/doc/release
check('1.3.0', u'doc/release/1.3.0-notes.rst')
# https://github.com/git/git/blob/master/Documentation/RelNotes/2.3.2.txt
check(None, u'Fixes since v2.3.1')
# this should work because we'll remove stop-words
# like "release notes" and "for"
check('3.0', u'Release Notes for MongoDB 3.0')
# don't consider this a version
# from https://bitbucket.org/cthedot/cssutils/src/d572ac8df6bd18cad203dea1bbf58867ff0d0ebe/docs/html/_sources/CHANGELOG.txt
check(None, '0.3.x')
# from https://github.com/meteor/meteor/blob/devel/History.md#v1032-2015-feb-25
check('1.0.3.2', 'v.1.0.3.2, 2015-Feb-25')
# from https://itunes.apple.com/ru/app/chrome-web-browser-by-google/id535886823?l=en&mt=8
check('40.0.2214.73')
check('05.10.2014.73')
check('3.05.10.2014')
# # from https://github.com/inliniac/suricata/blob/master/ChangeLog
check('2.0.1rc1')
check('2.0beta2')
# from https://github.com/textmate/textmate/blob/master/Applications/TextMate/about/Changes.md
check('2.0-beta.6.7', '2015-01-19 (v2.0-beta.6.7)')
# # from https://github.com/ansible/ansible/blob/devel/CHANGELOG.md
check('1.6.8', '1.6.8 "And the Cradle Will Rock" - Jul 22, 2014')
check('0.2.1')
# this horror is from the https://github.com/Test-More/TB2/blob/master/Changes
check('1.005000_003')
check('1.005000_003', '1.005000_003 Thu Mar 22 17:48:08 GMT 2012')
check('3.0.0-pre', 'v3.0.0-pre (wip)')
check('1.0.12')
check('2.0.0-beta.1')
check(None, 'Just a text with some 1 33 nubers')
check('1.0')
check('0.10.2')
check('2.0.0')
check('1.5.6')
check('0.1.1', 'release-notes/0.1.1.md')
check('1.3', 'doc/go1.3.html')
check(None, ' some number in the item\'s text 0.1')
check(None, 'This is the first version compatible with Django 1.7.')
# this text is too long
check(None, 'SWIG 3.0 required for programs that use SWIG library')
check(None, 'HTTP/1.1 302 Found')
check(None, '<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>')
def test_parse_item():
eq_((True, 0, 'Feature #1155: Log packet payloads in eve alerts'),
_parse_item('Feature #1155: Log packet payloads in eve alerts'))
eq_((False, 0, None),
_parse_item('Some very long feature: doing blah'))
eq_((False, 0, None), _parse_item('Blah minor'))
eq_((False, 2, 'Blah minor'), _parse_item(' Blah minor'))
eq_((True, 2, 'Blah minor'), _parse_item('- Blah minor'))
eq_((True, 3, 'Blah minor'), _parse_item(' - Blah minor'))
eq_((True, 5, 'Blah minor'), _parse_item(' - Blah minor'))
eq_((True, 5, 'Blah minor'), _parse_item(' * Blah minor'))
eq_((True, 5, 'Damn Nginx'), _parse_item(' *) Damn Nginx'))
def test_extract_date():
# from https://github.com/lepture/mistune/blob/master/CHANGES.rst
eq_(date(2014, 12, 5), _extract_date('Released on Dec. 5, 2014.'))
eq_(date(2014, 10, 31), _extract_date('31/10/2014'))
eq_(date(2013, 3, 13), _extract_date('13th March 2013'))
eq_(date(2014, 11, 3), _extract_date('3rd November 2014'))
eq_(date(2013, 2, 22), _extract_date('22nd Feb 2013'))
eq_(None, _extract_date(''))
eq_(None, _extract_date('ejwkjw kjjwk 20'))
eq_(None, _extract_date('2009 thouth 15 fne 04'))
eq_(None, _extract_date('11'))
eq_(None, _extract_date('12.2009'))
eq_(None, _extract_date('4.2-3252'))
eq_(None, _extract_date('2009-05/23'))
# https://github.com/lodash/lodash/wiki/Changelog#aug-17-2012--diff--docs
eq_(date(2012, 8, 17), _extract_date('Aug. 17, 2012'))
eq_(date(2009, 5, 23), _extract_date('2009-05-23'))
eq_(date(2009, 5, 23), _extract_date('2009-5-23'))
eq_(date(2009, 5, 3), _extract_date('2009-05-03'))
eq_(date(2014, 5, 17), _extract_date('2014/05/17'))
eq_(date(2009, 5, 23), _extract_date('05-23-2009'))
eq_(date(2009, 5, 23), _extract_date('05.23.2009'))
eq_(date(2009, 5, 23), _extract_date('23.05.2009'))
eq_(date(2013, 3, 31), _extract_date('1.2.0 (2013-03-31)'))
eq_(date(2009, 5, 23), _extract_date('(2009-05-23)'))
eq_(date(2009, 5, 23), _extract_date('v 1.0.0 (2009-05-23)'))
eq_(date(2014, 5, 16), _extract_date('**1.5.6 (2014-05-16)**'))
eq_(date(2009, 5, 23), _extract_date('in a far far 2009-05-23 there were star wars'))
eq_(date(2009, 5, 23), _extract_date('in a far far 23-05-2009 there were star wars'))
eq_(date(2009, 5, 23), _extract_date('in a far far 23.05.2009 there were star wars'))
# this variant is from Nginx's changelog
eq_(date(2014, 4, 24), _extract_date(' 24 Apr 2014'))
eq_(date(2014, 4, 28), _extract_date('April 28, 2014')) # from django
# these two are from python's click
eq_(date(2014, 5, 23), _extract_date('(bugfix release, released on May 23rd 2014)'))
eq_(date(2014, 5, 21), _extract_date('(no codename, released on May 21st 2014)'))
eq_(date(2014, 8, 13), _extract_date('August 13th 2014'))
# like click's but from handlebars.js
eq_(date(2014, 9, 1), _extract_date('September 1st, 2014'))
# and this one from https://enterprise.github.com/releases
eq_(date(2012, 2, 9), _extract_date('February 9, 2012'))
eq_(date(2014, 9, 2), _extract_date('September 2, 2014'))
# from https://github.com/ingydotnet/boolean-pm/blob/master/Changes
# https://github.com/miyagawa/Perlbal-Plugin-PSGI/blob/master/Changes
eq_(date(2014, 8, 8), _extract_date('Fri Aug 8 19:12:51 PDT 2014'))
# from https://github.com/tadam/Test-Mock-LWP-Dispatch/blob/master/Changes
eq_(date(2013, 5, 28), _extract_date('Tue May 28, 2013'))
eq_(date(2013, 4, 1), _extract_date('Mon Apr 01, 2013'))
eq_(date(2013, 3, 29), _extract_date('Fri Mar 29, 2013'))
# from https://github.com/alex/django-taggit/blob/develop/CHANGELOG.txt
# we consider that first number is a month
# all dates which use day in first position, should be normalized
# by sed expressions
eq_(date(2014, 10, 8), _extract_date('10.08.2014'))
def test_url_normalization():
eq_(('https://github.com/lodash/lodash/wiki/Changelog', None, None),
normalize_url('https://github.com/lodash/lodash/wiki/Changelog'))
eq_(('git://github.com/svetlyak40wt/blah', 'svetlyak40wt', 'blah'),
normalize_url('https://github.com/svetlyak40wt/blah'))
eq_(('git://github.com/svetlyak40wt/blah', 'svetlyak40wt', 'blah'),
normalize_url('https://github.com/svetlyak40wt/blah/'))
eq_(('git://github.com/svetlyak40wt/blah', 'svetlyak40wt', 'blah'),
normalize_url('https://github.com/svetlyak40wt/blah.git'))
eq_(('git://github.com/svetlyak40wt/blah', 'svetlyak40wt', 'blah'),
normalize_url('http://github.com/svetlyak40wt/blah'))
eq_(('git://github.com/svetlyak40wt/blah', 'svetlyak40wt', 'blah'),
normalize_url('git@github.com:svetlyak40wt/blah.git'))
eq_(('https://some-server.com/repo', None, 'repo'),
normalize_url('git+https://some-server.com/repo'))
eq_(('https://github.com/sass/sass', 'sass', 'sass'),
normalize_url('git@github.com:sass/sass.git', for_checkout=False))
eq_(('https://github.com/sass/sass', 'sass', 'sass'),
normalize_url('https://github.com/sass/sass/releases', for_checkout=False))
def test_get_markup_type():
eq_('markdown', get_markup_type('README.MD'))
eq_('markdown', get_markup_type('README.md'))
eq_('markdown', get_markup_type('readme.mD'))
eq_('markdown', get_markup_type('readme.txt.md'))
eq_('markdown', get_markup_type('readme.markdown'))
eq_('markdown', get_markup_type('readme.MARKDOWN'))
eq_('markdown', get_markup_type('readme.mdown'))
eq_('rest', get_markup_type('README.RST'))
eq_('rest', get_markup_type('README.rst'))
eq_('rest', get_markup_type('README.rSt'))
eq_('rest', get_markup_type('readme.txt.rst'))
eq_(None, get_markup_type('README'))
eq_(None, get_markup_type('readme.rd'))
eq_(None, get_markup_type('readme.txt'))
eq_(None, get_markup_type('readme.rst.'))
def test_get_change_type():
eq_('new', get_change_type('add new feature'))
eq_('new', get_change_type('new feature was added'))
eq_('fix', get_change_type('fix 100 bags'))
eq_('fix', get_change_type('100 bags were fixed'))
eq_('fix', get_change_type('change some bugfix'))
eq_('fix', get_change_type('some fixes'))
eq_('fix', get_change_type('[Fix] Resolved'))
eq_('new', get_change_type('change something'))
eq_('sec', get_change_type('This issue solves CVE-2014-3556 report'))
eq_('dep', get_change_type('pip install --build and pip install --no-clean are now deprecated'))
eq_('inc', get_change_type('BACKWARD INCOMPATIBLE Removed the bundle support which was deprecated in 1.4.'))
eq_('fix', get_change_type('bug fix: HANDLER-{BIND,CASE} no longer drop into ldb when a clause'))
eq_('fix', get_change_type('BUG/MINOR: http: fix typos in previous patch'))
|
nilq/baby-python
|
python
|
# coding=utf-8
__author__ = 'cheng.hu'
import logging
# Step 1: create a logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)  # master switch for the log level
# Step 2: create a handler that writes log records to a file
logfile = '/Users/CalvinHu/Documents/python/hurnado/src/test/log.txt'
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.INFO)  # log-level switch for file output
# Step 3: create another handler for console output
# ch = logging.StreamHandler()
# ch.setLevel(logging.WARNING)  # log-level switch for console output
# Step 4: define the output format for the handlers
formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fh.setFormatter(formatter)
# ch.setFormatter(formatter)
# Step 5: attach the handler(s) to the logger
logger.addHandler(fh)
# logger.addHandler(ch)
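# A minimal usage sketch (assuming the configuration above): records at INFO
# and above are formatted and written to log.txt by the file handler.
logger.info('logger configured, writing to %s', logfile)
logger.debug('this is below the INFO threshold and is dropped')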
|
nilq/baby-python
|
python
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from django.shortcuts import render
# Create your views here.
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.views.decorators.http import require_http_methods
from django.views.generic import ListView, DeleteView
from sonsuz.news.models import News
from sonsuz.utils.utils import ajax_required, AuthorRequiredMixin
class NewsListView(ListView):
# model = News
paginate_by = 10
template_name = 'news/news_list.html'
context_object_name = 'news_list'
def get_queryset(self):
return News.objects.filter(reply=False).select_related('user').prefetch_related('likers')
class NewsManageView(ListView):
# model = News
paginate_by = 10
template_name = 'news/news_manages.html'
context_object_name = 'news_manages_list'
def get_queryset(self):
return News.objects.filter(reply=False).select_related('user').prefetch_related('likers')
@login_required
@ajax_required
@require_http_methods(["POST"])
def post_news(request):
"""发送动态,AJAX POST请求"""
newsContent = request.POST['news_content'].strip()
newsTitle = request.POST['news_title'].strip()
if newsContent:
news = News.objects.create(user=request.user, content=newsContent, title=newsTitle)
html = render_to_string('news/news_single.html', {'news': news, 'request': request})
return HttpResponse(html)
else:
return HttpResponseBadRequest("内容不能为空!")
# class NewsDeleteView(LoginRequiredMixin, AuthorRequiredMixin, DeleteView)
class NewsDeleteView(LoginRequiredMixin, DeleteView):
# class NewsDeleteView(DeleteView):
"""删除一条新闻记录"""
model = News
template_name = 'news/news_confirm_delete.html'
success_url = reverse_lazy('news:news_manage')  # lazy, so it resolves even before the project URLConf is loaded
@login_required
@ajax_required
@require_http_methods(["POST"])
def like(request):
"""点赞,响应AJAX POST请求"""
news_id = request.POST['newsId']
news = News.objects.get(pk=news_id)
# toggle the like (add or remove)
news.switch_like(request.user)
# return the number of likes
return JsonResponse({"likers_count": news.likers_count()})
# @login_required
@ajax_required
@require_http_methods(["POST"])
def contents(request):
news_id = request.POST['newsId']
news = News.objects.get(pk=news_id)
like_flag = "outline"
if request.user in news.get_likers():
like_flag = "inline"
comment_flag = "outline"
if news.replies_count() != 0:
comment_flag = "inline"
return JsonResponse({"news_conent": news.get_content(),
"news_title": news.title,
"news_like_count": news.likers_count(),
"news_like_flag": like_flag,
"news_comment_flag": comment_flag,
"news_cocmment_count": news.replies_count()
})
@login_required
@ajax_required
@require_http_methods(["POST"])
def post_reply(request):
"""发送回复,AJAX POST请求"""
# replyContent = request.POST['reply-content'].strip()
replyContent = request.POST['replyContent'].strip()
parentId = request.POST['newsId']
parent = News.objects.get(pk=parentId)
if replyContent:
parent.reply_this(request.user, replyContent)
return JsonResponse({'newsid': parent.pk, 'replies_count': parent.replies_count()})
else:
return HttpResponseBadRequest("内容不能为空!")
#
#
@ajax_required
@require_http_methods(["GET"])
def get_replies(request):
"""返回新闻的评论,AJAX GET请求"""
news_id = request.GET['newsId']
news = News.objects.get(pk=news_id)
# render_to_string() loads the template, fills in the data, and returns a string
replies_html = render_to_string("news/reply_list.html", {"replies": news.get_children()})  # when there are replies
return JsonResponse({
"newsid": news_id,
"replies_html": replies_html,
})
@login_required
def update_interactions(request):
"""更新互动信息"""
data_point = request.GET['id_value']
news = News.objects.get(pk=data_point)
return JsonResponse({'likes': news.likers_count(), 'replies': news.replies_count()})
|
nilq/baby-python
|
python
|
# Copyright 2017 University of Maryland.
#
# This file is part of Sesame. It is subject to the license terms in the file
# LICENSE.rst found in the top-level directory of this distribution.
import numpy as np
from .observables import *
from .defects import defectsF
def getF(sys, v, efn, efp, veq):
###########################################################################
# organization of the right hand side vector #
###########################################################################
# A site with coordinates (i,j,k) corresponds to a site number s as follows:
# k = s//(Nx*Ny)
# j = (s - k*Nx*Ny) // Nx
# i = s - j*Nx - k*Nx*Ny
#
# Rows for (efn_s, efp_s, v_s)
# ----------------------------
# fn_row = 3*s
# fp_row = 3*s+1
# fv_row = 3*s+2
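# Worked example (illustrative numbers): with Nx = 4, Ny = 3 and site s = 17,
# k = 17 // 12 = 1, j = (17 - 12) // 4 = 1, i = 17 - 1*4 - 1*12 = 1, so the
# electron equation for that site lands in row fn_row = 3*17 = 51.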
Nx, Ny, Nz = sys.xpts.shape[0], sys.ypts.shape[0], sys.zpts.shape[0]
# right hand side vector
global vec
vec = np.zeros((3*Nx*Ny*Nz,))
def update(fn, fp, fv, sites):
global vec
vec[3*sites] = fn
vec[3*sites+1] = fp
vec[3*sites+2] = fv
###########################################################################
# For all sites in the system #
###########################################################################
# carrier densities
n = sys.Nc * np.exp(+sys.bl + efn + v)
p = sys.Nv * np.exp(-sys.Eg - sys.bl - efp - v)
# equilibrium carrier densities
n_eq = sys.Nc * np.exp(+sys.bl + veq)
p_eq = sys.Nv * np.exp(-sys.Eg - sys.bl - veq)
# bulk charges
rho = sys.rho - n + p
# recombination rates
r = get_bulk_rr(sys, n, p)
# charge defects
if len(sys.defects_list) != 0:
defectsF(sys, sys.defects_list, n, p, rho, r)
# charge divided by epsilon
rho = rho / sys.epsilon
# reshape the array as array[y-indices, x-indices]
_sites = np.arange(Nx*Ny*Nz, dtype=int).reshape(Nz, Ny, Nx)
def currents(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites):
jnx_s, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN = 0, 0, 0, 0, 0, 0
jpx_s, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN = 0, 0, 0, 0, 0, 0
if dx.all() != 0:
jnx_s = get_jn(sys, efn, v, sites, sites + 1, dx)
jpx_s = get_jp(sys, efp, v, sites, sites + 1, dx)
if dxm1.all() != 0:
jnx_sm1 = get_jn(sys, efn, v, sites - 1, sites, dxm1)
jpx_sm1 = get_jp(sys, efp, v, sites - 1, sites, dxm1)
if dy.all() != 0:
jny_s = get_jn(sys, efn, v, sites, sites + Nx, dy)
jpy_s = get_jp(sys, efp, v, sites, sites + Nx, dy)
if dym1.all() != 0:
jny_smN = get_jn(sys, efn, v, sites - Nx, sites, dym1)
jpy_smN = get_jp(sys, efp, v, sites - Nx, sites, dym1)
if dz.all() != 0:
jnz_s = get_jn(sys, efn, v, sites, sites + Nx*Ny, dz)
jpz_s = get_jp(sys, efp, v, sites, sites + Nx*Ny, dz)
if dzm1.all() != 0:
jnz_smNN = get_jn(sys, efn, v, sites - Nx*Ny, sites, dzm1)
jpz_smNN = get_jp(sys, efp, v, sites - Nx*Ny, sites, dzm1)
return jnx_s, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN,\
jpx_s, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN
def ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites):
# Drift diffusion Poisson equations that determine fn, fp, fv
# lattice distances
dxbar = (dx + dxm1) / 2.
dybar = (dy + dym1) / 2.
dzbar = (dz + dzm1) / 2.
# compute currents
jnx_s, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN,\
jpx_s, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN = \
currents(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
# drift diffusion
u = sys.g[sites] - r[sites]
fn = (jnx_s - jnx_sm1) / dxbar + (jny_s - jny_smN) / dybar \
+ (jnz_s - jnz_smNN) / dzbar + u
fp = (jpx_s - jpx_sm1) / dxbar + (jpy_s - jpy_smN) / dybar \
+ (jpz_s - jpz_smNN) / dzbar - u
# Poisson
dv_sm1, dv_sp1, dv_smN, dv_spN, dv_smNN, dv_spNN = 0, 0, 0, 0, 0, 0
v_s = v[sites]
if dx.all() != 0:
dv_sp1 = (v[sites+1] - v_s) / dx
if dxm1.all() != 0:
dv_sm1 = (v_s - v[sites-1]) / dxm1
if dy.all() != 0:
dv_spN = (v[sites+Nx] - v_s) / dy
if dym1.all() != 0:
dv_smN = (v_s - v[sites-Nx]) / dym1
if dz.all() != 0:
dv_spNN = (v[sites+Nx*Ny] - v_s) / dz
if dzm1.all() != 0:
dv_smNN = (v_s - v[sites-Nx*Ny]) / dzm1
fv = (dv_sm1 - dv_sp1) / dxbar + (dv_smN - dv_spN) / dybar\
+ (dv_smNN - dv_spNN) / dzbar - rho[sites]
# update vector
update(fn, fp, fv, sites)
def right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites):
# Boundary conditions on the right contact
# lattice distances and sites
dx = np.array([0])
dxm1 = sys.dx[-1]
dxbar = (dx + dxm1) / 2.
dybar = (dy + dym1) / 2.
dzbar = (dz + dzm1) / 2.
# compute currents
_, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN,\
_, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN = \
currents(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
# compute jx_s with continuity equation
jnx_s = jnx_sm1 + dxbar * (r[sites] - sys.g[sites] - (jny_s - jny_smN)/dybar\
- (jnz_s - jnz_smNN)/dzbar)
jpx_s = jpx_sm1 + dxbar * (sys.g[sites] - r[sites] - (jpy_s - jpy_smN)/dybar\
- (jpz_s - jpz_smNN)/dzbar)
# b_n, b_p and b_v values
bn = jnx_s + sys.Scn[1] * (n[sites] - n_eq[sites])
bp = jpx_s - sys.Scp[1] * (p[sites] - p_eq[sites])
bv = 0 # Dirichlet BC
# update right hand side vector
update(bn, bp, bv, sites)
###########################################################################
# inside the system: 0 < i < Nx-1, 0 < j < Ny-1, 0 < k < Nz-1 #
###########################################################################
# We compute fn, fp, fv on the inner part of the system.
# list of the sites inside the system
sites = _sites[1:Nz-1, 1:Ny-1, 1:Nx-1].flatten()
# lattice distances
dx = np.tile(sys.dx[1:], (Ny-2)*(Nz-2))
dy = np.repeat(sys.dy[1:], (Nx-2)*(Nz-2))
dz = np.repeat(sys.dz[1:], (Nx-2)*(Ny-2))
dxm1 = np.tile(sys.dx[:-1], (Ny-2)*(Nz-2))
dym1 = np.repeat(sys.dy[:-1], (Nx-2)*(Nz-2))
dzm1 = np.repeat(sys.dz[:-1], (Nx-2)*(Ny-2))
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
###########################################################################
# left boundary: i = 0, 0 <= j <= Ny-1, 0 <= k <= Nz-1 #
###########################################################################
# list of the sites on the left side
sites = _sites[:, :, 0].flatten()
# compute the currents
jnx = get_jn(sys, efn, v, sites, sites + 1, sys.dx[0])
jpx = get_jp(sys, efp, v, sites, sites + 1, sys.dx[0])
# compute an, ap, av
an = jnx - sys.Scn[0] * (n[sites] - n_eq[sites])
ap = jpx + sys.Scp[0] * (p[sites] - p_eq[sites])
av = 0 # to ensure Dirichlet BCs
update(an, ap, av, sites)
###########################################################################
# right boundaries #
###########################################################################
###########################################################################
# right boundary: i = Nx-1, 0 < j < Ny-1, 0 < k < Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[1:Nz-1, 1:Ny-1, Nx-1].flatten()
# lattice distances
dy = np.repeat(sys.dy[1:], Nz-2)
dym1 = np.repeat(sys.dy[:-1], Nz-2)
dz = np.repeat(sys.dz[1:], Ny-2)
dzm1 = np.repeat(sys.dz[:-1], Ny-2)
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = Ny-1, 0 < k < Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[1:Nz-1, Ny-1, Nx-1].flatten()
# lattice distances
dy = np.array([0])
dym1 = np.repeat(sys.dy[-1], Nz-2)
dz = sys.dz[1:]
dzm1 = sys.dz[:-1]
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = 0, 0 < k < Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[1:Nz-1, 0, Nx-1].flatten()
# lattice distances
dy = np.repeat(sys.dy[-1], Nz-2)
dym1 = np.array([0])
dz = sys.dz[1:]
dzm1 = sys.dz[:-1]
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, 0 < j < Ny-1, k = Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[Nz-1, 1:Ny-1, Nx-1].flatten()
# lattice distances
dy = sys.dy[1:]
dym1 = sys.dy[:-1]
dz = np.array([0])
dzm1 = np.repeat(sys.dz[-1], Ny-2)
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, 0 < j < Ny-1, k = 0 #
###########################################################################
# list of the sites on the right side
sites = _sites[0, 1:Ny-1, Nx-1].flatten()
# lattice distances
dy = sys.dy[1:]
dym1 = sys.dy[:-1]
dz = np.repeat(sys.dz[0], Ny-2)
dzm1 = np.array([0])
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = Ny-1, k = 0 #
###########################################################################
# list of the sites on the right side
sites = _sites[0, Ny-1, Nx-1].flatten()
# lattice distances
dy = np.array([0])
dym1 = sys.dy[-1]
dz = sys.dz[0]
dzm1 = np.array([0])
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = Ny-1, k = Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[Nz-1, Ny-1, Nx-1].flatten()
# lattice distances
dy = np.array([0])
dym1 = sys.dy[-1]
dz = np.array([0])
dzm1 = sys.dz[-1]
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = 0, k = Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[Nz-1, 0, Nx-1].flatten()
# lattice distances
dy = sys.dy[0]
dym1 = np.array([0])
dz = np.array([0])
dzm1 = sys.dz[-1]
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = 0, k = 0 #
###########################################################################
# list of the sites on the right side
sites = _sites[0, 0, Nx-1].flatten()
# lattice distances
dy = sys.dy[0]
dym1 = np.array([0])
dz = sys.dz[0]
dzm1 = np.array([0])
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# faces between contacts: 0 < i < Nx-1, j or k fixed #
###########################################################################
# Here we focus on the faces between the contacts.
###########################################################################
# z-face top: 0 < i < Nx-1, 0 < j < Ny-1, k = Nz-1 #
###########################################################################
# list of the sites
sites = _sites[Nz-1, 1:Ny-1, 1:Nx-1].flatten()
# lattice distances
dx = np.tile(sys.dx[1:], Ny-2)
dy = np.repeat(sys.dy[1:], Nx-2)
dz = np.array([0])
dxm1 = np.tile(sys.dx[:-1], Ny-2)
dym1 = np.repeat(sys.dy[:-1], Nx-2)
dzm1 = np.repeat(sys.dz[-1], (Nx-2)*(Ny-2))
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
###########################################################################
# z- face bottom: 0 < i < Nx-1, 0 < j < Ny-1, k = 0 #
###########################################################################
# list of the sites
sites = _sites[0, 1:Ny-1, 1:Nx-1].flatten()
# lattice distances
dx = np.tile(sys.dx[1:], Ny-2)
dy = np.repeat(sys.dy[1:], Nx-2)
dz = np.repeat(sys.dz[0], (Nx-2)*(Ny-2))
dxm1 = np.tile(sys.dx[:-1], Ny-2)
dym1 = np.repeat(sys.dy[:-1], Nx-2)
dzm1 = np.array([0])
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
###########################################################################
# y-face front: 0 < i < Nx-1, j = 0, 0 < k < Nz-1 #
###########################################################################
# list of the sites
sites = _sites[1:Nz-1, 0, 1:Nx-1].flatten()
# lattice distances
dx = np.tile(sys.dx[1:], Nz-2)
dy = np.repeat(sys.dy[0], (Nx-2)*(Nz-2))
dz = np.repeat(sys.dz[1:], (Nx-2))
dxm1 = np.tile(sys.dx[:-1], Nz-2)
dym1 = np.array([0])
dzm1 = np.repeat(sys.dz[:-1], Nx-2)
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
###########################################################################
# y-face back: 0 < i < Nx-1, j = Ny-1, 0 < k < Nz-1 #
###########################################################################
# list of the sites
sites = _sites[1:Nz-1, Ny-1, 1:Nx-1].flatten()
# lattice distances
dx = np.tile(sys.dx[1:], Nz-2)
dy = np.array([0])
dz = np.repeat(sys.dz[1:], Nx-2)
dxm1 = np.tile(sys.dx[:-1], Nz-2)
dym1 = np.repeat(sys.dy[-1], (Nx-2)*(Nz-2))
dzm1 = np.repeat(sys.dz[:-1], Nx-2)
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
###########################################################################
# edges between contacts: 0 < i < Nx-1, j and k fixed #
###########################################################################
# Here we focus on the edges between the contacts.
# lattice distances
dx = sys.dx[1:]
dxm1 = sys.dx[:-1]
###########################################################################
# edge z top // y back: 0 < i < Nx-1, j = Ny-1, k = Nz-1 #
###########################################################################
# list of the sites
sites = _sites[Nz-1, Ny-1, 1:Nx-1].flatten()
# lattice distances
dy = np.array([0])
dz = np.array([0])
dym1 = np.repeat(sys.dy[-1], Nx-2)
dzm1 = np.repeat(sys.dz[-1], Nx-2)
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
###########################################################################
# edge z top // y front: 0 < i < Nx-1, j = 0, k = Nz-1 #
###########################################################################
# list of the sites
sites = _sites[Nz-1, 0, 1:Nx-1].flatten()
# lattice distances
dy = np.repeat(sys.dy[0], Nx-2)
dz = np.array([0])
dym1 = np.array([0])
dzm1 = np.repeat(sys.dz[-1], Nx-2)
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
###########################################################################
# edge z bottom // y back: 0 < i < Nx-1, j = Ny-1, k = 0 #
###########################################################################
# list of the sites
sites = _sites[0, Ny-1, 1:Nx-1].flatten()
# lattice distances
dy = np.array([0])
dz = np.repeat(sys.dz[0], Nx-2)
dym1 = np.repeat(sys.dy[-1], Nx-2)
dzm1 = np.array([0])
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
###########################################################################
# edge z bottom // y front: 0 < i < Nx-1, j = 0, k = 0 #
###########################################################################
# list of the sites
sites = _sites[0, 0, 1:Nx-1].flatten()
# lattice distances
dy = np.repeat(sys.dy[0], Nx-2)
dz = np.repeat(sys.dz[0], Nx-2)
dym1 = np.array([0])
dzm1 = np.array([0])
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
return vec
|
nilq/baby-python
|
python
|
import tornado.gen
import tornado.web
import mallory
class HeartbeatHandler(tornado.web.RequestHandler):
def initialize(self, circuit_breaker):
self.circuit_breaker = circuit_breaker
@tornado.web.asynchronous
@tornado.gen.engine
def get(self):
if self.circuit_breaker.is_tripped():
self.set_status(503)
status_message = "Circuit Breaker Tripped"
else:
self.set_status(200)
status_message = "OK"
self.write("Mallory " + mallory.Version + "\n")
self.write(status_message)
self.finish()
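# Usage sketch (assumptions: `cb` is a mallory circuit-breaker instance
# provided by the surrounding application; the route name is arbitrary):
#
#   app = tornado.web.Application([
#       (r"/heartbeat", HeartbeatHandler, dict(circuit_breaker=cb)),
#   ])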
|
nilq/baby-python
|
python
|
from petroleum.conditional_task import ConditionalTask
from petroleum.exceptions import PetroleumException
from petroleum.task import Task
class ExclusiveChoice(Task):
def __init__(self, name=None, *args, **kwargs):
self._conditional_tasks = []
super().__init__(name=name, *args, **kwargs)
def get_next_task(self, task_status):
for conditional_task in self._conditional_tasks:
result = conditional_task.condition(task_status)
if not isinstance(result, bool):
raise PetroleumException(
"Condition %s did not return bool"
% conditional_task.condition
)
if result is True:
return conditional_task.task
return getattr(self, "_next_task", None)
def connect_if(self, task, condition):
conditional_task = ConditionalTask(task=task, condition=condition)
self._conditional_tasks.append(conditional_task)
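# Usage sketch (hypothetical tasks and conditions; each condition receives the
# task status and must return a bool, as enforced in get_next_task above):
#
#   choice = ExclusiveChoice(name="router")
#   choice.connect_if(approve_task, lambda status: status == "approved")
#   choice.connect_if(reject_task, lambda status: status == "rejected")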
|
nilq/baby-python
|
python
|
"""
AmberTools utilities.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
from collections import OrderedDict
from cStringIO import StringIO
import numpy as np
import os
import shutil
import subprocess
import tempfile
from rdkit import Chem
from vs_utils.utils.pdb_utils import PdbReader
class Antechamber(object):
"""
Wrapper methods for Antechamber functionality.
Calculations are carried out in a temporary directory because
Antechamber writes out several files to disk.
Parameters
----------
charge_type : str, optional (default 'bcc')
Antechamber charge type string. Defaults to AM1-BCC charges.
"""
def __init__(self, charge_type='bcc'):
self.charge_type = charge_type
# temporary directory
self.temp_dir = tempfile.mkdtemp()
def __del__(self):
"""
Cleanup.
"""
shutil.rmtree(self.temp_dir)
def get_charges_and_radii(self, mol):
"""
Use Antechamber to calculate partial charges and atomic radii.
Antechamber requires file inputs and output, so the molecule is
written to SDF and Antechamber writes out a modified PDB (mpdb)
containing charge and radius information.
Note that Antechamber only processes the first molecule or
conformer in the input file.
Parameters
----------
mol : RDMol
Molecule.
"""
net_charge = self.get_net_charge(mol)
# write molecule to temporary file
_, input_filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
writer = Chem.SDWriter(input_filename)
writer.write(mol)
writer.close()
# calculate charges and radii with Antechamber
output_fd, output_filename = tempfile.mkstemp(suffix='.mpdb',
dir=self.temp_dir)
os.close(output_fd) # close temp file
args = ['antechamber', '-i', input_filename, '-fi', 'sdf', '-o',
output_filename, '-fo', 'mpdb', '-c', self.charge_type, '-nc',
str(net_charge)] # all arguments must be strings
try:
subprocess.check_output(args, cwd=self.temp_dir)
except subprocess.CalledProcessError as e:
name = ''
if mol.HasProp('_Name'):
name = mol.GetProp('_Name')
print "Antechamber: molecule '{}' failed.".format(name)
with open(input_filename) as f:
print(f.read())
raise e
# extract charges and radii
reader = ModifiedPdbReader()
with open(output_filename) as f:
charges, radii = reader.get_charges_and_radii(f)
return charges, radii
@staticmethod
def get_net_charge(mol):
"""
Calculate the net charge on a molecule.
Parameters
----------
mol : RDMol
Molecule.
"""
net_charge = 0
for atom in mol.GetAtoms():
net_charge += atom.GetFormalCharge()
return net_charge
class PBSA(object):
"""
Wrapper methods for PBSA functionality.
Calculations are carried out in a temporary directory because PBSA
writes out several files to disk.
Parameters
----------
size : float, optional (default 30.)
Length of each side of the grid, in Angstroms. Used to calculate
PBSA parameters xmin, xmax, etc.
resolution : float, optional (default 0.5)
Space between grid points, in Angstroms. Corresponds to PBSA space
parameter.
nb_cutoff : float, optional (default 5.)
Cutoff distance for van der Waals interactions. Corresponds to PBSA
cutnb parameter.
ionic_strength : float, optional (default 150.)
Ionic strength of the solvent, in mM. Corresponds to PBSA istrng
parameter.
"""
def __init__(self, size=30., resolution=0.5, nb_cutoff=5.,
ionic_strength=150.):
self.size = float(size)
self.resolution = float(resolution)
self.nb_cutoff = float(nb_cutoff)
self.ionic_strength = float(ionic_strength)
# temporary directory
self.temp_dir = tempfile.mkdtemp()
def __del__(self):
"""
Cleanup.
"""
shutil.rmtree(self.temp_dir)
def get_esp_grid(self, mol, charges, radii, conf_id=None):
"""
Use PBSA to calculate an electrostatic potential grid for a
molecule conformer.
Parameters
----------
mol : RDKit Mol
Molecule.
charges : array_like
Atomic partial charges.
radii : array_like
Atomic radii.
conf_id : int, optional
Conformer ID.
"""
# generate a PQR file for this conformer
pqr = self.mol_to_pqr(mol, charges, radii, conf_id=conf_id)
# get ESP grid
grid = self.get_esp_grid_from_pqr(pqr)
return grid
@staticmethod
def mol_to_pqr(mol, charges, radii, conf_id=None):
"""
Generate a PQR block for a molecule conformer.
Parameters
----------
mol : RDKit Mol
Molecule.
charges : array_like
Atomic partial charges.
radii : array_like
Atomic radii.
conf_id : int, optional
Conformer ID.
"""
if conf_id is None:
conf_id = -1
pdb = Chem.MolToPDBBlock(mol, confId=conf_id)
reader = PdbReader()
pqr = reader.pdb_to_pqr(StringIO(pdb), charges, radii)
return pqr
def get_esp_grid_from_pqr(self, pqr):
"""
Use PBSA to calculate an electrostatic potential grid for a
molecule (one conformer only) in PQR format.
The grid is written is ASCII format to pbsa.phi.
Parameters
----------
pqr : file_like
Input PQR file.
"""
# write PQR to disk
pqr_fd, pqr_filename = tempfile.mkstemp(suffix='.pqr',
dir=self.temp_dir)
os.close(pqr_fd) # close temp file
with open(pqr_filename, 'wb') as f:
f.write(pqr)
# write PBSA parameter file
param_fd, param_filename = tempfile.mkstemp(suffix='.in',
dir=self.temp_dir)
os.close(param_fd) # close temp file
with open(param_filename, 'wb') as f:
f.write(self.get_pbsa_parameter_file())
# run PBSA
output_fd, output_filename = tempfile.mkstemp(suffix='.out',
dir=self.temp_dir)
os.close(output_fd) # close temp file
os.remove(output_filename) # PBSA won't overwrite existing file
args = ['pbsa', '-i', param_filename, '-o', output_filename, '-pqr',
pqr_filename]
try:
subprocess.check_output(args, cwd=self.temp_dir)
except subprocess.CalledProcessError as e:
with open(output_filename) as f:
print(f.read())
raise e
# extract ESP grid
with open(os.path.join(self.temp_dir, 'pbsa.phi')) as f:
grid, center = self.parse_esp_grid(f)
return grid, center
def get_pbsa_parameter_file(self):
"""
Construct a PBSA parameter file.
"""
params = """
Calculate ESP for a small molecule
&cntrl
inp=0, ! required for PQR input
/
&pb
npbverb=1, ! be verbose
phiout=1, phiform=1, ! write grid to Amber ASCII file
istrng={istrng}, ! ionic strength
space={space}, ! grid spacing
xmin={xmin}, xmax={xmax},
ymin={ymin}, ymax={ymax},
zmin={zmin}, zmax={zmax},
eneopt=1, cutnb={cutnb},
/
"""
delta = self.size / 2.
params = params.format(
space=self.resolution,
istrng=self.ionic_strength,
xmin=-delta, xmax=delta,
ymin=-delta, ymax=delta,
zmin=-delta, zmax=delta,
cutnb=self.nb_cutoff)
return params
def parse_esp_grid(self, grid):
"""
Parse PBSA ASCII electrostatic potential grid.
Variables used in the ASCII format:
* h : grid spacing
* (gox, goy, goz) : grid origin
* (xm, ym, zm) : grid dimensions
* phi : electrostatic potential in kcal/mol-e
The mapping between one-based grid points (i, j, k) and phi indices
is p_i = i + xm * (j - 1 + ym * (k - 1)). However, since phi is a
flattened version of the grid (with Fortran ordering), we can use
np.reshape to get the 3D grid.
Spatial coordinates (x, y, z) in the grid are given by
(gox + h * i, goy + h * j, goz + h * k).
The grid center is therefore
(gox + h * (xm + 1) / 2,
goy + h * (ym + 1) / 2,
goz + h * (zm + 1) / 2).
Parameters
----------
grid : file_like
Amber ASCII format file.
"""
h = gox = goy = goz = None
xm = ym = zm = None
phi = None
for line in grid:
line = line.strip()
if line.startswith('#'):
continue
if h is None:
h, gox, goy, goz = np.asarray(line.split(), dtype=float)
elif xm is None:
xm, ym, zm = np.asarray(line.split(), dtype=int)
else:
phi = np.asarray(line.split(), dtype=float)
dim = (xm, ym, zm)
grid = np.reshape(phi, dim, order='F')
origin = (gox, goy, goz)
center = tuple(o + h * (m + 1) / 2. for o, m in zip(origin, dim))
# sanity checks
assert h == self.resolution
return grid, center
class ModifiedPdbReader(PdbReader):
"""
Handle Amber modified PDB files and generate Amber-style PQR files.
"""
def _parse_atom_record(self, line):
"""
Parse optional fields in ATOM and HETATM records.
Amber modified PDB files contain charge, radius and atom type
information in the fields following the x, y, z coordinates for
atoms.
Parameters
----------
line : str
Amber modified PDB ATOM or HETATM line.
"""
fields = OrderedDict()
charge, radius, amber_type = line[54:].strip().split()
fields['charge'] = charge
fields['radius'] = radius
fields['amber_type'] = amber_type
return fields
def get_charges_and_radii(self, mpdb):
"""
Extract atomic charges and radii from an Antechamber modified PDB
file.
Parameters
----------
mpdb : file_like
Antechamber modified PDB file.
"""
charges = []
radii = []
for line in mpdb:
if line.startswith('ATOM') or line.startswith('HETATM'):
fields = self.parse_atom_record(line)
charges.append(fields['charge'])
radii.append(fields['radius'])
charges = np.asarray(charges, dtype=float)
radii = np.asarray(radii, dtype=float)
return charges, radii
|
nilq/baby-python
|
python
|
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
from scipy.interpolate import griddata
import copy
def visualize_source(
points,
values,
ax=None,
enlarge_factor=1.1,
npixels=100,
cmap='jet',
):
"""
Points is defined as autolens [(y1,x1), (y2,x2), ...] order
"""
points = np.asarray(points)
points = points[:, ::-1]  # change to numpy/scipy api format -- [(x1,y1), (x2,y2), ...] order
half_width = max(np.abs(points.min()), np.abs(points.max()))
half_width *= enlarge_factor
coordinate_1d, dpix = np.linspace(-1.0*half_width, half_width, npixels, endpoint=True, retstep=True)
xgrid, ygrid = np.meshgrid(coordinate_1d, coordinate_1d)
extent = [-1.0*half_width-0.5*dpix, half_width+0.5*dpix, -1.0*half_width-0.5*dpix, half_width+0.5*dpix]
source_image = griddata(points, values, (xgrid, ygrid), method='linear', fill_value=0.0)
im = ax.imshow(source_image, origin='lower', extent=extent, cmap=cmap)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
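# Usage sketch (hypothetical data; `points` follows the autolens (y, x)
# convention documented above; note an Axes must be supplied despite the
# ax=None default, since ax.imshow is called unconditionally):
#
#   fig, ax = plt.subplots()
#   visualize_source([(0.1, -0.2), (0.0, 0.3), (-0.1, 0.1)],
#                    [1.0, 2.0, 0.5], ax=ax)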
def visualize_unmasked_1d_image(
unmasked_1d_image,
mask,
dpix,
ax=None,
cmap='jet',
origin='upper',
):
"""
mask: the 2d data mask
"""
cmap = copy.copy(plt.get_cmap(cmap))
cmap.set_bad(color='white')
unmasked_2d_image = np.zeros_like(mask, dtype='float')
unmasked_2d_image[~mask] = unmasked_1d_image
half_width = len(mask)*0.5*dpix
extent = [-1.0*half_width, half_width, -1.0*half_width, half_width]
unmasked_2d_image = np.ma.masked_array(unmasked_2d_image, mask=mask)
im = ax.imshow(unmasked_2d_image, origin=origin, extent=extent, cmap=cmap)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
coordinate_1d = np.arange(len(mask)) * dpix
coordinate_1d = coordinate_1d - np.mean(coordinate_1d)
xgrid, ygrid = np.meshgrid(coordinate_1d, coordinate_1d)
rgrid = np.sqrt(xgrid**2 + ygrid**2)
limit = np.max(rgrid[~mask])
ax.set_xlim(-1.0*limit, limit)
ax.set_ylim(-1.0*limit, limit)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
__author__ = 'S.I. Mimilakis'
__copyright__ = 'MacSeNet'
import torch
import torch.nn as nn
from torch.autograd import Variable
class SkipFiltering(nn.Module):
def __init__(self, N, l_dim):
"""
Constructing blocks of the skip filtering connections.
Reference: - https://arxiv.org/abs/1709.00611
- https://arxiv.org/abs/1711.01437
Args :
N : (int) Original dimensionality of the input.
l_dim : (int) Dimensionality of the latent variables.
"""
super(SkipFiltering, self).__init__()
print('Constructing Skip-filtering model')
self._N = N
self._ldim = l_dim
self.activation_function = torch.nn.ReLU()
# Encoder
self.ih_matrix = nn.Linear(self._N, self._ldim)
# Decoder
self.ho_matrix = nn.Linear(self._ldim, self._N)
# Initialize the weights
self.initialize_skip_filt()
def initialize_skip_filt(self):
"""
Manual weight/bias initialization.
"""
# Matrices
nn.init.xavier_normal(self.ih_matrix.weight)
nn.init.xavier_normal(self.ho_matrix.weight)
# Biases
self.ih_matrix.bias.data.zero_()
self.ho_matrix.bias.data.zero_()
print('Initialization of the skip-filtering connection(s) model done...')
return None
def forward(self, input_x, mask_return=False):
if torch.has_cudnn:
x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
else:
x = Variable(torch.from_numpy(input_x), requires_grad=True)
# Encoder
hl_rep = self.activation_function(self.ih_matrix(x))
# Decoder
mask = self.activation_function(self.ho_matrix(hl_rep))
# Skip-Filtering connection(s)
y_out = torch.mul(x, mask)
if mask_return:
return y_out, x, mask
else:
return y_out, x
# EOF
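# Usage sketch (assumed shapes: a float32 batch of N-dimensional magnitude
# frames; forward() takes a numpy array and wraps it in a Variable itself):
#
#   import numpy as np
#   model = SkipFiltering(N=1024, l_dim=300)
#   y_out, x = model.forward(np.random.rand(8, 1024).astype(np.float32))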
|
nilq/baby-python
|
python
|
# __init__.py
import logging
import os
from task_manager.views import (
HomeView,
ErrorView,
InfoView,
LoginView,
LogoutView,
ProfileView,
RegistrationView,
TaskListView,
TaskView
)
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import options, define
from tornado_sqlalchemy import SQLAlchemy
from tornado.web import Application
SQLALCHEMY_URL = os.environ.get('DATABASE_URL', '')
define('port', default=8888, help='port to listen on')
def main():
"""Construct and serve the tornado application."""
api_root = '/api/v1'
app = Application(handlers=[
(r'/', HomeView),
(r'/favicon.ico', HomeView),
(r'/error_500', ErrorView),
(api_root, InfoView),
(api_root + r'/login', LoginView),
(api_root + r'/accounts', RegistrationView),
(api_root + r'/accounts/([\w]+)', ProfileView),
(api_root + r'/accounts/([\w]+)/tasks', TaskListView),
(api_root + r'/accounts/([\w]+)/tasks/([\d]+)', TaskView),
(api_root + r'/accounts/([\w]+)/logout', LogoutView),
],
db=SQLAlchemy(os.environ.get('DATABASE_URL', 'postgres://postgres:postgres@localhost:5432/task_manager')),
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
**options.group_dict('application'),
login_url="/api/v1/login",
xsrf_cookies=True,
debug=True,
static_path=os.path.join(os.path.dirname(__file__), "static"),
template_path=os.path.join(os.path.dirname(__file__), "templates")
)
http_server = HTTPServer(app)
http_server.listen(options.port)
print('Listening on http://localhost:%d' % options.port)
logging.info('Listening on http://localhost:%d' % options.port)
IOLoop.current().start()
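# Entry-point sketch (assumption: this module is also meant to be runnable
# directly; otherwise main() is imported and invoked elsewhere):
if __name__ == '__main__':
    main()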
|
nilq/baby-python
|
python
|
from dnaweaver import (
CommercialDnaOffer,
DnaAssemblyStation,
GibsonAssemblyMethod,
OligoAssemblyMethod,
TmSegmentSelector,
FixedSizeSegmentSelector,
PerBasepairPricing,
SequenceLengthConstraint,
)
# OLIGO COMPANY
oligo_com = CommercialDnaOffer(
name="Oligo vendor",
sequence_constraints=[SequenceLengthConstraint(max_length=200)],
pricing=PerBasepairPricing(0.10),
lead_time=7,
)
oligo_assembly_station = DnaAssemblyStation(
name="Oligo Assembly Station",
assembly_method=OligoAssemblyMethod(
overhang_selector=TmSegmentSelector(
min_size=15, max_size=25, min_tm=50, max_tm=70
),
min_segment_length=40,
max_segment_length=200,
sequence_constraints=[SequenceLengthConstraint(max_length=1500)],
duration=8,
cost=2,
),
supplier=oligo_com,
coarse_grain=20,
fine_grain=False,
a_star_factor="auto",
)
gibson_blocks_assembly_station = DnaAssemblyStation(
name="Gibson Blocks Assembly",
assembly_method=GibsonAssemblyMethod(
overhang_selector=FixedSizeSegmentSelector(80),
min_segment_length=1000,
max_segment_length=4000,
duration=8,
cost=16,
),
supplier=oligo_assembly_station,
coarse_grain=300,
fine_grain=False,
memoize=True,
a_star_factor="auto",
)
chunks_assembly_station = DnaAssemblyStation(
name="Chunks assembly (Yeast)",
assembly_method=GibsonAssemblyMethod(
overhang_selector=FixedSizeSegmentSelector(300),
min_segment_length=7000,
max_segment_length=15000,
duration=8,
),
supplier=gibson_blocks_assembly_station,
coarse_grain=1000,
fine_grain=None,
logger="bar",
a_star_factor="auto",
memoize=True,
)
with open("50kb_sequence.txt", "r") as f:
sequence = f.read()
print("Generating an assembly plan...")
chunks_assembly_station.prepare_network_on_sequence(sequence)
quote = chunks_assembly_station.get_quote(sequence, with_assembly_plan=True)
print(quote.assembly_step_summary())
print("Generating report...")
assembly_plan_report = quote.to_assembly_plan_report()
assembly_plan_report.write_full_report("report")
print("Done! (see 'report' folder)")
|
nilq/baby-python
|
python
|
import sys
import os
import json
# date and time
from datetime import datetime, timedelta
from email.utils import parsedate_tz
from dateutil import tz
import time
from api_extractor_config import DATETIME_FORMAT
def load_credentials(access):
credentials = {}
if access == 'AgProCanada_TableauDEV':
credentials = {
'MSSQL_HOST': os.environ['PYMSSQL_HOST'],
'MSSQL_DB': os.environ['PYMSSQL_DB'],
'MSSQL_USER': os.environ['PYMSSQL_USERNAME'],
'MSSQL_PASS': os.environ['PYMSSQL_PASS'],
'MSSQL_PORT': int(os.environ['PYMSSQL_PORT']),
'MSSQL_DRIVER': os.environ['PYMSSQL_DRIVER']
}
elif access == 'Youtube_API':
credentials = os.environ['YOUTUBE_API_CRED']
elif access == 'GA_API':
credentials = os.environ['GA_API_CRED']
elif access == 'Twitter_API':
credentials = {
"consumer_key": os.environ['TWITTER_CONSUMER_KEY'],
"consumer_secret": os.environ['TWITTER_CONSUMER_SECRET'],
"access_token_key": os.environ['TWITTER_ACCESS_TOKEN_KEY'],
"access_token_secret": os.environ['TWITTER_ACCESS_TOKEN_SECRET']
}
return credentials
def log(s):
timestamp = datetime.now().strftime(DATETIME_FORMAT)
print('> [%s]: %s' % (timestamp, s))
def remove_dups(l):
"""Remove duplcates from a list"""
return list(set(l))
def file_to_str(file_relative_path):
with open(file_relative_path, 'r') as file:
return file.read()
def str_to_datetime(datestring):
"""
String should be RFC822 compliant. Eg. 'Tue Mar 29 08:11:25 +0000 2011'
Used for twitter API dates
https://stackoverflow.com/questions/7703865/going-from-twitter-date-to-python-datetime-date
"""
time_tuple = parsedate_tz(datestring.strip())
dt = datetime(*time_tuple[:6]) - timedelta(seconds=time_tuple[-1])
return dt
def utc_to_eastern(utc_dt):
"""
Convert a datetime object in UTC to one in the Eastern time zone
The utc_dt can be 'naive' (meaning that it does not have tzinfo)
"""
eastern = tz.gettz('America/New_York')
utc_dt = utc_dt.replace(tzinfo=tz.tzutc())
return utc_dt.astimezone(eastern)
def time_func(func, params):
"""
Time how long does it take to run a function.
"""
t0 = time.time()
return_val = func(*params)
t1 = time.time()
log("'%s' took %.3f seconds to run." % (func.__name__, t1 - t0))
return return_val
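# Usage sketch: timing one of the helpers above; the duration is logged via
# log() and the wrapped function's return value is passed through unchanged.
#
#   dt = time_func(str_to_datetime, ('Tue Mar 29 08:11:25 +0000 2011',))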
|
nilq/baby-python
|
python
|
import json
BATCH_SIZE = 128
RNN_SIZE = 128
EMBED_SIZE = 128
LEARNING_RATE = 0.001
KEEP_PROB = 0.75
EPOCHS = 500
DISPLAY_STEP = 30
MODEL_DIR = 'Saved_Model_Weights'
SAVE_PATH = 'model_saver'
MIN_LEARNING_RATE = 0.01
LEARNING_RATE_DECAY = 0.9
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from __future__ import print_function
import cProfile
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import swn
def stats():
grouperLabels = ['Random',
'Min Dist Stars',
'Max Dist Stars',
'1/4 Min Dist Stars',
'1/3 Min Dist Stars',
'1/2 Min Dist Stars',
'Link Most Isolated Group',
'Link Smallest Group',
'Link Largest Group']
# Queue for returning counts
q = mp.Queue()
# Create processes
pList = list()
for gType in range(9):
p = mp.Process(target=statsgen,args=(q,gType))
pList.append(p)
p.start()
# Join processes
countsList = list()
for gType in range(9):
print('Grouper Method ' + str(gType))
pList[gType].join()
countsList.append(q.get())
# Plot statistics
font = {'size' : 8}
plt.rc('font', **font)
plt.figure(figsize=(8,10))
for gType in range(9):
plt.subplot(3,3,countsList[gType][0]+1)
plt.title(str(countsList[gType][0]) + ' - ' + grouperLabels[countsList[gType][0]],fontsize=8)
plt.imshow(countsList[gType][1])
plt.savefig('groupingStats.png')
def statsgen(q,gType):
# Define statistics
counts = np.zeros([21,16])
numSectors = 1000
# Generate sectors
for i in range(numSectors):
# Create generator
gen = swn.generator.Generator()
# Generate sector
sec = gen.sector(gType)
# Calculate statistics
for s in sec.system_hex_list():
if (s[1] % 2 == 0):
counts[s[0]*2, s[1]*2] += 1.0
counts[s[0]*2, s[1]*2+1] += 1.0
counts[s[0]*2+1,s[1]*2] += 1.0
counts[s[0]*2+1,s[1]*2+1] += 1.0
else:
counts[s[0]*2+1,s[1]*2] += 1.0
counts[s[0]*2+1,s[1]*2+1] += 1.0
counts[s[0]*2+2,s[1]*2] += 1.0
counts[s[0]*2+2,s[1]*2+1] += 1.0
q.put((gType,counts))
def gen(gType=1):
# Create generator
gen = swn.generator.Generator()
# Set seed
gen.set_seed('Bipiw')
# Print seed
#print(gen.seed)
# Generate sector
sec = gen.sector(gType)
# Print sector map
#sec.print_sector_map()
# Print system orbit maps
sec.print_orbit_maps()
# Print sector info
#sec.print_sector_info()
# Print sector corporations
#sec.print_corporations()
# Print sector religions
#sec.print_religions()
# Create sector images
sec.update_images()
# Draw sector images
sec.draw_sector()
# Save sector images
sec.images.save_sector_map('test/testmap.png')
sec.images.save_sector_info('test/testinfo.png')
sec.images.save_sector_orbits('test/map.png')
if __name__ == '__main__':
gen()
#stats()
#runStats = cProfile.run('gen()', sort='cumtime')
|
nilq/baby-python
|
python
|
from .abstract_conjunction import AbstractConjunction
from .condition_type import ConditionType
class OrConjunction(AbstractConjunction):
def __init__(self, conditions):
super().__init__(type_=ConditionType.OR.value, conditions=conditions)
|
nilq/baby-python
|
python
|
import socket
from enum import IntEnum
import json
import argparse
# Enum of available commands
class Command(IntEnum):
Undefined = 1
SafeModeEnable = 2
SafeModeDisable = 3
ShowNumCommands = 4
ShowNumSafeModes = 5
ShowUpTime = 6
ResetCommandCounter = 7
Shutdown = 8
MAX_COMMAND_NUM = 9
# default IP address to connect to
ADDRESS = '127.0.0.1'
# default port to connect to
PORT = 8080
# user prompt to request input
PROMPT = (
"\n"
"invalid: " + str(int(Command.Undefined)) + "\n"
"safe mode enable: " + str(int(Command.SafeModeEnable)) + "\n"
"safe mode disable: " + str(int(Command.SafeModeDisable)) + "\n"
"show number of commands received: " + str(int(Command.ShowNumCommands)) + "\n"
"show number of safe modes: " + str(int(Command.ShowNumSafeModes)) + "\n"
"show up time: " + str(int(Command.ShowUpTime)) + "\n"
"reset command counter: "+ str(int(Command.ResetCommandCounter)) + "\n"
"shutdown: " + str(int(Command.Shutdown)) + "\n"
"\n"
"So... what will it be, boss?\n"
"Type a number: "
)
# check if a string is an int
def IsInt(s):
try:
int(s)
return True
except ValueError:
return False
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-a", type=str, help="IP address to connect to")
parser.add_argument("-p", type=int, help="Port to connect to")
args = parser.parse_args()
if args.a:
ADDRESS = args.a
if args.p:
PORT = args.p
# connect to server and issue commands
print("Just wait a hot second, my dude.")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((ADDRESS, PORT))
print("Alright, alright, cool. Connection established. YOU'RE IN!")
while True:
command = input(PROMPT)
if (IsInt(command) and (int(command) >= 1) and (int(command) <= MAX_COMMAND_NUM)):
s.sendall(bytes(command, 'utf-8'))
data = s.recv(1024).decode("utf-8")
data = json.loads(data)
print("\nServer says:")
for key in data:
print(key, '->', data[key])
print("")
if (Command(int(command)) == Command.Shutdown):
break
else:
print("\nHmm, no. Did I say that was an option?")
print("This is Bravo Six, going dark.")
|
nilq/baby-python
|
python
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateSessionTargetResourceDetails(object):
"""
Details about a bastion session's target resource.
"""
#: A constant which can be used with the session_type property of a CreateSessionTargetResourceDetails.
#: This constant has a value of "MANAGED_SSH"
SESSION_TYPE_MANAGED_SSH = "MANAGED_SSH"
#: A constant which can be used with the session_type property of a CreateSessionTargetResourceDetails.
#: This constant has a value of "PORT_FORWARDING"
SESSION_TYPE_PORT_FORWARDING = "PORT_FORWARDING"
def __init__(self, **kwargs):
"""
Initializes a new CreateSessionTargetResourceDetails object with values from keyword arguments. This class has the following subclasses, and if you are using this class as input
to a service operation then you should favor using a subclass over the base class:
* :class:`~oci.bastion.models.CreateManagedSshSessionTargetResourceDetails`
* :class:`~oci.bastion.models.CreatePortForwardingSessionTargetResourceDetails`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param session_type:
The value to assign to the session_type property of this CreateSessionTargetResourceDetails.
Allowed values for this property are: "MANAGED_SSH", "PORT_FORWARDING"
:type session_type: str
:param target_resource_port:
The value to assign to the target_resource_port property of this CreateSessionTargetResourceDetails.
:type target_resource_port: int
"""
self.swagger_types = {
'session_type': 'str',
'target_resource_port': 'int'
}
self.attribute_map = {
'session_type': 'sessionType',
'target_resource_port': 'targetResourcePort'
}
self._session_type = None
self._target_resource_port = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['sessionType']
if type == 'MANAGED_SSH':
return 'CreateManagedSshSessionTargetResourceDetails'
if type == 'PORT_FORWARDING':
return 'CreatePortForwardingSessionTargetResourceDetails'
else:
return 'CreateSessionTargetResourceDetails'
@property
def session_type(self):
"""
**[Required]** Gets the session_type of this CreateSessionTargetResourceDetails.
The session type.
Allowed values for this property are: "MANAGED_SSH", "PORT_FORWARDING"
:return: The session_type of this CreateSessionTargetResourceDetails.
:rtype: str
"""
return self._session_type
@session_type.setter
def session_type(self, session_type):
"""
Sets the session_type of this CreateSessionTargetResourceDetails.
The session type.
:param session_type: The session_type of this CreateSessionTargetResourceDetails.
:type: str
"""
allowed_values = ["MANAGED_SSH", "PORT_FORWARDING"]
if not value_allowed_none_or_none_sentinel(session_type, allowed_values):
raise ValueError(
"Invalid value for `session_type`, must be None or one of {0}"
.format(allowed_values)
)
self._session_type = session_type
@property
def target_resource_port(self):
"""
Gets the target_resource_port of this CreateSessionTargetResourceDetails.
The port number to connect to on the target resource.
:return: The target_resource_port of this CreateSessionTargetResourceDetails.
:rtype: int
"""
return self._target_resource_port
@target_resource_port.setter
def target_resource_port(self, target_resource_port):
"""
Sets the target_resource_port of this CreateSessionTargetResourceDetails.
The port number to connect to on the target resource.
:param target_resource_port: The target_resource_port of this CreateSessionTargetResourceDetails.
:type: int
"""
self._target_resource_port = target_resource_port
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
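# Subtype resolution sketch (illustrative payload): get_subtype() maps the
# wire-format discriminator to the concrete model class name.
#
#   CreateSessionTargetResourceDetails.get_subtype({'sessionType': 'MANAGED_SSH'})
#   # -> 'CreateManagedSshSessionTargetResourceDetails'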
|
nilq/baby-python
|
python
|
from django.test import TestCase
from dfirtrack_config.filter_forms import AssignmentFilterForm
class AssignmentFilterFormTestCase(TestCase):
"""assignment filter form tests"""
def test_case_form_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['case'].label, 'Filter for case')
def test_case_form_empty_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['case'].empty_label, 'Filter for case')
def test_tag_form_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['tag'].label, 'Filter for tag')
def test_tag_form_empty_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['tag'].empty_label, 'Filter for tag')
def test_user_form_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['user'].label, 'Filter for user')
def test_user_form_empty_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(form.fields['user'].empty_label, 'No user assigned')
def test_filter_assignment_view_keep_form_label(self):
"""test form label"""
# get object
form = AssignmentFilterForm()
# compare
self.assertEqual(
form.fields['filter_assignment_view_keep'].label,
'Remember filter settings (confirm by applying)',
)
def test_assignment_filter_form_empty(self):
"""test minimum form requirements / VALID"""
# get object
form = AssignmentFilterForm(data={})
# compare
self.assertTrue(form.is_valid())
|
nilq/baby-python
|
python
|
from guy import Guy,http
@http(r"/item/(\d+)")
def getItem(web,number):
web.write( "item %s"%number )
def test_hook_with_classic_fetch(runner):
class T(Guy):
__doc__="""Hello
<script>
async function testHook() {
var r=await window.fetch("/item/42")
return await r.text()
}
</script>
"""
async def init(self):
retour =await self.js.testHook()
self.exit(retour)
t=T()
retour=runner(t)
assert retour == "item 42"
def test_hook_with_guy_fetch(runner):
class T(Guy):
__doc__="""Hello
<script>
async function testHook() {
var r=await guy.fetch("/item/42") // not needed in that case (no cors trouble!)
return await r.text()
}
</script>
"""
async def init(self):
retour =await self.js.testHook()
self.exit(retour)
t=T()
retour=runner(t)
assert retour == "item 42"
|
nilq/baby-python
|
python
|
'''Google Sheets Tools'''
import os
from pathlib import Path
import subprocess
import pandas as pd
def save_csv(url: str, save_path: Path, sheet_name: str, show_summary=False):
'''Download a data sheet from Google Sheets and save to csv file'''
sheet_url = f'{url}&sheet={sheet_name}'
subprocess.run(('wget', '-o', '/dev/null', '-O', str(save_path), sheet_url), check=True)
recordings = pd.read_csv(str(save_path))
if show_summary:
print(recordings.head())
def main():
env_var = 'GOOGLE_SHEETS_URL'
url = os.environ.get(env_var)
assert url, f'Invalid {env_var}'
csv_path = Path('/tmp/road_roughness.csv')
save_csv(url, csv_path, 'recordings', show_summary=True)
if __name__ == '__main__':
main()
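# --- Hedged usage sketch (assumption: GOOGLE_SHEETS_URL points at a Google
# Sheets CSV export endpoint, e.g. ".../export?format=csv", to which save_csv
# appends "&sheet=<name>" as above; the sheet ID below is a placeholder). ---
def _demo_save_csv():
    url = 'https://docs.google.com/spreadsheets/d/<SHEET_ID>/export?format=csv'
    save_csv(url, Path('/tmp/demo.csv'), 'recordings', show_summary=True)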
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# flake8: noqa
from __future__ import absolute_import
from __future__ import print_function
import io
import os
from os import path
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import setuptools
from setuptools.command.develop import develop
from setuptools.command.install import install
here = os.path.abspath(os.path.dirname(__file__))
def read(*names, **kwargs):
return io.open(
path.join(here, *names),
encoding=kwargs.get("encoding", "utf8")
).read()
long_description = read("README.md")
requirements = read("requirements.txt").split("\n")
optional_requirements = {}
conda_prefix = os.getenv('CONDA_PREFIX')
windows = os.name == 'nt'
def get_pybind_include():
if windows:
return os.path.join(conda_prefix, 'Library', 'include')
return os.path.join(conda_prefix, 'include')
def get_eigen_include():
if windows:
return os.path.join(conda_prefix, 'Library', 'include', 'eigen3')
return os.path.join(conda_prefix, 'include', 'eigen3')
def get_library_dirs():
if windows:
return os.path.join(conda_prefix, 'Library', 'lib')
return os.path.join(conda_prefix, 'lib')
ext_modules = [
Extension(
'compas_wood._wood',
sorted([
'src/clipper.cpp',
'src/connection_zones.cpp',
'src/xxx_interop_python.cpp'
]),
include_dirs=[
'./include',
get_eigen_include(),
get_pybind_include()
],
library_dirs=[
get_library_dirs(),
],
libraries=['mpfr', 'gmp'],
language='c++'
),
]
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
import os
with tempfile.NamedTemporaryFile('w', suffix='.cpp', delete=False) as f:
f.write('int main (int argc, char **argv) { return 0; }')
fname = f.name
try:
compiler.compile([fname], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
finally:
try:
os.remove(fname)
except OSError:
pass
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14/17] compiler flag.
The newer version is preferred over c++11 (when it is available).
"""
# flags = ['-std=c++17', '-std=c++14', '-std=c++11']
flags = ['-std=c++14', '-std=c++11']
for flag in flags:
if has_flag(compiler, flag):
return flag
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
'msvc': ['/EHsc', '/std:c++14'],
'unix': [],
}
l_opts = {
'msvc': [],
'unix': [],
}
# if sys.platform == 'darwin':
# darwin_opts = ['-stdlib=libc++', '-mmacosx-version-min=10.14']
# c_opts['unix'] += darwin_opts
# l_opts['unix'] += darwin_opts
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
link_opts = self.l_opts.get(ct, [])
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
opts.append(cpp_flag(self.compiler))
if has_flag(self.compiler, '-fvisibility=hidden'):
opts.append('-fvisibility=hidden')
opts.append('-DCGAL_DEBUG=1')
for ext in self.extensions:
ext.define_macros = [('VERSION_INFO', '"{}"'.format(self.distribution.get_version()))]
ext.extra_compile_args = opts
ext.extra_link_args = link_opts
build_ext.build_extensions(self)
setup(
name="compas_wood",
version="0.1.0",
description="Timber joinery generation based on CGAL library.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ibois-epfl/compas_wood",
author="petras vestartas",
author_email="petrasvestartas@gmail.com",
license="GPL-3 License",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: GPL-3 License",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
],
keywords=[],
project_urls={},
packages=["compas_wood"],
package_dir={"": "src"},
# package_data={},
# data_files=[],
# include_package_data=True,
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExt},
setup_requires=['pybind11>=2.5.0'],
install_requires=requirements,
python_requires=">=3.6",
extras_require=optional_requirements,
zip_safe=False,
)
setup(
name="compas_wood",
version="0.1.0",
description="joinery generation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/IBOIS/compas_wood",
author="Petras Vestartas",
author_email="petrasvestartas@gmail.com",
license="MIT license",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: MIT License",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
],
keywords=[],
project_urls={},
packages=["compas_wood"],
package_dir={"": "src"},
package_data={},
data_files=[],
include_package_data=True,
zip_safe=False,
install_requires=requirements,
python_requires=">=3.6",
extras_require=optional_requirements,
entry_points={
"console_scripts": [],
},
ext_modules=[],
)
|
nilq/baby-python
|
python
|
# Copyright (c) 2021 Alethea Katherine Flowers.
# Published under the standard MIT License.
# Full text available at: https://opensource.org/licenses/MIT
"""Helps create releases for Winterbloom stuff"""
import atexit
import collections
import datetime
import importlib.util
import mimetypes
import os
import os.path
import shutil
import tempfile
import webbrowser
import requests
from wintertools import git
GITHUB_API_TOKEN = os.environ["GITHUB_API_KEY"]
mimetypes.init()
class _Artifacts:
directory = tempfile.mkdtemp()
items = []
atexit.register(lambda: shutil.rmtree(_Artifacts.directory, ignore_errors=True))
def _import_config(root):
config_path = os.path.join(root, ".github", "releasing", "config.py")
spec = importlib.util.spec_from_file_location("release_config", config_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def _day_ordinal(day):
if 4 <= day <= 20 or 24 <= day <= 30:
return "th"
else:
return ["st", "nd", "rd"][day % 10 - 1]
def _git_info() -> dict:
info = {}
info["root"] = git.root()
info["repo"] = git.repo_name()
git.fetch_tags()
info["last_release"] = git.latest_tag()
# List of commits/changes since last version
changes = git.get_change_summary(info["last_release"], "HEAD")
# Arrange changes by category
categorized_changes = collections.defaultdict(list)
for change in changes:
if ": " in change:
category, change = change.split(": ", 1)
category = category.capitalize()
else:
category = "Other"
categorized_changes[category].append(change)
info["changes"] = categorized_changes
# Generate a new tag name
now = datetime.datetime.now()
info["tag"] = now.strftime(f"%Y.%m.{now.day}")
info["name"] = datetime.datetime.now().strftime(
f"%B {now.day}{_day_ordinal(now.day)}, %Y"
)
return info
def _github_session():
session = requests.Session()
session.headers["Accept"] = "application/vnd.github.v3+json"
session.headers["Authorization"] = f"Bearer {GITHUB_API_TOKEN}"
return session
def _create_release(session, git_info, description):
url = f"https://api.github.com/repos/{git_info['repo']}/releases"
response = session.post(
url,
json={
"tag_name": git_info["tag"],
"target_commitish": "main",
"name": git_info["name"],
"body": description,
"draft": True,
},
)
response.raise_for_status()
return response.json()
def _upload_release_artifact(session, release, artifact):
content_type, _ = mimetypes.guess_type(artifact["path"])
if not content_type:
content_type = "application/octet-string"
with open(artifact["path"], "rb") as fh:
response = session.post(
release["upload_url"].split("{", 1)[0],
params={
"name": artifact["name"],
},
headers={"Content-Type": content_type},
data=fh.read(),
)
response.raise_for_status()
def add_artifact(src, name, **details):
if not details:
details = {}
dst = os.path.join(_Artifacts.directory, name)
shutil.copy(src, dst)
details["name"] = name
details["path"] = dst
_Artifacts.items.append(details)
def main():
git_info = _git_info()
print(f"Working from {git_info['root']}")
os.chdir(git_info["root"])
print(f"Tagging {git_info['tag']}...")
git.tag(git_info["tag"])
print("Preparing artifacts...")
config = _import_config(git_info["root"])
config.prepare_artifacts(git_info)
print("Preparing release description...")
description = config.prepare_description(git_info, _Artifacts.items)
description = git.open_editor(description)
print("Creating release...")
gh = _github_session()
release = _create_release(gh, git_info, description)
for artifact in _Artifacts.items:
print(f"Uploading {artifact['name']}...")
_upload_release_artifact(gh, release, artifact)
webbrowser.open(release["html_url"])
if __name__ == "__main__":
main()
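# --- Hedged sketch of the per-repo config contract, inferred from main()
# above: _import_config loads ".github/releasing/config.py", which must
# define the two hooks below. The function names match the calls in main();
# the bodies (and the import path) are illustrative assumptions only.
#
#     from wintertools.release import add_artifact
#
#     def prepare_artifacts(git_info):
#         add_artifact('build/firmware.bin', f"firmware-{git_info['tag']}.bin")
#
#     def prepare_description(git_info, artifacts):
#         return f"Release {git_info['name']}"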
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.12 on 2021-08-06 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("aidants_connect_web", "0064_merge_20210804_1156"),
]
operations = [
migrations.AlterField(
model_name="habilitationrequest",
name="email",
field=models.EmailField(max_length=150),
),
]
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from sch.models import search1
from sch.models import subs
# Create your views here.
def list(request):
select1=request.POST.get('select1')
select2=request.POST.get('select2')
ls = search1.objects.filter(City=select2)
print(select2)
print(select1)
return render(request,'search/search.html',{"ls1":ls})
def footer1(request):
return render(request,'mid/index.html.carousel_32cb')
def subs1(request):
if request.method=="POST":
print("email submitted")
email=request.POST['email']
print(email)
return render(request,'blood/index.html')
|
nilq/baby-python
|
python
|
from gooey import options
from gooey_video import ffmpeg
def add_parser(parent):
parser = parent.add_parser('trim_crop', prog="Trim, Crop & Scale Video", help='Where does this show??')
input_group = parser.add_argument_group('Input', gooey_options=options.ArgumentGroup(
show_border=True
))
# basic details
input_group.add_argument(
'input',
metavar='Input',
help='The video you want to trim, crop, or scale',
default=r'C:\Users\Chris\Dropbox\pretty_gui\Gooey\demo-screen-recording.mp4',
widget='FileChooser',
gooey_options=options.FileChooser(
wildcard='video files (*.mp4)|*.mp4',
full_width=True
))
settings = parser.add_argument_group(
'Trim Settings',
gooey_options=options.ArgumentGroup(
show_border=True
))
start_position = settings.add_mutually_exclusive_group(gooey_options=options.MutexGroup(
initial_selection=0
))
start_position.add_argument(
'--start-ss',
metavar='Start position',
help='Start position in seconds',
widget='IntegerField',
gooey_options=options.IntegerField(
min=0,
max=99999,
increment_size=1
))
start_position.add_argument(
'--start-ts',
metavar='Start position',
help='Start position as a concrete timestamp',
gooey_options=options.TextField(
placeholder='HH:MM:SS',
validator=options.RegexValidator(
test=r'^\d{2}:\d{2}:\d{2}$',
message='Must be in the format HH:MM:SS'
)
))
end = settings.add_mutually_exclusive_group(
gooey_options=options.MutexGroup(
initial_selection=0
))
end.add_argument(
'--end-ss',
metavar='End position',
help='End position in seconds',
widget='IntegerField',
gooey_options=options.IntegerField(
min=0,
max=99999,
increment_size=1
))
end.add_argument(
'--end-ts',
metavar='End position',
help='End position as a concrete timestamp',
gooey_options=options.TextField(
placeholder='HH:MM:SS',
validator=options.RegexValidator(
test=r'^\d{2}:\d{2}:\d{2}$',
message='Must be in the format HH:MM:SS'
)
))
crop_settings = parser.add_argument_group('Crop Settings', gooey_options=options.ArgumentGroup(
show_border=True
))
crop_settings.add_argument(
'--enable-crop',
metavar='Crop Video',
help='Enable the cropping filters',
action='store_true',
gooey_options=options.LayoutOptions(
full_width=True,
show_label=False
)
)
crop_settings.add_argument(
'--crop-width',
metavar='Width',
help='Width of the cropped region',
default=640,
widget='IntegerField',
gooey_options=options.IntegerField(
min=1,
max=1920
))
crop_settings.add_argument(
'--crop-height',
metavar='Height',
help='Height of the cropped region',
default=480,
widget='IntegerField',
gooey_options=options.IntegerField(
min=1,
max=1080
))
crop_settings.add_argument(
'--crop-x',
metavar='Margin left',
help='X position where to position the crop region',
widget='IntegerField',
gooey_options=options.IntegerField(
min=0,
max=1920
))
crop_settings.add_argument(
'--crop-y',
metavar='Margin top',
help='Y position where to position the crop region',
widget='IntegerField',
gooey_options=options.IntegerField(
min=0,
max=1080
))
scale = parser.add_argument_group('Scale Settings', gooey_options=options.ArgumentGroup(
show_border=True
))
scale.add_argument(
'--scale-width',
metavar='Width',
help='Scale the video to this width (-1 preserves aspect ratio)',
default=-1,
widget='IntegerField',
gooey_options=options.IntegerField(
min=-1,
max=1920
))
scale.add_argument(
'--scale-height',
metavar='Height',
help='Scale the video to this height (-1 preserves aspect ratio)',
default=-1,
widget='IntegerField',
gooey_options=options.IntegerField(
min=-2,
max=1080
))
output_group = parser.add_argument_group('Output', gooey_options=options.ArgumentGroup(
show_border=True
))
output_group.add_argument(
'output',
help='Choose where to save the output video',
default=r'C:\Users\Chris\Desktop\output.mp4',
widget='FileSaver',
gooey_options=options.FileSaver(
wildcard='video files (*.mp4)|*.mp4',
default_file='output.mp4',
full_width=True
))
output_group.add_argument(
'--overwrite',
metavar='Overwrite existing',
help='Overwrite the output video if it already exists?',
action='store_const',
default=True,
const='-y',
widget='CheckBox')
return parser
def run(args):
template = 'ffmpeg.exe ' \
'-i "{input}" ' \
'-ss {trim_start} ' \
'-to {trim_end} ' \
'-filter:v "crop={crop_w}:{crop_h}:{crop_x}:{crop_y},scale={scale_w}:{scale_h}" ' \
'{overwrite} ' \
'"{output}"'
cmd = template.format(
input=args.input,
trim_start=args.start_ts or args.start_ss or 0,
trim_end=args.end_ts or args.end_ss or '99:59:59',
crop_w=args.crop_width if args.enable_crop else 'iw',
crop_h=args.crop_height if args.enable_crop else 'ih',
crop_x=args.crop_x if args.enable_crop else 0,
crop_y=args.crop_y if args.enable_crop else 0,
scale_w=args.scale_width,
scale_h=args.scale_height,
overwrite=args.overwrite,
output=args.output
)
ffmpeg.run(cmd)
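# For reference, a hedged example of the command the template above renders
# when cropping is disabled and --overwrite was passed (paths illustrative):
#   ffmpeg.exe -i "in.mp4" -ss 0 -to 99:59:59 \
#       -filter:v "crop=iw:ih:0:0,scale=-1:-1" -y "out.mp4"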
|
nilq/baby-python
|
python
|
import pytest
from gpiozero import Device
from gpiozero.pins.mock import MockFactory, MockPWMPin
from pytenki import PyTenki
@pytest.fixture
def mock_factory(request):
save_factory = Device.pin_factory
Device.pin_factory = MockFactory()
yield Device.pin_factory
if Device.pin_factory is not None:
Device.pin_factory.reset()
Device.pin_factory = save_factory
@pytest.fixture
def pwm(request, mock_factory):
mock_factory.pin_class = MockPWMPin
@pytest.fixture(scope='module')
def led_pins():
return {
'fine': 4,
'cloud': 17,
'rain': 27,
'snow': 22,
}
@pytest.fixture(scope='module')
def button_pin():
return 2
@pytest.fixture
def pytenki(mock_factory, pwm):
return PyTenki()
@pytest.fixture
def pytenki_init(mock_factory, pwm, led_pins, button_pin):
return PyTenki(led_pins=led_pins, button_pin=button_pin)
|
nilq/baby-python
|
python
|
# Read a whitespace-separated list of integers from stdin.
a = list(map(int, input().split()))
n = len(a)
l = []      # collects the two partial maxima so their sum can be printed
m = 0       # best difference found so far in the current segment
j = n - 1   # index of the most recent descent seen while scanning leftwards
# Scan right-to-left: update m when a[i] rises above both its left neighbour
# and a[0]; on a descent (a[i] < a[i-1]), remember the position and reset m.
for i in range(n - 2, 0, -1):
    if a[i] > a[i - 1] and a[i] > a[0]:
        m = max(m, a[i] - a[0])
        # print(m)  # debug trace
    elif a[i] < a[i - 1]:
        j = i
        m = 0
l.append(m)
print(m)
# Second pass: best gain from any index at or after the last descent to a[n-1].
m = 0
while j < n - 1:
    m = max(m, a[n - 1] - a[j])
    j += 1
l.append(m)
print(m)
print(sum(l))
|
nilq/baby-python
|
python
|
""" Pacakge for various utilities """
|
nilq/baby-python
|
python
|
# type:ignore
from django.conf.urls import include, url
from . import views
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', views.index, name='index'),
path('newproject', views.create_project, name = "create_project"),
path('profile/<username>', views.profile, name='profile'),
path("post/<int:id>", views.view_project, name="post_item"),
path("project/<int:id>", views.view_project, name="view_project"),
url(r"^api/project/$", views.ProjectList.as_view()),
url(r"api/project/project-id/(?P<pk>[0-9]+)/$", views.ProjectDescription.as_view()),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
nilq/baby-python
|
python
|
"""
Arrangement of panes.
Don't confuse with the prompt_toolkit VSplit/HSplit classes. This is a higher
level abstraction of the Pymux window layout.
An arrangement consists of a list of windows. And a window has a list of panes,
arranged by ordering them in HSplit/VSplit instances.
"""
from __future__ import unicode_literals
from ptterm import Terminal
from prompt_toolkit.application.current import get_app, set_app
from prompt_toolkit.buffer import Buffer
import math
import os
import weakref
import six
__all__ = (
'LayoutTypes',
'Pane',
'HSplit',
'VSplit',
'Window',
'Arrangement',
)
class LayoutTypes:
# The values are in lowercase with dashes, because that is what users can
# use at the command line.
EVEN_HORIZONTAL = 'even-horizontal'
EVEN_VERTICAL = 'even-vertical'
MAIN_HORIZONTAL = 'main-horizontal'
MAIN_VERTICAL = 'main-vertical'
TILED = 'tiled'
_ALL = [EVEN_HORIZONTAL, EVEN_VERTICAL, MAIN_HORIZONTAL, MAIN_VERTICAL, TILED]
class Pane(object):
"""
One pane, containing one process and a search buffer for going into copy
mode or displaying the help.
"""
_pane_counter = 1000 # Start at 1000, to be sure to not confuse this with pane indexes.
def __init__(self, terminal=None):
assert isinstance(terminal, Terminal)
self.terminal = terminal
self.chosen_name = None
# Display the clock instead of this pane's content.
self.clock_mode = False
# Give unique ID.
Pane._pane_counter += 1
self.pane_id = Pane._pane_counter
# Prompt_toolkit buffer, for displaying scrollable text.
# (In copy mode, or help mode.)
# Note: Because the scroll_buffer can only contain text, we also use
# get_tokens_for_line, which returns the token list with color
# information for each line.
self.scroll_buffer = Buffer(read_only=True)
self.copy_get_tokens_for_line = lambda lineno: []
self.display_scroll_buffer = False
self.scroll_buffer_title = ''
@property
def process(self):
return self.terminal.process
@property
def name(self):
"""
The name for the window as displayed in the title bar and status bar.
"""
# Name, explicitly set for the pane.
if self.chosen_name:
return self.chosen_name
else:
# Name from the process running inside the pane.
name = self.process.get_name()
if name:
return os.path.basename(name)
return ''
def enter_copy_mode(self):
"""
Suspend the process, and copy the screen content to the `scroll_buffer`.
That way the user can search through the history and copy/paste.
"""
self.terminal.enter_copy_mode()
def focus(self):
"""
Focus this pane.
"""
get_app().layout.focus(self.terminal)
class _WeightsDictionary(weakref.WeakKeyDictionary):
"""
Dictionary for the weights: weak keys, but defaults to 1.
(Weights are used to represent the proportion of pane sizes in
HSplit/VSplit lists.)
This dictionary maps the child (another HSplit/VSplit or Pane), to the
size. (Integer.)
"""
def __getitem__(self, key):
try:
# (Don't use 'super' here. This is a classobj in Python2.)
return weakref.WeakKeyDictionary.__getitem__(self, key)
except KeyError:
return 1
class _Split(list):
"""
Base class for horizontal and vertical splits. (This is a higher level
split than prompt_toolkit.layout.HSplit.)
"""
def __init__(self, *a, **kw):
list.__init__(self, *a, **kw)
# Mapping children to its weight.
self.weights = _WeightsDictionary()
def __hash__(self):
# Required in order to add HSplit/VSplit to the weights dict.
return id(self)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, list.__repr__(self))
class HSplit(_Split):
""" Horizontal split. """
class VSplit(_Split):
""" Horizontal split. """
class Window(object):
"""
Pymux window.
"""
_window_counter = 1000 # Start here, to avoid confusion with window index.
def __init__(self, index=0):
self.index = index
self.root = HSplit()
self._active_pane = None
self._prev_active_pane = None
self.chosen_name = None
self.previous_selected_layout = None
#: When true, the current pane is zoomed in.
self.zoom = False
#: When True, send input to all panes simultaneously.
self.synchronize_panes = False
# Give unique ID.
Window._window_counter += 1
self.window_id = Window._window_counter
def invalidation_hash(self):
"""
Return a hash (string) that can be used to determine when the layout
has to be rebuild.
"""
# if not self.root:
# return '<empty-window>'
def _hash_for_split(split):
result = []
for item in split:
if isinstance(item, (VSplit, HSplit)):
result.append(_hash_for_split(item))
elif isinstance(item, Pane):
result.append('p%s' % item.pane_id)
if isinstance(split, HSplit):
return 'HSplit(%s)' % (','.join(result))
else:
return 'VSplit(%s)' % (','.join(result))
return '<window_id=%s,zoom=%s,children=%s>' % (
self.window_id, self.zoom, _hash_for_split(self.root))
@property
def active_pane(self):
"""
The current active :class:`.Pane`.
"""
return self._active_pane
@active_pane.setter
def active_pane(self, value):
assert isinstance(value, Pane)
# Remember previous active pane.
if self._active_pane:
self._prev_active_pane = weakref.ref(self._active_pane)
self.zoom = False
self._active_pane = value
@property
def previous_active_pane(self):
"""
The previous active :class:`.Pane` or `None` if unknown.
"""
p = self._prev_active_pane and self._prev_active_pane()
# Only return when this pane actually still exists in the current
# window.
if p and p in self.panes:
return p
@property
def name(self):
"""
The name for this window as it should be displayed in the status bar.
"""
# Name, explicitly set for the window.
if self.chosen_name:
return self.chosen_name
else:
pane = self.active_pane
if pane:
return pane.name
return ''
def add_pane(self, pane, vsplit=False):
"""
Add another pane to this Window.
"""
assert isinstance(pane, Pane)
assert isinstance(vsplit, bool)
split_cls = VSplit if vsplit else HSplit
if self.active_pane is None:
self.root.append(pane)
else:
parent = self._get_parent(self.active_pane)
same_direction = isinstance(parent, split_cls)
index = parent.index(self.active_pane)
if same_direction:
parent.insert(index + 1, pane)
else:
new_split = split_cls([self.active_pane, pane])
parent[index] = new_split
# Give the newly created split the same weight as the original
# pane that was at this position.
parent.weights[new_split] = parent.weights[self.active_pane]
self.active_pane = pane
self.zoom = False
def remove_pane(self, pane):
"""
Remove pane from this Window.
"""
assert isinstance(pane, Pane)
if pane in self.panes:
# When this pane was focused, switch to previous active or next in order.
if pane == self.active_pane:
if self.previous_active_pane:
self.active_pane = self.previous_active_pane
else:
self.focus_next()
# Remove from the parent. When the parent becomes empty, remove the
# parent itself recursively.
p = self._get_parent(pane)
p.remove(pane)
while len(p) == 0 and p != self.root:
p2 = self._get_parent(p)
p2.remove(p)
p = p2
# When the parent has only one item left, collapse into its parent.
while len(p) == 1 and p != self.root:
p2 = self._get_parent(p)
p2.weights[p[0]] = p2.weights[p] # Keep dimensions.
i = p2.index(p)
p2[i] = p[0]
p = p2
@property
def panes(self):
" List with all panes from this Window. "
result = []
for s in self.splits:
for item in s:
if isinstance(item, Pane):
result.append(item)
return result
@property
def splits(self):
" Return a list with all HSplit/VSplit instances. "
result = []
def collect(split):
result.append(split)
for item in split:
if isinstance(item, (HSplit, VSplit)):
collect(item)
collect(self.root)
return result
def _get_parent(self, item):
" The HSplit/VSplit that contains the active pane. "
for s in self.splits:
if item in s:
return s
@property
def has_panes(self):
" True when this window contains at least one pane. "
return len(self.panes) > 0
@property
def active_process(self):
" Return `Process` that should receive user input. "
p = self.active_pane
if p is not None:
return p.process
def focus_next(self, count=1):
" Focus the next pane. "
panes = self.panes
if panes:
self.active_pane = panes[(panes.index(self.active_pane) + count) % len(panes)]
else:
self.active_pane = None # No panes left.
def focus_previous(self):
" Focus the previous pane. "
self.focus_next(count=-1)
def rotate(self, count=1, with_pane_before_only=False, with_pane_after_only=False):
"""
Rotate panes.
When `with_pane_before_only` or `with_pane_after_only` is True, only rotate
with the pane before/after the active pane.
"""
# Create (split, index, pane, weight) tuples.
items = []
current_pane_index = None
for s in self.splits:
for index, item in enumerate(s):
if isinstance(item, Pane):
items.append((s, index, item, s.weights[item]))
if item == self.active_pane:
current_pane_index = len(items) - 1
# Only rotating with the pane before/after? Reduce the list of panes.
if with_pane_before_only:
items = items[current_pane_index - 1:current_pane_index + 1]
elif with_pane_after_only:
items = items[current_pane_index:current_pane_index + 2]
# Rotate positions.
for i, triple in enumerate(items):
split, index, pane, weight = triple
new_item = items[(i + count) % len(items)][2]
split[index] = new_item
split.weights[new_item] = weight
def select_layout(self, layout_type):
"""
Select one of the predefined layouts.
"""
assert layout_type in LayoutTypes._ALL
# When there is only one pane, always choose EVEN_HORIZONTAL,
# Otherwise, we create VSplit/HSplit instances with an empty list of
# children.
if len(self.panes) == 1:
layout_type = LayoutTypes.EVEN_HORIZONTAL
# even-horizontal.
if layout_type == LayoutTypes.EVEN_HORIZONTAL:
self.root = HSplit(self.panes)
# even-vertical.
elif layout_type == LayoutTypes.EVEN_VERTICAL:
self.root = VSplit(self.panes)
# main-horizontal.
elif layout_type == LayoutTypes.MAIN_HORIZONTAL:
self.root = HSplit([
self.active_pane,
VSplit([p for p in self.panes if p != self.active_pane])
])
# main-vertical.
elif layout_type == LayoutTypes.MAIN_VERTICAL:
self.root = VSplit([
self.active_pane,
HSplit([p for p in self.panes if p != self.active_pane])
])
# tiled.
elif layout_type == LayoutTypes.TILED:
panes = self.panes
column_count = math.ceil(len(panes) ** .5)
rows = HSplit()
current_row = VSplit()
for p in panes:
current_row.append(p)
if len(current_row) >= column_count:
rows.append(current_row)
current_row = VSplit()
if current_row:
rows.append(current_row)
self.root = rows
self.previous_selected_layout = layout_type
def select_next_layout(self, count=1):
"""
Select next layout. (Cycle through predefined layouts.)
"""
# List of all layouts. (When we have just two panes, only toggle
# between horizontal/vertical.)
if len(self.panes) == 2:
all_layouts = [LayoutTypes.EVEN_HORIZONTAL, LayoutTypes.EVEN_VERTICAL]
else:
all_layouts = LayoutTypes._ALL
# Get index of current layout.
layout = self.previous_selected_layout or LayoutTypes._ALL[-1]
try:
index = all_layouts.index(layout)
except ValueError:
index = 0
# Switch to new layout.
new_layout = all_layouts[(index + count) % len(all_layouts)]
self.select_layout(new_layout)
def select_previous_layout(self):
self.select_next_layout(count=-1)
def change_size_for_active_pane(self, up=0, right=0, down=0, left=0):
"""
Increase the size of the current pane in any of the four directions.
"""
child = self.active_pane
self.change_size_for_pane(child, up=up, right=right, down=down, left=left)
def change_size_for_pane(self, pane, up=0, right=0, down=0, left=0):
"""
Increase the size of the current pane in any of the four directions.
Positive values indicate an increase, negative values a decrease.
"""
assert isinstance(pane, Pane)
def find_split_and_child(split_cls, is_before):
" Find the split for which we will have to update the weights. "
child = pane
split = self._get_parent(child)
def found():
return isinstance(split, split_cls) and (
not is_before or split.index(child) > 0) and (
is_before or split.index(child) < len(split) - 1)
while split and not found():
child = split
split = self._get_parent(child)
return split, child # split can be None!
def handle_side(split_cls, is_before, amount, trying_other_side=False):
" Increase weights on one side. (top/left/right/bottom). "
if amount:
split, child = find_split_and_child(split_cls, is_before)
if split:
# Find neighbour.
neighbour_index = split.index(child) + (-1 if is_before else 1)
neighbour_child = split[neighbour_index]
# Increase/decrease weights.
split.weights[child] += amount
split.weights[neighbour_child] -= amount
# Ensure that all weights are at least one.
for k, value in split.weights.items():
if value < 1:
split.weights[k] = 1
else:
# When no split has been found where we can move in this
# direction, try to move the other side instead using a
# negative amount. This happens when we run "resize-pane -R 4"
# inside the pane that is completely on the right. In that
# case it's logical to move the left border to the right
# instead.
if not trying_other_side:
handle_side(split_cls, not is_before, -amount,
trying_other_side=True)
handle_side(VSplit, True, left)
handle_side(VSplit, False, right)
handle_side(HSplit, True, up)
handle_side(HSplit, False, down)
def get_pane_index(self, pane):
" Return the index of the given pane. ValueError if not found. "
assert isinstance(pane, Pane)
return self.panes.index(pane)
class Arrangement(object):
"""
Arrangement class for one Pymux session.
This contains the list of windows and the layout of the panes for each
window. All the clients share the same Arrangement instance, but they can
have different windows active.
"""
def __init__(self):
self.windows = []
self.base_index = 0
self._active_window_for_cli = weakref.WeakKeyDictionary()
self._prev_active_window_for_cli = weakref.WeakKeyDictionary()
# The active window of the last CLI. Used as default when a new session
# is attached.
self._last_active_window = None
def invalidation_hash(self):
"""
When this changes, the layout needs to be rebuilt.
"""
if not self.windows:
return '<no-windows>'
w = self.get_active_window()
return w.invalidation_hash()
def get_active_window(self):
"""
The current active :class:`.Window`.
"""
app = get_app()
try:
return self._active_window_for_cli[app]
except KeyError:
self._active_window_for_cli[app] = self._last_active_window or self.windows[0]
return self._active_window_for_cli[app]
def set_active_window(self, window):
assert isinstance(window, Window)
app = get_app()
previous = self.get_active_window()
self._prev_active_window_for_cli[app] = previous
self._active_window_for_cli[app] = window
self._last_active_window = window
def set_active_window_from_pane_id(self, pane_id):
"""
Make the window with this pane ID the active Window.
"""
assert isinstance(pane_id, int)
for w in self.windows:
for p in w.panes:
if p.pane_id == pane_id:
self.set_active_window(w)
def get_previous_active_window(self):
" The previous active Window or None if unknown. "
app = get_app()
try:
return self._prev_active_window_for_cli[app]
except KeyError:
return None
def get_window_by_index(self, index):
" Return the Window with this index or None if not found. "
for w in self.windows:
if w.index == index:
return w
def create_window(self, pane, name=None, set_active=True):
"""
Create a new window that contains just this pane.
:param pane: The :class:`.Pane` instance to put in the new window.
:param name: If given, name for the new window.
:param set_active: When True, focus the new window.
"""
assert isinstance(pane, Pane)
assert name is None or isinstance(name, six.text_type)
# Take the first available index.
taken_indexes = [w.index for w in self.windows]
index = self.base_index
while index in taken_indexes:
index += 1
# Create new window and add it.
w = Window(index)
w.add_pane(pane)
self.windows.append(w)
# Sort windows by index.
self.windows = sorted(self.windows, key=lambda w: w.index)
app = get_app(return_none=True)
if app is not None and set_active:
self.set_active_window(w)
if name is not None:
w.chosen_name = name
assert w.active_pane == pane
assert w._get_parent(pane)
def move_window(self, window, new_index):
"""
Move window to a new index.
"""
assert isinstance(window, Window)
assert isinstance(new_index, int)
window.index = new_index
# Sort windows by index.
self.windows = sorted(self.windows, key=lambda w: w.index)
def get_active_pane(self):
"""
The current :class:`.Pane` from the current window.
"""
w = self.get_active_window()
if w is not None:
return w.active_pane
def remove_pane(self, pane):
"""
Remove a :class:`.Pane`. (Look in all windows.)
"""
assert isinstance(pane, Pane)
for w in self.windows:
w.remove_pane(pane)
# No panes left in this window?
if not w.has_panes:
# Focus next.
for app, active_w in self._active_window_for_cli.items():
if w == active_w:
with set_app(app):
self.focus_next_window()
self.windows.remove(w)
def focus_previous_window(self):
w = self.get_active_window()
self.set_active_window(self.windows[
(self.windows.index(w) - 1) % len(self.windows)])
def focus_next_window(self):
w = self.get_active_window()
self.set_active_window(self.windows[
(self.windows.index(w) + 1) % len(self.windows)])
def break_pane(self, set_active=True):
"""
When the current window has multiple panes, remove the pane from this
window and put it in a new window.
:param set_active: When True, focus the new window.
"""
w = self.get_active_window()
if len(w.panes) > 1:
pane = w.active_pane
self.get_active_window().remove_pane(pane)
self.create_window(pane, set_active=set_active)
def rotate_window(self, count=1):
" Rotate the panes in the active window. "
w = self.get_active_window()
w.rotate(count=count)
@property
def has_panes(self):
" True when any of the windows has a :class:`.Pane`. "
for w in self.windows:
if w.has_panes:
return True
return False
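# --- Hedged, standalone sketch (not part of the original module) of the
# tiled-layout math in Window.select_layout: panes are packed into rows of
# ceil(sqrt(n)) columns each. ---
def _demo_tiled_columns():
    for n in (1, 2, 4, 5, 9, 10):
        print('%d panes -> %d columns per row' % (n, math.ceil(n ** .5)))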
|
nilq/baby-python
|
python
|
from microsetta_public_api.utils._utils import (
jsonify,
DataTable,
create_data_entry,
)
__all__ = [
'testing',
'jsonify',
'DataTable',
'create_data_entry',
]
|
nilq/baby-python
|
python
|
from __future__ import annotations
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import numpy as np
import pandas as pd
import datetime
import tensorflow as tf
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error, mean_absolute_error
from .define_model import (
cnnLSTMModel,
convLSTMModel,
mlpModel,
convModel,
)
from src.features.build_features import DataBlock, to_supervised, to_supervised_shuffled
from pickle import dump, load
np.random.seed(42)
def scale_and_encode(dataframe, subject: int):
"""
Function to scale numerical features and one-hot encode categorical ones
Args:
    dataframe: pd.DataFrame -> a pandas dataframe containing the data
    subject: int -> the held-out subject, used to name the saved scaler
Returns:
    np.array -> a numpy array of scaled and encoded features
"""
# the numeric features which are not dependent on the subject description
numeric_features = ["bvp", "acc_x", "acc_y", "acc_z", "bmi", "age"]
# cat_features = ["sport"]
# create a pipeline to do the transformation
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
# categorical_transformer = Pipeline(steps=[("encoder", OneHotEncoder())])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
# ("cat", categorical_transformer, cat_features),
],
remainder="passthrough",
)
# fit the columntransformer to the dataframe
preprocessor.fit(dataframe)
# save the preprocessor as we will fit this scaler to validation and testing sets
dump(preprocessor, open("models/scaler_and_encoder_{}.pkl".format(subject), "wb"))
# # return the transformed array
return preprocessor.transform(dataframe)
class TrainModel:
"""
Class to handle training using a convLSTM model
"""
def __init__(
self,
train_subjects: list,
valid_subjects: list,
n_timesteps: int,
n_features: int,
n_conv_layers: int,
n_conv_filters: int,
kernel_size: int,
n_lstm_units: int,
n_dense_nodes: int,
n_output_nodes: int,
n_seq: int,
batch_size: int,
epochs: int,
scaler_encoder=None,
):
# define the model
self.model = cnnLSTMModel(
n_conv_layers=n_conv_layers,
n_conv_filters=n_conv_filters,
kernel_size=kernel_size,
n_lstm_units=n_lstm_units,
n_dense_nodes=n_dense_nodes,
n_output_nodes=n_output_nodes,
input_shape=(None, n_timesteps // n_seq, n_features),
)
# compile the model
self.model.compile(loss="mse", metrics="mae", optimizer="adam")
# define the train, test and valid subjects
self.train_subjects = train_subjects
self.test_subjects = []
self.valid_subjects = valid_subjects
# define the number of timesteps used in prediction
self.timesteps = n_timesteps
# define number of features used in the model
self.features = n_features
# # define the length of each subsequence
self.seq = n_seq
# define the batch size
self.batch_size = batch_size
# define epochs
self.epochs = epochs
# valid scores
self.valid_score = 0
# load scaler
self.scaler_encoder = scaler_encoder
def load_data(self, subject: int):
"""
Function to load data for training
Args:
subject: int -> the subject for which data is being loaded
Returns:
X,y : np.array -> training data and labels
"""
# load the dataframe
data = DataBlock("S{}".format(subject), "data/raw/")
df = data.raw_dataframe
# # name the columns
# df.columns = [
# "bvp",
# "acc_x",
# "acc_y",
# "acc_z",
# "gender",
# "age",
# "sport",
# "bmi",
# "heart_rate",
# ]
# if scaling and encoding needs to be done, load the scaler encoder and transform the dataframe
if self.scaler_encoder:
df = self.scaler_encoder.transform(df)
X, y = to_supervised(np.array(df), self.timesteps, 1)
# reshape the X array to meet the requirements of the model
X = self.reshape(X)
return X, y
def train(self):
"""
Function to run training
"""
for sub in self.train_subjects:
# load training and validation data
print("-------------------------------------")
print("training on subject - {}".format(sub))
print("-------------------------------------")
train_X, train_y = self.load_data(subject=sub)
# define callbacks
# early stopping
es_callback = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=5)
log_dir = "models/logs/fit/" + datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S"
)
# tensorboard callback
tb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
# fit the model and save history
self.model.fit(
train_X,
train_y,
epochs=self.epochs,
batch_size=self.batch_size,
callbacks=[es_callback, tb_callback],
verbose=0,
)
print("-------------------------------------")
print("testing on subject - {}".format(self.valid_subjects[0]))
print("-------------------------------------")
# check performance on hold out validation set
valid_X, valid_y = self.load_data(subject=self.valid_subjects[0])
yhat = self.model.predict(valid_X)
# calculate mae of model predictions on validation data
mae = mean_absolute_error(valid_y, yhat)
self.valid_score = mae
# save the model
self.model.save("models/ckpoints/model_{}".format(self.valid_subjects[0]))
# def train_shuffled(
# self,
# train_X: np.array,
# train_y: np.array,
# valid_X: np.array,
# valid_y: np.array,
# valid_subject: int,
# ):
# """
# Function to run training
# """
# # define callbacks
# # early stopping
# es_callback = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=5)
# log_dir = "models/logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# # tensorboard callback
# tb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
# # fit the model and save history
# self.model.fit(
# train_X,
# train_y,
# epochs=self.epochs,
# batch_size=self.batch_size,
# callbacks=[es_callback, tb_callback],
# )
# yhat = process.model.predict(valid_X)
# mae = mean_absolute_error(valid_y, yhat)
# self.valid_score = mae
# self.model.save("models/ckpoints/model_{}".format(valid_subject))
def reshape(self, X: np.array):
"Function which reshapes the input data into the required shape for CNN LSTM model"
return X.reshape(
(X.shape[0], self.seq, self.timesteps // self.seq, self.features)
)
if __name__ == "__main__":
total_subjects = list(range(1, 16))
val_scores = []
# iterate through each subject and treat it as validation set
for i in total_subjects:
print("******************************************")
print("training fold - {}".format(i))
print("******************************************")
# defining training and validation subjects
train_subjects = [x for x in total_subjects if x != i]
valid_subjects = [i]
# initiate a list of dataframes
list_of_dfs = []
# append all the dataframes in the training set
for subject in train_subjects:
data = DataBlock("S{}".format(subject), "data/raw/")
df = data.raw_dataframe
list_of_dfs.append(df)
# create a concatenated dataframe
frames = pd.concat(list_of_dfs)
# scale and encode training set
sf_frames = scale_and_encode(frames, i)
# use the saved scaler encoder for later use with validation set
saved_scaler_encoder = load(
open("models/scaler_and_encoder_{}.pkl".format(i), "rb")
)
# define number of features
n_features = 8
# instantiate the training model process -> for each training fold, the model is freshly initiated
process = TrainModel(
train_subjects=train_subjects,
valid_subjects=valid_subjects,
n_timesteps=8,
n_features=n_features,
n_conv_layers=2,
n_conv_filters=20,
kernel_size=4,
n_lstm_units=64,
n_dense_nodes=32,
n_output_nodes=1,
n_seq=1,
batch_size=100,
epochs=100,
scaler_encoder=saved_scaler_encoder,
)
# run training
process.train()
# print and save validation scores
print(
"validation score on subject -{} ".format(valid_subjects[0]),
process.valid_score,
)
val_scores.append(process.valid_score)
print(val_scores)
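# --- Hedged, standalone sketch of the scale-and-encode idea implemented in
# scale_and_encode() above (toy columns only; the real feature list is
# defined there). Safe to call independently of the training data. ---
def _demo_scale_and_encode():
    toy = pd.DataFrame({'bvp': [0.1, 0.4], 'age': [25, 40], 'heart_rate': [70, 120]})
    pre = ColumnTransformer(
        transformers=[('num', Pipeline(steps=[('scaler', StandardScaler())]), ['bvp', 'age'])],
        remainder='passthrough',
    )
    print(pre.fit_transform(toy))  # scaled bvp/age, heart_rate passed through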
|
nilq/baby-python
|
python
|
#It is necessary to import the datetime module when handling date and time
import datetime
currentTime = datetime.datetime.now()
currentDate = datetime.date.today()
#This will print the date
#print(currentDate)
#This the year
#print(currentDate.year)
#This the month
#print(currentDate.month)
#And this the day...
#print(currentDate.day)
#The "strftime()" function is a more common way for getting specific elements of date
#day = currentDate.strftime('%d')
#month = currentDate.strftime('%B')
#year = currentDate.strftime('%Y')
#This will print today's date
#print("Today's date is the " + day + "th of " + month + ", " + year + ".")
print("Okay, what if I told you I could guess how many days till your birthday...")
userBirthday = input("When's your birthday? Write it here: ")
try:
bday = datetime.datetime.strptime(userBirthday, '%d/%m/%Y').date()
except ValueError:
print("Oh sorry, my bad... You are meant to put it in this format; dd/mm/yyyy.")
userBirthday = input("When's your next birthday? Write it here: ")
try:
bday = datetime.datetime.strptime(userBirthday, '%d/%m/%Y').date()
except ValueError:
print("Invalid input... Input not processed...")
try:
daysTillBday = bday - currentDate
print("I think I got that... Ok, so there are " + str(daysTillBday.days) + " days till you birthday right?")
except:
print("Uh oh... \nI couldn't really catch your birthday, no worries, there's always next time...")
print("Goodbye.")
|
nilq/baby-python
|
python
|
RAD_FILE_FOLDER = ""
path_stack = [] #wrt RAD_FILE_FOLDER
JSON_FILE_FOLDER = ""
|
nilq/baby-python
|
python
|
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from batchgenerators.augmentations.utils import resize_segmentation
from uuunet.experiment_planning.plan_and_preprocess_task import get_caseIDs_from_splitted_dataset_folder
from uuunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Process, Queue
import torch
import threading
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import SimpleITK as sitk
import shutil
from multiprocessing import Pool
from uuunet.training.model_restore import load_model_and_checkpoint_files
from uuunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from uuunet.utilities.one_hot_encoding import to_one_hot
def plot_images(img, img2=None):
"""
Plot at most 2 images.
Support passing in ndarray or image path string.
"""
fig = plt.figure(figsize=(20,10))
if isinstance(img, str): img = plt.imread(img)
if isinstance(img2, str): img2 = plt.imread(img2)
if img2 is None:
ax = fig.add_subplot(111)
ax.imshow(img)
else:
height, width = img.shape[0], img.shape[1]
if height < width:
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
else:
ax = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax.imshow(img)
ax2.imshow(img2)
plt.show()
def view_batch(imgs, lbls, labels=['image', 'label'], stack=False):
'''
imgs: [D, H, W, C], the depth or batch dimension should be the first.
'''
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.set_title(labels[0])
ax2.set_title(labels[1])
"""
if init with zeros, the animation may not update? seems bug in animation.
"""
if stack:
lbls = np.stack((lbls, imgs, imgs), -1)
img1 = ax1.imshow(np.random.rand(*imgs.shape[1:]))
img2 = ax2.imshow(np.random.rand(*lbls.shape[1:]))
def update(i):
plt.suptitle(str(i))
img1.set_data(imgs[i])
img2.set_data(lbls[i])
return img1, img2
ani = animation.FuncAnimation(fig, update, frames=len(imgs), interval=10, blit=False, repeat_delay=0)
plt.show()
def predict_save_to_queue(preprocess_fn, q, list_of_lists, output_files, segs_from_prev_stage, classes):
errors_in = []
for i, l in enumerate(list_of_lists):
try:
output_file = output_files[i]
print("preprocessing", output_file)
d, _, dct = preprocess_fn(l)
print(output_file, dct)
if segs_from_prev_stage[i] is not None:
assert isfile(segs_from_prev_stage[i]) and segs_from_prev_stage[i].endswith(".nii.gz"), "segs_from_prev_stage" \
" must point to a " \
"segmentation file"
seg_prev = sitk.GetArrayFromImage(sitk.ReadImage(segs_from_prev_stage[i]))
# check to see if shapes match
img = sitk.GetArrayFromImage(sitk.ReadImage(l[0]))
assert all([i == j for i, j in zip(seg_prev.shape, img.shape)]), "image and segmentation from previous " \
"stage don't have the same pixel array " \
"shape! image: %s, seg_prev: %s" % \
(l[0], segs_from_prev_stage[i])
seg_reshaped = resize_segmentation(seg_prev, d.shape[1:], order=1, cval=0)
seg_reshaped = to_one_hot(seg_reshaped, classes)
d = np.vstack((d, seg_reshaped)).astype(np.float32)
"""There is a problem with python process communication that prevents us from communicating obejcts
larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
filename or np.ndarray and will handle this automatically"""
print(d.shape)
if np.prod(d.shape) > (2e9 / 4 * 0.9):  # *0.9 just to be safe, 4 because float32 is 4 bytes
print(
"This output is too large for python process-process communication. "
"Saving output temporarily to disk")
np.save(output_file[:-7] + ".npy", d)
d = output_file[:-7] + ".npy"
q.put((output_file, (d, dct)))
except KeyboardInterrupt:
raise KeyboardInterrupt
except Exception as e:
print("error in", l)
print(e)
q.put("end")
if len(errors_in) > 0:
print("There were some errors in the following cases:", errors_in)
print("These cases were ignored.")
else:
print("This worker has ended successfully, no errors to report")
def preprocess_multithreaded(trainer, list_of_lists, output_files, num_processes=2, segs_from_prev_stage=None):
if segs_from_prev_stage is None:
segs_from_prev_stage = [None] * len(list_of_lists)
classes = list(range(1, trainer.num_classes))
assert isinstance(trainer, nnUNetTrainer)
q = Queue(1)
processes = []
for i in range(num_processes):
pr = Process(target=predict_save_to_queue, args=(trainer.preprocess_patient, q,
list_of_lists[i::num_processes],
output_files[i::num_processes],
segs_from_prev_stage[i::num_processes],
classes))
pr.start()
processes.append(pr)
try:
end_ctr = 0
while end_ctr != num_processes:
item = q.get()
if item == "end":
end_ctr += 1
continue
else:
yield item
finally:
for p in processes:
if p.is_alive():
p.terminate() # this should not happen but better safe than sorry right
p.join()
q.close()
def predict_cases(model, list_of_lists, output_filenames, folds, save_npz, num_threads_preprocessing,
num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True,
overwrite_existing=False, data_type='2d', modality=0):
assert len(list_of_lists) == len(output_filenames)
if segs_from_prev_stage is not None: assert len(segs_from_prev_stage) == len(output_filenames)
prman = Pool(num_threads_nifti_save)
results = []
cleaned_output_files = []
for o in output_filenames:
dr, f = os.path.split(o)
if len(dr) > 0:
maybe_mkdir_p(dr)
if not f.endswith(".nii.gz"):
f, _ = os.path.splitext(f)
f = f + ".nii.gz"
cleaned_output_files.append(join(dr, f))
if not overwrite_existing:
print("number of cases:", len(list_of_lists))
not_done_idx = [i for i, j in enumerate(cleaned_output_files) if not isfile(j)]
cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx]
list_of_lists = [list_of_lists[i] for i in not_done_idx]
if segs_from_prev_stage is not None:
segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx]
print("number of cases that still need to be predicted:", len(cleaned_output_files))
print("emptying cuda cache")
torch.cuda.empty_cache()
##################################
# Damn, finally find the model.
print("loading parameters for folds,", folds)
trainer, params = load_model_and_checkpoint_files(model, folds)
trainer.modality = modality
print("starting preprocessing generator")
preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, segs_from_prev_stage)
print("starting prediction...")
for preprocessed in preprocessing:
output_filename, (d, dct) = preprocessed
if isinstance(d, str):
data = np.load(d)
os.remove(d)
d = data
print("predicting", output_filename)
softmax = []
for p in params:
trainer.load_checkpoint_ram(p, False)
softmax.append(trainer.predict_preprocessed_data_return_softmax(d, do_tta, 1, False, 1,
trainer.data_aug_params['mirror_axes'],
True, True, 2, trainer.patch_size, True, data_type=data_type)[None])
softmax = np.vstack(softmax)
softmax_mean = np.mean(softmax, 0)
### View output
"""
output_ = softmax_mean.argmax(0)
target_ = d
if threading.current_thread() is threading.main_thread():
print("!!!output", output_.shape, target_.shape) # haw
matplotlib.use('TkAgg')
if len(target_.shape) == 4:
view_batch(output_, target_[0])
else:
plot_images(output_, target_[0])
"""
transpose_forward = trainer.plans.get('transpose_forward')
if transpose_forward is not None:
transpose_backward = trainer.plans.get('transpose_backward')
softmax_mean = softmax_mean.transpose([0] + [i + 1 for i in transpose_backward])
if save_npz:
npz_file = output_filename[:-7] + ".npz"
else:
npz_file = None
"""There is a problem with python process communication that prevents us from communicating obejcts
larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
filename or np.ndarray and will handle this automatically"""
if np.prod(softmax_mean.shape) > (2e9 / 4 * 0.9):  # *0.9 just to be safe
print("This output is too large for python process-process communication. Saving output temporarily to disk")
np.save(output_filename[:-7] + ".npy", softmax_mean)
softmax_mean = output_filename[:-7] + ".npy"
results.append(prman.starmap_async(save_segmentation_nifti_from_softmax,
((softmax_mean, output_filename, dct, 1, None, None, None, npz_file), )
))
_ = [i.get() for i in results]
def predict_from_folder(model, input_folder, output_folder, folds, save_npz, num_threads_preprocessing,
num_threads_nifti_save, lowres_segmentations, part_id, num_parts, tta,
overwrite_existing=True, data_type='2d', modality=0):
"""
here we use the standard naming scheme to generate list_of_lists and output_files needed by predict_cases
:param model: [HAW] why you call it model? it is but a path! (output_folder)
:param input_folder:
:param output_folder:
:param folds:
:param save_npz:
:param num_threads_preprocessing:
:param num_threads_nifti_save:
:param lowres_segmentations:
:param part_id:
:param num_parts:
:param tta:
:return:
"""
maybe_mkdir_p(output_folder)
#shutil.copy(join(model, 'plans.pkl'), output_folder)
case_ids = get_caseIDs_from_splitted_dataset_folder(input_folder)
output_files = [join(output_folder, i + ".nii.gz") for i in case_ids]
all_files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True)
list_of_lists = [[join(input_folder, i) for i in all_files if i[:len(j)].startswith(j) and
len(i) == (len(j) + 12)] for j in case_ids]
if lowres_segmentations is not None:
assert isdir(lowres_segmentations), "if lowres_segmentations is not None then it must point to a directory"
lowres_segmentations = [join(lowres_segmentations, i + ".nii.gz") for i in case_ids]
assert all([isfile(i) for i in lowres_segmentations]), "not all lowres_segmentations files are present. " \
"(I was searching for case_id.nii.gz in that folder)"
lowres_segmentations = lowres_segmentations[part_id::num_parts]
else:
lowres_segmentations = None
return predict_cases(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds, save_npz,
num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations,
tta, overwrite_existing=overwrite_existing,
data_type=data_type, modality=modality)
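# Illustrative call (hypothetical paths): predict_from_folder derives case IDs from
# CASENAME_XXXX.nii.gz files in input_folder and writes CASENAME.nii.gz predictions:
#
#   predict_from_folder("/models/Task04", "/data/imagesTs", "/data/preds", folds=None,
#                       save_npz=False, num_threads_preprocessing=6, num_threads_nifti_save=2,
#                       lowres_segmentations=None, part_id=0, num_parts=1, tta=True)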
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", '--input_folder', help="Must contain all modalities for each patient in the correct"
" order (same as training). Files must be named "
"CASENAME_XXXX.nii.gz where XXXX is the modality "
"identifier (0000, 0001, etc)", required=True)
parser.add_argument('-o', "--output_folder", required=True, help="folder for saving predictions")
parser.add_argument('-m', '--model_output_folder', help='model output folder. Will automatically discover the folds '
'that were '
'run and use those as an ensemble', required=True)
parser.add_argument('-f', '--folds', nargs='+', default='None', help="folds to use for prediction. Default is None "
"which means that folds will be detected "
"automatically in the model output folder")
parser.add_argument('-z', '--save_npz', required=False, action='store_true', help="use this if you want to ensemble"
" these predictions with those of"
" other models. Softmax "
"probabilities will be saved as "
"compresed numpy arrays in "
"output_folder and can be merged "
"between output_folders with "
"merge_predictions.py")
parser.add_argument('-l', '--lowres_segmentations', required=False, default='None', help="if model is the highres "
"stage of the cascade then you need to use -l to specify where the segmentations of the "
"corresponding lowres unet are. Here they are required to do a prediction")
parser.add_argument("--part_id", type=int, required=False, default=0, help="Used to parallelize the prediction of "
"the folder over several GPUs. If you "
"want to use n GPUs to predict this "
"folder you need to run this command "
"n times with --part_id=0, ... n-1 and "
"--num_parts=n (each with a different "
"GPU (for example via "
"CUDA_VISIBLE_DEVICES=X)")
parser.add_argument("--num_parts", type=int, required=False, default=1, help="Used to parallelize the prediction of "
"the folder over several GPUs. If you "
"want to use n GPUs to predict this "
"folder you need to run this command "
"n times with --part_id=0, ... n-1 and "
"--num_parts=n (each with a different "
"GPU (via "
"CUDA_VISIBLE_DEVICES=X)")
parser.add_argument("--num_threads_preprocessing", required=False, default=6, type=int, help=
"Determines many background processes will be used for data preprocessing. Reduce this if you "
"run into out of memory (RAM) problems. Default: 6")
parser.add_argument("--num_threads_nifti_save", required=False, default=2, type=int, help=
"Determines many background processes will be used for segmentation export. Reduce this if you "
"run into out of memory (RAM) problems. Default: 2")
parser.add_argument("--tta", required=False, type=int, default=1, help="Set to 0 to disable test time data "
"augmentation (speedup of factor "
"4(2D)/8(3D)), "
"lower quality segmentations")
parser.add_argument("--overwrite_existing", required=False, type=int, default=1, help="Set this to 0 if you need "
"to resume a previous "
"prediction. Default: 1 "
"(=existing segmentations "
"in output_folder will be "
"overwritten)")
args = parser.parse_args()
input_folder = args.input_folder
output_folder = args.output_folder
part_id = args.part_id
num_parts = args.num_parts
model = args.model_output_folder
folds = args.folds
save_npz = args.save_npz
lowres_segmentations = args.lowres_segmentations
num_threads_preprocessing = args.num_threads_preprocessing
num_threads_nifti_save = args.num_threads_nifti_save
tta = args.tta
overwrite = args.overwrite_existing
if lowres_segmentations == "None":
lowres_segmentations = None
if isinstance(folds, list):
if folds[0] == 'all' and len(folds) == 1:
pass
else:
folds = [int(i) for i in folds]
elif folds == "None":
folds = None
else:
raise ValueError("Unexpected value for argument folds")
if tta == 0:
tta = False
elif tta == 1:
tta = True
else:
raise ValueError("Unexpected value for tta, Use 1 or 0")
if overwrite == 0:
overwrite = False
elif overwrite == 1:
overwrite = True
else:
raise ValueError("Unexpected value for overwrite, Use 1 or 0")
predict_from_folder(model, input_folder, output_folder, folds, save_npz, num_threads_preprocessing,
num_threads_nifti_save, lowres_segmentations, part_id, num_parts, tta,
overwrite_existing=overwrite)
|
nilq/baby-python
|
python
|
import os
ps_user = "sample"
ps_password = "sample"
|
nilq/baby-python
|
python
|
# encoding: UTF-8
'''
Gateway adapter for vn.lts (LTS securities counter API).
'''
import os
import json
from vnltsmd import MdApi
from vnltstd import TdApi
from vnltsqry import QryApi
from ltsDataType import *
from vtGateway import *
# Mapping dictionaries between VT types and LTS types
# Price type mapping
priceTypeMap = {}
priceTypeMap[PRICETYPE_LIMITPRICE] = defineDict["SECURITY_FTDC_OPT_LimitPrice"]
priceTypeMap[PRICETYPE_MARKETPRICE] = defineDict["SECURITY_FTDC_OPT_AnyPrice"]
priceTypeMap[PRICETYPE_FAK] = defineDict["SECURITY_FTDC_OPT_BestPrice"]
priceTypeMap[PRICETYPE_FOK] = defineDict["SECURITY_FTDC_OPT_AllLimitPrice"]
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
# Direction type mapping
directionMap = {}
directionMap[DIRECTION_LONG] = defineDict["SECURITY_FTDC_D_Buy"]
directionMap[DIRECTION_SHORT] = defineDict["SECURITY_FTDC_D_Sell"]
directionMapReverse = {v: k for k, v in directionMap.items()}
# Offset (open/close) type mapping
offsetMap = {}
offsetMap[OFFSET_OPEN] = defineDict["SECURITY_FTDC_OF_Open"]
offsetMap[OFFSET_CLOSE] = defineDict["SECURITY_FTDC_OF_Close"]
offsetMap[OFFSET_CLOSETODAY] = defineDict["SECURITY_FTDC_OF_CloseToday"]
offsetMap[OFFSET_CLOSEYESTERDAY] = defineDict["SECURITY_FTDC_OF_CloseYesterday"]
offsetMapReverse = {v:k for k,v in offsetMap.items()}
# Exchange type mapping
exchangeMap = {}
exchangeMap[EXCHANGE_SSE] = 'SSE'
exchangeMap[EXCHANGE_SZSE] = 'SZE'
exchangeMapReverse = {v:k for k,v in exchangeMap.items()}
# Position direction type mapping
posiDirectionMap = {}
posiDirectionMap[DIRECTION_NET] = defineDict["SECURITY_FTDC_PD_Net"]
posiDirectionMap[DIRECTION_LONG] = defineDict["SECURITY_FTDC_PD_Long"]
posiDirectionMap[DIRECTION_SHORT] = defineDict["SECURITY_FTDC_PD_Short"]
posiDirectionMapReverse = {v:k for k,v in posiDirectionMap.items()}
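# Illustrative helper (not part of the original gateway): each forward map translates a VT
# constant to an LTS code, and the corresponding *Reverse dict inverts it, so a round trip
# returns the original constant. A minimal sketch, assuming the dictionaries defined above.
def _demo_mapping_roundtrip():
    lts_code = directionMap[DIRECTION_LONG]                  # VT -> LTS
    assert directionMapReverse[lts_code] == DIRECTION_LONG   # LTS -> VT
    return lts_code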
########################################################################################
class LtsGateway(VtGateway):
"""Lts接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='LTS'):
"""Constructor"""
super(LtsGateway, self).__init__(eventEngine, gatewayName)
self.mdApi = LtsMdApi(self)
self.tdApi = LtsTdApi(self)
self.qryApi = LtsQryApi(self)
self.mdConnected = False
self.tdConnected = False
self.qryConnected = False
self.qryEnabled = False # whether to run the polling query loop
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入json 文件
fileName = self.gatewayName + '_connect.json'
fileName = os.getcwd() + '\\ltsGateway\\' + fileName
try:
f = file(fileName)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'读取连接配置出错,请检查'
self.onLog(log)
return
# 解析json文件
setting = json.load(f)
try:
userID = str(setting['userID'])
mdPassword = str(setting['mdPassword'])
tdPassword = str(setting['tdPassword'])
brokerID = str(setting['brokerID'])
tdAddress = str(setting['tdAddress'])
mdAddress = str(setting['mdAddress'])
qryAddress = str(setting['qryAddress'])
productInfo = str(setting['productInfo'])
authCode = str(setting['authCode'])
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'连接配置缺少字段,请检查'
self.onLog(log)
return
# 创建行情和交易接口对象
self.mdApi.connect(userID, mdPassword, brokerID, mdAddress)
self.tdApi.connect(userID, tdPassword, brokerID, tdAddress, productInfo, authCode)
self.qryApi.connect(userID, tdPassword, brokerID, qryAddress, productInfo, authCode)
# 初始化并启动查询
self.initQuery()
self.startQuery()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
self.mdApi.subscribe(subscribeReq)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
return self.tdApi.sendOrder(orderReq)
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.tdApi.cancelOrder(cancelOrderReq)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
self.qryApi.qryAccount()
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
self.qryApi.qryPosition()
#----------------------------------------------------------------------
def close(self):
"""关闭"""
if self.mdConnected:
self.mdApi.close()
if self.tdConnected:
self.tdApi.close()
if self.qryConnected:
self.qryApi.close()
#----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
# 需要循环的查询函数列表
self.qryFunctionList = [self.qryAccount, self.qryPosition]
self.qryCount = 0 # 查询触发倒计时
self.qryTrigger = 2 # 查询触发点
self.qryNextFunction = 0 # 上次运行的查询函数索引
self.startQuery()
#----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
# 清空倒计时
self.qryCount = 0
# 执行查询函数
function = self.qryFunctionList[self.qryNextFunction]
function()
# 计算下次查询函数的索引,如果超过了列表长度,则重新设为0
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
#----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
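# Minimal standalone sketch (illustrative only) of the throttled round-robin polling pattern
# implemented by initQuery/query above: a counter gates a recurring timer event, and the
# function index wraps around the list so account and position queries alternate.
class RoundRobinPoller(object):
    def __init__(self, functions, trigger=2):
        self.functions = functions   # callables to run in rotation
        self.trigger = trigger       # timer events between runs
        self.count = 0
        self.next_index = 0

    def on_timer(self):
        self.count += 1
        if self.count > self.trigger:
            self.count = 0
            self.functions[self.next_index]()
            self.next_index = (self.next_index + 1) % len(self.functions)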
########################################################################
class LtsMdApi(MdApi):
"""Lts行情API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(LtsMdApi, self).__init__()
self.gateway = gateway # gateway object
self.gatewayName = gateway.gatewayName # gateway object name
self.reqID = EMPTY_INT # request sequence number
self.connectionStatus = False # connection status
self.loginStatus = False # login status
self.subscribedSymbols = set()
self.userID = EMPTY_STRING # account ID
self.password = EMPTY_STRING # password
self.brokerID = EMPTY_STRING # broker ID
self.address = EMPTY_STRING # server address
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接成功'
self.gateway.onLog(log)
self.login()
#----------------------------------------------------------------------
def onFrontDisconnected(self,n):
"""服务器断开"""
self.connectionStatus= False
self.loginStatus = False
self.gateway.mdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
"""心跳报警"""
pass
#----------------------------------------------------------------------
def onRspError(self,error,n,last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = True
self.gateway.mdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登录完成'
self.gateway.onLog(log)
# 重新订阅之前订阅的合约
for subscribeReq in self.subscribedSymbols:
self.subscribe(subscribeReq)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'行情服务器登出完成'
self.gateway.onLog(log)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspSubMarketData(self, data, error, n, last):
"""订阅合约回报"""
# 通常不在乎订阅错误,选择忽略
pass
#----------------------------------------------------------------------
def onRspUnSubMarketData(self, data, error, n, last):
"""退订合约回报"""
# 同上
pass
#----------------------------------------------------------------------
def onRtnDepthMarketData(self, data):
"""行情推送"""
tick = VtTickData()
tick.gatewayName = self.gatewayName
tick.symbol = data['InstrumentID']
tick.exchange = exchangeMapReverse.get(data['ExchangeID'], u'unknown')
tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])
tick.lastPrice = data['LastPrice']
tick.volume = data['Volume']
tick.openInterest = data['OpenInterest']
tick.time = '.'.join([data['UpdateTime'], str(data['UpdateMillisec']/100)])
tick.date = data['TradingDay']
tick.openPrice = data['OpenPrice']
tick.highPrice = data['HighestPrice']
tick.lowPrice = data['LowestPrice']
tick.preClosePrice = data['PreClosePrice']
tick.upperLimit = data['UpperLimitPrice']
tick.lowerLimit = data['LowerLimitPrice']
# LTS provides five levels of market depth
tick.bidPrice1 = data['BidPrice1']
tick.bidVolume1 = data['BidVolume1']
tick.askPrice1 = data['AskPrice1']
tick.askVolume1 = data['AskVolume1']
tick.bidPrice2 = data['BidPrice2']
tick.bidVolume2 = data['BidVolume2']
tick.askPrice2 = data['AskPrice2']
tick.askVolume2 = data['AskVolume2']
tick.bidPrice3 = data['BidPrice3']
tick.bidVolume3 = data['BidVolume3']
tick.askPrice3 = data['AskPrice3']
tick.askVolume3 = data['AskVolume3']
tick.bidPrice4 = data['BidPrice4']
tick.bidVolume4 = data['BidVolume4']
tick.askPrice4 = data['AskPrice4']
tick.askVolume4 = data['AskVolume4']
tick.bidPrice5 = data['BidPrice5']
tick.bidVolume5 = data['BidVolume5']
tick.askPrice5 = data['AskPrice5']
tick.askVolume5 = data['AskVolume5']
self.gateway.onTick(tick)
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
# 创建C++环境中的API对象,这里传入的参数是需要用来保存.con文件的文件夹路径
path = os.getcwd() + '\\temp\\' + self.gatewayName + '\\'
if not os.path.exists(path):
os.makedirs(path)
self.createFtdcMdApi(path)
# 注册服务器地址
self.registerFront(self.address)
# 初始化连接,成功会调用onFrontConnected
self.init()
# 若已经连接但尚未登录,则进行登录
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅合约"""
req = {}
req['InstrumentID'] = str(subscribeReq.symbol)
req['ExchangeID'] = exchangeMap.get(str(subscribeReq.exchange), '')
# 这里的设计是,如果尚未登录就调用了订阅方法
# 则先保存订阅请求,登录完成后会自动订阅
if self.loginStatus:
self.subscribeMarketData(req)
self.subscribedSymbols.add(subscribeReq)
#----------------------------------------------------------------------
def login(self):
"""登录"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
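# Minimal sketch (illustrative only) of the deferred-subscription pattern used by
# LtsMdApi.subscribe above: requests made before login are remembered and replayed
# once login completes, which also covers re-subscription after a reconnect.
class DeferredSubscriber(object):
    def __init__(self):
        self.logged_in = False
        self.pending = set()

    def subscribe(self, symbol):
        self.pending.add(symbol)      # always remember, so reconnects re-subscribe
        if self.logged_in:
            self._send(symbol)

    def on_login(self):
        self.logged_in = True
        for symbol in self.pending:   # replay everything remembered so far
            self._send(symbol)

    def _send(self, symbol):
        print("subscribing " + symbol)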
########################################################################
class LtsTdApi(TdApi):
"""LTS交易API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""API对象的初始化函数"""
super(LtsTdApi, self).__init__()
self.gateway = gateway # gateway object
self.gatewayName = gateway.gatewayName # gateway object name
self.reqID = EMPTY_INT # request sequence number
self.orderRef = EMPTY_INT # order reference number
self.connectionStatus = False # connection status
self.loginStatus = False # login status
self.userID = EMPTY_STRING # account ID
self.password = EMPTY_STRING # password
self.brokerID = EMPTY_STRING # broker ID
self.address = EMPTY_STRING # server address
self.productInfo = EMPTY_STRING # client product name
self.authCode = EMPTY_STRING # authentication code
self.randCode = EMPTY_STRING # random code
self.frontID = EMPTY_INT # front server ID
self.sessionID = EMPTY_INT # session ID
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器连接成功'
self.gateway.onLog(log)
# 前置机连接后,请求随机码
self.reqID += 1
self.reqFetchAuthRandCode({}, self.reqID)
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'Trading server disconnected'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.frontID = str(data['FrontID'])
self.sessionID = str(data['SessionID'])
self.loginStatus = True
self.gateway.mdConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登录完成'
self.gateway.onLog(log)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gateway
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'交易服务器登出完成'
self.gateway.onLog(log)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFetchAuthRandCode(self, data, error, n, last):
"""请求随机认证码"""
self.randCode = data['RandCode']
self.login()
#----------------------------------------------------------------------
def onRspUserPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspOrderInsert(self, data, error, n, last):
"""发单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspOrderAction(self, data, error, n, last):
"""撤单错误(柜台)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRtnOrder(self, data):
"""报单回报"""
# 更新最大报单编号
newref = data['OrderRef']
self.orderRef = max(self.orderRef, int(newref))
# 创建报单数据对象
order = VtOrderData()
order.gatewayName = self.gatewayName
# 保存代码和报单号
order.symbol = data['InstrumentID']
order.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
order.vtSymbol = '.'.join([order.symbol, order.exchange])
order.orderID = data['OrderRef']
# 方向
if data['Direction'] == '0':
order.direction = DIRECTION_LONG
elif data['Direction'] == '1':
order.direction = DIRECTION_SHORT
else:
order.direction = DIRECTION_UNKNOWN
# 开平
if data['CombOffsetFlag'] == '0':
order.offset = OFFSET_OPEN
elif data['CombOffsetFlag'] == '1':
order.offset = OFFSET_CLOSE
else:
order.offset = OFFSET_UNKNOWN
# 状态
if data['OrderStatus'] == '0':
order.status = STATUS_ALLTRADED
elif data['OrderStatus'] == '1':
order.status = STATUS_PARTTRADED
elif data['OrderStatus'] == '3':
order.status = STATUS_NOTTRADED
elif data['OrderStatus'] == '5':
order.status = STATUS_CANCELLED
else:
order.status = STATUS_UNKNOWN
# Numeric fields: price, volumes, times
order.price = float(data['LimitPrice'])
order.totalVolume = data['VolumeTotalOriginal']
order.tradedVolume = data['VolumeTraded']
order.orderTime = data['InsertTime']
order.cancelTime = data['CancelTime']
order.frontID = data['FrontID']
order.sessionID = data['SessionID']
# Order ID consistency must be maintained from frontID, sessionID and orderID together (a convention inherited from CTP)
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
# Push the order event
self.gateway.onOrder(order)
#----------------------------------------------------------------------
def onRtnTrade(self, data):
"""成交回报"""
# 创建报单数据对象
trade = VtTradeData()
trade.gatewayName = self.gatewayName
# 保存代码和报单号
trade.symbol = data['InstrumentID']
trade.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
trade.vtSymbol = '.'.join([trade.symbol, trade.exchange])
trade.tradeID = data['TradeID']
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = data['OrderRef']
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
# 方向
trade.direction = directionMapReverse.get(data['Direction'], '')
# 开平
trade.offset = offsetMapReverse.get(data['OffsetFlag'], '')
# 价格、报单量等数值
trade.price = float(data['Price'])
trade.volume = data['Volume']
trade.tradeTime = data['TradeTime']
# 推送
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onErrRtnOrderInsert(self, data, error):
"""发单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFundOutByLiber(self, data, error, n, last):
"""LTS发起出金应答"""
pass
#----------------------------------------------------------------------
def onRtnFundOutByLiber(self, data):
"""LTS发起出金通知"""
pass
#----------------------------------------------------------------------
def onErrRtnFundOutByLiber(self, data, error):
"""LTS发起出金错误回报"""
pass
#----------------------------------------------------------------------
def onRtnFundInByBank(self, data):
"""银行发起入金通知"""
pass
#----------------------------------------------------------------------
def onRspFundInterTransfer(self, data, error, n, last):
"""资金内转应答"""
pass
#----------------------------------------------------------------------
def onRtnFundInterTransferSerial(self, data):
"""资金内转流水通知"""
pass
#----------------------------------------------------------------------
def onErrRtnFundInterTransfer(self, data, error):
"""资金内转错误回报"""
pass
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address, productInfo, authCode):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
self.productInfo = productInfo
self.authCode = authCode
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
# 创建C++环境中的API对象,这里传入的参数是需要用来保存.con文件的文件夹路径
path = os.getcwd() + '\\temp\\' + self.gatewayName + '\\'
if not os.path.exists(path):
os.makedirs(path)
self.createFtdcTraderApi(path)
# 注册服务器地址
self.registerFront(self.address)
# 初始化连接,成功会调用onFrontConnected
self.init()
# 若已经连接但尚未登录,则进行登录
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def login(self):
"""连接服务器"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
req['UserProductInfo'] = self.productInfo
req['AuthCode'] = self.authCode
req['RandCode'] = self.randCode
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
self.reqID += 1
self.orderRef += 1
req = {}
req['InstrumentID'] = str(orderReq.symbol)
req['LimitPrice'] = str(orderReq.price) # LTS里的价格是字符串
req['VolumeTotalOriginal'] = int(orderReq.volume)
req['ExchangeID'] = exchangeMap.get(orderReq.exchange, '')
# 下面如果由于传入的类型本接口不支持,则会返回空字符串
try:
req['OrderPriceType'] = priceTypeMap[orderReq.priceType]
req['Direction'] = directionMap[orderReq.direction]
req['CombOffsetFlag'] = offsetMap[orderReq.offset]
req['ExchangeID'] = exchangeMap[orderReq.exchange]
except KeyError:
return ''
req['OrderRef'] = str(self.orderRef)
req['InvestorID'] = self.userID
req['UserID'] = self.userID
req['BrokerID'] = self.brokerID
req['CombHedgeFlag'] = defineDict['SECURITY_FTDC_HF_Speculation'] # speculative order
req['ContingentCondition'] = defineDict['SECURITY_FTDC_CC_Immediately'] # send immediately
req['ForceCloseReason'] = defineDict['SECURITY_FTDC_FCC_NotForceClose'] # not a forced liquidation
req['IsAutoSuspend'] = 0 # do not auto-suspend
req['TimeCondition'] = defineDict['SECURITY_FTDC_TC_GFD'] # good for day
req['VolumeCondition'] = defineDict['SECURITY_FTDC_VC_AV'] # any volume
req['MinVolume'] = 1 # minimum fill volume of 1
req['UserForceClose'] = 0
self.reqOrderInsert(req, self.reqID)
# Return the order ID (string) so that strategies can manage the order dynamically
vtOrderID = '.'.join([self.gatewayName, str(self.orderRef)])
return vtOrderID
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.reqID += 1
req = {}
req['InstrumentID'] = cancelOrderReq.symbol
req['ExchangeID'] = cancelOrderReq.exchange
req['OrderRef'] = cancelOrderReq.orderID
req['FrontID'] = cancelOrderReq.frontID
req['SessionID'] = cancelOrderReq.sessionID
req['ActionFlag'] = defineDict['SECURITY_FTDC_AF_Delete']
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqOrderAction(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
########################################################################
class LtsQryApi(QryApi):
"""Lts账户查询实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""API对象的初始化函数"""
super(LtsQryApi, self).__init__()
self.gateway = gateway # gateway object
self.gatewayName = gateway.gatewayName # gateway object name
self.reqID = EMPTY_INT # request sequence number
self.orderRef = EMPTY_INT # order reference number
self.connectionStatus = False # connection status
self.loginStatus = False # login status
self.userID = EMPTY_STRING # account ID
self.password = EMPTY_STRING # password
self.brokerID = EMPTY_STRING # broker ID
self.address = EMPTY_STRING # server address
self.productInfo = EMPTY_STRING # client product name
self.authCode = EMPTY_STRING # authentication code
self.randCode = EMPTY_STRING # random code
self.frontID = EMPTY_INT # front server ID
self.sessionID = EMPTY_INT # session ID
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器连接成功'
self.gateway.onLog(log)
# 前置机连接后,请求随机码
self.reqID += 1
self.reqFetchAuthRandCode({}, self.reqID)
#----------------------------------------------------------------------
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connectionStatus = False
self.loginStatus = False
self.gateway.tdConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器连接断开'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onHeartBeatWarning(self, n):
""""""
pass
#----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.frontID = str(data['FrontID'])
self.sessionID = str(data['SessionID'])
self.loginStatus = True
self.gateway.qryConnected = True
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器登录完成'
self.gateway.onLog(log)
# 查询合约代码
self.reqID += 1
self.reqQryInstrument({}, self.reqID)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gateway
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.qryConnected = False
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'查询服务器登出完成'
self.gateway.onLog(log)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRspFetchAuthRandCode(self, data, error, n, last):
"""请求随机认证码"""
self.randCode = data['RandCode']
self.login()
#----------------------------------------------------------------------
def onRspQryExchange(self, data, error, n, last):
pass
#----------------------------------------------------------------------
def onRspQryInstrument(self, data, error, n, last):
"""合约查询回报"""
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = data['InstrumentID']
contract.exchange = exchangeMapReverse[data['ExchangeID']]
contract.vtSymbol = '.'.join([contract.symbol, contract.exchange])
contract.name = data['InstrumentName'].decode('GBK')
# Contract numeric fields
contract.size = data['VolumeMultiple']
contract.priceTick = data['PriceTick']
contract.strikePrice = data['ExecPrice']
contract.underlyingSymbol = data['MarketID']
# Product class
if data['ProductClass'] == '1':
contract.productClass = PRODUCT_FUTURES
elif data['ProductClass'] == '2':
contract.productClass = PRODUCT_OPTION
elif data['ProductClass'] == '3':
contract.productClass = PRODUCT_COMBINATION
elif data['ProductClass'] == '6':
contract.productClass = PRODUCT_EQUITY
elif data['ProductClass'] == '8':
contract.productClass = PRODUCT_EQUITY
else:
print(data['ProductClass'])
# Option type
if data['InstrumentType'] == '1':
contract.optionType = OPTION_CALL
elif data['InstrumentType'] == '2':
contract.optionType = OPTION_PUT
# Push the contract event
self.gateway.onContract(contract)
if last:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'Contract information retrieval completed'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onRspQryInvestor(self, data, error, n, last):
"""投资者查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryTradingCode(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTradingAccount(self, data, error, n, last):
"""资金账户查询回报"""
account = VtAccountData()
account.gatewayName = self.gatewayName
# 账户代码
account.accountID = data['AccountID']
account.vtAccountID = '.'.join([self.gatewayName, account.accountID])
# 数值相关
account.preBalance = data['PreBalance']
account.available = data['Available']
account.commission = data['Commission']
account.margin = data['CurrMargin']
#account.closeProfit = data['CloseProfit']
#account.positionProfit = data['PositionProfit']
# 这里的balance和快期中的账户不确定是否一样,需要测试
account.balance = data['Balance']
# 推送
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onRspQryBondInterest(self, data, error, n, last):
"""债券利息查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryMarketRationInfo(self, data, error, n, last):
"""市值配售查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryInstrumentCommissionRate(self, data, error, n, last):
"""合约手续费查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryETFInstrument(self, data, error, n, last):
"""ETF基金查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryETFBasket(self, data, error, n, last):
"""ETF股票篮查询回报"""
pass
#----------------------------------------------------------------------
def onRspQryOFInstrument(self, data, error, n, last):
"""OF合约查询回报"""
pass
#----------------------------------------------------------------------
def onRspQrySFInstrument(self, data, error, n, last):
"""SF合约查询回报"""
event1 = Event(type_=EVENT_LTS_SF)
event1.dict_['data'] = data
self.gateway.eventEngine.put(event1)
symbol = data['InstrumentID']
exchange = exchangeMapReverse[data['ExchangeID']]
vtSymbol = '.'.join([symbol, exchange])
event2 = Event(type_=EVENT_LTS_SF + vtSymbol)
event2.dict_['data'] = data
self.gateway.eventEngine.put(event2)
#----------------------------------------------------------------------
def onRspQryInstrumentUnitMargin(self, data, error, n, last):
"""查询单手保证金"""
pass
#----------------------------------------------------------------------
def onRspQryPreDelivInfo(self, data, error, n , last):
"""查询预交割信息"""
pass
#----------------------------------------------------------------------
def onRsyQryCreditStockAssignInfo(self, data, error, n, last):
"""查询可融券分配"""
pass
#----------------------------------------------------------------------
def onRspQryCreditCashAssignInfo(self, data, error, n , last):
"""查询可融资分配"""
pass
#----------------------------------------------------------------------
def onRsyQryConversionRate(self, data, error, n, last):
"""查询证券这算率"""
pass
#----------------------------------------------------------------------
def onRspQryHisCreditDebtInfo(self,data, error, n, last):
"""查询历史信用负债"""
pass
#----------------------------------------------------------------------
def onRspQryMarketDataStaticInfo(self, data, error, n, last):
"""查询行情静态信息"""
pass
#----------------------------------------------------------------------
def onRspQryExpireRepurchInfo(self, data, error, n, last):
"""查询到期回购信息响应"""
pass
#----------------------------------------------------------------------
def onRspQryBondPledgeRate(self, data, error, n, last):
"""查询债券质押为标准券比例"""
pass
#----------------------------------------------------------------------
def onRspQryPledgeBond(self, data, error, n, last):
"""查询债券质押代码对照关系"""
pass
#----------------------------------------------------------------------
def onRspQryOrder(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryTrade(self, data, error, n, last):
""""""
pass
#----------------------------------------------------------------------
def onRspQryInvestorPosition(self, data, error, n, last):
"""持仓查询回报"""
pos = VtPositionData()
pos.gatewayName = self.gatewayName
# 保存代码
pos.symbol = data['InstrumentID']
pos.exchange = exchangeMapReverse.get(data['ExchangeID'], '')
pos.vtSymbol = '.'.join([pos.symbol, pos.exchange])
# 方向和持仓冻结数量
pos.direction = posiDirectionMapReverse.get(data['PosiDirection'], '')
if pos.direction == DIRECTION_NET or pos.direction == DIRECTION_LONG:
pos.frozen = data['LongFrozen']
elif pos.direction == DIRECTION_SHORT:
pos.frozen = data['ShortFrozen']
# 持仓量
pos.position = data['Position']
pos.ydPosition = data['YdPosition']
# 持仓均价
if pos.position:
pos.price = data['PositionCost'] / pos.position
# VT系统持仓名
pos.vtPositionName = '.'.join([pos.vtSymbol, pos.direction])
# 推送
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def OnRspQryFundTransferSerial(self, data, error, n, last):
"""资金转账查询"""
pass
#----------------------------------------------------------------------
def onRspQryFundInterTransferSerial(self, data, error,n, last):
"""资金内转流水查询"""
pass
#----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address, productInfo, authCode):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
self.productInfo = productInfo
self.authCode = authCode
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
# 创建C++环境中的API对象,这里传入的参数是需要用来保存.con文件的文件夹路径
path = os.getcwd() + '\\temp\\' + self.gatewayName + '\\'
if not os.path.exists(path):
os.makedirs(path)
self.createFtdcQueryApi(path)
# 注册服务器地址
self.registerFront(self.address)
# 初始化连接,成功会调用onFrontConnected
self.init()
# 若已经连接但尚未登录,则进行登录
else:
if not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def login(self):
"""连接服务器"""
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = self.password
req['BrokerID'] = self.brokerID
req['UserProductInfo'] = self.productInfo
req['AuthCode'] = self.authCode
req['RandCode'] = self.randCode
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户"""
self.reqID += 1
#是否需要INVESTERID, BROKERID?
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryTradingAccount(req, self.reqID)
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
self.reqID += 1
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryInvestorPosition(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
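# Illustrative wiring (hypothetical; assumes vn.py's event engine module is importable as shown):
#
#   from eventEngine import EventEngine
#   ee = EventEngine()
#   gateway = LtsGateway(ee)
#   gateway.setQryEnabled(True)
#   gateway.connect()   # reads LTS_connect.json from the ltsGateway folder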
|
nilq/baby-python
|
python
|