text stringlengths 38 1.54M |
|---|
def subset_sum(in_list, target, start, end):
    """Search for a contiguous run of in_list summing to target.

    Starting at index ``end``, keep extending the run one element at a
    time, subtracting each element from the remaining target.  Returns
    the span as a '<first>,<last>' string, or '-1,-1' when the running
    sum overshoots or the list is exhausted.
    """
    remaining = target
    idx = end
    while True:
        if remaining == 0:
            return '{0},{1}'.format(start, idx - 1)
        if remaining < 0 or idx == len(in_list):
            return '-1,-1'
        remaining -= in_list[idx]
        idx += 1
def solution(l, t):
    """Find the first contiguous run in ``l`` summing to ``t``.

    Tries every start index in order and returns the earliest match as
    a '<first>,<last>' string, or '-1,-1' if no run sums to ``t``.
    """
    # The original used enumerate() but never used the element itself,
    # only the index -- iterate over indices directly.
    for i in range(len(l)):
        ret = subset_sum(l, t, i, i)
        if ret != '-1,-1':
            return ret
    return '-1,-1'
# Quick manual checks: the first should print '2,3' (10 + 2 == 12),
# the second '-1,-1' (no contiguous run of [1, 2, 3, 4] sums to 15).
print(solution([4, 3, 10, 2, 8], 12))
print(solution([1, 2, 3, 4], 15))
|
#!/usr/bin/env python
#original: https://github.com/haroldsultan/MCTS/blob/master/mcts.py
import math
import hashlib
import rospy
import logging
import argparse
import numpy as np
import matplotlib.pyplot as plt
import random
import scipy.stats as stats
from dt_comm.enums import Ground
import time
costMat = np.zeros((100,100,100))
class mctsPlanner():
    """Monte-Carlo tree search (UCT) planner for a duckiebot.

    Repeatedly simulates steering-move sequences from the observed
    pose, scoring leaf states with ground type, collision probability
    and progress incentives, then returns the best path found within
    the iteration budget.  Adapted from
    https://github.com/haroldsultan/MCTS/blob/master/mcts.py
    """
    def __init__(self, predictor, agent_params, sim_params, our_duckie_params, reward_params):
        # Collision-probability predictor queried when scoring states.
        self.predictor = predictor
        self.dt = sim_params["dt"]
        self.time_horizon = agent_params["time_horizon"]
        self.road_width = sim_params["road_width"]
        self.v = our_duckie_params["velocity"]
        self.radius = our_duckie_params["radius"]
        self.angle_change_limit = our_duckie_params["angle_change_limit"]
        # Maps Ground enum values to rewards (used in GETREWARD).
        self.reward_params = reward_params
        # UCT exploration constant.
        self.SCALAR = agent_params["scalar"]
        # Number of search iterations per call to computePlan.
        self.budget = agent_params["budget"]
        # self.number_time_steps = int(self.time_horizon/self.dt)
        self.number_time_steps = agent_params["time_steps"]
    def computePlan(self, goal, obs_msg):
        """Run the search from the observed pose.

        ``goal`` is stored but currently only used by the placeholder
        goal_reward.  Returns the tuple produced by bestPath.
        """
        self.goal = goal
        levels = 1
        current_node = Node(State(turn = self.number_time_steps, angle_change_limit = self.angle_change_limit))
        # Seed the root state with the observed pose.
        current_node.state.x = obs_msg.our_duckie_pose.x
        current_node.state.y = obs_msg.our_duckie_pose.y
        current_node.state.cum_angle = obs_msg.our_duckie_pose.theta
        self.startx = current_node.state.x
        self.starty = current_node.state.y
        self.start_time = round(obs_msg.our_duckie_pose.time, 2)
        self.start_theta = current_node.state.cum_angle
        for l in range(levels):
            current_node=self.UCTSEARCH(self.budget,current_node)
        return self.bestPath(current_node)
    def bestPath(self, node):
        """Greedily walk the tree (exploration weight 0) from ``node``
        collecting per-step diagnostics.

        Returns (path, angles, moves, collision, rewards, visits,
        lost_candidates, lost_can_collisions); ``moves`` ends up as the
        deepest visited node's move sequence.
        """
        path = []
        angles = []
        moves = []
        collision = []
        rewards = []
        visits = []
        lost_candidates = []
        lost_can_collisions = []
        while node is not None:
            path.append([node.state.x, node.state.y])
            angles.append(node.state.cum_angle)
            collision.append(node.state.collision_cost)
            rewards.append(node.reward/(node.visits *1.0) )
            # Overwritten each step: keeps only the deepest move list.
            moves = node.state.moves
            visits.append(node.visits)
            node, candidates, lost_collisions = self.BESTCHILD_FINAL(node, 0)
            lost_candidates.append(candidates)
            lost_can_collisions.append(lost_collisions)
        return path, angles, moves, collision, rewards, visits, lost_candidates, lost_can_collisions
    def UCTSEARCH(self, budget, root):
        """Standard UCT loop: select/expand, evaluate, back-propagate."""
        for iter in range(int(budget)):
            # rospy.loginfo(root)
            front = self.TREEPOLICY(root)
            reward = self.GETREWARD(front.state)
            self.BACKUP(front,reward)
        return root
        # return self.BESTCHILD(root,0)
    def TREEPOLICY(self, node):
        """Descend the tree, expanding unexplored moves along the way."""
        #a hack to force 'exploitation' in a game where there are many options, and you may never/not want to fully expand first
        while node.state.terminal() == False:
            if len(node.children) == 0:
                return self.EXPAND(node)
            elif random.uniform(0,1)<.5:
                # 50% of the time exploit even before full expansion.
                node = self.BESTCHILD(node, self.SCALAR)
            else:
                if node.fully_expanded() == False:
                    return self.EXPAND(node)
                else:
                    node = self.BESTCHILD(node, self.SCALAR)
        return node
    def EXPAND(self, node):
        """Add one child for a move not yet tried from ``node``."""
        # NOTE(review): tried_children is computed but never used.
        tried_children = [c.state for c in node.children]
        tried_moves = [c.state.moves[-1] for c in node.children]
        valid = list(set(node.state.MOVES).difference(tried_moves))
        new_state = self.next_state(node.state, moves = valid)
        node.add_child(new_state)
        return node.children[-1]
    def BESTCHILD_FINAL(self, node, scalar):
        """Like BESTCHILD, but also returns every child's mean reward
        and cached collision cost for diagnostics."""
        bestscore = -9999999
        bestchildren = []
        candidate_rewards = []
        lost_can_collisions = []
        for c in node.children:
            # NOTE(review): unlike BESTCHILD (which divides by
            # c.visits * 1.0), this is a bare division -- truncates for
            # int rewards under Python 2; confirm intended.
            exploit = c.reward/c.visits
            candidate_rewards.append(exploit)
            lost_can_collisions.append(c.state.collision_cost)
            explore = math.sqrt(2.0*math.log(node.visits)/float(c.visits))
            # UCT score: exploitation + scalar * exploration.
            score = exploit+scalar*explore
            if score == bestscore:
                bestchildren.append(c)
            if score>bestscore:
                bestchildren = [c]
                bestscore = score
        # print('length of bestchildren array : '+str(len(bestchildren)))
        if len(bestchildren) == 0:
            # print('children count '+str(len(node.children)))
            # rospy.loginfo("no best child found, probably fatal")
            return None, [], []
        return random.choice(bestchildren), candidate_rewards, lost_can_collisions
    def BESTCHILD(self, node, scalar):
        """Pick the child maximising the UCT score; ties broken randomly."""
        bestscore = -9999999
        bestchildren = []
        for c in node.children:
            exploit = c.reward/(c.visits *1.0)
            explore = math.sqrt(2.0*math.log(node.visits)/float(c.visits))
            score = exploit+scalar*explore
            if score == bestscore:
                bestchildren.append(c)
            if score>bestscore:
                bestchildren = [c]
                bestscore = score
        if len(bestchildren) == 0:
            # print('children count '+str(len(node.children)))
            rospy.loginfo("no best child found, probably fatal")
            return None
        return random.choice(bestchildren)
    def centerlane_incentive(self, state):
        """Penalty growing with |x| (x == 0 presumably being the lane
        centre -- TODO confirm coordinate frame)."""
        incentive = abs(state.x)
        return -incentive
    def forward_incentive(self, state):
        """Reward progress along y since the plan started."""
        diffy = state.y - self.starty
        incentive = diffy
        return incentive
    def goal_reward(self, state):
        """Placeholder: the distance is computed but the reward is
        always 0 for now."""
        dist_to_goal = abs(state.x-self.startx) + abs(state.y-self.starty)
        #transform to an appropriate value
        return 0
    def get_collision_cost(self, state):
        """Collision probability at the state's predicted time, scaled
        to a large negative cost."""
        collision_cost = self.predictor.get_collision_probability(state.x, state.y, self.start_time+((self.number_time_steps - state.turn)*self.dt), self.radius)
        return collision_cost*(-1000)
    def GETREWARD(self, state):
        """Total reward: ground-type reward + collision cost + forward
        and centre-lane incentives.  Also caches the collision cost on
        the state for later diagnostics."""
        ground_type = self.check_ground(state)
        # print(ground_type)
        ground_reward = self.reward_params[ground_type]
        collision_cost = self.get_collision_cost(state)
        state.collision_cost = collision_cost
        reward = ground_reward + collision_cost + self.forward_incentive(state) + self.centerlane_incentive(state)
        return reward
    def BACKUP(self, node, reward):
        """Propagate one rollout reward from ``node`` up to the root."""
        # print('backing up node at x y '+str(node.state.x) + ' ' + str(node.state.y))
        while node != None:
            node.visits += 1
            node.reward += reward
            node = node.parent
        return
    def next_state(self, state, moves =[]):
        """Sample a successor state: 10% of the time force 'straight'
        (move 0), otherwise pick uniformly from ``moves``.

        NOTE(review): ``moves=[]`` is a mutable default argument; it is
        never mutated here, but a None sentinel would be safer.
        """
        if(random.uniform(0,1)<.1):
            nextmove = 0
        else:
            if(len(moves)!=0):
                nextmove = random.choice(moves)
            else:
                # Callers always pass a non-empty move list; the sleep
                # makes the failure very visible during development.
                print('ERROR should not happen')
                time.sleep(100)
                nextmove = random.choice(state.MOVES)
        nextstate = State(state.moves+[nextmove], state.turn-1)
        nextstate.cum_angle = state.cum_angle + nextmove
        # Advance the pose with a fixed-velocity step.
        nextstate.x = state.x + ((self.v * self.dt) * math.sin(nextstate.cum_angle ))
        # NOTE(review): y advances by v*dt / cos(angle) rather than
        # v*dt * cos(angle) -- confirm this is the intended kinematics.
        nextstate.y = state.y + ((self.v * self.dt) /(1.0 * math.cos(nextstate.cum_angle)))
        # if(nextstate.y < state.y):
        #     print('y '+str(nextstate.y))
        #     print('y is less cause angle is : '+str(nextstate.cum_angle))
        #     time.sleep(6)
        return nextstate
    def check_ground(self, state):
        """Classify the lateral position x into a Ground category.

        The thresholds imply the road spans roughly
        [-0.75*road_width, 0.25*road_width] with the right lane centred
        on x == 0 -- inferred from the comparisons below; TODO confirm.
        """
        # Returns the type of ground for a duckie pose
        x = state.x
        if abs(x) <= 0.25*self.road_width - self.radius:
            return Ground.RIGHT_LANE
        elif x < -0.25*self.road_width + self.radius and x >= -0.75*self.road_width + self.radius :
            return Ground.WRONG_LANE
        elif (x > -0.75*self.road_width - self.radius and x < -0.75*self.road_width + self.radius) or (x > 0.25*self.road_width - self.radius and x < 0.25*self.road_width + self.radius):
            return Ground.PARTIALLY_OUT_OF_ROAD
        else:
            return Ground.LOST
class State():
    """One MCTS node state: the steering moves taken so far plus the
    pose fields the planner fills in (x, y, cum_angle, collision_cost).
    """

    def __init__(self, moves=None, turn=20, angle_change_limit=0.2):
        """
        :param moves: steering moves taken so far (None -> fresh list)
        :param turn: remaining time steps before the state is terminal
        :param angle_change_limit: max steering change per step
        """
        self.turn = turn
        # Fixed: the original used a mutable default (moves=[]) that is
        # shared across every call; use a None sentinel instead.
        self.moves = [] if moves is None else moves
        # The three steering options: left, straight, right.
        self.MOVES = [-angle_change_limit, 0, angle_change_limit]
        self.cum_angle = 0   # heading; overwritten by the planner
        self.x = 0           # pose; overwritten by the planner
        self.y = 0
        self.num_moves = len(self.MOVES)
        self.collision_cost = 0

    def terminal(self):
        """True once every time step has been consumed."""
        return self.turn == 0

    def __hash__(self):
        # Identity is derived purely from the move sequence.
        return int(hashlib.md5(str(self.moves).encode('utf-8')).hexdigest(), 16)

    def __eq__(self, other):
        return hash(self) == hash(other)

    def __repr__(self):
        s = "Moves: %s "%(self.moves)
        return s
class Node():
    """A search-tree node: wraps a State plus visit/reward statistics
    and parent/children links."""

    def __init__(self, state, parent=None):
        # A node starts at one visit so UCT never divides by zero.
        self.visits = 1
        self.reward = 0.0
        self.state = state
        self.children = []
        self.parent = parent

    def add_child(self, child_state):
        """Attach a new child node wrapping ``child_state``."""
        self.children.append(Node(child_state, self))

    def update(self, reward):
        """Fold one rollout result into this node's statistics."""
        self.reward += reward
        self.visits += 1

    def fully_expanded(self):
        """True once every legal move has a corresponding child."""
        return len(self.children) == self.state.num_moves

    def __repr__(self):
        return "Node; children: %d; visits: %d; reward: %f" % (
            len(self.children), self.visits, self.reward)
|
"""
Author: linnil1
Objective: Image Processing HW1
Description: This program 1) reads a special format called 64, which represents
an image, 2) does some operations (multiply, add, avg) on it and 3) draws a histogram.
"""
import numpy as np
import matplotlib.pyplot as plt
import utils
def limitImg(func):
    """Decorator clamping the returned image's values into [0, 1].

    The wrapped function must return a numpy array; values above 1 are
    set to 1 and values below 0 to 0, in place.
    """
    import functools

    # functools.wraps preserves the wrapped function's name/docstring
    # (the original decorator lost them).
    @functools.wraps(func)
    def wrapFunc(*args, **kwargs):
        img = func(*args, **kwargs)
        img[img > 1] = 1
        img[img < 0] = 0
        return img
    return wrapFunc
@limitImg
def imageMult(img, num):
    """Scale every pixel of ``img`` by the constant ``num``
    (result clamped to [0, 1] by the decorator)."""
    scaled = img * num
    return scaled
@limitImg
def imageAdd(img, num):
    """Brighten ``img`` by ``num`` grey levels.

    Pixels live in [0, 1], so the 0-255 constant is rescaled by 1/256
    before adding; the decorator clamps the result.
    """
    return img + (num / 256)
@limitImg
def imageAvg(img1, img2):
    """Pixel-wise average of two equally-sized images."""
    summed = img1 + img2
    return summed / 2
@limitImg
def image_special_func(img):
    """Horizontal difference filter: each output pixel is a pixel minus
    its right-hand neighbour (output is one column narrower)."""
    left = img[:, :-1]
    right = img[:, 1:]
    return left - right
def test():
    """Manual smoke test: read sample images, apply one operation and
    display the result alongside its 32-bin histogram."""
    # read (alternative sample inputs kept for convenience)
    # real_image = read64("LINCOLN.64")
    # real_image = read64("JET.64")
    real_image = utils.read64("../LIBERTY.64")
    real_image1 = utils.read64("../LISA.64")
    # now_img = imageAvg(real_image, real_image1)
    now_img = imageAdd(real_image, 100)
    # plot it
    plt.figure()
    plt.imshow(now_img, cmap="gray")
    # histogram
    hist_bin = utils.getHist(now_img)
    print("Separate 0~1 to 32 bins")
    print(hist_bin)
    plt.figure()
    plt.bar(np.arange(32), height=hist_bin)
    # built-in histogram
    # plt.figure()
    # plt.hist(np.int8(real_image.flatten() * 31), bins=32)
    plt.show()
if __name__ == "__main__":
    # Parse CLI arguments: the input path plus one operation flag.
    parser = utils.setParser()
    args = parser.parse_args()
    print(args)
    img = utils.read64(args.path)
    new_img = None
    # Dispatch on whichever operation flag was supplied; the elif
    # chain means the first matching flag wins.
    if args.add is not None:
        new_img = imageAdd(img, args.add)
    elif args.addimage is not None:
        img2 = utils.read64(args.addimage)
        new_img = imageAvg(img, img2)
    elif args.multiply is not None:
        new_img = imageMult(img, args.multiply)
    elif args.special:
        new_img = image_special_func(img)
    # plot (new_img stays None when no operation flag was given)
    utils.plot(img, new_img)
|
# Read n, the pair (a, b), the count k and the k values pk; print YES
# only when neither a nor b occurs in pk and pk has no duplicates.
n = int(input())
a, b = map(int, input().split())
k = int(input())
pk = [int(token) for token in input().split()]
has_duplicates = len(pk) != len(set(pk))
if a in pk or b in pk or has_duplicates:
    print('NO')
else:
    print('YES')
|
# -*- coding: UTF-8 -*-
"""
relaxed_spec.py
Copyright 2019 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from bravado_core.spec import Spec
from bravado_core import formatter
from bravado_core.formatter import SwaggerFormat
class RelaxedSpec(Spec):
    """A more forgiving version of bravado-core's Spec.

    w3af needs to be flexible while bravado-core strictly follows the
    OpenAPI specification.  This subclass relaxes the parts of Spec
    that proved too strict when scanning real-life APIs which bend the
    specification but are still perfectly usable.
    """

    def get_format(self, format_name):
        """Return a format handler for ``format_name``, never warning.

        Developers often declare ad-hoc formats, e.g.::

            orderId:
              description: Order reference
              example: e475f288-4e9b-43ea-966c-d3912e7a25b2
              format: uuid   <-- not in the OpenAPI spec
              type: string

        bravado-core would log a warning and mishandle such parameters,
        so any unknown name gets a permissive pass-through format.
        """
        # Spec-defined formats already handled by bravado-core win.
        known = formatter.DEFAULT_FORMATS.get(format_name)
        if known is not None:
            return known

        # Otherwise build a generic handler so the fuzzer can still use
        # the parameter: strings pass through unchanged both ways and
        # validation always succeeds.
        return SwaggerFormat(
            # name of the format as used in the Swagger spec
            format=format_name,
            # string <-> python conversions are the identity
            to_wire=lambda input_string: input_string,
            to_python=lambda input_string: input_string,
            # validation is a permissive no-op
            validate=validate_generic,
            description='Generic format for w3af fuzzer',
        )
def validate_generic(input_string):
    """Accept any value for a developer-defined format.

    A format a developer defined in an arbitrary way cannot really be
    validated, so this is a deliberate no-op.

    :param input_string: The string to validate
    :return: True
    """
    return True
|
import glob
import random

import torch
import torchaudio
def pad_tensor(tensor, max_length):
    """Right-pad a (1, n) tensor with zeros to shape (1, max_length)."""
    padded = torch.zeros(1, max_length)
    padded[:, :tensor.size(1)] = tensor
    return padded
def padding_batch(sequences):
    """Zero-pad a list of (1, n_i) tensors to a common length and stack
    them into a single (len(sequences), max(n_i)) batch tensor.

    :param sequences: list of tensors, each of shape (1, n_i)
    :return: one stacked, padded tensor
    """
    # (the original also computed len(sequences) into an unused local)
    max_len = max(s.size(1) for s in sequences)
    return torch.cat([pad_tensor(s, max_len) for s in sequences])
"""
Build a batch for LSTM input by randomly choosing utterances from different speakers' directories.
"""
class get_batch(object):
    """Random batch sampler over a LibriSpeech-style directory layout.

    ``path`` contains one sub-directory per speaker; each speaker
    directory contains reading sessions which in turn hold .flac
    utterance files.  (Requires the module-level ``glob`` import.)
    """

    def __init__(self, path, N, M):
        """
        :param path: root directory holding one folder per speaker
        :param N: speakers sampled per batch
        :param M: utterances sampled per chosen speaker
        """
        self.N = N
        self.path = path
        self.M = M
        # Strip the root prefix so only the speaker names remain.
        self.speakers = [i[len(path):] for i in glob.glob(path + '/*')]

    def sampler(self):
        """Draw N speakers (with replacement) and M utterances each;
        return the loaded waveforms as a flat list of tensors."""
        batch_speakers = []
        for _ in range(self.N):
            speaker = random.choice(self.speakers)
            newpath = self.path + speaker
            # utterances of a certain speaker in librispeech
            # (the original wrapped these glob calls in redundant
            # list comprehensions)
            readings = glob.glob(newpath + '/*')
            for _ in range(self.M):
                reading = random.choice(readings)
                utterance = random.choice(glob.glob(reading + '/*.flac'))
                # torchaudio.load returns (waveform, sample_rate)
                audio = torchaudio.load(utterance)[0]
                batch_speakers.append(audio)
        return batch_speakers
|
from .views import EmployeeViewSet, TaskViewSet
from rest_framework import routers
# DRF router wiring: expose the Employee and Task viewsets under the
# /employee/ and /task/ URL prefixes (DefaultRouter also serves the
# browsable API root).
router = routers.DefaultRouter()
router.register(r'employee', EmployeeViewSet)
router.register(r'task', TaskViewSet)
|
#!/usr/bin/python
# Raspberry Pi based system for recording and transmitting data from a lone long distance walker.
# Being built for http://thelongwellwalk.org/
#Import the supporting code we need
import picamera # Controlling the Camera
import time # Time and Date functions
import os # Operating system information and functions
import io # Input and Output (Files and streams))
import RPi.GPIO as GPIO # Controls the GPIO for LEDs and Buttons
import sys
from GPSController import *
import math
import getopt
import subprocess
import re
#Config
buffer_length = 15 # How many seconds worth of video do we keep in the buffer
debug = True # Do we print out debugging messages
f_favail_limit = 20000 # How much disk space is too little space (measured in blocks)
arecordcmd = "arecord -D plughw:1,0 " # base ALSA record command; output file name gets appended
# TODO : use the --max-file-time on the audio files
duration_step = 1 # seconds between housekeeping steps in the main loop
#duration_timelapse = 60
duration_timelapse = 20 # seconds between timelapse stills
duration_first_timelapse = 5 # take the first still this soon after start-up
next_step = 0 # next housekeeping deadline (set in main)
next_timelapse = 0 # next timelapse deadline (set in main)
cycle_wait = 0.5 # main-loop poll interval in seconds
poweroffclicktarget = 6 # click score needed to trigger shutdown
poweroffclickstep = 2 # score added per power-button press (decays by 1 per idle cycle)
powerclickcount = 0
videosizelimit = 20000 # free blocks below which video recording stops
audiosizelimit = 10000 # free blocks below which audio recording stops
sizewarning = 50000 # free blocks below which the warning LED flashes
loadavglimit = 1.25 # 1-minute load average considered "too stressed"
#Where are files stored
outputbasedir = os.path.expanduser('/home/pi/BPOA/')
#GPIO Config
# Which GPIO pin does what job
videobutton = 11 # Video (currently rigged as momentary switch)
audiobutton = 12 # Audio (no code so far)
poweroffbutton = 12 # Poweroff button (can be same as audio)
statusLED_R = 15 #10 # Red LED, short wire on own
statusLED_G = 18 # 7 # Green LED middle length wire
statusLED_B = 16 # 8 # Blue LED short wire next to middle length
GPS_TXD = 8
GPS_RXD = 10
# Status variables
videorecording = False # Are we currently recording video
audiorecording = False # Are we currently recording audio
tl_count = 55 # Starting Timelapse count 55 means it will take 5 seconds to do first timelapse
loadlimitbreached = False # CPU load average over loadavglimit?
videosizelimitreached = False # disk too full to record video?
audiosizelimitreached = False # disk too full to record audio?
sizewarningreached = False # disk space getting low?
transferring = False # file transfer in progress (affects LED status only)
lastutc = "" # last GPS UTC string seen (used for clock sync)
# Functions

def getFileName(curtime, ext):
    """Build a file name from a time struct and extension.

    GPS logs roll hourly (YYYY-MM-DD-HH.gps); every other file type
    gets a full YYYY-MM-DD-HH-MM-SS timestamp.
    """
    if ext == "gps":
        pattern = "%Y-%m-%d-%H"
    else:
        pattern = "%Y-%m-%d-%H-%M-%S"
    return time.strftime(pattern, curtime) + "." + ext
# Work out the output folder for a file type, creating it if needed.

def getFolderName(curtime, ext):
    """Return the output directory for ``ext`` at ``curtime``.

    GPS logs are grouped per day, everything else per hour; missing
    directories are created on demand.
    """
    if ext == "gps":
        subdir = time.strftime("%Y-%m/%d/", curtime)
    else:
        subdir = time.strftime('%Y-%m/%d/%H/', curtime)
    name = outputbasedir + ext + '/' + subdir
    # if the folder doesn't exist make it
    if not os.path.exists(name):
        os.makedirs(name)
    return name
def output_mode():
    """Drive the status LED with the current *mode* colour.

    Priority: disk-full (red) > video (blue) > audio (cyan) >
    timelapse (green).  NOTE(review): the LED channels appear to be
    active-low (GPIO False lights a colour) -- confirm the wiring.
    """
    global videorecording, audiorecording, tl_count, loadlimitbreached, videosizelimitreached, audiosizelimitreached, sizewarningreached, transferring
    # Mode lights
    # Status - Off = Not running
    if audiosizelimitreached:
        # Status -Red = Disk space too low, audio & video will not record
        GPIO.output( statusLED_R, False)
        GPIO.output( statusLED_G, True)
        GPIO.output( statusLED_B, True)
    elif videorecording:
        # Status -Blue = Video recording
        GPIO.output( statusLED_R, True)
        GPIO.output( statusLED_G, True)
        GPIO.output( statusLED_B, False)
    elif audiorecording:
        # Status -cyan = Audio recording
        GPIO.output( statusLED_R, True)
        GPIO.output( statusLED_G, False)
        GPIO.output( statusLED_B, False)
    else:
        # Status -green = Timelapse mode
        GPIO.output( statusLED_R, True)
        GPIO.output( statusLED_G, False)
        GPIO.output( statusLED_B, True)
def output_status():
    """Update the status LED, flashing warnings on odd-numbered seconds
    and showing the steady mode colour otherwise.

    Reads the module-level ``current_time`` set by the main loop.
    NOTE(review): the videosizelimitreached combo is commented as
    "Flashing Yellow" here but the identical R/G/B combo is called
    "Red" in output_mode -- one of the comments is wrong; confirm.
    """
    global videorecording, audiorecording, tl_count, loadlimitbreached, videosizelimitreached, audiosizelimitreached, sizewarningreached, transferring
    # Dump the status flags roughly every 20 seconds when debugging.
    if (math.floor(current_time) % 20) == 0:
        if (debug):
            if videorecording:
                print("videorecording blue?=", videorecording)
            if audiorecording:
                print("audiorecording cyan?=", audiorecording)
            if transferring:
                print("transferring =", transferring)
            if videosizelimitreached:
                print("videosizelimitreached =", videosizelimitreached)
            if audiosizelimitreached:
                print("audiosizelimitreached =", audiosizelimitreached)
            if sizewarningreached:
                print("sizewarningreached =", sizewarningreached)
            if loadlimitbreached:
                print("loadlimitbreached =", loadlimitbreached)
    # Odd seconds show the highest-priority warning colour; even
    # seconds fall back to the mode colour, producing a flash.
    if math.floor(current_time) % 2:
        if transferring:
            # Status -Flashing white = Transferring
            GPIO.output( statusLED_R, False)
            GPIO.output( statusLED_G, False)
            GPIO.output( statusLED_B, False)
        elif videosizelimitreached:
            # Status -Flashing Yellow = Disk space getting low
            GPIO.output( statusLED_R, False)
            GPIO.output( statusLED_G, True)
            GPIO.output( statusLED_B, True)
        elif sizewarningreached:
            # Status -Flashing Red = Disk space too low, video will not record
            GPIO.output( statusLED_R, False)
            GPIO.output( statusLED_G, False)
            GPIO.output( statusLED_B, True)
        elif loadlimitbreached:
            # Status -Flashing Magenta = CPU too stressed
            GPIO.output( statusLED_R, False)
            GPIO.output( statusLED_G, True)
            GPIO.output( statusLED_B, False)
        else:
            output_mode()
    else:
        output_mode()
def splitDegrees(fDeg):
    """Split decimal degrees into [degrees, minutes, seconds].

    Each component is floored; fractional seconds are discarded.
    """
    whole_degrees = math.floor(fDeg)
    minutes_float = 60 * (fDeg - whole_degrees)
    whole_minutes = math.floor(minutes_float)
    whole_seconds = math.floor(60 * (minutes_float - whole_minutes))
    return [whole_degrees, whole_minutes, whole_seconds]
# Take a timelapse shot
def dotimelapse():
    """Capture one timelapse still, embedding GPS EXIF tags and
    appending to the hourly GPS log when a fix is available.

    Skipped entirely when free disk space is below the audio limit
    (the most critical threshold).  Python 2 module: the bare
    ``print "..."`` statements below are intentional.
    """
    if not audiosizelimitreached:
        camera.exif_tags['EXIF.Copyright'] = 'Copyright (c) 2014 the Long Well Walk'
        # Only tag/log GPS when the fix has real non-zero coordinates.
        if not (math.isnan(gpsc.fix.latitude) or math.isnan(gpsc.fix.longitude)) and gpsc.fix.latitude and gpsc.fix.longitude:
            if (debug):
                print "latitude ", gpsc.fix.latitude
                print "longitude ", gpsc.fix.longitude
                print "altitude (m)", gpsc.fix.altitude
            # DONE : Test that the GPS data set in EXIF properly fits the spec and is readable
            # Spec here http://www.digicamsoft.com/exif22/exif22/html/exif22_53.htm
            camera.exif_tags['GPS.GPSVersionID'] = "2.2.0.0"
            if gpsc.fix.latitude >= 0:
                camera.exif_tags['GPS.GPSLatitudeRef'] = "N"
            else:
                camera.exif_tags['GPS.GPSLatitudeRef'] = "S"
            lat = math.fabs(gpsc.fix.latitude)
            iDeg, iMin, iSecs = splitDegrees(lat)
            #dd/1,mm/1,ss/1
            # NOTE(review): the +0.1 presumably coerces the EXIF values
            # into a non-integer form -- confirm against the EXIF spec.
            camera.exif_tags['GPS.GPSLatitude'] = '{},{},{}'.format(iDeg+0.1, iMin+0.1, iSecs+0.1)
            if gpsc.fix.longitude >= 0:
                camera.exif_tags['GPS.GPSLongitudeRef'] = "E"
            else:
                camera.exif_tags['GPS.GPSLongitudeRef'] = "W"
            longitude = math.fabs(gpsc.fix.longitude)
            iDeg, iMin, iSecs = splitDegrees(longitude)
            #dd/1,mm/1,ss/1
            camera.exif_tags['GPS.GPSLongitude'] = '{},{},{}'.format(iDeg+0.1, iMin+0.1, iSecs+0.1)
            if gpsc.fix.altitude >= 0:
                camera.exif_tags['GPS.GPSAltitudeRef'] = "0"
            else:
                camera.exif_tags['GPS.GPSAltitudeRef'] = "1"
            camera.exif_tags['GPS.GPSAltitude'] ='{}'.format(math.fabs(gpsc.fix.altitude))
            # CSV line: epoch, UTC string, lat, lon, altitude.
            gpslogline = '{},{},{},{},{}\n'.format(math.floor(time.time()), gpsc.utc, gpsc.fix.latitude, gpsc.fix.longitude, gpsc.fix.altitude)
            # TODO : Write the gpslogline to a GPS file.
            gpsnow = time.gmtime()
            gpsname = getFolderName(gpsnow,'gps')+getFileName(gpsnow,'gps')
            print("gpsname =", gpsname)
            with open(gpsname, "a") as gpsfile:
                gpsfile.write(gpslogline)
            if (debug):
                print("gpslogline =", gpslogline)
        # The still itself is taken whether or not we had a GPS fix.
        stillnow = time.gmtime()
        stillname = getFolderName(stillnow,'jpg')+getFileName(stillnow,'jpg')
        camera.capture(stillname, use_video_port=True)
        if (debug):
            print("still "+stillname)
def write_video(stream):
    """Flush the circular video buffer to an .h264 file and reset it.

    The file is stamped ``buffer_length`` seconds in the past, i.e.
    when the buffered footage actually started.
    """
    # Write the entire content of the circular buffer to disk. No need to
    # lock the stream here as we're definitely not writing to it
    # simultaneously
    then = time.gmtime(time.time() - buffer_length)
    videoname = getFolderName(then,'h264')+getFileName(then,'h264')
    if (debug):
        print("buffer video writing "+videoname)
    with io.open(videoname, 'wb') as output:
        # Skip forward to the first SPS header so the file starts on a
        # decodable frame.
        for frame in stream.frames:
            if frame.header:
                stream.seek(frame.position)
                break
        while True:
            buf = stream.read1()
            if not buf:
                break
            output.write(buf)
    # Wipe the circular stream once we're done
    stream.seek(0)
    stream.truncate()
    if (debug):
        print("buffer video ending "+videoname)
def checkstatus():
    """Refresh the global disk-space and CPU-load status flags."""
    global videorecording, audiorecording, tl_count, loadlimitbreached, videosizelimitreached, audiosizelimitreached, sizewarningreached, transferring
    # Have we run out of disk space
    stats = os.statvfs(outputbasedir)
    # All limits are measured in free blocks, not bytes.
    videosizelimitreached = stats.f_bfree < videosizelimit
    audiosizelimitreached = stats.f_bfree < audiosizelimit
    sizewarningreached = stats.f_bfree < sizewarning
    # Only the 1-minute load average is compared against the limit.
    loadavg, loadavg5, loadavg15 = os.getloadavg()
    loadlimitbreached = loadavg>loadavglimit
    if (debug):
        print("Checkstatus:-","stats.f_bfree =", stats.f_bfree,"videosizelimitreached =", videosizelimitreached, "audiosizelimitreached =", audiosizelimitreached, "sizewarningreached =", sizewarningreached,"loadavg =", loadavg, "loadlimitbreached =", loadlimitbreached)
#Main Code Starts here
# NOTE(review): this is a Python 2 script (<> operators, print
# statements); the print("a =", b) calls print tuples under Python 2
# -- presumably acceptable debug output, but confirm.
#create GPS controller
gpsc = GpsController()
#start controller
gpsc.start()
# GPIO setup
GPIO.cleanup()
GPIO.setmode(GPIO.BOARD) # Use the standard RPi pin numbers
GPIO.setup(videobutton, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set as Input which is usually off
GPIO.setup(audiobutton, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set as Input which is usually off
# The poweroff button may share a pin with the audio button.
if not poweroffbutton == audiobutton:
    GPIO.setup(poweroffbutton, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set as Input which is usually off
GPIO.setup(statusLED_R, GPIO.OUT) # Set as Output
GPIO.setup(statusLED_G, GPIO.OUT) # Set as Output
GPIO.setup(statusLED_B, GPIO.OUT) # Set as Output
if (debug):
    print("backback started")
with picamera.PiCamera() as camera:
    # Start up the Camera
    camera.resolution = (1920, 1080) #1080P Full HD 1920x1080
    #camera.resolution = (1280, 720) #720P HD 1280x720
    camera.framerate = 25
    # Ring buffer that always holds the last buffer_length seconds.
    stream = picamera.PiCameraCircularIO(camera, seconds=buffer_length)
    camera.start_recording(stream, format='h264')
    current_time = time.time()
    print("current_time =", current_time)
    next_step = current_time + duration_step
    next_timelapse = current_time + duration_first_timelapse
    # Have we run out of disk space
    checkstatus()
    output_status()
    GPIO.add_event_detect(videobutton, GPIO.BOTH, bouncetime=300) # Start listening out for button presses
    GPIO.add_event_detect(audiobutton, GPIO.BOTH, bouncetime=300) # Start listening out for button presses
    if not poweroffbutton == audiobutton:
        GPIO.add_event_detect(poweroffbutton, GPIO.BOTH, bouncetime=300) # Start listening out for button presses
    lastvideobuttonstate = GPIO.input(videobutton)
    lastaudiobuttonstate = GPIO.input(audiobutton)
    lastpoweroffbuttonstate = GPIO.input(poweroffbutton)
    try:
        while True:
            camera.wait_recording(cycle_wait) # Pause in loop
            # A "press" is an edge event whose level differs from the
            # last observed level (filters bounce/duplicate events).
            videobuttonpressednow = False
            if GPIO.event_detected(videobutton):
                videobuttonpressednow = (GPIO.input(videobutton) <> lastvideobuttonstate)
                lastvideobuttonstate = GPIO.input(videobutton)
            poweroffbuttonpressednow = False
            couldbeaudio = False
            if GPIO.event_detected(poweroffbutton):
                poweroffbuttonpressednow = (GPIO.input(poweroffbutton) <> lastpoweroffbuttonstate)
                lastpoweroffbuttonstate = GPIO.input(poweroffbutton)
                # Shared pin: a poweroff press may really be an audio press.
                if poweroffbuttonpressednow and audiobutton == poweroffbutton:
                    couldbeaudio = True
            audiobuttonpressednow = False
            if GPIO.event_detected(audiobutton):
                print("audiobutton =", audiobutton)
                audiobuttonpressednow = (GPIO.input(audiobutton) <> lastaudiobuttonstate)
                lastaudiobuttonstate = GPIO.input(audiobutton)
                print("audiobuttonpressednow =", audiobuttonpressednow)
            current_time = time.time()
            #print("current_time =", current_time)
            #visual stuff
            # Once-a-second housekeeping.
            if(current_time >= next_step):
                #next_step = current_time + duration_step
                next_step = next_step + duration_step
                #print("next_step =", next_step)
                # check GPS time and if it ahead of RPi time update RPi time
                if not (math.isnan(gpsc.fix.latitude) or math.isnan(gpsc.fix.longitude)) and gpsc.fix.latitude and gpsc.fix.longitude and gpsc.utc and gpsc.utc<>"None":
                    if gpsc.utc <> lastutc:
                        lastutc = gpsc.utc
                        # Drop milliseconds so strptime can parse it.
                        timetoseconds = re.sub(r'\.[0-9][0-9][0-9]Z', r'Z', lastutc)
                        sattime = time.mktime(time.strptime(timetoseconds, "%Y-%m-%dT%H:%M:%SZ"))
                        if debug:
                            print("sattime =", sattime)
                            print("time.time =", time.time())
                        # Only ever move the clock forwards.
                        if (time.time() < sattime):
                            print "setting time"
                            os.system('date -s %s' % lastutc)
                if(current_time >= next_timelapse):
                    dotimelapse()
                    next_timelapse = next_timelapse + duration_timelapse
                    # Have we run out of disk space
                    checkstatus()
            #Fast reacting stuff
            # Power-off detection: repeated presses raise a score that
            # decays by 1 per idle cycle; shutdown at the target score.
            if poweroffbuttonpressednow:
                powerclickcount = powerclickcount + poweroffclickstep
                if (debug):
                    print("powerclickcount =", powerclickcount)
            elif powerclickcount > 0:
                powerclickcount = powerclickcount - 1
            if powerclickcount >= poweroffclicktarget:
                if (debug):
                    print ("Power off triggered stopping")
                GPIO.output( statusLED_R, False)
                GPIO.output( statusLED_G, True)
                GPIO.output( statusLED_B, True)
                camera.split_recording(stream)
                videorecording = False
                os.system("sudo shutdown -h -t 30 now") #not sure about the -F which forces fsck on next boot
                sys.exit()
            # Have we run out whilst recording video
            if audiosizelimitreached and audiorecording:
                # TODO : Stop recording
                audiorecording = False
                subprocess.call("killall arecord", shell=True)
                if (debug):
                    print ("limit breached audio stopping")
            if audiobuttonpressednow or couldbeaudio:
                # TODO :34 Audio
                # this is the audio blog button
                if (debug):
                    print('Audio Button Pressed!')
                if audiorecording:
                    # Only stop audio when video isn't driving it.
                    if (not videorecording) or audiosizelimitreached:
                        audiorecording = False
                        subprocess.call("killall arecord", shell=True)
                        if (debug):
                            print ("ending recording")
                else:
                    now = time.gmtime()
                    audiofilename = getFolderName(now,'wav')+getFileName(now,'wav')
                    AudioRecordingProcess = subprocess.Popen(arecordcmd+audiofilename, shell=True)
                    audiorecording = True
                    if (debug):
                        print ("recordinging into ", audiofilename)
            # Have we run out whilst recording video
            if videosizelimitreached and videorecording:
                camera.split_recording(stream)
                videorecording = False
                if (debug):
                    print ("limit breached video stopping")
            # Has the video button been pressed?
            if videobuttonpressednow and not videosizelimitreached:
                if (debug):
                    print('Video Button Clicked!')
                # If we are recording stop recording
                if videorecording:
                    if (debug):
                        print('Button pressed to stop ')
                    # Go back to recording to the ring buffer not the file
                    camera.split_recording(stream)
                    videorecording = False
                    audiorecording = False
                    subprocess.call("killall arecord", shell=True)
                    if (debug):
                        print("button video ending "+videoname)
                else:
                    videorecording = True
                    output_mode()
                    # What should the video be called
                    now = time.gmtime()
                    videoname = getFolderName(now,'h264')+getFileName(now,'h264')
                    audiofilename = getFolderName(now,'wav')+getFileName(now,'wav')
                    # TODO : Start recording audio
                    AudioRecordingProcess = subprocess.Popen(arecordcmd+audiofilename, shell=True)
                    if (debug):
                        print("button video starting "+videoname)
                    # Send video to the file
                    camera.split_recording(videoname)
                    # TODO : #34 Audio Recording
                    # TODO : the video triggers the wide angle audio
                    # TODO : workout if we can have an audio ring buffer
                    # Save the ring buffer to the disk
                    write_video(stream)
            # Set the output lights
            output_status()
    finally:
        # Tidy up when the program stops
        camera.stop_recording()
        if (debug):
            print("stopping ending ")
        GPIO.output( statusLED_R, True)
        GPIO.output( statusLED_G, True)
        GPIO.output( statusLED_B, True)
        GPIO.cleanup()
        #stop GPS controller
        gpsc.stopController()
        gpsc.join()
|
# I modelled this problem as a bipartite graph
# where each vote is a vertex. All votes in favor of cats
# go on one side and all votes in favor of dogs go to the other
# side of the graph.
# Two vertices/votes are connected if they exclude one another,
# in a way that they cannot both be satisfied (e.g. C1 D1 and D1 C1)
#
# Then I apply the Hopcroft-Karp algorithm to calculate a maximum
# matching.
#
# The idea behind using the Hopcroft-Karp algorithm is as follows:
# Each set of votes contains at least one "biggest group of
# non-excluding votes". Each vote that is not in this group collides
# with at least one vote in the group, otherwise it would be in
# that group itself. So for every vote colliding with some vote
# from the biggest group, the matching increases by one. Therefore
# the maximum matching has the same size as the total number of
# collisions between the biggest group and every vote not in it, and
# that again is the number of votes NOT in the "biggest group of
# non-excluding votes".
# Therefore |all votes| - |maximum matching| = |biggest group of non-excluding-votes|
#
import sys
from hopcroftkarp import hopcroftgraph, maximum_matching
def build_graph(votes):
    """Build the bipartite conflict graph for the cat/dog vote problem.

    Each vote becomes a vertex -- cat-lovers on one side, dog-lovers on
    the other -- and two votes are joined by an edge when they exclude
    each other, i.e. one voter's loved pet is the other's hated pet.

    :param votes: list of (love, hate) label pairs
    :return: (graph, cat_vertices, dog_vertices)
    """
    cat_side = set()
    dog_side = set()
    graph = hopcroftgraph()
    total = len(votes)
    for i in range(total):
        love1, hate1 = votes[i]
        vertex1 = "#%d %s %s" % (i, love1, hate1)
        # cat votes to the left, dog votes to the right
        if love1[0] == 'C':
            cat_side.add(vertex1)
        else:
            dog_side.add(vertex1)
        for j in range(total):
            love2, hate2 = votes[j]
            if love1 == hate2 or love2 == hate1:
                vertex2 = "#%d %s %s" % (j, love2, hate2)
                graph.add_edge(vertex1, vertex2)
    return (graph, cat_side, dog_side)
# number of testcases (Python 2 script: raw_input)
testcases = int(raw_input())
results = []
for _ in range(testcases):
    # number of cats, dogs and voters
    cats, dogs, voters = [int(num) for num in raw_input().split(" ")]
    votes = []
    for _ in range(voters):
        love, hate = [vote.strip() for vote in raw_input().split(" ")]
        votes.append((love,hate))
    graph = build_graph(votes)
    # |satisfiable votes| = |all votes| - |maximum matching|
    result = voters - len(maximum_matching(*graph))
    results.append(result)
# Print one result per line, with no trailing newline after the last.
N = len(results)
for i in range(N-1):
    sys.stdout.write("%d\n" % results[i])
sys.stdout.write("%d" % results[N-1])
|
# -*- coding: utf-8 -*-
"""
@File : _aiofile.py
@Time : 2021/6/14 0:07
@Author : my-xh
@Version : 1.0
@Software: PyCharm
@Desc    : Asynchronous file I/O helpers (legacy version)
"""
import asyncio
class AsyncFunWrapper:
    """Wrap a blocking callable so that calling it returns an awaitable.

    The call is dispatched to the running loop's default executor, so
    the blocking work happens on a worker thread.
    """

    def __init__(self, blocked_func):
        # The blocking I/O callable to defer to the executor.
        self._blocked_func = blocked_func

    def __call__(self, *args):
        loop = asyncio.get_running_loop()
        return loop.run_in_executor(None, self._blocked_func, *args)
class AIOWrapper:
    """Async facade over a blocking file object.

    Every attribute access (e.g. ``wrapper.read``) returns an
    AsyncFunWrapper around the corresponding attribute of the wrapped
    file, so ``await wrapper.read()`` runs the blocking call in the
    default executor.
    """
    def __init__(self, blocked_file_io):
        # Wrap the blocking I/O object (original comment translated).
        self._blocked_file_io = blocked_file_io
    def __getattribute__(self, name) -> AsyncFunWrapper:
        # Intercepts *every* attribute lookup on this instance;
        # super().__getattribute__ reaches the real instance dict
        # without recursing into this override.
        # NOTE(review): non-callable attributes (e.g. ``closed``) also
        # get wrapped and would have to be called/awaited -- confirm
        # callers only use method attributes.
        return AsyncFunWrapper(
            getattr(
                super().__getattribute__('_blocked_file_io'),
                name,
            )
        )
# Open a file asynchronously.
async def async_open(*args) -> AIOWrapper:
    """Open a file without blocking the event loop.

    The blocking built-in open() runs in the default executor; the resulting
    file object is returned wrapped in AIOWrapper for async method calls.
    """
    loop = asyncio.get_running_loop()
    raw_file = await loop.run_in_executor(None, open, *args)
    return AIOWrapper(raw_file)
|
"""API microservice porviding the activities and model APIs"""
from connexion import App
from flask import Flask
def setup_app(flask_app: Flask) -> None:
    """
    Setup the flask modules used in the app
    :param flask_app: the app
    """
    # Imports are local so the modules are only pulled in when an app is built.
    from ..flask_modules.celery import setup_celery
    from ..flask_modules.database import setup_db
    from ..flask_modules.mail import setup_mail
    from ..flask_modules.jwt import setup_jwt
    from ..flask_modules.cors import setup_cors
    from ..flask_modules.logging import setup_logging
    # NOTE(review): call order looks deliberate (logging first, celery last) —
    # preserve it when adding modules.
    setup_logging(flask_app)
    setup_db(flask_app)
    setup_mail(flask_app)
    setup_jwt(flask_app)
    setup_cors(flask_app)
    setup_celery(flask_app)
def create_app() -> App:
    """:return: lazily created and initialized app"""
    # Connexion app wrapping a Flask app; the OpenAPI spec lives in ./swagger.
    new_app = App(__name__, specification_dir='swagger')
    setup_app(new_app.app)
    from ..flask_modules.connexion import TaggedSimpleResolver
    from . import controller
    # Routes from api.yaml resolve to handlers in `controller`; responses are
    # validated against the spec; UI/docs served under /api.
    new_app.add_api('api.yaml',
                    validate_responses=True,
                    resolver=TaggedSimpleResolver(controller),
                    swagger_url='/api')
    # Trivial liveness endpoint for load balancers / health checks.
    new_app.add_url_rule('/', 'health', lambda: 'ok')
    return new_app
app = create_app()
|
from film_details_searcher.models.movie import Movie
from film_details_searcher.scrappers.custom_headers import CUSTOM_HEADERS
from film_details_searcher.scrappers.movie_service import MovieService
from bs4 import BeautifulSoup
import requests
class FilmwebMovieService(MovieService):
    """MovieService implementation that scrapes movie details from a Filmweb page."""

    def _fetch_movie_from_link(self, link_to_movie):
        """Download the movie page at `link_to_movie` and parse it into a Movie."""
        response = requests.get(link_to_movie, headers=CUSTOM_HEADERS)
        return FilmwebMovieService._parse_request_to_movie(response)

    @staticmethod
    def _parse_request_to_movie(request):
        """Extract title, plot, year, runtime and rating from the page HTML.

        Raises AttributeError if any expected element is missing (find()
        returns None and `.text` then fails).
        """
        soup = BeautifulSoup(request.text, "html.parser")
        title = soup.findChild("h1", {"class": "filmCoverSection__title"}).text
        description_pl = soup.find("div", {"class": "filmPosterSection__plot"}).text
        premiere_year = soup.find("span", {"class": "filmCoverSection__year"}).text
        movie_time = soup.find("span", {"class": "filmCoverSection__filmTime"}).text
        # Bug fix: the attrs argument was the SET {"class", "filmRating__rateValue"}
        # (comma typo for colon), not a dict, so the class filter was broken.
        movie_rating_value = soup.find("span", {"class": "filmRating__rateValue"}).text
        movie_rating_count = soup.find("span", {"class": "filmRating__count"}).text
        movie_details = {"title": title, "description_pl": description_pl, "premiere_year": premiere_year,
                         "movie_time": movie_time, "movie_rating_value": movie_rating_value,
                         "movie_rating_count": movie_rating_count}
        return Movie.parse(movie_details)
|
"""
****************************************
Create a folder named "Original_image"
And put the carrier image in that folder
To get more information see line no. 75
****************************************
"""
import os
import shutil
from PIL import Image
from pathlib import Path
#encoding part :
#encoding part :
def encode_image(img, msg):
    """Hide `msg` in a copy of `img` via the red channel.

    Pixel (0, 0) stores len(msg) in its red value; the red values of the next
    len(msg) pixels (row-major order) store the character codes. Remaining
    pixels keep their original red value. Returns the new image, or False if
    the message exceeds 255 characters (the length must fit one channel byte).

    NOTE(review): non-'RGB' modes are assumed to unpack to 4 channels (e.g.
    RGBA); a mode like 'L' would raise here. An empty msg raises IndexError at
    the first pixel (msg[-1]). Characters must have ord(c) < 256 — confirm
    ASCII-only input.
    """
    length = len(msg)
    if length > 255:
        print("text too long! (don't exeed 255 characters)")
        return False
    encoded = img.copy()
    width, height = img.size
    index = 0
    for row in range(height):
        for col in range(width):
            if img.mode != 'RGB':
                r, g, b ,a = img.getpixel((col, row))
            elif img.mode == 'RGB':
                r, g, b = img.getpixel((col, row))
            # first value is length of msg
            if row == 0 and col == 0 and index < length:
                asc = length
            elif index <= length:
                # index already advanced past the length pixel, so this
                # pixel carries msg[index - 1].
                c = msg[index -1]
                asc = ord(c)
            else:
                asc = r
            encoded.putpixel((col, row), (asc, g , b))
            index += 1
    return encoded
#decoding part :
def decode_image(img):
width, height = img.size
msg = ""
index = 0
for row in range(height):
for col in range(width):
if img.mode != 'RGB':
r, g, b ,a = img.getpixel((col, row))
elif img.mode == 'RGB':
r, g, b = img.getpixel((col, row))
# first pixel r value is length of message
if row == 0 and col == 0:
length = r
elif index <= length:
msg += chr(r)
index += 1
decoded_image_file = "decoded_image.png"
img.save(decoded_image_file)
print("Decoded image was saved!")
return msg
#driver part :
#deleting previous folders :
if os.path.exists("Encoded_image/"):
    shutil.rmtree("Encoded_image/")
if os.path.exists("Decoded_output/"):
    shutil.rmtree("Decoded_output/")
#creating new folders :
os.makedirs("Encoded_image/")
os.makedirs("Decoded_output/")
# NOTE(review): this loop relies on os.chdir() side effects — encoding leaves
# the process inside Encoded_image/, and the decode branch assumes it runs
# *after* an encode in the same session (it opens encoded_image.png relative
# to the current directory, then walks back up). Choosing "2" first will not
# find the file.
while True:
    m = input("To encode press '1', to decode press '2', press any other button to close: ")
    if m == "1":
        name_of_file = input("Enter the name of the file with extension : ")
        original_image_file = "Original_image/"+name_of_file
        img = Image.open(original_image_file)
        print("Description : ",img,"\nMode : ", img.mode)
        encoded_image_file = "encoded_image.png"
        secret_msg = input("Enter the message you want to hide: ")
        print("The message length is: ",len(secret_msg))
        img_encoded = encode_image(img, secret_msg)
        os.chdir("Encoded_image/")
        img_encoded.save(encoded_image_file) # saving the image with the hidden text
        print("Encoded image was saved!")
    elif m == "2":
        encoded_image_file = "encoded_image.png"
        img = Image.open(encoded_image_file)
        os.chdir("..") #going back to parent directory
        os.chdir("Decoded_output/")
        hidden_text = decode_image(img)
        # `file` shadows the builtin name here.
        file = open("hidden_text.txt","w")
        file.write(hidden_text) # saving hidden text as text file :
        file.close()
        print("Hidden text saved as text file")
    else:
        print("Closed!")
        break
# Puzzle input: grid serial number and the edge length of the fuel-cell grid.
SERIAL = 5235
SIZE = 300
def power_level(x, y, serial):
    """Fuel-cell power at grid coordinate (x, y) for the given serial.

    Power is the hundreds digit of ((x + 10) * y + serial) * (x + 10),
    shifted down by 5 (range -5..4).
    """
    rack_id = x + 10
    hundreds_digit = ((rack_id * y + serial) * rack_id // 100) % 10
    return hundreds_digit - 5
def get_powers(serial):
    """Precompute the power of every cell on the SIZE x SIZE grid (1-based)."""
    coords = ((x, y) for x in range(1, SIZE + 1) for y in range(1, SIZE + 1))
    return {coord: power_level(coord[0], coord[1], serial) for coord in coords}
def square_power(powers, x, y, size):
    """Total power of the size x size square whose top-left cell is (x, y)."""
    total = 0
    for dx in range(size):
        for dy in range(size):
            total += powers[x + dx, y + dy]
    return total
def get_max_square(powers, size):
    """Best square of the given edge length.

    Returns ((x, y), power): the top-left coordinate of the size x size
    square with the maximum total power, and that total.
    """
    # Bug fix: the upper bound was `SIZE - size`, which skipped the last two
    # valid start positions; a square of edge `size` may start anywhere up to
    # SIZE - size + 1 inclusive.
    return max({
        (x, y): square_power(powers, x, y, size=size)
        for x in range(1, SIZE - size + 2)
        for y in range(1, SIZE - size + 2)
    }.items(), key=lambda item: item[1])
def part_1(serial):
    """Coordinates of the best 3x3 square, formatted "x,y"."""
    (x, y), _ = get_max_square(get_powers(serial), size=3)
    return f"{x},{y}"
def part_2(serial):
    """Best square of any size, formatted "x,y,size".

    Greedy heuristic: grow the square size and stop at the first size whose
    best total power decreases, reporting the previous size's winner.
    """
    powers = get_powers(serial)
    last_power = 0
    last_coordinates = None
    for size in range(1, SIZE + 1):
        coordinates, power = get_max_square(powers, size=size)
        print(f"size={size} power={power}")
        if power < last_power:
            x, y = last_coordinates
            return f"{x},{y},{size - 1}"
        last_power = power
        last_coordinates = coordinates
    # Bug fix: previously fell off the loop returning None if the best power
    # never decreased; report the final (full-grid) square instead.
    x, y = last_coordinates
    return f"{x},{y},{SIZE}"
if __name__ == '__main__':
    # Part 2 brute-forces every square size (O(size^2) per square) and is slow.
    print(f"Part 1: {part_1(SERIAL)}")
    print(f"Part 2: {part_2(SERIAL)}")
|
# Lowest address of Windows kernel space on 32-bit (2 GB user / 2 GB kernel split).
WINDOWS_KERNEL_BOUND = 0x80000000
# Discovered lazily while tracing: addresses of the sysenter/sysexit instructions.
WINDOWS_SYSENTER = None
WINDOWS_SYSEXIT = None #0x804de904
# Counters: instructions executed and context switches observed.
ins_count = 0
ctx_switches = 0
class StopExecution(BaseException):
    # Raised by user callbacks to abort tracing. Derives from BaseException so
    # the broad `except Exception` in in_process() does not swallow it.
    pass
def init():
    """Configure gdb for unattended tracing: unlimited output height, no
    paging, and all command output redirected to /dev/null (log() briefly
    lifts the redirection to print to the console)."""
    gdb.execute("set height 0")
    gdb.execute("set pagination off")
    gdb.execute("set logging redirect on")
    gdb.execute("set logging file /dev/null")
    gdb.execute("set logging on")
def finit():
    """Undo init(): stop the output redirection and delete all breakpoints."""
    gdb.execute("set logging off")
    gdb.execute("d br")
def log(buff):
    """Print to the real console by briefly lifting the /dev/null redirection."""
    gdb.execute("set logging off")
    print( buff )
    gdb.execute("set logging on")
def execute(cmd):
    """Run a gdb command with its output visible (redirection suspended)."""
    gdb.execute("set logging off")
    gdb.execute(cmd)
    gdb.execute("set logging on")
def is_our_process(process_marks):
    """Check whether the current context belongs to the target process.

    process_marks maps virtual addresses to the dword expected at each; all
    must match. Unreadable memory (different address space / paged out) counts
    as "not our process". An empty mapping trivially returns True.
    """
    for addr, dword in process_marks.items():
        try:
            if gdb.execute("p *((int *) 0x%08x) == 0x%08x" % (addr, dword), False, True).find(' = 1') == -1:
                return False
        except Exception as e:
            # Memory not readable in the current context -> not our process.
            return False
    return True
def step():
    """Single-step one instruction, stepping *into* calls ("si"), and count it."""
    global ins_count
    gdb.execute("si")
    ins_count += 1
def step_out():
    """Step one instruction, stepping *over* calls ("ni"), and count it."""
    global ins_count
    gdb.execute("ni")
    ins_count += 1
def cont():
    """Continue execution until the next breakpoint or signal."""
    gdb.execute("c")
def bpx(eip):
    """Set a breakpoint at the given address."""
    gdb.execute("b *0x%08x" % eip)
'''
def bpx_del(eip):
for line in gdb.execute("i br", False, True).split('\n')[1:]:
if line.strip():
bpx_id = int( line.split()[0] )
if line.find("%08x" % eip) != -1:
gdb.execute("d br %d" % bpx_id)
with open('temp.txt','a') as o:
o.write("d br %d\n" % bpx_id)
'''
def ptr(addr):
    """Read the 32-bit word stored at `addr`; None if the memory is unreadable."""
    try:
        dump = gdb.execute("x/1wx %d" % addr, False, True)
        word = dump.split('\n')[0].split(':')[1].strip()
        return int(word, 16)
    except:
        return None
def disas(eip):
    """Disassembly text of the single instruction at `eip`."""
    listing = gdb.execute("x/1i %d" % eip, False, True)
    first_line = listing.split('\n')[0]
    return first_line.split(':')[1].strip()
def get_register(register):
    """Current value of a named register as an int, e.g. get_register('eax')."""
    return int( gdb.parse_and_eval("$%s" % register) )
def get_registers():
    """Snapshot of known CPU registers as a {name: int value} mapping.

    Parses `maint print cooked-registers`: first column is the register name,
    last column its raw hex value; only names in the whitelist are kept.
    """
    registers = {}
    registers_name = (
        'eax','ax','ah','al',
        'ecx','cx','ch','cl',
        'edx','dx','dh','dl',
        'ebx','bx','bh','bl',
        'esp','sp',
        'ebp','bp',
        'esi','si',
        'edi','di',
        'eip',
        'eflags',
        'st0','st1','st2','st3','st4','st5','st6','st7',
        'xmm0','xmm1','xmm2','xmm3','xmm4','xmm5','xmm6','xmm7',
        'mm0','mm1','mm2','mm3','mm4','mm5','mm6','mm7'
    )
    for line in gdb.execute("maint print cooked-registers", False, True).split('\n')[1:]:
        try:
            register = line.split().pop(0)
            value = line.split().pop()
            if register in registers_name:
                registers.update( { register: int(value,16) } )
        except:
            # Blank or unparsable lines are skipped.
            pass
    return registers
def get_opcode(eip):
    """Raw machine-code bytes of the instruction at the current $eip.

    The instruction size is derived from the address of the *next* instruction
    in an `x/2i` listing.
    NOTE(review): the `eip` parameter is only used in the size arithmetic; the
    gdb commands always examine $eip — confirm callers pass get_eip().
    """
    hex_string = ''
    opcode_size = int( gdb.execute("x/2i $eip", False, True).split('\n')[1].split(':')[0].strip(), 16 ) - eip
    for line in gdb.execute("x/%dbx $eip" % opcode_size, False, True).split('\n'):
        if not line.strip():
            break
        # Each line is "addr: 0xAA\t0xBB..."; strip the 0x prefixes and concat.
        for byte in line.split(':')[1].strip().split('\t'):
            hex_string += byte[2:]
    return bytes.fromhex(hex_string)
def get_eip():
    """Current instruction pointer."""
    return get_register('eip')
def is_kernel(eip):
    """True when the address lies in Windows kernel space (>= 0x80000000)."""
    return eip >= WINDOWS_KERNEL_BOUND
kernel_ins_count = 0
def in_kernel():
    """Handle execution while EIP is in kernel space.

    Once the sysexit address is known, a breakpoint there lets the kernel run
    at full speed; until then the kernel is single-stepped (slow) so the
    sysexit instruction can be discovered.
    """
    global WINDOWS_SYSEXIT, kernel_ins_count, ctx_switches
    if not is_kernel( get_eip() ):
        return
    if WINDOWS_SYSEXIT:
        # Fast path: run to the known kernel-exit instruction.
        if gdb.execute("i br", False, True).find("%x" % WINDOWS_SYSEXIT) == -1:
            bpx(WINDOWS_SYSEXIT)
        cont()
        step()
        ctx_switches += 1
        # log("[i] leave kernel")
    else:
        # Slow path: single-step the kernel, watching for sysexit.
        while True:
            eip = get_eip()
            if not WINDOWS_SYSEXIT and disas(eip).find("sysexit") != -1:
                WINDOWS_SYSEXIT = eip
                log("[i] sysexit: 0x%08x" % eip)
            if not is_kernel(eip):
                # log("[i] in user R3")
                break
            step_out()
            kernel_ins_count += 1
            if not kernel_ins_count % 1000:
                log( "[0x%08x] (%d)" % ( eip, kernel_ins_count ) )
        ctx_switches += 1
    if not ctx_switches % 1000:
        log( "[i] contexts switches: %d" % ctx_switches )
user_ins_count = 0
def in_process( callback, process_marks={} ):
    """Handle execution while EIP is in user space.

    If the current context is NOT the target process (checked via
    process_marks), run to the known sysenter breakpoint — or single-step
    until sysenter is discovered. If it IS the target process, invoke
    `callback` on every instruction until execution enters the kernel.

    NOTE(review): the mutable default for process_marks is never mutated here,
    so it is harmless, but `process_marks=None` is the safer idiom.
    """
    global WINDOWS_SYSENTER, user_ins_count, ctx_switches
    if is_kernel( get_eip() ):
        return
    if not is_our_process(process_marks):
        # log("[-] neighbor process")
        '''
        if WINDOWS_SYSENTER and disas(WINDOWS_SYSENTER).find('sysenter') == -1:
            bpx_del(WINDOWS_SYSENTER)
            breakpoint_sysenter = 0
            WINDOWS_SYSENTER = 0
        '''
        if WINDOWS_SYSENTER:
            # Fast path: run the foreign process to the syscall entry point.
            if gdb.execute("i br", False, True).find("%x" % WINDOWS_SYSENTER) == -1:
                bpx(WINDOWS_SYSENTER)
            cont()
            step()
            ctx_switches += 1
            # log("[i] entering in kernel")
        else:
            # Slow path: single-step the foreign process, watching for sysenter.
            while True:
                eip = get_eip()
                user_ins_count += 1
                if not user_ins_count % 1000:
                    log( "[0x%08x]: (%d)" % ( eip, user_ins_count ) )
                if not WINDOWS_SYSENTER and disas(eip).find("sysenter") != -1:
                    WINDOWS_SYSENTER = eip
                    log("[i] sysenter: 0x%08x" % eip)
                    break
                if is_kernel(eip):
                    log("[i] neighbor process R3 -> kernel R0")
                    break
                step()
            ctx_switches += 1
    else:
        log("[+] return to process")
        while not is_kernel( get_eip() ):
            try:
                callback()
            except StopExecution as e:
                # Deliberate abort requested by the callback — propagate.
                raise e
            except Exception as e:
                # Callback errors are logged but must not stop the trace.
                log( "[!] %s" % str(e) )
            step()
            ctx_switches += 1
        # log("[*] in kernel R0")
if __name__ == '__main__':
    # Smoke test: configure logging, dump all registers, tear down.
    init()
    for reg,val in get_registers().items():
        log( "%s: 0x%x" % (reg,val) )
    finit()
""" pop removes the last item, or the item at a particular index.
pop returns the item that was removed
You can ignore the item, if you don't need it, or use it if you do.
"""
# NOTE: pop raises IndexError on an empty list or an out-of-range index.
colleges = ['Minneapolis College',
            'Metro State',
            'Saint Paul College',
            'North Hennepin Community College',
            'Century College']
# Remove the last list item. Use pop with no arguments.
last_college = colleges.pop() # 'Century College'
print(last_college)
print(colleges) # ['Minneapolis College', 'Metro State', 'Saint Paul College', 'North Hennepin Community College']
# Remove the item at an index - use pop with that index
second_college = colleges.pop(1)
print(second_college) # Metro State
print(colleges) # ['Minneapolis College', 'Saint Paul College', 'North Hennepin Community College']
# You can ignore the item, if you don't need it. Perhaps you know you don't need element 0 any more.
colleges.pop(0)
print(colleges) # ['Saint Paul College', 'North Hennepin Community College']
|
# solved
import sys
# Validate CPF numbers read from stdin, one per line, expected in the format
# ddd.ddd.ddd-dd (nine digits with dots, then a dash and two check digits).
for line in sys.stdin:
    # Nine base digits (dots removed); the two check digits come after the dash.
    cpf = list(map(int, line.replace('.', '')[:9]))
    digits = list(map(int, line.replace('\n', '')[-2:]))
    # Check digits: position-weighted sums mod 11, then mod 10 (this problem's
    # variant of the CPF checksum).
    first = (sum([cpf[i] * (i + 1) for i in range(len(cpf))]) % 11) % 10
    second = (sum([cpf[i] * (len(cpf) - i) for i in range(len(cpf))]) % 11) % 10
    if first == digits[0] and second == digits[1]:
        print("CPF valido")
    else:
        print("CPF invalido")
    # print(f"{first}-{second}")
|
# Filters twitter json file text field
# python -OO twep.py TWITTER_FILE REGEX
import sys
import re
import json
import datetime
# Reference epoch: tweet timestamps are emitted as seconds since this moment.
# The format mirrors Twitter's created_at field, e.g. "Wed, 12 Aug 2012 01:23:04 +0000"
# (strptime does not cross-check %a against the date, so the weekday is not validated).
e = datetime.datetime.strptime("Wed, 12 Aug 2012 01:23:04 +0000", "%a, %d %b %Y %X +0000")
def main():
    """Read one-JSON-object-per-line tweets from the file named in sys.argv[1],
    stamp each with `time_s` (integer seconds since the reference epoch `e`)
    and write "<seconds> <json>" lines to stdout.

    NOTE(review): the usage comment mentions a REGEX argument, but sys.argv[2]
    is currently unused.
    """
    # Bug fix: `file()` is a Python-2-only builtin and the handle was never
    # closed on error; `with open(...)` works on both 2 and 3 and guarantees
    # cleanup even if a line fails to parse.
    with open(sys.argv[1]) as f:
        for line in f:
            j = json.loads(line)
            d = datetime.datetime.strptime(j["created_at"], "%a, %d %b %Y %X +0000")
            total_seconds = (d - e).total_seconds()
            j["time_s"] = int(total_seconds)
            sys.stdout.write("%d %s\n" % (total_seconds, json.dumps(j)))
# Script entry point.
if __name__ == "__main__":
    main()
|
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import tensorflow as tf
from tensorflow.python.framework import ops
import cnn_utils
np.random.seed(1)
def create_placeholders(n_H0, n_W0, n_C0, n_y):
    """TF1 placeholders: X for image batches (None, H, W, C), Y for one-hot labels (None, n_y)."""
    X = tf.placeholder(tf.float32,[None, n_H0, n_W0, n_C0])
    Y = tf.placeholder(tf.float32,[None, n_y])
    return X,Y
def initialize_parameters():
    """Create the two conv filter banks with Xavier init and a fixed seed:
    W1: 4x4, 3 -> 8 channels; W2: 2x2, 8 -> 16 channels.
    NOTE: tf.contrib is TF1-only."""
    tf.set_random_seed(1)
    W1 = tf.get_variable("W1", [4, 4, 3, 8], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    W2 = tf.get_variable("W2", [2, 2, 8, 16], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    parameters = {"W1": W1, "W2": W2}
    return parameters
def forward_propagation(X, parameters):
    """Forward pass: CONV(W1)->RELU->MAXPOOL(8x8) -> CONV(W2)->RELU->MAXPOOL(4x4)
    -> FLATTEN -> FC with 6 raw logits (no activation; softmax is applied in the cost)."""
    W1 = parameters['W1']
    W2 = parameters['W2']
    X = tf.nn.conv2d(X, W1, [1, 1, 1, 1], padding="SAME")
    X = tf.nn.relu(X)
    X = tf.nn.max_pool(X, [1, 8, 8, 1], [1, 8, 8, 1], padding="SAME")
    X = tf.nn.conv2d(X, W2, [1, 1, 1, 1], padding="SAME")
    X = tf.nn.relu(X)
    X = tf.nn.max_pool(X, [1, 4, 4, 1], [1, 4, 4, 1], padding="SAME")
    X = tf.layers.flatten(X)
    X = tf.contrib.layers.fully_connected(X, 6, activation_fn=None)
    return X
def compute_cost(Z3,Y):
    """Mean softmax cross-entropy between logits Z3 and one-hot labels Y.
    NOTE(review): softmax_cross_entropy_with_logits is deprecated in later TF1
    releases in favour of the _v2 variant — confirm the pinned TF version."""
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z3,labels=Y))
    return cost
def model(X_train, Y_train, X_test, Y_test, learning_rate=0.005,
          num_epochs=100,minibatch_size=64,print_cost=True,isPlot=True):
    """Build and train the CNN in TF1 graph mode with Adam, then evaluate.

    Returns (train_accuracy, test_accuracy, parameters).
    """
    ops.reset_default_graph()
    tf.set_random_seed(1)
    seed = 3
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []
    X,Y = create_placeholders( n_H0, n_W0, n_C0,n_y)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X,parameters)
    cost = compute_cost(Z3,Y)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(num_epochs):
            minibatch_cost = 0
            num_minibatches = int(m / minibatch_size) # number of minibatches per epoch
            # Re-shuffle the minibatches every epoch with a rolling seed.
            seed = seed + 1
            minibatches = cnn_utils.random_mini_batches(X_train, Y_train, minibatch_size, seed)
            for minibatch in minibatches:
                (minibatch_X, minibatch_Y) = minibatch
                _, temp_cost = sess.run([optimizer,cost],feed_dict={X:minibatch_X,Y:minibatch_Y})
                minibatch_cost += temp_cost / num_minibatches
            if print_cost:
                # print every 5 epochs
                if epoch % 5 == 0:
                    print("当前是第 " + str(epoch) + " 代,成本值为:" + str(minibatch_cost))
            # record the cost
            if epoch % 1 == 0:
                costs.append(minibatch_cost)
        # training finished - plot the cost curve
        if isPlot:
            plt.plot(np.squeeze(costs))
            plt.ylabel('cost')
            plt.xlabel('iterations (per tens)')
            plt.title("Learning rate =" + str(learning_rate))
            plt.show()
        # tf.arg_max is the deprecated TF1 alias of tf.argmax.
        predict_op = tf.arg_max(Z3, 1)
        corrent_prediction = tf.equal(predict_op, tf.arg_max(Y, 1))
        # compute the accuracy
        accuracy = tf.reduce_mean(tf.cast(corrent_prediction, "float"))
        print("corrent_prediction accuracy= " + str(accuracy))
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuary = accuracy.eval({X: X_test, Y: Y_test})
        print("训练集准确度:" + str(train_accuracy))
        print("测试集准确度:" + str(test_accuary))
        return (train_accuracy, test_accuary, parameters)
if __name__ == '__main__':
    # Load the dataset via cnn_utils, scale pixels to [0, 1] and one-hot
    # encode the 6-class labels, then train for 150 epochs.
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = cnn_utils.load_dataset()
    X_train = X_train_orig / 255.
    X_test = X_test_orig / 255.
    Y_train = cnn_utils.convert_to_one_hot(Y_train_orig, 6).T
    Y_test = cnn_utils.convert_to_one_hot(Y_test_orig, 6).T
    print("number of training examples = " + str(X_train.shape[0]))
    print("number of test examples = " + str(X_test.shape[0]))
    print("X_train shape: " + str(X_train.shape))
    print("Y_train shape: " + str(Y_train.shape))
    print("X_test shape: " + str(X_test.shape))
    print("Y_test shape: " + str(Y_test.shape))
    _,_, parameters = model(X_train, Y_train, X_test, Y_test, num_epochs=150)
|
# O(n*log(k))
# n = n | k = len(primes)
import heapq
class Solution:
    def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:
        """Return the n-th super ugly number (positive integers whose prime
        factors all come from `primes`; the first is 1).

        A min-heap of (candidate value, prime index, index of the ugly number
        it multiplies) yields candidates in order; duplicates are drained
        after each emission so every popped value is new.
        Complexity: O(n log k) with k = len(primes).
        """
        heap = [(prime, idx, 0) for idx, prime in enumerate(primes)]
        found = [1]

        def advance(prime_idx, pos):
            # Re-arm this prime with the next ugly number as its multiplier.
            heapq.heappush(heap, (primes[prime_idx] * found[pos + 1], prime_idx, pos + 1))

        while len(found) < n:
            value, prime_idx, pos = heapq.heappop(heap)
            found.append(value)
            advance(prime_idx, pos)
            # Drain every candidate equal to the value just emitted.
            while heap[0][0] <= found[-1]:
                _, dup_prime, dup_pos = heapq.heappop(heap)
                advance(dup_prime, dup_pos)
        return found[-1]
"""
Homework 9 Errors
"""
class HomeworkError(Exception):
    """Common base for all homework validation errors.

    Improvement: the six error classes previously inherited Exception
    directly; a shared base lets callers `except HomeworkError` as a
    catch-all while every existing `except <SpecificError>` still works.
    """


class InvalidCharactersError(HomeworkError):
    """Raised when the input contains invalid characters."""


class NumberTooBigError(HomeworkError):
    """Raised when the number is too big."""


class NumberTooSmallError(HomeworkError):
    """Raised when the number is too small."""


class NumberLessThanLowerLimitError(HomeworkError):
    """Raised when the number is below the lower limit."""


class NumberGreaterThanUpperLimitError(HomeworkError):
    """Raised when the number is above the upper limit."""


class NumberOutOfRangeError(HomeworkError):
    """Raised when the number is outside the allowed range."""
|
import unittest
from rgen import *
from xml.etree.ElementTree import Element, SubElement, tostring
from collections import namedtuple
import os
Field = namedtuple('Field', ['name', 'width', 'default'])
class create_xml_reg_file:
    """Builds a <registers> XML document for the register generator and writes it to disk."""

    def __init__(self, filename, name, asize=16, dsize=16):
        # Output path plus address/data bus widths recorded on the root element.
        self.filename = filename
        self.asize = asize
        self.dsize = dsize
        root_attribs = {'name': name, 'addr_sz': str(asize), 'data_sz': str(dsize)}
        self.tree = Element('registers', attrib=root_attribs)

    def add_register(self, rname="reserved", typ='status', width=1, fields=None):
        """Append a <register>; with `fields` given, each Field becomes a
        <field> child and the register-level width attribute is omitted."""
        fields = fields or []
        reg_attribs = {'name': rname, 'type': typ}
        if not fields:
            reg_attribs['width'] = str(width)
        register = SubElement(self.tree, "register", attrib=reg_attribs)
        for fld in fields:
            SubElement(register, "field", attrib=fld._asdict())

    def dump(self):
        """Serialize the XML tree to self.filename."""
        with open(self.filename, 'w') as fh:
            fh.write(tostring(self.tree).decode('utf-8'))
class create_xml_dec_file:
    """Builds an <it_decoder> XML document (address-decoder ranges) and writes it to disk."""

    def __init__(self, filename, name, asize=16, dsize=16):
        self.filename = filename
        self.asize = asize
        self.dsize = dsize
        root_attribs = {'name': name, 'addr_sz': str(asize), 'data_sz': str(dsize)}
        self.tree = Element('it_decoder', attrib=root_attribs)

    def add_range(self, prefix, base, bits):
        """Append a <range> element describing one decoded address window."""
        range_attribs = {'prefix': prefix, 'base': str(base), 'bits': str(bits)}
        SubElement(self.tree, 'range', attrib=range_attribs)

    def dump(self):
        """Serialize the XML tree to self.filename."""
        with open(self.filename, 'w') as fh:
            fh.write(tostring(self.tree).decode('utf-8'))
class rgenTestSuite(unittest.TestCase):
    """End-to-end tests: build register/decoder XML inputs, run parse_file
    (imported from rgen), and check the Verilog output file is produced.
    NOTE(review): the generated .v files are never cleaned up, only the XML.
    """
    def test_rgen(self):
        # Ten config registers of increasing width, no fields.
        testfile = "test_file1.xml"
        blockname = "rgen1"
        f = create_xml_reg_file(filename=testfile, name=blockname)
        for r in range(10):
            f.add_register(rname="register_{}".format(r), typ='config', width=r + 2)
        f.dump()
        parse_file(testfile, {})
        self.assertTrue(os.path.isfile(blockname + ".v"))
        os.unlink(testfile)
    def test_fields(self):
        # One register built from named Field entries instead of a width.
        testfile = "test_file2.xml"
        blockname = "rgen2"
        f = create_xml_reg_file(filename=testfile, name=blockname)
        flds = [Field(name="field{}".format(x), width=str(x), default=str(x)) for x in range(2, 5)]
        f.add_register(rname="has_fields", typ='config', fields=flds)
        f.dump()
        parse_file(testfile, {})
        self.assertTrue(os.path.isfile(blockname + ".v"))
        os.unlink(testfile)
    def test_decode(self):
        # Ten decoder ranges with Verilog-literal base addresses.
        testfile = "test_decode.xml"
        blockname = "dec1"
        f = create_xml_dec_file(filename=testfile, name=blockname)
        for r in range(10):
            f.add_range(prefix='prefix{}'.format(r), base='16\'h{0:02x}00'.format(r), bits=8)
        f.dump()
        parse_file(testfile, {})
        self.assertTrue(os.path.isfile(blockname + ".v"))
        os.unlink(testfile)
# Run the suite when invoked directly.
if __name__ == "__main__":
    unittest.main()
|
import numpy as np
import matplotlib.pylab as plt
import pandas as pd
from scipy.optimize import fmin,fmin_slsqp,minimize,differential_evolution
from scipy.signal import hilbert,savgol_filter
from scipy import stats
import seaborn as sns
import sys
sys.path.insert(0, '../Helper')
sys.path.insert(0, '../Superconductivity')
from ExpandingFunction import expandFuncWhile
from plotxy import plot # TODO import from Helper
from Gaussian import gaussian
from IV_Curve_Simulations import iV_Chalmers,iV_Curve_Gaussian_Convolution_with_SubgapResistance
#from IV_Curve_Simulations import iV_Curve_Gaussian_Convolution
# Global seaborn/matplotlib styling applied to every plot from this module.
sns.set_style("whitegrid",
              {'axes.edgecolor': '.2',
               'axes.facecolor': 'white',
               'axes.grid': True,
               'axes.linewidth': 0.5,
               'figure.facecolor': 'white',
               'grid.color': '.8',
               'grid.linestyle': u'-',
               'legend.frameon': True,
               'xtick.color': '.15',
               'xtick.direction': u'in',
               'xtick.major.size': 3.0,
               'xtick.minor.size': 1.0,
               'ytick.color': '.15',
               'ytick.direction': u'in',
               'ytick.major.size': 3.0,
               'ytick.minor.size': 1.0,
               })
sns.set_context("poster")
#areaSingleJunction = np.pi*1.38*1.38/4.
#areaDoubleJunction = 2*np.pi*1.12*1.12/4.
#print ('Junction seizes', areaSingleJunction, areaDoubleJunction)
#Define the kwords of IV_Response
# Default IV_Response kwargs for raw lab data files (see IV_Response.__init__
# for the meaning of each key).
kwargs_IV_Response_rawData = {
    'filenamestr':None,
    'headerLines':0, # 1 for John's data
    'footerLines':1,
    'columnOffset':1,#0 for John's data
    'currentFactorToMicroampere':1,#1000 for Johns data
    'junctionArea':None,
    'normalResistance300K':None,
    'numberOfBins':2001,
    'vmin':-10,
    'vmax':10,
    'vGapSearchRange':np.array([2.5,3.2]),
    'rNThresholds':[4.5,10],
    'rSGThresholds':[1.2,1.8],
    'offsetThreshold' : .5,
    'savgolWindow': 91,
    'savgolOrder' : 3, # results in a steeper curve
    'fixedOffset' : None,
    'simulationVoltageSteps':1e-3,
    'simulationVmin':-6,
    'simulationVmax': 6,
    'simulation_Sigma_Gaussian_Convolution_Guess':0.07,
    'skip_IV_analysis':False,
    'skip_IV_simulation':True
}
# Default IV_Response kwargs for John's data format (different column layout,
# current scaling and voltage range).
kwargs_IV_Response_John = {
    'filenamestr':None,
    'headerLines':1,
    'footerLines':1,
    'columnOffset':0,
    'currentFactorToMicroampere':1000,
    'junctionArea':None,
    'normalResistance300K':None,
    'numberOfBins':2001,#1001
    'vmin':-6,
    'vmax': 6,
    'vGapSearchRange':np.array([2.5,3.2]),
    'rNThresholds':[3.2,6],
    'rSGThresholds':[1.2,1.8],#[1.9,2.1],#[
    'offsetThreshold' : .5,
    'savgolWindow': 91,
    'savgolOrder' : 3, # results in a steeper curve
    'fixedOffset' : None,
    'simulationVoltageSteps':1e-4, #quite high to be able to do the convolution properly
    'simulationVmin':-15,
    'simulationVmax': 15,
    'simulation_Sigma_Gaussian_Convolution_Guess':0.07,
    'skip_IV_analysis':False,
    'skip_IV_simulation':True
}
class IV_Response():
'''This class is used to contain the IV curve of a dataset and to compute the charteristic values
'''
    def __init__(self,filename,**kwargs):
        '''The initialisation of the class.
        params
        ------
        filename: string or array
            The name of the file containing the dataset.
        **kwargs
        --------
        headerLines: int
            The number of header lines in the containing file.
        footerLines: int
            The number of irrelevant lines at the end of the containing file.
        columnOffset: int
            The number of columns in the containing file before the voltage column.
        currentFactorToMicroampere: float
            The factor to get the current in microampere
        junctionArea: float
            Area of the junction in um^2
        normalResistance300K: float
            The normal resistance of the junction at 300 K to compute the RRR value
        numberOfBins: int
            The number of bins used to bin the dataset
        vmin: int
            The minimum voltage (mV)
        vmax: int
            The maximum voltage (mV)
        vGapSearchRange: 2 element np array [lowerBoundaryVoltage, upperBoundaryVoltage]
            The minimum and maximum voltage value where the gap voltage should be searched for.
        rNThreshold: float
            Defines the values involved in the linear regression to obtain the rN value.
            above rNThreshold and below -rNThreshold values are involved for the linear regression
        rSGThreshold: float
            Defines the values involved in the linear regression to obtain the rSG value.
            above rSGThreshold and below -rSGThreshold values are involved for the linear regression
        offsetThreshold: float
            The negative and positive voltage within which the offset is searched for.
        fixedOffset: 2 element array or None
            The value for a fixed voltage [0] and current offset [1].
            If the value is None the offset is computed from the data.
        simulationVoltageSteps: float
            The voltage step size for simulated IV curves.
        simulationVmin: float
            The minimum voltage of the simulated IV curve.
        simulationVmax: float
            The maximum voltage of the simulated IV curve.
        simulation_Sigma_Gaussian_Convolution_Guess: float
            The guess value for the standard deviation of the gaussian, which is used to convolve and compute simulated IV curves.
        skip_IV_analysis: bool
            Decides if the characteristic values are determined.
            This might be useful in case the data set contains only a portion of the IV curve, like only the subgap region.
            Note that no offset correction is performed as well except it is defined in the fixedOffset parameter.
        skip_IV_simulation: bool
            Decides if a simulated IV curve is set during the initialisation of the class.
        '''
        #preserve parameters (kwargs become instance attributes)
        self.__dict__.update(kwargs)
        self.filename = filename
        if isinstance(filename,str):
            #Pandas raw data
            self.pdData = pd.read_csv(self.filename, sep=',',engine='python',header=None,skiprows=self.headerLines,skipfooter=self.footerLines)
            #2D Array containing the IV dataset
            self.rawIVData=np.array([self.pdData[self.columnOffset].values,self.pdData[self.columnOffset+1].values*self.currentFactorToMicroampere])
        else: # filename is an array
            self.pdData=[]
            self.rawIVData = []
            for f in filename: #Read in the individual files
                self.pdData.append(pd.read_csv(f, sep=',',engine='python',header=None,skiprows=self.headerLines,skipfooter=self.footerLines))
                self.rawIVData.append(np.array([self.pdData[-1][self.columnOffset].values,self.pdData[-1][self.columnOffset+1].values*self.currentFactorToMicroampere]))
            try:
                self.rawIVData = np.hstack(self.rawIVData) # Merge x and y axis
            except ValueError:
                # NOTE(review): on failure rawIVData stays a list of arrays and only
                # diagnostics are printed; the indexing below will then fail.
                print(self.rawIVData)
                print(self.filename)
        #2D Array containing the IV dataset sorted by increasing voltage
        order = self.rawIVData[0].argsort()
        self.sortedIVData =np.array( [self.rawIVData[0,order],self.rawIVData[1,order] ] )
        # more complicated sort
        #self.sortedIVData=self.rawIVData.T[np.lexsort((self.rawIVData[0],self.rawIVData[1]))].T
        self.unsortedSlope = self.slope_calc(self.rawIVData)
        self.sortedSlope = self.slope_calc(self.sortedIVData) # for comparison of the slope from unsorted data
        # Offset: computed from data, taken from fixedOffset, or zero.
        if self.fixedOffset == None and not self.skip_IV_analysis:
            self.offset = self.offset_determination()
        elif not self.fixedOffset == None:
            self.offset = self.fixedOffset
        else: self.offset = [0,0]
        #kind of redundant to express these values separately TODO?
        self.voltageOffset = self.offset[0]
        self.currentOffset = self.offset[1]
        self.offsetCorrectedRawIVData = self.offset_Correction(self.rawIVData)
        self.offsetCorrectedSortedIVData = self.offset_Correction(self.sortedIVData)
        #All further modifications are done with offset corrected data.
        self.savgolIV = self.savgol_filter()
        self.savgolSlope = self.slope_calc(self.savgolIV)
        #Not working method, since too noisy:
        #self.averagedIVData = self.averagedIVData_calc()
        self.binWidth = self.binWidth_calc()
        self.binedIVData = self.binedIVData_calc(self.savgolIV)
        self.unfilteredBinedIVData = self.binedIVData_calc(self.offsetCorrectedSortedIVData) #for comparison reasons
        self.binSlope = self.slope_calc(self.binedIVData)
        self.unfilteredBinSlope = self.slope_calc(self.unfilteredBinedIVData)
        if not self.skip_IV_analysis:
            # Characteristic junction values from linear regressions and slopes.
            self.rN_LinReg = self.rN_LinReg_calc(self.offsetCorrectedSortedIVData[0],self.offsetCorrectedSortedIVData[1])
            self.rSG_LinReg = self.rSG_LinReg_calc()
            self.rN = self.rN_calc()
            self.rNsigma = self.rNsigma_calc()
            self.rSG = self.rSG_calc()
            self.rSGsigma = self.rSGsigma_calc()
            self.rSGrN = self.rSGrN_calc()
            # NOTE(review): rSGrNsigma is assigned from rSGrN_calc() — a
            # rSGrNsigma_calc() was probably intended; confirm.
            self.rSGrNsigma = self.rSGrN_calc()
            self.maxSlopeVgapAndCriticalCurrent = self.maxSlopeVgapAndCriticalCurrent_calc(self.binedIVData,self.binSlope,True)
            self.gapVoltage = self.gapVoltage_calc(self.maxSlopeVgapAndCriticalCurrent)
            self.criticalCurrent_from_max_slope = self.criticalCurrent_from_max_slope_calc() # is lower than of Vgap/Rn
            self.criticalCurrent_from_gapVoltage_rN = self.criticalCurrent_from_gapVoltage_rN_calc()
            self.criticalCurrent = self.criticalCurrent_from_gapVoltage_rN
            #Outdated, since data is already offset corrected. The returned offset is wrong
            #self.currentOffsetByCrtiticalCurrent = self.currentOffsetByCrtiticalCurrent_calc()
            #self.currentOffsetByNormalResistance = self.currentOffsetByNormalResistance_calc()
            self.gaussianBinSlopeFit = self.gaussianBinSlopeFit_calc()
            self.information()
        #initiate a simulated IV curve. As default the convolution fit is used. This causes a huge delay during start up.
        if not self.skip_IV_simulation:
            self.set_simulatedIV(self.convolution_most_parameters_stepwise_Fit_Calc())
            self.simulated_binSlope = self.slope_calc(self.simulatedIV)
            self.simulated_maxSlopeVgapAndCriticalCurrent = self.maxSlopeVgapAndCriticalCurrent_calc(self.simulatedIV,self.simulated_binSlope,compute_Error = False)
            self.simulated_gapVoltage = self.gapVoltage_calc(self.simulated_maxSlopeVgapAndCriticalCurrent)
            self.simulated_gaussianBinSlopeFit = self.simulated_gaussianBinSlopeFit_calc()
            self.simulated_iKK = self.iKK_Calc(self.simulatedIV)
        # self.chalmers_Fit = self.chalmers_Fit_calc()
        # self.convolution_perfect_IV_curve_Fit = self.convolution_perfect_IV_curve_Fit_calc()
    #
def information(self):
'''This function prints and returns the characteristic parameters of the SIS junction.
TODO add standard deviation
'''
txt = ''
txt += 'Gap Voltage \t\t\t %.2f mV\n'%self.gapVoltage
txt += 'Critical Current \t\t %.1f uA\n'%self.criticalCurrent
if not self.junctionArea == None:
txt += 'Critical Current Density %.1f kA/cm$^2$\n'%(self.criticalCurrent/self.junctionArea*.1)
txt += 'Normal Resistance \t %.2f Ohm\n'%self.rN
txt += 'Subgap Resistance \t %.1f Ohm\n'%self.rSG
if not self.normalResistance300K == None:
txt += 'Subgap Resistance \t\t %.1f\n'%(self.normalResistance300K/self.rN)
txt += 'Voltage Offset \t\t\t%.3f mV\n'%self.offset[0]
txt += 'Current Offset \t\t\t %.2f uA'%self.offset[1]
print(txt)
return txt
    def plot_IV_with_Info(self,positionInfoBox=[-9.3,40,-9.3,20],linespacing =1.2, fontsize=8):
        '''This function plots the IV curve with the characteristic values in a box.
        Note: information() is called for the box text, so the summary is also
        printed to stdout as a side effect.
        inputs
        ------
        positionInfoBox: array
            Position o the info box. For more detail look up the plt.annotate documentation.
        linespacing: float
            The spacing between lines in the textbox.
        fontsize: integer
            The fontsize in the textbox
        '''
        plot(self.binedIVData)
        plt.annotate(self.information().expandtabs(),(positionInfoBox[0],positionInfoBox[1]),xytext=(positionInfoBox[2], positionInfoBox[3]),
                     linespacing=linespacing, size=fontsize, bbox=dict(boxstyle="round", fc="w",alpha=0.8) )
    def plot_slope_raw_unsorted(self):
        '''This function plots the slope of the raw unsorted data.
        '''
        plot(self.unsortedSlope)
    def savgol_filter(self):
        '''This function applies a Savitzky-Golay filter to remove noise from the data.
        Note: the method name shadows scipy.signal.savgol_filter as an attribute,
        but the bare name below still resolves to the SciPy function.
        returns
        -------
        2d array: [voltage, filtered current] of the offset-corrected sorted data.
        '''
        return np.vstack([self.offsetCorrectedSortedIVData[0],
                          savgol_filter(self.offsetCorrectedSortedIVData[1],self.savgolWindow,self.savgolOrder)])
    def averagedIVData_calc(self):
        '''This function averages adjacent datapoints to smoothen the IV curve.
        Five datapoints are merged (a width-5 moving average) to obtain the average.
        Note: not enough to get rid of noise in the transition region; unused in __init__.
        '''
        return np.vstack([np.mean(np.vstack([self.sortedIVData[0,:-4],self.sortedIVData[0,1:-3],self.sortedIVData[0,2:-2],self.sortedIVData[0,3:-1],self.sortedIVData[0,4:]]),axis=0),
                          np.mean(np.vstack([self.sortedIVData[1,:-4],self.sortedIVData[1,1:-3],self.sortedIVData[1,2:-2],self.sortedIVData[1,3:-1],self.sortedIVData[1,4:]]),axis=0)])
def binWidth_calc(self):
'''The width of a single voltage bin.
'''
return np.divide(self.vmax-self.vmin,self.numberOfBins)
def binedIVData_calc(self,ivData):
'''This function bins an IV data set into equispaced bins of the x axis.
Nan bins are removed.
inputs
------
ivData: 2d Array
The IV data which is bined.
returns
-------
ivData: 3d Array
[bin centers, bin means, bin standard deviation]
'''
#ivData = self.sortedIVData
numberOfBins=self.numberOfBins
vmin=self.vmin
vmax=self.vmax
bin_means, bin_edges, binnumber = stats.binned_statistic(ivData[0], ivData[1], statistic='mean', bins=numberOfBins,range=(vmin,vmax))
bin_std,_,_ = stats.binned_statistic(ivData[0], ivData[1], statistic='std', bins=numberOfBins,range=(vmin,vmax))
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width/2
returnarray = np.array([bin_centers,bin_means,bin_std])
return returnarray[:,np.logical_not(np.isnan(returnarray[1]))]
@property
def rrrValue(self):
'''The RRR value'''
if self.normalResistance300K == None:
print('No normal resistance at 300 K defined')
return None
else: return np.divide(self.normalResistance300K,self.normalResistance)
def slope_calc(self,ivData):
'''This function computes the slope of an 2d array.
inputs
------
ivData: 2d array
The dataset from which the slope is calculated.
[0] The x data.
[1] The y data.
returns
-------
2d array where the length is inputarray-1
[0] The average x data of the slope at (x1+x0)/2 .
[1] The normalised slope (y1-y0)/(x1-x0).
'''
with np.errstate(divide='ignore', invalid='ignore'):# This is necessary for evaluation of the raw data
return np.array([np.divide(np.add(ivData[0,1:],ivData[0,:-1]),2),
np.divide(np.subtract(ivData[1,1:],ivData[1,:-1]),np.subtract(ivData[0,1:],ivData[0,:-1]))])
    def offset_determination(self):
        '''This function is a wrapper for the implemented offset correction methods.
        The function requires user input and allows user friendly offset correction.

        The user is shown the raw IV curve together with four offset-corrected
        candidates and chooses one of them, or enters a manual voltage offset.
        returns
        -------
        1d array
            [0] The voltage offset.
            [1] The current offset.
        '''
        def addLegend():
            '''This function adds the legend to the plot.
            '''
            legend = plt.legend(loc='best', shadow=False,ncol=1)
            leg = plt.gca().get_legend()
            ltext = leg.get_texts() # all the text.Text instance in the legend
            llines = leg.get_lines() # all the lines.Line2D instance in the legend
            plt.setp(ltext, fontsize='small')
            plt.setp(llines, linewidth=1.5) # the legend line width
            #plt.rcParams.update({'legend.handlelength': .5})# the legend line length
            plt.tight_layout()
        print('\nOffset correction of %s'%self.filename)
        # Candidate (a): offsets from the slope of the raw data at the origin.
        origin = self.offset_from_raw_data()
        # Candidate (b): offsets from the gap transition.
        transition = self.offset_from_maxSlopeVgapAndCriticalCurrent()
        # Candidate (c): transition voltage offset combined with the current
        # offset from the normal-resistance fit.
        transitionVoltageRnCurrent = [transition[0],
                self.currentOffsetByNormalResistance_calc(self.sortedIVData[0],self.sortedIVData[1],transition[0])]
        # Candidate (d): origin voltage offset with the transition current offset.
        originVoltageTransitionCurrent = [origin[0],transition[1]]
        plt.figure()
        plot(self.rawIVData,label = 'raw')
        plt.plot(self.rawIVData[0]-origin[0],self.rawIVData[1]-origin[1],label='a')
        plt.plot(self.rawIVData[0]-transition[0],self.rawIVData[1]-transition[1], label='b')
        plt.plot(self.rawIVData[0]-transitionVoltageRnCurrent[0],self.rawIVData[1]-transitionVoltageRnCurrent[1], label='c')
        plt.plot(self.rawIVData[0]-originVoltageTransitionCurrent[0],self.rawIVData[1]-originVoltageTransitionCurrent[1], label='d')
        addLegend()
        closeWindowString = 'Close the window with the plot to enter your joice.'
        print('Which option is the best? \n a = %r,\n b = %r,\n c = %r,\n d = %r\n or voltage offset as float.\n%s'%(origin,transition,transitionVoltageRnCurrent,originVoltageTransitionCurrent,closeWindowString))
        plt.show()
        userInput = input('What is your joice?\n')
        if userInput == 'a': return origin
        elif userInput == 'b': return transition
        elif userInput == 'c': return transitionVoltageRnCurrent
        elif userInput == 'd': return originVoltageTransitionCurrent
        else:
            # The user entered a manual voltage offset: keep asking until the
            # input parses as a float.
            w = True
            while w:
                try:
                    userInput = float(userInput)
                    w = False
                except:
                    userInput = input('Please enter a valid float.\n')
            # Suggest the matching current offset from the normal-resistance
            # fit and show the resulting correction for visual confirmation.
            currentOffsetSuggestion = self.currentOffsetByNormalResistance_calc(self.sortedIVData[0],self.sortedIVData[1],userInput)
            plot(self.rawIVData,label = 'raw')
            plt.plot(self.rawIVData[0]-userInput,self.rawIVData[1]-currentOffsetSuggestion,label='Suggestion')
            addLegend()
            print('The suggested offset is %f.\n%s'%(currentOffsetSuggestion,closeWindowString))
            plt.show()
            # Ask for the final current offset until it parses as a float.
            w = True
            while w:
                try:
                    currentOffset = input('Please enter the current offset as float.\n')
                    currentOffset = float(currentOffset)
                    w = False
                except:
                    print('Please enter a valid float.')
            plt.close()
            return([userInput,currentOffset])
def offset_from_raw_data(self):
'''This function determines the current and voltage offset from the slope of the unsorted raw data.
Using the raw data unsorted avoids a washing of the maximum slope at 0 V. # TODO update
returns
-------
1d array
[0] The voltage offset.
[1] The current offset.
'''
data = self.unsortedSlope[:,np.abs(self.unsortedSlope[0])<self.offsetThreshold]
data[np.isnan(data)] =0 # remove nan's
data[np.isinf(data)] =0 #remove infinities
indexMinToMax = data[1].argsort()
#res = fmin(self.costGaus,[data[0,indexMinToMax[-1]],data[1,indexMinToMax[-1]],.02],(data,))
#voltageOffset = res[0] #2020/01/20 #np.average(data[0,indexMinToMax[-2:]])
voltageOffset = np.average(data[0,indexMinToMax[-2:]])
#offset correction from normal resistance
currentOffset = self.currentOffsetByNormalResistance_calc(self.sortedIVData[0],self.sortedIVData[1],voltageOffset)
#offset correction from current at the transition. This works relatively bad in case of Cooper pair tunnelling is present
#indexMinToMaxVoltageDifference = np.abs(self.rawIVData[0] - voltageOffset).argsort()
#currentOffset = np.average(self.rawIVData[1,indexMinToMaxVoltageDifference[:2]])
return [voltageOffset,currentOffset]
def currentOffsetByNormalResistance_calc(self,xdata,ydata,voltageOffset):
'''The current offset obtained from the normal resistance fit, which is obtained from voltage offset corrected data.
inputs
------
xdat: 1d array
The x axis data for the linear regression.
ydat:1d array
The y axis data for the linear regression.
voltageOffset: float
The voltage offset.
returns
-------
float
The current offset.
'''
rN_LinReg = self.rN_LinReg_calc(xdata-voltageOffset,ydata)
return (rN_LinReg[0][1]+rN_LinReg[1][1])*1e3/2.
def offset_from_maxSlopeVgapAndCriticalCurrent(self):
'''This function calculates the offset from the gap voltage and the current after the gap.
The data is binned for this calculations.
'''
binedIVData = self.binedIVData_calc(self.sortedIVData)
binSlope = self.slope_calc(binedIVData)
maxSlopeVgapAndCriticalCurrent = self.maxSlopeVgapAndCriticalCurrent_calc(binedIVData,binSlope,compute_Error = False)
voltageOffset = self.voltageOffset_calc(maxSlopeVgapAndCriticalCurrent)
currentOffset = self.currentOffsetByCrtiticalCurrent_calc(maxSlopeVgapAndCriticalCurrent)
return [voltageOffset,currentOffset]
def voltageOffset_calc(self,maxSlopeVgapAndCriticalCurrent):
'''The voltage offset obtained from the gap voltages at negative and positive bias voltage.
inputs
------
maxSlopeVgapAndCriticalCurrent: 2d array
[[negativeVoltageWithMaximumSlope,positiveVoltageWithMaximumSlope],[negativeCriticalCurrent,positiveCriticalCurrent]]
returns
-------
float:
The voltage offset.
'''
return np.average(maxSlopeVgapAndCriticalCurrent[0])
def currentOffsetByCrtiticalCurrent_calc(self,maxSlopeVgapAndCriticalCurrent):
'''The current offset obtained from the critical current at negative and positive bias voltage.
inputs
------
maxSlopeVgapAndCriticalCurrent: 2d array
[[negativeVoltageWithMaximumSlope,positiveVoltageWithMaximumSlope],[negativeCriticalCurrent,positiveCriticalCurrent]]
returns
-------
float:
The voltage offset.
'''
return np.average((maxSlopeVgapAndCriticalCurrent[1]))
def offset_Correction(self,ivData):
'''This function corrects any IV dataset for the voltage and current offset.
inputs
------
ivData: 2d array
The dataset from which the slope is calculated.
[0] The x data.
[1] The y data.
returns
-------
2d array
The input IV data corrected for the voltage and current offset.
'''
ivData = ivData.copy()
ivData[0] = ivData[0]-self.offset[0]
ivData[1] = ivData[1]-self.offset[1]
return ivData
    def maxSlopeVgapAndCriticalCurrent_calc(self,iVData,iVSlope,compute_Error = True):
        '''This function computes the maximum slope of the IV curve for positive and negative voltages. The function can be used to determine gap voltage, as the maximum slope is taken as V_gap.
        The second part of this function is to return the critical current. It is the second negative slope after the gap voltage/maximum slope.
        inputs
        ------
        iVData: 2d array
            The IV data points.
        iVSlope: 2d array
            The difference between the datapoints.
            Note that the array is of len(iVData)-1
        compute_Error: bool
            Determines if the error is calculated or not.
            When True, iVData must carry a third row [2] with the bin
            standard deviations.
        returns
        -------
        2d array:
            [[negativeVoltageWithMaximumSlope,positiveVoltageWithMaximumSlope],[negativeCriticalCurrent,positiveCriticalCurrent]]
            When compute_Error is True, a third row with the propagated error
            of the critical currents is appended.
        '''
        # Restrict the search to the voltage window self.vGapSearchRange on
        # each branch: [negativeIndexes, positiveIndexes]
        indexesToSearch = [np.where(np.logical_and(iVData[0]<-self.vGapSearchRange[0],iVData[0]>-self.vGapSearchRange[1])),
                           np.where(np.logical_and(iVData[0]<self.vGapSearchRange[1],iVData[0]>self.vGapSearchRange[0]))]
        # Voltages at the steepest slope of each branch:
        # [negative maxima, positive maxima]
        slopeMaxima = [iVSlope[0][indexesToSearch[0][0][np.nanargmax(iVSlope[1][indexesToSearch[0]])]],iVSlope[0][indexesToSearch[1][0][np.nanargmax(iVSlope[1][indexesToSearch[1]])]]]
        slopeMaximaIndex = [ indexesToSearch[0][0][np.nanargmax(iVSlope[1][indexesToSearch[0]])],indexesToSearch[1][0][np.nanargmax(iVSlope[1][indexesToSearch[1]])]]
        # Current at the minimum of the slope within 50 samples adjacent to
        # the transition (below it on the negative branch, above it on the
        # positive branch).
        # NOTE(review): the 50-sample window is hard coded and assumes the
        # binned data is dense enough around the gap -- confirm for coarse
        # binnings.
        first0Crossing = [iVData[1,(slopeMaximaIndex[0]-50)+np.nanargmin(iVSlope[1,(slopeMaximaIndex[0]-50):slopeMaximaIndex[0]])],
                          iVData[1,(slopeMaximaIndex[1]+00)+np.nanargmin(iVSlope[1,(slopeMaximaIndex[1]):(slopeMaximaIndex[1]+50)])]]
        if compute_Error:
            # Same indices, but read from the standard-deviation row [2].
            first0Crossingerr = [iVData[2,(slopeMaximaIndex[0]-50)+np.nanargmin(iVSlope[1,(slopeMaximaIndex[0]-50):slopeMaximaIndex[0]])],
                                 iVData[2,(slopeMaximaIndex[1]+00)+np.nanargmin(iVSlope[1,(slopeMaximaIndex[1]):(slopeMaximaIndex[1]+50)])]]
            # The currents are scaled by pi/4; presumably the theoretical
            # critical-current correction -- TODO confirm against the model.
            return [slopeMaxima,np.divide(np.multiply(np.pi,first0Crossing),4),np.divide(np.multiply(np.pi,first0Crossingerr),4)]
        else:
            return [slopeMaxima,np.divide(np.multiply(np.pi,first0Crossing),4)]
def gapVoltage_calc(self,maxSlopeVgapAndCriticalCurrent):
'''The gap voltage obtained from the maximum slope of the IV curve.'''
return np.average(np.abs(maxSlopeVgapAndCriticalCurrent[0]))
def criticalCurrent_from_max_slope_calc(self):
'''The critical current obtained from the maximum slope of the binned IV curve'''
return np.average(np.abs(self.maxSlopeVgapAndCriticalCurrent[1])) #TODO this requires a pi/4
def criticalCurrent_from_gapVoltage_rN_calc(self):
'''The critical current obtained from the gap voltage and the normal resistance'''
return self.gapVoltage*1e3/self.rN
def differenceGaussianData(self,params, xy,rangeToEvaluate,vGap):
'''This function is the cost function to fit a gaussian to the given data.
inputs
------
params: array
The parameters for the Gaussian:
[0] The value of the guassian's peak.
[1] The width of the gaussian.
xy: 2d np.array
The slope of the IV data where the gaussian is fitted on.
rangeToEvaluate: float
The voltage range considered during the fit.
vGap: float
The gap voltage at which the gaussian is centered.
A variable gap voltage, an optimization of the gap voltage as part of the fmin function is not possible since the data is to noisy.
returns
-------
float
The sum over the squared differences between the gaussian fit and the datapoints
'''
indexes = np.where(np.logical_and(xy[0]<rangeToEvaluate.max(),xy[0]>rangeToEvaluate.min()))[0]#get to 1.5 sigma
#indexes = np.where(np.logical_and(np.logical_and(xy[0]<rangeToEvaluate.max(),xy[0]>rangeToEvaluate.min()),xy[1]>0))[0]#get to 1.5 sigma
#indexes = np.where(np.logical_and(xy[0]<params[1]+4*params[2].max(),xy[0]>params[1]-4*params[2].min()))[0]#get to 1.5 sigma
#2020/01/20
return np.sum(np.abs(np.subtract(np.square(gaussian(xy[0,indexes],vGap,params[0],params[1])[1]),np.square(xy[1,indexes]))))
#return np.abs(np.sum(np.subtract(np.square(gaussian(xy[0,indexes],vGap,params[0],params[1])[1]),np.square(xy[1,indexes]))))
def costGaus(self,params,xy):
'''This is a cost function for a gaussian fit on a given dataset. The parameters include the position of the gaussian
inputs
------
params: array
The parameters for the Gaussian:
[0] The position of the gaussian.
[1] The value of the guassian's peak.
[2] The width of the gaussian.
xy: 2d np.array
The slope of the IV data where the gaussian is fitted on.
returns
-------
float
The sum over the squared differences between the gaussian fit and the datapoints.
'''
return np.sum(np.abs( np.subtract(np.square(gaussian(xy[0],params[0],params[1],params[2])[1]),np.square(xy[1]))))
#Detection with Gaussian Convolution Fit does not work
# def differenceGaussianData(params, xy,rN):
# '''This function is the cost function to fit a gaussian to the given data.
#
# inputs
# ------
# params: array
# [0] Gap voltage
# xy: 2d np.array
# The slope of the IV data where the gaussian is fitted on
#
#
# returns
# -------
# float
# '''
# if params[1] >0.3:params[1]=.3#limit sigma of gaussian to .3 mV
# # Note: iV_Curve_Gaussian_Convolution reduces vrange
# simulation = iV_Curve_Gaussian_Convolution(vrange=xy[0],vGap=params[0],sigmaGaussian = params[1],rN=rN)
# return np.sum(np.square(simulation[1]-xy[1,np.where(np.isin(xy[0],simulation[0]))[0]]))
## return np.sum(np.square(np.subtract(simulation[1,np.where(np.logical_and(simulation[0]<xy[0,-1]-6*.3,
## simulation[0]>xy[0,0]+6*.3))[0]],
## xy[1,np.where(np.logical_and(xy[0]<xy[0,-1]-6*.3,xy[0]>xy[0,0]+6*.3))[0]])))
# optimised = fmin(differenceGaussianData,[self.gapVoltage,.01],args=(self.offsetCorrectedBinedIVData,self.rN))
# return optimised
def gaussianBinSlopeFit_calc(self):
'''This function fits a gaussian on the slope of the binned current data.
returns
-------
np array:
gaussian fit parameters
[0] The value of the guassian's peak.
[1] The width of the gaussian
'''
neggaus=fmin(self.differenceGaussianData,[1000,.03],args=(self.binSlope,np.negative(self.vGapSearchRange),np.negative(self.gapVoltage)),ftol=1e-12,xtol=1e-10)
posgaus=fmin(self.differenceGaussianData,[1000,.03],args=(self.binSlope,self.vGapSearchRange,self.gapVoltage),ftol=1e-12,xtol=1e-10)
return np.vstack([neggaus,posgaus])
def simulated_gaussianBinSlopeFit_calc(self):
'''This function fits a gaussian on the slope of the simulated current data.
TODO merge with gaussianBinSlopeFit_calc
returns
-------
np array:
gaussian fit parameters
[0] The value of the guassian's peak.
[1] The width of the gaussian
'''
neggaus=fmin(self.differenceGaussianData,[20,.02],args=(self.simulated_binSlope,np.negative(self.vGapSearchRange),np.negative(self.simulated_gapVoltage)))
posgaus=fmin(self.differenceGaussianData,[20,.02],args=(self.simulated_binSlope,self.vGapSearchRange,self.simulated_gapVoltage))
return np.vstack([neggaus,posgaus])
def plot_gaussianBinSlopeFit(self):
'''This function plots the fits gaussians on the slope of the binned current data.
'''
b =self.gaussianBinSlopeFit
g1 = gaussian(self.binSlope[0],-self.gapVoltage,b[0,0],b[0,1])
g2 = gaussian(self.binSlope[0],self.gapVoltage,b[1,0],b[1,1])
plot(self.binSlope,label='Slope Binned Data')
plot(g2,label='Fit on Positive Transission')
plot(g1,label='Fit on Negative Transission')
def rN_LinReg_calc(self,xdat,ydat):
'''Linear regression to obtain the value of the normal resistance.
The voltage offset corrected data is token to achieve solid determination of the normal resistance in the defined range.
inputs
------
xdat: 1d array
The x axis data for the linear regression.
ydat:1d array
The y axis data for the linear regression.
returns
-------
[resultOfNegativeRegression,resultOfPositiveRegression]
'''
# ~ is "not"
reslinregRnpos = stats.linregress(
xdat[np.where(np.logical_and(xdat<self.rNThresholds[1],np.logical_and(xdat>self.rNThresholds[0] , ~np.isnan(ydat))))],
np.multiply(1e-3,ydat[np.where(np.logical_and(xdat<self.rNThresholds[1],np.logical_and(xdat>self.rNThresholds[0] , ~np.isnan(ydat))))]))
reslinregRnneg = stats.linregress(
xdat[np.where(np.logical_and( xdat>-self.rNThresholds[1] ,np.logical_and( xdat<-self.rNThresholds[0] , ~ np.isnan(ydat))))],
np.multiply(1e-3,ydat[np.where(np.logical_and( xdat>-self.rNThresholds[1] ,np.logical_and( xdat<-self.rNThresholds[0] , ~ np.isnan(ydat))))]))
return reslinregRnneg, reslinregRnpos
def plot_Rn_bined_IV_fit(self):
'''This function plots the fit of the normal resistance on the binned IV data.
'''
plt.plot(self.binedIVData[0],self.binedIVData[1])
plt.plot(self.binedIVData[0],self.binedIVData[0]*1e3*self.rN_LinReg[0][0]+self.rN_LinReg[0][1]*1e3)
plt.plot(self.binedIVData[0],self.binedIVData[0]*1e3*self.rN_LinReg[1][0]+self.rN_LinReg[1][1]*1e3)
def plot_Rn_raw_IV_fit(self):
'''This function plots the fit of the normal resistance on the sorted raw IV data
'''
plt.plot(self.offsetCorrectedSortedIVData[0],self.offsetCorrectedSortedIVData[1],label='Raw IV Data')
plt.plot(self.offsetCorrectedSortedIVData[0],self.offsetCorrectedSortedIVData[0]*1e3*self.rN_LinReg[0][0]+self.rN_LinReg[0][1]*1e3,label='Fit on negative Slope')
plt.plot(self.offsetCorrectedSortedIVData[0],self.offsetCorrectedSortedIVData[0]*1e3*self.rN_LinReg[1][0]+self.rN_LinReg[1][1]*1e3,label='Fit on positive Slope')
def plot_Rn_offsetCorrectedRawIVData_IV_fit(self):
'''This function plots the fit of the normal resistance on the offset corrected binned IV data
'''
plt.plot(self.offsetCorrectedRawIVData[0],self.offsetCorrectedRawIVData[1],label='Raw IV Data')
plt.plot(self.offsetCorrectedRawIVData[0],self.offsetCorrectedRawIVData[0]*1e3*self.rN_LinReg[0][0]+self.rN_LinReg[0][1]*1e3,label='Fit on negative Slope')
plt.plot(self.offsetCorrectedRawIVData[0],self.offsetCorrectedRawIVData[0]*1e3*self.rN_LinReg[1][0]+self.rN_LinReg[1][1]*1e3,label='Fit on positive Slope')
def rSG_LinReg_calc(self):
'''Linear regression to obtain the value of Rsg
-------
returns
-------
[resultOfNegativeRegression,resultOfPositiveRegression]
'''
#correct x data for voltage offset
xdat = self.offsetCorrectedSortedIVData[0]
ydat = self.offsetCorrectedSortedIVData[1]
# ~ is "not"
reslinregRsgpos = stats.linregress(
xdat[np.where(np.logical_and(xdat<self.rSGThresholds[1] ,np.logical_and(xdat>self.rSGThresholds[0] , ~ np.isnan(ydat))))],
np.multiply(1e-3,ydat[np.where(np.logical_and(xdat<self.rSGThresholds[1] ,np.logical_and(xdat>self.rSGThresholds[0] , ~ np.isnan(ydat))))]))
reslinregRsgneg = stats.linregress(
xdat[np.where(np.logical_and( xdat>-self.rSGThresholds[1] ,np.logical_and( xdat<-self.rSGThresholds[0] , ~ np.isnan(ydat))))],
np.multiply(1e-3,ydat[np.where(np.logical_and( xdat>-self.rSGThresholds[1] ,np.logical_and( xdat<-self.rSGThresholds[0] , ~ np.isnan(ydat))))]))
#reslinregRsgpos = stats.linregress(
# bindat[curveIndex,0,np.where(np.logical_and(np.logical_and(bindat[curveIndex,0]>rSGThresholds[0] ,bindat[curveIndex,0]<rSGThresholds[1]), ~ np.isnan(bindat[curveIndex,1])))][0],
# np.multiply(.001,bindat[curveIndex,1,np.where(np.logical_and(np.logical_and(bindat[curveIndex,0]>rSGThresholds[0] ,bindat[curveIndex,0]<rSGThresholds[1]) , ~ np.isnan(bindat[curveIndex,1])))][0]))
#reslinregRsgneg = stats.linregress(ø
# bindat[curveIndex,0,np.where(np.logical_and(np.logical_and(bindat[curvºeIndex,0]<-rSGThresholds[0] ,bindat[curveIndex,0]>-rSGThresholds[1]), ~ np.isnan(bindat[curveIndex,1])))][0],
# np.multiply(.001,bindat[curveIndex,1,np.where(np.logical_and(np.logical_and(bindat[curveIndex,0]<-rSGThresholds[0] ,bindat[curveIndex,0]>-rSGThresholds[1]) , ~ np.isnan(bindat[curveIndex,1])))][0]))
return reslinregRsgneg,reslinregRsgpos
def plot_Rsg_raw_IV_fit(self):
'''This function plots the fit of the normal resistance on the sorted raw IV data
'''
plt.plot(self.offsetCorrectedSortedIVData[0],self.offsetCorrectedSortedIVData[1],label='Raw IV Data')
plt.plot(self.offsetCorrectedSortedIVData[0],self.offsetCorrectedSortedIVData[0]*1e3*self.rSG_LinReg[0][0]+self.rSG_LinReg[0][1]*1e3,label='Fit on negative Slope')
plt.plot(self.offsetCorrectedSortedIVData[0],self.offsetCorrectedSortedIVData[0]*1e3*self.rSG_LinReg[1][0]+self.rSG_LinReg[1][1]*1e3,label='Fit on positive Slope')
def rN_calc(self):
'''The normal resistance obtained from the normal resistance slopes'''
reslinregRnneg, reslinregRnpos = self.rN_LinReg
return np.mean(np.reciprocal([reslinregRnneg[0],reslinregRnpos[0]]))
def rNsigma_calc(self):
'''The error of the normal resistance obtained from the normal resistance slopes'''
reslinregRnneg, reslinregRnpos = self.rN_LinReg
return np.sqrt(np.std(np.reciprocal([reslinregRnneg[0],reslinregRnpos[0]]))**2+np.square(reslinregRnneg[-1]*np.reciprocal(np.square(reslinregRnneg[0])))+np.square(reslinregRnpos[-1]*np.reciprocal(np.square(reslinregRnpos[0]))))
def rSG_calc(self):
'''The subgap resistance obtained from the subgap resistance slopes'''
reslinregRsgneg,reslinregRsgpos = self.rSG_LinReg
return np.mean(np.reciprocal([reslinregRsgneg[0],reslinregRsgpos[0]]))
def rSGsigma_calc(self):
'''The error of the subgap resistance obtained from the subgap resistance slopes'''
reslinregRsgneg,reslinregRsgpos = self.rSG_LinReg
return np.sqrt(np.std(np.reciprocal([reslinregRsgneg[0],reslinregRsgpos[0]]))**2+np.square(reslinregRsgpos[-1]*np.reciprocal(np.square(reslinregRsgpos[0])))+np.square(reslinregRsgneg[-1]*np.reciprocal(np.square(reslinregRsgneg[0]))))
def rSGrN_calc(self):
'''The Rsg/Rn value'''
return np.divide(self.rSG,self.rN)
def rSGrNsigma_calc(self):
'''The Rsg/Rn value'''
return np.sqrt((self.rSGsigma/self.rN)**2+(self.rSG*self.rNsigma/self.rN**2)**2)
    def binedDataExpansion(self,limit):
        '''This function expands the voltage range of the bined data to a given limit using the normal resistance linear regression data.
        inputs
        ------
        limit: float
            The maximum limit (in positive and negative direction) which needs to be included in the output array.
        returns
        -------
        2d array
            [0] The expanded voltage range.
            [1] The currents: measured binned values where available, values
                from the normal resistance fits elsewhere.
        '''
        # Grow the voltage axis until it covers +-limit.
        voltageRange = expandFuncWhile(self.binedIVData[0],limit)
        #Fit normal resistance to all voltages: the negative-branch fit is
        #used for voltages <= 0, the positive-branch fit for voltages > 0.
        currents = (np.hstack([self.rN_LinReg[0][0]*voltageRange[np.where(voltageRange<=0.)]*1e3+self.rN_LinReg[0][1]*1e3,
                               self.rN_LinReg[1][0]*voltageRange[np.where(voltageRange>0.)]*1e3+self.rN_LinReg[1][1]*1e3]))
        #change the known data point to the value they should be
        # (overwrite the fit values with the measured binned currents at the
        # voltages that already existed in the binned data).
        currents[np.where(np.in1d(voltageRange,self.binedIVData[0]))[0]]=self.binedIVData[1]
        return np.vstack([voltageRange,currents])
def plot_binedDataExpansion(self,limit):
'''This function plots the function binedDataExpansion, which expands the voltage range of the bined data to a given limit using the normal resistanc linear regression data.
inputs
------
limit: float
The maximum limit (in postivie and negative direction) which need to be included in the output array.
'''
iv = self.binedDataExpansion(limit)
plt.plot(iv[0],iv[1])
def iKK_Calc(self,ivData):
'''This function computes the Kramers Kronig Transformation current using scipy.signal.hilbert
inwputs
------
ivData: 2d array
The IV curve which is transformed.
returns
-------
2d array
[0] The bias voltage.
[1] The Kramers Kronig Transformed Current.
'''
return np.array([ivData[0],-hilbert(ivData[1]-ivData[0]*1e3/self.rN).imag])
def iKKExpansion(self,limit):
'''This function computes the Kramers Kronig Transformation current using scipy.signal.hilbert
inputs
------
limit: float
The maximum limit (in postivie and negative direction) which need to be included in the output array.
returns
-------
2d array of size of binedDataExpansion
Kramers Kronig Transformed Currents
'''
ivData =self.binedDataExpansion(limit)
return self.iKK_Calc(ivData)
#return np.array([ivData[0],-hilbert(ivData[1]).imag]) # Does not change the embedding Impedance
def plot_simulated_IV_and_KramersKronig(self):
'''This function plots the simulated IV curve and the corresponding Kramers Kronig transformation.
'''
plot(self.simulatedIV,'IV')
plot(self.simulated_iKK,'KK')
    def chalmers_Fit_calc(self):
        '''This function returns data points obtained from fitting the IV curve equation of Rashid et al. 2016 to the offset corrected raw data.

        Side effects: stores the fit parameters in self.chalmers_Fit_Parameter
        and the fitted curve in self.chalmers_Fit.
        returns
        -------
        np 2d array
            [0] The bias voltages in the range from self.simulationVmin to self.simulationVmax.
            [1] The current through the SIS junction at each bias voltage.
        '''
        def cost_Chalmers(params,iVMeasured,dummy):
            '''This cost function is minimised to obtain the best fit of the IV data.
            inputs
            ------
            params: list
                [0] Empirical Parameter 'a' introduced by Rashid. It corresponds with the transition width at the gap voltage.
                [1] The Gap Voltage.
                [2] The Normal Resistance Rn.
                [3] The Subgap Resistance Rsg.
            iVMeasured: 2d array
                The measured IV data.
            Dummy: any
                Dummy variable to overgive args to fmin.
            returns
            -------
            float
                The remaining difference between the measured and simulated data.
            '''
            sim = iV_Chalmers(iVMeasured[0],params[0],params[1],params[2],params[3])
            return np.sum(np.abs(np.subtract(sim,iVMeasured[1])))
        #Fit the Chalmers curve to the sorted raw IV data, starting from the
        #previously determined characteristic values.
        guess =[30,self.gapVoltage,self.rN,self.rSG]
        fit = fmin(cost_Chalmers,guess,args=(self.offsetCorrectedSortedIVData,1),ftol=1e-12,xtol=1e-10)
        #recover the best fitting curve on a regular voltage grid.
        vrange= np.arange(self.simulationVmin,self.simulationVmax,self.simulationVoltageSteps)
        self.chalmers_Fit_Parameter=fit
        self.chalmers_Fit = iV_Chalmers(vrange,fit[0],fit[1],fit[2],fit[3])
        return self.chalmers_Fit
def plot_Chalmers_Fit(self):
'''This function plots the Chalmers Fit along with the raw data.
'''
plot(self.chalmers_Fit,label='Fit')
plot(self.offsetCorrectedSortedIVData,label='Measurement')
def convolution_most_parameters_Fit_Calc(self):
'''This function computes data points obtained from fitting the raw IV data to a perfect IV curve which accounts also for subgap resistance.
TODO Note that there is a remaining offset in the normal resistance region.
Since the computation is computational intensive (takes several seconds), the result is written into:
attributes
----------
convolution_Fit: 2d array
The simulated IV curve data.
convolution_most_parameters_Fit_Parameter: object of :minimize:
The output of the minimisation solver. The fit parameters are associated with attribute :x:
[0] The gap voltage
[1] The excess critical current at the transition.
[2] The critical current.
[3] The standard deviation of the gaussian used in the convolution.
[4] The subgap leakage resistance
[5] The offset of the subgap leakage.
returns
-------
2d array
The simulated IV data.
'''
def cost_Subgap(params,iVMeasured,rN):
'''The cost function to minimize the difference between simulated curve and the measured data.
inputs
------
params: list
The values which are free to be optimised.
[0] The gap voltage
[1] The excess critical current at the transition.
[2] The critical current.
[3] The standard deviation of the gaussian used in the convolution.
[4] The subgap leakage resistance
[5] The offset of the subgap leakage.
iVMeasured: 2d array
The measured IV data.
rN: float
The normal resistance of the junction.
returns
-------
float
The value of the sum of the absolute remaining differences.
'''
vGap=params[0]
excessCriticalCurrent = params[1]
critcalCurrent= params[2]
sigmaGaussian= params[3]
subgapLeakage= params[4]
subgapLeakageOffset = params[5]
sim = iV_Curve_Gaussian_Convolution_with_SubgapResistance(iVMeasured[0],vGap,excessCriticalCurrent=excessCriticalCurrent,criticalCurrent=critcalCurrent,sigmaGaussian =sigmaGaussian,rN=rN,subgapLeakage=subgapLeakage,subgapLeakageOffset=subgapLeakageOffset)
return np.sum(np.abs(np.subtract(sim[1],iVMeasured[1])))
guess =[self.gapVoltage,1000,self.criticalCurrent,self.simulation_Sigma_Gaussian_Convolution_Guess,self.rSG,10]
bounds=np.full([len(guess),2],None)
bounds[4,0]=0 # Limit the subgap leakage current offset to only positive values
#Here We are
fit = fmin(cost_Subgap,guess,args=(self.savgolIV,self.rN),ftol=1e-12,xtol=1e-10,maxiter=1000)
# 2019/12/27
# fit = minimize(cost_Subgap,guess,args=(self.offsetCorrectedSortedIVData,self.rN),method='Nelder-Mead',options={'maxiter':3000, 'maxfev':3000})
#fit = minimize(cost_Subgap,guess,args=(self.rawIVDataOffsetCorrected,self.rN),method='Newton-CG')#'CG')#'Powell')
self.convolution_most_parameters_Fit_Parameter=fit
#recover the best fitting curve.
vrange= np.arange(self.vmin,self.vmax,self.simulationVoltageSteps)
# 2019/12/27
# self.convolution_Fit = iV_Curve_Gaussian_Convolution_with_SubgapResistance(vrange,fit.x[0],fit.x[1],fit.x[2],fit.x[3],self.rN,fit.x[4],fit.x[5])
self.convolution_most_parameters_Fit = iV_Curve_Gaussian_Convolution_with_SubgapResistance(vrange,fit[0],fit[1],fit[2],fit[3],self.rN,fit[4],fit[5])
return self.convolution_most_parameters_Fit
def convolution_most_parameters_Fit_fixed_Vgap_Calc(self):
'''This function computes data points obtained from fitting the raw IV data to a perfect IV curve which accounts also for subgap resistance.
TODO Note that there is a remaining offset in the normal resistance region.
Since the computation is computational intensive (takes several seconds), the result is written into:
attributes
----------
convolution_Fit: 2d array
The simulated IV curve data.
convolution_most_parameters_Fit_Parameter: object of :minimize:
The output of the minimisation solver. The fit parameters are associated with attribute :x:
[0] The gap voltage
[1] The excess critical current at the transition.
[2] The critical current.
[3] The standard deviation of the gaussian used in the convolution.
[4] The subgap leakage resistance
[5] The offset of the subgap leakage.
returns
-------
2d array
The simulated IV data.
'''
def cost_Subgap(params,iVMeasured,rN,vGap):
'''The cost function to minimize the difference between simulated curve and the measured data.
inputs
------
params: list
The values which are free to be optimised.
[0] The gap voltage
[1] The excess critical current at the transition.
[2] The critical current.
[3] The standard deviation of the gaussian used in the convolution.
[4] The subgap leakage resistance
[5] The offset of the subgap leakage.
iVMeasured: 2d array
The measured IV data.
rN: float
The normal resistance of the junction.
returns
-------
float
The value of the sum of the absolute remaining differences.
'''
excessCriticalCurrent = params[1]
critcalCurrent= params[2]
sigmaGaussian= params[3]
subgapLeakage= params[4]
subgapLeakageOffset = params[0]
sim = iV_Curve_Gaussian_Convolution_with_SubgapResistance(iVMeasured[0],vGap,excessCriticalCurrent=excessCriticalCurrent,criticalCurrent=critcalCurrent,sigmaGaussian=sigmaGaussian,rN=rN,subgapLeakage=subgapLeakage,subgapLeakageOffset=subgapLeakageOffset)
return np.sum(np.abs(np.subtract(sim[1],iVMeasured[1])))
guess =[10,1000,self.criticalCurrent,self.simulation_Sigma_Gaussian_Convolution_Guess,self.rSG]
bounds=np.full([len(guess),2],None)
bounds[4,0]=0 # Limit the subgap leakage current offset to only positive values
#Here We are
fit = fmin(cost_Subgap,guess,args=(self.savgolIV,self.rN,self.gapVoltage),ftol=1e-12,xtol=1e-10,maxiter=1000)
# 2019/12/27
# fit = minimize(cost_Subgap,guess,args=(self.offsetCorrectedSortedIVData,self.rN),method='Nelder-Mead',options={'maxiter':3000, 'maxfev':3000})
#fit = minimize(cost_Subgap,guess,args=(self.rawIVDataOffsetCorrected,self.rN),method='Newton-CG')#'CG')#'Powell')
self.convolution_most_parameters_Fit_fixed_Vgap_Parameter=fit
#recover the best fitting curve.
vrange= np.arange(self.vmin,self.vmax,self.simulationVoltageSteps)
# 2019/12/27
# self.convolution_Fit = iV_Curve_Gaussian_Convolution_with_SubgapResistance(vrange,fit.x[0],fit.x[1],fit.x[2],fit.x[3],self.rN,fit.x[4],fit.x[5])
self.convolution_most_parameters_Fit_fixed_Vgap_Fit = iV_Curve_Gaussian_Convolution_with_SubgapResistance(vrange,self.gapVoltage,excessCriticalCurrent=fit[1],criticalCurrent=fit[2],sigmaGaussian=fit[3],rN=self.rN,subgapLeakage=fit[4],subgapLeakageOffset=fit[0])
return self.convolution_most_parameters_Fit_fixed_Vgap_Fit
def convolution_most_parameters_stepwise_Fit_Calc(self):
    '''This function computes data points obtained from fitting the raw IV data to a perfect IV curve which accounts also for subgap resistance.
    The fit is done stepwise in three voltage regions: first the critical current is
    optimised against the normal resistance region, then the subgap leakage against the
    subgap region, and finally the gap voltage, excess critical current and gaussian
    width against the transition region.
    TODO Note that there is a remaining offset in the normal resistance region.
    Since the computation is computational intensive (takes several seconds), the result is written into:
    attributes
    ----------
    convolution_most_parameters_stepwise_Fit: 2d array
        The simulated IV curve data.
    convolution_most_parameters_stepwise_Fit_Parameter: list
        The fit parameters collected from the three stepwise fits:
        [0] The gap voltage
        [1] The excess critical current at the transition.
        [2] The critical current.
        [3] The standard deviation of the gaussian used in the convolution.
        [4] The subgap leakage resistance
        [5] The offset of the subgap leakage.
    returns
    -------
    2d array
        The simulated IV data.
    '''
    def cost_Subgap(params,iVMeasured,vGap,excessCriticalCurrent,criticalCurrent,sigmaGaussian,
                    rN,rSGThresholds):
        '''Cost function evaluated only in the subgap voltage region.
        params: [0] the subgap leakage resistance, [1] the subgap leakage offset.
        Returns the sum of absolute current differences between simulation and
        measurement for bias voltages with rSGThresholds[0] < |V| < rSGThresholds[1].
        '''
        subgapLeakage= params[0] #TODO what happens if I use the Rsg value here
        subgapLeakageOffset = params[1]
        sim = iV_Curve_Gaussian_Convolution_with_SubgapResistance(iVMeasured[0],vGap,excessCriticalCurrent=excessCriticalCurrent,
                criticalCurrent=criticalCurrent,sigmaGaussian=sigmaGaussian,rN=rN,subgapLeakage=subgapLeakage,subgapLeakageOffset=subgapLeakageOffset)
        # Restrict the residual to bias voltages inside the subgap threshold window.
        return np.sum(np.abs(np.subtract(sim[1,np.logical_and(abs(sim[0])>rSGThresholds[0],abs(sim[0])<rSGThresholds[1])],iVMeasured[1,np.logical_and(abs(iVMeasured[0])>rSGThresholds[0],abs(iVMeasured[0])<rSGThresholds[1])])))
        #2020/01/17
        #return np.sum(np.abs(np.subtract(sim[1,np.logical_and(abs(iVMeasured[0])>rSGThresholds[0],abs(iVMeasured[0])<rSGThresholds[1])],iVMeasured[1,np.logical_and(abs(iVMeasured[0])>rSGThresholds[0],abs(iVMeasured[0])<rSGThresholds[1])])))
    def cost_Normal(params,iVMeasured,vGap,excessCriticalCurrent,sigmaGaussian,
                    rN,subgapLeakage,subgapLeakageOffset,rNThresholds):
        '''Cost function evaluated only in the normal resistance region.
        params: [0] the critical current.
        Returns the sum of absolute current differences for bias voltages with
        rNThresholds[0] < |V| < rNThresholds[1].
        '''
        sim = iV_Curve_Gaussian_Convolution_with_SubgapResistance(iVMeasured[0],vGap,excessCriticalCurrent=excessCriticalCurrent,
                criticalCurrent=params[0],sigmaGaussian=sigmaGaussian,rN=rN,
                subgapLeakage=subgapLeakage,subgapLeakageOffset=subgapLeakageOffset)
        return np.sum(np.abs(np.subtract(sim[1,np.logical_and(abs(sim[0])>rNThresholds[0],abs(sim[0])<rNThresholds[1])],iVMeasured[1,np.logical_and(abs(iVMeasured[0])>rNThresholds[0],abs(iVMeasured[0])<rNThresholds[1])])))
        #2020/01/17
        #return np.sum(np.abs(np.subtract(sim[1,np.logical_and(abs(iVMeasured[0])>rNThresholds[0],abs(iVMeasured[0])<rNThresholds[1])],iVMeasured[1,np.logical_and(abs(iVMeasured[0])>rNThresholds[0],abs(iVMeasured[0])<rNThresholds[1])])))
    def cost_Transission(params,iVMeasured,criticalCurrent,
                         rN,subgapLeakage,subgapLeakageOffset,vGapSearchRange):
        '''Cost function evaluated only in the transition region around the gap voltage.
        params: [0] the gap voltage, [1] the excess critical current, [2] the gaussian sigma.
        Returns the sum of absolute current differences for bias voltages with
        vGapSearchRange[0] < |V| < vGapSearchRange[1].
        '''
        vGap = params[0]
        excessCriticalCurrent = params[1]
        sigmaGaussian = params[2]
        sim = iV_Curve_Gaussian_Convolution_with_SubgapResistance(iVMeasured[0],vGap,excessCriticalCurrent=excessCriticalCurrent,
                criticalCurrent=criticalCurrent,sigmaGaussian=sigmaGaussian,rN=rN,
                subgapLeakage=subgapLeakage,subgapLeakageOffset=subgapLeakageOffset)
        return np.sum(np.abs(np.subtract(sim[1,np.logical_and(abs(sim[0])>vGapSearchRange[0],abs(sim[0])<vGapSearchRange[1])],iVMeasured[1,np.logical_and(abs(iVMeasured[0])>vGapSearchRange[0],abs(iVMeasured[0])<vGapSearchRange[1])])))
        #2020/01/17
        #return np.sum(np.abs(np.subtract(sim[1,np.logical_and(abs(iVMeasured[0])>vGapSearchRange[0],abs(iVMeasured[0])<vGapSearchRange[1])],iVMeasured[1,np.logical_and(abs(iVMeasured[0])>vGapSearchRange[0],abs(iVMeasured[0])<vGapSearchRange[1])])))
    # Step 1: fit the critical current in the normal resistance region. Note that
    # self.criticalCurrent is passed into the excessCriticalCurrent slot and the subgap
    # leakage is fixed to the literals 500/10 here -- presumably rough placeholders; confirm.
    guess =[self.criticalCurrent]
    fit = fmin(cost_Normal,guess,args=(self.savgolIV,self.gapVoltage,self.criticalCurrent,self.simulation_Sigma_Gaussian_Convolution_Guess,
               self.rN,500,10,self.rNThresholds),ftol=1e-12,xtol=1e-10)
    criticalCurrentFit = fit[0]
    # Step 2: fit the subgap leakage resistance and its offset in the subgap region.
    guess =[self.rSG,10]
    fit = fmin(cost_Subgap,guess,args=(self.savgolIV,self.gapVoltage,self.criticalCurrent,criticalCurrentFit,
               self.simulation_Sigma_Gaussian_Convolution_Guess,
               self.rN,self.rSGThresholds),ftol=1e-12,xtol=1e-10)
    subgapLeakageFit = fit[0]
    subgapLeakageOffsetFit = fit[1]
    # Step 3: fit gap voltage, excess critical current and gaussian width at the transition.
    guess =[self.gapVoltage,self.criticalCurrent,self.simulation_Sigma_Gaussian_Convolution_Guess]
    fit = fmin(cost_Transission,guess,args=(self.savgolIV,criticalCurrentFit,
               self.rN,subgapLeakageFit,subgapLeakageOffsetFit,
               self.vGapSearchRange),ftol=1e-12,xtol=1e-10)
    vGapFit = fit[0]
    excessCriticalCurrentFit = fit[1]
    sigmaGaussianFit = fit[2]
    self.convolution_most_parameters_stepwise_Fit_Parameter=[vGapFit,
                                                             excessCriticalCurrentFit,
                                                             criticalCurrentFit,
                                                             sigmaGaussianFit,
                                                             subgapLeakageFit,
                                                             subgapLeakageOffsetFit]
    #recover the best fitting curve.
    vrange= np.arange(self.vmin,self.vmax,self.simulationVoltageSteps)
    # 2019/12/27
    # self.convolution_Fit = iV_Curve_Gaussian_Convolution_with_SubgapResistance(vrange,fit.x[0],fit.x[1],fit.x[2],fit.x[3],self.rN,fit.x[4],fit.x[5])
    self.convolution_most_parameters_stepwise_Fit = iV_Curve_Gaussian_Convolution_with_SubgapResistance(vrange,vGapFit,excessCriticalCurrent=excessCriticalCurrentFit,
            criticalCurrent=criticalCurrentFit,sigmaGaussian=sigmaGaussianFit,rN=self.rN,
            subgapLeakage=subgapLeakageFit,subgapLeakageOffset=subgapLeakageOffsetFit)
    return self.convolution_most_parameters_stepwise_Fit
def convolution_without_excessCurrent_Fit_Calc(self):
    '''This function computes data points obtained from fitting the raw IV data to a perfect IV curve which accounts also for subgap resistance.
    The used fit does not add a free excess current at the transition: the excess current
    is tied to the critical current instead.
    TODO Note that there is a remaining offset in the normal resistance region.
    Since the computation is computational intensive (takes several seconds), the result is written into:
    attributes
    ----------
    convolution_without_excessCurrent_Fit: 2d array
        The simulated IV curve data.
    convolution_without_excessCurrent_Fit_Parameter: array
        The fit parameters returned by :fmin:
        [0] The gap voltage
        [1] The critical current.
        [2] The standard deviation of the gaussian used in the convolution.
        [3] The subgap leakage resistance
        [4] The offset of the subgap leakage.
    returns
    -------
    2d array
        The simulated IV data.
    '''
    def cost_Subgap(params, iVMeasured, rN):
        '''The cost function to minimize the difference between simulated curve and the measured data.
        inputs
        ------
        params: list
            The values which are free to be optimised.
            [0] The gap voltage
            [1] The critical current.
            [2] The standard deviation of the gaussian used in the convolution.
            [3] The subgap leakage resistance
            [4] The offset of the subgap leakage.
        iVMeasured: 2d array
            The measured IV data.
        rN: float
            The normal resistance of the junction.
        returns
        -------
        float
            The value of the sum of the absolute remaining differences.
        '''
        vGap = params[0]
        criticalCurrent = params[1]
        sigmaGaussian = params[2]
        subgapLeakage = params[3]
        subgapLeakageOffset = params[4]
        # The excess current at the transition is not a free parameter here:
        # it is fixed to the critical current.
        excessCriticalCurrent = criticalCurrent
        sim = iV_Curve_Gaussian_Convolution_with_SubgapResistance(
            iVMeasured[0], vGap, excessCriticalCurrent=excessCriticalCurrent,
            criticalCurrent=criticalCurrent, sigmaGaussian=sigmaGaussian, rN=rN,
            subgapLeakage=subgapLeakage, subgapLeakageOffset=subgapLeakageOffset)
        return np.sum(np.abs(np.subtract(sim[1], iVMeasured[1])))
    guess = [self.gapVoltage, self.criticalCurrent,
             self.simulation_Sigma_Gaussian_Convolution_Guess, self.rSG, 10]
    # FIX: a bounds array used to be built here, but fmin does not support bounds,
    # so it was dead code and has been removed. Switch to a bounded solver
    # (e.g. scipy.optimize.minimize with L-BFGS-B) if limits are required.
    fit = fmin(cost_Subgap, guess, args=(self.offsetCorrectedSortedIVData, self.rN),
               ftol=1e-12, xtol=1e-10)
    self.convolution_without_excessCurrent_Fit_Parameter = fit
    # Recover the best fitting curve over the full simulation voltage range.
    vrange = np.arange(self.vmin, self.vmax, self.simulationVoltageSteps)
    self.convolution_without_excessCurrent_Fit = iV_Curve_Gaussian_Convolution_with_SubgapResistance(
        vrange, fit[0], fit[1], fit[1], fit[2], self.rN, fit[3], fit[4])
    return self.convolution_without_excessCurrent_Fit
def plot_convolution_most_parameters_Fit(self):
    '''This function plots the convolved perfect IV curve fit along with the offset corrected raw data.
    NOTE(review): this reads self.convolution_Fit, which none of the visible
    *_most_parameters_* fit methods sets (they set
    convolution_most_parameters_..._Fit attributes). Presumably the attribute
    name is stale -- confirm which fit result is meant to be displayed.
    '''
    plot(self.convolution_Fit)
    plot(self.offsetCorrectedSortedIVData)
def convolution_perfect_IV_curve_Fit_calc(self):
    '''This function fits the perfect IV curve, convolved with a gaussian, to the raw data.
    The subgap current is mostly 0 in this fit: the subgap leakage resistance is fixed to
    infinity and the excess current to 0, so only the gap voltage, the critical current
    and the gaussian width are free parameters.
    attributes
    ----------
    convolution_perfect_IV_curve_Fit_Parameter: array
        The fit parameters returned by :fmin: ([0] gap voltage, [1] critical current,
        [2] gaussian sigma).
    returns
    -------
    np 2d array
        [0] The bias voltages in the range from self.simulationVmin to self.simulationVmax.
        [1] The current through the SIS junction at each bias voltage.
    '''
    def cost_Perfect(params,iVMeasured,rN):
        '''This cost function is minimised to obtain the best fit of the IV data.
        inputs
        ------
        params: list
            [0] The Gap Voltage.
            [1] The critical current
            [2] The standard deviation of the gaussian used in the convolution
        iVMeasured: 2d array
            The measured IV data.
        rN: float
            The normal resistance of the junction.
        returns
        -------
        float
            The remaining difference between the measured and simulated data.
        '''
        # Simulate on the measured bias grid; no subgap leakage, no excess current.
        sim = iV_Curve_Gaussian_Convolution_with_SubgapResistance(vrange=iVMeasured[0],vGap =params[0],excessCriticalCurrent=0,
                criticalCurrent=params[1],sigmaGaussian = params[2],rN=rN,subgapLeakage=np.inf,subgapLeakageOffset=0)
        return np.sum(np.abs(np.subtract(sim[1],iVMeasured[1])))
    guess =[self.gapVoltage,self.criticalCurrent,self.simulation_Sigma_Gaussian_Convolution_Guess]
    #bounds=[(self.vGapSearchRange[0],self.vGapSearchRange[1]),(self.criticalCurrent-100,self.criticalCurrent+100),(0,.2)]
    #fit = minimize(cost_Perfect,guess,bounds=bounds,args=(self.rawIVDataOffsetCorrected,self.rN))
    fit = fmin(cost_Perfect,guess,args=(self.offsetCorrectedSortedIVData,self.rN),ftol=1e-12,xtol=1e-10)
    #recover the best fitting curve.
    vrange= np.arange(self.simulationVmin,self.simulationVmax,self.simulationVoltageSteps)
    self.convolution_perfect_IV_curve_Fit_Parameter=fit
    # return iV_Curve_Gaussian_Convolution_with_SubgapResistance(vrange=vrange,vGap =fit.x[0],excessCriticalCurrent=0,
    #        criticalCurrent=fit.x[1],sigmaGaussian = fit.x[2],rN=self.rN,subgapLeakage=np.inf,subgapLeakageOffset=0)
    self.convolution_perfect_IV_curve_Fit = iV_Curve_Gaussian_Convolution_with_SubgapResistance(vrange=vrange,vGap =fit[0],excessCriticalCurrent=0,
            criticalCurrent=fit[1],sigmaGaussian = fit[2],rN=self.rN,subgapLeakage=np.inf,subgapLeakageOffset=0)
    return self.convolution_perfect_IV_curve_Fit
def plot_convolution_perfect_IV_curve_Fit(self):
    '''This function plots the convolved perfect IV curve fit along with the offset corrected raw data.
    Requires convolution_perfect_IV_curve_Fit_calc to have been run first so that
    self.convolution_perfect_IV_curve_Fit is set.
    '''
    plot(self.convolution_perfect_IV_curve_Fit)
    plot(self.offsetCorrectedSortedIVData)
def set_simulatedIV(self, iVFit):
    '''Select the simulated IV curve used by all further simulations and calculations.

    Having an explicit setter makes it easy to switch between the different fit models.

    inputs
    ------
    iVFit: 2d array
        Simulated IV data to use from now on.

    attributes
    ----------
    simulatedIV: 2d array
        The currently selected simulated IV data.
    '''
    self.simulatedIV = iVFit
    print('Set Simulated IV curve.')
|
# -*- coding: utf-8 -*-
"""
Created on 2018/2/4 10:24
statsmodels example.
In statistics, ordinary least squares (OLS) is a common method for estimating the
parameters of a linear regression model: the parameters are chosen so that the sum
of squared differences between the observations and the model's predictions is minimal.
@author: wangdongsong1229@163.com
"""
import numpy as np
import statsmodels.api as sm

# Toy data: regress y on x plus an intercept column.
y = [1, 2, 3, 4, 2, 3, 4]
x = sm.add_constant(range(1, 8))
result = sm.OLS(y, x).fit()
print(result.params)
|
def numberguessgame():
    """Play a guess-the-number game: the player has 5 attempts to guess a random
    number between 1 and 10 (inclusive). All interaction is via input()/print()."""
    import random
    num = random.randint(1, 10)
    print('I have chosen a number from 1 to 10. Please guess it. (5 Guesses)')
    for attempt in range(1, 6):
        guesses_left = 5 - attempt
        # Robustness: a non-numeric entry used to crash int(); treat it as invalid.
        try:
            guess = int(input())
        except ValueError:
            guess = -1  # falls through to the invalid-number branch below
        if guess < 0 or guess > 10:
            print('You chose invalid number. Try again.')
            print('Guesses left : ' + str(guesses_left))
            continue
        if guess == num:
            print('You guessed right. You Win.')
            break
        print('Wrong. Try again.')
        print('Guesses left : ' + str(guesses_left))
    else:
        # BUG FIX: the original printed the player's last guess (str(guess))
        # instead of the actual chosen number. The for/else also covers the case
        # where the last attempt was an invalid entry, which previously ended silently.
        print('You lose. Chosen Number was: ' + str(num))
numberguessgame()
import time

from django.core.management.base import BaseCommand, CommandError
from django.shortcuts import get_object_or_404

from pdfgenerator.helpers import convert_doc_to_pdf
from pdfgenerator.models import Queue, Converted_Pdf
class Command(BaseCommand):
    """Management command that continuously polls the Queue and converts
    pending source files to PDF."""

    help = "Converts file to pdf"

    def handle(self, *args, **options):
        # Poll forever; each pass converts everything currently queued,
        # oldest source first.
        while True:
            queued_items = Queue.objects.filter(completed=False).order_by(
                "source__created"
            )
            for item in queued_items:
                filename = item.source.file.name
                converted_pdf = convert_doc_to_pdf(filename)
                item.completed = True
                item.save()
                Converted_Pdf.objects.create(file=converted_pdf)
                # NOTE(review): the item is marked completed and saved, then
                # immediately deleted -- the save looks redundant; confirm intent.
                item.delete()
                # BUG FIX: the f-string had no placeholder; report the converted file.
                self.stdout.write(self.style.SUCCESS(f"Converted {filename}"))
            # BUG FIX: without a pause this loop busy-waits and hammers the
            # database whenever the queue is empty.
            time.sleep(1)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader.processors import MapCompose, TakeFirst
class TopligaItem(scrapy.Item):
    """Scrapy item describing one scraped product ("goods") record."""
    # define the fields for your item here like:
    # NOTE(review): field semantics are inferred from their names only --
    # confirm against the spider that populates them.
    _id = scrapy.Field()
    goods_link = scrapy.Field()
    goods_name = scrapy.Field()
    goods_price = scrapy.Field()
    goods_status = scrapy.Field()
    goods_art = scrapy.Field()
    images = scrapy.Field()
    goods_type = scrapy.Field()
|
import pandas as pd
import numpy as np
from foodkm import config
import os
def update_column_names(df, COL_RENAME_DICT):
    """Return a copy of df limited to the renamable columns plus the ingredients,
    price and location columns, with the rename mapping applied."""
    keep = list(COL_RENAME_DICT.keys()) + ['ingredients'] + config.PRICES + config.LOCATION_COL
    subset = df[keep].copy()
    return subset.rename(columns=COL_RENAME_DICT)
def clean_and_rename(df):
    """Build a cleaned 'ingredients' column from the four configured ingredient
    columns and return the frame with columns renamed via config.COL_RENAME_DICT.
    """
    # Drop non necesary columns
    # df_selection = df.drop(config.COL_DROPS, axis=1)
    # Manage ingredients into a new data structure and select only unique
    df_ingred = df[config.COL_INGREDIENTS].copy()
    # criteria cleaning regex ingredients
    # df_selection = df_selection.drop(config.COL_INGREDIENTS, axis=1)
    # Concatenate the four ingredient text columns with '. ' separators.
    df_ingred['ingredients'] = df_ingred[config.COL_INGREDIENTS[0]] + '. ' + df_ingred[
        config.COL_INGREDIENTS[1]] + '. ' + \
        df_ingred[config.COL_INGREDIENTS[2]] + '. ' + df_ingred[config.COL_INGREDIENTS[3]]
    # Strip quoting/bracket/colon noise, collapse doubled dots, trim leading
    # dot/space, drop backslashes, lowercase.
    # NOTE(review): several patterns ('\.\.', '^\. ', '^ ') rely on str.replace
    # treating them as regexes -- the pandas default changed to regex=False in
    # newer versions; confirm the pinned pandas version.
    df.loc[:, 'ingredients'] = df_ingred['ingredients'].str.replace('"', '').str.replace("'", '').str.replace('[',
        '').str.replace(
        ']', '') \
        .str.replace(':', '').str.replace('\.\.', '\.').str.replace('^\. ', '').str.replace('^ ', '') \
        .str.replace('\\', '').str.lower().tolist()
    # Rename columns
    return update_column_names(df, config.COL_RENAME_DICT)
def extract_prices_info(df):
    """Parse the price columns in place: make 'price' numeric and split
    'price_norm' (shape '<quantity>: <price>') into 'product_price_netto' and
    'product_quantity_netto'. Returns df without the helper columns.
    """
    # 'price' arrives with a decimal comma; unparseable values become NaN.
    df['price'] = pd.to_numeric(df['price'].str.replace(',', '.'), errors='coerce')
    w_p_null = df['price'].isnull()
    w_pn_null = df['price_norm'].isnull()
    # NOTE(review): this writes a (null) 'price' into rows whose 'price_norm' is
    # NOT null -- the mask looks inverted (filling missing price_norm from price
    # seems to have been the intent). Left as-is pending confirmation.
    df.loc[w_p_null & ~w_pn_null, 'price_norm'] = df.loc[w_p_null & ~w_pn_null, 'price']
    df['price_netto'] = df['price_norm'].str.split(':').str[1]
    df['product_price_netto'] = \
        df['price_netto'].str.split(' ').str[1].str.replace(',', '.').astype(float)
    df['product_quantity_netto'] = \
        df['price_norm'].str.split(':').str[0].replace('unknown', np.nan)
    # BUG FIX: the result of drop() was discarded, so the helper columns
    # 'price_norm' and 'price_netto' leaked into the output.
    df = df.drop(['price_norm', 'price_netto'], axis=1)
    return df
def get_paths(source):
    """Map a source filename to its (geo input path, complete output path) pair.

    The destination always gets a .csv extension, whatever the source's was.
    """
    basename, _ext = os.path.splitext(source)  # extension intentionally unused
    source_path = f"data/product_geo/{source}"
    dest_path = f"data/product_complete/{basename}.csv"
    return source_path, dest_path
def get_all_product_id_files():
    """Yield a (source, dest) path pair for every product-info file whose final
    CSV has not been produced yet (placeholder.txt is skipped)."""
    for entry in os.listdir('data/product_info'):
        if entry == 'placeholder.txt':
            continue
        src_path, dst_path = get_paths(entry)
        if not os.path.isfile(dst_path):
            yield src_path, dst_path
def main():
    """Process every pending product file: clean, extract prices, write the final CSV."""
    for src, dst in get_all_product_id_files():
        raw = pd.read_csv(src)
        # clean product info
        cleaned = clean_and_rename(raw)
        # format net prices
        completed = extract_prices_info(cleaned)
        # Save final csv
        completed.to_csv(dst, index=False, encoding='utf-8')


if __name__ == "__main__":
    main()
|
from sympy.plotting import plot
import sympy as sym
import xlsxwriter
from xlrd import open_workbook
from sympy.plotting import plot3d

# Q1: assorted sympy demos (substitution, expansion, limits, sums, integrals, ...).
print("Q1..................................")
x = sym.Symbol('x')
y, i, n, a, b, z = sym.symbols('y i n a b z')
expr = x**2 + x**3 + 21*x**4 + 10*x + 1
print(expr.subs(x, 7))
# ................................................
# BUG FIX: the original called sym.expand(x+y)**2, which expands (x+y) (a no-op)
# and then squares the unexpanded result; the intent was to expand the square.
print(sym.expand((x + y)**2))
# ................................................
print(sym.simplify(4*x**3 + 21*x**2 + 10*x + 12))
# ................................................
print(sym.limit(1/(x**2), x, sym.oo))
# ................................................
print(sym.summation(2*i + i - 1, (i, 5, n)))
# ...............................................
print(sym.integrate(sym.sin(x) + sym.exp(x)*sym.cos(x) + sym.tan(x), x))
# .................................................
print(sym.factor(x**3 + 12*x*y*z + 3*y**2*z))
# ................................................
print(sym.solveset(x - 4, x))
# ................................................
m1 = sym.Matrix([[5, 12, 40], [30, 70, 2]])
m2 = sym.Matrix([2, 1, 0])
print(m1*m2)
# ................................................
plot(x**3 + 3, (x, -10, 10))
# ...............................................
f = x**2*y**3
plot3d(f, (x, -6, 6), (y, -6, 6))

# Q2: write a small formatted worksheet with xlsxwriter.
print("Q2........................................")
workbook = xlsxwriter.Workbook('test1.xlsx')
worksheet = workbook.add_worksheet()
worksheet.autofilter('A1:A5')
data = ["This is Example", "My first export example", 1, 2, 3]
format1 = workbook.add_format({'bold': True, 'font_color': 'red'})
format2 = workbook.add_format({'font_color': 'black'})
worksheet.set_column('A:A', 20)
worksheet.write('A1', data[0], format1)
worksheet.write('A2', data[1], format2)
# FIX: the loop variable used to be named x, shadowing the sympy Symbol above.
for row_idx in range(2):
    worksheet.write(row_idx + 2, 0, row_idx + 1)
worksheet.write('A5', data[4], format1)
workbook.close()

# Q3: read the workbook back with xlrd and print each row.
print("Q3...........................................")
wb = open_workbook('test2.xlsx')
for s in wb.sheets():
    print('sheet:', s.name)
    for row in range(s.nrows):
        values = []
        for col in range(s.ncols):
            # BUG FIX: cell values may be numeric; ''.join() requires strings.
            values.append(str(s.cell(row, col).value))
        print(''.join(values))
|
import numpy as np
import matplotlib.pyplot as plt

DIR_ = ['AL_results/projection_bad/projection_bad', 'AL_results/projection_good/projection_good',
        'AL_results/p_projection_bad/p_projection_bad', 'AL_results/p_projection_good/p_projection_good']
# BUG FIX: the second label duplicated 'bad demo' although it labels the
# projection_good results.
LABEL = ['AL-PFP with bad demo', 'AL-PFP with good demo', 'SAL with bad demo', 'SAL with good demo']

# Hoisted loop-invariant experiment dimensions.
REPEATS = 10
ITER = 20

fig2 = plt.figure(figsize=[10, 5])
ax2 = fig2.add_subplot(111)
ax2.set_xlabel('iterations')
ax2.set_ylabel('average tracking error')
for index, DIR in enumerate(DIR_):
    # score rows 0..2 are the three metrics stored from line 3 on in each CSV.
    score = np.zeros(shape=[REPEATS, 3, ITER])
    for i in range(REPEATS):
        score[i, :, :] = np.genfromtxt(DIR + str(i) + '/score_file.csv', delimiter=',')[3:]
    score_aver = np.average(score, axis=0)
    score_error = np.std(score, axis=0)
    # NOTE(review): the mean comes from metric row 2 but the error bars from
    # row 0 (score_error[0]); confirm whether score_error[2] was intended.
    _, caps, bars = ax2.errorbar(np.arange(0, ITER), score_aver[2], capsize=3, yerr=score_error[0], label=LABEL[index])

# Reference lines (values measured offline):
# average tracking error for the best PID: -114.152
# average tracking error for the bad PID: -283.5675
# average tracking error for the RL: -193.727272
ax2.plot(np.ones_like(score_aver[0])*(114.152), '--', color='r', label='good demo', alpha=0.8)
ax2.plot(np.ones_like(score_aver[0])*(283.5675), '--', color='c', label='bad demo', alpha=0.8)
RL_score = np.genfromtxt('RL_results/RL_score.csv', delimiter=',')
ax2.plot(RL_score[1], '--', color='m', label='RL with handcrafted rewards', alpha=0.8)
ax2.set_xticks(np.arange(0, 20))
ax2.grid(linestyle=':')
fig2.legend(bbox_to_anchor=(0.9, 0.89))
plt.show()
|
import cv2
import imutils
from imutils.video import FPS
import argparse
import serial
import time
# Command-line interface: choose which OpenCV tracker implementation to use.
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--tracker", type=str, default="mosse",
    help="OpenCV object tracker type")
args = vars(ap.parse_args())
# Map of tracker names to their factory functions.
# NOTE(review): these *_create factories are the legacy opencv-contrib API;
# confirm the pinned opencv version still exposes them.
OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.TrackerCSRT_create,
    "kcf": cv2.TrackerKCF_create,
    "boosting": cv2.TrackerBoosting_create,
    "mil": cv2.TrackerMIL_create,
    "tld": cv2.TrackerTLD_create,
    "medianflow": cv2.TrackerMedianFlow_create,
    "mosse": cv2.TrackerMOSSE_create
}
tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
# Last known centre of the tracked bounding box, in frame pixels.
center_point = {
    'x': 0,
    'y': 0
}
vertical = 0           # x-coordinate of the vertical reference line (recomputed per frame)
initBoundaries = None  # ROI selected by the user; stays None until 'i' is pressed
fps = None             # FPS counter; created when tracking starts
cap = cv2.VideoCapture(1)  # capture device index 1
def change_res(width, height):
    """Request a capture resolution from the global VideoCapture via the raw
    property ids (3 = frame width, 4 = frame height)."""
    for prop_id, value in ((3, width), (4, height)):
        cap.set(prop_id, value)
def send_data(vertical, x):
    """Return the steering direction for the Arduino: 1 when the target centre x
    is left of the reference line, -1 when right of it, 0 when centred."""
    if x == vertical:
        return 0
    return 1 if x < vertical else -1
# Serial link to the Arduino that turns the camera mount.
ard = serial.Serial('COM4', 115200, timeout=0.05)
time.sleep(2)  # wait for Arduino

while True:
    ret, frame = cap.read()
    if frame is None:
        break
    length = 600
    frame = imutils.resize(frame, length)
    (H, W) = frame.shape[:2]
    # Vertical reference line in the middle of the (resized) frame.
    cv2.line(frame, (int(length / 2), 0), (int(length / 2), int(length)), (0, 255, 0), 1)
    ard.flush()
    vertical = int(length / 2)
    if initBoundaries is not None:
        (success, box) = tracker.update(frame)
        if success:
            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 235, 25), 1)
            cv2.circle(frame, (int(float(x + (x + w)) / 2), int(float(y + (y + h)) / 2)), 5, (0, 0, 255), -1)
            center_point['x'] = int(float(x + (x + w)) / 2)
            center_point['y'] = int(float(y + (y + h)) / 2)
            # Tell the Arduino which way to turn to centre the target.
            ard.write(str.encode(str(send_data(vertical, center_point['x']))))
            msg = ard.readline()
            print("Message from arduino: ")
            print(msg.decode('utf-8'))
        # ROBUSTNESS FIX: fps is None until the user selects an ROI with 'i';
        # guard so an update on a not-yet-created counter cannot raise
        # AttributeError.
        if fps is not None:
            fps.update()
            fps.stop()
            info = [
                ("FPS", "{:.2f}".format(fps.fps())),
            ]
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 245, 0), 2)
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('i'):
        # Let the user draw the initial bounding box, then start tracking + FPS.
        initBoundaries = cv2.selectROI("frame", frame, fromCenter=False,
                                       showCrosshair=True)
        tracker.init(frame, initBoundaries)
        fps = FPS().start()
    elif key == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
|
# count: how many times an element occurs in a list
a = [1, 2, 3, 4, 5, 6]
c = a.count(2)
print(c)
# index: position of an element in the list
d = a.index(3)
print(d)
# copy: shallow-copy a list
list_moi = [2, 3, 4, 5]
# clear: remove every element
# append: add a single element (note: append mutates in place and returns None)
list_them = list_moi.append([5, 6])
print(list_moi)
# extend: add the elements one by one
list_open = list_moi.extend(["hello", "world"])
print(list_moi)
# insert: put element x at position y
list_moi.insert(0, "this is a man")  # insert the string at the front
print(list_moi)
# pop: remove the i-th element and return its value
bien_bo = list_moi.pop(1)
print(bien_bo)
# remove: delete the first element equal to x (returns None)
bien_remove = list_moi.remove(3)
print(bien_remove)
print(list_moi)
# reverse(): reverse the list in place
list_moi.reverse()
print(list_moi)
# sort: elements must be mutually comparable.
# BUG FIX: the list mixes ints, strings and a nested list, so a plain sort()
# raises TypeError in Python 3; sort by the string representation instead.
list_moi.sort(key=str)
print(list_moi)
Python 3.7.3 (v3.7.3:ef4ec6ed12, Mar 25 2019, 22:22:05) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> L = []
>>> type(L)
<class 'list'>
>>> L = ['red', 'green', 12, 45.6, True, False, ['a', 'b', 'c']]
>>>
>>> # Accessability : subscripting
>>>
>>> L[0]
'red'
>>> l[2]
Traceback (most recent call last):
File "<pyshell#7>", line 1, in <module>
l[2]
NameError: name 'l' is not defined
>>> L[2]
12
>>> L[-1]
['a', 'b', 'c']
>>> L[-1][1]
'b'
>>> L[2:5]
[12, 45.6, True]
>>> L[::-1]
[['a', 'b', 'c'], False, True, 45.6, 12, 'green', 'red']
>>>
>>>
>>> # Mutability aspect of lists
>>>
>>> L = ['red', 'green', 'blue']
>>> L[0]
'red'
>>> L[0] = 'orange'
>>> L
['orange', 'green', 'blue']
>>>
>>> L[0]
'orange'
>>> L[0][0]
'o'
>>> L[0][0] = 'R'
Traceback (most recent call last):
File "<pyshell#24>", line 1, in <module>
L[0][0] = 'R'
TypeError: 'str' object does not support item assignment
>>>
>>>
>>>
>>> L
['orange', 'green', 'blue']
>>> L1 = ['white', 'black']
>>> L[1]
'green'
>>> L[1] = L1
>>> L
['orange', ['white', 'black'], 'blue']
>>> L = ['red', 'green', 'blue']
>>> L[1]
'green'
>>> L[1:2]
['green']
>>> L[1:2] = L1
>>> L
['red', 'white', 'black', 'blue']
>>>
>>>
>>> # Operators that work on lists
>>>
>>> L
['red', 'white', 'black', 'blue']
>>> L2 = ['green', 'grey']
>>> L + L2
['red', 'white', 'black', 'blue', 'green', 'grey']
>>> L * 3
['red', 'white', 'black', 'blue', 'red', 'white', 'black', 'blue', 'red', 'white', 'black', 'blue']
>>> 'orange' in L
False
>>> len(L + L1)
6
>>> L
['red', 'white', 'black', 'blue']
>>> del L[-1]
>>> del
SyntaxError: invalid syntax
>>> L
['red', 'white', 'black']
>>> del
SyntaxError: invalid syntax
>>> del L
>>> L
Traceback (most recent call last):
File "<pyshell#54>", line 1, in <module>
L
NameError: name 'L' is not defined
>>>
>>>
>>> # ----------------------------- Operations on lists
>>>
>>> L
Traceback (most recent call last):
File "<pyshell#59>", line 1, in <module>
L
NameError: name 'L' is not defined
>>> L = ['red', 'green', 'blue']
>>>
>>>
>>> # Add elements to the list
>>>
>>> L.append('yellow')
>>> L
['red', 'green', 'blue', 'yellow']
>>> L.append('orange')
>>> L
['red', 'green', 'blue', 'yellow', 'orange']
>>> ['red', 'green', 'blue', 'yellow', 'orange']
['red', 'green', 'blue', 'yellow', 'orange']
>>> L.insert(2, 'pink')
>>> L
['red', 'green', 'pink', 'blue', 'yellow', 'orange']
>>> L1 = ['white', 'grey', 'black']
>>> L.extend(L1)
>>> L
['red', 'green', 'pink', 'blue', 'yellow', 'orange', 'white', 'grey', 'black']
>>>
>>>
>>> # Remove elements
>>>
>>> # del L[2]
>>>
>>> L.pop()
'black'
>>> L
['red', 'green', 'pink', 'blue', 'yellow', 'orange', 'white', 'grey']
>>> L.pop()
'grey'
>>> L.pop(2)
'pink'
>>> L.remove('blue')
>>> L
['red', 'green', 'yellow', 'orange', 'white']
>>>
>>>
>>> # Search
>>>
>>> L.index('grey')
Traceback (most recent call last):
File "<pyshell#91>", line 1, in <module>
L.index('grey')
ValueError: 'grey' is not in list
>>> L.find('grey')
Traceback (most recent call last):
File "<pyshell#92>", line 1, in <module>
L.find('grey')
AttributeError: 'list' object has no attribute 'find'
>>> L.count('red')
1
>>> L.append('red')
>>> L.count('red')
2
>>> L
['red', 'green', 'yellow', 'orange', 'white', 'red']
>>> L.index('green')
1
>>> L.index('red')
0
>>>
>>> # Rearranging the values in the list
>>>
>>> L
['red', 'green', 'yellow', 'orange', 'white', 'red']
>>> sorted(L)
['green', 'orange', 'red', 'red', 'white', 'yellow']
>>> L
['red', 'green', 'yellow', 'orange', 'white', 'red']
>>> sorted(L, reverse=True)
['yellow', 'white', 'red', 'red', 'orange', 'green']
>>> L.sort()
>>> L
['green', 'orange', 'red', 'red', 'white', 'yellow']
>>> L.sort(reverse=True)
>>> L
['yellow', 'white', 'red', 'red', 'orange', 'green']
>>>
>>> L
['yellow', 'white', 'red', 'red', 'orange', 'green']
>>> L.reverse()
>>> L
['green', 'orange', 'red', 'red', 'white', 'yellow']
>>> reversed(L)
<list_reverseiterator object at 0x000002A77644F048>
>>> list(reversed(L))
['yellow', 'white', 'red', 'red', 'orange', 'green']
>>>
>>>
>>> # Copying
>>>
>>> L
['green', 'orange', 'red', 'red', 'white', 'yellow']
>>>
>>> L = ['red', 'white', 'yellow']
>>> L1 = L
>>>
>>> L
['red', 'white', 'yellow']
>>> L1
['red', 'white', 'yellow']
>>> L1[1] = 'black'
>>> L1
['red', 'black', 'yellow']
>>> L
['red', 'black', 'yellow']
>>>
>>>
>>> from copy import deepcopy
>>> # import copy ---> copy.deepcopy()
>>>
>>>
>>> L
['red', 'black', 'yellow']
>>> L2 = deepcopy(L)
>>> L
['red', 'black', 'yellow']
>>> L2
['red', 'black', 'yellow']
>>> L[1] = 'white'
>>> L
['red', 'white', 'yellow']
>>> L2
['red', 'black', 'yellow']
>>> L1
['red', 'white', 'yellow']
>>>
>>>
>>> # Iteration
>>>
>>> L
['red', 'white', 'yellow']
>>> for item in L:
print(item.upper())
RED
WHITE
YELLOW
>>>
|
# -*- coding:utf-8 -*-
"""
Author: xiaodingrong
Date: 2021-10-21
"""
get_number = int(input("Please input a number: "))
# Print a left-aligned star triangle with get_number rows:
# row k (0-based) contains k+1 stars.
for row in range(get_number):
    print("*" * (row + 1))
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2017 Wind River Systems, Inc.
#
import collections
import copy
import datetime
import math
import uuid
import six
from neutron_lib import constants
from oslo_log import log as logging
from oslo_utils import timeutils
from neutron.common import constants as n_const
from neutron.common import topics
from neutron.db import agents_db
from neutron.extensions import wrs_net
from neutron.plugins.ml2 import config
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_agent
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit.plugins.wrs import test_extension_host
from neutron.tests.unit.plugins.wrs import test_extension_pnet
from neutron.tests.unit.plugins.wrs import test_wrs_plugin
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
LOG = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Test fixtures.
# Fake compute hosts used as scheduling targets.  All start in the "down"
# (locked) state; individual tests flip availability as needed.
# ---------------------------------------------------------------------------
HOST1 = {'name': 'compute-0',
         'id': '065aa1d1-84ed-4d59-a777-16b0ea8a5640',
         'availability': n_const.HOST_DOWN}
HOST2 = {'name': 'compute-1',
         'id': '28c25767-e6e7-49c3-9735-2ef5ff04c4a2',
         'availability': n_const.HOST_DOWN}
HOST3 = {'name': 'compute-2',
         'id': 'c947cbd0-f59a-4ab1-b0c6-1e12bd4846ab',
         'availability': n_const.HOST_DOWN}
HOST4 = {'name': 'compute-3',
         'id': '89bfbe7a-c416-4c32-ae65-bc390fa0a908',
         'availability': n_const.HOST_DOWN}
HOST5 = {'name': 'compute-4',
         'id': '81c8fbd6-4512-4d83-9b86-c1ab90bbb587',
         'availability': n_const.HOST_DOWN}
HOSTS = (HOST1, HOST2, HOST3, HOST4, HOST5)
# Provider networks: two VLAN networks plus several flat networks, including
# one reserved for SR-IOV interfaces.
PNET1 = {'name': 'vlan-pnet0',
         'type': n_const.PROVIDERNET_VLAN,
         'description': 'vlan test provider network'}
PNET2 = {'name': 'vlan-pnet1',
         'type': n_const.PROVIDERNET_VLAN,
         'description': 'vlan test provider network'}
PNET3 = {'name': 'flat-pnet0',
         'type': n_const.PROVIDERNET_FLAT,
         'description': 'flat test provider network'}
# PNET4 should not be bound to a compute node
PNET4 = {'name': 'flat-pnet1',
         'type': n_const.PROVIDERNET_FLAT,
         'description': 'flat test provider network'}
PNET5 = {'name': 'flat-sriov-pnet1',
         'type': n_const.PROVIDERNET_FLAT,
         'description': 'flat test provider network for sriov networks'}
PNET6 = {'name': 'flat-pnet2',
         'type': n_const.PROVIDERNET_FLAT,
         'description': 'flat test provider network'}
PNETS = (PNET1, PNET2, PNET3, PNET4, PNET5, PNET6)
# Segmentation-id ranges for the two VLAN provider networks.
PNET1_RANGE1 = {'name': 'vlan-pnet0-0',
                'description': 'vlan range1',
                'shared': False,
                'minimum': 1,
                'maximum': 100,
                'tenant_id': test_db_base_plugin_v2.TEST_TENANT_ID}
PNET2_RANGE1 = {'name': 'vlan-pnet1-0',
                'description': 'vlan range1',
                'shared': False,
                'minimum': 101,
                'maximum': 200,
                'tenant_id': test_db_base_plugin_v2.TEST_TENANT_ID}
PNET_RANGES = {'vlan-pnet0': [PNET1_RANGE1],
               'vlan-pnet1': [PNET2_RANGE1]}
# Host -> provider-network associations.  Note compute-3 only carries the
# SR-IOV network and compute-4 only an otherwise unused flat network, which
# several tests rely on to create unschedulable hosts.
PNET_BINDINGS = {'compute-0': ['vlan-pnet0', 'vlan-pnet1', 'flat-pnet0'],
                 'compute-1': ['vlan-pnet0', 'vlan-pnet1', 'flat-pnet0'],
                 'compute-2': ['vlan-pnet0', 'vlan-pnet1'],
                 'compute-3': ['flat-sriov-pnet1'],
                 'compute-4': ['flat-pnet2']}
# One data/pci-sriov interface per host, bound to that host's networks.
INTERFACE1 = {'uuid': str(uuid.uuid4()),
              'mtu': n_const.DEFAULT_MTU,
              'vlans': '',
              'network_type': 'data',
              'providernets': ','.join(PNET_BINDINGS['compute-0'])}
INTERFACE2 = {'uuid': str(uuid.uuid4()),
              'mtu': n_const.DEFAULT_MTU,
              'vlans': '4001,,4002, 4003',
              'network_type': 'data',
              'providernets': ','.join(PNET_BINDINGS['compute-1'])}
INTERFACE3 = {'uuid': str(uuid.uuid4()),
              'mtu': n_const.DEFAULT_MTU,
              'vlans': '4001',
              'network_type': 'data',
              'providernets': ','.join(PNET_BINDINGS['compute-2'])}
INTERFACE4 = {'uuid': str(uuid.uuid4()),
              'mtu': n_const.DEFAULT_MTU,
              'vlans': '4001',
              'network_type': 'pci-sriov',
              'providernets': ','.join(PNET_BINDINGS['compute-3'])}
INTERFACE5 = {'uuid': str(uuid.uuid4()),
              'mtu': n_const.DEFAULT_MTU,
              'vlans': '4001',
              'network_type': 'data',
              'providernets': ','.join(PNET_BINDINGS['compute-4'])}
INTERFACES = {'compute-0': INTERFACE1,
              'compute-1': INTERFACE2,
              'compute-2': INTERFACE3,
              'compute-3': INTERFACE4,
              'compute-4': INTERFACE5}
# Tenant and external networks layered on the provider networks above.
NET1 = {'name': 'tenant-net0',
        'provider__physical_network': 'vlan-pnet0',
        'provider__network_type': n_const.PROVIDERNET_VLAN}
NET2 = {'name': 'tenant-net1',
        'provider__physical_network': 'vlan-pnet1',
        'provider__network_type': n_const.PROVIDERNET_VLAN}
NET3 = {'name': 'external-net0',
        'router__external': True,
        'provider__physical_network': 'flat-pnet0',
        'provider__network_type': n_const.PROVIDERNET_FLAT}
NET4 = {'name': 'tenant-net2',
        'provider__physical_network': 'flat-pnet1',
        'provider__network_type': n_const.PROVIDERNET_FLAT}
NET5 = {'name': 'tenant-net3',
        'provider__physical_network': 'flat-sriov-pnet1',
        'provider__network_type': n_const.PROVIDERNET_FLAT}
NET6 = {'name': 'tenant-net4',
        'provider__physical_network': 'flat-pnet2',
        'provider__network_type': n_const.PROVIDERNET_FLAT}
NET7 = {'name': 'tenant-net5',
        'provider__physical_network': 'vlan-pnet0',
        'provider__network_type': n_const.PROVIDERNET_VLAN}
NETS = (NET1, NET2, NET3, NET4, NET5, NET6, NET7)
# Subnets; tenant-net5 intentionally gets four subnets so DHCP-redistribution
# tests have one network that is noticeably "heavier" than the others.
SUBNET1 = {'name': 'tenant-subnet0',
           'cidr': '192.168.1.0/24',
           'shared': False,
           'enable_dhcp': True,
           'gateway': '192.168.1.1'}
SUBNET2 = {'name': 'tenant-subnet1',
           'cidr': '192.168.2.0/24',
           'shared': False,
           'enable_dhcp': True,
           'gateway': '192.168.2.1'}
SUBNET3 = {'name': 'external-subnet0',
           'cidr': '192.168.3.0/24',
           'shared': True,
           'enable_dhcp': False,
           'gateway': '192.168.3.1'}
SUBNET4 = {'name': 'tenant-subnet3',
           'cidr': '192.168.4.0/24',
           'shared': False,
           'enable_dhcp': True,
           'gateway': '192.168.4.1'}
SUBNET5 = {'name': 'tenant-subnet4',
           'cidr': '192.168.5.0/24',
           'shared': False,
           'enable_dhcp': True,
           'gateway': '192.168.5.1'}
SUBNET6 = {'name': 'tenant-subnet5',
           'cidr': '192.168.6.0/24',
           'shared': False,
           'enable_dhcp': True,
           'gateway': '192.168.6.1'}
SUBNET7 = {'name': 'tenant-subnet6',
           'cidr': '192.168.7.0/24',
           'shared': False,
           'enable_dhcp': True,
           'gateway': '192.168.7.1'}
SUBNET8 = {'name': 'tenant-subnet7',
           'cidr': '192.168.8.0/24',
           'shared': False,
           'enable_dhcp': True,
           'gateway': '192.168.8.1'}
SUBNET9 = {'name': 'tenant-subnet8',
           'cidr': '192.168.9.0/24',
           'shared': False,
           'enable_dhcp': True,
           'gateway': '192.168.9.1'}
SUBNET10 = {'name': 'tenant-subnet9',
            'cidr': '192.168.10.0/24',
            'shared': False,
            'enable_dhcp': True,
            'gateway': '192.168.10.1'}
SUBNET11 = {'name': 'tenant-subnet10',
            'cidr': '192.168.11.0/24',
            'shared': False,
            'enable_dhcp': True,
            'gateway': '192.168.11.1'}
SUBNETS = {'tenant-net0': [SUBNET1],
           'tenant-net1': [SUBNET2],
           'external-net0': [SUBNET3],
           'tenant-net2': [SUBNET4],
           'tenant-net3': [SUBNET5],
           'tenant-net4': [SUBNET6],
           'tenant-net5': [SUBNET7, SUBNET8, SUBNET9, SUBNET10]}
# Agent report templates; 'host' is filled in per registration.
L3_AGENT_TEMPLATE = {
    'binary': 'neutron-l3-agent',
    'host': 'TBD',
    'topic': topics.L3_AGENT,
    'admin_state_up': True,
    'configurations': {'use_namespaces': True,
                       'router_id': None,
                       'handle_internal_only_routers': True,
                       'gateway_external_network_id': None,
                       'interface_driver': 'interface_driver',
                       },
    'agent_type': constants.AGENT_TYPE_L3}
DHCP_AGENT_TEMPLATE = {
    'binary': 'neutron-dhcp-agent',
    'host': 'TBD',
    'topic': topics.DHCP_AGENT,
    'admin_state_up': True,
    'configurations': {'dhcp_driver': 'dhcp_driver',
                       'use_namespaces': True,
                       },
    'agent_type': constants.AGENT_TYPE_DHCP}
class FakeAgent(object):
    """Lightweight stand-in for an agent DB record.

    Keyword arguments become instance attributes, and values are also
    readable with dict-style subscripting; a missing key yields None.
    """

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __getitem__(self, key):
        return self.__dict__.get(key)
class WrsAgentSchedulerTestCase(test_extension_pnet.ProvidernetTestCaseMixin,
                                test_extension_host.HostTestCaseMixin,
                                test_l3.L3NatTestCaseMixin,
                                test_agent.AgentDBTestMixIn,
                                test_wrs_plugin.WrsMl2PluginV2TestCase):
    """Common fixture for the WRS L3/DHCP agent scheduler tests.

    Builds hosts, provider networks, interfaces, networks and subnets from
    the module-level constants, and exposes helpers for registering agents
    and exercising router rescheduling.
    """

    def setup_config(self):
        super(WrsAgentSchedulerTestCase, self).setup_config()
        # Instantiate a fake host driver to allow us to control the host to
        # provider network mappings
        config.cfg.CONF.set_override('host_driver',
                                     'neutron.tests.unit.plugins.wrs.'
                                     'test_host_driver.TestHostDriver')

    def setUp(self, plugin=None, ext_mgr=None):
        # Per-test registries of created objects, keyed by name.
        self._hosts = {}
        self._pnets = {}
        self._pnet_ranges = {}
        self._nets = {}
        self._subnets = {}
        self._dhcp_agents = {}
        self._l3_agents = {}
        super(WrsAgentSchedulerTestCase, self).setUp()
        self._plugin = directory.get_plugin()
        self._l3_plugin = directory.get_plugin(plugin_constants.L3)
        self._l3_scheduler = self._l3_plugin.router_scheduler
        self._dhcp_scheduler = self._plugin.network_scheduler
        self._host_driver = self._plugin.host_driver
        # Silence RPC notifications during tests.
        self._l3_plugin.agent_notifiers = {}
        self._plugin.agent_notifiers = {}
        self._prepare_test_dependencies(hosts=HOSTS,
                                        providernets=PNETS,
                                        providernet_ranges=PNET_RANGES,
                                        interfaces=INTERFACES,
                                        networks=NETS,
                                        subnets=SUBNETS)

    def tearDown(self):
        self._cleanup_test_dependencies()
        super(WrsAgentSchedulerTestCase, self).tearDown()

    def _get_subnet_id(self, name):
        """Return the UUID of the subnet created under *name*."""
        return self._subnets[name]['id']

    def _get_net_id(self, name):
        """Return the UUID of the network created under *name*."""
        return self._nets[name]['id']

    def _get_network(self, name):
        """Return the full network dict created under *name*."""
        return self._nets[name]

    def _get_host_id(self, name):
        """Return the UUID of the host created under *name*."""
        return self._hosts[name]['id']

    def _lock_test_host(self, id):
        """Mark a host as down (locked) and verify the update took."""
        body = {'availability': n_const.HOST_DOWN}
        data = self._update_host(id, body)
        self.assertEqual(data['host']['availability'],
                         n_const.HOST_DOWN)

    def _lock_test_hosts(self, hosts=HOSTS):
        for host in hosts:
            self._lock_test_host(host['id'])

    def _query_router_host(self, id):
        """Return the host currently assigned to router *id*."""
        router = self._l3_plugin.get_router(self.adminContext, id)
        self.assertIsNotNone(router)
        return router[wrs_net.HOST]

    def _create_subnets_for_network(self, data, subnets):
        """Create and register all subnets declared for one network."""
        network = data['network']
        for subnet in subnets[network['name']]:
            # NOTE(review): 'arg_list' never appears as a subnet key, so
            # only 'enable_dhcp' is forwarded here — confirm whether
            # 'shared' was intended instead.
            arg_list = ('enable_dhcp', 'arg_list')
            args = dict((k, v) for k, v in six.iteritems(subnet)
                        if k in arg_list)
            subnet_data = self._make_subnet(self.fmt, data,
                                            subnet['gateway'],
                                            subnet['cidr'], **args)
            self._subnets[subnet['name']] = subnet_data['subnet']

    def _create_test_networks(self, networks, subnets):
        """Create all test networks and their subnets."""
        for net in networks:
            # Only forward the provider/external attributes the API accepts.
            arg_list = ('provider__physical_network',
                        'provider__network_type',
                        'provider__segmentation_id',
                        'router__external')
            args = dict((k, v) for k, v in six.iteritems(net)
                        if k in arg_list)
            data = self._make_network(self.fmt,
                                      name=net['name'],
                                      admin_state_up=True,
                                      arg_list=arg_list,
                                      **args)
            self._nets[net['name']] = data['network']
            self._create_subnets_for_network(data, subnets)

    def _delete_test_networks(self):
        for name, data in six.iteritems(self._nets):
            self._delete('networks', data['id'])
        # Reset to an empty dict (not a list) to match how _nets is used
        # everywhere else in this fixture.
        self._nets = {}

    def _register_dhcp_agent(self, hostname):
        """Report a fake DHCP agent running on *hostname*."""
        agent = copy.deepcopy(DHCP_AGENT_TEMPLATE)
        agent['host'] = hostname
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': agent},
                              time=timeutils.utcnow().isoformat())
        self._dhcp_agents[hostname] = FakeAgent(**agent)

    def _register_dhcp_agents(self, hosts):
        # Iterate the supplied hosts rather than the module constant so
        # callers can register a subset.
        for host in hosts:
            self._register_dhcp_agent(host['name'])

    def _register_l3_agent(self, hostname):
        """Report a fake L3 agent running on *hostname*."""
        agent = copy.deepcopy(L3_AGENT_TEMPLATE)
        agent['host'] = hostname
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': agent},
                              time=timeutils.utcnow().isoformat())
        self._l3_agents[hostname] = FakeAgent(**agent)

    def _register_l3_agents(self, hosts):
        # Iterate the supplied hosts rather than the module constant so
        # callers can register a subset.
        for host in hosts:
            self._register_l3_agent(host['name'])

    def _list_dhcp_agents(self):
        return self._list_agents(query_string='binary=neutron-dhcp-agent')

    def _list_l3_agents(self):
        return self._list_agents(query_string='binary=neutron-l3-agent')

    def _prepare_test_dependencies(self, hosts, providernets,
                                   providernet_ranges, interfaces,
                                   networks, subnets):
        super(WrsAgentSchedulerTestCase, self)._prepare_test_dependencies(
            hosts=hosts, providernets=providernets,
            providernet_ranges=providernet_ranges,
            interfaces=interfaces)
        self._create_test_networks(networks=networks, subnets=subnets)

    def _cleanup_test_dependencies(self):
        self._delete_test_networks()
        super(WrsAgentSchedulerTestCase, self)._cleanup_test_dependencies()

    def _test_router_rescheduling_validate_result(
            self, agent_ids, initial_expected_distribution,
            final_expected_distribution, reschedule_threshold=1,
            max_time=None):
        """Redistribute routers and validate the before/after distribution.

        :param agent_ids: agents to count routers on, in expectation order.
        :param initial_expected_distribution: per-agent router counts before.
        :param final_expected_distribution: per-agent counts after (compared
            order-insensitively).
        :param reschedule_threshold: imbalance tolerated before moving.
        :param max_time: optional wall-clock bound (seconds) on the
            redistribution call.
        """
        # Count routers and check they match expected distribution
        agent_count = len(agent_ids)
        agent_routers_count = []
        for i in range(agent_count):
            agent_routers_count.append(
                len(self._l3_plugin.list_routers_on_l3_agent(
                    self.adminContext, agent_ids[i]
                )['routers'])
            )
        self.assertEqual(agent_routers_count, initial_expected_distribution)
        reschedule_start_time = datetime.datetime.now()
        # Move routers only when an agent exceeds another by the threshold.
        reschedule_function = (lambda a, b: a > b + reschedule_threshold)
        self._l3_plugin.redistribute_routers(self.adminContext,
                                             reschedule_function)
        reschedule_end_time = datetime.datetime.now()
        reschedule_total_time = (reschedule_end_time - reschedule_start_time)
        # Validate maximum time not exceeded
        if max_time:
            self.assertLessEqual(reschedule_total_time.seconds, max_time)
        # Count routers and check they match new expected distribution
        agent_routers_count = []
        for i in range(agent_count):
            agent_routers_count.append(
                len(self._l3_plugin.list_routers_on_l3_agent(
                    self.adminContext, agent_ids[i]
                )['routers'])
            )
        self.assertEqual(sorted(agent_routers_count),
                         sorted(final_expected_distribution))

    def _test_router_rescheduling_by_count(self, router_count, agent_count,
                                           max_time=None, second_zone_count=0):
        """Runs rescheduling test with specified number of routers and agents.

        Maximum number of agents is 3 due to hosts that have interfaces on
        providernet vlan-pnet0.  When *second_zone_count* is non-zero an
        additional, isolated zone (HOST5 / tenant-net4) is populated and must
        be left untouched by the redistribution.
        """
        # Define expected results: everything starts on the first agent and
        # should end up spread as evenly as possible.
        initial_expected_distribution = [0] * agent_count
        initial_expected_distribution[0] = router_count
        redistributed_value = int(math.floor(router_count / agent_count))
        final_expected_distribution = [redistributed_value] * agent_count
        assigned_routers = redistributed_value * agent_count
        for i in range(router_count - assigned_routers):
            final_expected_distribution[i] += 1
        if second_zone_count:
            initial_expected_distribution.append(second_zone_count)
            final_expected_distribution.append(second_zone_count)
        # Set up routers and networks
        generic_subnets = []
        generic_routers = []
        network_data = {'network': self._get_network(NET1['name'])}
        for i in range(router_count + second_zone_count):
            if i == router_count:
                # Remaining routers belong to the second, isolated zone.
                network_data = {'network': self._get_network(NET6['name'])}
            subnet_name = "generic-subnet-%d" % i
            tenant_id = "generic-tenant-%d" % i
            generic_subnet = {'name': subnet_name,
                              'cidr': "172.16.%d.0/24" % i,
                              'gateway': "172.16.%d.1" % i}
            data = self._make_subnet(self.fmt, network_data,
                                     generic_subnet['gateway'],
                                     generic_subnet['cidr'],
                                     tenant_id=tenant_id,
                                     enable_dhcp=False)
            generic_subnets.append(data['subnet'])
            router_name = "generic-router-%d" % i
            generic_router = self._make_router(self.fmt, tenant_id,
                                               router_name)
            generic_routers.append(generic_router)
            self._router_interface_action(
                'add', generic_router['router']['id'],
                generic_subnets[i]['id'], None)
        # Set up agents and auto-schedule routers
        for i in range(agent_count):
            self._register_l3_agent(HOSTS[i]['name'])
            self._l3_plugin.auto_schedule_routers(self.adminContext,
                                                  HOSTS[i]['name'], None)
        if second_zone_count:
            self._register_l3_agent(HOST5['name'])
            self._l3_plugin.auto_schedule_routers(self.adminContext,
                                                  HOST5['name'], None)
        # Validate that rescheduling with this setup works
        agents = self._list_l3_agents()['agents']
        agent_ids = [agent['id'] for agent in agents]
        self._test_router_rescheduling_validate_result(
            agent_ids, initial_expected_distribution,
            final_expected_distribution, 1, max_time
        )
        # Clean up routers
        for i in range(router_count + second_zone_count):
            self._router_interface_action(
                'remove', generic_routers[i]['router']['id'],
                generic_subnets[i]['id'], None)
class WrsL3AgentSchedulerTestCase(WrsAgentSchedulerTestCase):
    """L3 agent scheduling tests: candidate selection and redistribution."""

    def test_router_without_interfaces(self):
        # A router with no ports has no provider network requirements, so
        # no host can be a scheduling candidate.
        self._register_l3_agents(HOSTS)
        data = self._list_l3_agents()
        self.assertEqual(len(data['agents']), len(HOSTS))
        with self.router(name='router1',
                         tenant_id=self._tenant_id) as r1:
            # Check that it has no candidate hosts
            agents = self._l3_scheduler.get_l3_agents_for_router(
                self._plugin, self.adminContext, r1['router']['id'])
            self.assertEqual(len(agents), 0)

    def test_router_with_isolated_host(self):
        # The external network lives on flat-pnet0 which only compute-0 and
        # compute-1 carry, so compute-2 cannot host the router.
        self._register_l3_agents(HOSTS)
        data = self._list_l3_agents()
        self.assertEqual(len(data['agents']), len(HOSTS))
        with self.router(name='router1',
                         tenant_id=self._tenant_id) as r1:
            # Attach it to an external network
            self._add_external_gateway_to_router(
                r1['router']['id'], self._get_net_id(NET3['name']))
            # Check that it has only 2 of 3 candidate hosts
            agents = self._l3_scheduler.get_l3_agents_for_router(
                self._plugin, self.adminContext, r1['router']['id'])
            self.assertEqual(len(agents), 2)
            # Confirm that the 1st host can support this router
            routers = self._l3_scheduler._get_routers_can_schedule(
                self._l3_plugin, self.adminContext, [r1['router']],
                self._l3_agents[HOST1['name']])
            self.assertEqual(len(routers), 1)
            # Confirm that the 2nd host can support this router
            routers = self._l3_scheduler._get_routers_can_schedule(
                self._l3_plugin, self.adminContext, [r1['router']],
                self._l3_agents[HOST2['name']])
            self.assertEqual(len(routers), 1)
            # Confirm that the 3rd host cannot support this router
            routers = self._l3_scheduler._get_routers_can_schedule(
                self._l3_plugin, self.adminContext, [r1['router']],
                self._l3_agents[HOST3['name']])
            self.assertEqual(len(routers or []), 0)
            # Remove the attachment
            self._remove_external_gateway_from_router(
                r1['router']['id'], self._get_net_id(NET3['name']))
            # Check that it can no longer be scheduled
            agents = self._l3_scheduler.get_l3_agents_for_router(
                self._plugin, self.adminContext, r1['router']['id'])
            self.assertEqual(len(agents), 0)

    def test_router_with_multiple_interfaces(self):
        # Candidates shrink from 3 hosts to 2 once the external gateway
        # (flat-pnet0) is added, since compute-2 lacks that network.
        self._register_l3_agents(HOSTS)
        data = self._list_l3_agents()
        self.assertEqual(len(data['agents']), len(HOSTS))
        with self.router(name='router1',
                         tenant_id=self._tenant_id) as r1:
            # Attach to 2 tenant networks
            self._router_interface_action(
                'add', r1['router']['id'],
                self._get_subnet_id(SUBNET1['name']), None)
            self._router_interface_action(
                'add', r1['router']['id'],
                self._get_subnet_id(SUBNET2['name']), None)
            # Check that it has the first 3 hosts as candidates
            agents = self._l3_scheduler.get_l3_agents_for_router(
                self._plugin, self.adminContext, r1['router']['id'])
            self.assertEqual(len(agents), 3)
            # Attach it to an external network
            self._add_external_gateway_to_router(
                r1['router']['id'], self._get_net_id(NET3['name']))
            # Check that it can now only be scheduled to 2 of 3 hosts
            agents = self._l3_scheduler.get_l3_agents_for_router(
                self._plugin, self.adminContext, r1['router']['id'])
            self.assertEqual(len(agents), 2)
            # Remove the attachments
            self._remove_external_gateway_from_router(
                r1['router']['id'], self._get_net_id(NET3['name']))
            self._router_interface_action(
                'remove', r1['router']['id'],
                self._get_subnet_id(SUBNET1['name']), None)
            self._router_interface_action(
                'remove', r1['router']['id'],
                self._get_subnet_id(SUBNET2['name']), None)

    def test_router_rescheduled_on_locked_host(self):
        # Locking the host currently serving a router must move the router
        # to another candidate host automatically.
        self._register_l3_agents(HOSTS)
        data = self._list_l3_agents()
        self.assertEqual(len(data['agents']), len(HOSTS))
        with self.router(name='router1',
                         tenant_id=self._tenant_id) as r1:
            # Attach to 2 tenant networks
            self._router_interface_action(
                'add', r1['router']['id'],
                self._get_subnet_id(SUBNET1['name']), None)
            self._router_interface_action(
                'add', r1['router']['id'],
                self._get_subnet_id(SUBNET2['name']), None)
            # Check that it has the first 3 hosts as candidates
            agents = self._l3_scheduler.get_l3_agents_for_router(
                self._plugin, self.adminContext, r1['router']['id'])
            self.assertEqual(len(agents), 3)
            # Check that it was assigned to one of them
            original_host = self._query_router_host(r1['router']['id'])
            self.assertIsNotNone(original_host)
            self.assertIn(original_host, [h['name'] for h in HOSTS])
            # Lock that host
            self._lock_test_host(self._get_host_id(original_host))
            # Check that it was assigned to a different host
            current_host = self._query_router_host(r1['router']['id'])
            self.assertIsNotNone(current_host)
            self.assertIn(current_host, [h['name'] for h in HOSTS])
            self.assertNotEqual(current_host, original_host)
            # Remove the attachments
            self._router_interface_action(
                'remove', r1['router']['id'],
                self._get_subnet_id(SUBNET1['name']), None)
            self._router_interface_action(
                'remove', r1['router']['id'],
                self._get_subnet_id(SUBNET2['name']), None)

    def test_redistribute_routers_trivial(self):
        # Two routers on one agent, one empty agent: expect a 1/1 split.
        with self.router(name='router1',
                         tenant_id='test-tenant') as r1:
            self._router_interface_action(
                'add', r1['router']['id'],
                self._get_subnet_id(SUBNET1['name']), None)
            with self.router(name='router2',
                             tenant_id='test-tenant') as r2:
                self._router_interface_action(
                    'add', r2['router']['id'],
                    self._get_subnet_id(SUBNET2['name']), None)
                # Set up and auto-schedule routers
                self._register_l3_agent(HOST1['name'])
                self._register_l3_agent(HOST2['name'])
                agents = self._list_l3_agents()['agents']
                self._l3_plugin.auto_schedule_routers(self.adminContext,
                                                      HOST1['name'], None)
                self._l3_plugin.auto_schedule_routers(self.adminContext,
                                                      HOST2['name'], None)
                # Validate that rescheduling with this setup works
                agent_ids = [agent['id'] for agent in agents]
                self._test_router_rescheduling_validate_result(agent_ids,
                                                               [2, 0],
                                                               [1, 1], 1)
                self._router_interface_action(
                    'remove', r2['router']['id'],
                    self._get_subnet_id(SUBNET2['name']), None)
            self._router_interface_action(
                'remove', r1['router']['id'],
                self._get_subnet_id(SUBNET1['name']), None)

    def test_redistribute_routers_invalid_agent(self):
        # The second agent (HOST4, SR-IOV only) cannot host routers, so the
        # distribution must not change.
        with self.router(name='router1',
                         tenant_id='test-tenant') as r1:
            self._router_interface_action(
                'add', r1['router']['id'],
                self._get_subnet_id(SUBNET1['name']), None)
            with self.router(name='router2',
                             tenant_id='test-tenant') as r2:
                self._router_interface_action(
                    'add', r2['router']['id'],
                    self._get_subnet_id(SUBNET2['name']), None)
                # Set up and auto-schedule routers
                self._register_l3_agent(HOST1['name'])
                # HOST4 can not host router
                self._register_l3_agent(HOST4['name'])
                agents = self._list_l3_agents()['agents']
                self._l3_plugin.auto_schedule_routers(self.adminContext,
                                                      HOST1['name'], None)
                self._l3_plugin.auto_schedule_routers(self.adminContext,
                                                      HOST4['name'], None)
                # Validate that rescheduling with this setup works
                agent_ids = [agent['id'] for agent in agents]
                self._test_router_rescheduling_validate_result(agent_ids,
                                                               [2, 0],
                                                               [2, 0], 1)
                self._router_interface_action(
                    'remove', r2['router']['id'],
                    self._get_subnet_id(SUBNET2['name']), None)
            self._router_interface_action(
                'remove', r1['router']['id'],
                self._get_subnet_id(SUBNET1['name']), None)

    def test_redistribute_routers_none(self):
        # Single agent: nothing to move; bounded at 1 second.
        router_count = 5
        agent_count = 1
        self._test_router_rescheduling_by_count(router_count, agent_count, 1)

    def test_redistribute_routers_few(self):
        router_count = 5
        agent_count = 2
        self._test_router_rescheduling_by_count(router_count, agent_count)

    def test_redistribute_routers_large_office(self):
        # Includes a second, isolated zone that must be left untouched.
        router_count = 10
        agent_count = 3
        self._test_router_rescheduling_by_count(router_count, agent_count,
                                                second_zone_count=9)

    # TODO(alegacy): disabled because it is timing out in unit tests
    def notest_redistribute_routers_many(self):
        router_count = 30
        agent_count = 3
        self._test_router_rescheduling_by_count(router_count, agent_count, 30)
class WrsDhcpAgentSchedulerTestCase(WrsAgentSchedulerTestCase):
    """DHCP agent scheduling and redistribution tests."""

    def test_get_dhcp_networks_for_host_with_no_networks(self):
        # Check which dhcp networks can be scheduled on this host
        data = self._dhcp_scheduler.get_dhcp_subnets_for_host(
            self._plugin, self.adminContext, HOST4['name'], fields=None)
        # Should not be any networks available for this agent as HOST4 is
        # only associated with pci-sriov data interfaces and those interface
        # types are excluded by the scheduler
        self.assertEqual(len(data), 0)

    def test_get_dhcp_networks_for_host(self):
        # Check which dhcp networks can be scheduled on this host
        data = self._dhcp_scheduler.get_dhcp_subnets_for_host(
            self._plugin, self.adminContext, HOST1['name'], fields=None)
        # Should be subnets 1, 2, and 7 to 10 that can be scheduled
        self.assertEqual(len(data), 6)

    def test_get_agents_for_network_without_agents(self):
        dhcp_filter = self._dhcp_scheduler.resource_filter
        data = dhcp_filter._get_network_hostable_dhcp_agents(
            self._plugin, self.adminContext,
            self._get_network(NET1['name']))
        # Should not have any candidate agents since there are no agents
        self.assertEqual(len(data['hostable_agents']), 0)

    def test_get_agents_for_network(self):
        self._register_dhcp_agents(HOSTS)
        data = self._list_dhcp_agents()
        self.assertEqual(len(data['agents']), len(HOSTS))
        # Get the list of agents that can support this network
        dhcp_filter = self._dhcp_scheduler.resource_filter
        data = dhcp_filter._get_network_hostable_dhcp_agents(
            self._plugin, self.adminContext,
            self._get_network(NET1['name']))
        # It should be schedulable on the first 3 nodes
        self.assertEqual(len(data['hostable_agents']), 3)

    def test_get_agents_for_network_isolated(self):
        self._register_dhcp_agents(HOSTS)
        data = self._list_dhcp_agents()
        self.assertEqual(len(data['agents']), len(HOSTS))
        # Get the list of agents that can support this network
        dhcp_filter = self._dhcp_scheduler.resource_filter
        data = dhcp_filter._get_network_hostable_dhcp_agents(
            self._plugin, self.adminContext,
            self._get_network(NET4['name']))
        # It should not be schedulable on any nodes
        self.assertEqual(len(data['hostable_agents']), 0)

    def test_get_agents_for_network_sriov(self):
        self._register_dhcp_agents(HOSTS)
        data = self._list_dhcp_agents()
        self.assertEqual(len(data['agents']), len(HOSTS))
        # Get the list of agents that can support this network
        dhcp_filter = self._dhcp_scheduler.resource_filter
        data = dhcp_filter._get_network_hostable_dhcp_agents(
            self._plugin, self.adminContext,
            self._get_network(NET5['name']))
        # It should not be schedulable on any nodes because NET5 is
        # associated only with pci-sriov data interfaces and the scheduler
        # should be excluding these from the choices.
        self.assertEqual(len(data['hostable_agents']), 0)

    def _get_agent_network_counts(self):
        """Return an OrderedDict of {host: network count}, busiest first."""
        counts = []
        agents = self._list_dhcp_agents()['agents']
        for agent in agents:
            networks = self._plugin.list_networks_on_dhcp_agent(
                self.adminContext, agent['id'])['networks']
            counts.append((agent['host'], len(networks)))
        return collections.OrderedDict(
            sorted(counts, reverse=True, key=lambda x: x[1]))

    def _assertAgentNetworkCounts(self, a, b):
        """Assert two host->count mappings agree, ignoring which host."""
        a_counts = sorted(a.values())
        b_counts = sorted(b.values())
        self.assertEqual(a_counts, b_counts)

    def test_autoschedule_networks(self):
        self._register_dhcp_agent(HOST1['name'])
        self._plugin.auto_schedule_networks(self.adminContext, HOST1['name'])
        counts = self._get_agent_network_counts()
        expected = {'compute-0': 3}
        self._assertAgentNetworkCounts(expected, counts)

    def test_redistribute_networks(self):
        self._register_dhcp_agents(HOSTS)
        self._plugin.auto_schedule_networks(self.adminContext, HOST1['name'])
        self._plugin.redistribute_networks(self.adminContext,
                                           (lambda a, b: a > b + 1))
        counts = self._get_agent_network_counts()
        expected = {'compute-0': 1,
                    'compute-1': 1,
                    'compute-2': 1,
                    'compute-3': 0,
                    'compute-4': 0}
        self._assertAgentNetworkCounts(expected, counts)

    def test_redistribute_networks_with_threshold_1(self):
        self._register_dhcp_agents(HOSTS)
        self._plugin.auto_schedule_networks(self.adminContext, HOST1['name'])
        self._plugin.redistribute_networks(self.adminContext,
                                           (lambda a, b: a > b + 1))
        counts = self._get_agent_network_counts()
        expected = {'compute-0': 1,
                    'compute-1': 1,
                    'compute-2': 1,
                    'compute-3': 0,
                    'compute-4': 0}
        self._assertAgentNetworkCounts(expected, counts)

    def test_redistribute_networks_with_threshold_2(self):
        self._register_dhcp_agents(HOSTS)
        self._plugin.auto_schedule_networks(self.adminContext, HOST1['name'])
        self._plugin.redistribute_networks(self.adminContext,
                                           (lambda a, b: a > b + 2))
        counts = self._get_agent_network_counts()
        expected = {'compute-0': 2,
                    'compute-1': 0,
                    'compute-2': 1,
                    'compute-3': 0,
                    'compute-4': 0}
        self._assertAgentNetworkCounts(expected, counts)

    def test_redistribute_networks_invalid_agent(self):
        self._register_dhcp_agent(HOST1['name'])
        self._register_dhcp_agent(HOST4['name'])
        self._plugin.auto_schedule_networks(self.adminContext, HOST1['name'])
        self._plugin.redistribute_networks(self.adminContext,
                                           (lambda a, b: a > b + 2))
        counts = self._get_agent_network_counts()
        expected = {'compute-0': 3, 'compute-3': 0}
        self._assertAgentNetworkCounts(expected, counts)

    def test_redistribute_networks_with_locked_host(self):
        self._register_dhcp_agent(HOST1['name'])
        self._register_dhcp_agent(HOST2['name'])
        self._register_dhcp_agent(HOST3['name'])
        # Start all the agents on the first host
        self._plugin.auto_schedule_networks(self.adminContext, HOST1['name'])
        # Lock the second host. The agent will still be seen but we
        # want to confirm that it is being ignored when calculating the
        # least busiest agents.
        self._lock_test_host(HOST2['id'])
        # The busiest network should get moved to the third host. The two
        # single subnet networks should stay on the first host.
        self._plugin.redistribute_networks(self.adminContext,
                                           (lambda a, b: a > b + 1))
        counts = self._get_agent_network_counts()
        expected = {'compute-0': 2,
                    'compute-1': 0,
                    'compute-2': 1}
        # dict.iterkeys() is Python-2-only; iterating the mapping itself is
        # equivalent and works on both major versions.
        for k in sorted(counts):
            self.assertEqual(expected[k], counts[k])
        self._assertAgentNetworkCounts(expected, counts)
|
# coding=utf-8
from lxml import etree
from Queue import Queue
import threading
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from bs4 import BeautifulSoup
import json, re, cto
from models import Train, Train_course, Course_lesson
from cto import Login
# Log in once at import time and share the authenticated HTTP session
# across all crawler threads.
login = Login()
session = login.login()
# Work queue of train ids to crawl, bounded at 1000 pending entries.
q = Queue(1000)
# MySQL DSN for the local 51cto schema (utf8mb4 so CJK titles round-trip).
sql_uri = 'mysql://root:root@localhost/51cto?charset=utf8mb4'
class Wejob(object,):
    """Crawler for 51cto "wejob" trainings.

    Pulls train ids from the module-level queue ``q``, fetches train,
    course and lesson metadata over the shared ``session``, and persists
    them with SQLAlchemy (``Train``/``Train_course``/``Course_lesson``).
    Instances are run concurrently, one per worker thread.
    """

    # NOTE(review): 'action' and 'sign' appear unused in this class —
    # confirm against other modules before removing.
    action = ('info','download')
    # One engine/session factory shared by all instances.
    engine = create_engine(sql_uri)
    con = sessionmaker(bind=engine)

    def __init__(self):
        self.sign = None

    def train(self):
        """Worker loop: crawl queued trains until the queue drains."""
        con = self.con()
        train_id = q.get()
        while train_id:
            result = con.query(Train).filter_by(train_id=train_id).all()
            if result:
                if result[0].status:
                    # Already fully crawled: skip to the next queued id.
                    # NOTE(review): q.get() blocks forever on an empty
                    # queue here — verify intended shutdown behaviour.
                    train_id = q.get()
                    continue
                else :
                    # Known but unfinished train: resume it.
                    train = self.get_train_info(train_id)
                    tra_id = result[0].id
            else:
                train = self.get_train_info(train_id)
                # Insert the new train row.
                tra = Train(train_id=train_id, train_name = train['name'], course_num = train['course_num'])
                con.add(tra)
                con.commit()
                tra_id = tra.id
            # Keep the train index page URL.
            # NOTE(review): 'url' is built but never used — confirm.
            url = 'http://edu.51cto.com/center/wejob/index/view?id=%d&force=3&orig=try' % (train_id)
            for course in train['courses']:
                # Skip courses already recorded for this train.
                if con.query(Train_course).filter_by(train_course_id=course['train_course_id']).filter_by(tra_id=tra_id).all(): continue
                tra_cou = Train_course(tra_id = tra_id, train_course_id = course['train_course_id'], course_name = course['course_name'],
                course_id = course['course_id'], lesson_num = course['lesson_num'], number = course['number'])
                con.add(tra_cou)
                con.commit()
                cou_id = tra_cou.id
                lessons = self.get_course_info(train_id, int(course['train_course_id']))
                for lesson in lessons:
                    lesson['course_id'] = course['course_id']
                    lesson['cou_id'] = cou_id
                    self.insert_lesson(con,lesson)
                # Mark the course as fully crawled.
                con.query(Train_course).filter_by(id=cou_id).update({'status':1})
            # Mark the whole train as fully crawled.
            con.query(Train).filter_by(train_id=train_id).update({'status':1})
            print('微职位%s爬取完成' % train_id)
            if q.qsize():
                train_id = q.get()
            else:
                # NOTE(review): no 'break' here, so the loop re-enters with
                # the same (now status=1) train_id and blocks on q.get()
                # above instead of exiting — confirm intended.
                print('线程退出')

    def get_download_url(self,lesson_id,video_id):
        """Return the https m3u8 download URL(s) for one lesson video."""
        url = 'http://edu.51cto.com//center/player/play/m3u8?lesson_id=%s&id=%d&dp=high&type=wejoboutcourse&lesson_type=course'\
        %(lesson_id,video_id)
        res = session.get(url).text
        return re.findall(r'https.*',res)

    def get_course_info(self,train_id, course_id):
        """Return the lesson list of one course, paging through the API."""
        infos = []
        current_page = 1
        while(current_page):
            url = 'http://edu.51cto.com/center/wejob/usr/course-infoajax?train_id=%d&train_course_id=%d&page=%d&size=20'\
            %(train_id, course_id,current_page)
            res = session.get(url).text
            data = json.loads(res)['data']
            # Advance until the last page; 0 is falsy and ends the loop.
            current_page = data['current_page'] + 1 if data['current_page'] < data['count_page'] else 0
            pages = data['data']
            for m in pages:
                info = {
                'lesson_name': m['lesson_name'],
                'lesson_id' : m['lesson_id'],
                'video_id' : m['video_id'],
                #'video_time' : m['video_time'].split('/')
                }
                infos.append(info)
        return infos

    def get_train_info(self,train_id):
        """Return {'name', 'courses', 'course_num'} for one train."""
        train = {'name':self.get_train_name(train_id),'courses':[]}
        current_page = 1
        while (current_page):
            url = 'http://edu.51cto.com/center/wejob/usr/courseajax?train_id=%d&page=%d&size=1000'%\
            (train_id,current_page)
            res = session.get(url)
            res = json.loads(res.text)['data']
            # Advance until the last page; 0 is falsy and ends the loop.
            current_page = res['current_page']+1 if res['current_page'] < res['count_page'] else 0
            for i in res['data']:
                course = {
                'course_name':cto.filename_reg_check(i['course_name'].encode('utf-8')),
                'train_id':i['train_id'],
                'train_course_id':i['train_course_id'],
                'lesson_num':i['lesson_num'],
                'course_id' : i['course_id'],
                'number':i['number']
                }
                train['courses'].append(course)
            train['course_num'] = res['current_item']
        return train

    def get_train_name(self,train_id):
        """Scrape the train title from its index page; exit if missing."""
        url = 'http://edu.51cto.com/center/wejob/index/view?id=%d&force=3&orig=try' % (train_id)
        res = session.get(url).text
        soup = BeautifulSoup(res, 'html.parser')
        title = soup.find('h2', id='CourseTitle')
        if title == None:
            exit('找不到该课程')
        return title.string

    @staticmethod
    def get_trains():
        """Scrape the train listing page and enqueue every train id."""
        url = 'http://edu.51cto.com/center/wejob/index/list'
        resp = session.get(url)
        html = etree.HTML(resp.text)
        hrefs = html.xpath('//div[@class="main"]//a/@href')
        for i in hrefs:
            # Train id is the numeric stem of the last path segment
            # (".html" stripped).
            train = int(i.split('/')[-1][0:-5])
            q.put(train)

    def insert_lesson(self,con,lesson):
        """Resolve a lesson's video URL template and persist the row."""
        lesson_id = '_'.join([str(lesson['course_id']), str(lesson['lesson_id'])])
        urls = self.get_download_url(lesson_id, lesson['video_id'])
        video_num = len(urls)
        if video_num == 0: return
        # Turn the first segment URL into a '{}' template over the index.
        video_url = re.sub('_\d+\.', '_{}.', urls[0])
        cou_len = Course_lesson(cou_id=lesson['cou_id'], video_id=lesson['video_id'], lesson_id=lesson['lesson_id'],
        lesson_name=lesson['lesson_name'], video_url=video_url,
        video_num=video_num)
        con.add(cou_len)
        con.commit()
        return
def is_exists_train(train_id):
    """Queue train_id for crawling when its course page actually exists."""
    url = 'http://edu.51cto.com/center/wejob/index/view?id=%d&force=3&orig=try' % (train_id)
    page = session.get(url).text
    title = BeautifulSoup(page, 'html.parser').find('h2', id='CourseTitle')
    if title:
        q.put(train_id)
    else:
        print('%s无效' % train_id)
def run():
    """Fill the work queue, then crawl it with four worker threads."""
    Wejob.get_trains()
    for _ in range(4):
        worker = Wejob()
        threading.Thread(target=worker.train).start()
if __name__ == '__main__':
    # script entry point: start the multi-threaded crawler
    run()
# -*- coding: utf-8 -*-
import openpyxl
from loyolaCD import employee as E
def get_info(filename, new, wb, ws):
    """Fill basic employee fields from the filename and fixed worksheet cells.

    filename is expected to look like "<prefix>_<student_num>_<name>.<ext>".
    wb is unused but kept for interface compatibility.
    """
    parts = filename.split('_')
    new.student_num = parts[1]
    new.name = parts[2].split('.')[0]
    # fixed cell addresses in the application form sheet
    new.major = ws['E22'].value
    new.phone_num = ws['G22'].value
    new.late = ws['I39'].value
def read_timetable(new, wb, ws):
    """Read O/X availability marks from the worksheet into `new`.

    Weekday grid: rows 26-34 x columns 5-9 -> new.week[9][5].
    Weekend slots: rows 38-39, column 5 -> new.weekend[2].

    An empty cell becomes '-'; a cell starting with O/o or X/x becomes 'O'
    or 'X'; any other value leaves the existing slot untouched (this matches
    the original behaviour).

    :param new: object with pre-sized `week` (9x5) and `weekend` (2) lists
    :param wb: workbook (unused, kept for interface compatibility)
    :param ws: worksheet exposing cell(row=..., column=...).value
    """
    # weekday timetable
    for i in range(9):
        for j in range(5):
            mark = _timetable_mark(ws.cell(row=26 + i, column=5 + j).value)
            if mark is not None:
                new.week[i][j] = mark
    # weekend timetable
    for j in range(2):
        mark = _timetable_mark(ws.cell(row=38 + j, column=5).value)
        if mark is not None:
            new.weekend[j] = mark

def _timetable_mark(info):
    """Normalize one raw cell value to 'O', 'X', '-' or None (= leave slot as is)."""
    if info is None:
        return '-'
    first = list(info)[0]
    if first == 'O' or first == 'o':
        return 'O'
    if first == 'X' or first == 'x':
        return 'X'
    return None
def divided_week(week, student):
    """Parse the string form of a 9x5 timetable back into student[i][j].

    `week` is the repr of a nested list (e.g. "[['O', 'X', ...], ...]");
    stripping the list punctuation leaves exactly the 45 per-slot marks in
    row-major order.

    :param week: string repr of the 9x5 timetable
    :param student: pre-sized 9x5 list of lists, filled in place
    """
    # one translate pass replaces the original five split/join passes
    marks = week.translate(str.maketrans('', '', "[]', "))
    k = 0
    for i in range(9):
        for j in range(5):
            student[i][j] = marks[k]
            k += 1
def divided_weekend(weekend, student):
    """Parse the string form of the 2-slot weekend timetable into student.

    :param weekend: string repr of a 2-element list (e.g. "['O', 'X']")
    :param student: pre-sized 2-element list, filled in place
    """
    # one translate pass replaces the original five split/join passes
    marks = weekend.translate(str.maketrans('', '', "[]', "))
    student[0] = marks[0]
    student[1] = marks[1]
|
from collections import defaultdict
from random import randint
# Bucket Sort
# Time: O(n + klogk) ~ O(n + nlogn)
# Space: O(n)
class BucketSort(object):
    """Top-k most frequent characters via bucket sort over counts."""

    def topKFrequent(self, words, k):
        """Return up to k characters ordered from most to least frequent.

        Characters with equal counts keep their first-seen order.

        :param words: iterable of strings whose characters are counted
        :param k: number of characters to return
        :return: list of at most k characters
        """
        counts = defaultdict(int)
        for word in words:
            for ch in word:
                counts[ch] += 1
        # BUG FIX: `[[]] * n` creates n references to ONE shared list, so every
        # append landed in the same bucket; build distinct lists instead.
        buckets = [[] for _ in range(sum(counts.values()) + 1)]
        for ch, count in counts.items():
            buckets[count].append(ch)
        result = []
        # walk buckets from highest count down until k items are collected
        for i in reversed(range(len(buckets))):
            for ch in buckets[i]:
                result.append(ch)
                if len(result) == k:
                    return result
        return result
# Quick Select
# Time: O(n) ~ O(n^2), O(n) on average.
# Space: O(n)
class QuickSelect(object):
    """Top-k most frequent characters via randomized quickselect."""

    def topKFrequent(self, words, k):
        """
        :type words: List[str]
        :type k: int
        :rtype: List[str]
        """
        freq = defaultdict(int)
        for word in words:
            for ch in word:
                freq[ch] += 1
        # negate counts so "smallest" means "most frequent"
        pairs = [(-count, ch) for ch, count in freq.items()]
        self.kthElement(pairs, k)
        return [ch for _, ch in sorted(pairs[:k])]

    def kthElement(self, nums, k):  # O(n) on average
        """Partially order nums so its k smallest elements occupy nums[:k]."""
        def partition(lo, hi, pivot_idx):
            # move pivot to the end, sweep smaller items to the front,
            # then drop the pivot into its final slot
            pivot = nums[pivot_idx]
            nums[pivot_idx], nums[hi] = nums[hi], nums[pivot_idx]
            store = lo
            for idx in range(lo, hi):
                if nums[idx] < pivot:
                    nums[idx], nums[store] = nums[store], nums[idx]
                    store += 1
            nums[hi], nums[store] = nums[store], nums[hi]
            return store

        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = partition(lo, hi, randint(lo, hi))
            if mid == k - 1:
                return
            if mid > k - 1:
                hi = mid - 1
            else:
                lo = mid + 1
# module-level convenience instance; bucket sort is the default strategy
top_k_selector = BucketSort()
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import traceback
import logging
from django.db import models
from django.contrib.contenttypes.models import ContentType
from chroma_core.lib.job import job_log
from chroma_core.services.rpc import RpcError
from chroma_core.models.alert import AlertState
from chroma_core.models.alert import AlertStateBase
from chroma_core.models.jobs import SchedulingError
class CommandRunningAlert(AlertStateBase):
    """Informational alert attached to a Command while it is still executing."""

    default_severity = logging.INFO

    class Meta:
        app_label = "chroma_core"
        proxy = True

    def alert_message(self):
        return "Command %s running" % (self.alert_item.message,)

    @property
    def require_mail_alert(self):
        """Commands run all the time; mailing for each would spam recipients.

        :return: False
        """
        return False
class CommandSuccessfulAlert(AlertStateBase):
    """Informational alert attached to a Command that finished without errors."""

    default_severity = logging.INFO

    class Meta:
        app_label = "chroma_core"
        proxy = True

    def alert_message(self):
        return "Command %s successful" % (self.alert_item.message,)

    @property
    def require_mail_alert(self):
        """Successful commands are routine; mailing for each would spam recipients.

        :return: False
        """
        return False
class CommandCancelledAlert(AlertStateBase):
    """Error-severity alert attached to a Command that was cancelled."""

    default_severity = logging.ERROR

    class Meta:
        app_label = "chroma_core"
        proxy = True

    def alert_message(self):
        return "Command %s cancelled" % (self.alert_item.message,)
class CommandErroredAlert(AlertStateBase):
    """Error-severity alert attached to a Command that failed."""

    default_severity = logging.ERROR

    class Meta:
        app_label = "chroma_core"
        proxy = True

    def alert_message(self):
        return "Command %s failed" % (self.alert_item.message,)
class Command(models.Model):
    """A user-visible operation made of Jobs; mirrors its lifecycle into an alert on save()."""
    # every alert flavour a Command may carry; save() uses this list to find
    # the existing command alert among the item's alerts
    command_alert_types = [CommandRunningAlert, CommandSuccessfulAlert, CommandCancelledAlert, CommandErroredAlert]
    jobs = models.ManyToManyField("Job")
    complete = models.BooleanField(
        default=False,
        help_text="True if all jobs have completed, or no jobs were needed to \
                    satisfy the command",
    )
    errored = models.BooleanField(
        default=False,
        help_text="True if one or more of the command's jobs failed, or if \
                    there was an error scheduling jobs for this command",
    )
    cancelled = models.BooleanField(
        default=False,
        help_text="True if one or more of the command's jobs completed\
                    with its cancelled attribute set to True, or if this command\
                    was cancelled by the user",
    )
    message = models.CharField(
        max_length=512,
        help_text="Human readable string about one sentence long describing\
                    the action being done by the command",
    )
    created_at = models.DateTimeField(auto_now_add=True)
    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """
        Saves the current instance. Override this in a subclass if you want to
        control the saving process.
        The 'force_insert' and 'force_update' parameters can be used to insist
        that the "save" must be an SQL insert or update (or equivalent for
        non-SQL backends), respectively. Normally, they should not be set.
        Once the command is saved we can then create the alert, we need it to be saved or it has no id.
        :param force_insert: bool
        :param force_update: bool
        :param using: str
        """
        super(Command, self).save(force_insert, force_update, using, update_fields)
        # This is a little bit messy and maybe shouldn't go here. We want to get the existing alert, if it exists
        # which could be of a number of types. CommandRunningAlert, CommandSuccessfulAlert, CommandCancelledAlert
        # or CommandErroredAlert so fetch those filter and be sure we have 1 or 0 alerts.
        try:
            potential_command_alerts = AlertState.objects.filter(
                alert_item_id=self.id, alert_item_type=ContentType.objects.get_for_model(self)
            )
            # We should have tests for the case of more than one and if we find more than 1 then lets not make the users life a misery but take the first one.
            command_alert = next(
                potential_command_alert
                for potential_command_alert in potential_command_alerts
                if type(potential_command_alert) in self.command_alert_types
            )
        except StopIteration:
            # no command alert exists yet: create one in the "running" state
            command_alert = CommandRunningAlert.notify(self, True)
        # Now change to the correct alert type.
        if not self.complete:
            if type(command_alert) != CommandRunningAlert:
                # NOTE(review): unlike the three branches below, the return value
                # of cast() is NOT assigned back to command_alert here, so the
                # notify() call at the end may use the stale class — looks like
                # a bug; confirm against cast()'s contract.
                command_alert.cast(CommandRunningAlert)
        else:
            if self.errored:
                if type(command_alert) != CommandErroredAlert:
                    command_alert = command_alert.cast(CommandErroredAlert)
            elif self.cancelled:
                if type(command_alert) != CommandCancelledAlert:
                    command_alert = command_alert.cast(CommandCancelledAlert)
            else:
                if type(command_alert) != CommandSuccessfulAlert:
                    command_alert = command_alert.cast(CommandSuccessfulAlert)
        # lower (deactivate) the alert now that the command's final state is known
        command_alert.__class__.notify(self, False)
    @classmethod
    def set_state(cls, objects, message=None, **kwargs):
        """The states argument must be a collection of 2-tuples
        of (<StatefulObject instance>, state)"""
        # If you ever work on this function please talk to Chris. It should not be in this class. It has nothing to
        # do with the Command class other than it makes use of a Command and should be moved to the Stateful object
        # class because think about it can only operate on stateful objects.
        from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
        for object, state in objects:
            # Check if the state is modified
            if object.state != state:
                # derive a human-readable message from the last transition's job
                if not message:
                    old_state = object.state
                    new_state = state
                    route = object.get_route(old_state, new_state)
                    from chroma_core.services.job_scheduler.command_plan import Transition
                    job = Transition(object, route[-2], route[-1]).to_job()
                    message = job.description()
                object_ids = [
                    (ContentType.objects.get_for_model(object).natural_key(), object.id, state)
                    for object, state in objects
                ]
                try:
                    command_id = JobSchedulerClient.command_set_state(object_ids, message, **kwargs)
                except RpcError as e:
                    job_log.error("Failed to set object state: " + traceback.format_exc())
                    # FIXME: Would be better to have a generalized mechanism
                    # for reconstituting remote exceptions, as this sort of thing
                    # won't scale.
                    if e.remote_exception_type == "SchedulingError":
                        raise SchedulingError(e.description)
                    else:
                        raise
                return Command.objects.get(pk=command_id)
        # no object needed a state change, so no command was created
        return None
    def completed(self, errored, cancelled):
        """
        Called when the command completes, sets the appropriate completion more of a notification than something requiring any action.
        :param errored: bool True if the command contains an error job.
        :param cancelled: bool True if the command was cancelled because of for example a failed job, or user cancelled...
        cancelled: Boolean indicating if the command contains a job that was cancelled (ie. the command was cancelled)
        """
        self.errored = errored
        self.cancelled = cancelled
        self.complete = True
        # save() also transitions the attached alert to the final state
        self.save()
    def __repr__(self):
        return "<Command %s: '%s'>" % (self.id, self.message)
    class Meta:
        app_label = "chroma_core"
        ordering = ["id"]
|
# paramiko is a third-party library; install it with pip
import paramiko
# time is imported so we can pause between sending a command and reading its echo; otherwise the SSH terminal may fall behind and only part of the output is shown (netmiko handles this automatically)
import time
def qytang_ssh(ip, username, password, port=22, cmd='dis cu\n'):
    """SSH to a device, run cmd in an interactive shell and return its output.

    :param ip: device address
    :param username: SSH user
    :param password: SSH password
    :param port: SSH port (default 22)
    :param cmd: command to send, newline-terminated
    :return: decoded terminal output, or None on authentication failure
    """
    ssh = paramiko.SSHClient()
    try:
        ssh.load_system_host_keys()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(ip, port, username, password, timeout=5, compress=True)
        print("You have successfully connect to " + ip + '\n')
        command = ssh.invoke_shell()
        command.send(cmd)
        # crude wait for the device to finish echoing the command output
        time.sleep(2)
        output = command.recv(65535)
        return output.decode('ascii')
    except paramiko.ssh_exception.AuthenticationException:
        print("User authentication failed for " + ip + ".")
        return
    finally:
        # fix: the connection was previously never closed (resource leak)
        ssh.close()
if __name__ == '__main__':
    # connection parameters for switch SW3: IP address, SSH username and password
    ip = "192.168.56.11"
    username = "prin"
    password = "Huawei@123"
    qytang_ssh(ip, username, password,)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# Define LPD_Net
class LPD_Net(nn.Module):
    """Learned primal-dual network: LayerNo unrolled primal/dual update steps
    for compressed-sensing reconstruction of 33x33 image patches (1089 pixels).
    """
    def __init__(self, LayerNo):
        super(LPD_Net, self).__init__()
        self.name = "LPD_Net"
        self.LayerNo = LayerNo          # number of unrolled iterations
        self.filter_size = 3
        self.conv_size = 32             # channels of the dual variable y
        # per-layer scalar parameters
        self.eta_step = nn.ParameterList()    # primal step size
        self.sigma_step = nn.ParameterList()  # dual step size
        self.soft_thr = nn.ParameterList()    # threshold of the y projection
        self.soft_a = nn.ParameterList()      # sharpness of the sigmoid projection
        self.delta = nn.ParameterList()       # smoothing width of the activation
        # conv operators and their (learned) transposes
        self.A2 = nn.ModuleList()
        self.B = nn.ModuleList()
        self.AT2 = nn.ModuleList()
        self.BT = nn.ModuleList()
        for _ in range(self.LayerNo):
            self.eta_step.append(nn.Parameter(torch.Tensor([0.1])))
            self.sigma_step.append(nn.Parameter(torch.Tensor([1])))
            self.soft_thr.append(nn.Parameter(torch.Tensor([0.1])))
            self.soft_a.append(nn.Parameter(torch.Tensor([50])))
            self.delta.append(nn.Parameter(torch.Tensor([0.1])))
            self.A2.append(nn.Conv2d(1, self.conv_size, kernel_size=3, stride=1, padding=1, bias=False))
            self.B.append(nn.Conv2d(self.conv_size, self.conv_size, kernel_size=3, stride=1, padding=1, bias=False))
            self.AT2.append(nn.Conv2d(self.conv_size, 1, kernel_size=3, stride=1, padding=1, bias=False))
            self.BT.append(nn.Conv2d(self.conv_size, self.conv_size, kernel_size=3, stride=1, padding=1, bias=False))
        # NOTE(review): only the index-0 convolutions are initialized, and
        # forward() below also only ever uses index 0 (kk = 0), so the conv
        # weights are effectively shared across all layers — confirm intended.
        nn.init.xavier_normal_(self.A2[0].weight)
        nn.init.xavier_normal_(self.B[0].weight)
        nn.init.xavier_normal_(self.AT2[0].weight)
        nn.init.xavier_normal_(self.BT[0].weight)
    def activate(self, x, sigma):
        # smoothed ReLU: 0 below -sigma, quadratic blend on (-sigma, sigma), identity above
        mask1 = (x <= -1*sigma).float()
        mask2 = (torch.abs(x) < sigma).float()
        mask3 = (x >= sigma).float()
        return mask1 * 0. + torch.mul(mask2, x*x/(4*sigma) + x/2 + sigma/4.0) + torch.mul(mask3, x)
    def dif_activate(self, x, sigma):
        # derivative of activate(): 0 / linear ramp / 1 on the same three regions
        mask1 = (x <= -1*sigma).float()
        mask2 = (torch.abs(x) < sigma).float()
        mask3 = (x >= sigma).float()
        return mask1 * 0. + torch.mul(mask2, x/(2*sigma) + 0.5) + torch.mul(mask3, 1.)
    def project_sig_y(self, y, thr, a):
        # smooth projection of y onto (-thr, thr) via a scaled sigmoid
        return 2 * thr * (torch.sigmoid(a * y) - 0.5)
    def project_relu_y(self, y, thr):
        # hard clipping of y to [-thr, thr] (alternative projection, unused in forward)
        return torch.mul(torch.sign(y), -1 * F.relu(thr - torch.abs(y)) + thr)
    def forward(self, Phix, Phi, Qinit):
        """Run the unrolled reconstruction.

        :param Phix: (batch, m) measurements
        :param Phi: (m, 1089) sensing matrix -- assumed from the mm() shapes; confirm
        :param Qinit: (1089, m) initialization matrix -- assumed; confirm
        :return: [x_out, constraint] — per-layer primal iterates plus
                 weight-symmetry residuals (A2 vs AT2^T, B vs BT^T)
        """
        bs = Phix.size(0)
        x0 = torch.mm(Phix, torch.transpose(Qinit, 0, 1))  # initial primal estimate
        y0 = torch.zeros(size=[bs, self.conv_size, 33, 33], dtype=torch.float32).to(x0.device)
        PhiTPhi = torch.mm(torch.transpose(Phi, 0, 1), Phi)
        PhiTb = torch.mm(Phix, Phi)
        x_out = list()
        y_out = list()
        x_out.append(x0)
        y_out.append(y0)
        constraint = list()
        # NOTE(review): this local is never used; the learned self.delta[i] is
        delta = 0.1
        for i in range(self.LayerNo):
            kk = 0  # always layer 0: convolution weights are shared (see __init__)
            # dual ascent step on y
            x_input = x_out[-1].view(-1, 1, 33, 33)
            y_1 = self.A2[0](x_input)
            y_2 = self.activate(y_1, self.delta[i])
            dy = self.B[kk](y_2)
            y_pred = y_out[-1] + self.sigma_step[i] * dy
            y_pred = self.project_sig_y(y_pred, self.soft_thr[i], self.soft_a[i])
            y_out.append(y_pred)
            # primal descent step on x (chain rule through B∘activate∘A2)
            s_1 = self.BT[kk](y_out[-1])
            s_2 = self.A2[kk](x_out[-1].view(-1, 1, 33, 33))
            s_2 = self.dif_activate(s_2, self.delta[i])
            s_3 = torch.mul(s_2, s_1)
            s_4 = self.AT2[kk](s_3)
            s_conv = s_4.view(-1, 1089)
            dx = torch.mm(x_out[-1], PhiTPhi) - PhiTb + s_conv
            x_pred = x_out[-1] - self.eta_step[i] * dx
            x_out.append(x_pred)
        # residuals used to penalize AT2/BT deviating from the transposes of A2/B
        constraint.append(self.A2[0].weight.data - self.AT2[0].weight.data.transpose(0, 1).contiguous())
        constraint.append(self.B[0].weight.data - self.BT[0].weight.data.transpose(0, 1).contiguous())
        return [x_out, constraint]
|
#!/usr/bin/python
# Poll a DHT11 temperature/humidity sensor on a Raspberry Pi forever.
import sys
# NOTE(review): sys is imported but never used in this script
import Adafruit_DHT
sensor = Adafruit_DHT.DHT11
# Example using a Raspberry Pi with DHT sensor
pin = 4
# read_retry blocks and retries internally until the sensor answers (or gives up)
while True:
    humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
    if humidity is not None and temperature is not None:
        print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))
    else:
        print('Failed to get reading. Try again!')
|
from django.urls import path
from lineab.views import LineaCreate, LineaList, LineaUpdate, LineaDelete, linea_list_total

# URL configuration for the "lineab" app: CRUD views plus a full listing.
app_name = 'lineab'

urlpatterns = [
    path("registrar/", LineaCreate.as_view(), name="registrar_linea"),
    path("listar/<int:id>", LineaList.as_view(), name="listar_linea"),
    path('listaDeLineas/', linea_list_total, name='lista_total'),
    path("editar/<int:pk>", LineaUpdate.as_view(), name="editar_linea"),
    path("eliminar/<int:pk>", LineaDelete.as_view(), name="eliminar_linea"),
]
# [Prefix-Sum]
# https://leetcode.com/problems/check-if-all-the-integers-in-a-range-are-covered/
# 1893. Check if All the Integers in a Range Are Covered
# History:
# 1.
# Jul 19, 2021
# You are given a 2D integer array ranges and two integers left and right.
# Each ranges[i] = [starti, endi] represents an inclusive interval between starti and endi.
#
# Return true if each integer in the inclusive range [left, right] is covered by at least one interval in ranges.
# Return false otherwise.
#
# An integer x is covered by an interval ranges[i] = [starti, endi] if starti <= x <= endi.
#
#
#
# Example 1:
#
# Input: ranges = [[1,2],[3,4],[5,6]], left = 2, right = 5
# Output: true
# Explanation: Every integer between 2 and 5 is covered:
# - 2 is covered by the first range.
# - 3 and 4 are covered by the second range.
# - 5 is covered by the third range.
# Example 2:
#
# Input: ranges = [[1,10],[10,20]], left = 21, right = 21
# Output: false
# Explanation: 21 is not covered by any range.
#
#
# Constraints:
#
# 1 <= ranges.length <= 50
# 1 <= starti <= endi <= 50
# 1 <= left <= right <= 50
class Solution(object):
    def isCovered(self, ranges, left, right):
        """
        :type ranges: List[List[int]]
        :type left: int
        :type right: int
        :rtype: bool
        """
        # difference array over the value domain 1..50 (plus slack)
        diff = [0] * 52
        for start, end in ranges:
            diff[start] += 1
            diff[end + 1] -= 1
        # prefix-sum the deltas: covered[v] = how many intervals contain v
        covered = [0] * 52
        running = 0
        for v in range(52):
            running += diff[v]
            covered[v] = running
        return all(covered[x] != 0 for x in range(left, right + 1))
|
import logging
from django_filters import rest_framework as filters
from rest_framework import mixins, status
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from sme_uniforme_apps.core.models import Uniforme
from sme_uniforme_apps.proponentes.api.serializers.loja_serializer import LojaCreateSerializer
from sme_uniforme_apps.proponentes.models import OfertaDeUniforme
from sme_uniforme_apps.proponentes.services import atualiza_coordenadas_lojas
from ..serializers.proponente_serializer import ProponenteSerializer, ProponenteCreateSerializer
from ...models import Proponente, ListaNegra, Loja
from ....utils.base64ToFile import base64ToFile
log = logging.getLogger(__name__)
class ProponentesViewSet(mixins.CreateModelMixin,
                         mixins.RetrieveModelMixin,
                         mixins.UpdateModelMixin,
                         mixins.DestroyModelMixin,
                         GenericViewSet):
    """CRUD plus custom actions for Proponente (uniform supplier candidates).
    Publicly accessible (AllowAny); objects are addressed by uuid.
    """
    permission_classes = [AllowAny]
    lookup_field = 'uuid'
    queryset = Proponente.objects.all()
    serializer_class = ProponenteSerializer
    filter_backends = (filters.DjangoFilterBackend, SearchFilter, OrderingFilter)
    filter_fields = ('end_uf',)
    ordering_fields = ('razao_social',)
    search_fields = ('uuid', 'cnpj')
    def get_queryset(self):
        # no per-user filtering: the endpoint is public
        return self.queryset
    def get_serializer_class(self):
        # reads use the full serializer; writes use the create serializer
        if self.action in ['retrieve', 'list']:
            return ProponenteSerializer
        else:
            return ProponenteCreateSerializer
    @action(detail=True, methods=['patch'], url_path='atualiza-lojas')
    def atualiza_lojas(self, request, uuid):
        """Replace the proponent's uniform offers and synchronise its stores.
        The payload must carry non-empty 'lojas' and 'ofertas_de_uniformes'
        lists; offers are recreated from scratch, stores are updated/created,
        and stores absent from the payload are detached.
        """
        proponente = self.get_object()
        lojas = request.data.pop('lojas')
        ofertas_de_uniformes = request.data.pop('ofertas_de_uniformes')
        if not lojas:
            msgError = "Pelo menos uma loja precisa ser enviada!"
            log.info(msgError)
            raise ValidationError(msgError)
        if not ofertas_de_uniformes:
            # NOTE(review): message has a PT grammar slip ("um oferta") — confirm before changing a user-facing string
            msgError = "Pelo menos um oferta deve ser enviada!"
            log.info(msgError)
            raise ValidationError(msgError)
        # offers are not merged: wipe and recreate from the payload
        proponente.ofertas_de_uniformes.all().delete()
        for oferta in ofertas_de_uniformes:
            uniforme = Uniforme.objects.get(nome=oferta.get('nome'))
            oferta_uniforme = OfertaDeUniforme(
                proponente=proponente,
                uniforme=uniforme,
                preco=oferta.get('valor')
            )
            oferta_uniforme.save()
        lojas_ids = []
        for loja in lojas:
            if loja.get('id', ''):
                # existing store: update its fields in place
                lojas_ids.append(loja.get('id'))
                loja_obj = Loja.objects.get(id=loja.get('id', ''))
                loja_obj.cep = loja.get('cep')
                loja_obj.numero = loja.get('numero')
                loja_obj.bairro = loja.get('bairro')
                loja_obj.cidade = loja.get('cidade')
                loja_obj.complemento = loja.get('complemento')
                loja_obj.endereco = loja.get('endereco')
                loja_obj.uf = loja.get('uf')
                loja_obj.nome_fantasia = loja.get('nome_fantasia')
                loja_obj.telefone = loja.get('telefone')
                loja_obj.site = loja.get('site')
                if loja.get('comprovante_endereco') is not None:
                    # proof-of-address arrives base64 encoded
                    file = base64ToFile(loja.get('comprovante_endereco'))
                    loja_obj.comprovante_endereco.save('comprovante_endereco_loja.' + file['ext'], file['data'])
                loja_obj.save()
            else:
                # new store: strip fields the create serializer does not accept
                atributos_extras = ['proponente', 'uuid', 'id', 'email', 'criado_em',
                                    'alterado_em', 'latitude', 'longitude', 'cidade',
                                    'uf', 'firstName']
                for attr in atributos_extras:
                    loja.pop(attr, '')
                comprovante = loja.pop('comprovante_endereco', '')
                loja_object = LojaCreateSerializer().create(loja)
                file = base64ToFile(comprovante)
                loja_object.comprovante_endereco.save('comprovante_endereco_loja.' + file['ext'], file['data'])
                proponente.lojas.add(loja_object)
                lojas_ids.append(loja_object.id)
        atualiza_coordenadas_lojas(proponente.lojas)
        # detach stores that were not present in the payload
        for loja in proponente.lojas.all():
            if loja.id not in lojas_ids:
                proponente.lojas.remove(loja)
        # an accredited proponent that edits its data goes back to "altered"
        if proponente.status == Proponente.STATUS_CREDENCIADO:
            proponente.status = Proponente.STATUS_ALTERADO
            proponente.save()
        return Response(ProponenteSerializer(proponente).data, status=status.HTTP_200_OK)
    @action(detail=False, url_path='verifica-cnpj')
    def verifica_cnpj(self, request):
        """Report whether a CNPJ is valid, already registered and/or blocklisted."""
        cnpj = request.query_params.get('cnpj')
        if cnpj:
            result = {
                'result': 'OK',
                'cnpj_valido': 'Sim' if Proponente.cnpj_valido(cnpj) else 'Não',
                'cnpj_cadastrado': 'Sim' if Proponente.cnpj_ja_cadastrado(cnpj) else 'Não',
                'cnpj_bloqueado': 'Sim' if ListaNegra.cnpj_bloqueado(cnpj) else 'Não'
            }
        else:
            result = {
                'result': 'Erro',
                'mensagem': 'Informe o cnpj na url. Ex: /proponentes/verifica-cnpj/?cnpj=53.894.798/0001-29'
            }
        return Response(result)
    @action(detail=True, url_path='concluir-cadastro', methods=['patch'])
    def concluir_cadastro(self, request, uuid):
        """Finish the proponent's registration; domain errors come back as HTTP 400."""
        try:
            proponente = Proponente.concluir_cadastro(uuid)
        except Exception as e:
            return Response({"detail": e.__str__()}, status.HTTP_400_BAD_REQUEST)
        serializer = ProponenteSerializer(proponente, many=False, context={'request': request})
        return Response(serializer.data)
    @action(detail=False, url_path='verifica-email')
    def verifica_email(self, request):
        """Report whether an e-mail address is valid and/or already registered."""
        email = request.query_params.get('email')
        if email:
            result = {
                'result': 'OK',
                'email_valido': 'Sim' if Proponente.email_valido(email) else 'Não',
                'email_cadastrado': 'Sim' if Proponente.email_ja_cadastrado(email) else 'Não'
            }
        else:
            result = {
                'result': 'Erro',
                'mensagem': 'Informe o email na url. Ex: /proponentes/verifica-email/?email=teste@teste.com'
            }
        return Response(result)
|
#!/usr/bin/python
# This does not do anything useful: it just wastes a random
# amount of time to produce a random result.
# Python 2 script (uses the print statement below).
import os
import random
import sys
# the task index comes from the command line; it also seeds the RNG,
# making each task's "work" deterministic
i = int(sys.argv[1])
DIR = os.path.expanduser('~/tmp/example')
resultfile = os.path.join(DIR, "result-%u" % i)
random.seed(i)
j = 0
K = 9
L1 = range(K)
L2 = range(K)
# shuffle until we happen to hit the identity permutation; j counts attempts
while True:
    random.shuffle(L1)
    j += 1
    if L1 == L2:
        break
print "%u: %u" % (i, j)
with open(resultfile, 'w') as f:
    f.write('%u\n' % j)
|
import logging
import torch.nn as nn
import torchvision.models as tvmodels
logger = logging.getLogger(__name__)
from ..tresnet import TResnetM, TResnetL, TResnetXL
from ..vision_transformer import *
def create_model(args):
    """Create a classification model selected by args.model_name.

    Supports TResNet variants, any torchvision classifier and DEIT vision
    transformers; the classifier head is resized when the pretrained head
    does not match args.num_classes.

    :param args: namespace with model_name and num_classes (optionally
        drop / drop_path for DEIT models)
    :return: an nn.Module ready for training
    """
    model_params = {'args': args, 'num_classes': args.num_classes}
    args = model_params['args']
    args.model_name = args.model_name.lower()
    # get torch vision models
    model_names = sorted(name for name in tvmodels.__dict__
                         if name.islower() and not name.startswith("__")
                         and callable(tvmodels.__dict__[name]))
    print("torchvision models: \n", model_names)
    # vit/deit models
    vitmodeldict = {
        'deit_tiny_patch16_224': deit_tiny_patch16_224,
        'deit_small_patch16_224': deit_small_patch16_224,
        'deit_base_patch16_224': deit_base_patch16_224,
    }
    vit_model_names = list(vitmodeldict.keys())
    print("Vision Transformer models: \n", vit_model_names)
    if args.model_name == 'tresnet_m':
        model = TResnetM(model_params)
    elif args.model_name == 'tresnet_l':
        model = TResnetL(model_params)
    elif args.model_name == 'tresnet_xl':
        model = TResnetXL(model_params)
    elif args.model_name in model_names:
        # use the module logger (was logging.info on the root logger)
        logger.info("Use torchvision predefined model")
        logger.info("=> using torchvision pre-trained model '{}'".format(args.model_name))
        model = tvmodels.__dict__[args.model_name](pretrained=True,)
        if model.fc.out_features != args.num_classes:
            model.fc = nn.Linear(model.fc.in_features, args.num_classes)
    elif args.model_name.startswith('deit_'):
        logger.info("Use vision transformer (deit) model")
        config = dict(
            drop_rate=args.drop if hasattr(args, 'drop') else 0.0,
            drop_path_rate=args.drop_path if hasattr(args, 'drop_path') else 0.2,
        )
        logger.info("=> using DEIT pre-trained model '{}'".format(args.model_name))
        model = vitmodeldict[args.model_name](pretrained=True, **config)
        if model.num_classes != args.num_classes:
            model.head = nn.Linear(model.embed_dim, args.num_classes)
    else:
        print("model: {} not found !!".format(args.model_name))
        exit(-1)
    return model
|
import pickle
class SessionService:
    """Persists the last-used program parameters between runs via pickle."""

    def __init__(self):
        # hidden file in the working directory holding the pickled parameters
        self.SESSION_FILENAME = ".session"

    def save_program_parameters(self, cmd):
        """Serialize cmd to the session file, overwriting any previous state."""
        with open(self.SESSION_FILENAME, 'wb') as handle:
            pickle.dump(cmd, handle, pickle.DEFAULT_PROTOCOL)

    def load_last_program_parameters(self):
        """Deserialize and return whatever was saved last; raises if absent."""
        with open(self.SESSION_FILENAME, "rb") as handle:
            return pickle.load(handle)
|
from dataclasses import dataclass
from typing import Dict, List, Optional
@dataclass
class AdsManagerUpdateStructureCommand:
    # Command object carrying a structure update for an ads-manager account
    # (presumably Google Ads, given the manager/customer id pair — confirm).
    client_manager_id: str
    client_customer_id: str
    # list of the edits to apply; element schema is defined by the consumer
    edit_details: List
    # optional ids narrowing the edit to a campaign / ad group / keyword
    campaign_id: Optional[str] = None
    ad_group_id: Optional[str] = None
    keyword_id: Optional[str] = None
|
import logging
import pickle
class HumanClassification:
    'Persists human classification decisions (pickle) so they survive re-tokenization; can also export them as XML.'
    def __init__(self, pickle_filename):
        # pickle_filename: base name; '.pickle' is appended automatically
        self._logger = logging.getLogger()
        self.filename = pickle_filename + '.pickle'
        # entry_id -> sequence: [0]=bool classification, [1]=guid, [2]=text, [3]=language
        # (indices inferred from to_xml below — confirm)
        self.classification = {}
    def load(self):
        # best effort: a missing pickle just means no previous classifications
        try:
            filehandler = open(self.filename,'rb')
        except IOError:
            self._logger.warning('Pickle file ' + self.filename + ' does not exist, no previous classifications loaded')
            return
        # NOTE(review): filehandler is never closed here (nor in store) — leak
        self.classification = pickle.load(filehandler)
    def store(self):
        # overwrite the pickle with the current in-memory classifications
        filehandler = open(self.filename, 'wb')
        pickle.dump(self.classification, filehandler)
    def to_xml(self, db, filename, language=None):
        # Python 2 code ('print >> f'); db is unused — kept for interface compatibility.
        # With language set, only entries whose index-2 field matches are written.
        if language:
            self._logger.info('Generating human input XML to file: ' + filename + '_' + language + '.xml ...')
            f = open(filename + '_' + language + '.xml', 'w')
        else:
            self._logger.info('Generating human input XML to file: ' + filename + '.xml ...')
            f = open(filename + '.xml', 'w')
        print >> f, '<?xml version="1.0" encoding="UTF-8"?>'
        print >> f, '<human_input>'
        for entry_id in self.classification:
            if language:
                # NOTE(review): the filter compares index [2] (the text per the
                # count methods below) against language while [3] is used as the
                # language attribute — looks inconsistent; confirm the layout.
                if list(self.classification[entry_id])[2] == language:
                    print >> f, '  <entry classification="' + str(list(self.classification[entry_id])[0]) + '" guid="' + str(list(self.classification[entry_id])[1]) + '" language="' + list(self.classification[entry_id])[3] + '" id="' + str(entry_id) + '">' + list(self.classification[entry_id])[2] + '</entry>'
            else:
                print >> f, '  <entry classification="' + str(list(self.classification[entry_id])[0]) + '" guid="' + str(list(self.classification[entry_id])[1]) + '" language="' + list(self.classification[entry_id])[3] + '" id="' + str(entry_id) + '">' + list(self.classification[entry_id])[2] + '</entry>'
        print >> f, '</human_input>'
        f.close()
    def get_positively_classified_count(self, language):
        # number of entries for `language` marked True by the human
        count = 0
        for id in self.classification:
            tmp = list(self.classification[id])
            if tmp[0] == True:
                if tmp[3] == language:
                    count += 1
        return count
    def get_negatively_classified_count(self, language):
        # number of entries for `language` marked False by the human
        count = 0
        for id in self.classification:
            tmp = list(self.classification[id])
            if tmp[0] == False:
                if tmp[3] == language:
                    count += 1
        return count
import random
# Rejection sampling of N non-overlapping 1-D "hard spheres" of radius sigma
# on the segment [0, L]: whenever a newly placed sphere overlaps any earlier
# one, the whole configuration is discarded and rebuilt from scratch.
N = 15
L = 10.0
sigma = 0.1
n_configs = 100
for config in range(n_configs):
    x = []
    while len(x) < N:
        # keep centers at least sigma away from both walls
        x.append(random.uniform(sigma, L - sigma))
        for k in range(len(x) - 1):
            if abs(x[-1] - x[k]) < 2.0 * sigma:
                # overlap detected: restart this configuration
                x = []
                break
    print(x)
|
#!/usr/bin/env python3
# -*-encoding: utf-8-*-
"""
модуль з функціями для лінійної апроксимації
розв'язання системи лінійних рівнянь проводиться методом квадратного кореня
"""
import numpy as np
import random
from numpy import sign
from math import sqrt
def print_system(a, b):
    """Pretty-print the linear system a*x = b, one equation per line.

    :param a: 2-D numpy array of coefficients
    :param b: 1-D numpy array of right-hand sides
    """
    print("Система:")
    for row_idx in range(a.shape[0]):
        # a.shape[0] = row count, a.shape[1] = column count of the matrix
        terms = ["{}*x{}".format(a[row_idx, col], col + 1) for col in range(a.shape[1])]
        print(" + ".join(terms), " = ", b[row_idx])
    print()
def tabulate_with_fault(func, a: float, b: float, n: int, eps: float):
    """Tabulate func at n evenly spaced points of [a, b] with simulated error.

    :param func: function to tabulate
    :param a: left end of the interval
    :param b: right end of the interval
    :param n: number of points
    :param eps: physical-experiment error magnitude
    :return: (x, y) numpy arrays of points and noisy function values
    """
    x = np.linspace(a, b, n)
    # draw one noise factor strictly inside [0, 1)
    eta = 1
    while eta == 1:
        eta = random.uniform(0, 1)
    try:
        # fast path: func accepts the whole numpy array at once
        y = func(x) + eta * eps
    except Exception as err:
        print("exception:", err)
        # fallback for non-vectorized func: evaluate point by point
        y = np.zeros(n)
        for i in range(n):
            y[i] = func(x[i]) + eta * eps
    return x, y
def linear_eq_system(x: np.ndarray, y: np.ndarray, n1: int, eps: float):
    """Assemble the least-squares normal equations for a polynomial fit.

    Every measurement carries the weight p = eps**(-2).

    :param x: numpy array of sample points
    :param y: numpy array of measured values at x
    :param n1: number of polynomial coefficients (system size)
    :param eps: measurement error defining the weight
    :return: (a, b) — coefficient matrix and right-hand side
    """
    a = np.zeros((n1, n1))
    b = np.zeros(n1)
    weight = eps ** (-2)
    n_points = x.size
    for m in range(n1):
        # right-hand side: weighted moments of the measurements
        for i in range(n_points):
            b[m] += weight * y[i] * x[i] ** m
        # matrix: weighted power sums of the sample points
        for j in range(n1):
            for i in range(n_points):
                a[m, j] += weight * x[i] ** (j + m)
    return a, b
def _sds_decomposition(a: np.matrix):
"""
функція, яка розкладає матрицю А на добуток матриць S^T * D * S
:param a: матриця numpy
:return: tmp_s - матриця numpy (S)
tmp_d - матриця numpy (D)
"""
tmp_s = np.matrix(np.zeros(a.shape)) # створення матриць з нулями
tmp_d = np.matrix(np.zeros(a.shape))
for k in range(a.shape[0]): # поелементне знаходження всіх елементів
tmp_sum = 0
for i in range(k): # проміжний результат для формул
tmp_sum += tmp_d[i, i] * abs(tmp_s[i, k]) ** 2
tmp_res = a[k, k] - tmp_sum
tmp_d[k, k] = sign(tmp_res) # діагональний елемент матриці D
tmp_s[k, k] = sqrt(abs(tmp_res)) # діагональний елемент матриці S
for l in range(a.shape[0]): # заповнення інших елементів у рядку (верхньотрикутних)
if k + 1 <= l:
tmp_sum = sum(tmp_d[i, i] * tmp_s.transpose()[i, k] * tmp_s[i, l] for i in range(k))
tmp_s[k, l] = (a[k, l] - tmp_sum) / (tmp_s[k, k] * tmp_d[k, k])
print(tmp_s, '\n', tmp_d)
return tmp_s, tmp_d
def square_method(a: np.matrix, b: np.ndarray):
    """Solve a symmetric linear system by the square-root (S^T D S) method.

    :param a: symmetric coefficient matrix
    :param b: right-hand side vector
    :return: numpy array of unknowns
    """
    s_mat, d_mat = _sds_decomposition(a)  # factor the symmetric matrix
    s_t = s_mat.transpose()
    n = a.shape[0]
    # forward pass: solve S^T D y = b for the intermediate unknowns
    y = np.zeros(n)
    y[0] = b[0] / (s_mat[0, 0] * d_mat[0, 0])
    for i in range(1, n):
        partial = sum(d_mat[l, l] * y[l] * s_t[l, i] for l in range(i))
        y[i] = (b[i] - partial) / (s_mat[i, i] * d_mat[i, i])
    # backward pass: solve S x = y for the final unknowns
    x = np.zeros(n)
    x[n - 1] = y[n - 1] / s_mat[n - 1, n - 1]
    for i in range(n - 1, -1, -1):
        partial = sum(s_mat[l, i] * x[l] for l in range(i + 1, n))
        x[i] = (y[i] - partial) / (s_mat[i, i])
    return x
def result_func(coeff, x, n):
    """
    Evaluate the approximating polynomial sum(coeff[i] * x**i, i=0..n-1).

    :param coeff: array of coefficients (roots of the linear system)
    :param x: point (or numpy array of points) to evaluate at
    :param n: number of coefficients to use
    :return: polynomial value(s) at x
    """
    total = 0
    for power in range(n):
        total += coeff[power] * x ** power
    return total
if __name__ == '__main__':
    # Smoke test: solve a small symmetric system and print the solution.
    coeff_matrix = np.matrix([[1, 2, 3], [2, 1, 2], [3, 2, 1]])
    rhs = np.array([0, 3, 2])
    print(square_method(coeff_matrix, rhs))
|
import pygame
import pygame.midi

pygame.init()
pygame.midi.init()

# List every MIDI device so the hard-coded input id below can be verified.
# FIX: print statements converted to print() calls - works on Python 2 and 3
# (the originals were Python-2-only syntax).
for device_id in range(pygame.midi.get_count()):
    print(pygame.midi.get_device_info(device_id))

# NOTE(review): device id 1 is hard-coded - confirm it is an input device
# in the listing above before running.
inp = pygame.midi.Input(1)
while True:
    if inp.poll():
        print(inp.read(1000))  # drain up to 1000 pending MIDI events
    pygame.time.wait(10)  # avoid busy-spinning
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 12:51:47 2017
@author: juang
"""
from difflib import SequenceMatcher
from utils import normalize, similar
from dateutil import parser
def Repetidos(Lista, Influ):
    """Drop near-duplicate entries from Lista.

    An entry m is considered a duplicate of an earlier entry n when their
    text fields (index 2) are more than 72% similar, they share the same
    key (index 1), and that key has a non-negative Influ[...][3] value.
    Returns a new list with the duplicates removed.
    """
    IndRep = []  # indices of entries flagged as duplicates
    for m in range(1, len(Lista)):
        for n in range(0, m):
            if m not in IndRep:
                if similar(Lista[m][2], Lista[n][2]) > 0.72:
                    try:
                        if Lista[m][1] == Lista[n][1] and Influ[Lista[m][1]][3] >= 0:
                            IndRep.append(m)
                    except (KeyError, IndexError, TypeError):
                        # FIX: narrowed from a bare except; a missing Influ key
                        # or malformed row means "not a confirmed duplicate".
                        pass
    # Rebuild the list without the flagged indices.
    return [r for q, r in enumerate(Lista) if q not in IndRep]
from pandas.core.frame import DataFrame
import streamlit as st
import numpy as np
import pandas as pd
from PIL import Image
import time
# Streamlit demo app: progress bar, interactive widgets, styled dataframe,
# map, and markdown rendering.
st.sidebar.title("streamlit 入門")
st.write("ゆいこさん こんばんは")
st.write("プログレスバーの表示")
# Bare expressions below are rendered by Streamlit's "magic" output.
"start!!"
latest_iteration = st.empty()  # placeholder updated on every iteration
bar = st.progress(0)
for i in range(100):
    latest_iteration.text(f"iteration {i+1}")
    bar.progress(i+1)
    time.sleep(0.01)
"done!!!!"
st.write("interactive widgets")
left_column, right_column = st.columns(2)
button = left_column.button("右カラムに文字を表示")
if button:
    right_column.write("ここは右カラム")
expander = st.expander("問い合わせ")
expander.write("問い合わせ内容書く")
# Sidebar inputs: free text and a 0-100 slider (default 50).
text = st.sidebar.text_input("あなたの趣味を教えてください")
condition = st.sidebar.slider("あなたの今は調子は?", 0,100,50)
# Magic rendering of (label, value) pairs.
"あなたの趣味は", text
"コンディション:", condition
st.write("Display Image")
option = st.selectbox(
    "あなたが好きな数字を教えてください",
    list(range(1,11))
)
"あたなの好きな数字は", option, "です."
st.write("Dataframe")
df = pd.DataFrame({
    "1列目":[1,2,3,4],
    "2列目":[10,20,30,40]
})
st.table(df.style.highlight_max(axis=0))  # highlight column-wise maxima
# 100 random points near lat 35.69 / lon 139.70 for the map demo.
df = pd.DataFrame(
    np.random.rand(100,2)/[50,50] + [35.69,139.70],
    columns = ["lat","lon"]
)
st.map(df)
"""
# 章
## 節
### 項
```python
import streamlit as st
import numpy as np
import pandas as pd
```
"""
|
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "operating-humor",
"metadata": {},
"outputs": [],
"source": [
"# USAGE\n",
"# python train_model.py --embeddings output/embeddings.pickle \\\n",
"#\t--recognizer output/recognizer.pickle --le output/le.pickle\n",
"\n",
"# import the necessary packages\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from sklearn.svm import SVC\n",
"import argparse\n",
"import pickle\n",
"\n",
"# construct the argument parser and parse the arguments\n",
"ap = argparse.ArgumentParser()\n",
"ap.add_argument(\"-e\", \"--embeddings\", required=True,\n",
"\thelp=\"path to serialized db of facial embeddings\")\n",
"ap.add_argument(\"-r\", \"--recognizer\", required=True,\n",
"\thelp=\"path to output model trained to recognize faces\")\n",
"ap.add_argument(\"-l\", \"--le\", required=True,\n",
"\thelp=\"path to output label encoder\")\n",
"args = vars(ap.parse_args())\n",
"\n",
"# load the face embeddings\n",
"print(\"[INFO] loading face embeddings...\")\n",
"data = pickle.loads(open(args[\"embeddings\"], \"rb\").read())\n",
"\n",
"# encode the labels\n",
"print(\"[INFO] encoding labels...\")\n",
"le = LabelEncoder()\n",
"labels = le.fit_transform(data[\"names\"])\n",
"\n",
"# train the model used to accept the 128-d embeddings of the face and\n",
"# then produce the actual face recognition\n",
"print(\"[INFO] training model...\")\n",
"recognizer = SVC(C=1.0, kernel=\"linear\", probability=True)\n",
"recognizer.fit(data[\"embeddings\"], labels)\n",
"\n",
"# write the actual face recognition model to disk\n",
"f = open(args[\"recognizer\"], \"wb\")\n",
"f.write(pickle.dumps(recognizer))\n",
"f.close()\n",
"\n",
"# write the label encoder to disk\n",
"f = open(args[\"le\"], \"wb\")\n",
"f.write(pickle.dumps(le))\n",
"f.close()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
|
from django import forms
class admin_user_form(forms.Form):
    """Single-field form for entering an admin username."""

    # CSS class applied to rows that fail validation.
    error_css_class = 'error'

    username = forms.CharField(max_length=90)
|
#!/usr/bin/python3
'''
Top-level script to start the deployment language processor ('depl')
Created on Oct 15, 2016
Arguments:
model : Name of deployment model file to be processed
@author: riaps
'''
from riaps.lang.depl import main
if __name__ == '__main__':
    # NOTE(review): the positional flag presumably toggles standalone mode -
    # confirm against the signature of riaps.lang.depl.main.
    main(True)
|
from flask import Blueprint, request, jsonify
from api.models import User, Blacklist
from api.global_functions import response_message, get_user
from api.v1.validation import check_email, check_password, check_name
from flask_cors import CORS, cross_origin
# Authentication blueprint: register, login, password management, logout.
auth = Blueprint('auth', __name__)
'''Implementing Register, login and user'''
CORS(auth)  # allow cross-origin requests to all auth endpoints
@auth.route('/register', methods=["POST"])
def register():
    """Create a new user account from the posted JSON payload."""
    payload = request.get_json()
    try:
        name = check_name(payload.get('name'))
        email = check_email(payload.get('email'))
        password = check_password(payload.get('password'))
    except Exception as exception:
        return response_message(exception.args, status_code=400)
    # Reject the registration when the email is already taken.
    if User.query.filter_by(email=email).first():
        return response_message("email is already in use", status_code=400)
    new_user = User(name, email, password)
    new_user.save()
    return response_message("User has been registered successfully",
                            status_code=201)
@auth.route('/login', methods=["POST"])
def login():
    """Authenticate a user by email/password and return a JWT auth token."""
    requestData = request.get_json()
    try:
        email = check_email(requestData.get('email'))
        password = check_password(requestData.get('password'))
    except Exception as exception:
        return response_message(exception.args, status_code=400)
    user = User.query.filter_by(email=email).first()
    if not user:
        return response_message("You are not registered. Please register before logging in", status_code=400)
    if not user.is_correct_password(password):
        return response_message("The email or password provided is wrong", status_code=401)
    auth_token = user.encode_auth_token(user.id)
    if auth_token:
        res = {
            "message": "You are now logged in as {}".format(user.name),
            "auth_token": auth_token.decode(),
            "user_id": user.id
        }
        return jsonify(res), 200
    # FIX: the view previously fell through and returned None (an HTTP 500)
    # when token generation failed; return an explicit error response instead.
    return response_message("Could not generate an auth token, please try again", status_code=500)
@auth.route('/change-password', methods=['POST'])
def change_password():
    """Change the password of the user identified by the Authorization token."""
    auth_token = request.headers.get("Authorization")
    user = get_user(auth_token)
    if not isinstance(user, User):
        # get_user returned an error description instead of a User.
        return response_message(user, 401)
    requestData = request.get_json()
    try:
        new_password = check_password(requestData.get('new_password'))
    except Exception:
        return response_message("Enter a valid password", status_code=400)
    user.set_password(new_password)
    user.save()
    # FIX: corrected the "succesfully" typo (now matches new_password's message).
    return response_message("Password has been successfully changed", status_code=200)
@auth.route('/reset-password', methods=['POST'])
def reset_password():
    """Issue a password-reset token and link for the given email."""
    requestData = request.get_json()
    try:
        email = check_email(requestData.get('email'))
    except Exception:
        return response_message("Enter a valid email", status_code=400)
    user = User.query.filter_by(email=email).first()
    if user:
        auth_token = user.encode_auth_token(user.id)
        if auth_token:
            link = "http://127.0.0.1:5000/reset-password/{}".format(auth_token.decode())
            res = {
                "message": "Reset your password from the provided token",
                "link": link
            }
            return jsonify(res), 200
    # FIX: the view previously returned None (an HTTP 500) for unknown emails
    # or failed token generation.
    # NOTE(review): this reply reveals whether an email is registered (user
    # enumeration) - consider a uniform response in production.
    return response_message("Unable to generate a reset token for that email", status_code=404)
@auth.route('/reset-password/<token>', methods=['POST'])
def new_password(token):
    """Set a new password using a reset token issued by /reset-password."""
    payload = request.get_json()
    try:
        new_password = check_password(payload.get('new_password'))
    except Exception:
        return response_message("Enter a valid password", status_code=400)
    # The token arrives as a raw URL segment, hence split_token=False.
    user = get_user(token, split_token=False)
    if not isinstance(user, User):
        return response_message(user, 401)
    user.set_password(new_password)
    user.save()
    return response_message("Password has been successfully changed", status_code=200)
@auth.route('/logout', methods=['GET'])
def logout():
    """Invalidate the caller's auth token by adding it to the blacklist."""
    token = request.headers.get("Authorization")
    user = get_user(token)
    if not isinstance(user, User):
        return response_message(user, 401)
    Blacklist(token).save()
    return response_message("You have been logged out", status_code=200)
|
import sys
sys.path.append('utils/')
from data_utils import *
import argparse
# Command-line interface: model output (-f), reference (-r) and pre-tokenized
# input (-i) files.
parser = argparse.ArgumentParser(
    description="Train a seq2seq model and save in the specified folder.")
parser.add_argument("-f", dest="ori_file", type=str)
parser.add_argument("-r", dest="ref_file", type=str)
parser.add_argument("-i", dest="inp_file", type=str)
args = parser.parse_args()
# Load the model output (ori_file) and the pre-tokenized input (inp_file).
f = codecs.open(args.ori_file, 'r', 'utf-8')
lines = f.readlines()
f.close()
f = codecs.open(args.inp_file, 'r', 'utf-8')
inp_lines = f.readlines()
f.close()
# findc = <UNK> runs successfully copied back from the input; totalc = all runs.
findc, totalc = 0,0
for i in range(len(lines)):
    # Sentence markers give the alignment search stable anchors at both ends.
    s = ' <bos> ' + lines[i] + ' <eos> '
    r = ' <bos> ' + inp_lines[i] + ' <eos> '
    # Replace each run of ' <UNK>' tokens in s with the corresponding source
    # words from r, located by matching the words before and after the run.
    while(s.find(' <UNK>')!=-1):
        ns = len(s)
        st = s.find(' <UNK>')
        if st == -1:
            break
        en = st+6
        ucnt = 1
        # Extend over consecutive ' <UNK>' tokens (each is 6 characters).
        while en+6<=ns and s[en:en+6]==' <UNK>':
            en = en+6
            ucnt += 1
        prev = s[s.rfind(' ', 0,st-1):st]   # word immediately before the run
        nxt = s[en:s.find(' ', en+1)]       # word immediately after the run
        rst, ren = 0,0
        bfind = False
        # Scan r for `prev` followed, ucnt words later, by `nxt`.
        while(rst != -1):
            rst = r.find(prev, rst)
            if rst == -1:
                break
            rst += len(prev)
            ren = rst
            tcnt = ucnt
            while (tcnt>0):
                ren = r.find(' ', ren+1)
                if ren == -1:
                    break
                tcnt -= 1
            if ren != -1 and r[ren: ren+len(nxt)]==nxt:
                s = s[:st]+r[rst:ren]+s[en:]   # splice the source words in
                bfind = True
                break
        if bfind:
            findc += 1
        else:
            # No alignment found: downcase the markers so the outer loop
            # stops matching this run and terminates.
            s = s[:st]+s[st:en].replace('<UNK>', '<unk>')+s[en:]
        totalc += 1
    # Strip the 7-char markers and any leftover UNK tokens; never emit an
    # empty line (the scorer needs one sentence per line).
    lines[i] = s[7:-7].replace('<UNK>', '').replace('<unk>', '')
    if len(lines[i].strip())==0:
        lines[i]='.\n'
    print(i, lines[i])
print('Rep/Total=%d/%d'%(findc, totalc))
# Persist the rewritten hypotheses back to the original file.
# FIX: use a context manager so the handle is closed even on error.
with codecs.open(args.ori_file, 'w', 'utf-8') as f:
    f.writelines(lines)
# Run the M2 scorer on the rewritten output against the reference.
# FIX: was "command_s = command_s = ..." (duplicated assignment target).
command_s = './data/lang8/m2scorer/scripts/m2scorer.py %s %s' % (args.ori_file, args.ref_file)
print(os.popen(command_s).readlines())
|
"""empty message
Revision ID: 0bf2391a921d
Revises: 91c22e93edd7
Create Date: 2017-06-17 14:58:04.986219
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0bf2391a921d'        # this migration
down_revision = '91c22e93edd7'   # migration this one follows
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add private-chat support: an access key plus a privacy flag.
    op.add_column('chat', sa.Column('access_key', sa.String(length=256), nullable=True))
    op.add_column('chat', sa.Column('is_private', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the private-chat columns.
    op.drop_column('chat', 'is_private')
    op.drop_column('chat', 'access_key')
    # ### end Alembic commands ###
|
import requests
import pprint

# Fetch the 5 most recent monitor-stream results from ACRCloud.
# FIX: removed the unused `payload` dict; moved the query parameters out of the
# hand-built URL into `params`, and added a timeout so a dead endpoint cannot
# hang the script forever.
# NOTE(review): keep the real access key out of source control - load it from
# an environment variable or config file.
r = requests.get(
    'https://api.acrcloud.com/v1/monitor-streams/11578/results',
    params={'access_key': 'ACCESS_KEY', 'limit': 5},
    timeout=30,
)
pprint.pprint(r.json())
import tensorflow as tf
import pretrain
import datasource
from bert_parts import layers
# When this config matches the pretraining config, the pretrained model's
# BERT-layer weights can be loaded.
config = {
    'seq_max_len': 100,
    'vocab_size': 7364,
    'embedding_size': 128,
    'num_transformer_layers': 6,
    'num_attetion_heads': 8,
    'intermediate_size': 32
}
# Prepare (synthetic) training data.
train_x, train_y = datasource.fake_data_gen_2(seq_max_len=config['seq_max_len'])
# Build the BERT layer with the same dimensions as the pretrained model.
tiny_bert_layer = layers.BertLayer(vocab_size=config['vocab_size'],
                                   embedding_size=config['embedding_size'],
                                   num_transformer_layers=config['num_transformer_layers'],
                                   num_attention_heads=config['num_attetion_heads'],
                                   intermediate_size=config['intermediate_size'])
# Read the BERT-layer weights from the pretrained checkpoint.
# NOTE(review): bert_layer_weights is loaded but never applied to
# tiny_bert_layer below - confirm whether a set_weights(...) call is missing.
bert_layer_weights = pretrain.bert_layer_weights(filename='pretrain_weights.h5')
# Classifier: BERT encoding -> first-position vector -> 3-way softmax.
model = tf.keras.models.Sequential([
    tiny_bert_layer,
    tf.keras.layers.Lambda(lambda seq: seq[:, 0, :]),
    tf.keras.layers.Dense(units=3, activation='softmax')
])
model.build(input_shape=(None, 100))
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.SparseCategoricalCrossentropy())
model.fit(train_x, train_y,
          batch_size=5,
          epochs=20,
          callbacks=[tf.keras.callbacks.EarlyStopping(patience=5,
                                                      restore_best_weights=True)])
model.save_weights('finetune_weights.h5', overwrite=True)
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import copy
import os
import time
import json
import logging
import torchvision
from torchvision import models, transforms
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from network import Discriminator
from network import Generator
# Seed every RNG so runs are reproducible.
np.random.seed(0)
torch.manual_seed(0)  # CPU RNG
torch.cuda.manual_seed_all(0)  # all GPUs
torch.backends.cudnn.deterministic = True
# FIX: benchmark=True lets cuDNN auto-select algorithms, which can differ
# between runs and defeats deterministic=True above; disable it.
torch.backends.cudnn.benchmark = False
def sample(model, device, epoch):
    """Generate a 4x4 grid of images from the generator and save it to
    out_cifar/<epoch>.png.

    model:  (generator, discriminator) pair; only the generator is used
    device: torch device the generator lives on
    epoch:  used to name the output file (zero-padded to 3 digits)
    """
    generator = model[0]
    generator.eval()
    if mutil_gpu:
        generator = generator.module  # unwrap DataParallel
    with torch.no_grad():
        sampled_latent = torch.tensor(np.random.normal(0, 1, (16, latent_dim)), dtype=torch.float32)
        sampled_latent = sampled_latent.to(device=device)
        samples = generator(sampled_latent).cpu()
    # FIX: removed the unused plt.figure()/GridSpec setup (and the dead
    # commented-out plotting code) - it created a new matplotlib figure on
    # every call without ever closing it, leaking memory each epoch.
    if not os.path.exists('out_cifar/'):
        os.makedirs('out_cifar/')
    torchvision.utils.save_image(tensor=samples, nrow=4, normalize=True,
                                 fp='out_cifar/{}.png'.format(str(epoch).zfill(3)))
def train(model, optimizer, lr_scheduler, dataloaders, device, epochs):
    """Adversarial training loop for a vanilla GAN.

    model:     (generator, discriminator) pair
    optimizer: (optimizer_G, optimizer_D) pair
    Logs losses, samples images and checkpoints both networks every epoch;
    returns the trained (generator, discriminator) pair.
    """
    generator = model[0]
    discriminator = model[1]
    optimizer_G = optimizer[0]
    optimizer_D = optimizer[1]
    for e in range(epochs):
        for x, y in tqdm(dataloaders['train']):
            generator.train()
            discriminator.train()
            # Real/fake target labels for the BCE losses.
            valid = torch.ones((x.shape[0], 1), requires_grad=False)
            fake = torch.zeros((x.shape[0], 1), requires_grad=False)
            sampled_latent = torch.tensor(np.random.normal(0, 1, (x.shape[0], latent_dim)), dtype=torch.float32).to(device=device)
            x = x.to(device=device)
            valid = valid.to(device=device)
            fake = fake.to(device=device)
            generated_imgs = generator(sampled_latent)
            ge_ = discriminator(generated_imgs)  # D's verdict on fakes
            gt_ = discriminator(x)               # D's verdict on real images
            # Generator step: push D's verdict on fakes toward "valid".
            gen_loss = nn.BCELoss()(ge_, valid)
            optimizer_G.zero_grad()
            gen_loss.backward()
            optimizer_G.step()
            # Discriminator step: fakes are detached so no gradient reaches G.
            dis_loss = (nn.BCELoss()(discriminator(generated_imgs.detach()), fake) + nn.BCELoss()(gt_, valid)) / 2
            optimizer_D.zero_grad()
            dis_loss.backward()
            optimizer_D.step()
        if lr_scheduler:
            lr_scheduler.step()
        # Per-epoch reporting, image sampling and checkpointing.
        print('epoche %d, gen loss = %f, dis loss = %f' % (e, gen_loss.item(), dis_loss.item()))
        logging.info('epoche %d, gen loss = %f, dis loss = %f' % (e, gen_loss.item(), dis_loss.item()))
        sample(model, device, e)
        writer.add_scalars("loss", {"GEN":gen_loss.item(), "DIS":dis_loss.item()}, e)
        save_model(save_dir='model_checkpoint', file_name="check_point_G", model=generator, optimizer = optimizer_G, lr_scheduler = lr_scheduler)
        save_model(save_dir='model_checkpoint', file_name="check_point_D", model=discriminator, optimizer = optimizer_D, lr_scheduler = lr_scheduler)
    # Final named checkpoints after all epochs.
    save_model(save_dir='model_checkpoint', file_name=task_name + "_G", model=generator, optimizer = optimizer_G, lr_scheduler = lr_scheduler)
    save_model(save_dir='model_checkpoint', file_name=task_name + "_D", model=discriminator, optimizer = optimizer_D, lr_scheduler = lr_scheduler)
    return model
def save_model(save_dir, model, optimizer, lr_scheduler, file_name=None):
    """Save model/optimizer (and scheduler, if given) state dicts to
    <save_dir>/<file_name>.pkl; falls back to a unix-timestamp file name.
    """
    if mutil_gpu:
        model = model.module  # unwrap DataParallel before saving
    # FIX: os.mkdir fails on nested paths and races if the directory appears
    # between the exists() check and the call; makedirs(exist_ok=True) is safe.
    os.makedirs(save_dir, exist_ok=True)
    save_path = os.path.join(save_dir, file_name if file_name else str(int(time.time())))
    # Build the state dict once and append the scheduler only when present
    # (removes the duplicated dict literal).
    state_dicts = {
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
    }
    if lr_scheduler:
        state_dicts["scheduler"] = lr_scheduler.state_dict()
    torch.save(state_dicts, save_path + '.pkl')
def load_model(file_path, model, optimizer = None, lr_scheduler = None):
    """Restore model (and optionally optimizer/scheduler) state from a
    checkpoint file written by save_model."""
    checkpoint = torch.load(file_path, map_location="cpu")
    model.load_state_dict(checkpoint["model"])
    if optimizer:
        optimizer.load_state_dict(checkpoint["optimizer"])
    if lr_scheduler:
        lr_scheduler.load_state_dict(checkpoint["scheduler"])
task_name = "Vanila_GAN_on_CIFAR"
model_name = "Vanila_GAN"
optimizer_name = 'Adam'
lr = 0.0002
weight_decay = 1e-4
step_size = 200
gamma = 0.5
batch_size = 32
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
load_checkpoint = False
mutil_gpu = False
device_ids = [1, 2]
epochs = 1000
logging.basicConfig(filename="{}.log".format(task_name), level=logging.INFO)
logging.info(
"""{}:
- model name: {}
- optimizer: {}
- learning rate: {}
- weight_decay: {}
- step_size: {}
- gamma: {}
- batch size: {}
- device : {}
- epochs: {}
- load_checkpoint: {}
- mutil_gpu: {}
- gpus: {}
""".format(
task_name,
model_name,
optimizer_name,
lr,
weight_decay,
step_size,
gamma,
batch_size,
device,
epochs,
load_checkpoint,
mutil_gpu,
device_ids)
)
print("""{}:
- model name: {}
- optimizer: {}
- learning rate: {}
- weight_decay: {}
- step_size: {}
- gamma: {}
- batch size: {}
- device : {}
- epochs: {}
- load_checkpoint: {}
- mutil_gpu: {}
- gpus: {}
""".format(
task_name,
model_name,
optimizer_name,
lr,
weight_decay,
step_size,
gamma,
batch_size,
device,
epochs,
load_checkpoint,
mutil_gpu,
device_ids))
if __name__ == "__main__":
    img_size = (32, 32, 3)  # CIFAR-10 image shape (H, W, C)
    latent_dim = 128        # dimension of the generator's input noise
    writer = SummaryWriter()
    discriminator = Discriminator(img_size = img_size)
    generator = Generator(latent_dim = latent_dim, output_size = img_size)
    # Separate Adam optimizers for G and D.
    optimizer_G = getattr(optim, optimizer_name)(generator.parameters(), lr=lr, betas = (0.5, 0.999))
    optimizer_D = getattr(optim, optimizer_name)(discriminator.parameters(), lr=lr, betas = (0.5, 0.999))
    # lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
    # if load_checkpoint:
    #     load_model("model_checkpoint/check_point.pkl", model, optimizer, lr_scheduler)
    if mutil_gpu:
        discriminator = nn.DataParallel(discriminator, device_ids, device)
        generator = nn.DataParallel(generator, device_ids, device)
    discriminator = discriminator.to(device=device)
    generator = generator.to(device=device)
    # NOTE(review): the variable is named `mnist` but actually loads CIFAR-10.
    mnist = torchvision.datasets.CIFAR10(root = "cifar10", train=True, download=True, transform=transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5])
        ]
    ))
    dataLoaders = {"train":torch.utils.data.DataLoader(mnist,
                batch_size=batch_size, shuffle=True, num_workers= 12, pin_memory=True, drop_last=False)}
    train(
        model=(generator, discriminator),
        optimizer=(optimizer_G, optimizer_D),
        lr_scheduler=None,
        dataloaders=dataLoaders,
        device=device,
        epochs=epochs
    )
|
from django import forms
from .models import Post
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing a Post, with Bootstrap-styled widgets."""

    class Meta:
        model = Post
        fields = ('title', 'title_tag', 'author', 'body', 'header_image')
        widgets = {
            'title': forms.TextInput(attrs={'class': 'form-control'}),
            'title_tag': forms.TextInput(attrs={'class': 'form-control'}),
            # Hidden author input; its value is filled in on the client side.
            'author': forms.TextInput(attrs={'class': 'form-control', 'value': '', 'id': 'neha', 'type': 'hidden'}),
            'body': forms.Textarea(attrs={'class': 'form-control'}),
        }
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.views.generic import DetailView
from django.views.generic import ListView
from .models import Book
class BookListView(LoginRequiredMixin, ListView):
    """List all books; restricted to authenticated users."""
    model = Book
    context_object_name = 'book_list'
    template_name = 'books/book_list.html'
    login_url = 'account_login'  # where anonymous users are redirected
class BookDetailView(LoginRequiredMixin, PermissionRequiredMixin, DetailView):
    """Show a single book; requires login plus the books.special_status
    permission."""
    model = Book
    context_object_name = 'book'
    template_name = 'books/book_detail.html'
    login_url = 'account_login'  # where anonymous users are redirected
    permission_required = 'books.special_status'
|
# Blocklist of unacceptable (adult-content) words and phrases, mostly Russian.
# NOTE(review): matching semantics (exact vs substring) are defined by the
# consumer of this list - a bare '18' would over-match if used as a substring.
bad_words = [
    'порно',
    'porno',
    'pron',
    'прон',
    'порнуха',
    'секс',
    'эротика',
    'хуй',
    'пизда',
    'ебля',
    'влагалище',
    'ебал',
    'ебет',
    'ебать',
    'шлюха',
    'минет',
    'сосет',
    'трах',
    'кончает',
    'кончил',
    'выебал',
    'вагина',
    'жесткое порно',
    'анал',
    'милфа',
    'трахнул',
    'инцест',
    'голая',
    'сиськи',
    'шок контент',
    'ебут',
    'долбят',
    '18+',
    '18'
]
from tkinter import *
import datetime
import pandas as pd
from generateWindow_5 import *
from generateWindow_6 import *
# stallList = pd.read_csv('stallList.csv')
# Full menu across all stalls; the 'Stall' and 'Availability' columns are
# used below to filter per-stall, per-meal rows.
allStallMenu = pd.read_csv('stallMenu.csv')
# Author: Le Quang Anh
def showMenu(frame, stallMenu, meal):
    ''' Input:
        1) frame: the frame to show the menu items in (tkinter.Frame() object)
        2) stallMenu: the menu for the particular store (pandas DataFrame)
        3) meal: breakfast or lunch or dinner
    Output: show the dishes available for that meal
    '''
    stallMenuParticularMeal = stallMenu[stallMenu['Availability'] == meal]
    numRow = stallMenuParticularMeal.shape[0]
    if numRow > 0:
        # Header row - created once (the old code re-created both header
        # labels on every loop iteration, stacking duplicate widgets).
        Label(frame, text=meal + ' dishes').grid(row=0, column=0)
        Label(frame, text='Price ($)').grid(row=0, column=1)
    for i in range(numRow):
        # FIX: rows must come from the meal-filtered frame; the old code
        # indexed the unfiltered stallMenu, showing dishes from other meals.
        row = stallMenuParticularMeal.iloc[i]
        dishName = row[1]
        dishPrice = row[2]
        dishNameLabel = Label(frame, text=dishName)
        dishNameLabel.grid(row=i+1, column=0, sticky=W)
        dishPriceLabel = Label(frame, text=dishPrice)
        dishPriceLabel.grid(row=i+1, column=1, sticky=W)
# Author: Le Quang Anh
def generateWindow_4(userDatePara, userTimePara, stallName, statusMealTime, operatingTimeButtonFunction=generateWindow_5):
    '''
    Input:
        1) stallName: name of the chosen stall
        2) userDatePara and userTimePara are parameters to determine if store is open or closed and available menu
        3) operatingTimeButtonFunction is the function to call when the operatingTimeButton is pressed
    Output:
        1) window_4 is created to show information of chosen store
        2) operatingTime
    '''
    window_4 = Toplevel()
    window_4.title(stallName)
    particularStallMenu = allStallMenu[allStallMenu['Stall'] == stallName]
    topFrame = Frame(window_4)
    topFrame.pack()
    bottomFrame = Frame(window_4)
    bottomFrame.pack()
    if statusMealTime != 'Closed':
        # Stall is open: show the current meal's menu and a waiting-time button.
        showMenu(frame=topFrame, stallMenu=particularStallMenu, meal=statusMealTime)
        waitingTimeButton = Button(bottomFrame, text='Estimate waiting time', padx=10, pady=5, bg="yellow", \
            fg="black", command=generateWindow_6)
        waitingTimeButton.pack()
    else:
        closedLabel = Label(topFrame, text='This store is currently closed. Would you like to check the operating time?')
        closedLabel.pack()
        # FIX: honour the injected callback; the old code ignored the
        # operatingTimeButtonFunction parameter and always called
        # generateWindow_5 directly.
        operatingTimeButtonCommand = lambda: operatingTimeButtonFunction(stallName=stallName)
        operatingTimeButton = Button(bottomFrame, text='Check operating time', padx=10, pady=5, bg="purple", fg="white", \
            command=operatingTimeButtonCommand)
        operatingTimeButton.pack()
|
import itertools
# Number of test cases, read once from stdin (Python 2 raw_input).
T = int(raw_input())
# NOTE(review): Trees is never referenced later in this script - likely dead.
Trees = {}
def generate_match(winner, N):
    """Expand a tournament champion into the full round-of-2^N lineup.

    Working backwards from the winner: an R beat an S, an S beat a P, and a
    P beat an R, so each player expands into the pair that met one round
    earlier. Returns the list of first-round players, in bracket order.
    """
    expansions = {'R': ['S', 'R'], 'S': ['S', 'P'], 'P': ['P', 'R']}
    level = [winner]
    for _ in range(N):
        level = [player for parent in level for player in expansions[parent]]
    return level
def count_match(match):
    """Return a dict mapping each symbol in `match` to its occurrence count."""
    tally = {}
    for symbol in match:
        tally[symbol] = tally.get(symbol, 0) + 1
    return tally
def special_sort(match):
    """Canonicalize a lineup: at each bracket level, order every pair of
    sibling groups lexicographically, then merge adjacent pairs. Returns the
    flattened result as a list of single characters."""
    groups = match[:]
    while len(groups) > 1:
        merged = []
        for idx in range(0, len(groups), 2):
            left, right = groups[idx], groups[idx + 1]
            if left > right:
                left, right = right, left
            merged.append(left + right)
        groups = merged
    return list(groups[0])
# Main loop: one rock-paper-scissors tournament query per test case.
for t in xrange(1, T+1):
    NRPS = raw_input().strip().split()
    N,R,P,S = tuple([int(x) for x in NRPS])
    expected = {'R': R, 'P': P, 'S': S}
    solution_match = None
    # Try every possible champion and keep the lexicographically smallest
    # lineup whose symbol counts match the requested R/P/S totals.
    # (Folds the three hand-unrolled copies of this logic into one loop.)
    for winner in ('R', 'S', 'P'):
        match = generate_match(winner, N)
        match_counter = count_match(match)
        if all(match_counter.get(k, 0) == expected[k] for k in ('R', 'P', 'S')):
            if solution_match == None or solution_match > match:
                solution_match = match
    if solution_match != None:
        solution_match = special_sort(solution_match)
        print('Case #%d: %s' %(t, ''.join(solution_match)))
    else:
        print('Case #%d: IMPOSSIBLE' %(t))
|
# -*- coding: utf-8 -*-
"""Unit-tests for pyfun/core/chebfun.py"""
from __future__ import division
from operator import __add__
from operator import __mul__
from operator import __neg__
from operator import __pos__
from operator import __sub__
from operator import truediv
# Binary operators exercised by the tests; division handling is
# Python-version dependent (see below).
binops = [__add__, __mul__, __sub__, truediv]
try:
    # for Python 2 we need to test div separately
    from operator import __div__
    binops.append(__div__)
    div_binops = (__div__, truediv)
except ImportError:
    # Python 3
    div_binops = (truediv,)
from unittest import TestCase
from itertools import combinations
from numpy import arccos
from numpy import arccosh
from numpy import arcsin
from numpy import arcsinh
from numpy import arctan
from numpy import arctanh
from numpy import cos
from numpy import cosh
from numpy import exp
from numpy import exp2
from numpy import expm1
from numpy import sin
from numpy import sinh
from numpy import tan
from numpy import tanh
from numpy import log
from numpy import log2
from numpy import log10
from numpy import log1p
from numpy import sqrt
from numpy import ndarray
from numpy import array
from numpy import arange
from numpy import append
from numpy import sum
from numpy import abs
from numpy import pi
from numpy import linspace
from numpy import maximum
from numpy import minimum
from numpy import equal
from numpy import isscalar
from numpy import isfinite
from numpy.random import rand
from matplotlib.pyplot import subplots
from chebpy.core.bndfun import Bndfun
from chebpy.core.chebfun import Chebfun
from chebpy.core.settings import DefaultPrefs
from chebpy.core.utilities import Domain
from chebpy.core.utilities import Interval
from chebpy.core.exceptions import IntervalGap
from chebpy.core.exceptions import IntervalOverlap
from chebpy.core.exceptions import BadDomainArgument
from chebpy.core.exceptions import BadFunLengthArgument
from chebpy import chebfun
from tests.utilities import infnorm
from tests.utilities import testfunctions
eps = DefaultPrefs.eps
class Construction(TestCase):
def setUp(self):
f = lambda x: exp(x)
self.f = f
self.fun0 = Bndfun.initfun_adaptive(f, Interval(-1,0))
self.fun1 = Bndfun.initfun_adaptive(f, Interval(0,1))
self.fun2 = Bndfun.initfun_adaptive(f, Interval(-.5,0.5))
self.fun3 = Bndfun.initfun_adaptive(f, Interval(2,2.5))
self.fun4 = Bndfun.initfun_adaptive(f, Interval(-3,-2))
self.funs_a = array([self.fun1, self.fun0, self.fun2])
self.funs_b = array([self.fun1, self.fun2])
self.funs_c = array([self.fun0, self.fun3])
self.funs_d = array([self.fun1, self.fun4])
def test__init__pass(self):
Chebfun([self.fun0])
Chebfun([self.fun1])
Chebfun([self.fun2])
Chebfun([self.fun0, self.fun1])
def test__init__fail(self):
self.assertRaises(IntervalOverlap, Chebfun, self.funs_a)
self.assertRaises(IntervalOverlap, Chebfun, self.funs_b)
self.assertRaises(IntervalGap, Chebfun, self.funs_c)
self.assertRaises(IntervalGap, Chebfun, self.funs_d)
def test_initempty(self):
emptyfun = Chebfun.initempty()
self.assertEqual(emptyfun.funs.size, 0)
def test_initconst(self):
self.assertTrue(Chebfun.initconst(1, [-1,1]).isconst)
self.assertTrue(Chebfun.initconst(-10, linspace(-1,1,11)).isconst)
self.assertTrue(Chebfun.initconst(3, [-2,0,1]).isconst)
self.assertTrue(Chebfun.initconst(3.14, linspace(-100,-90,11)).isconst)
self.assertFalse(Chebfun([self.fun0]).isconst)
self.assertFalse(Chebfun([self.fun1]).isconst)
self.assertFalse(Chebfun([self.fun2]).isconst)
self.assertFalse(Chebfun([self.fun0, self.fun1]).isconst)
def test_initidentity(self):
_doms = (
linspace(-1,1,2),
linspace(-1,1,11),
linspace(-10,17,351),
linspace(-9.3,-3.2,22),
linspace(2.5,144.3,2112),
)
for _dom in _doms:
ff = Chebfun.initidentity(_dom)
a, b = ff.support
xx = linspace(a, b, 1001)
tol = eps * ff.hscale
self.assertLessEqual(infnorm(ff(xx)-xx), tol)
# test the default case
ff = Chebfun.initidentity()
a, b = ff.support
xx = linspace(a, b, 1001)
tol = eps * ff.hscale
self.assertLessEqual(infnorm(ff(xx)-xx), tol)
def test_initfun_adaptive_continuous_domain(self):
ff = Chebfun.initfun_adaptive(self.f, [-2,-1])
self.assertEqual(ff.funs.size, 1)
a, b = ff.breakdata.keys()
fa, fb, = ff.breakdata.values()
self.assertEqual(a,-2)
self.assertEqual(b,-1)
self.assertLessEqual(abs(fa-self.f(-2)), eps)
self.assertLessEqual(abs(fb-self.f(-1)), eps)
def test_initfun_adaptive_piecewise_domain(self):
ff = Chebfun.initfun_adaptive(self.f, [-2,0,1])
self.assertEqual(ff.funs.size, 2)
a, b, c = ff.breakdata.keys()
fa, fb, fc = ff.breakdata.values()
self.assertEqual(a,-2)
self.assertEqual(b, 0)
self.assertEqual(c, 1)
self.assertLessEqual(abs(fa-self.f(-2)), eps)
self.assertLessEqual(abs(fb-self.f( 0)), eps)
self.assertLessEqual(abs(fc-self.f( 1)), 2*eps)
def test_initfun_adaptive_raises(self):
initfun = Chebfun.initfun_adaptive
self.assertRaises(BadDomainArgument, initfun, self.f, [-2])
self.assertRaises(BadDomainArgument, initfun, self.f, domain=[-2])
self.assertRaises(BadDomainArgument, initfun, self.f, domain=None)
def test_initfun_fixedlen_continuous_domain(self):
ff = Chebfun.initfun_fixedlen(self.f, 20, [-2,-1])
self.assertEqual(ff.funs.size, 1)
a, b = ff.breakdata.keys()
fa, fb, = ff.breakdata.values()
self.assertEqual(a,-2)
self.assertEqual(b,-1)
self.assertLessEqual(abs(fa-self.f(-2)), eps)
self.assertLessEqual(abs(fb-self.f(-1)), eps)
def test_initfun_fixedlen_piecewise_domain_0(self):
ff = Chebfun.initfun_fixedlen(self.f, 30, [-2,0,1])
self.assertEqual(ff.funs.size, 2)
a, b, c = ff.breakdata.keys()
fa, fb, fc = ff.breakdata.values()
self.assertEqual(a,-2)
self.assertEqual(b, 0)
self.assertEqual(c, 1)
self.assertLessEqual(abs(fa-self.f(-2)), 3*eps)
self.assertLessEqual(abs(fb-self.f( 0)), 3*eps)
self.assertLessEqual(abs(fc-self.f( 1)), 3*eps)
def test_initfun_fixedlen_piecewise_domain_1(self):
ff = Chebfun.initfun_fixedlen(self.f, [30,20], [-2,0,1])
self.assertEqual(ff.funs.size, 2)
a, b, c = ff.breakdata.keys()
fa, fb, fc = ff.breakdata.values()
self.assertEqual(a,-2)
self.assertEqual(b, 0)
self.assertEqual(c, 1)
self.assertLessEqual(abs(fa-self.f(-2)), 3*eps)
self.assertLessEqual(abs(fb-self.f( 0)), 3*eps)
self.assertLessEqual(abs(fc-self.f( 1)), 6*eps)
def test_initfun_fixedlen_raises(self):
initfun = Chebfun.initfun_fixedlen
self.assertRaises(BadDomainArgument, initfun, self.f, 10, [-2])
self.assertRaises(BadDomainArgument, initfun, self.f, n=10, domain=[-2])
self.assertRaises(BadDomainArgument, initfun, self.f, n=10, domain=None)
self.assertRaises(BadFunLengthArgument, initfun, self.f, [30,40], [-1,1])
self.assertRaises(TypeError, initfun, self.f, None, [-1,1])
    def test_initfun_fixedlen_succeeds(self):
        # An empty length vector produces an empty chebfun.
        self.assertTrue(Chebfun.initfun_fixedlen(self.f, [], [-2,-1,0]).isempty)
        # check that providing a vector with None elements calls the
        # Tech adaptive constructor
        g0 = Chebfun.initfun_adaptive(self.f, [-2,-1,0])
        g1 = Chebfun.initfun_fixedlen(self.f, [None,None], [-2,-1,0])
        g2 = Chebfun.initfun_fixedlen(self.f, [None,40], [-2,-1,0])
        # all-None lengths should reproduce the adaptive result exactly
        for fun1, fun2 in zip(g1,g0):
            self.assertEqual(sum(fun1.coeffs-fun2.coeffs), 0)
        # mixed: the None slot alone should match the adaptive fun
        self.assertEqual(sum(g2.funs[0].coeffs-g0.funs[0].coeffs), 0)
class Properties(TestCase):
    """Tests for the read-only properties of Chebfun objects."""
    def setUp(self):
        # empty, single-piece, and multi-piece chebfuns of x**2
        self.f0 = Chebfun.initempty()
        self.f1 = Chebfun.initfun_adaptive(lambda x: x**2, [-1,1])
        self.f2 = Chebfun.initfun_adaptive(lambda x: x**2, [-1,0,1,2])
    def test_breakpoints(self):
        self.assertEqual(self.f0.breakpoints.size, 0)
        self.assertTrue(equal(self.f1.breakpoints,[-1,1]).all())
        self.assertTrue(equal(self.f2.breakpoints,[-1,0,1,2]).all())
    def test_domain(self):
        # the empty chebfun exposes a bare ndarray; nonempty ones a Domain
        d1 = Domain([-1,1])
        d2 = Domain([-1,0,1,2])
        self.assertIsInstance(self.f0.domain, ndarray)
        self.assertIsInstance(self.f1.domain, Domain)
        self.assertIsInstance(self.f2.domain, Domain)
        self.assertEqual(self.f0.domain.size, 0)
        self.assertEqual(self.f1.domain, d1)
        self.assertEqual(self.f2.domain, d2)
    def test_hscale(self):
        # horizontal scale; expected values below are consistent with
        # hscale = max |endpoint| -- confirm against the implementation
        self.assertEqual(self.f0.hscale, 0)
        self.assertEqual(self.f1.hscale, 1)
        self.assertEqual(self.f2.hscale, 2)
    def test_isempty(self):
        self.assertTrue(self.f0.isempty)
        self.assertFalse(self.f1.isempty)
        self.assertFalse(self.f2.isempty)
    def test_isconst(self):
        # x**2 is not constant; piecewise constants with one value are
        self.assertFalse(self.f0.isconst)
        self.assertFalse(self.f1.isconst)
        self.assertFalse(self.f2.isconst)
        c1 = Chebfun.initfun_fixedlen(lambda x: 0*x+3, 1, [-2,-1,0,1,2,3])
        c2 = Chebfun.initfun_fixedlen(lambda x: 0*x-1, 1, [-2,3])
        self.assertTrue(c1.isconst)
        self.assertTrue(c2.isconst)
    def test_support(self):
        # support is the pair [first breakpoint, last breakpoint]
        self.assertIsInstance(self.f0.support, ndarray)
        self.assertIsInstance(self.f1.support, ndarray)
        self.assertIsInstance(self.f2.support, ndarray)
        self.assertEqual(self.f0.support.size, 0)
        self.assertTrue(equal(self.f1.support,[-1,1]).all())
        self.assertTrue(equal(self.f2.support,[-1,2]).all())
    def test_vscale(self):
        # vertical scale: max of |x**2| over the respective domains
        self.assertEqual(self.f0.vscale, 0)
        self.assertEqual(self.f1.vscale, 1)
        self.assertEqual(self.f2.vscale, 4)
class ClassUsage(TestCase):
    """General usage tests: copying, iteration, the identity property .x,
    simplification and restriction.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced by ``assertEqual`` throughout.
    """
    def setUp(self):
        self.f0 = Chebfun.initempty()
        self.f1 = Chebfun.initfun_adaptive(lambda x: x**2, [-1,1])
        self.f2 = Chebfun.initfun_adaptive(lambda x: x**2, [-1,0,1,2])
    def test_copy(self):
        # copies are distinct objects whose funs carry identical coefficients
        f0_copy = self.f0.copy()
        f1_copy = self.f1.copy()
        f2_copy = self.f2.copy()
        self.assertTrue(f0_copy.isempty)
        self.assertEqual(f1_copy.funs.size, 1)
        for k in range(self.f1.funs.size):
            fun = self.f1.funs[k]
            funcopy = f1_copy.funs[k]
            self.assertNotEqual(fun, funcopy)
            self.assertEqual(sum(fun.coeffs-funcopy.coeffs), 0)
        for k in range(self.f2.funs.size):
            fun = self.f2.funs[k]
            funcopy = f2_copy.funs[k]
            self.assertNotEqual(fun, funcopy)
            self.assertEqual(sum(fun.coeffs-funcopy.coeffs), 0)
    def test__iter__(self):
        # iterating a chebfun is the same as iterating its funs array
        for f in [self.f0, self.f1, self.f2]:
            a1 = [x for x in f]
            a2 = [x for x in f.funs]
            self.assertTrue(equal(a1,a2).all())
    def test_x_property(self):
        # f.x is the identity chebfun on the support of f
        _doms = (
            linspace(-1,1,2),
            linspace(-1,1,11),
            linspace(-9.3,-3.2,22),
        )
        for _dom in _doms:
            f = Chebfun.initfun_fixedlen(sin, 1000, _dom)
            x = f.x
            a, b = x.support
            pts = linspace(a, b, 1001)
            tol = eps * f.hscale
            self.assertLessEqual(infnorm(x(pts)-pts), tol)
    def test_restrict_(self):
        # test a variety of domains with breaks
        doms = [(-4,4), (-4,0,4), (-2,-1, 0.3, 1, 2.5)]
        for dom in doms:
            ff = Chebfun.initfun_fixedlen(cos, 25, domain=dom)
            # define some arbitrary subdomains
            yy = linspace(dom[0], dom[-1], 11)
            subdoms = [yy, yy[2:7], yy[::2]]
            for subdom in subdoms:
                xx = linspace(subdom[0], subdom[-1], 1001)
                gg = ff._restrict(subdom)
                vscl = ff.vscale
                hscl = ff.hscale
                lscl = max([fun.size for fun in ff])
                tol = vscl*hscl*lscl*eps
                # sample the restricted function and compare with original
                self.assertLessEqual(infnorm(ff(xx)-gg(xx)), tol)
                # check there are at least as many funs as subdom elements
                self.assertGreaterEqual(len(gg.funs), len(subdom)-1)
                for fun in gg:
                    # check each fun has length 25
                    self.assertEqual(fun.size, 25)
    def test_restrict__empty(self):
        self.assertTrue(self.f0._restrict([-1,1]).isempty)
    def test_simplify(self):
        dom = linspace(-2,1.5,13)
        f = chebfun(cos, dom, 70).simplify()
        g = chebfun(cos, dom)
        self.assertEqual(f.domain, g.domain)
        for n, fun in enumerate(f):
            # we allow one degree of freedom difference
            # TODO: check this
            self.assertLessEqual(fun.size-g.funs[n].size, 1)
    def test_simplify_empty(self):
        self.assertTrue(self.f0.simplify().isempty)
    def test_restrict(self):
        dom1 = Domain(linspace(-2,1.5,13))
        dom2 = Domain(linspace(-1.7,0.93,17))
        dom3 = dom1.merge(dom2).restrict(dom2)
        f = chebfun(cos, dom1).restrict(dom2)
        g = chebfun(cos, dom3)
        self.assertEqual(f.domain, g.domain)
        for n, fun in enumerate(f):
            # we allow two degrees of freedom difference either way
            # TODO: once standard chop is fixed, may be able to reduce 4 to 0
            self.assertLessEqual(fun.size-g.funs[n].size, 4)
    def test_restrict_empty(self):
        self.assertTrue(self.f0.restrict([-1,1]).isempty)
class Algebra(TestCase):
    """Arithmetic on Chebfun objects: empty and constant operand cases.

    Binary Chebfun-Chebfun cases are attached dynamically below via
    binaryOpTester/unaryOpTester.
    """
    def setUp(self):
        self.emptyfun = Chebfun.initempty()
        # 1000 uniform random points in [-1, 1] (available to generated tests)
        self.yy = -1 + 2*rand(1000)
    # check +(empty Chebfun) = (empty Chebfun)
    def test__pos__empty(self):
        self.assertTrue((+self.emptyfun).isempty)
    # check -(empty Chebfun) = (empty Chebfun)
    def test__neg__empty(self):
        self.assertTrue((-self.emptyfun).isempty)
    # check (empty Chebfun) + (Chebfun) = (empty Chebfun)
    # and (Chebfun) + (empty Chebfun) = (empty Chebfun)
    def test__add__radd__empty(self):
        for (f, _, _) in testfunctions:
            for dom, _ in chebfun_testdomains:
                a, b = dom
                ff = Chebfun.initfun_adaptive(f, linspace(a, b, 13))
                self.assertTrue((self.emptyfun+ff).isempty)
                self.assertTrue((ff+self.emptyfun).isempty)
    # check the output of (constant + Chebfun)
    # and (Chebfun + constant)
    def test__add__radd__constant(self):
        for (f, _, _) in testfunctions:
            for c in (-1, 1, 10, -1e5):
                for dom, _ in chebfun_testdomains:
                    a, b = dom
                    xx = linspace(a, b, 1001)
                    ff = Chebfun.initfun_adaptive(f, linspace(a, b, 11))
                    g = lambda x: c + f(x)
                    gg1 = c + ff
                    gg2 = ff + c
                    # tolerance scaled by vertical/horizontal scale and length
                    vscl = ff.vscale
                    hscl = ff.hscale
                    lscl = max([fun.size for fun in ff])
                    tol = 2*abs(c)*vscl*hscl*lscl*eps
                    self.assertLessEqual(infnorm(g(xx)-gg1(xx)), tol)
                    self.assertLessEqual(infnorm(g(xx)-gg2(xx)), tol)
    # check (empty Chebfun) - (Chebfun) = (empty Chebfun)
    # and (Chebfun) - (empty Chebfun) = (empty Chebfun)
    def test__sub__rsub__empty(self):
        for (f, _, _) in testfunctions:
            for dom, _ in chebfun_testdomains:
                a, b = dom
                ff = Chebfun.initfun_adaptive(f, linspace(a, b, 13))
                self.assertTrue((self.emptyfun-ff).isempty)
                self.assertTrue((ff-self.emptyfun).isempty)
    # check the output of (constant - Chebfun)
    # and (Chebfun - constant)
    def test__sub__rsub__constant(self):
        for (f, _, _) in testfunctions:
            for c in (-1, 1, 10, -1e5):
                for dom, _ in chebfun_testdomains:
                    a, b = dom
                    xx = linspace(a, b, 1001)
                    ff = Chebfun.initfun_adaptive(f, linspace(a, b, 11))
                    g = lambda x: c - f(x)
                    gg1 = c - ff
                    gg2 = ff - c
                    vscl = ff.vscale
                    hscl = ff.hscale
                    lscl = max([fun.size for fun in ff])
                    tol = 2*abs(c)*vscl*hscl*lscl*eps
                    self.assertLessEqual(infnorm(g(xx)-gg1(xx)), tol)
                    # ff - c == -(c - f), hence the negated reference
                    self.assertLessEqual(infnorm(-g(xx)-gg2(xx)), tol)
    # check (empty Chebfun) * (Chebfun) = (empty Chebfun)
    # and (Chebfun) * (empty Chebfun) = (empty Chebfun)
    def test__mul__rmul__empty(self):
        for (f, _, _) in testfunctions:
            for dom, _ in chebfun_testdomains:
                a, b = dom
                ff = Chebfun.initfun_adaptive(f, linspace(a, b, 13))
                self.assertTrue((self.emptyfun*ff).isempty)
                self.assertTrue((ff*self.emptyfun).isempty)
    # check the output of (constant * Chebfun)
    # and (Chebfun * constant)
    def test__mul__rmul__constant(self):
        for (f, _, _) in testfunctions:
            for c in (-1, 1, 10, -1e5):
                for dom, _ in chebfun_testdomains:
                    a,b = dom
                    xx = linspace(a, b, 1001)
                    ff = Chebfun.initfun_adaptive(f, linspace(a, b, 11))
                    g = lambda x: c * f(x)
                    gg1 = c * ff
                    gg2 = ff * c
                    vscl = ff.vscale
                    hscl = ff.hscale
                    lscl = max([fun.size for fun in ff])
                    tol = 2*abs(c)*vscl*hscl*lscl*eps
                    self.assertLessEqual(infnorm(g(xx)-gg1(xx)), tol)
                    self.assertLessEqual(infnorm(g(xx)-gg2(xx)), tol)
    # check (empty Chebfun) / (Chebfun) = (empty Chebfun)
    # and (Chebfun) / (empty Chebfun) = (empty Chebfun)
    def test_truediv_empty(self):
        for (f, _, _) in testfunctions:
            for dom, _ in chebfun_testdomains:
                a, b = dom
                ff = Chebfun.initfun_adaptive(f, linspace(a, b, 13))
                self.assertTrue((self.emptyfun/ff).isempty)
                self.assertTrue((ff/self.emptyfun).isempty)
    # check the output of (constant / Chebfun)
    # and (Chebfun / constant)
    def test_truediv_constant(self):
        for (f, _, hasRoots) in testfunctions:
            for c in (-1, 1, 10, -1e5):
                for dom, _ in chebfun_testdomains:
                    a,b = dom
                    xx = linspace(a, b, 1001)
                    ff = Chebfun.initfun_adaptive(f, linspace(a, b, 11))
                    g = lambda x: f(x) / c
                    gg = ff / c
                    vscl = gg.vscale
                    hscl = gg.hscale
                    lscl = max([fun.size for fun in gg])
                    tol = 2*abs(c)*vscl*hscl*lscl*eps
                    self.assertLessEqual(infnorm(g(xx)-gg(xx)), tol)
                    # don't do the following test for functions with roots
                    # (c / f would blow up at the roots of f)
                    if not hasRoots:
                        h = lambda x: c / f(x)
                        hh = c / ff
                        vscl = hh.vscale
                        hscl = hh.hscale
                        lscl = max([fun.size for fun in hh])
                        tol = 2*abs(c)*vscl*hscl*lscl*eps
                        self.assertLessEqual(infnorm(h(xx)-hh(xx)), tol)
# (domain, test_tolerance) pairs shared by the generated algebra tests
chebfun_testdomains = [
    ([-1,1], 2*eps),
    ([-2,1], eps),
    ([-1,2], eps),
    ([-5,9], 35*eps),
]
# add tests for the binary operators
def binaryOpTester(f, g, binop, dom, tol):
    # Build chebfuns of f and g on n- and m-piece equispaced partitions of
    # dom and return a test method asserting binop(ff, gg) matches the
    # pointwise binop(f, g) to a scale-adjusted tolerance.
    a, b = dom
    xx = linspace(a,b,1001)
    n, m = 3, 8
    ff = Chebfun.initfun_adaptive(f, linspace(a,b,n+1))
    gg = Chebfun.initfun_adaptive(g, linspace(a,b,m+1))
    FG = lambda x: binop(f(x), g(x))
    fg = binop(ff, gg)
    def tester(self):
        vscl = max([ff.vscale, gg.vscale])
        hscl = max([ff.hscale, gg.hscale])
        lscl = max([fun.size for fun in append(ff.funs, gg.funs)])
        self.assertEqual(ff.funs.size, n)
        self.assertEqual(gg.funs.size, m)
        # size of the breakpoint union of the n- and m-piece partitions
        self.assertEqual(fg.funs.size, n+m-1)
        self.assertLessEqual(infnorm(fg(xx)-FG(xx)), vscl*hscl*lscl*tol)
    return tester
for binop in binops:
    for (f, _, _), (g, _, denomHasRoots) in combinations(testfunctions, 2):
        for dom, tol in chebfun_testdomains:
            if binop in div_binops and denomHasRoots:
                # skip truediv test if denominator has roots on the real line
                pass
            else:
                _testfun_ = binaryOpTester(f, g, binop, dom, 2*tol)
                a, b = dom
                binopname = binop.__name__
                # case of truediv: add leading and trailing underscores
                if binopname[0] != '_':
                    binopname = '_' + binopname
                if binopname[-1] != '_':
                    binopname = binopname + '_'
                _testfun_.__name__ = \
                    "test{}{}_{}_[{:.0f},..,{:.0f}]".format(
                        binopname, f.__name__, g.__name__, a, b)
                setattr(Algebra, _testfun_.__name__, _testfun_)
# add tests for the unary operators
def unaryOpTester(f, unaryop, dom, tol):
    # Return a test method asserting unaryop(ff) matches the pointwise
    # unaryop(f) to a scale-adjusted tolerance.
    a, b = dom
    xx = linspace(a,b,1001)
    ff = Chebfun.initfun_adaptive(f, linspace(a,b,9))
    GG = lambda x: unaryop(f(x))
    gg = unaryop(ff)
    def tester(self):
        vscl = ff.vscale
        hscl = ff.hscale
        lscl = max([fun.size for fun in ff])
        # a unary op must preserve the piecewise structure
        self.assertEqual(ff.funs.size, gg.funs.size)
        self.assertLessEqual(infnorm(gg(xx)-GG(xx)), vscl*hscl*lscl*tol)
    return tester
unaryops = (
    __pos__,
    __neg__,
)
for unaryop in unaryops:
    for (f, _, _) in testfunctions:
        for dom, tol in chebfun_testdomains:
            _testfun_ = unaryOpTester(f, unaryop, dom, tol)
            _testfun_.__name__ = \
                "test{}{}_[{:.0f},..,{:.0f}]".format(
                    unaryop.__name__, f.__name__, dom[0], dom[1])
            setattr(Algebra, _testfun_.__name__, _testfun_)
class Ufuncs(TestCase):
    """Container for the numpy-ufunc composition tests attached below.

    The attribute names are referenced by the generated testers, so they
    must not be renamed.
    """
    def setUp(self):
        self.emptyfun = Chebfun.initempty()
        # 1000 uniform random points in [-1, 1]
        self.yy = -1 + 2*rand(1000)
# the numpy ufuncs under test
ufuncs = (
    arccos, arccosh, arcsin, arcsinh, arctan, arctanh, cos, cosh, exp, exp2,
    expm1, log, log2, log10, log1p, sinh, sin, tan, tanh, sqrt,
)
# empty-case tests: each ufunc applied to an empty chebfun stays empty
def ufuncEmptyCaseTester(ufunc):
    def tester(self):
        self.assertTrue(ufunc(self.emptyfun).isempty)
    return tester
for ufunc in ufuncs:
    _testfun_ = ufuncEmptyCaseTester(ufunc)
    _testfun_.__name__ = "test_emptycase_{}".format(ufunc.__name__)
    setattr(Ufuncs, _testfun_.__name__, _testfun_)
# TODO: Add more test cases
# add ufunc tests:
# (ufunc, [([fun1, interval1], tol1), ([fun2, interval2], tol2), ... ])
uf1 = lambda x: x
uf1.__name__ = "x"
# intervals are chosen inside each ufunc's real domain
# (e.g. (2,3) for log/sqrt, (-.8,.8) for arcsin/arctanh)
ufunc_test_params = [
    (arccos, [([uf1, (-.8,.8)], eps), ]),
    (arccosh, [([uf1, (2,3) ], eps), ]),
    (arcsin, [([uf1, (-.8,.8)], eps), ]),
    (arcsinh, [([uf1, (2,3) ], eps), ]),
    (arctan, [([uf1, (-.8,.8)], eps), ]),
    (arctanh, [([uf1, (-.8,.8)], eps), ]),
    (cos, [([uf1, (-3,3) ], eps), ]),
    (cosh, [([uf1, (-3,3) ], eps), ]),
    (exp, [([uf1, (-3,3) ], eps), ]),
    (exp2, [([uf1, (-3,3) ], eps), ]),
    (expm1, [([uf1, (-3,3) ], eps), ]),
    (log, [([uf1, (2,3) ], eps), ]),
    (log2, [([uf1, (2,3) ], eps), ]),
    (log10, [([uf1, (2,3) ], eps), ]),
    (log1p, [([uf1, (-.8,.8)], eps), ]),
    (sinh, [([uf1, (-3,3) ], eps), ]),
    (sin, [([uf1, (-3,3) ], eps), ]),
    (tan, [([uf1, (-.8,.8)], eps), ]),
    (tanh, [([uf1, (-3,3) ], eps), ]),
    (sqrt, [([uf1, (2,3) ], eps), ]),
]
def ufuncTester(ufunc, f, interval, tol):
    # Return a test method asserting ufunc(ff) matches the pointwise
    # ufunc(f) on random points mapped into the interval.
    a,b = interval
    ff = Chebfun.initfun_adaptive(f, linspace(a,b,13))
    gg = lambda x: ufunc(f(x))
    GG = ufunc(ff)
    def tester(self):
        # interval(...) maps self.yy from [-1,1] onto [a,b]
        xx = interval(self.yy)
        vscl = GG.vscale
        lscl = sum([fun.size for fun in GG])
        self.assertLessEqual(infnorm(gg(xx)-GG(xx)), vscl*lscl*tol)
    return tester
for (ufunc, [([f, intvl], tol), ]) in ufunc_test_params:
    interval = Interval(*intvl)
    _testfun_ = ufuncTester(ufunc, f, interval, tol)
    _testfun_.__name__ = \
        "test_{}_{}_[{:.1f},..,{:.1f}]".format(
            ufunc.__name__, f.__name__, *intvl)
    setattr(Ufuncs, _testfun_.__name__, _testfun_)
class Evaluation(TestCase):
    """Tests of Chebfun.__call__ semantics: empties, scalars, singletons,
    breakpoints, extrapolation, and dense sampling accuracy."""
    def setUp(self):
        self.f0 = Chebfun.initempty()
        self.f1 = Chebfun.initfun_adaptive(lambda x: x**2, [-1,1])
        self.f2 = Chebfun.initfun_adaptive(lambda x: x**2, [-1,0,1,2])
    def test__call__empty_chebfun(self):
        # evaluating an empty chebfun yields an empty array
        self.assertEqual(self.f0(linspace(-1,1,100)).size, 0)
    def test__call__empty_array(self):
        # evaluating on an empty array yields an empty array
        self.assertEqual(self.f0(array([])).size, 0)
        self.assertEqual(self.f1(array([])).size, 0)
        self.assertEqual(self.f2(array([])).size, 0)
    def test__call__point_evaluation(self):
        # check we get back a scalar for scalar input
        self.assertTrue(isscalar(self.f1(0.1)))
    def test__call__singleton(self):
        # check that the output is the same for the following inputs:
        # array(x), array([x]), [x]
        a = self.f1(array(0.1))
        b = self.f1(array([0.1]))
        c = self.f1([0.1])
        self.assertEqual(a.size, 1)
        self.assertEqual(b.size, 1)
        self.assertEqual(c.size, 1)
        self.assertTrue(equal(a,b).all())
        self.assertTrue(equal(b,c).all())
        self.assertTrue(equal(a,c).all())
    def test__call__breakpoints(self):
        # check we get the values at the breakpoints back
        x1 = self.f1.breakpoints
        x2 = self.f2.breakpoints
        self.assertTrue(equal(self.f1(x1), [1,1]).all())
        self.assertTrue(equal(self.f2(x2), [1,0,1,4]).all())
    def test__call__outside_interval(self):
        # check we are able to evaluate the Chebfun outside the
        # interval of definition
        x = linspace(-3,3,100)
        self.assertTrue(isfinite(self.f1(x)).all())
        self.assertTrue(isfinite(self.f2(x)).all())
    def test__call__general_evaluation(self):
        # dense sampling against the defining function on several domains
        f = lambda x: sin(4*x) + exp(cos(14*x)) - 1.4
        npts = 50000
        dom1 = [-1,1]
        dom2 = [-1,0,1]
        dom3 = [-2,-0.3,1.2]
        ff1 = Chebfun.initfun_adaptive(f, dom1)
        ff2 = Chebfun.initfun_adaptive(f, dom2)
        ff3 = Chebfun.initfun_adaptive(f, dom3)
        x1 = linspace(dom1[0], dom1[-1], npts)
        x2 = linspace(dom2[0], dom2[-1], npts)
        x3 = linspace(dom3[0], dom3[-1], npts)
        self.assertLessEqual(infnorm(f(x1)-ff1(x1)), 5e1*eps)
        self.assertLessEqual(infnorm(f(x2)-ff2(x2)), 2e1*eps)
        self.assertLessEqual(infnorm(f(x3)-ff3(x3)), 5e1*eps)
class Calculus(TestCase):
    """Tests for sum (definite integral), diff, and cumsum."""
    def setUp(self):
        f = lambda x: sin(4*x-1.4)
        # exact derivative and an antiderivative of f, for reference
        self.df = lambda x: 4*cos(4*x-1.4)
        self.If = lambda x: -.25*cos(4*x-1.4)
        self.f1 = Chebfun.initfun_adaptive(f, [-1,1])
        self.f2 = Chebfun.initfun_adaptive(f, [-3,0,1])
        self.f3 = Chebfun.initfun_adaptive(f, [-2,-0.3,1.2])
        self.f4 = Chebfun.initfun_adaptive(f, linspace(-1,1,11))
    def test_sum(self):
        # reference values appear precomputed externally (high-precision
        # integrals of f over each support) -- TODO confirm provenance
        self.assertLessEqual(abs(self.f1.sum()-0.372895407327895),2*eps)
        self.assertLessEqual(abs(self.f2.sum()-0.382270459230604),2*eps)
        self.assertLessEqual(abs(self.f3.sum()-(-0.008223712363936)),2*eps)
        self.assertLessEqual(abs(self.f4.sum()-0.372895407327895),2*eps)
    def test_diff(self):
        # compare f.diff() against the exact derivative on interior points
        xx = linspace(-5,5,10000)
        for f in [self.f1, self.f2, self.f3, self.f4]:
            a, b = f.support
            x = xx[(xx>a)&(xx<b)]
            self.assertLessEqual(infnorm(f.diff()(x)-self.df(x)), 2e3*eps)
    def test_cumsum(self):
        # f.cumsum() should equal If shifted to vanish at the left endpoint
        xx = linspace(-5,5,10000)
        for f in [self.f1, self.f2, self.f3, self.f4]:
            a, b = f.support
            x = xx[(xx>a)&(xx<b)]
            fa = self.If(a)
            self.assertLessEqual(infnorm(f.cumsum()(x)-self.If(x)+fa), 3*eps)
    def test_sum_empty(self):
        f = Chebfun.initempty()
        self.assertEqual(f.sum(), .0)
    def test_cumsum_empty(self):
        If = Chebfun.initempty().cumsum()
        self.assertIsInstance(If, Chebfun)
        self.assertTrue(If.isempty)
    def test_diff_empty(self):
        df = Chebfun.initempty().diff()
        self.assertIsInstance(df, Chebfun)
        self.assertTrue(df.isempty)
class Roots(TestCase):
    """Tests for Chebfun.roots, including roots falling on breakpoints."""
    def setUp(self):
        self.f1 = Chebfun.initfun_adaptive(lambda x: cos(4*pi*x), linspace(-10,10,101))
        self.f2 = Chebfun.initfun_adaptive(lambda x: sin(2*pi*x), linspace(-1,1,5))
        self.f3 = Chebfun.initfun_adaptive(lambda x: sin(4*pi*x), linspace(-10,10,101))
    def test_empty(self):
        rts = Chebfun.initempty().roots()
        self.assertIsInstance(rts, ndarray)
        self.assertEqual(rts.size, 0)
    def test_multiple_pieces(self):
        # cos(4*pi*x) has 80 roots in [-10,10], spaced 0.25 apart
        rts = self.f1.roots()
        self.assertEqual(rts.size, 80)
        self.assertLessEqual(infnorm(rts-arange(-9.875,10,.25)), 10*eps)
    # check we don't get repeated roots at breakpoints
    def test_breakpoint_roots_1(self):
        # sin(2*pi*x) vanishes exactly at each of the 5 breakpoints
        rts = self.f2.roots()
        self.assertEqual(rts.size, 5)
        self.assertLessEqual(infnorm(rts-self.f2.breakpoints), eps)
    # check we don't get repeated roots at breakpoints
    def test_breakpoint_roots_2(self):
        rts = self.f3.roots()
        self.assertEqual(rts.size, 81)
        self.assertLessEqual(infnorm(rts-arange(-10,10.25,.25)), 1e1*eps)
class Plotting(TestCase):
    """Smoke tests: plot and plotcoeffs must run without raising."""
    def setUp(self):
        f = lambda x: sin(4*x) + exp(cos(14*x)) - 1.4
        self.f1 = Chebfun.initfun_adaptive(f, [-1,1])
        self.f2 = Chebfun.initfun_adaptive(f, [-3,0,1])
        self.f3 = Chebfun.initfun_adaptive(f, [-2,-0.3,1.2])
    def test_plot(self):
        for fun in [self.f1, self.f2, self.f3]:
            fig, ax = subplots()
            fun.plot(ax=ax)
    def test_plotcoeffs(self):
        for fun in [self.f1, self.f2, self.f3]:
            fig, ax = subplots()
            fun.plotcoeffs(ax=ax)
class PrivateMethods(TestCase):
    """Tests for the private _break method (re-partitioning a chebfun)."""
    def setUp(self):
        f = lambda x: sin(x-.1)
        self.f1 = Chebfun.initfun_adaptive(f, [-2,0,3])
        self.f2 = Chebfun.initfun_adaptive(f, linspace(-2,3,5))
    # in the test_break_x methods, we check that (1) the newly computed domain
    # is what it should be, and (2) the new chebfun still provides an accurate
    # approximation
    def test__break_1(self):
        altdom = Domain([-2,-1,1,2,3])
        newdom = self.f1.domain.union(altdom)
        f1_new = self.f1._break(newdom)
        self.assertEqual(f1_new.domain, newdom)
        self.assertNotEqual(f1_new.domain, altdom)
        self.assertNotEqual(f1_new.domain, self.f1.domain)
        xx = linspace(-2,3,1000)
        error = infnorm(self.f1(xx)-f1_new(xx))
        self.assertLessEqual(error, 3*eps)
    def test__break_2(self):
        # union with a coarser domain: breakpoints of f1 must be retained
        altdom = Domain([-2,3])
        newdom = self.f1.domain.union(altdom)
        f1_new = self.f1._break(newdom)
        self.assertEqual(f1_new.domain, newdom)
        self.assertNotEqual(f1_new.domain, altdom)
        xx = linspace(-2,3,1000)
        error = infnorm(self.f1(xx)-f1_new(xx))
        self.assertLessEqual(error, 3*eps)
    def test__break_3(self):
        # union with a very fine (1000-point) domain
        altdom = Domain(linspace(-2,3,1000))
        newdom = self.f2.domain.union(altdom)
        f2_new = self.f2._break(newdom)
        self.assertEqual(f2_new.domain, newdom)
        self.assertNotEqual(f2_new.domain, altdom)
        self.assertNotEqual(f2_new.domain, self.f2.domain)
        xx = linspace(-2,3,1000)
        error = infnorm(self.f2(xx)-f2_new(xx))
        self.assertLessEqual(error, 3*eps)
class DomainBreakingOps(TestCase):
    """Container for dynamically generated maximum/minimum tests."""
    pass
# (f, g, domain, test_tolerance) cases; g may be a constant
domainBreakOp_args = [
    (lambda x: x, 0, [-1,1], eps),
    (sin, cos, [-1,1], eps),
    # ([-2,1], eps),
    # ([-1,2], eps),
    # ([-5,9], 35*eps),
]
# add tests for maximum, minimum
def domainBreakOpTester(domainBreakOp, f, g, dom, tol):
    # Return a test method checking that e.g. ff.maximum(gg) agrees
    # pointwise with numpy's maximum(f(xx), g(xx)).
    a, b = dom
    xx = linspace(a,b,1001)
    ff = chebfun(f, dom)
    gg = chebfun(g, dom)
    # convert constant g to a callable
    if isinstance(g, (int, float)):
        ffgg = domainBreakOp(f(xx), g)
    else:
        ffgg = domainBreakOp(f(xx), g(xx))
    # dispatch to the like-named Chebfun method (maximum/minimum)
    fg = getattr(ff, domainBreakOp.__name__)(gg)
    def tester(self):
        vscl = max([ff.vscale, gg.vscale])
        hscl = max([ff.hscale, gg.hscale])
        lscl = max([fun.size for fun in append(ff.funs, gg.funs)])
        self.assertLessEqual(infnorm(fg(xx)-ffgg), vscl*hscl*lscl*tol)
    return tester
for domainBreakOp in (maximum, ):
    for n, args in enumerate(domainBreakOp_args):
        ff, gg, dom, tol = args
        _testfun_ = domainBreakOpTester(domainBreakOp, ff, gg, dom, tol)
        _testfun_.__name__ = "test_{}_{}".format(domainBreakOp.__name__, n)
        setattr(DomainBreakingOps, _testfun_.__name__, _testfun_)
# reset the testsfun variable so it doesn't get picked up by nose
_testfun_ = None
|
# This file is part of the Reproducible Open Benchmarks for Data Analysis
# Platform (ROB).
#
# Copyright (C) 2019 NYU.
#
# ROB is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Helper methods for workflow template parameters."""
from benchtmpl.workflow.parameter.base import TemplateParameter
import benchtmpl.error as err
import benchtmpl.workflow.parameter.declaration as pd
def create_parameter_index(parameters, validate=True):
    """Create instances of template parameters from a list of dictionaries
    containing parameter declarations. The result is a dictionary containing
    the top-level parameters, indexed by their unique identifier.

    Parameters
    ----------
    parameters: list(dict)
        List of dictionaries containing template parameter declarations
    validate: bool, optional
        Flag indicating if given template parameter declarations are to be
        validated against the parameter schema or not.

    Returns
    -------
    dict(benchtmpl.workflow.parameter.base.TemplateParameter)

    Raises
    ------
    benchtmpl.error.InvalidTemplateError
    benchtmpl.error.UnknownParameterError
    """
    result = {}
    for para in parameters:
        # Validate the template parameters if the validate flag is True
        if validate:
            pd.validate_parameter(para)
        # Create a TemplateParameter instance for the parameter. Keep
        # track of children for parameters that are of type DT_LIST or
        # DT_RECORD. Children are added after all parameters have been
        # instantiated.
        p_id = para[pd.LABEL_ID]
        # Ensure that the identifier of all parameters are unique
        if p_id in result:
            raise err.InvalidTemplateError(
                'parameter \'{}\' not unique'.format(p_id)
            )
        c = None
        if para[pd.LABEL_DATATYPE] in [pd.DT_LIST, pd.DT_RECORD]:
            c = []
        result[p_id] = TemplateParameter(pd.set_defaults(para), children=c)
    # Add parameter templates to the list of children for their
    # respective parent (if given). We currently only support one level
    # of nesting.
    for para in parameters:
        if pd.LABEL_PARENT in para:
            parent = para[pd.LABEL_PARENT]
            # 'parent is not None' replaces the non-idiomatic
            # 'not parent is None' (PEP 8 / E714)
            if parent is not None:
                result[parent].add_child(result[para[pd.LABEL_ID]])
    return result
def sort_parameters(parameters):
    """Sort a given list of parameter declarations by the parameter index
    that is part of the declaration. Parameters with the same index value
    are ordered by increasing value of their name.

    Parameters
    ----------
    parameters: list(benchtmpl.workflow.parameter.base.TemplateParameter)
        List of template parameters

    Returns
    -------
    list(benchtmpl.workflow.parameter.base.TemplateParameter)
    """
    def sort_key(para):
        # Primary key: declared index; tie-break on the identifier.
        return para.index, para.identifier
    return sorted(parameters, key=sort_key)
|
import datetime
from urllib.error import URLError
import xmltodict
from django.db import transaction
from Bio import Entrez
from bioseq.models.Taxon import Taxon, TaxonName
from bioseq.models.Term import Term
from bioseq.models.Ontology import Ontology
from bioseq.models.Bioentry import Bioentry
from bioresources.models.Assembly import Assembly
from bioresources.models.Sample import Sample
from bioresources.models.Publication import Publication
from bioresources.models.ExternalId import ExternalId
from bioresources.models.Resource import Resource
from bioresources.models.Organization import Organization
from bioresources.models.ReadsArchive import ReadsArchive
from bioresources.models.Structure import Structure
from bioresources.models.Expression import Expression
from bioresources.models.Affiliation import Affiliation
from bioresources.models.BioProject import BioProject
from bioresources.models.Person import Person
from bioresources.models.ResourceProperty import ResourceProperty, ResourcePropertyValue
def retry(q, n=4):
    """Call the zero-argument callable *q*, retrying on URLError.

    Parameters
    ----------
    q: callable
        Zero-argument callable performing the (network) request.
    n: int, optional
        Maximum number of attempts (default 4).

    Returns
    -------
    The value returned by the first successful call to *q*.

    Raises
    ------
    urllib.error.URLError
        The last error raised by *q* when all *n* attempts fail.
    """
    last_exc = None
    for _ in range(n):
        try:
            return q()
        except URLError as ex:
            # Remember the failure: Python 3 deletes the except-clause
            # variable when the clause exits, so the original `raise ex`
            # after the loop raised NameError instead of the URLError.
            last_exc = ex
    raise last_exc
def ncbi_link(dbfrom, id, linkname):
    # TODO: unimplemented stub. Presumably intended to wrap Entrez.elink
    # (dbfrom/id/linkname mirror elink's parameters) -- confirm before use.
    # NOTE(review): parameter `id` shadows the builtin.
    pass
def scopus_publication(article):
    """Get or build an (unsaved) Publication from a Scopus search entry.

    Lookup order: scopus_id, then DOI, then truncated title; when none
    match, a new unsaved Publication is instantiated. In every case the
    Scopus-derived identifier fields are (re)assigned on the returned
    instance. The caller is responsible for saving.

    Parameters
    ----------
    article: dict
        One entry of a Scopus search result (Elsevier JSON field names).
    """
    # filter(...).first() issues a single query per lookup instead of the
    # exists()/get() pair used previously (two queries each).
    publication = Publication.objects.filter(
        scopus_id=article['dc:identifier']).first()
    if publication is None and "prism:doi" in article:
        publication = Publication.objects.filter(
            doi=article['prism:doi']).first()
    if publication is None:
        # title column appears capped at 350 chars, hence the slice
        publication = Publication.objects.filter(
            name=article["dc:title"][:350]).first()
    if publication is None:
        publication = Publication(
            type=Resource.RESOURCE_TYPES.PUBLICATION,
            name=article["dc:title"][:350],
            date_of_publication=datetime.datetime.strptime(
                article['prism:coverDate'], "%Y-%m-%d"),
        )
    # always refresh the identifier/metadata fields from the Scopus record
    publication.scopus_id = article['dc:identifier']
    publication.electronic_id = article["eid"]
    if "dc:description" in article:
        publication.description = article["dc:description"]
    if 'pubmed-id' in article:
        publication.pubmed_id = article['pubmed-id']
    if "prism:doi" in article:
        publication.doi = article["prism:doi"]
    if 'prism:issn' in article:
        publication.issn = article['prism:issn']
    return publication
def scopus_affiliation(affiliation):
    # Map one Scopus affiliation dict to an Organization row.
    afcountry = affiliation["affiliation-country"]
    org = Organization(name=affiliation["affilname"],
                       country=afcountry,
                       city=affiliation["affiliation-city"],
                       source=Organization.objects.get(name=Organization.SCOPUS)
                       )
    # TODO country detection based on the city
    if not affiliation["affiliation-country"]:
        if any([x in str(org.city) for x in [
            "CABA", "Buenos Aires", "Rosario"
        ]]):
            org.country = "Argentina"
    if "afid" in affiliation:
        org.scopus_id = affiliation['afid']
        # Deduplicate by scopus_id: reuse the stored row if it exists,
        # otherwise persist the new one.
        if Organization.objects.filter(scopus_id=org.scopus_id).exists():
            org = Organization.objects.get(scopus_id=org.scopus_id)
        else:
            org.save()
    # NOTE(review): organizations without an "afid" are returned unsaved --
    # confirm callers tolerate an unsaved instance here.
    return org
def scopus_author(author, publication, arg):
    # Map one Scopus author dict to a Person, deduplicated by scopus_id,
    # and link it to *publication* through an Affiliation row.
    person = Person(surname=author["surname"],
                    name=author["given-name"] if author["given-name"] else "",
                    scopus_id=author["authid"])
    if Person.objects.filter(scopus_id=person.scopus_id).exists():
        person = Person.objects.get(scopus_id=person.scopus_id)
    else:
        person.save()
    if ("afid" in author):
        # Organizations were created beforehand (scopus_affiliation), so a
        # lookup by scopus_id is expected to succeed here.
        aff = Affiliation(resource=publication, author=person)
        aff.save()
        for affdict in author["afid"]:
            aff.organizations.add(Organization.objects.get(scopus_id=affdict["$"]))
        aff.save()
        # NOTE(review): `arg` holds scopus_ids collected by the caller; this
        # re-save when the author matches one of them looks like a no-op --
        # confirm intended behavior.
        if [x for x in author["afid"] if x["$"] in arg]:
            person.save()
    return person
def scopus_extended_publication(article):
    # Persist a publication together with its affiliations and authors.
    publication = scopus_publication(article)
    publication.save(force_insert=True)
    arg = []  # TODO: country-based filtering criterion?
    for affiliation in article["affiliation"]:
        org = scopus_affiliation(affiliation)
        # if org.country == "Argentina" and org.scopus_id:
        arg.append(org.scopus_id)
    #
    # if not arg:
    #     # for a in article["affiliation"]:
    #     #     if not a["affiliation-country"]:
    #     #         if a["affiliation-city"]:
    #     #             pepe.append(a["affiliation-city"])
    #     return
    if "author" in article:
        for author in article["author"]:
            scopus_author(author, publication, arg)
    return publication
class NCBIBioSampleAdapter:
    """Fetches NCBI BioSample esummary records and maps them to Sample rows."""
    def fetch(self, ncbi_id):
        # single-id convenience wrapper around fetch_list
        return self.fetch_list(ncbi_id)[0]
    def fetch_list(self, ncbi_ids):
        # validate=False: the returned esummary payload embeds raw XML that
        # does not pass Entrez's strict DTD validation
        slist = Entrez.read(retry(lambda: Entrez.esummary(db="biosample", id=ncbi_ids)),validate=False)["DocumentSummarySet"][
            "DocumentSummary"]
        # each record carries the BioSample XML as a string; parse it to a dict
        return [xmltodict.parse(biosampledata["SampleData"])["BioSample"] for biosampledata in slist]
    def adapt(self, summaryData) -> Sample:
        # build an *unsaved* Sample from the parsed BioSample XML dict
        acc = summaryData["@accession"]
        desc = summaryData["Description"]["Title"]
        s = Sample(name=acc, description=desc, type=Sample.TYPE)
        if ("Organism" in summaryData["Description"]) and ("@taxonomy_id" in summaryData["Description"]["Organism"]):
            tax = summaryData["Description"]["Organism"]['@taxonomy_id']
            # first() tolerates taxa missing from the local Taxon table
            s.ncbi_tax = Taxon.objects.filter(ncbi_taxon_id=int(tax)).first()
        try:
            s.publication_date = datetime.datetime.strptime(summaryData["@publication_date"].split("T")[0], "%Y-%m-%d")
        except ValueError:
            # malformed date: best effort, leave the field unset
            pass
        try:
            s.update_date = datetime.datetime.strptime(summaryData["@last_update"].split("T")[0], "%Y-%m-%d")
        except ValueError:
            pass
        return s
    def save(self, summaryData, ncbi_id):
        # persist the Sample plus its attributes and NCBI external ids
        s = self.adapt(summaryData)
        s.save()
        ncbi_org = Organization.objects.get(name="NCBI")
        s.publishers.add(ncbi_org)
        self.save_attributes(summaryData,s)
        ExternalId(resource=s, organization=ncbi_org,
                   identifier=s.name, type="accession").save()
        ExternalId(resource=s, organization=ncbi_org,
                   identifier=ncbi_id, type="identifier").save()
        return s
    def save_attributes(self, summaryData,s):
        # store each non-empty BioSample attribute as a term/property/value
        ncbi_org = Organization.objects.get(name="NCBI")
        ontology = Ontology.objects.get_or_create(name="NCBI sample", definition="Attributes of an NCBI Sample")[0]
        records = summaryData["Attributes"]["Attribute"]
        # xmltodict yields a bare dict when there is a single attribute
        if isinstance(records, dict):
            records = [records]
        for x in records:
            # skip empty or placeholder ('.') values
            if x["#text"].strip() and (x["#text"] != "."):
                term = Term.objects.get_or_create(
                    ontology=ontology, name=x["@attribute_name"], identifier=x["@attribute_name"][:255])[0]
                prop = ResourceProperty.objects.get_or_create(term=term, resource=s, organization=ncbi_org)[0]
                ResourcePropertyValue.objects.create(property=prop, value=x["#text"][:200])
class NCBISRAAdapter:
    """Fetches NCBI SRA esummary records and maps them to ReadsArchive rows."""
    def fetch(self, ncbi_id):
        return Entrez.read(retry(lambda: Entrez.esummary(db="sra", id=ncbi_id)))[0]
    def fetch_list(self, ncbi_ids):
        return Entrez.read(retry(lambda: Entrez.esummary(db="sra", id=ncbi_ids)))
    def adapt(self, summaryData):
        # sra_record = Entrez.read(retry(lambda: Entrez.esummary(db="sra", id=ncbi_id)))
        # ExpXml is an XML fragment without a single root; wrap it so
        # xmltodict can parse it
        expData = xmltodict.parse("<xml>" + summaryData["ExpXml"] + "</xml>")["xml"]
        acc = expData["Experiment"]["@acc"]
        # reuse an existing record with the same accession, if any
        qs = ReadsArchive.objects.filter(external_ids__identifier=acc, external_ids__type="accession")
        if qs.exists():
            return qs.first()
        s = ReadsArchive(type=Resource.RESOURCE_TYPES.READS, name=acc, description=expData["Summary"]["Title"])
        # NOTE(review): manual id assignment (max id + 1) is race-prone under
        # concurrent inserts -- confirm why the DB sequence is bypassed here.
        s.id = Resource.objects.latest('id').id + 1
        try:
            s.release_date = datetime.datetime.strptime(summaryData["CreateDate"].split(" ")[0], "%Y/%m/%d")
        except ValueError:
            # malformed date: best effort, leave the field unset
            pass
        try:
            s.update_date = datetime.datetime.strptime(summaryData["UpdateDate"].split(" ")[0], "%Y/%m/%d")
        except ValueError:
            pass
        if ("Organism" in expData) and ("@taxid" in expData["Organism"]):
            s.ncbi_tax = Taxon.objects.filter(ncbi_taxon_id=int(expData["Organism"]["@taxid"])).first()
        return s
    def save(self, summaryData, ncbi_id):
        # persist the ReadsArchive plus its NCBI external ids
        s = self.adapt(summaryData)
        ncbi_org = Organization.objects.get(name="NCBI")
        s.type = s.__class__.TYPE
        s.save(force_insert=True)
        s.publishers.add(ncbi_org)
        ExternalId(resource=s, organization=ncbi_org,
                   identifier=s.name, type="accession").save(force_insert=True)
        ExternalId(resource=s, organization=ncbi_org,
                   identifier=ncbi_id, type="identifier").save(force_insert=True)
        return s
class NCBIPmcAdapter:
    """Fetches PubMed Central esummary records and adapts them to
    (unsaved) Publication instances."""
    def fetch(self, ncbi_id):
        # single-id convenience wrapper around fetch_list
        records = self.fetch_list(ncbi_id)
        return records[0]
    def fetch_list(self, ncbi_ids):
        query = lambda: Entrez.esummary(db="pmc", id=ncbi_ids)
        return Entrez.read(retry(query))
    def adapt(self, summaryData, ncbi_id):
        publication = Publication(name=summaryData["Title"], description="")
        publication.type = publication.__class__.TYPE
        return publication
class NCBIPubmedAdapter:
    """Fetches PubMed esummary records and adapts them to (unsaved)
    Publication instances."""
    def fetch(self, ncbi_id):
        # single-id convenience wrapper around fetch_list
        records = self.fetch_list(ncbi_id)
        return records[0]
    def fetch_list(self, ncbi_ids):
        query = lambda: Entrez.esummary(db="pubmed", id=ncbi_ids)
        return Entrez.read(retry(query))
    def adapt(self, summaryData, ncbi_id):
        publication = Publication(name=summaryData["Title"], description="")
        publication.type = publication.__class__.TYPE
        return publication
class NCBIGeneAdapter:
    """Fetches NCBI Gene esummary records and adapts them to (unsaved)
    Bioentry instances."""
    def fetch(self, ncbi_id):
        # single-id convenience wrapper around fetch_list
        records = self.fetch_list(ncbi_id)
        return records[0]
    def fetch_list(self, ncbi_ids):
        query = lambda: Entrez.esummary(db="gene", id=ncbi_ids)
        response = Entrez.read(retry(query))
        return response["DocumentSummarySet"]["DocumentSummary"]
    def adapt(self, summaryData, ncbi_id):
        return Bioentry(name=summaryData["Name"], description=summaryData["Summary"])
class NCBIProteinAdapter:
    # Adapts NCBI "protein" esummary records into Bioentry objects.
    def fetch(self, ncbi_id):
        # Delegate to fetch_list and unwrap the single record.
        return self.fetch_list(ncbi_id)[0]
    def fetch_list(self, ncbi_ids):
        # esummary v2.0-style payload nested under DocumentSummarySet.
        return Entrez.read(retry(lambda: Entrez.esummary(db="protein", id=ncbi_ids)))["DocumentSummarySet"][
            "DocumentSummary"]
    def adapt(self, summaryData):
        # NOTE(review): "title"/"abstract" look like pubmed-style keys; the
        # protein DocumentSummary normally exposes "Title"/"Caption" instead.
        # Confirm against a live record — this may be a copy-paste from the
        # pubmed adapter.
        return Bioentry(name=summaryData["title"], description=summaryData["abstract"])
class NCBINuccoreAdapter:
    """Adapts NCBI "nuccore" esummary records into Bioentry objects."""

    def fetch(self, ncbi_id):
        """Fetch a single nuccore summary by id."""
        return self.fetch_list(ncbi_id)[0]

    def fetch_list(self, ncbi_ids):
        """Fetch summaries for one or more nuccore ids."""
        handle = retry(lambda: Entrez.esummary(db="nuccore", id=ncbi_ids))
        return Entrez.read(handle)

    def adapt(self, summaryData):
        """Build an unsaved Bioentry keyed by the record's GI number."""
        return Bioentry(name=str(summaryData["Gi"]), description=summaryData["Title"])
class NCBIBioProject:
    """Adapts NCBI "bioproject" esummary records into BioProject resources."""

    def fetch(self, ncbi_id):
        """Fetch a single BioProject summary by id."""
        return self.fetch_list(ncbi_id)[0]

    def fetch_list(self, ncbi_ids):
        """Fetch DocumentSummary records for one or more BioProject ids."""
        handle = retry(lambda: Entrez.esummary(db="bioproject", id=ncbi_ids))
        return Entrez.read(handle)["DocumentSummarySet"]["DocumentSummary"]

    def adapt(self, summaryData):
        """Build an unsaved BioProject from an esummary record."""
        project = BioProject(name=str(summaryData["Project_Acc"]), description=summaryData["Project_Title"])
        project.type = project.__class__.TYPE
        return project

    def save(self, summaryData, ncbi_id):
        """Persist the adapted project and register its NCBI ids."""
        project = self.adapt(summaryData)
        project.save(force_insert=True)
        ncbi_org = Organization.objects.get(name="NCBI")
        project.publishers.add(ncbi_org)
        for identifier, id_type in ((project.name, "accession"), (ncbi_id, "identifier")):
            ExternalId(resource=project, organization=ncbi_org,
                       identifier=identifier, type=id_type).save(force_insert=True)
        return project
class NCBIAssemblyAdapter:
    # Adapts NCBI "assembly" esummary records into Assembly resources.
    def fetch(self, ncbi_id):
        # Delegate to fetch_list and unwrap the single record.
        return self.fetch_list(ncbi_id)[0]
    def fetch_list(self, ncbi_ids):
        return Entrez.read(retry(lambda: Entrez.esummary(db="assembly", id=ncbi_ids)))["DocumentSummarySet"][
            "DocumentSummary"]
    def adapt(self, summaryData):
        """Build an unsaved Assembly from an esummary record."""
        name = summaryData["AssemblyName"]
        acc = summaryData["AssemblyAccession"]
        tax = Taxon.objects.filter(ncbi_taxon_id=int(summaryData["Taxid"])).first()
        # Invert the model's choice maps so the human-readable esummary
        # values can be looked up back to their stored codes.
        level_dict = {v: k for k, v in dict(Assembly.ASSEMBLY_LEVEL).items()}
        type_dict = {v: k for k, v in dict(Assembly.ASSEMBLY_TYPES).items()}
        s = Assembly(type=Resource.RESOURCE_TYPES.ASSEMBLY, name=acc + "_" + name,
                     description=summaryData["AssemblyDescription"],
                     ncbi_tax=tax,
                     ncbi_org=summaryData["SubmitterOrganization"],
                     level=level_dict[summaryData["AssemblyStatus"].lower()],
                     assembly_type=type_dict[summaryData["AssemblyType"].lower()],
                     species_name=summaryData["SpeciesName"])
        s.url = "https://www.ncbi.nlm.nih.gov/assembly/" + acc
        s.type = s.__class__.TYPE
        try:
            # Strain/infraspecies info is optional; IndexError when the
            # InfraspeciesList is empty.
            s.intraspecific_name = str(
                summaryData["Biosource"]["InfraspeciesList"][0]["Sub_type"]) + " " + \
                summaryData["Biosource"]["InfraspeciesList"][0]["Sub_value"]
        except IndexError:
            pass
        # Dates are "YYYY/MM/DD hh:mm"-ish; keep only the date part and
        # silently skip unparseable values.
        try:
            s.release_date = datetime.datetime.strptime(summaryData["SeqReleaseDate"].split(" ")[0],
                                                        "%Y/%m/%d")
        except ValueError:
            pass
        try:
            s.update_date = datetime.datetime.strptime(summaryData["LastUpdateDate"].split(" ")[0],
                                                       "%Y/%m/%d")
        except ValueError:
            pass
        return s
    def save(self, summaryData, ncbi_id):
        # Persist and attach accession + numeric NCBI id as external ids.
        s = self.adapt(summaryData)
        s.save(force_insert=True)
        ncbi_org = Organization.objects.get(name="NCBI")
        s.publishers.add(ncbi_org)
        ExternalId(resource=s, organization=ncbi_org,
                   identifier=s.name, type="accession").save(force_insert=True)
        ExternalId(resource=s, organization=ncbi_org,
                   identifier=ncbi_id, type="identifier").save(force_insert=True)
        return s
class NCBIStructureAdapter:
    """Adapts NCBI "structure" esummary records into Structure resources."""

    def fetch(self, ncbi_id):
        """Fetch a single structure summary (schema not strictly validated)."""
        return Entrez.read(retry(lambda: Entrez.esummary(db="structure", id=ncbi_id)), validate=False)[0]

    def fetch_list(self, ncbi_ids):
        """Fetch summaries for one or more structure ids."""
        fetch = Entrez.read(retry(lambda: Entrez.esummary(db="structure", id=ncbi_ids)))
        return fetch

    def adapt(self, summaryData):
        """Build an unsaved Structure from an esummary record.

        Parses the deposit date when present and resolves the first listed
        organism name to a taxon, skipping it when unknown.
        """
        acc = summaryData["PdbAcc"]
        s = Structure(type=Resource.RESOURCE_TYPES.STRUCTURE, name=acc, description=summaryData["PdbDescr"],
                      method=summaryData["ExpMethod"])
        s.type = s.__class__.TYPE
        try:
            s.deposit_date = datetime.datetime.strptime(summaryData["PdbDepositDate"],
                                                        "%Y/%m/%d %H:%M")  # 2017/08/10 00:00
        except ValueError:
            pass
        if "OrganismList" in summaryData and summaryData["OrganismList"]:
            # Fix: .get() raises DoesNotExist for unknown names, which made
            # the None-guard below dead code; use filter().first() like the
            # other adapters so unknown organisms are skipped instead.
            tax = TaxonName.objects.filter(name=summaryData["OrganismList"][0]).first()
            if tax:
                s.ncbi_tax = tax.taxon
        return s

    def save(self, summaryData, ncbi_id):
        """Persist the adapted structure and register its NCBI ids."""
        ncbi_org = Organization.objects.get(name="NCBI")
        s = self.adapt(summaryData)
        s.save()
        s.publishers.add(ncbi_org)
        ExternalId(resource=s, organization=ncbi_org,
                   identifier=s.name, type="accession").save()
        ExternalId(resource=s, organization=ncbi_org,
                   identifier=ncbi_id, type="identifier").save()
        return s
class NCBIGDSAdapter:
    """Adapts NCBI GEO DataSets ("gds") esummary records into Expression resources."""

    def fetch(self, ncbi_id):
        """Fetch a single GDS summary by id."""
        return Entrez.read(retry(lambda: Entrez.esummary(db="gds", id=ncbi_id)))[0]

    def fetch_list(self, ncbi_ids):
        """Fetch summaries for one or more GDS ids."""
        return Entrez.read(retry(lambda: Entrez.esummary(db="gds", id=ncbi_ids)))

    def adapt(self, summaryData) -> Expression:
        """Build an unsaved Expression from an esummary record.

        Parses the publication date when present and resolves the first
        taxon name, skipping it when unknown.
        """
        acc = str(summaryData["Accession"])
        s = Expression(type=Resource.RESOURCE_TYPES.EXPRESSION, name=acc,
                       description=summaryData["title"] + "." + summaryData["summary"],
                       gdstype=summaryData["gdsType"])
        s.type = s.__class__.TYPE
        if "OrganismList" in summaryData:
            s.ncbi_org = "||".join(summaryData["OrganismList"])
        if "ExpMethod" in summaryData:
            s.method = str(summaryData["ExpMethod"])
        try:
            s.pdat = datetime.datetime.strptime(summaryData["PDAT"], "%Y/%m/%d")  # e.g. 2017/08/10
        except ValueError:
            pass
        # Fix: .get() raises DoesNotExist for unknown taxon names, making the
        # guard below dead code; use filter().first() like the other adapters
        # so unknown taxa are skipped instead of crashing the import.
        tax = TaxonName.objects.filter(name=summaryData["taxon"].split(";")[0]).first()
        if tax:
            s.ncbi_tax = tax.taxon
        return s

    def save(self, summaryData, ncbi_id):
        """Persist the adapted resource and register its NCBI ids."""
        s = self.adapt(summaryData)
        s.save(force_insert=True)
        ncbi_org = Organization.objects.get(name="NCBI")
        s.publishers.add(ncbi_org)
        ExternalId(resource=s, organization=ncbi_org,
                   identifier=s.name, type="accession").save()
        ExternalId(resource=s, organization=ncbi_org,
                   identifier=ncbi_id, type="identifier").save()
        return s
|
import mxnet as mx
import mxnet.ndarray as nd
data_shape = 304  # input image edge length fed to the detector
batch_size = 32  # images per training batch
rgb_mean = nd.array([123, 117, 104])  # per-channel RGB mean for normalisation
def get_iterators(data_shape, batch_size):
    """Build the VOC2012 training detection iterator (typical args: 256, 32)."""
    # Random crops must keep at least half of each object visible.
    train_iter = mx.image.ImageDetIter(
        batch_size=batch_size,
        data_shape=(3, data_shape, data_shape),
        path_imgrec='./REC_Data/voc2012.rec',
        path_imgidx='./REC_Data/voc2012.idx',
        shuffle=True,
        mean=True,
        rand_crop=1,
        min_object_covered=0.5,
        max_attempts=200)
    # val_iter = image.ImageDetIter(
    #     batch_size=batch_size,
    #     data_shape=(3, data_shape, data_shape),
    #     path_imgrec=data_dir+'val.rec',
    #     shuffle=False,
    #     mean=True)
    return train_iter
# train_data, test_data, class_names, num_class = \
train_data = get_iterators(data_shape, batch_size)
batch = train_data.next()
# Label shape: (32, 1, 5)
# 1: only one object per image here
# 5: first element is the object's class id (-1 marks an invalid object);
#    the remaining 4 are the bounding box, normalised to 0~1
# With multiple objects: list[nd(batch_size, num_objects, object_info)]
print(batch)
# Data shape: list[nd(batch_size, channel, width, height)]
print(batch.data[0].shape)
print(batch.data[0])
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# .asnumpy() is much faster than going through np.array
plt.imshow(batch.data[0][2].asnumpy().transpose(1, 2, 0)/255.)
currentAxis = plt.gca()
for i in range(6):
    # Scale the normalised box back to pixel coordinates; skip padding
    # entries (negative coordinates mark invalid objects).
    box = batch.label[0][2][i][1:].asnumpy()*300
    if any(box < 0):
        continue
    print(int(batch.label[0][2][i][0].asscalar()))
    rect = patches.Rectangle((box[1], box[0]), box[3]-box[1], box[2]-box[0],
                             linewidth=1, edgecolor='g', facecolor='none')
    currentAxis.add_patch(rect)
plt.show()
|
from django.shortcuts import render
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import auth
from models import Reply_Thread, Reply, UserData
from django.core.mail import send_mail
from forms import ReplyForm
import outmail
def base(request):
    """Redirect to mailbox."""
    # The app has no landing page of its own; always send users to the box.
    return HttpResponseRedirect('/mail/box/')
def mailbox(request):
    """Render the mailbox template."""
    user_pk = request.session['pk']
    # All threads involving the user (as bulletin creator or as replier),
    # most recently updated first.
    mail = (Reply_Thread.objects.filter(bulletin__creator__pk=user_pk)
            | Reply_Thread.objects.filter(replier__pk=user_pk))
    mail = mail.order_by("-update")
    # Drop self-threads, i.e. the user replying to their own bulletin.
    self_thread_ids = [t.id for t in mail if t.replier == t.bulletin.creator]
    mail = mail.exclude(id__in=self_thread_ids)
    return render(request, 'mailbox.html', {'mail': mail})
def thread(request, pk):
    """Render a single thread's info, usually for the mailbox.

    Only the bulletin creator and the replier may view the thread; the
    other party's unread replies are marked read and their ids handed to
    the template so they can be highlighted.
    """
    thread = Reply_Thread.objects.get(id=pk)
    info = {}
    #You have to belong to the thread to access it!
    if request.session['pk'] in [thread.bulletin.creator.pk, thread.replier.pk]:
        info.update({'thread': thread})
        new = []
        for reply in thread.reply_set.exclude(sender__id=request.session['pk']).filter(read=False):
            new.append(reply.id)
            reply.read = True
            reply.save()
        #Handle request to add replies to the thread
        if request.method == 'POST':
            form = ReplyForm(request.POST)
            if form.is_valid():
                cleaned_data = form.clean()
                public = request.POST['visibility'] == 'Public'
                # Fix: was UserData.objects.get(pk==request.session['pk']),
                # which raised a NameError (undefined name ``pk`` compared,
                # then passed positionally) -- a keyword lookup was intended.
                user = UserData.objects.get(pk=request.session['pk'])
                message = cleaned_data['message']
                outmail.replyWithThread(thread, message, user, public)
        #Filling out the necessary context
        form = ReplyForm()
        info.update({'form': form, 'new': new})
    return render(request, 'elements/replythread.html', info)
def newmail(request):
    """Return a plain-text summary of unread replies: "(count) pk pk ...".

    An empty string is returned when nothing is unread.
    """
    user_pk = request.session['pk']
    unread = (Reply.objects.filter(read=False)
              .filter(thread__bulletin__creator=user_pk)
              .exclude(sender=user_pk)) | \
             (Reply.objects.filter(read=False)
              .filter(thread__replier=user_pk)
              .exclude(sender=user_pk))
    count = len(unread)
    summary = "(%d)" % count if count >= 1 else ""
    for reply in unread:
        summary = "%s %d" % (summary, reply.thread.pk)
    return HttpResponse(summary)
def intro(request):
    """Render the mail-app introduction/help page."""
    return render(request, 'intros/mailintro.html')
from django.conf.urls import patterns, url
# URLconf for the mail app (legacy Django ``patterns`` style). Kept at the
# bottom so the view callables above are already defined.
urls = patterns('',
    url(r'^$', base),
    url(r'^box/$', mailbox),
    url(r'^thread/help/$', intro),
    url(r'^thread/(?P<pk>\d+)/$', thread),
    url(r'^newmail/$', newmail),
)
|
def merge_sort(arr):
    """Return a new list with the elements of ``arr`` in ascending order.

    Classic top-down merge sort. Fixes in this revision:
    - ``any(lst)`` was used as an emptiness test, which is wrong for falsy
      elements (e.g. merge_sort([1, 0]) returned [1, 0]);
    - the base case merged a length-2 half without sorting it
      (merge_sort([1, 3, 2]) returned [1, 3, 2]);
    - ``pop(0)`` made each merge quadratic; index cursors are O(n).
    """
    if len(arr) <= 1:
        return list(arr)
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    merged = []
    i = j = 0
    # Standard two-way merge of the sorted halves.
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
print(merge_sort([3,2,6,1,5,4,16,11,13,14,12,20,18,17,19,15,8,7,9])) |
#!/usr/bin/env python3
"""
.. module:: testReweighting
:synopsis: Tests the function of lifetime reweighting
.. moduleauthor:: Alicia Wongel <alicia.wongel@gmail.com>
"""
import sys
sys.path.insert(0,"../")
import unittest
from smodels.share.models import SMparticles, mssm
from smodels.theory.branch import Branch
from smodels.theory.element import Element
from smodels.tools.reweighting import calculateProbabilities,reweightFactorFor
from smodels.tools.physicsUnits import GeV
class ReweightingTest(unittest.TestCase):
    # Exercises the smodels lifetime-reweighting helpers.
    def testcalculateProbabilities(self):
        # A quasi-stable particle (width ~1e-30 GeV) must come out entirely
        # long-lived: no prompt and no displaced fraction.
        gluino = mssm.gluino.copy()
        gluino.totalwidth = 1.*10**(-30)*GeV
        prob = calculateProbabilities(gluino.totalwidth.asNumber(GeV),
                            Leff_inner=0.000769,Leff_outer=7.0)
        F_long, F_prompt, F_displaced = prob['F_long'],prob['F_prompt'],prob['F_displaced']
        self.assertAlmostEqual(F_long, 1.)
        self.assertEqual(F_prompt, 0.)
        self.assertAlmostEqual(F_displaced, 0.)
    def testreweightFactorFor(self):
        # Reference prompt/displaced fractions for a stop of width 1e-13 GeV.
        n1 = mssm.n1.copy()
        n1.totalwidth = 0.*GeV
        st1 = mssm.st1.copy()
        st1.totalwidth = 1e-13*GeV
        F_prompt = 0.3228249017964917
        Fdisp = 0.6771750982035083
        gluino = mssm.gluino.copy()
        gluino.totalwidth = 1.*10**(-30)*GeV
        t = SMparticles.t
        branch1 = Branch()
        branch1.oddParticles = [n1]
        branch2 = Branch()
        branch2.oddParticles = [gluino]
        # Element with only (quasi-)stable odd particles: fully prompt.
        el1 = Element([branch1,branch2])
        f = reweightFactorFor(el1, 'prompt')
        self.assertAlmostEqual(f,1.,places=3)
        f = reweightFactorFor(el1, 'displaced')
        self.assertAlmostEqual(f,0.,places=3)
        # Element containing the finite-width stop: fractions split.
        branch3 = Branch()
        branch3.oddParticles = [st1,n1]
        branch3.evenParticles = [[t]]
        el2 = Element([branch1,branch3])
        f = reweightFactorFor(el2, resType='prompt')
        self.assertAlmostEqual(f,F_prompt,places=3)
        f = reweightFactorFor(el2, resType='displaced')
        self.assertAlmostEqual(f,Fdisp,places=3)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/python3
def format_mac_to_type_1(addr):
    """
    Transform MAC address to specified format.
    example:
    00-2B-67-59-47-0E --> 002b.6759.470e
    00:2B:67:59:47:0E --> 002b.6759.470e
    """
    # Strip whitespace and delimiters, lower-case the hex digits, then
    # join groups of four characters with dots.
    hex_digits = addr.strip().replace("-", "").replace(":", "").lower()
    groups = [hex_digits[pos:pos + 4] for pos in range(0, len(hex_digits), 4)]
    return ".".join(groups)
if __name__ == '__main__':
    # Quick smoke test / usage example.
    print(format_mac_to_type_1("00-2B-67-59-47-0E"))
|
class Game:
    """Scores a game of ten-pin bowling from a flat list of throws."""

    def __init__(self):
        self.throwBalls = []

    def throwBall(self, number):
        """Record the number of pins knocked down by one throw."""
        self.throwBalls.append(number)

    def isStrike(self):
        """True when the current throw knocked down all ten pins."""
        return self.throwBalls[self.ball] == 10

    def isSpare(self):
        """True when the current two throws together knock down ten pins."""
        return self.throwBalls[self.ball] + self.throwBalls[self.ball + 1] == 10

    def scoreStrike(self):
        """Score a strike frame: 10 plus the next two throws; advance one throw."""
        bonus = self.throwBalls[self.ball + 1] + self.throwBalls[self.ball + 2]
        self.score += 10 + bonus
        self.ball += 1
        return True

    def scoreSpare(self):
        """Score a spare frame: 10 plus the following throw; advance two throws."""
        self.score += 10 + self.throwBalls[self.ball + 2]
        self.ball += 2
        return True

    def scoreNormal(self):
        """Score an open frame: the frame's two throws; advance two throws."""
        self.score += self.throwBalls[self.ball] + self.throwBalls[self.ball + 1]
        self.ball += 2
        return True

    def getScore(self, turnNumber):
        """Return the total score after ``turnNumber`` frames."""
        self.score = 0
        self.ball = 0
        for _ in range(turnNumber):
            # A frame is a strike, else a spare, else an open frame.
            if self.isStrike():
                self.scoreStrike()
            elif self.isSpare():
                self.scoreSpare()
            else:
                self.scoreNormal()
        return self.score
#!/bin/python
import random
def available_moves(board):
    """Return the indices of all unclaimed ("_") cells."""
    return [idx for idx, cell in enumerate(board) if cell == "_"]
def get_squares(board, player):
    """Return the indices of all cells held by ``player``."""
    return [idx for idx, cell in enumerate(board) if cell == player]
def make_move(board, position, player):
    # Place player's marker in-place; the board list is mutated, nothing
    # is returned. (An equivalent re-definition later in this file shadows
    # this one at import time.)
    board[position] = player
def get_enemy(player):
    """Return the opposing marker ('O' for 'X', otherwise 'X')."""
    return 'O' if player == 'X' else 'X'
def X_won(board):
    # Convenience wrapper around winner().
    return winner(board) == 'X'
def O_won(board):
    # Convenience wrapper around winner().
    return winner(board) == 'O'
def winner(board):
    """Return 'X' or 'O' if that player holds a winning line, else None."""
    winning_combos = (
        [0, 1, 2], [3, 4, 5], [6, 7, 8],
        [0, 3, 6], [1, 4, 7], [2, 5, 8],
        [0, 4, 8], [2, 4, 6])
    for player in ('X', 'O'):
        # Cells currently held by this player.
        held = {idx for idx, cell in enumerate(board) if cell == player}
        for combo in winning_combos:
            if all(pos in held for pos in combo):
                return player
    return None
def complete(board):
    """The game is over when the board is full or someone has won."""
    return "_" not in board or winner(board) is not None
def determine(board, player):
    """Choose a move (board index 0-8) for ``player``.

    Priority: a forced win/block, then two opening-book shortcuts, then
    an alpha-beta search over the remaining moves (ties broken randomly).
    """
    # Take an immediate win or block an immediate loss first.
    # (Previously get_bot_move was called twice; once is enough, since the
    # board is left unmodified.)
    forced = get_bot_move(board, player)
    if forced:
        return forced - 1
    # Opening book: we hold the centre and the enemy placed one stone --
    # answer with a corner not aligned with their stone.
    if len(available_moves(board)) == 7 and len(get_squares(board, get_enemy(player))) == 1 and board[4] == player:
        enemy_squares = get_squares(board, get_enemy(player))
        if 2 in enemy_squares:
            return 0
        else:
            return 2
    # Opening book: on an empty board always take the centre.
    if len(available_moves(board)) == 9:
        return 4
    # Search: X minimises and O maximises the alpha-beta value.
    best = 2 if player == "X" else -2
    choices = []
    for move in available_moves(board):
        make_move(board, move, player)
        val = alphabeta(board, get_enemy(player), -2, 2)
        make_move(board, move, "_")
        if (val < best) and (player == "X") or (val > best) and (player == "O"):
            best = val
            choices = [move]
        elif val == best:
            choices.append(move)
    return random.choice(choices)
def alphabeta(node, player, alpha, beta):
    # Minimax with alpha-beta pruning. Terminal scores: X win = -1,
    # draw = 0, O win = +1; O maximises, X minimises.
    if complete(node):
        if X_won(node):
            return -1
        elif tied(node):
            return 0
        elif O_won(node):
            return 1
    for move in available_moves(node):
        make_move(node, move, player)
        val = alphabeta(node, get_enemy(player), alpha, beta)
        make_move(node, move, "_")  # undo the trial move
        if player == 'O':
            if val > alpha:
                alpha = val
            if alpha >= beta:
                return beta  # prune: opponent already has a better option
        else:
            if val < beta:
                beta = val
            if beta <= alpha:
                return alpha  # prune
    if player == 'O':
        return alpha
    else:
        return beta
def is_winner(board, marker):
    """Return True if ``marker`` occupies any complete winning line."""
    lines = ([6, 7, 8], [3, 4, 5], [0, 1, 2], [0, 3, 6],
             [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6])
    return any(board[a] == board[b] == board[c] == marker for a, b, c in lines)
def empty(board):
    """Return True when no cell has been claimed yet.

    Fix: the original compared the loop *index* with "_" (``if i != "_"``),
    so it returned False for every non-empty board, even an all-blank one.
    """
    for cell in board:
        if cell != "_":
            return False
    return True
def tied(board):
    # A draw: the game is over but nobody holds a winning line.
    return complete(board) == True and winner(board) is None
def win(board):
    # True when either side could win with a single move somewhere on the
    # current board (checked by trying every free cell for both markers).
    for i in range(0,len(board)):
        board_copy = board[:]
        board_copy2 = board[:]
        if is_space_free(board_copy, i):
            make_move(board_copy,i,"O")
            if is_winner(board_copy, "O"):
                return True
            make_move(board_copy2,i,"X")
            if is_winner(board_copy2, "X"):
                return True
    return False
def get_bot_move(board, mark):
    # Return a 1-based winning move for ``mark`` if one exists, otherwise a
    # 1-based move blocking the opponent's win, otherwise None (implicit).
    if mark == "X":
        umark = "X"
        omark = "O"
    else:
        umark = "O"
        omark = "X"
    # First look for our own immediate win...
    for i in range(0,len(board)):
        board_copy = board[:]
        if is_space_free(board_copy, i):
            make_move(board_copy,i,umark)
            if is_winner(board_copy, umark):
                return i+1
    # ...then for a move that blocks the opponent's immediate win.
    for i in range(0,len(board)):
        board_copy = board[:]
        if is_space_free(board_copy, i):
            make_move(board_copy,i,omark)
            if is_winner(board_copy, omark):
                return i+1
def is_space_free(board, index):
    # A cell is free while it still holds the placeholder marker.
    return board[index] == '_'
def make_move(board,index,move):
    # NOTE(review): re-defines the make_move declared earlier in this file
    # (equivalent behaviour); this later definition wins at import time.
    board[index] = move
def cal_first_bids_num(first_player_bids, second_player_bids):
    """Replay past bidding rounds and return the first player's holdings.

    Starts from 4 and, per round: the lower bidder pays nothing and the
    winner's bid moves money accordingly; tied rounds alternate, with the
    first tie counting against the first player.
    """
    bids_num = 4
    ties_seen = 0
    for rnd in range(len(first_player_bids)):
        mine = first_player_bids[rnd]
        theirs = second_player_bids[rnd]
        if mine < theirs:
            bids_num += theirs
        elif mine > theirs:
            bids_num -= mine
        else:
            ties_seen += 1
            if ties_seen % 2 == 0:
                bids_num += mine
            else:
                bids_num -= mine
    return bids_num
def next_move(player, first_player_bids, second_player_bids, board, move):
    # Decide and print the next action for the bidding tic-tac-toe game.
    # When ``move`` is "PLAY" a board position "row col" is printed (chosen
    # by the minimax player); otherwise a bid amount is printed.
    player_num = 0;
    # Money this player holds out of the total pool of 8.
    player_num = (cal_first_bids_num(first_player_bids, second_player_bids)) if (player == 'X') else (8 - cal_first_bids_num(first_player_bids, second_player_bids));
    rival_num = 8 - player_num;
    same = 0
    for i in range(len(first_player_bids)):
        if(first_player_bids[i] == second_player_bids[i]):
            same+=1
    # tsp: extra unit added when bidding -- appears to track whether the
    # opponent wins the next tie (NOTE(review): verify against game rules).
    tsp = 0 if (player == "X" and same%2==0) or (player== "O" and same%2==1) else 1
    # Flatten the 3x3 board rows into a single 9-cell list.
    nboard = []
    for i in board:
        for j in i:
            nboard.append(j)
    if move == "PLAY":
        # Convert the 0-8 cell index into "row col".
        num = determine(nboard, player)
        if num <3:
            print 0, num
        elif num < 6:
            print 1, num-3
        else:
            print 2, num-6
    else:
        # Bid heuristics: outbid for the centre early, bid high when a
        # one-move win/loss is on the board, otherwise bid low.
        if len(first_player_bids) == 2 and nboard[4] == get_enemy(move):
            print 1 + tsp
            return
        if len(first_player_bids) == 0 and player == "O":
            print 2
            return
        if win(nboard):
            print min(player_num, max(rival_num+tsp,1))
        elif player_num > 6:
            print max(rival_num +tsp, 1)
        else:
            print min(1,player_num)
#gets the id of the player
player = raw_input()
move = raw_input() #current position of the scotch
first_player_bids = [int(i) for i in raw_input().split()]
second_player_bids = [int(i) for i in raw_input().split()]
board = []
# Read the three board rows as strings of X/O/_ characters.
for i in xrange(0, 3):
    board.append(raw_input())
next_move(player, first_player_bids, second_player_bids, board, move)
#!/usr/bin/env python3
# -*-coding: utf-8-*-
"""The neptune class"""
from __future__ import (division, absolute_import, unicode_literals,
print_function)
import os
# Since these modules depend on the above variable, load after
from .system import System
from .local import Local
# do not import gtk by default
# from . indicator import Indicator
from . import tools
# D-Bus / PolicyKit identifiers the neptune service is exposed under.
DBUS_SERVICE = "org.neptune.Service"
DBUS_INTERFACE = "org.neptune.Interface"
POLKIT_SERVICE = "org.neptune.service"
DBUS_PATH = "/Control"
# Filesystem layout relative to the package root.
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
ICON = os.path.join(ROOT, "icons", "neptune.svg")
CONFIG_DIR = os.path.join(ROOT, "config")
# sysfs/proc locations and commands for backlight, CPU and screen control.
BACKLIGHT_DIR = "/sys/class/backlight"
BACKLIGHT_ORDER = ("acpi_video0",)
CPU_DIR = "/sys/devices/system/cpu"
SCREEN_OFF = "xset dpms force off"
# Map per-battery uevent/proc keys onto the normalised attribute names
# (capacity/rate/voltage/state); both sysfs and legacy /proc layouts.
BATTERIES = {
    "/sys/class/power_supply/BAT0/uevent": {
        "POWER_SUPPLY_CHARGE_NOW": "capacity",
        "POWER_SUPPLY_CURRENT_NOW": "rate",
        "POWER_SUPPLY_VOLTAGE_NOW": "voltage",
        "POWER_SUPPLY_STATUS": "state"},
    "/sys/class/power_supply/BAT1/uevent": {
        "POWER_SUPPLY_CHARGE_NOW": "capacity",
        "POWER_SUPPLY_CURRENT_NOW": "rate",
        "POWER_SUPPLY_VOLTAGE_NOW": "voltage",
        "POWER_SUPPLY_STATUS": "state"},
    "/proc/acpi/battery/BAT0/state": {
        "present rate": "rate",
        "remaining capacity": "capacity",
        "present voltage": "voltage",
        "charging state": "state"}}
BATTERY_INFO = "/proc/acpi/battery/BAT0/info"
# Application metadata.
AUTHOR = "Sander van Noort"
EMAIL = "Sander.van.Noort@gmail.com"
COPYRIGHT = "(C) 2011 Sander van Noort <Sander.van.Noort@gmail.com>"
NAME = "Neptune Indicator"
VERSION = "0.2"
DESCRIPTION = """Control the power consumption, brightness and input devices"""
# Desktop integration paths.
APP_DIR = "/usr/share/applications"
AUTOSTART_DIR = os.path.join(os.path.expanduser("~"), ".config", "autostart")
class Error(Exception):
    """Error class for caught, anticipated failure conditions.

    The message is kept in ``value`` for existing callers; ``str(e)``
    returns it unchanged.
    """
    def __init__(self, value):
        # Forward to Exception so ``e.args``, repr() and pickling behave
        # normally (the base __init__ was previously skipped).
        super(Error, self).__init__(value)
        self.value = value

    def __str__(self):
        return self.value
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
@author: Link
@contact: zheng.long@sfy.com
@module: service.py
@date: 2018-12-14
@usage:
$>nameko run service --broker amqp://guest:guest@localhost
$>nameko shell --broker amqp://guest:guest@localhost
"""
import yagmail
from nameko.rpc import rpc, RpcProxy
class Mail(object):
    # Nameko RPC service that sends mail through the company SMTP relay.
    name = "mail"
    @rpc
    def send(self, to, subject, contents):
        # SECURITY: SMTP credentials are hard-coded below; they should be
        # read from a safe location instead (see note after the call).
        yag = yagmail.SMTP(
            user='data.admin@sfy.com',
            password='123123123',
            host="smtp.exmail.qq.com",
            port=465
        )
        # Please load the credentials above from a secure source.
        # Tip: have a look at the Dynaconf settings module.
        yag.send(to=to,
                 subject=subject,
                 contents=[contents])
class Compute(object):
    # Nameko RPC service: applies an arithmetic operation and mails the
    # outcome (success or failure) via the "mail" service.
    name = "compute"
    mail = RpcProxy('mail')
    @rpc
    def compute(self, operation, value, other, email):
        # Dispatch table; operands are coerced to int before applying.
        operations = {'sum': lambda x, y: int(x) + int(y),
                      'mul': lambda x, y: int(x) * int(y),
                      'div': lambda x, y: int(x) / int(y),
                      'sub': lambda x, y: int(x) - int(y)}
        try:
            result = operations[operation](value, other)
        except Exception as e:
            # Asynchronous (fire-and-forget) call, then re-raise to the caller.
            self.mail.send.call_async(email, "An error occurred", str(e))
            # self.mail.send(email, "An error occurred", str(e))
            raise
        else:
            self.mail.send.call_async(
                email,
                "Your operation is complete!",
                "The result is: %s" % result
            )
            return result
if __name__ == '__main__':
    # Services are started via ``nameko run service``; nothing to do here.
    pass
|
import gym
import sys
import itertools
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import common.tf_util as U
import logger
import deepq
from deepq.replay_buffer import ReplayBuffer
from deepq.utils import ObservationInput
from common.schedules import LinearSchedule
### End of library / environment setup.
import time
start = time.time()  # Record the training start time.
def model(inpt, num_actions, scope, reuse=False):
    """Take an observation as input and return the value of every action."""
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        ### Adjust num_outputs (32, 64, 128) to tune the DQN hidden-layer size.
        out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.tanh)
        ################
        out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out
if __name__ == '__main__':
    with U.make_session(num_cpu=1):
        # Create the CartPole-v0 gym environment.
        env = gym.make("CartPole-v0")
        # Build all functions needed for training (act, train, update_target, debug).
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
            q_func=model,  # The target Q function works around the moving-target problem (the learned Q keeps changing); every fixed number of steps the Q weights are copied into the target Q.
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
        )
        # Create the replay buffer.
        replay_buffer = ReplayBuffer(50000)
        # Anneal exploration from 1.0 down to 0.02 over 10000 steps.
        # Actions start random; eventually ~98% are picked by the model.
        exploration = LinearSchedule(schedule_timesteps=10000, initial_p=1.0, final_p=0.02)
        # Initialise parameters and copy them to the target network.
        U.initialize()
        update_target()
        reward_list = []  # Collects rewards whose sum is written to a file.
        episode_rewards = [0.0]
        obs = env.reset()  # Reset the environment.
        for t in itertools.count():  # Loop until training finishes.
            # Act, updating exploration with its latest value.
            action = act(obs[None], update_eps=exploration.value(t))[0]
            new_obs, rew, done, _ = env.step(action)  # Step the agent; the environment returns the observation and other values.
            # Store the transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            reward_list.append(rew)  # Append the reward to the list.
            obs = new_obs
            episode_rewards[-1] += rew
            if done:
                obs = env.reset()  # Reset the environment.
                episode_rewards.append(0)
                ### Save the reward sum to a file (change the filename as needed).
                with open("../../target1000_3.txt", "a") as f:
                    f.write(str(sum(reward_list)) + "\n")
                reward_list = []  # Clear the reward list.
            is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200
            is_finished = len(episode_rewards) > 2000 and is_solved
            if is_solved:
                if len(episode_rewards) > 1998:
                    # Render the current state to the screen.
                    env.render()
                if is_finished:
                    sys.exit(0)
            else:
                # Minimise the Bellman error on a batch sampled from the replay buffer.
                if t > 1000:
                    ### Adjust the replay-buffer sample size here (16, 32, 64).
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32)
                    ################
                    train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))
                # Update the target network periodically.
                ### Adjust the target-update period here (250, 500, 1000).
                if t % 1000 == 0:
                    #################
                    update_target()
            if done and len(episode_rewards) % 10 == 0:  # Log whenever an episode ends.
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", len(episode_rewards))
                logger.record_tabular("mean episode reward", round(np.mean(episode_rewards[-101:-1]), 1))
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()
    print("time :", time.time() - start)  # Print the total training time at the end.
|
from __future__ import absolute_import, division, print_function
from datetime import datetime
from rfc822 import parsedate_tz, mktime_tz
from urlparse import urlparse
from time import time
from typing import Any, Optional # NOQA
from changes.utils.http import build_patch_uri
from .base import Vcs, RevisionResult, BufferParser, CommandError, UnknownRevision
import logging
LOG_FORMAT = '{node}\x01{author}\x01{date|rfc822date}\x01{p1node} {p2node}\x01{branches}\x01{desc}\x02'
BASH_CLONE_STEP = """
#!/bin/bash -eux
REMOTE_URL=%(remote_url)s
LOCAL_PATH=%(local_path)s
REVISION=%(revision)s
if [ ! -d $LOCAL_PATH/.hg ]; then
hg clone $REMOTE_URL $LOCAL_PATH
pushd $LOCAL_PATH
else
pushd $LOCAL_PATH
hg recover || true
hg pull $REMOTE_URL
fi
if ! hg up %(clean_arg)s $REVISION ; then
echo "Failed to update to $REVISION"
exit 1
fi
# similar to hg purge, but without requiring the extension
hg status -un0 | xargs -0 rm -rf
""".strip()
BASH_PATCH_STEP = """
#!/bin/bash -eux
LOCAL_PATH=%(local_path)s
PATCH_URL=%(patch_url)s
pushd $LOCAL_PATH
PATCH_PATH=/tmp/$(mktemp patch.XXXXXXXXXX)
curl -o $PATCH_PATH $PATCH_URL
hg import --no-commit $PATCH_PATH
""".strip()
class MercurialVcs(Vcs):
    """Vcs implementation backed by the ``hg`` command-line client."""
    binary_path = 'hg'

    def get_default_env(self):
        # HGPLAIN disables user config/aliases so output stays parseable.
        return {
            'HGPLAIN': '1',
        }

    # This is static so that the repository serializer can easily use it
    @staticmethod
    def get_default_revision():
        return 'default'

    @property
    def remote_url(self):
        # Rebuild ssh/http(s) URLs so an explicit username is always present;
        # other URL forms (e.g. local paths) pass through untouched.
        if self.url.startswith(('ssh:', 'http:', 'https:')):
            parsed = urlparse(self.url)
            url = '%s://%s@%s/%s' % (
                parsed.scheme,
                parsed.username or self.username or 'hg',
                parsed.hostname + (':%s' % (parsed.port,) if parsed.port else ''),
                parsed.path.lstrip('/'),
            )
        else:
            url = self.url
        return url

    def run(self, cmd, **kwargs):
        # Every hg invocation shares the custom ssh wrapper; "unknown
        # revision" failures are translated into a typed exception.
        cmd = [
            self.binary_path,
            '--config',
            'ui.ssh={0}'.format(self.ssh_connect_path)
        ] + cmd
        try:
            return super(MercurialVcs, self).run(cmd, **kwargs)
        except CommandError as e:
            if "abort: unknown revision '" in e.stderr:
                raise UnknownRevision(
                    cmd=e.cmd,
                    retcode=e.retcode,
                    stdout=e.stdout,
                    stderr=e.stderr,
                )
            raise

    def clone(self):
        self.run(['clone', self.remote_url, self.path], cwd='/')

    def update(self):
        self.run(['pull'])

    def log(self, parent=None, branch=None, author=None, offset=0, limit=100, paths=None):
        """ Gets the commit log for the repository.

        Each revision returned has exactly one branch name associated with it.
        This is the branch name encoded into the revision changeset description.

        See documentation for the base for general information on this function.
        """
        start_time = time()

        # TODO(dcramer): we should make this streaming
        cmd = ['log', '--template=%s' % (LOG_FORMAT,)]

        if parent and branch:
            raise ValueError('Both parent and branch cannot be set')

        # Build the -r parameter value into r_str with branch, parent and author
        r_str = None
        if branch:
            cmd.append('-b{0}'.format(branch))
        if parent:
            r_str = ('ancestors(%s)' % parent)
        if author:
            r_str = ('({r}) and author("{0}")' if r_str else 'author("{0}")')\
                .format(author, r=r_str)
        if r_str:
            cmd.append('-r reverse({0})'.format(r_str))

        if limit:
            # hg has no offset option; fetch offset+limit and skip below.
            cmd.append('--limit=%d' % (offset + limit,))

        if paths:
            cmd.extend(["glob:" + p.strip() for p in paths])

        result = self.run(cmd)
        self.log_timing('log', start_time)

        for idx, chunk in enumerate(BufferParser(result, '\x02')):
            if idx < offset:
                continue

            (sha, author, author_date, parents, branches, message) = chunk.split('\x01')

            branches = filter(bool, branches.split(' ')) or ['default']
            # Drop the null (all-zero) parent hashes hg reports.
            parents = filter(lambda x: x and x != '0' * 40, parents.split(' '))

            author_date = datetime.utcfromtimestamp(
                mktime_tz(parsedate_tz(author_date)))

            yield RevisionResult(
                id=sha,
                author=author,
                author_date=author_date,
                message=message,
                parents=parents,
                branches=branches,
            )

    def export(self, id):
        """Get the textual diff for a revision.
        Args:
            id (str): The id of the revision.
        Returns:
            A string with the text of the diff for the revision.
        Raises:
            UnknownRevision: If the revision wasn't found.
        """
        cmd = ['diff', '-g', '-c %s' % (id,)]
        result = self.run(cmd)
        return result

    def get_changed_files(self, id):
        """Returns the list of files changed in a revision.
        Args:
            id (str): The id of the revision.
        Returns:
            A set of filenames
        Raises:
            UnknownRevision: If the revision wan't found.
        """
        cmd = ['status', '--rev', '%s^..%s' % (id, id), '-n']
        output = self.run(cmd)
        return set([x.strip() for x in output.splitlines()])

    def is_child_parent(self, child_in_question, parent_in_question):
        # debugancestor prints the greatest common ancestor of the two
        # revisions; if that is the candidate parent, it is an ancestor.
        cmd = ['debugancestor', parent_in_question, child_in_question]
        result = self.run(cmd)
        return parent_in_question in result

    def get_known_branches(self):
        """ Gets all the named branches.
        :return: A list of unique names for the branches.
        """
        start_time = time()
        cmd = ['branches']
        results = self.run(cmd)

        branch_names = set()
        for line in results.splitlines():
            if line:
                # The branch name is the first whitespace-delimited column.
                name = line.split(None, 1)
                if name[0]:
                    branch_names.add(name[0])
        self.log_timing('get_known_branches', start_time)
        return list(branch_names)

    @staticmethod
    def get_clone_command(remote_url, path, revision, clean=True, cache_dir=None):
        # type: (str, str, str, bool, Optional[str]) -> str
        if cache_dir is not None:
            logging.warning("unexpected cache_dir provided for hg repository")
        return BASH_CLONE_STEP % dict(
            remote_url=remote_url,
            local_path=path,
            revision=revision,
            clean_arg='--clean' if clean else '',
        )

    def get_buildstep_clone(self, source, workspace, clean=True, cache_dir=None):
        # type: (Any, str, bool, Optional[str]) -> str
        return MercurialVcs.get_clone_command(self.remote_url, workspace, source.revision_sha, clean, cache_dir)

    def get_buildstep_patch(self, source, workspace):
        # type: (Any, str) -> str
        return BASH_PATCH_STEP % dict(
            local_path=workspace,
            patch_url=build_patch_uri(source.patch_id),
        )

    def read_file(self, sha, file_path, diff=None):
        """Read the content of a file at a given revision.
        Args:
            sha (str): the sha identifying the revision
            file_path (str): the path to the file from the root of the repo
            diff (str): the optional patch to apply before reading the config
        Returns:
            str - the content of the file
        Raises:
            CommandError - if the file or the revision cannot be found
        """
        content = self.run(['cat', '-r', sha, file_path])
        if diff is None:
            return content
        return self._selectively_apply_diff(file_path, content, diff)

    def get_patch_hash(self, rev_sha):
        """Not possible for mercurial repositories"""
        return None
|
#!/usr/bin/python
from collections import Counter
import sys, subprocess, math, re, os, os.path, marshal
import parse_wiki_xml, annotator, listWikiSenses
"""
Counters for sense
'cur_word': Counter of cur words
'cur_word_pos': Counter of POS for cur word
'context_words': Counter of how often a word is a context word
'context_poses': Counter of the POS surrounding a word
'global_context': Dunno about this one
num_examples: Number of paragraphs with this word and a link
"""
GENERAL_WIKI_LINK_RE = re.compile("\[\[" + "([^\[\]]*)" + "\]\]")
SPECIFIC_WIKI_LINK_RE = re.compile("\[\[" + "([^\[\]\|]*)" + "\|" + \
"([^\[\]\|]*)" + "\]\]")
NUM_WHATLINKS_PER_WORD = 20
UNKNOWN = "<unknown>"
PARAGRAPH_SEP = "REMINGTONSEPARATOR"
PARAGRAPH_SEP_SENTENCE= PARAGRAPH_SEP + "!"
SMOOTHING_EPSILON = 0.001
MAX_SENSES = 300
LOCAL_CONTEXT_WINDOW = 3
GLOBAL_CONTEXT_WINDOW = 25
wsd_output_file = None
def replace_spaces(text):
    """Replace every whitespace character in *text* with an underscore.

    Parameter renamed from 'str' to avoid shadowing the builtin.
    """
    return re.sub(r"\s", "_", text)
def debug(item):
    """Print *item* to stdout as a '# '-prefixed debug line (Python 2 print)."""
    s = str(item)
    #if wsd_output_file:
    # wsd_output_file.write('# ' + s + '\n')
    print '# ' + s
def output(str):
    """Write a line to the per-page output file (when open) and echo it to stdout.

    NOTE(review): the parameter shadows the builtin 'str'; kept for
    interface compatibility.
    """
    if wsd_output_file:
        wsd_output_file.write(str + '\n')
    print str
"""
Updates the counters.
"""
def update_features(pos_entries, index, counters_for_sense, stop_words):
    """Accumulate Naive Bayes feature counts for the link at pos_entries[index].

    Updates the current-word and current-POS counters, the positional
    local-context pairs, the bag of global-context words (minus stop words),
    and the per-sense example count.
    """
    cur_word = pos_entries[index]['word']
    counters_for_sense['cur_word'][cur_word] += 1
    # Current word feature and current word POS features
    cur_word_pos = pos_entries[index]['pos']
    counters_for_sense['cur_word_pos'][cur_word_pos] += 1
    # Context word features
    local_context_word_pairs, local_context_pos_pairs = \
        annotator.get_context_list(pos_entries, index, LOCAL_CONTEXT_WINDOW)
    counters_for_sense['context_words'].update(local_context_word_pairs)
    counters_for_sense['context_poses'].update(local_context_pos_pairs)
    # Wide-window context minus stop words feeds the Bernoulli features.
    global_context_set = annotator.get_context_set(pos_entries, index, \
        GLOBAL_CONTEXT_WINDOW) - stop_words
    counters_for_sense['global_context_words'].update(global_context_set)
    counters_for_sense['num_examples'] += 1
"""
Returns a set of at most 5 words that appeared at least 3 times in the global
context of a word.
"""
def get_global_context_words(word_counter):
    """Return up to five words that occurred at least three times in the
    global context of a word."""
    frequent = word_counter.most_common(5)
    return {word for word, count in frequent if count >= 3}
"""
Returns the prob equivalent of the given counter C. Applies add epsilon smoothing
using NUM_EXAMPLES and NUM_TOTAL_FEATURES.
"""
def pfs_with_smoothing(c, num_examples, num_total_features):
    """Convert counter *c* into log-probabilities with add-epsilon smoothing.

    The denominator is num_examples + epsilon * num_total_features; the
    special UNKNOWN key carries the mass reserved for unseen features.
    """
    p = {}
    denom = num_examples + SMOOTHING_EPSILON * num_total_features
    log_denom = math.log(denom)
    p[UNKNOWN] = math.log(SMOOTHING_EPSILON) - log_denom
    for w, count in c.iteritems():  # Python 2 dict iteration
        p[w] = math.log(count + SMOOTHING_EPSILON) - log_denom
    return p
"""
Gets Bernoulli-like log probs for certain "buzzwords" appearing in the global
context.
"""
def get_global_context_probs(counter, num_examples):
    """Bernoulli-style (log P(present), log P(absent)) per buzzword in the
    global context, with add-epsilon smoothing."""
    denom = num_examples + 2.0 * SMOOTHING_EPSILON
    probs = {}
    for word in get_global_context_words(counter):
        p_present = (counter[word] + SMOOTHING_EPSILON) / denom
        probs[word] = (math.log(p_present), math.log(1 - p_present))
    return probs
"""
Returns a dictionary that transforms the counters given into a probabilities.
"""
def counters_to_probs(counters_for_sense, vocab, poses):
    """Transform one sense's raw feature counters into smoothed log-prob tables."""
    probs_for_sense = {}
    num_examples = counters_for_sense['num_examples']
    probs_for_sense['cur_word'] = pfs_with_smoothing(\
        counters_for_sense['cur_word'], num_examples, len(vocab))
    probs_for_sense['cur_word_pos'] = pfs_with_smoothing(\
        counters_for_sense['cur_word_pos'], num_examples, len(poses))
    # Positional context features: one slot per (offset, word/POS) pair.
    probs_for_sense['context_words'] = pfs_with_smoothing(\
        counters_for_sense['context_words'], num_examples, \
        len(vocab) * 2 * LOCAL_CONTEXT_WINDOW)
    probs_for_sense['context_poses'] = pfs_with_smoothing(\
        counters_for_sense['context_poses'], num_examples, \
        len(poses) * 2 * LOCAL_CONTEXT_WINDOW)
    # NOTE(review): this writes into the *counters* dict (not probs) and
    # reads 'context_words' rather than 'global_context_words' -- looks like
    # a leftover; confirm before relying on 'global_context'.
    counters_for_sense['global_context'] = \
        get_global_context_words(counters_for_sense['context_words'])
    probs_for_sense['global_context_words'] = get_global_context_probs(\
        counters_for_sense['global_context_words'], num_examples)
    return probs_for_sense
def get_stop_words(lang):
    """Load the stop-word set for *lang* from stopWords-<lang>.txt.

    Each line contributes its first comma-separated field, stripped.
    """
    path = 'stopWords-' + lang + '.txt'
    with open(path, 'r') as handle:
        return {line.split(",")[0].strip() for line in handle}
"""
Should not be necessary anymore.
"""
def preprocess_content(content):
    """Drop everything before the first ''' marker (legacy cleanup;
    should not be necessary anymore)."""
    marker = content.find("'''")
    if marker < 0:
        return content
    # NOTE(review): skips only one character past the marker start, not the
    # whole ''' token -- preserved exactly as the original behaved.
    return content[marker + 1:]
def lookup_prob(probs, key):
    """Log-probability for *key*, falling back to the UNKNOWN pseudo-entry."""
    return probs[key] if key in probs else probs[UNKNOWN]
"""
Estimates P(pos_entries | sense).
"""
def get_nb_prob(pos_entries, index, probs_for_sense, stop_words):
    """Estimate log P(features at pos_entries[index] | sense) by summing the
    Naive Bayes feature log-probabilities."""
    cur_word = pos_entries[index]['word']
    prob = lookup_prob(probs_for_sense['cur_word'], cur_word)
    cur_word_pos = pos_entries[index]['pos']
    prob += lookup_prob(probs_for_sense['cur_word_pos'], cur_word_pos)
    context_word_pairs, context_pos_pairs = \
        annotator.get_context_list(pos_entries, index, LOCAL_CONTEXT_WINDOW)
    for context_word_pair in context_word_pairs:
        prob += lookup_prob(probs_for_sense['context_words'], context_word_pair)
    for context_pos_pair in context_pos_pairs:
        prob += lookup_prob(probs_for_sense['context_poses'], context_pos_pair)
    global_context_set = annotator.get_context_set(pos_entries, index, \
        GLOBAL_CONTEXT_WINDOW) - stop_words
    # Bernoulli features: add the 'present' or 'absent' log prob per buzzword.
    for global_context_word, probs_pair in \
        probs_for_sense['global_context_words'].iteritems():
        if global_context_word in global_context_set:
            prob += probs_pair[0] # present log prob
        else:
            prob += probs_pair[1] # not present log prob
    return prob
"""
Predicts the sense for word at POS_ENTRIES[INDEX]. Will not NB probs if
PROBS_BY_SENSE has only one sense.
"""
def predict_sense(pos_entries, index, probs_by_sense, stop_words):
    """Pick the most probable sense for the word at pos_entries[index].

    Falls back to the word itself when no senses exist, and skips NB
    scoring when there is only one candidate sense.
    """
    if not probs_by_sense:
        return pos_entries[index]['word']
    if len(probs_by_sense) == 1:
        for sense in probs_by_sense:
            return sense
    nb_probs = Counter()
    for sense in probs_by_sense:
        nb_probs[sense] = get_nb_prob(pos_entries, index, probs_by_sense[sense], \
            stop_words)
    top_senses = nb_probs.most_common(5)
    debug(top_senses)
    top_sense, top_prob = top_senses[0]
    return top_sense
"""
Not used anymore.
"""
def get_senses_old(word, lang):
    """Legacy sense lookup via listWikiSenses (no longer used)."""
    return [e['title'] for e in listWikiSenses.getListOfSenses(word, lang)]
def get_senses(word, lang):
    """Return candidate sense pages for *word* by shelling out to listSenses.py.

    The helper writes output-<word>-<lang>.txt; the wiki links inside it are
    the candidate senses. Returns [] when the helper fails.
    """
    output_file_name = 'output-' + word + '-' + lang + '.txt'
    try:
        subprocess.check_output(['python', 'listSenses.py', '-q', lang, \
            word])
    except (subprocess.CalledProcessError, OSError):
        # Narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit.
        return []
    output_file_contents = ""
    if os.path.exists(output_file_name):
        with open(output_file_name, 'r') as f:
            output_file_contents = f.read()
    remove_if_exists(output_file_name)
    ignored, links = annotator.extract_links(output_file_contents, \
        GENERAL_WIKI_LINK_RE)
    return [link['page'] for link in links]
def print_list_lines(items):
    """Debug-print each element of *items* on its own line.

    Parameter renamed from 'list' to avoid shadowing the builtin.
    """
    for item in items:
        debug(item)
"""
We are only training on lower case words.
page is something like 'bar (law)'
word is something like 'bar'
"""
def is_valid_keyword(page, word):
    """True when *word* is a non-empty, lowercase-starting keyword and *page*
    is a plain article title (no namespace colon, not the 'thumb' artifact).

    page is something like 'bar (law)'; word is something like 'bar'.
    """
    if not word or not word[:1].islower():
        return False
    return ':' not in page and word != 'thumb'
"""
Keyword = word to disambiguate
"""
def get_keywords_from_pos_entries(pos_entries):
    """Collect the valid disambiguation keywords (link-entry words) of one
    annotated paragraph."""
    return {entry['word']
            for entry in pos_entries
            if annotator.is_link_entry(entry)
            and is_valid_keyword(entry['page'], entry['word'])}
"""
Debugging function.
"""
def print_links_in_pos_entries(pos_entries):
    """Debugging helper: dump the link text of every link entry."""
    for pos_entry in pos_entries:
        if annotator.is_link_entry(pos_entry):
            debug(pos_entry['link_text_words'])
def get_keywords_in_paragraphs(pos_entries_by_paragraph, lang):
    """Union of keywords over all paragraphs.

    *lang* is currently unused but kept for interface parity with callers.
    """
    keywords = set()
    for pos_entries in pos_entries_by_paragraph:
        keywords.update(get_keywords_from_pos_entries(pos_entries))
    return keywords
"""
Shortens the given string of page content by not including paragraphs that don't have the link in them.
"""
def filter_page_contents(pages, wiki_link_re):
    """Join (PARAGRAPH_SEP-separated) only those paragraphs of *pages* that
    contain a wiki link, shortening the text the annotator must process."""
    kept = [paragraph
            for page in pages
            for paragraph in page["content"].split("\n")
            if wiki_link_re.search(paragraph)]
    return PARAGRAPH_SEP.join(kept)
"""
Will actually smush pages into one paragraph separated by REMINGTON!
"""
def get_annotated_paragraphs_in_pages(pages, lang, wiki_link_re):
    """Annotate all link-bearing paragraphs of *pages*, smushed into one
    PARAGRAPH_SEP-joined blob; returns (linkless_paragraphs, pos_entries)."""
    linkless_paragraphs = []
    pos_entries_by_paragraph = []
    filtered = filter_page_contents(pages, wiki_link_re)
    a, b = get_annotated_paragraphs(filtered, lang, wiki_link_re)
    if a:
        linkless_paragraphs.extend(a)
        pos_entries_by_paragraph.extend(b)
    return (linkless_paragraphs, pos_entries_by_paragraph)
"""
Will actually treat content as one giant paragraph an return a tuple
with one-item lists.
"""
def get_annotated_paragraphs(content, lang, wiki_link_re):
    """Treat *content* as one giant paragraph; returns one-item lists
    ([linkless], [pos_entries]), or ([], []) when annotation found nothing."""
    linkless_paragraph, pos_entries = \
        annotator.annotate_paragraph(content, lang, wiki_link_re)
    if pos_entries:
        return ([linkless_paragraph], [pos_entries])
    return ([], [])
def output_prediction(keyword, sense):
    """Emit the disambiguated link for *keyword* in [[sense|keyword]] syntax."""
    output("[[" + sense + "|" + keyword + "]]")
"""
Dumps VALUE into FILE_NAME inside the cache folder, overwriting any
existing file in the cache.
"""
def insert_into_cache(file_name, value):
    """Marshal *value* into cache/<file_name>, overwriting any existing entry."""
    cache_file_name = os.path.join('cache', file_name)
    remove_if_exists(cache_file_name)
    # Marshal data is binary -- open in binary mode ('w' breaks on Python 3
    # and on Windows).
    with open(cache_file_name, 'wb') as f:
        marshal.dump(value, f)
def remove_if_exists(file_name):
    """Delete *file_name*, silently ignoring a missing file (EAFP)."""
    try:
        os.remove(file_name)
    except OSError:
        pass
def get_from_cache(file_name):
    """Load a marshalled value from cache/<file_name>; None when absent."""
    cache_file_name = os.path.join('cache', file_name)
    if not os.path.exists(cache_file_name):
        return None
    # Marshal data is binary -- open in binary mode ('r' breaks on Python 3
    # and on Windows); 'with' also guarantees the handle is closed.
    with open(cache_file_name, 'rb') as f:
        return marshal.load(f)
def get_pair_from_cache(file_name):
    """Like get_from_cache, but always yields a 2-tuple ((None, None) on miss)."""
    cached = get_from_cache(file_name)
    return cached if cached else (None, None)
def get_joined_name(parts):
    """Join name *parts* with dashes (the cache-key convention).

    Parameter renamed from 'list' to avoid shadowing the builtin.
    """
    return '-'.join(parts)
def get_cache_file_name(file_name):
    """Path of cache/<file_name> when it exists, else None."""
    candidate = os.path.join('cache', file_name)
    return candidate if os.path.exists(candidate) else None
def move_to_cache(file_name):
    """Move *file_name* into the cache folder; returns its new path."""
    cache_file_name = os.path.join('cache', file_name)
    os.rename(file_name, cache_file_name)
    return cache_file_name
def get_sense_link_re(sense):
    """Case-insensitive regex matching [[sense]] or [[sense|text]] links;
    group(1) holds the display text of the piped form."""
    escaped = re.escape(sense)
    pattern = ("\\[\\[" + escaped + "\\]\\]"
               + "|"
               + "\\[\\[" + escaped + "\\|([^\\[\\]]*)\\]\\]")
    return re.compile(pattern, flags=re.IGNORECASE)
def get_empty_counters_for_sense():
    """Fresh feature-counter bundle for one sense (all counters empty)."""
    counters = {key: Counter()
                for key in ('cur_word', 'cur_word_pos', 'context_words',
                            'context_poses', 'global_context_words')}
    counters['num_examples'] = 0
    return counters
def add_list_to_vocab(vocab, poses, pos_entries_list):
    """Fold every annotated paragraph of *pos_entries_list* into vocab/poses."""
    for entries in pos_entries_list:
        add_to_vocab(vocab, poses, entries)
def add_to_vocab(vocab, poses, pos_entries):
    """Record each entry's word and POS tag into the running vocab/POS sets."""
    vocab.update(entry['word'] for entry in pos_entries)
    poses.update(entry['pos'] for entry in pos_entries)
"""
TRAINING_DATA is either a list of pos_entry lists or True.
"""
def get_probs_for_sense(sense, lang, vocab, poses, training_data, stop_words):
    """Build smoothed NB probability tables for one sense.

    TRAINING_DATA is either a list of pos_entry lists or True. Returns True
    when no model is needed (marker value or no paragraphs), otherwise the
    probability tables from counters_to_probs.
    """
    pos_entries_by_paragraph = training_data
    if training_data == True or len(pos_entries_by_paragraph) == 0:
        return True
    counters_for_sense = get_empty_counters_for_sense()
    for pos_entries in pos_entries_by_paragraph:
        for i, pos_entry in enumerate(pos_entries):
            if annotator.is_link_entry(pos_entry):
                update_features(pos_entries, i, counters_for_sense, stop_words)
    return counters_to_probs(counters_for_sense, vocab, poses)
def get_probs_by_sense(word, lang, vocab, poses, training_data_by_sense, stop_words):
    """Train NB models for every sense of *word*; returns {sense: probs-or-True}."""
    probs_by_sense = {}
    # With at most one sense there is nothing to disambiguate; mark it True.
    if len(training_data_by_sense) <= 1:
        for only_sense in training_data_by_sense:
            probs_by_sense[only_sense] = True
    for sense, training_data_for_sense in training_data_by_sense.iteritems():
        probs = get_probs_for_sense(sense, lang, vocab, poses, training_data_for_sense, \
            stop_words)
        if probs:
            probs_by_sense[sense] = probs
        else:
            debug("Threw out sense " + sense)
    return probs_by_sense
"""
Simply replaces all links with just their text.
"""
def get_linkless_paragraph(content):
    """Replace every wiki link with its display text: [[a|b]] -> b, [[a]] -> a."""
    content = SPECIFIC_WIKI_LINK_RE.sub(lambda m: m.group(2), content)
    content = GENERAL_WIKI_LINK_RE.sub(lambda m: m.group(1), content)
    return content
"""
Given annotated paragraphs for just the links of a particular sense,
returns True iff the vast majority of the link texts start with an
uppercase letter (indicating it's most likely an name entity.
"""
def is_likely_lower_sense(pos_entries_by_paragraph):
    """Return True when at least 10% of this sense's link texts are valid
    lowercase keywords, i.e. the sense plausibly refers to a common noun.

    NOTE(review): the original docstring claimed the opposite polarity
    ("True iff the vast majority ... start with an uppercase letter");
    this description matches what the code actually computes.
    """
    num_lower = 0
    num_total = 0
    for pos_entries in pos_entries_by_paragraph:
        for pos_entry in pos_entries:
            if annotator.is_link_entry(pos_entry):
                num_total += 1
                if is_valid_keyword(pos_entry['page'], pos_entry['word']):
                    num_lower += 1
    if num_total == 0:
        return False
    fraction = 1.0 * num_lower / num_total
    return fraction >= 0.1
def is_multi_cap(sense):
    """True for multi-word titles whose first and last words are capitalized
    (a strong hint the title is a named entity)."""
    tokens = sense.split()
    if len(tokens) < 2:
        return False
    return tokens[0][0].isupper() and tokens[-1][0].isupper()
"""
Returns training data for a sense. Here training data is a list of
annotated paragraphs, where each annotated paragraph is a list of pos
entries with special link entries for which the sense was the destination
page.
"""
def get_training_data_for_sense_cache(sense, lang):
    """Return (cached) annotated training paragraphs for *sense*.

    Scrapes 'what links here' pages via whatLinksHere.py, annotates the
    paragraphs whose links target the sense, and caches the result.
    Returns [] for rejected senses.
    """
    # NOTE(review): hard-coded sense blacklist -- these specific pages
    # presumably broke the pipeline; consider moving this to configuration.
    if '/' in sense or sense == "Bomb the Bass" or is_multi_cap(sense) or sense == "Kirklees" or sense == "Fallout 2":
        return []
    file_name = get_joined_name(['sense', lang, sense])
    sense_data = get_from_cache(file_name)
    if sense_data:
        debug("# Getting (cached) sense training data for " + sense)
        return sense_data
    debug("Getting sense training data for " + sense)
    output_file_name = 'output-' + sense + '-' + lang + '.xml'
    output = subprocess.check_output(['python', 'whatLinksHere.py', \
        sense, lang, str(NUM_WHATLINKS_PER_WORD)])
    sense_link_re = get_sense_link_re(sense)
    pos_entries_by_paragraph = []
    if os.path.exists(output_file_name):
        file_size = os.path.getsize(output_file_name)
        debug(file_size)
        pages = parse_wiki_xml.parse_articles_xml(output_file_name)
        ignored, pos_entries_by_paragraph = \
            get_annotated_paragraphs_in_pages(pages, lang, sense_link_re)
    sense_data = pos_entries_by_paragraph
    # Reject senses that look like named entities (mostly capitalized links).
    if not is_likely_lower_sense(sense_data):
        sense_data = []
    insert_into_cache(file_name, sense_data)
    remove_if_exists(output_file_name)
    return sense_data
"""
Given a word, returns a dictionary mapping sense -> training data.
If there is at most one sense, returns a dictionary where the only key
is the only sense and its value is True. No list of training data is
provided.
"""
def get_training_data_by_keyword_cache(word, lang):
    """Return (cached) {sense: training-data} for *word*.

    With zero or one sense the single value is True (no model needed);
    otherwise senses are capped at MAX_SENSES and senses without usable
    training data are dropped.
    """
    file_name = get_joined_name(['keyword', lang, word])
    data_by_sense = get_from_cache(file_name)
    if data_by_sense:
        debug("Getting (cached) keyword training data for " + word)
        debug(str(len(data_by_sense)) + " senses found.")
        return data_by_sense
    data_by_sense = {}
    senses = get_senses(word, lang)
    num_senses = len(senses)
    debug("Getting keyword training data for " + word)
    debug(str(num_senses) + " senses found.")
    # Zero or one sense: no disambiguation needed; mark with True.
    if len(senses) == 0:
        data_by_sense[word] = True
        return data_by_sense
    if len(senses) == 1:
        data_by_sense[senses[0]] = True
        return data_by_sense
    if len(senses) > MAX_SENSES:
        debug("Too many senses. Truncating to just " + str(MAX_SENSES))
        senses = senses[:MAX_SENSES]
    for sense in senses:
        data_for_sense = get_training_data_for_sense_cache(sense, lang)
        if data_for_sense:
            data_by_sense[sense] = data_for_sense
        else:
            debug("Throwing out sense " + sense + " due to lack of data (may be upper)")
    insert_into_cache(file_name, data_by_sense)
    return data_by_sense
def has_no_ambiguity(training_data_by_keyword):
    """A keyword with zero or one sense needs no disambiguation."""
    return len(training_data_by_keyword) < 2
# TODO: Does not cache yet
def get_required_data_cache(pageid, title, content, lang):
    """Gather (and cache) everything needed to disambiguate one article:
    annotated paragraphs, vocab/POS sets, and per-keyword training data."""
    file_name = get_joined_name(["article", pageid, lang])
    cached = get_from_cache(file_name)
    if cached:
        debug("Getting (cached) required data to disambiguate " + title)
        return cached
    vocab = set()
    poses = set()
    # TODO: Cache?
    linkless_paragraphs, pos_entries_by_paragraph = \
        get_annotated_paragraphs(content, lang, GENERAL_WIKI_LINK_RE)
    add_list_to_vocab(vocab, poses, pos_entries_by_paragraph)
    keywords = get_keywords_in_paragraphs(pos_entries_by_paragraph, lang)
    training_data_by_keyword = {}
    for keyword in keywords:
        training_data_by_keyword[keyword] = \
            get_training_data_by_keyword_cache(keyword, lang)
        # Ambiguous keywords contribute their sense data to the vocab too.
        if not has_no_ambiguity(training_data_by_keyword[keyword]):
            for sense, data_by_sense in training_data_by_keyword[keyword].iteritems():
                add_list_to_vocab(vocab, poses, data_by_sense)
    to_return = {'doc': pos_entries_by_paragraph, 'vocab': vocab, 'poses': poses, \
        'training_data_by_keyword': training_data_by_keyword}
    insert_into_cache(file_name, to_return)
    return to_return
# TODO: What links here provides no prior for NB
def wsd_page(pageid, title, content, lang, stop_words):
    """Disambiguate every ambiguous link keyword in one wiki page.

    Trains per-keyword NB models, then prints a [[sense|keyword]] prediction
    for each link occurrence in the page.
    """
    required_data = get_required_data_cache(pageid, title, content, lang)
    pos_entries_by_paragraph = required_data['doc']
    vocab = required_data['vocab']
    poses = required_data['poses']
    training_data_by_keyword = required_data['training_data_by_keyword']
    keyword_sense_probs = {}
    for keyword, training_data in training_data_by_keyword.iteritems():
        debug("Training for keyword " + keyword)
        keyword_sense_probs[keyword] = get_probs_by_sense(keyword, lang, vocab, poses, \
            training_data, stop_words)
    for pos_entries in pos_entries_by_paragraph:
        for i, pos_entry in enumerate(pos_entries):
            if annotator.is_link_entry(pos_entry):
                keyword = pos_entry['word']
                if keyword in training_data_by_keyword:
                    probs_for_sense = keyword_sense_probs[keyword]
                    sense = predict_sense(pos_entries, i, probs_for_sense, stop_words)
                    output_prediction(keyword, sense)
def wsd(xml_file_name, lang):
    """Disambiguate every page in *xml_file_name*, writing one output file
    per page under <lang>2/; pages whose output already exists are skipped."""
    global wsd_output_file
    stop_words = get_stop_words(lang)
    for page in parse_wiki_xml.parse_articles_xml(xml_file_name):
        output_file_base = os.path.join(lang + '2', page['pageid'] + '-' + lang)
        output_file_tmp = output_file_base + '.tmp'
        output_file_name = output_file_base + '.txt'
        debug("Disambiguating " + page['title'])
        if os.path.exists(output_file_name):
            continue
        wsd_output_file = open(output_file_tmp, 'w')
        wsd_page(page['pageid'], page['title'], page['content'], lang, stop_words)
        wsd_output_file.close()
        wsd_output_file = None
        # Rename only after a full run so partial output never looks done.
        os.rename(output_file_tmp, output_file_name)
if __name__ == '__main__':
    # NOTE(review): output_file_name is read from argv but never used;
    # per-page output files are opened inside wsd() instead.
    if len(sys.argv) >= 4:
        output_file_name = sys.argv[3]
    if len(sys.argv) == 1:
        wsd("output-1-en.xml", "en")
    elif len(sys.argv) >= 3:
        wsd(sys.argv[1], sys.argv[2])
    else:
        debug("Usage: python wsd2.py articles.xml en <output file name>")
|
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# import datasets (images: 55,000, test: 10,000, validation: 5,000)
## images: 784 (28*28 pixels) values between 0 and 1
## labels: 10 one-hot vector
def main():
    """Train a small two-conv-layer CNN on MNIST and report test accuracy.

    Uses the legacy TF1 tutorial APIs (InteractiveSession, scalar_summary,
    initialize_all_variables) that this file targets.
    """
    # images: 784 floats (28*28 pixels) in [0, 1]; labels: 10-way one-hot.
    mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
    sess = tf.InteractiveSession()
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    # (Removed the unused single-layer softmax W/b/y -- they were dead
    # graph nodes never referenced by training or evaluation.)
    # First conv + pool block: 28x28x1 -> 14x14x32.
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # Second conv + pool block: 14x14x32 -> 7x7x64.
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # Fully connected layer with dropout.
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # Softmax readout layer.
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.scalar_summary("accuracy", accuracy)
    merged = tf.merge_all_summaries()
    train_writer = tf.train.SummaryWriter("/tmp/mnist_logs/train", sess.graph)
    tf.initialize_all_variables().run()
    for i in range(7000):
        batch = mnist.train.next_batch(50)
        if i%100 == 0:
            # Evaluate without dropout and log a summary every 100 steps.
            summary, acc = sess.run([merged, accuracy], feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})
            train_writer.add_summary(summary, i)
            print("step %d, training accuracy %f"%(i, acc))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    saver = tf.train.Saver()
    saver.save(sess, "model.ckpt")
    print("test accuracy %f"%accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
def weight_variable(shape):
    """Trainable weight tensor, truncated-normal initialized (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Trainable bias tensor initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution, stride 1, 'SAME' padding -- preserves spatial size."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 -- halves both spatial dimensions."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
if __name__ == "__main__":
main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Recursively find :file:`info` and :file:`pickle` files within a directory
This module can be called from the shell, it will recursively look for
:file:`info` and :file:`pickle` files in the current working directory::
$ python pathtococo/bbob_pproc/findfiles.py
Searching in ...
Found ... file(s)!
"""
import os
import warnings
#import zipfile
#import tarfile
# Initialization
def is_recognized_repository_filetype(filename):
    """True for an existing directory or a name containing '.tar'/'.tgz'."""
    if os.path.isdir(filename.strip()):
        return True
    # The extension must appear past position 0 (a bare '.tar' is rejected).
    return filename.find('.tar') > 0 or filename.find('.tgz') > 0
def main(directory='.', verbose=True):
    """Lists data files recursively in a given directory, tar files
    are extracted.
    The data files have :file:`info` and :file:`pickle` extensions.
    TODO: not only recognize .tar and .tar.gz and .tgz but .zip...
    """
    filelist = list()
    directory = directory.strip()
    # Extract a recognized archive to a sibling folder, then search there.
    if not os.path.isdir(directory) and is_recognized_repository_filetype(directory):
        import tarfile
        dirname = '_extracted_' + directory[:directory.find('.t')]
        tarfile.TarFile.open(directory).extractall(dirname)
        # TarFile.open handles tar.gz/tgz
        directory = dirname
        print( ' archive extracted to folder', directory, '...' )
    # Search through the directory directory and all its subfolders.
    for root, _dirs, files in os.walk(directory):
        if verbose:
            print( 'Searching in %s ...' % root )
        for elem in files:
            if elem.endswith('.info') or elem.endswith('.pickle') or elem.endswith('.pickle.gz'):
                filelist.append(os.path.join(root, elem))
    if verbose:
        print( 'Found %d file(s).' % (len(filelist)) )
    if not filelist:
        # BUG FIX: 'root' is unbound when os.walk yields nothing (e.g. the
        # directory does not exist); report the searched directory instead.
        warnings.warn('Could not find any file of interest in %s!' % directory)
    return filelist
if __name__ == '__main__':
main()
|
import threading
import time
from playsound import playsound
import pyttsx3
import random
import warnings
warnings.filterwarnings("ignore")
engine = pyttsx3.init()
def bgm():
    # Play the background music track (playsound blocks until it finishes).
    playsound('music.mp3')
t_bgm = threading.Thread(target=bgm)  # play background music on a worker thread
t_bgm.start()
# Word pools used to assemble four random "programmer poem" lines.
word1 = ['Python、', 'Java、', '数据库、', '.Net、', 'GO、', 'html、', 'C++、', 'web、']
word2 = ['前端、', '后端、', '运维、', '开发、', '测试、', '产品、', '算法、', '全栈、', '工程师、']
word3 = ['程序员、', '单身、', '写代码、', '码农、', 'Bug、', '秃头、',
'九九六、', '加班、', '头发、', '年终奖、', '女朋友、', '暴富、']
# NOTE(review): '他要''这' has no comma between the literals, so Python joins
# them into the single item '他要这' -- confirm whether a comma is missing.
word4 = ['我、', '他没', '他要''这', '那', '没了', '不过', '许我一场', '最后不过', '后来', '而']
# Each line: word1 + word2 + word2 + word4 + word3, all randomly chosen.
a = random.choice(word1) + random.choice(word2) + random.choice(word2) + \
random.choice(word4) + random.choice(word3)
b = random.choice(word1) + random.choice(word2) + random.choice(word2) + \
random.choice(word4) + random.choice(word3)
c = random.choice(word1) + random.choice(word2) + random.choice(word2) + \
random.choice(word4) + random.choice(word3)
d = random.choice(word1) + random.choice(word2) + random.choice(word2) + \
random.choice(word4) + random.choice(word3)
print(a)
print(b)
print(c)
print(d)
# Speak the four generated lines after a short pause.
msg = a + b + c + d
time.sleep(2)
engine.say(msg)
engine.runAndWait()
|
import functools
def rsetattr(obj, attr, val):
    """setattr that accepts dotted paths: rsetattr(o, 'a.b', v) sets o.a.b."""
    prefix, _, leaf = attr.rpartition('.')
    target = rgetattr(obj, prefix) if prefix else obj
    return setattr(target, leaf, val)
def rgetattr(obj, attr):
    """getattr that accepts dotted paths: rgetattr(o, 'a.b') reads o.a.b."""
    value = obj
    for part in attr.split('.'):
        value = getattr(value, part)
    return value
|
# https://leetcode.com/problems/add-binary/
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Add two binary strings and return their sum as a binary string.

        Digits are summed right-to-left with a carry after zero-padding the
        shorter input; appended least-significant first, so the result is
        reversed at the end. (Removed the unreachable dead code that followed
        the original return statement.)
        """
        res = ""
        carry = 0
        # Pad the shorter operand so both strings share one length.
        if len(b) > len(a):
            a = '0' * (len(b) - len(a)) + a
        elif len(a) > len(b):
            b = '0' * (len(a) - len(b)) + b
        for i in range(len(a) - 1, -1, -1):
            total = int(a[i]) + int(b[i]) + carry
            res += str(total % 2)
            carry = total // 2
        if carry == 1:
            res += '1'
        return res[::-1]
import time
import pytest
from selenium import webdriver
@pytest.mark.baidu
class TestBaidu:
    """UI test: search Baidu and verify the first result's title text."""
    def setup_method(self):
        # Fresh Chrome session per test, with a generous implicit wait.
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        self.base_url = "http://www.baidu.com/"
    # pytest.mark.parametrize: run once per (search, expected) pair.
    """pytest.mark.parametrize 多参数"""
    @pytest.mark.parametrize('search_string,expect_string',[('iTesting', 'iTesting'), ('helloqa.com', 'iTesting')])
    def test_baidu_search(self, search_string, expect_string):
        driver = self.driver
        driver.get(self.base_url + "/")
        driver.find_element_by_id("kw").send_keys(search_string)
        driver.find_element_by_id("su").click()
        time.sleep(2)
        # The first organic result's title HTML must contain the expected text.
        search_results = driver.find_element_by_xpath('//*[@id="1"]/h3/a').get_attribute('innerHTML')
        assert (expect_string in search_results) is True
    def teardown_method(self):
        self.driver.quit()
if __name__ == '__main__':
pytest.main(["-m","baidu","-s","-v","-k","test_baidu_search","test_baidu.py"])
|
from dataclasses import dataclass
from project_management.entities.task import Task
from project_management.basecamp3 import util
from datetime import datetime, timezone
@dataclass
class Basecamp3Task(Task):
    """Task subclass for Basecamp 3 with due-date normalization."""
    due_date: str = None      # 'YYYY-MM-DD' after __post_init__
    created_date:str = None   # creation timestamp, stored as received
    def __post_init__(self):
        # A due_date without '-' is presumably an epoch timestamp from the
        # Basecamp API -- convert to YYYY-MM-DD. TODO confirm upstream format.
        if not (self.due_date is None or "-" in self.due_date):
            format="%Y-%m-%d"
            self.due_date = util.epoch_to_format(format,self.due_date)
|
def song_playlist(songs, max_size):
    """
    songs: list of tuples, ('song_name', song_len, song_size)
    max_size: float, maximum size of total songs that you can fit

    Start with the song first in the 'songs' list, then repeatedly pick the
    not-yet-picked song with the smallest file size while it still fits.

    Returns: a list of a subset of song names fitting in 'max_size' in the
    order in which they were chosen. Empty when 'songs' is empty (the
    original raised IndexError) or the first song alone exceeds 'max_size'.
    """
    if not songs:
        return []
    first = songs[0]
    if first[2] > max_size:
        return []
    playlist = [first[0]]
    remaining = max_size - first[2]
    # Greedily take the smallest remaining song while it fits.
    for name, _length, size in sorted(songs[1:], key=lambda song: song[2]):
        if size > remaining:
            break
        playlist.append(name)
        remaining -= size
    return playlist
# Demo: greedy playlist from four sample songs with a 20.0 size budget.
print(song_playlist([('a', 4.4, 4.0), ('b', 3.5, 7.7), ('c', 5.1, 6.9), ('d', 2.7, 1.2)], 20))
import logging
logging.basicConfig(level=logging.INFO)
def partition(array, start, end):
    """Partition array[start:end+1] in place around the pivot array[start].

    Returns the pivot's final index; elements <= pivot end up left of it,
    larger elements right of it.

    The original littered stdout with print() traces; the module already
    configures logging, so the traces are now opt-in via logging.debug.
    """
    p_idx = start
    p = array[p_idx]
    logging.debug('p -> array: %s', array)
    while start < end:
        # Advance both cursors past elements already on the correct side.
        while start < len(array) and array[start] <= p:
            start = start + 1
        while array[end] > p:
            end = end - 1
        if start < end:
            array[start], array[end] = array[end], array[start]
        logging.debug('while array: %s', array)
    # Drop the pivot into its final slot.
    array[end], array[p_idx] = array[p_idx], array[end]
    logging.debug('final return array: %s', array)
    return end
def quickSort(a, start, end):
    """In-place quicksort of a[start:end+1]; returns the (same) list."""
    if start >= end:
        return a
    pivot_pos = partition(a, start, end)
    quickSort(a, start, pivot_pos - 1)
    quickSort(a, pivot_pos + 1, end)
    return a
# a = [5, 3, 2, 1, 6]
a = [38, 27, 43, 3, 9, 82, 10]
# end, array = partition(a, start=0, end=len(a) - 1)
# Demo run: sort the sample list in place and print it.
array = quickSort(a, start=0, end=len(a) - 1)
print(array)
|
#!/usr/bin/python3
##############################################################################
# (c)Copyright 2019 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#This python script has to be run from a client LPAR (Ex: AIX system)
#This script reads data either from DB2 database or csv file and sends data to server for inferencing
import requests
import time
global server_name, port_number
def predict_results(features):
    """Run an in-process model on *features*.

    NOTE(review): 'model' and 'graph' are never defined anywhere in this
    script -- this path appears to be dead code; predictions actually go
    through predict_using_rest_api.
    """
    global model, graph
    with graph.as_default():
        result = model.predict_classes(features)
    return result
def predict_using_rest_api(colheaders, features):
    """POST one feature row to the server's /predict endpoint.

    colheaders: feature names; features: values in the same order.
    Returns the scalar prediction unwrapped from the JSON response.
    """
    global server_name, port_number
    server_string = "http://" + server_name + ":" + port_number + "/predict"
    url = server_string
    headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
    data = [ { 'headers': colheaders, 'features': features }]
    prediction = requests.post(url, json=data, headers=headers).json()
    # Response shape: [{'prediction': [[value]]}] -- unwrap the nesting.
    pred=prediction[0]['prediction']
    extract_pred=pred[0]
    final_prediction=extract_pred[0]
    return final_prediction
def main():
    """Parse CLI arguments and dispatch to the DB2 or CSV prediction path.

    argv: <-db2|-csv> <server> <port> [merchant]
    """
    global server_name, port_number
    import sys
    if len(sys.argv)!= 4 and len(sys.argv)!= 5 :
        print_usage()
        sys.exit(1)
    option = sys.argv[1]
    server_name=sys.argv[2]
    port_number=sys.argv[3]
    if option!="-db2" and option!="-csv":
        print("Error: Invalid option " + option +". Please provide either -db2 or -csv as input")
        print_usage()
        sys.exit(1)
    if not port_number.isdigit():
        print("Error: Invalid Port Number")
        print_usage()
        sys.exit(1)
    # Optional 5th argument restricts scoring to one merchant.
    if (len(sys.argv) == 5):
        merchant=sys.argv[4]
    else:
        merchant=''
    # Feature column names expected by the served model.
    headers = ['ACCT_STATUS_K_USD', 'CONTRACT_DURATION_MONTH', 'HISTORY','CREDIT_PROGRAM', 'AMOUNT_K_USD', 'ACCOUNT_TYPE', 'ACCT_AGE', 'STATE','IS_URBAN', 'IS_XBORDER', 'SELF_REPORTED_ASMT', 'CO_APPLICANT', 'GUARANTOR', 'PRESENT_RESIDENT', 'OWN_REAL_ESTATE', 'PROP_UNKN', 'ESTABLISHED_MONTH', 'OTHER_INSTALL_PLAN', 'RENT', 'OWN_RESIDENCE', 'NUMBER_CREDITS', 'RFM_SCORE', 'BRANCHES', 'TELEPHONE', 'SHIP_INTERNATIONAL']
    if option=="-db2":
        predict_fromdb2(headers,merchant)
    if option=="-csv":
        predict_fromcsv(headers,merchant)
def _print_default_prediction(p):
    """Print the human-readable meaning of a 0/1 default prediction."""
    if p==0:
        print("Prediction value is 0. This account might not default\n")
    else:
        print("Prediction value is 1. This account might default\n")
def predict_fromcsv(headers,merchant):
    """Read rows from new_customers.csv and request a prediction for each.

    headers: feature column names sent with every request.
    merchant: when non-empty, only that merchant's row is scored.
    """
    import ast
    merchant_present=0
    filename="new_customers.csv"
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename,'r') as csv_file:
        lines=csv_file.read().splitlines()
    # First line holds column names; the rest are data rows.
    lines_data=lines[1:]
    if not merchant:
        merchant_present=1
    for row in lines_data:
        # Each row is a Python-literal tuple: (merchant, feature1, ...).
        row=ast.literal_eval(row)
        merchant_name=row[0]
        if merchant and merchant!=merchant_name:
            continue
        # Features only, after dropping the merchant name.
        final_row=row[1:]
        print ("Data sent to server:")
        print ("====================")
        print (row)
        p = predict_using_rest_api(headers,list(final_row))
        merchant_present=1
        _print_default_prediction(p)
    if merchant_present == 0:
        print("No such merchant with name " + merchant)
def predict_fromdb2(headers,merchant):
    """Read rows from the NEW_CUSTOMERS DB2 table and score each via REST.

    headers: feature column names sent with every request.
    merchant: when non-empty, only that merchant's rows are scored.
    """
    import ibm_db_dbi as db
    import sys
    # SECURITY FIX: the merchant name was concatenated into the SQL string
    # (injection risk); use a parameterized query instead.
    if not merchant:
        query = 'SELECT * FROM NEW_CUSTOMERS'
        params = ()
    else:
        query = 'SELECT * FROM NEW_CUSTOMERS where merchant=?'
        params = (merchant,)
    conn = db.connect("DATABASE=LOANDB;UID=db2inst1;PWD=db2inst1")
    try:
        cur = conn.cursor()
        cur.execute(query, params)
        rows = cur.fetchall()
    finally:
        # The original never closed the connection.
        conn.close()
    if len(rows)== 0:
        print("No matching data found in database")
        sys.exit(1)
    i = 1
    for row in rows:
        # First column is the merchant name; the rest are features.
        data = row[1:]
        print ("Processing record " + str(i))
        i = i + 1
        p = predict_using_rest_api(headers,list(data))
        print ("Data sent to server:")
        print ("====================")
        print (row)
        print ("------------------------------------------------------------------------")
        if p==0:
            print("Prediction value is 0. This account might not default\n")
        else:
            print("Prediction value is 1. This account might default\n")
def print_usage():
    """Print the command-line usage help for flask-aix-client.py."""
    usage_lines = (
        "Usage: python flask-aix-client.py <-db2|-csv> <Linux LPAR> <Port Number> [merchant_name]",
        "<-db2>: if your data is in db2 database",
        "<-csv>: if your data is in csv format",
        "<Server Name>: Name or IP address of Linux LPAR where flask server is running",
        "<Port Number>: Port number on which server is running",
        "[merchant_name]: Optional argument. Input merchant name.",
    )
    for usage_line in usage_lines:
        print(usage_line)
# Script entry point: parse CLI arguments and dispatch to the csv/db2 path.
if __name__ == '__main__':
    main()
|
from estructuras import *
def ucs(g, s, m):
    """Uniform-cost search from node `s` to node `m` on graph `g`.

    Returns a tuple (acumulado, cost) where `acumulado` maps every
    settled node to its cheapest known cost from `s`, and `cost` is the
    cheapest cost of reaching `m` (None if `m` was never reached).
    """
    frontera = ColaPriorizada()
    frontera.put(0, s)
    # Predecessor map (kept for path reconstruction by callers).
    anteriores = {s: None}
    # Cheapest known cost from s to each discovered node.
    acumulado = {s: 0}
    while not frontera.esVacia():
        actual = frontera.get()
        if actual == m:
            break
        for vecino in g.vecinos(actual):
            costo = acumulado[actual] + g.costo(actual, vecino)
            # Relax the edge if vecino is new or we found a cheaper route.
            if vecino not in acumulado or costo < acumulado[vecino]:
                acumulado[vecino] = costo
                frontera.put(costo, vecino)
                anteriores[vecino] = actual
    # Fix: report the cost of the goal m (None when unreachable) instead
    # of acumulado[actual], which was the cost of whatever node happened
    # to be expanded last when the frontier emptied without reaching m.
    return acumulado, acumulado.get(m)
def main():
    """Load the weighted graph from espana.txt and run UCS Coruna -> Vigo.

    Each line of the file is "city1,city2,cost".
    """
    g = Grafo()
    with open('espana.txt') as f:
        for linea in f:
            (c1, c2, c) = linea.strip().split(',')
            # Fix: convert the cost to a number -- the original passed the
            # raw string (with trailing newline), so ucs() would compare
            # and add costs as text instead of arithmetically.
            g.aAristaPeso(c1, c2, float(c))
    print(ucs(g, 'Coruna', 'Vigo'))
# Run the demo shortest-path query when executed as a script.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
'UDP server' #doc comment
# NOTE(review): likely a misspelling of the conventional __author__.
__autor__ = 'myth'
import socket
# Datagram (UDP) socket over IPv4.
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
# Bind to the local port:
s.bind(('127.0.0.1',9999))
print('Bind UDP on 9999...')
# Echo loop: blocks forever, greeting every datagram it receives.
while True:
    # Receive up to 1024 bytes; addr is the sender's (host, port) tuple.
    data,addr = s.recvfrom(1024)
    print('Received from %s:%s.' %addr)
    s.sendto(b'Hello, %s!' %data,addr)
class Solution:
    """Classic searching and sorting routines: binary search,
    quick sort, and merge sort."""

    def binSearch(self, nums, target):
        """Return the index of target in the sorted list nums, or -1."""
        if not nums:
            return -1
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            middle = (lo + hi) // 2
            value = nums[middle]
            if value == target:
                return middle
            if value > target:
                hi = middle - 1
            else:
                lo = middle + 1
        return -1

    def sortArray(self, nums):
        """Sort nums in place with quick sort and return it."""
        if not nums:
            return nums

        def qsort(arr, lo, hi):
            # Recurse on the two sides of the pivot's final position.
            if lo >= hi:
                return
            split = self.partition(arr, lo, hi)
            qsort(arr, lo, split - 1)
            qsort(arr, split + 1, hi)

        qsort(nums, 0, len(nums) - 1)
        return nums

    def partition(self, nums, low, high):
        """Move nums[low] (the pivot) to its sorted slot; return that index."""
        pivot = nums[low]
        while low < high:
            # Shrink from the right past values not smaller than the pivot.
            while low < high and nums[high] >= pivot:
                high -= 1
            nums[low] = nums[high]
            # Grow from the left past values smaller than the pivot.
            while low < high and nums[low] < pivot:
                low += 1
            nums[high] = nums[low]
        nums[high] = pivot
        return high

    def mergeSortArry(self, nums):
        """Sort nums with merge sort and return it."""
        if not nums:
            return nums
        self.mergeSort(nums, 0, len(nums) - 1)
        return nums

    def mergeSort(self, nums, low, high):
        """Recursively sort nums[low..high] (inclusive) in place."""
        if low >= high:
            return nums
        mid = (low + high) // 2
        # Sort each half, then merge the two sorted runs.
        self.mergeSort(nums, low, mid)
        self.mergeSort(nums, mid + 1, high)
        merged = []
        left, right = low, mid + 1
        # Repeatedly take the smaller head element of the two runs.
        while left <= mid and right <= high:
            if nums[left] < nums[right]:
                merged.append(nums[left])
                left += 1
            else:
                merged.append(nums[right])
                right += 1
        # Drain whichever run still has elements left.
        merged.extend(nums[left:mid + 1])
        merged.extend(nums[right:high + 1])
        # Copy the merged run back into the original slice.
        nums[low:low + len(merged)] = merged
        return nums
if __name__ == "__main__":
sol = Solution()
# 1. 二分查找
# nums = [-1, 0, 3, 5, 9, 12]
# target = 0
# print(sol.binSearch(nums, target))
# 2. 快速排序
# nums = [5, 1, 1, 2, 0, 0]
# print(sol.sortArray(nums))
# 3. 归并排序
nums = [7, 5, 6, 4]
print(sol.mergeSortArry(nums))
|
#own for loop and range function
def special_for(iterable):
iterator=iter(iterable)
while True:
try:
print(iterator)
print(next(iterator))
except StopIteration:
break
special_for([1,2,3])
#own range function
class MyGen():
    """An iterator over [first, last), mimicking range(first, last).

    Bug fix: the original kept the cursor in the class attribute
    MyGen.current, which (a) ignored the `first` argument entirely and
    (b) was shared by every instance, so a second MyGen could never
    iterate from its own start.  The cursor is now per-instance and
    begins at `first`.
    """
    def __init__(self,first,last):
        self.first=first
        self.last=last
        # Per-instance cursor, starting at the requested first value.
        self.current=first
    def __iter__(self):
        return self
    def __next__(self):
        if self.current<self.last:
            number=self.current
            self.current+=1
            return number
        raise StopIteration
# Iterate the custom generator exactly like range(0, 100).
for value in MyGen(0, 100):
    print(value)
import math
def productPrimeFactors(n):
    """Return the product of the distinct prime factors of n.

    Fixes over the original:
    - integer division (//=) so arbitrarily large ints stay exact --
      true division (/) converted n to float and silently corrupted
      results for big inputs like the 45-digit demo value;
    - candidate bound `i * i <= n` instead of a range frozen at
      int(math.sqrt(n)), which was off by one (e.g. returned 9 for
      n = 9 instead of 3) and never shrank as factors were divided out.
    """
    product = 1
    # Handle the prime factor 2 explicitly so the main loop can step by 2.
    if n % 2 == 0:
        product *= 2
        while n % 2 == 0:
            n //= 2
    # n is odd here; test only odd candidates up to the current sqrt(n).
    i = 3
    while i * i <= n:
        if n % i == 0:
            product *= i
            # Divide i out completely so it is counted once.
            while n % i == 0:
                n //= i
        i += 2
    # Whatever remains above 2 is itself a prime factor.
    if n > 2:
        product *= n
    return product
# main()
# NOTE(review): this input has ~45 digits; trial division up to its square
# root cannot finish in reasonable time unless all its prime factors are
# small -- confirm before running this as a script.
n = 510143758735509025530880200653196460532653147
# int() wrapper kept because the implementation historically returned a
# float (via / division) for some inputs.
print (int(productPrimeFactors(n)))
|
import requests
class InvalidTokenException(Exception):
    """Raised when a GitHub token is missing or rejected by the API."""
class Config(object):
    """Holds a GitHub API token and the username it resolves to.

    Raises InvalidTokenException when the token is empty or rejected
    by the GitHub API.
    """

    def __init__(self, github_token):
        if not github_token:
            raise InvalidTokenException()
        self.github_token = github_token
        # Resolve the username eagerly so a bad token fails fast.
        self.github_username = self.fetch_github_username()

    def fetch_github_username(self):
        """Return the GitHub login owning self.github_token.

        Raises InvalidTokenException on any non-200 response.
        """
        response = requests.get(
            'https://api.github.com/user',
            headers={
                'Authorization': 'token ' + self.github_token
            },
            # Fix: without a timeout a stalled connection hangs forever.
            timeout=10,
        )
        if response.status_code != 200:
            raise InvalidTokenException()
        return response.json()['login']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.