index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
4,400 | b227f222569761493f50f9dfee32f21e0e0a5cd6 | #Copyright 2008, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
#import Numeric as nu
import math
import os
import sys
import yaml
import m3.unit_conversion as m3u
from m3qa.calibrate import *
from m3qa.calibrate_sensors import *
from m3qa.calibrate_actuator_ec_r1 import *
import m3.actuator_ec_pb2 as aec
# ####################################################################################################
# Default factory calibration/configuration for gripper G1, joint J0
# (Left Digit). Selected at runtime by joint id in
# M3Calibrate_Gripper_G1R1.start().
config_default_g1_j0={
    'calib':{
        'motor':{
            'name': 'Maxon RE13 2.5W 24V',
            'winding_resistance': 53.2,#Ohm
            'winding_inductance':1.79,#mH
            'torque_constant':19.7, #mNm/A
            'thermal_resistance_housing_ambient': 33.0,#K/W
            'thermal_resistance_rotor_housing': 7.0,#K/W
            'max_winding_temp': 85, #C
            'gear_ratio': 275.0,
            'thermal_time_constant_winding': 4.85, #S
            'thermal_time_constant_motor':346, #S
            'temp_sensor_type':'housing'
        },
        'theta':{
            'type': 'ma3_12bit',
            'name': 'US Digital MA3',
            'cb_scale': 1.0,
            'cb_bias': 0.0},
        'amp_temp':{
            'type': 'adc_linear_3V3', #3V3 supply, no divider
            'name': 'Microchip TC1047',
            'cb_mV_at_25C': 750.0,
            'cb_mV_per_C': 10.0,
            'cb_scale': 1.0,
            'cb_bias': 0.0,
        },
        'motor_temp':{
            'type': 'adc_linear_3V3', #5V supply, no divider
            'name': 'Analog TMP36',
            'cb_mV_at_25C': 750.0,
            'cb_mV_per_C': 10.0,
            'cb_scale': 1.0,
            'cb_bias': 0.0},
        'torque':{
            'type': 'adc_poly',
            'name': 'Allegro A1321',
            'cb_inv_torque': [1,0],
            'cb_torque': [1,0],
            'cb_scale': 1.0,
            'cb_bias': 0.0},
        'current':{
            'type': 'none',
            'cb_scale': 0.0,
            'cb_bias': 0.0},
    },
    'param':{
        'max_amp_temp': 100.0,
        'max_current': 800,
        'max_motor_temp': 75.0,
        'max_tq': 150.0,
        'min_tq': -30.0,
        'thetadot_deadband': 1.0
    },
    'param_internal':
    {
        'calib_tq_degree':1,
        'pwm_theta':[-800,800],
        # NOTE(review): both entries are -1000; calibrate_torque drives the
        # joint in two opposite directions, so the second entry was
        # probably meant to be +1000 -- confirm before relying on it.
        'pwm_torque':[-1000,-1000],
        'joint_limits':[0,315.0]
    }
}
# Default factory calibration/configuration for gripper G1, joint J1
# (Right Digit). Identical to the J0 defaults above except for the
# motor_temp comment; kept as a separate literal so each joint can be
# tuned independently.
config_default_g1_j1={
    'calib':{
        'motor':{
            'name': 'Maxon RE13 2.5W 24V',
            'winding_resistance': 53.2,#Ohm
            'winding_inductance':1.79,#mH
            'torque_constant':19.7, #mNm/A
            'thermal_resistance_housing_ambient': 33.0,#K/W
            'thermal_resistance_rotor_housing': 7.0,#K/W
            'max_winding_temp': 85, #C
            'gear_ratio': 275.0,
            'thermal_time_constant_winding': 4.85, #S
            'thermal_time_constant_motor':346, #S
            'temp_sensor_type':'housing'
        },
        'theta':{
            'type': 'ma3_12bit',
            'name': 'US Digital MA3',
            'cb_scale': 1.0,
            'cb_bias': 0.0},
        'amp_temp':{
            'type': 'adc_linear_3V3', #3V3 supply, no divider
            'name': 'Microchip TC1047',
            'cb_mV_at_25C': 750.0,
            'cb_mV_per_C': 10.0,
            'cb_scale': 1.0,
            'cb_bias': 0.0,
        },
        'motor_temp':{
            'type': 'adc_linear_3V3', #5V supply, no divider=3V3
            'name': 'Analog TMP36',
            'cb_mV_at_25C': 750.0,
            'cb_mV_per_C': 10.0,
            'cb_scale': 1.0,
            'cb_bias': 0.0},
        'torque':{
            'type': 'adc_poly',
            'name': 'Allegro A1321',
            'cb_inv_torque': [1,0],
            'cb_torque': [1,0],
            'cb_scale': 1.0,
            'cb_bias': 0.0},
        'current':{
            'type': 'none',
            'cb_scale': 0.0,
            'cb_bias': 0.0},
    },
    'param':{
        'max_amp_temp': 100.0,
        'max_current': 800,
        'max_motor_temp': 75.0,
        'max_tq': 150.0,
        'min_tq': -30.0,
        'thetadot_deadband': 1.0
    },
    'param_internal':
    {
        'calib_tq_degree':1,
        'pwm_theta':[-800,800],
        # NOTE(review): both entries are -1000 (see J0 defaults) -- confirm.
        'pwm_torque':[-1000,-1000],
        'joint_limits':[0,315.0]
    }
}
class M3Calibrate_Gripper_G1R1(M3CalibrateActuatorEcR1):
def __init__(self):
M3CalibrateActuatorEcR1.__init__(self)
self.joint_names=['Left Digit J0',
'Right Digit J1']
self.config_default=[
config_default_g1_j0,
config_default_g1_j1]
def start(self,ctype):
if not M3CalibrateActuatorEcR1.start(self,ctype):
return False
self.jid=int(self.comp_ec.name[self.comp_ec.name.find('_j')+2:])
self.calib_default=self.config_default[self.jid]['calib']
self.param_default=self.config_default[self.jid]['param']
self.param_internal=self.config_default[self.jid]['param_internal']
print 'Calibrating joint',self.joint_names[self.jid]
return True
def do_task(self,ct):
if ct=='ch':
self.reset_sensor('torque')
self.calibrate_torque()
self.write_config()
return True
if ct=='tt':
self.reset_sensor('theta')
self.calibrate_theta()
self.write_config()
return True
if M3CalibrateActuatorEc.do_task(self,ct):
return True
return False
def print_tasks(self):
M3CalibrateActuatorEcR1.print_tasks(self)
print 'ch: calibrate torque'
print 'tt: calibrate theta'
def display_sensors(self):
M3CalibrateActuatorEcR1.display_sensors(self)
q_on=self.comp_ec.status.qei_on
q_p=self.comp_ec.status.qei_period
q_r=self.comp_ec.status.qei_rollover
c=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on,q_p,q_r)
pos=1000.0*math.pi*2*self.comp_j.config['calib']['cb_drive_radius_m']*c/360.0
print 'Pos: (mm) : '+'%3.3f'%pos+' Qei On '+'%d'%q_on+' Qei Period '+'%d'%q_p+' Qei Rollover '+'%d'%q_r
raw=self.comp_ec.status.adc_torque
c=self.torque.raw_2_mNm(self.comp_rt.config['calib']['torque'],raw)
mN=c/self.comp_j.config['calib']['cb_drive_radius_m']
print 'Force: (g) : '+'%3.2f'%m3u.mN2g(mN)+' (mN): '+'%3.2f'%mN+' (ADC) '+'%d'%raw
def calibrate_torque(self):
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Place digit in zero load condition'
print 'Hit enter when ready'
raw_input()
self.step()
raw_a=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_a=0
print 'Hang 1Kg weight from gripper near slider'
print 'Hit enter to move joint in first direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_torque'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_b=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
print 'Was load in the opening direction [y]?'
if m3t.get_yes_no('y'):
load_b=m3u.g2mN(1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
else:
load_b=m3u.g2mN(-1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
print 'Hit enter to move joint in second direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_torque'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_c=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_c=-1*load_b
log_adc_torque=[raw_a,raw_b,raw_c]
log_load_mNm=[load_a,load_b,load_c]
poly,inv_poly=self.get_polyfit_to_data(x=log_adc_torque,y=log_load_mNm,n=1)
self.write_raw_calibration({'log_adc_torque':log_adc_torque,'log_load_mNm':log_load_mNm,
'cb_torque':poly,'cb_inv_torque':inv_poly})
self.comp_rt.config['calib']['torque']['cb_torque']=poly
self.comp_rt.config['calib']['torque']['cb_inv_torque']=inv_poly
print 'Poly',poly
s=m3tc.PolyEval(poly,[raw_a,raw_b,raw_c])
m3t.mplot2(range(len(log_adc_torque)),log_load_mNm,s,xlabel='Samples',ylabel='Torque (mNm)',
y1name='load',y2name='raw')
def calibrate_theta(self):
pconfig=self.comp_ec.param.config #disable qei limits
self.comp_ec.param.config=0
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Moving joint to first limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_a=self.comp_ec.status.qei_on
q_p_a=self.comp_ec.status.qei_period
q_r_a=self.comp_ec.status.qei_rollover
print 'RawA',q_on_a
print 'Moving joint to second limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_b=self.comp_ec.status.qei_on
q_p_b=self.comp_ec.status.qei_period
q_r_b=self.comp_ec.status.qei_rollover
print 'Rawb',q_on_b
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'Did this last motion open the gripper [y]?' #At zero position
if m3t.get_yes_no('y'):
theta_b=0
theta_a=abs(theta_bs-theta_as)
else:
theta_a=0
theta_b=abs(theta_bs-theta_as)
self.comp_rt.set_mode_off()
self.comp_ec.param.config=pconfig #enable qei limits
self.step()
self.proxy.make_safe_operational(self.name_rt)
self.step()
print 'Raw',[theta_as,theta_bs]
print 'True',[theta_a,theta_b]
poly,inv_poly=self.get_polyfit_to_data([theta_as,theta_bs],[theta_a,theta_b],n=1)
self.comp_rt.config['calib']['theta']['cb_scale']=poly[0]
self.comp_rt.config['calib']['theta']['cb_bias']=poly[1]
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'New calibrated range',theta_as,theta_bs
max_q=max(theta_as,theta_bs)
min_q=min(theta_as,theta_bs)
if self.comp_j is not None:
print 'Setting joint limits to',min_q,max_q
print 'Expected joint limits of',self.param_internal['joint_limits']
self.comp_j.param.max_q=float(max_q)
self.comp_j.param.min_q=float(min_q)
else:
print 'Joint component missing. Unable to set joint limits to',min_q,max_q
#Assume 0-Ndeg, where N is defined by the encoder soft limits
self.comp_ec.config['param']['qei_min']=min(q_on_a,q_on_b)+100
self.comp_ec.config['param']['qei_max']=max(q_on_a,q_on_b)-100
self.comp_ec.param.qei_min=min(q_on_a,q_on_b)+100
self.comp_ec.param.qei_max=max(q_on_a,q_on_b)-100
print 'Setting DSP qei min/max to',self.comp_ec.config['param']['qei_min'],self.comp_ec.config['param']['qei_max']
|
4,401 | d0e957abfe5646fb84aed69902f2382d554dc825 |
from calc1 import LispTranslator, RPNTranslator, Parser, Lexer
import unittest
class TestTranslators(unittest.TestCase):
    """Exercises the calc1 RPN and Lisp translators on small arithmetic
    expressions, checking operator precedence and parenthesization."""

    def init_rpn(self, program):
        # Helper: build a reverse-polish-notation translator for `program`.
        return RPNTranslator(Parser(Lexer(program)))

    def init_lisp(self, program):
        # Helper: build a Lisp (prefix s-expression) translator for `program`.
        return LispTranslator(Parser(Lexer(program)))

    def test_simple_rpn(self):
        self.assertEqual(self.init_rpn('2 + 3').interpret(), '2 3 +')
        self.assertEqual(self.init_rpn('2 + 3 + 5').interpret(), '2 3 + 5 +')
        # '*' binds tighter than '+', so it is emitted first.
        self.assertEqual(self.init_rpn('2 + 3 * 5').interpret(), '2 3 5 * +')
        self.assertEqual(self.init_rpn('(2 + 3) * 5').interpret(), '2 3 + 5 *')

    def test_simple_lisp(self):
        self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')
        self.assertEqual(self.init_lisp('2 + 3 + 5').interpret(), '(+ (+ 2 3) 5)')
        self.assertEqual(self.init_lisp('2 + 3 * 5').interpret(), '(+ 2 (* 3 5))')
        self.assertEqual(self.init_lisp('(2 + 3) * 5').interpret(), '(* (+ 2 3) 5)')

    def test_examples_chapter_seven(self):
        # Worked examples from chapter 7 of the interpreter tutorial.
        self.assertEqual(self.init_rpn('(5 + 3) * 12 DIV 3').interpret(), '5 3 + 12 * 3 DIV')
        self.assertEqual(self.init_lisp('2 + 3').interpret(), '(+ 2 3)')
        self.assertEqual(self.init_lisp('(2 + 3 * 5)').interpret(), '(+ 2 (* 3 5))')

if __name__ == '__main__':
    unittest.main()
|
4,402 | 8c2920db7fc49d56aa8da6289cd22272ed3e3283 | from django.apps import AppConfig
class ShortenConfig(AppConfig):
    """Django application configuration for the 'shorten' app."""
    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'shorten'
|
4,403 | 29ec576d1fe04108eeb03a5d1b167671d3004570 | # Copyright 2017-2018 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License MIT (https://opensource.org/licenses/MIT).
from datetime import datetime, timedelta
from odoo import fields
from odoo.tests.common import TransactionCase
class TestCase(TransactionCase):
    """Test case that provisions a signup-enabled event record for use by
    registration tests."""

    def setUp(self):
        super(TestCase, self).setUp()
        # Event runs from tomorrow for two weeks; attendee signup and
        # automatic partner creation are enabled.
        self.event = self.env["event.event"].create(
            {
                "name": "TestEvent",
                "attendee_signup": True,
                "create_partner": True,
                "date_begin": fields.Datetime.to_string(
                    datetime.today() + timedelta(days=1)
                ),
                "date_end": fields.Datetime.to_string(
                    datetime.today() + timedelta(days=15)
                ),
            }
        )
|
4,404 | f50c9aec85418553f4724146045ab7c3c60cbb80 | import numpy as np
from sklearn.preprocessing import OneHotEncoder
def formator(value):
    """Render *value* as a string with exactly two decimal places."""
    return "{0:.2f}".format(value)
def features_preprocessor(datasetLocation):
    """Load a CSV dataset, one-hot encode its categorical columns, and
    return rows of two-decimal-place string values.

    Python 2 module (print statement below).
    """
    # Only the first 41 columns are read as features.
    data = np.genfromtxt(datasetLocation,delimiter=",",usecols=range(41)) ##!!! usecols = range(41)
    # Columns 1-3 are treated as categorical and expanded to one-hot.
    # NOTE(review): `categorical_features` was deprecated and later removed
    # from scikit-learn (ColumnTransformer replaces it) -- confirm the
    # pinned sklearn version before upgrading.
    encoder = OneHotEncoder(categorical_features=[1,2,3])
    encoder.fit(data)
    trainingData_features = encoder.transform(data).toarray().tolist()
    formatted_trainingData = [[formator(v) for v in r] for r in trainingData_features] # Convert float valued list to string.
    print len(formatted_trainingData[0])
    return formatted_trainingData
|
4,405 | 459bd36037158c9a6a38da6eadf45a3dc6f19e04 | import os
import sys
import requests
import urllib.parse
import urllib.request
import json
from shutil import copyfile
def sdssDownload(band, location, size, path):
    """Populate *path* with raw SDSS DR7 images covering a region of sky.

    Queries the Montage archive-list service for image metadata, then
    copies each image from the local SDSS DAS mirror into *path*.

    Parameters
    ----------
    band: str
        SDSS wavelength band (e.g. "g").
    location: str
        Coordinates or name of an astronomical object
        (e.g. "4h23m11s -12d14m32.3s", "Messier 017").
    size: float
        Region size in degrees.
    path: str
        Directory for output files.

    Returns
    -------
    dict
        {'status': 0, 'count': nimages} on success, or
        {'status': 1, 'msg': reason} on failure.
    """
    debug = 0

    # Build the URL to get image metadata.
    url = ("http://montage.ipac.caltech.edu/cgi-bin/ArchiveList/nph-archivelist?survey=SDSSDR7+"
           + urllib.parse.quote_plus(band)
           + "&location="
           + urllib.parse.quote_plus(location)
           + "&size="
           + str(size) + "&units=deg&mode=JSON")

    if debug:
        print('DEBUG> url = "' + url + '"')

    # Retrieve the image metadata and convert the JSON (a list with one
    # entry per image) to Python objects.
    fjson = urllib.request.urlopen(url)
    data = json.load(fjson)

    if debug:
        print("DEBUG> data: ")
        print(data)

    nimages = len(data)

    if debug:
        print("DEBUG> nimages = " + str(nimages))

    # Ensure the output directory exists and is usable. exist_ok avoids a
    # race between the exists() check and makedirs() in the original.
    rtn = {}
    try:
        os.makedirs(path, exist_ok=True)
    except OSError:
        # BUGFIX: original used a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; only filesystem errors apply here.
        rtn['status'] = 1
        rtn['msg'] = 'Cannot create output directory.'
        return rtn

    # Copy every image from the local DAS mirror into the data directory.
    try:
        for index in range(nimages):
            datafile = path + "/" + data[index]['file']
            url = data[index]['url']
            # Images are served from a local mirror of the SDSS DAS tree.
            archivefile = url.replace('http://das.sdss.org',
                                      '/home/idies/workspace/sdss_das/das2')
            if debug:
                print('copy file ' + archivefile + ' to ' + datafile)
            copyfile(archivefile, datafile)
    except Exception:
        rtn['status'] = 1
        rtn['msg'] = 'Error reading or writing data'
        return rtn

    # Success
    rtn['status'] = 0
    rtn['count'] = nimages
    return rtn
|
4,406 | 468c070aebff3124927c5595d68bb94321dd75e5 | import datetime
if __name__ == "__main__" :
    # Demonstrates list aliasing in dict comprehensions.
    keys = {'a','e','i', 'o', 'u', 'y'}
    values = [1]
    # list(values) makes a fresh copy per key, so each entry gets an
    # independent [1] unaffected by later mutation of `values`.
    dictionnaire = {cle : list(values) for cle in keys}
    print("dictionnaire : ", dictionnaire)
    values.append(2)
    # Earlier experiments, kept commented out for reference:
    #for cle in keys : dictionnaire.update({cle:values})
    #dictionnaire.update({cle2 : list(values) for cle2 in keys})
    #dictionnaire = {cle : list(values) for cle in keys}
    #for cle in list(dictionnaire) : dictionnaire.update({cle:values})
    # Here every key is rebound to the SAME list object `values`, so all
    # entries now show [1, 2] and would change together afterwards.
    for cle in dictionnaire.keys() : dictionnaire.update({cle:values})
    print("dictionnaire : ", dictionnaire)
|
4,407 | cc87682d4ebb283e2d0ef7c09ad28ba708c904bd | # stopwatch.py - A simple stopwatch program.
import time
# Display the program's instructions
print(
    """ \n\nInstructions\n
press Enter to begin.\n
Afterwards press Enter to "click" the stopwatch.\n
Press Ctrl-C to quit"""
)
input()  # press Enter to begin
print("Started")
startTime = time.time()  # overall start of the session
lastTime = startTime     # start of the current lap
lapNum = 1

# TODO: start tracking the lap times.
try:
    # Each Enter press ends a lap: report total elapsed time and lap time.
    while True:
        input()
        lapTime = round(time.time() - lastTime, 2)
        totalTime = round(time.time() - startTime, 2)
        print(f"Lap #{lapNum}: {totalTime} {lapTime}", end="")
        lapNum += 1
        lastTime = time.time()  # reset the last lap time
except KeyboardInterrupt:
    # handle the ctrl-c exception to keep its message from displaying.
    print("\nDone")
|
4,408 | d436362468b847e427bc14ca221cf0fe4b2623e3 | from flask_restful import Resource, reqparse
from db import query
import pymysql
from flask_jwt_extended import jwt_required
"""
This module is used to retrieve the data
for all the request_no's which have a false or a 0 select_status.
This is done by selecting distinct request_no's from requests table
for those rows where select_status = 0
"""
# This resource is for the admin to obtain all the rows in the requests table
# with a particular request_no
# This resource is for the admin to obtain all the rows in the requests table
# with a particular request_no
class AdminReqNoDetails(Resource):
    """GET: all request rows (r_id, request_no, image) for one request_no.

    Requires a valid JWT.
    """
    @jwt_required
    def get(self):
        parser = reqparse.RequestParser()
        # type=int rejects non-numeric input before it reaches the query.
        parser.add_argument('request_no', type=int, required=True, help="request_no cannot be left blank!")
        data = parser.parse_args()
        # create query string
        # NOTE(review): the value is interpolated into SQL directly. type=int
        # above constrains it, but a parameterized query would be safer --
        # confirm whether db.query supports bound parameters.
        qstr = f""" SELECT r_id,request_no,image FROM requests WHERE request_no = {data['request_no']}; """
        try:
            return query(qstr)
        except:
            # Any failure is reported as a generic 500 payload.
            return {
                "message" : "There was an error connecting to the requests table while retrieving."
            }, 500
|
4,409 | 9004314951f77b14bab1aba9ae93eb49c8197a8d | # B. A New Technique
# TLE (Time limit exceeded)
from sys import stdin, stdout
# For each test case: read an n x m matrix by rows, then find which of the
# m column-lines matches the multiset of first elements of the rows; that
# column gives the original row order.
t = int(input())
for _ in range(t):
    n, m = map(int, input().split())
    rows = [0] * n
    a_column = list()  # first element of each row, in input order
    for r in range(n):
        tmp = list(input().split())
        rows[r] = tmp
        a_column.append(tmp[0])
    sorted_a_column = sorted(a_column)
    found = False
    for c in range(m):
        if not found:
            tmp_c = list(input().split())
            # A column whose sorted values equal the sorted first-elements
            # is the column of row heads; it fixes the row order.
            if sorted(tmp_c) == sorted_a_column:
                found = True
                output = str()
                for num in tmp_c:
                    # NOTE(review): list.index is O(n) per lookup -- likely
                    # a contributor to the TLE noted in the header.
                    index = a_column.index(num)
                    output += ' '.join(rows[index])
                    output += '\n'
                print(output, end='')
        else:
            # Skip remaining column lines without the overhead of input().
            stdin.__next__()
|
4,410 | b976dab3c621bb929eb488fa7f4394666efec2ed | import os
import json
from threading import Thread
import time
from time import sleep
from flask import Flask, json, render_template, request
import redis
from collections import OrderedDict
import requests
from Queue import Queue
# Aggregator endpoint that collects per-application instance/thread info.
REGISTRAR_URL = 'http://cuteparty-registrar1.cfapps.io/update'
app = Flask(__name__)
# Cloud Foundry supplies the port and bound-service credentials via env vars.
port = int(os.getenv("PORT"))
vcap = json.loads(os.environ['VCAP_SERVICES'])
svc = vcap['rediscloud'][0]['credentials']
# Redis hash keyed by application name: instance index -> thread count.
db = redis.StrictRedis(host=svc["hostname"], port=svc["port"], password=svc["password"],db=0)
application_name = json.loads(os.environ['VCAP_APPLICATION'])['application_name']
class Producer(Thread):
    """
    Background thread that detects when this CF instance is not yet
    registered in redis and enqueues its instance index.
    """
    def __init__(self,queue):
        """
        Constructor; remembers the shared work queue.
        """
        Thread.__init__(self)
        self.queue = queue
    def run(self):
        """
        Poll redis forever; enqueue this instance's index if unknown.

        NOTE(review): tight loop with no sleep -- this busy-spins a CPU;
        the bare except also silently hides redis connectivity errors.
        """
        while True :
            try:
                instance_id = os.getenv("CF_INSTANCE_INDEX")
                mydict = db.hgetall(application_name)
                if instance_id not in mydict :
                    self.queue.put(instance_id)
            except :
                pass
            finally:
                pass
class Consumer(Thread):
    """
    Background thread that drains instance indices from the queue and
    records them in redis with an initial thread count of 1.
    """
    def __init__(self,queue):
        """
        Constructor; remembers the shared work queue.
        """
        Thread.__init__(self)
        self.queue = queue
    def run(self):
        """
        Block on the queue forever, registering each received instance.

        NOTE(review): the bare except silently hides redis errors.
        """
        while True :
            try :
                instance_id = self.queue.get()
                db.hset(application_name,instance_id,1)
            except:
                pass
            finally:
                pass
class MasterUpdater(Thread):
    """
    Background thread that periodically pushes this application's
    instance/thread info to the registrar/aggregator app.
    """
    def __init__(self,db,appname):
        """
        Constructor; remembers the redis handle and application name.
        """
        Thread.__init__(self)
        self.db = db
        self.appname = appname
    def run(self):
        """
        Every 2 seconds, POST the app's redis hash to REGISTRAR_URL.

        NOTE(review): the bare except hides network errors, and a failure
        before time.sleep(2) makes the loop retry without any delay.
        """
        while True :
            try:
                appinfo = self.db.hgetall(self.appname)
                appinfo_str = json.dumps(appinfo)
                data = {'applicationname':self.appname,'appinfo':appinfo_str}
                response = requests.post(REGISTRAR_URL, data=data)
                time.sleep(2)
            except :
                pass
def init_workers():
    """
    Start the three background worker threads:

    1. Producer - detects unregistered instances, adds them to the queue.
    2. Consumer - drains the queue and records instances in redis.
    3. MasterUpdater - pushes this app's info to the aggregator/registrar.

    All are daemon threads so they exit with the main process.
    """
    party_queue = Queue()
    p = Producer(party_queue)
    p.daemon = True
    c = Consumer(party_queue)
    # BUGFIX: was 'c.deamon'/'m.deamon' (typo), which set a meaningless
    # attribute and left these two threads non-daemonic.
    c.daemon = True
    m = MasterUpdater(db,application_name)
    m.daemon = True
    p.start()
    c.start()
    m.start()
@app.route('/addthread')
def addthread():
    """
    Increment the thread count of whichever instance the platform load
    balancer routed this request to, and persist it in redis.
    """
    instance_id = os.getenv("CF_INSTANCE_INDEX")
    print 'Instance Id ****************%s'%instance_id
    thread_count = int(db.hget(application_name,instance_id))
    thread_count+=1
    print 'Threadcount ****************%s'%thread_count
    result = db.hset(application_name,str(instance_id),str(thread_count))
    print 'HSET result %s'%result
    print db.hgetall(application_name)
    return json.dumps({'message':'success'})
@app.route('/deletethread')
def deletethread():
    """
    Decrement the thread count of whichever instance the platform load
    balancer routed this request to.

    NOTE(review): no lower bound is enforced, so repeated calls can drive
    the stored count negative.
    """
    instance_id = os.getenv("CF_INSTANCE_INDEX")
    print 'Instance Id **************%s'%instance_id
    thread_count = int(db.hget(application_name,instance_id))
    thread_count-=1
    db.hset(application_name,instance_id,thread_count)
    return json.dumps({'message':'success'})
@app.route('/instances')
def instances():
    """
    Render this application's instances and per-instance thread counts,
    sorted by instance key. An application sees only its own data.
    """
    mydict = db.hgetall(application_name)
    # Build the sorted mapping directly instead of the manual
    # __setitem__ loop; also drops the unused `mylist` local.
    ordered = OrderedDict(sorted(mydict.items()))
    return render_template('robots.html', mydict=ordered)
@app.route('/')
def index():
    """
    Main entry point: serves the landing page.
    """
    return render_template('index.html')
if __name__ == "__main__":
    # Start the background workers, then serve on the CF-assigned port.
    # NOTE(review): debug=True should be disabled outside development.
    init_workers()
    app.run(host='0.0.0.0', port=port, debug=True)
|
4,411 | ff09993a4f8fed65fa00c065eb5cfa41e7f9dcc1 | from django.contrib.auth.models import User
from django.db import models
class QueuedSpace(models.Model):
    """ Stores space json for possible further editing before being sent to the server.
    q_etag should update on every save so conflicts can be checked for in queued items.
    """
    # Remote space id; null until the space exists server-side.
    space_id = models.IntegerField(blank=True, null=True)
    json = models.TextField()
    # Etag used for optimistic-concurrency checks on queued edits.
    q_etag = models.CharField(max_length=40, blank=True)
    status = models.CharField(max_length=25, blank=True)
    # NOTE(review): auto_now and auto_now_add are both set; newer Django
    # versions reject this combination -- confirm the pinned version.
    last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)
    modified_by = models.ForeignKey(User, blank=True, null=True, related_name='modified_by')
    approved_by = models.ForeignKey(User, blank=True, null=True, related_name='approved_by')
    def __unicode__(self):
        return "id: %s (marked %s on %s by %s)" % (self.space_id, self.status, self.last_modified, self.modified_by)
    #TODO: put in an etag generator
|
4,412 | d0f9dd0a06023dd844b0bf70dff360f6bb46c152 | #-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2008 Rok Garbas <rok@garbas.si> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
__docformat__ = "reStructuredText"
import z3c.form
import zope.schema
import zope.interface
import zope.component
from widget_date import DateWidget
from interfaces import IMonthYearWidget
class MonthYearWidget(DateWidget):
    """ Month and year widget """
    # Python 2 zope declaration: this widget provides only IMonthYearWidget.
    zope.interface.implementsOnly(IMonthYearWidget)

    klass = u'monthyear-widget'
    # Default (empty month, empty year, 1) -- presumably the day component
    # is pinned to 1 since only month/year are collected; confirm against
    # DateWidget's value handling.
    value = ('', '', 1)
@zope.component.adapter(zope.schema.interfaces.IField, z3c.form.interfaces.IFormLayer)
@zope.interface.implementer(z3c.form.interfaces.IFieldWidget)
def MonthYearFieldWidget(field, request):
    """IFieldWidget factory for MonthYearWidget.

    Registered as an adapter from (schema field, form layer) to a bound
    field widget.
    """
    return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))
|
4,413 | 6e739c30b3e7c15bd90b74cfd5a1d6827e863a44 | '''
Created on 4 Oct 2016
@author: MetalInvest
'''
def isHammerHangman(high, low, open, close):
    """Return True when the candle looks like a hammer / hanging man:
    a lower shadow at least twice the real body, with little upper
    shadow (high within 8% of the body top).

    Note: `open` intentionally shadows the builtin; the parameter names
    are kept for interface compatibility.
    """
    body = abs(open - close)
    leg = min(open, close) - low
    # BUGFIX: the original divided by `body`, raising ZeroDivisionError on
    # a doji (open == close); treat any lower shadow as satisfying the
    # 2x-body requirement in that case. float() also guards against
    # Python 2 integer floor division when prices arrive as ints.
    if body == 0:
        long_leg = leg > 0
    else:
        long_leg = float(leg) / body >= 2.0
    return long_leg and float(high) / max(open, close) <= 1.08
def isEngulfing(df, bottom = True):
    """Detect a two-candle engulfing pattern.

    *df* must expose 'open' and 'close' sequences where index [-1] is the
    current bar and [-2] the previous one. With bottom=True, look for a
    bullish engulfing (green body strictly larger than the prior red
    body); otherwise the bearish mirror image.
    """
    curr_body = df['close'][-1] - df['open'][-1]
    prev_body = df['close'][-2] - df['open'][-2]
    if bottom:
        # Green candle following a red one, with a strictly larger body.
        return curr_body > 0 > prev_body and curr_body > -prev_body
    # Red candle following a green one, with a strictly larger body.
    return curr_body < 0 < prev_body and -curr_body > prev_body
def isDarkCloud():
    """Placeholder for dark-cloud-cover pattern detection (not implemented)."""
    pass
def isPiercing():
    """Placeholder for piercing-line pattern detection (not implemented)."""
    pass
def jap_candle_reversal(df, context):
    """Score reversal strength from Japanese candlestick patterns on df.

    Each matched pattern adds g.reversal_index to the returned score.
    NOTE(review): relies on a global `g` not defined in this module
    (presumably injected by the trading platform); `context` is unused.
    """
    # we check strong trend reversal reversal_pattern
    index = 0.0
    # hammer & hangman
    if isHammerHangman(df['high'][-1], df['low'][-1], df['open'][-1], df['close'][-1]):
        index += g.reversal_index
    if isEngulfing(df):
        index += g.reversal_index
    return index
x=input("Do you really want to run this program? (y/n) : ")
x=x.upper()
# NOTE(review): with the 'Y' branch commented out below, entering 'y'
# keeps satisfying the while condition and falls through to the
# 'Invalid selection' else-branch forever (infinite loop).
if x=="Y" or x=="N" or x=="Q":
    while x=="Y" or x=="N" or x=="Q":
        if x=="Q":
            print("Exiting the Program")
            import sys
            sys.exit()
        elif x=="N":
            # NOTE(review): the message below contains a stray curly quote;
            # left untouched because it is runtime output.
            print("You decided to leave. See you again!” ")
            break
        #elif x=="Y":
            #You can run the program.Enter the code required to run the program
        else:
            print("Invalid selection is entered")
4,415 | 44649e44da4eb80e7f869ff906798d5db493b913 | # -*- coding: utf-8; -*-
import gherkin
from gherkin import Lexer, Parser, Ast
def test_lex_test_eof():
    "lex_text() Should be able to find EOF"
    # (These tests use the 'sure' library's .should assertion API.)
    # Given a lexer that takes '' as the input string
    lexer = gherkin.Lexer('')
    # When we try to lex any text from ''
    new_state = lexer.lex_text()
    # Then we see we've got to EOF and that new state is nil
    lexer.tokens.should.equal([(1, gherkin.TOKEN_EOF, '')])
    new_state.should.be.none

def test_lex_text():
    "lex_text() Should be able to find text before EOF"
    # Given a lexer that takes some text as input string
    lexer = gherkin.Lexer('some text')
    # When we lex it
    new_state = lexer.lex_text()
    # Then we see we found both the text and the EOF token
    lexer.tokens.should.equal([
        (1, gherkin.TOKEN_TEXT, 'some text'),
        (1, gherkin.TOKEN_EOF, '')
    ])
    # And the new state is nil
    new_state.should.be.none

def test_lex_hash_with_text():
    "lex_text() Should stop lexing at # (we found a comment!)"
    # Given a lexer with some text and some comment
    lexer = gherkin.Lexer(' some text # random comment')
    # When the input is lexed through the text lexer
    new_state = lexer.lex_text()
    # Then we see the following token on the output list
    lexer.tokens.should.equal([
        (1, gherkin.TOKEN_TEXT, 'some text '),
    ])
    # And that the next state will lex comments
    new_state.should.equal(lexer.lex_comment)
def test_lex_comment():
    "lex_comment() Should stop lexing at \\n"
    # Given a lexer loaded with some comments
    lexer = gherkin.Lexer(' random comment')
    # When We lex the input text
    new_state = lexer.lex_comment()
    # Then we see the comment above was captured
    lexer.tokens.should.equal([
        (1, gherkin.TOKEN_COMMENT, 'random comment'),
    ])
    # And that new state is lex_text()
    new_state.should.equal(lexer.lex_text)

def test_lex_comment_meta_label():
    "lex_comment() Should stop lexing at : (we found a label)"
    # Given a lexer loaded with a comment that contains a label
    lexer = gherkin.Lexer(' metadata: test')
    # When we lex the comment
    new_state = lexer.lex_comment()
    # Then we see that a label was found
    lexer.tokens.should.equal([
        (1, gherkin.TOKEN_META_LABEL, 'metadata'),
    ])
    # And that new state is going to read the value of the variable we
    # just found
    new_state.should.equal(lexer.lex_comment_metadata_value)

def test_lex_comment_metadata_value():
    "lex_comment_metadata_value() Should stop lexing at \n"
    # Given a lexer loaded with the value of a label and a new line
    # with more text
    lexer = gherkin.Lexer(' test value\nblah')
    # When we lex the input string
    new_state = lexer.lex_comment_metadata_value()
    # Then we see that only the value present is the one before the
    # \n, everything else will be lexed by lex_text
    lexer.tokens.should.equal([
        (1, gherkin.TOKEN_META_VALUE, 'test value'),
    ])
    # And we also see that the next
    new_state.should.equal(lexer.lex_text)

def test_lex_comment_no_newline():
    "lex_comment_metadata_value() Should consume the whole line when no newline follows"
    # Given a lexer loaded with a comment without the newline marker
    lexer = gherkin.Lexer(' test comment')
    # When we lex the input string
    new_state = lexer.lex_comment_metadata_value()
    # Then we see the whole line was captured
    lexer.tokens.should.equal([
        (1, gherkin.TOKEN_META_VALUE, 'test comment'),
    ])
    # And we also see that the next
    new_state.should.equal(lexer.lex_text)
def test_lex_comment_until_newline():
    "Lexer.lex_comment() Should parse comments until the newline character"
    # Given a lexer loaded with comments containing a metadata field
    lexer = gherkin.Lexer('# one line\n# another line')
    # When I run the lexer
    tokens = lexer.run()
    # Then we see both lines were captured.
    # CLEANUP: the original asserted on lexer.tokens and left the `tokens`
    # local unused; assert on the return value like the sibling tests do
    # (run() returns the same token list).
    tokens.should.equal([
        (1, gherkin.TOKEN_COMMENT, 'one line'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_COMMENT, 'another line'),
        (2, gherkin.TOKEN_EOF, ''),
    ])
def test_lex_comment_full():
    "Lexer.run() Should be able to process metadata in comments"
    # Given a lexer loaded with comments containing a metadata field
    lexer = gherkin.Lexer('some text # metadata-field: blah-value\ntext')
    # When I run the lexer
    tokens = lexer.run()
    # Then I see the tokens collected match some text, a field, more
    # text and EOF
    tokens.should.equal([
        (1, gherkin.TOKEN_TEXT, 'some text '),
        (1, gherkin.TOKEN_META_LABEL, 'metadata-field'),
        (1, gherkin.TOKEN_META_VALUE, 'blah-value'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TEXT, 'text'),
        (2, gherkin.TOKEN_EOF, '')
    ])

def test_lex_text_with_label():
    "Lexer.run() Should be able to parse a label with some text"
    # Given a lexer loaded with a feature
    lexer = gherkin.Lexer(
        'Feature: A cool feature\n  some more text\n  even more text')
    # When we run the lexer
    tokens = lexer.run()
    # Then we see the token list matches the label, text, text EOF
    # sequence
    tokens.should.equal([
        (1, gherkin.TOKEN_LABEL, 'Feature'),
        (1, gherkin.TOKEN_TEXT, 'A cool feature'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TEXT, 'some more text'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_TEXT, 'even more text'),
        (3, gherkin.TOKEN_EOF, '')
    ])
def test_lex_text_with_labels():
    "Lexer.run() Should be able to tokenize a feature with a scenario"
    # Given a lexer with a more complete feature+scenario.
    # NOTE(review): the blank lines inside the document are significant --
    # the expected token stream below has NEWLINE tokens on lines 1, 2, 7
    # and 9, so the input must contain those empty lines.
    lexer = gherkin.Lexer('''

Feature: Some descriptive text
In order to parse a Gherkin file
As a parser
I want to be able to parse scenarios

Even more text

Scenario: The user wants to describe a feature
''')
    # When we run the lexer
    tokens = lexer.run()
    # Then we see it was broken down into the right list of tokens
    tokens.should.equal([
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_LABEL, 'Feature'),
        (3, gherkin.TOKEN_TEXT, 'Some descriptive text'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TEXT, 'In order to parse a Gherkin file'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_TEXT, 'As a parser'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_TEXT, 'I want to be able to parse scenarios'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_NEWLINE, '\n'),
        (8, gherkin.TOKEN_TEXT, 'Even more text'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_NEWLINE, '\n'),
        (10, gherkin.TOKEN_LABEL, 'Scenario'),
        (10, gherkin.TOKEN_TEXT, 'The user wants to describe a feature'),
        (10, gherkin.TOKEN_NEWLINE, '\n'),
        (11, gherkin.TOKEN_EOF, '')
    ])

def test_lex_text_with_steps():
    "Lexer.run() Should be able to tokenize steps"
    # Given a lexer loaded with feature+background+scenario+steps
    lexer = gherkin.Lexer('''\
Feature: Feature title
feature description
Background: Some background
about the problem
Scenario: Scenario title
Given first step
When second step
Then third step
''')
    # When we run the lexer
    tokens = lexer.run()
    # Then we see that everything, including the steps was properly
    # tokenized
    tokens.should.equal([
        (1, gherkin.TOKEN_LABEL, 'Feature'),
        (1, gherkin.TOKEN_TEXT, 'Feature title'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TEXT, 'feature description'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_LABEL, 'Background'),
        (3, gherkin.TOKEN_TEXT, 'Some background'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TEXT, 'about the problem'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_LABEL, 'Scenario'),
        (5, gherkin.TOKEN_TEXT, 'Scenario title'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_TEXT, 'Given first step'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_TEXT, 'When second step'),
        (7, gherkin.TOKEN_NEWLINE, '\n'),
        (8, gherkin.TOKEN_TEXT, 'Then third step'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_EOF, '')
    ])
def test_lex_load_languages():
    "Lexer.run() Should be able to parse different languages"
    # Given the following lexer instance loaded with another language.
    # NOTE(review): a blank line follows the language directive -- the
    # expected stream below has a NEWLINE token on line 2.
    lexer = gherkin.Lexer('''# language: pt-br

Funcionalidade: Interpretador para gherkin
Para escrever testes de aceitação
Como um programador
Preciso de uma ferramenta de BDD
Contexto:
Dado que a variavel "X" contém o número 2
Cenário: Lanche
Dada uma maçã
Quando mordida
Então a fome passa
''')
    # When we run the lexer
    tokens = lexer.run()
    # Then the following list of tokens is generated
    tokens.should.equal([
        (1, gherkin.TOKEN_META_LABEL, 'language'),
        (1, gherkin.TOKEN_META_VALUE, 'pt-br'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_LABEL, 'Funcionalidade'),
        (3, gherkin.TOKEN_TEXT, 'Interpretador para gherkin'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TEXT, 'Para escrever testes de aceitação'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_TEXT, 'Como um programador'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_TEXT, 'Preciso de uma ferramenta de BDD'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_LABEL, 'Contexto'),
        (7, gherkin.TOKEN_NEWLINE, '\n'),
        (8, gherkin.TOKEN_TEXT, 'Dado que a variavel "X" contém o número 2'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_LABEL, 'Cenário'),
        (9, gherkin.TOKEN_TEXT, 'Lanche'),
        (9, gherkin.TOKEN_NEWLINE, '\n'),
        (10, gherkin.TOKEN_TEXT, 'Dada uma maçã'),
        (10, gherkin.TOKEN_NEWLINE, '\n'),
        (11, gherkin.TOKEN_TEXT, 'Quando mordida'),
        (11, gherkin.TOKEN_NEWLINE, '\n'),
        (12, gherkin.TOKEN_TEXT, 'Então a fome passa'),
        (12, gherkin.TOKEN_NEWLINE, '\n'),
        (13, gherkin.TOKEN_EOF, '')
    ])

def test_lex_tables():
    "Lexer.run() Should be able to lex tables"
    # Given the following lexer loaded with an examples label followed
    # by a table that ends before '\n'
    lexer = gherkin.Lexer('''\
Examples:
| column1 | column2 | ''')
    # When we run the lexer
    tokens = lexer.run()
    # Then we see the scenario outline case was properly parsed
    tokens.should.equal([
        (1, gherkin.TOKEN_LABEL, 'Examples'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TABLE_COLUMN, 'column1'),
        (2, gherkin.TOKEN_TABLE_COLUMN, 'column2'),
        (2, gherkin.TOKEN_EOF, ''),
    ])
def test_lex_tables_full():
    "Lexer.run() Should be able to lex scenario outlines"
    # Given a lexer loaded with a scenario outline and an examples table
    lexer = gherkin.Lexer('''\
Feature: gherkin has steps with examples
Scenario Outline: Add two numbers
Given I have <input_1> and <input_2> the calculator
When I press "Sum"!
Then the result should be <output> on the screen
Examples:
| input_1 | input_2 | output |
| 20 | 30 | 50 |
| 0 | 40 | 40 |
''')
    # When we run the lexer
    tokens = lexer.run()
    # Then we see the scenario outline case was properly parsed
    tokens.should.equal([
        (1, gherkin.TOKEN_LABEL, 'Feature'),
        (1, gherkin.TOKEN_TEXT, 'gherkin has steps with examples'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_LABEL, 'Scenario Outline'),
        (2, gherkin.TOKEN_TEXT, 'Add two numbers'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_TEXT, 'Given I have <input_1> and <input_2> the calculator'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TEXT, 'When I press "Sum"!'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_TEXT, 'Then the result should be <output> on the screen'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_LABEL, 'Examples'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'input_1'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'input_2'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'output'),
        (7, gherkin.TOKEN_NEWLINE, '\n'),
        (8, gherkin.TOKEN_TABLE_COLUMN, '20'),
        (8, gherkin.TOKEN_TABLE_COLUMN, '30'),
        (8, gherkin.TOKEN_TABLE_COLUMN, '50'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_TABLE_COLUMN, '0'),
        (9, gherkin.TOKEN_TABLE_COLUMN, '40'),
        (9, gherkin.TOKEN_TABLE_COLUMN, '40'),
        (9, gherkin.TOKEN_NEWLINE, '\n'),
        (10, gherkin.TOKEN_EOF, '')
    ])

def test_lex_tables_within_steps():
    "Lexer.run() Should be able to lex example tables from steps"
    # Given a lexer loaded with steps that contain example tables
    lexer = gherkin.Lexer('''\
Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
''')
    # When we run the lexer
    tokens = lexer.run()
    # Then we see that steps that contain : will be identified as
    # labels
    tokens.should.equal([
        (1, gherkin.TOKEN_LABEL, 'Feature'),
        (1, gherkin.TOKEN_TEXT, 'Check models existence'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_LABEL, 'Background'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_LABEL, 'Given I have a garden in the database'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TABLE_COLUMN, '@name'),
        (4, gherkin.TOKEN_TABLE_COLUMN, 'area'),
        (4, gherkin.TOKEN_TABLE_COLUMN, 'raining'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_TABLE_COLUMN, 'Secret Garden'),
        (5, gherkin.TOKEN_TABLE_COLUMN, '45'),
        (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_LABEL, 'And I have gardens in the database'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'name'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'area'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'raining'),
        (7, gherkin.TOKEN_NEWLINE, '\n'),
        (8, gherkin.TOKEN_TABLE_COLUMN, 'Octopus\' Garden'),
        (8, gherkin.TOKEN_TABLE_COLUMN, '120'),
        (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_EOF, '')
    ])
def test_lex_multi_line_str():
    "Lexer.run() Should be able to find multi quoted strings after labels"
    # Given a lexer loaded with steps that contain multi-line strings.
    # The inner triple single-quotes are escaped (''\') because the
    # document itself is a triple-single-quoted Python literal.
    lexer = gherkin.Lexer('''\
Given the following email template:
''\'Here we go with a pretty
big block of text
surrounded by triple quoted strings
''\'
And a cat picture
"""Now notice we didn't use (:) above
 """
''')
    # When we run the lexer
    tokens = lexer.run()
    # Then we see that triple quoted strings are captured by the lexer
    tokens.should.equal([
        (1, gherkin.TOKEN_LABEL, 'Given the following email template'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_QUOTES, "'''"),
        (2, gherkin.TOKEN_TEXT, '''Here we go with a pretty
big block of text
surrounded by triple quoted strings
'''),
        (5, gherkin.TOKEN_QUOTES, "'''"),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_TEXT, 'And a cat picture'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_QUOTES, '"""'),
        (7, gherkin.TOKEN_TEXT, "Now notice we didn't use (:) above\n "),
        (8, gherkin.TOKEN_QUOTES, '"""'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_EOF, '')
    ])

def test_lex_tags_empty():
    "Lexer.lex_tag() Should bail if we reach EOF"
    # Given a lexer loaded with an empty string
    lexer = gherkin.Lexer('')
    # When we try to lex tags
    lexer.lex_tag()
    # Then we see we found no tokens
    lexer.tokens.should.be.empty

def test_lex_tags():
    "Lexer.run() Should be able to find tags"
    # Given a lexer loaded with a tagged feature and scenario.
    # NOTE(review): the blank line before @tag1 is required -- the
    # expected stream has a NEWLINE token on line 3.
    lexer = gherkin.Lexer('''\
@tagged-feature
Feature: Parse tags

@tag1 @tag2
Scenario: Test
''')
    # When we run the lexer
    tokens = lexer.run()
    # Then we see the tags were captured as TAG tokens
    tokens.should.equal([
        (1, gherkin.TOKEN_TAG, 'tagged-feature'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_LABEL, 'Feature'),
        (2, gherkin.TOKEN_TEXT, 'Parse tags'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TAG, 'tag1'),
        (4, gherkin.TOKEN_TAG, 'tag2'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_LABEL, 'Scenario'),
        (5, gherkin.TOKEN_TEXT, 'Test'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_EOF, ''),
    ])
def test_parse_metadata_empty():
    "Parser.parse_metadata() Should return None for EOF or empty input"
    Parser([(1, gherkin.TOKEN_EOF, '')]).parse_metadata().should.be.none
    Parser([None]).parse_metadata().should.be.none

def test_parse_metadata_incomplete():
    "Parser.parse_metadata() Should return None when the value is missing"
    parser = Parser([
        (1, gherkin.TOKEN_META_LABEL, 'language'),
        (1, gherkin.TOKEN_EOF, ''),
    ])
    parser.parse_metadata().should.be.none

def test_parse_metadata_syntax_error():
    "Parser.parse_metadata() Should raise when a label is not followed by a value"
    parser = Parser([
        (1, gherkin.TOKEN_META_LABEL, 'language'),
        (1, gherkin.TOKEN_TEXT, 'pt-br'),
    ])
    parser.parse_metadata.when.called.should.throw(
        SyntaxError, 'No value found for the meta-field `language\'')

def test_parse_metadata():
    "Parser.parse_metadata() Should produce an Ast.Metadata node"
    parser = Parser([
        (1, gherkin.TOKEN_META_LABEL, 'language'),
        (1, gherkin.TOKEN_META_VALUE, 'pt-br'),
    ])
    metadata = parser.parse_metadata()
    metadata.should.equal(Ast.Metadata(line=1, key='language', value='pt-br'))

def test_parse_empty_title():
    "Parser.parse_title() Should return None when no text precedes the newline"
    parser = Parser([
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TEXT, 'more text after title'),
    ])
    feature = parser.parse_title()
    feature.should.be.none

def test_parse_title():
    "Parser.parse_title() Should produce an Ast.Text node from a TEXT token"
    parser = Parser([
        (1, gherkin.TOKEN_TEXT, 'Scenario title'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
    ])
    feature = parser.parse_title()
    feature.should.equal(Ast.Text(line=1, text='Scenario title'))

def test_parse_table():
    "Parser.parse_table() Should group TABLE_COLUMN tokens into rows"
    parser = Parser([
        (1, gherkin.TOKEN_TABLE_COLUMN, 'name'),
        (1, gherkin.TOKEN_TABLE_COLUMN, 'email'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'),
        (2, gherkin.TOKEN_TABLE_COLUMN, 'lincoln@clarete.li'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'),
        (3, gherkin.TOKEN_TABLE_COLUMN, 'gabriel@nacaolivre.org'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_LABEL, 'Scenario'),
        (4, gherkin.TOKEN_EOF, ''),
    ])
    feature = parser.parse_table()
    feature.should.equal(Ast.Table(line=1, fields=[
        ['name', 'email'],
        ['Lincoln', 'lincoln@clarete.li'],
        ['Gabriel', 'gabriel@nacaolivre.org'],
    ]))
def test_parse_background():
    "Parser.parse_background() Should parse a background with a table step"
    # Given a parser loaded with the token stream for:
    #
    # Background: title
    #   Given two users in the database:
    #     | name    | email                  |
    #     | Lincoln | lincoln@clarete.li     |
    #     | Gabriel | gabriel@nacaolivre.org |
    # Scenario:
    parser = Parser([
        (1, gherkin.TOKEN_LABEL, 'Background'),
        (1, gherkin.TOKEN_TEXT, 'title'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_LABEL, 'Given two users in the database'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_TABLE_COLUMN, 'name'),
        (3, gherkin.TOKEN_TABLE_COLUMN, 'email'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TABLE_COLUMN, 'Lincoln'),
        (4, gherkin.TOKEN_TABLE_COLUMN, 'lincoln@clarete.li'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_TABLE_COLUMN, 'Gabriel'),
        (5, gherkin.TOKEN_TABLE_COLUMN, 'gabriel@nacaolivre.org'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_LABEL, 'Scenario'),
    ])
    # When the background is parsed
    feature = parser.parse_background()
    # Then I see the output contains a valid background with a step
    # with examples. Notice the scenario label is not returned
    # anywhere here
    feature.should.equal(Ast.Background(
        line=1,
        title=Ast.Text(line=1, text='title'),
        steps=[
            Ast.Step(
                line=2,
                title=Ast.Text(line=2, text='Given two users in the database'),
                table=Ast.Table(line=3, fields=[
                    ['name', 'email'],
                    ['Lincoln', 'lincoln@clarete.li'],
                    ['Gabriel', 'gabriel@nacaolivre.org'],
                ]))
        ]))
## Scenarios

def test_parse_scenario():
    "Parser.parse_scenarios() Should parse a scenario with a single step"
    # NOTE: renamed from `teste_parse_scenario` -- the misspelled prefix
    # meant pytest/nose never collected this test, so it silently never ran.
    parser = Parser([
        (1, gherkin.TOKEN_LABEL, 'Scenario'),
        (1, gherkin.TOKEN_TEXT, 'Scenario title'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TEXT, 'Given first step'),
    ])
    feature = parser.parse_scenarios()
    feature.should.equal([Ast.Scenario(
        line=1,
        title=Ast.Text(line=1, text='Scenario title'),
        steps=[Ast.Step(line=2, title=Ast.Text(line=2, text='Given first step'))],
    )])
def test_parse_scenario_with_description():
    "Parser.parse_scenarios() Should join consecutive TEXT tokens into the description"
    # NOTE: renamed from `teste_parse_scenario_with_description` -- the
    # misspelled prefix meant test runners never collected this test.
    parser = Parser([
        (1, gherkin.TOKEN_LABEL, 'Scenario'),
        (1, gherkin.TOKEN_TEXT, 'Scenario title'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TEXT, 'Scenario description'),
        (2, gherkin.TOKEN_TEXT, 'More description'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_TEXT, 'Given first step'),
    ])
    feature = parser.parse_scenarios()
    feature.should.equal([Ast.Scenario(
        line=1,
        title=Ast.Text(line=1, text='Scenario title'),
        description=Ast.Text( line=2, text='Scenario description More description'),
        steps=[Ast.Step(line=3, title=Ast.Text(line=3, text='Given first step'))],
    )])
def test_parse_scenario_outline_with_examples():
    "Parser.parse_scenarios() Should parse a scenario outline with its examples table"
    # Given a parser loaded with the following gherkin document:
    #
    #   Scenario Outline: Plant a tree
    #     Given the <name> of a garden
    #     When I plant a tree
    #     And wait for <num_days> days
    #     Then I see it growing
    #   Examples:
    #     | name    | num_days |
    #     | Secret  | 2        |
    #     | Octopus | 5        |
    parser = Parser([
        (1, gherkin.TOKEN_LABEL, 'Scenario Outline'),
        (1, gherkin.TOKEN_TEXT, 'Plant a tree'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_TEXT, 'When I plant a tree'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TEXT, 'And wait for <num_days> days'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_TEXT, 'Then I see it growing'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_LABEL, 'Examples'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'name'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'num_days'),
        (7, gherkin.TOKEN_NEWLINE, '\n'),
        (8, gherkin.TOKEN_TABLE_COLUMN, 'Secret'),
        (8, gherkin.TOKEN_TABLE_COLUMN, '2'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_TABLE_COLUMN, 'Octopus'),
        (9, gherkin.TOKEN_TABLE_COLUMN, '5'),
        (9, gherkin.TOKEN_NEWLINE, '\n'),
        (10, gherkin.TOKEN_EOF, '')
    ])
    scenarios = parser.parse_scenarios()
    scenarios.should.equal([
        Ast.ScenarioOutline(
            line=1,
            title=Ast.Text(line=1, text='Plant a tree'),
            steps=[Ast.Step(line=2, title=Ast.Text(line=2, text='Given the <name> of a garden')),
                   Ast.Step(line=3, title=Ast.Text(line=3, text='When I plant a tree')),
                   Ast.Step(line=4, title=Ast.Text(line=4, text='And wait for <num_days> days')),
                   Ast.Step(line=5, title=Ast.Text(line=5, text='Then I see it growing'))],
            examples=Ast.Examples(line=6, table=Ast.Table(line=7, fields=[
                ['name', 'num_days'],
                ['Secret', '2'],
                ['Octopus', '5'],
            ]))
        )])
def test_parse_not_starting_with_feature():
    "Parser.parse_feature() Should require the document to start with Feature"
    parser = gherkin.Parser(gherkin.Lexer('''
Scenario: Scenario title
Given first step
When second step
Then third step
''').run())
    parser.parse_feature.when.called.should.throw(
        SyntaxError,
        "Feature expected in the beginning of the file, "
        "found `Scenario' though.")

def test_parse_feature_two_backgrounds():
    "Parser.parse_feature() Should reject a second Background section"
    parser = gherkin.Parser(gherkin.Lexer('''
Feature: Feature title
feature description
Background: Some background
about the problem
Background: Some other background
will raise an exception
Scenario: Scenario title
Given first step
When second step
Then third step
''').run())
    parser.parse_feature.when.called.should.throw(
        SyntaxError,
        "`Background' should not be declared here, Scenario or Scenario Outline expected")

def test_parse_feature_background_wrong_place():
    "Parser.parse_feature() Should reject a Background after scenarios"
    parser = gherkin.Parser(gherkin.Lexer('''
Feature: Feature title
feature description
Scenario: Scenario title
Given first step
When second step
Then third step
Background: Some background
about the problem
''').run())
    parser.parse_feature.when.called.should.throw(
        SyntaxError,
        "`Background' should not be declared here, Scenario or Scenario Outline expected")
def test_parse_feature():
    "Parser.parse_feature() Should build a Feature node with background and scenarios"
    parser = Parser([
        (1, gherkin.TOKEN_LABEL, 'Feature'),
        (1, gherkin.TOKEN_TEXT, 'Feature title'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TEXT, 'feature description'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_LABEL, 'Background'),
        (3, gherkin.TOKEN_TEXT, 'Some background'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TEXT, 'Given the problem'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_LABEL, 'Scenario'),
        (5, gherkin.TOKEN_TEXT, 'Scenario title'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_TEXT, 'Given first step'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_LABEL, 'Scenario'),
        (7, gherkin.TOKEN_TEXT, 'Another scenario'),
        (7, gherkin.TOKEN_NEWLINE, '\n'),
        (8, gherkin.TOKEN_TEXT, 'Given this step'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_TEXT, 'When we take another step'),
        (9, gherkin.TOKEN_NEWLINE, '\n'),
        (10, gherkin.TOKEN_EOF, ''),
    ])
    feature = parser.parse_feature()
    feature.should.equal(Ast.Feature(
        line=1,
        title=Ast.Text(line=1, text='Feature title'),
        description=Ast.Text(line=2, text='feature description'),
        background=Ast.Background(
            line=3,
            title=Ast.Text(line=3, text='Some background'),
            steps=[Ast.Step(line=4, title=Ast.Text(line=4, text='Given the problem'))]),
        scenarios=[
            Ast.Scenario(line=5,
                         title=Ast.Text(line=5, text='Scenario title'),
                         steps=[Ast.Step(line=6, title=Ast.Text(line=6, text='Given first step'))]),
            Ast.Scenario(line=7,
                         title=Ast.Text(line=7, text='Another scenario'),
                         steps=[Ast.Step(line=8, title=Ast.Text(line=8, text='Given this step')),
                                Ast.Step(line=9, title=Ast.Text(line=9, text='When we take another step'))]),
        ],
    ))
def test_parse_tables_within_steps():
    "Lexer.run() Should be able to parse example tables from steps"
    # Given a parser loaded with the token stream of the document below.
    # (The triple-quoted string is a no-op expression kept as a readable
    # reference for the fixture tokens.)
    '''Feature: Check models existence
Background:
Given I have a garden in the database:
| @name | area | raining |
| Secret Garden | 45 | false |
And I have gardens in the database:
| name | area | raining |
| Octopus' Garden | 120 | true |
Scenario: Plant a tree
Given the <name> of a garden
When I plant a tree
And wait for <num_days> days
Then I see it growing
'''
    parser = Parser([
        (1, gherkin.TOKEN_LABEL, 'Feature'),
        (1, gherkin.TOKEN_TEXT, 'Check models existence'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_LABEL, 'Background'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_LABEL, 'Given I have a garden in the database'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TABLE_COLUMN, '@name'),
        (4, gherkin.TOKEN_TABLE_COLUMN, 'area'),
        (4, gherkin.TOKEN_TABLE_COLUMN, 'raining'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_TABLE_COLUMN, 'Secret Garden'),
        (5, gherkin.TOKEN_TABLE_COLUMN, '45'),
        (5, gherkin.TOKEN_TABLE_COLUMN, 'false'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_LABEL, 'And I have gardens in the database'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'name'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'area'),
        (7, gherkin.TOKEN_TABLE_COLUMN, 'raining'),
        (7, gherkin.TOKEN_NEWLINE, '\n'),
        (8, gherkin.TOKEN_TABLE_COLUMN, "Octopus' Garden"),
        (8, gherkin.TOKEN_TABLE_COLUMN, '120'),
        (8, gherkin.TOKEN_TABLE_COLUMN, 'true'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_LABEL, 'Scenario'),
        (9, gherkin.TOKEN_TEXT, 'Plant a tree'),
        (9, gherkin.TOKEN_NEWLINE, '\n'),
        (10, gherkin.TOKEN_TEXT, 'Given the <name> of a garden'),
        (10, gherkin.TOKEN_NEWLINE, '\n'),
        (11, gherkin.TOKEN_TEXT, 'When I plant a tree'),
        (11, gherkin.TOKEN_NEWLINE, '\n'),
        (12, gherkin.TOKEN_TEXT, 'And wait for <num_days> days'),
        (12, gherkin.TOKEN_NEWLINE, '\n'),
        (13, gherkin.TOKEN_TEXT, 'Then I see it growing'),
        (13, gherkin.TOKEN_NEWLINE, '\n'),
        (14, gherkin.TOKEN_EOF, '')
    ])
    feature = parser.parse_feature()
    feature.should.equal(Ast.Feature(
        line=1,
        title=Ast.Text(line=1, text='Check models existence'),
        background=Ast.Background(
            line=2,
            steps=[
                Ast.Step(
                    line=3,
                    title=Ast.Text(line=3, text='Given I have a garden in the database'),
                    table=Ast.Table(line=4, fields=[
                        ['@name', 'area', 'raining'],
                        ['Secret Garden', '45', 'false']])),
                Ast.Step(
                    line=6,
                    title=Ast.Text(line=6, text='And I have gardens in the database'),
                    table=Ast.Table(line=7, fields=[
                        ['name', 'area', 'raining'],
                        ['Octopus\' Garden', '120', 'true']])),
            ]
        ),
        scenarios=[
            Ast.Scenario(
                title=Ast.Text(line=9, text='Plant a tree'),
                line=9,
                steps=[
                    Ast.Step(line=10, title=Ast.Text(line=10, text='Given the <name> of a garden')),
                    Ast.Step(line=11, title=Ast.Text(line=11, text='When I plant a tree')),
                    Ast.Step(line=12, title=Ast.Text(line=12, text='And wait for <num_days> days')),
                    Ast.Step(line=13, title=Ast.Text(line=13, text='Then I see it growing'))
                ])
        ],
    ))
def test_parse_quoted_strings_on_steps():
    "Parser.parse_steps() Should attach triple-quoted blocks to steps as text"
    # Given a parser loaded with the token stream of this document:
    #
    #   Given the following email template:
    #      '''Here we go with a pretty
    #      big block of text
    #      surrounded by triple quoted strings
    #      '''
    #   And a cat picture
    #      """Now notice we didn't use (:) above
    #      """
    parser = Parser([
        (1, gherkin.TOKEN_LABEL, 'Given the following email template'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_QUOTES, "'''"),
        (2, gherkin.TOKEN_TEXT, '''Here we go with a pretty
big block of text
surrounded by triple quoted strings
'''),
        (5, gherkin.TOKEN_QUOTES, "'''"),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_TEXT, 'And a cat picture'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_QUOTES, '"""'),
        (7, gherkin.TOKEN_TEXT, "Now notice we didn't use (:) above\n "),
        (8, gherkin.TOKEN_QUOTES, '"""'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_EOF, '')
    ])
    steps = parser.parse_steps()
    steps.should.equal([
        Ast.Step(
            line=1,
            title=Ast.Text(line=1, text='Given the following email template'),
            text=Ast.Text(line=2, text='''Here we go with a pretty
big block of text
surrounded by triple quoted strings
''')),
        Ast.Step(
            line=6,
            title=Ast.Text(line=6, text='And a cat picture'),
            text=Ast.Text(line=7, text="Now notice we didn't use (:) above\n "))])
def test_parse_tags():
    "Parser.parse_tags() Should accumulate tags until a non-tag token"
    # NOTE: renamed from `test_parse_text` -- the old name described the
    # wrong entry point; the body exercises parse_tags(), not text parsing.
    parser = Parser([
        (1, gherkin.TOKEN_TAG, 'tag1'),
        (1, gherkin.TOKEN_TAG, 'tag2'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_TAG, 'tag3'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_LABEL, 'Feature'),
    ])
    tags = parser.parse_tags()
    tags.should.equal(['tag1', 'tag2', 'tag3'])
def test_parse_tags_on_scenario_outline_examples():
    "Parser should allow tags to be defined in examples"
    # Given a parser loaded with a document that contains tags on
    # scenario outline examples
    #
    #   @tagged-feature
    #   Feature: Parse tags
    #   @tag1 @tag2
    #   Scenario Outline: Test
    #   @example-tag1
    #   @example-tag2
    #   Examples:
    #     | Header |
    parser = Parser([
        (1, gherkin.TOKEN_TAG, 'tagged-feature'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_LABEL, 'Feature'),
        (2, gherkin.TOKEN_TEXT, 'Parse tags'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_TAG, 'tag1'),
        (3, gherkin.TOKEN_TAG, 'tag2'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_LABEL, 'Scenario Outline'),
        (4, gherkin.TOKEN_TEXT, 'Test'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_TAG, 'example-tag1'),
        (5, gherkin.TOKEN_NEWLINE, '\n'),
        (6, gherkin.TOKEN_TAG, 'example-tag2'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_LABEL, 'Examples'),
        (7, gherkin.TOKEN_NEWLINE, '\n'),
        (8, gherkin.TOKEN_TABLE_COLUMN, 'Header'),
        (8, gherkin.TOKEN_NEWLINE, '\n'),
        (9, gherkin.TOKEN_EOF, ''),
    ])
    # When I parse the document
    feature = parser.parse_feature()
    # Then I see all the tags were found
    feature.should.equal(Ast.Feature(
        line=2,
        title=Ast.Text(line=2, text='Parse tags'),
        tags=['tagged-feature'],
        scenarios=[Ast.ScenarioOutline(
            line=4,
            title=Ast.Text(line=4, text='Test'),
            tags=['tag1', 'tag2'],
            examples=Ast.Examples(
                line=7,
                tags=['example-tag1', 'example-tag2'],
                table=Ast.Table(line=8, fields=[['Header']])),
        )]))

def test_parse_tags_on_feature_and_scenario():
    "Parser.parse_feature() Should attach tags to both feature and scenario"
    # Given a parser loaded with a gherkin document with one tag on
    # the feature and two tags on a scenario:
    #
    #   @tagged-feature
    #   Feature: Parse tags
    #
    #   @tag1 @tag2
    #   Scenario: Test
    parser = Parser([
        (1, gherkin.TOKEN_TAG, 'tagged-feature'),
        (1, gherkin.TOKEN_NEWLINE, '\n'),
        (2, gherkin.TOKEN_LABEL, 'Feature'),
        (2, gherkin.TOKEN_TEXT, 'Parse tags'),
        (2, gherkin.TOKEN_NEWLINE, '\n'),
        (3, gherkin.TOKEN_NEWLINE, '\n'),
        (4, gherkin.TOKEN_TAG, 'tag1'),
        (4, gherkin.TOKEN_TAG, 'tag2'),
        (4, gherkin.TOKEN_NEWLINE, '\n'),
        (5, gherkin.TOKEN_LABEL, 'Scenario'),
        (5, gherkin.TOKEN_TEXT, 'Test'),
        (6, gherkin.TOKEN_NEWLINE, '\n'),
        (7, gherkin.TOKEN_EOF, ''),
    ])
    feature = parser.parse_feature()
    feature.should.equal(Ast.Feature(
        line=2,
        title=Ast.Text(line=2, text='Parse tags'),
        tags=['tagged-feature'],
        scenarios=[Ast.Scenario(
            line=5,
            title=Ast.Text(line=5, text='Test'),
            tags=['tag1', 'tag2'])]))
def test_ast_node_equal():
    "Ast.Node() equality Should distinguish nodes with different attributes"
    # Given two AST nodes, each carrying a different attribute
    left = Ast.Node()
    right = Ast.Node()
    left.name = 'Lincoln'
    right.color = 'green'
    # When the two nodes are compared for equality
    result = left == right
    # Then the comparison reports them as different
    result.should.be.false
|
def digit_sum(n):
    """Return the sum of the decimal digits of the non-negative integer *n*."""
    total = 0
    while n > 0:
        total += n % 10
        n //= 10
    return total


if __name__ == "__main__":
    # int() replaces eval(): eval(input(...)) executes arbitrary code typed
    # by the user; int() safely parses the expected integer.
    n = int(input("Entrez valeur: "))
    # The leftover debug print of (res, n) -- where n was always 0 after the
    # loop -- is removed; only the digit sum is reported.
    print(digit_sum(n))
|
4,417 | 5626e5a4a448630fbbbc92a67ae08f3ed24e1b9e | #Main program:
#reads IMU data from arduino uart
#receives PS3 Controller input
#Mantains Controller input frequency with CST
#!/usr/bin/env python
from map import mapControllerToDeg
from map import constrain
from map import wrap_180
from map import motorOutputLimitHandler
from uart1 import IMUDevice
import socket
from controlStateTable2 import ControlStateTable
from map import arduino_map
from pid import PID
import time
import pdb
def setup(pids):
    """Load the flight PID gains into the PID table.

    The commented-out calls are earlier tuning presets kept for reference.
    NOTE(review): the YAW PID is never configured here (both YAW lines are
    commented out) even though the main loop runs it -- presumably PID()
    defaults to zero gains; confirm against the PID class.
    """
    # PID Configuration
    #pids['PITCH'].set_Kpid(6.5,0.1,1.2)
    #pids['ROLL'].set_Kpid(6.5,0.1,1.2)
    #pids['PITCH'].set_Kpid(6.5,0.1,1.2)
    #pids['ROLL'].set_Kpid(6.5,0.1,1.2)
    #pids['YAW'].set_Kpid(2.7,1,0)
    pids['PITCH'].set_Kpid(6.5,0,0)
    pids['ROLL'].set_Kpid(0,0,0)
    #pids['YAW'].set_Kpid(0,0,0)
def print_IMU_CST_Streams():
    # Dump the current control-state table for debugging; the raw IMU
    # line dump is kept disabled.
    print CST.strTable()
    #print "IMU reading" + IMU.getLine()

def convert_IMU_CST_to_Degrees():
    # Refresh the IMU attitude from the serial line and convert the PS3
    # control-state table values into degree setpoints, stored in the
    # global C_YPR dict.
    global C_YPR
    #sets Y,P,R in IMU class
    IMU.getYPR(IMU.getLine())
    #converts the control values for YPR to degrees
    C_YPR = mapControllerToDeg(CST.getTable())
    #print "IMU DATA:"
    #print IMU.Yaw, IMU.Pitch, IMU.Roll
    print "CONTROL DEG DATA: ", str(C_YPR)
def calculatePIDs():
    # Run one PID update per axis against the latest setpoints (C_YPR)
    # and IMU readings; outputs are clamped to motor-safe ranges.
    global pitch_output, roll_output, yaw_output, thr, pids
    #PID CODE
    #print "PID _____ PITCH: "
    pitch_output = constrain(pids['PITCH'].update_pid_std(C_YPR['P'], IMU.Pitch, 10000),-250, 250)
    #print "PID _____ ROLL: "
    roll_output = constrain(pids['ROLL'].update_pid_std(C_YPR['R'], IMU.Roll, 10000),-250, 250)
    #print "PID _____ YAW: "
    # Yaw is wrapped to [-180, 180) on both setpoint and measurement so the
    # controller takes the short way around.
    yaw_output = constrain(pids['YAW'].update_pid_std(wrap_180(C_YPR['Y']), wrap_180(IMU.Yaw), 10000),-360, 360)
    #get thrust
    #thr = float(CST.getTable()['THRUST'])

def calculateMotorThrust():
    # Mix thrust and the three PID outputs into the four motor commands
    # (quad X mixing) and push them to the flight controller over serial.
    global pitch_output, roll_output, yaw_output, thr, motor, pidStatusEnable
    # Earlier sign convention, kept for reference:
    #motor['FL'] = thr + roll_output + pitch_output - yaw_output
    #motor['BL'] = thr + roll_output - pitch_output + yaw_output
    #motor['FR'] = thr - roll_output + pitch_output + yaw_output
    #motor['BR'] = thr - roll_output - pitch_output - yaw_output
    motor['FL'] = thr + roll_output - pitch_output + yaw_output
    motor['BL'] = thr + roll_output + pitch_output - yaw_output
    motor['FR'] = thr - roll_output - pitch_output - yaw_output
    motor['BR'] = thr - roll_output + pitch_output + yaw_output
    motorOutputLimitHandler(motor)
    sep= ","
    # Serial packet format: "BR,BL,FR,FL" as integers.
    tuple1 = str(int(motor['BR']))+sep+str(int(motor['BL']))+sep+str(int(motor['FR']))+sep+str(int(motor['FL']))
    writeResult = IMU.writeSerialPort(tuple1)
    #except Exception, e:
    #    raise
    # if writeResult == -1:
    #print "Could not write motor value............."
    print "Motor: ", str(motor)
    print "--------"
def sleep():
    # Pace the control loop: seconds/microseconds_unit = 10000/1e6 = 0.01 s.
    global seconds, microseconds_unit
    time.sleep(seconds/microseconds_unit)

def stabilizationCode():
    # One full stabilization cycle: debug dump, setpoint conversion,
    # PID update, motor mixing/output, then the loop delay.
    print_IMU_CST_Streams()
    convert_IMU_CST_to_Degrees()
    calculatePIDs()
    #try:
    calculateMotorThrust()
    #except Exception, e:
    #    raise
    sleep()
# Loop delay: 10000 microseconds (0.01 s) -- see sleep().
seconds = 10000
microseconds_unit = 1000000.0
unblocking = 0 #unblocks socket (0 => non-blocking sockets)
#verify client and server ip are = to interface ip
#TCP_IP = '192.168.1.7'
TCP_IP='192.168.1.101'
TCP_PORT = 5005
BUFFER_SIZE = 20  # Normally 1024, but we want fast response
#To store degrees from PS3 Controller
C_YPR = {}
#PID dictionary, one controller per axis
pids = { 'PITCH': PID(),
         'ROLL': PID(),
         'YAW': PID() }
#motor dictionary: Front/Back x Left/Right thrust commands
motor = { 'FL':0, 'BL':0,
          'FR':0, 'BR':0 }
pitch_output=0
roll_output=0
yaw_output=0
thr = 0
THR_MIN = 1100
THR_MAX = 2000
# Serial link to the IMU/flight controller plus a sample log file.
IMU = IMUDevice()
IMU.openSerialPort()
IMU.openSampleFile()
CST = ControlStateTable()
############################## open wireless constants set port ##################
# Blocking accept: the program waits here until the PID-tuning client connects.
TCP_IP2='192.168.1.101'
TCP_PORT2 = 5008
s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.setblocking(unblocking)
server_address2 = (TCP_IP2, TCP_PORT2)
s2.bind(server_address2)
s2.listen(1)
conn2, addr2 = s2.accept()
print 'Connection address:', addr2
################################ open ps3 socket port #####################
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.setblocking(unblocking)
server_address = (TCP_IP, TCP_PORT)
s.bind(server_address)
s.listen(1)
conn, addr = s.accept()
print 'Connection address:', addr
# Both accepted connections are switched to non-blocking so the main loop
# can poll them without stalling the stabilization cycle.
conn.setblocking(unblocking) #does not wait for packets
conn2.setblocking(unblocking)
#configure PID
setup(pids)
def setPidConstantsWireless(data):
    """Apply a wireless PID-tuning packet and echo the new constants.

    Packet layout (ASCII): data[0] selects the axis ('p'itch, 'r'oll or
    'y'aw), data[1] selects the constant ('p', 'i' or 'd') and the rest
    of the string is the new float value.  After updating, the current
    constants of all three PIDs are sent back on conn2.

    Refactored: the three near-identical per-axis branches of the original
    are collapsed into a single dispatch-table lookup; behavior is the same.
    """
    #pdb.set_trace()
    global conn2
    global pids
    axis_key = data[0]
    const_key = data[1]
    value = float(data[2:])
    # Map the one-letter packet keys onto the PID table entries.
    axes = {'p': 'PITCH', 'r': 'ROLL', 'y': 'YAW'}
    if axis_key in axes:
        pid = pids[axes[axis_key]]
        kp, ki, kd = pid.m_Kp, pid.m_Ki, pid.m_Kd
        if const_key == 'p':
            kp = value
        elif const_key == 'i':
            ki = value
        elif const_key == 'd':
            kd = value
        pid.set_Kpid(kp, ki, kd)
    # Echo the full constant table back to the tuning client.
    p = pids['PITCH']
    r = pids['ROLL']
    y = pids['YAW']
    ptitle = "| Pitch: kp, kd, ki= "
    pData = str(p.m_Kp)+","+ str(p.m_Kd)+","+ str(p.m_Ki)
    rtitle = "| Roll: kp, kd, ki= "
    rData = str(r.m_Kp)+","+ str(r.m_Kd)+","+ str(r.m_Ki)
    ytitle = "| Yaw: kp, kd, ki= "
    yData = str(y.m_Kp)+","+ str(y.m_Kd)+","+ str(y.m_Ki)
    conn2.send(ptitle+pData+rtitle+rData+ytitle+yData)
################################## Main loop ######################################
# Main loop: poll the tuning socket, then the PS3 socket, and run the
# stabilization PID whenever thrust is above the flight threshold.
while 1:
    #pdb.set_trace()
    try:
        # Non-blocking read of a PID-tuning command; raises when no data.
        data2 = conn2.recv(BUFFER_SIZE)
        #pdb.set_trace()
        if data2 not in ['', None]: #no data
            setPidConstantsWireless(data2)
        #if no data is rx continue
    except:
        #if no data is received from ps3 controller then continue
        try:
            # Non-blocking read of a PS3 controller packet.
            data = conn.recv(BUFFER_SIZE)
        except:
            #controller is connected but no data has been received
            #send data from CST and IMU when no PS3 input received
            if thr >= 1150: #only run pid if thrust is over 1100
                #try:
                stabilizationCode()
                #except Exception, e:
                #    continue
            #else:
                #due to anomalies in the data stream at the beginning of reading
                #the serial buffer we need to start releasing it before our thrust
                #is good for flight
                #buffRelease=IMU.getLine()
            continue
        """if data in ['',None]: #enable for testing
            #controller is not connected
            #send data from CST and IMU when no PS3 input received
            stabilizationCode()
            continue"""
        #PS3 data received
        #print "received data:"+ data
        key,value=CST.decode(data)
        #print key, value
        if key == 'EXIT': #shutdown pid
            # Cut all four motors to idle before closing the sockets.
            tuple1 = "1000,1000,1000,1000"
            writeResult = IMU.writeSerialPort(tuple1)
            conn.close()
            conn2.close()
            exit()
        CST.updateStateTable(key,value)
        thr = float(CST.getTable()['THRUST'])
        if thr >= 1150: #only run pid if thrust is over 1100
            #try:
            stabilizationCode()
            #except Exception, e:
            #continue
            #else:
            #due to anomalies in the data stream at the beginning of reading
            #the serial buffer we need to start releasing it before our thrust
            #is good for flight
            #buffRelease=IMU.getLine()
        conn.send(data) #echo
        #conn.close()
|
4,418 | c036621c5f03d94987b4da004d063d11a7cc8424 | # -*- coding:utf-8 -*-
'''
Created on 2013. 4. 30.
@author: Hwang-JinHwan
parsing the txt file which are generated by coping the pdf nova praxis rpg rule book
to create bootstrap document
'''
import re
import codecs
template = """
<head>
<style type="text/css">
body {{
padding-top: 60px;
padding-bottom: 40px;
}}
</style>
<link href="//netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/css/bootstrap-combined.min.css" rel="stylesheet">
</head>
<body>
<div class="navbar navbar-inverse navbar-fixed-top">
<div class="navbar-inner">
<div class="container">
<ul class="nav">
{nav_content}
</ul>
</div>
</div>
</div>
<div class='container'>
<div class="row">
{body_content}
</div>
</div>
<script src="//code.jquery.com/jquery-1.4.2.min.js"></script>
<script src="//netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/js/bootstrap.min.js"></script>
</body>
"""
"""
<li class="dropdown">
<a data-toggle="dropdown" class="dropdown-toggle" href="#">Dropdown <b class="caret"></b></a>
<ul class="dropdown-menu">
<li>
<a href="#">2-level Dropdown <i class="icon-arrow-right"></i></a>
<ul class="dropdown-menu sub-menu">
<li><a href="#">Action</a></li>
<li><a href="#">Another action</a></li>
<li><a href="#">Something else here</a></li>
<li class="divider"></li>
<li class="nav-header">Nav header</li>
<li><a href="#">Separated link</a></li>
<li><a href="#">One more separated link</a></li>
</ul>
</li>
<li><a href="#">Another action</a></li>
<li><a href="#">Something else here</a></li>
<li class="divider"></li>
<li class="nav-header">Nav header</li>
<li><a href="#">Separated link</a></li>
<li><a href="#">One more separated link</a></li>
</ul>
</li>
"""
nav_template ="""
<li class="dropdown">
<a data-toggle="dropdown" class="dropdown-toggle" href="#">Dropdown <b class="caret"></b></a>
<ul class="dropdown-menu">
{drop_down_content}
<li>
<a href="#">Link</a>
</li>
<li class="active">
<a href="#">Link</a>
</li>
<li class="divider"></li>
<li>
<a href="#">Link</a>
</li>
</ul>
</li>
"""
indexed_title = []
def resolve_index_line(line):
def resolve(matchObj):
title = matchObj.group(1)
indexed_title.append(title.lower())
dot = matchObj.group(2)
page_num = matchObj.group(3)
return ur'<a href="#%s">%s</a>%s<a href="#p%s">%s</a><br>'%(title.lower(), title, dot, page_num.lower(), page_num)
return re.sub(ur'(\w.*?)\s*(\.{2,})\s*(\d+)', resolve, line, re.M | re.I)
curr_searching_title = 0
def resovle_title_line(line):
    """Turn *line* into an HTML heading if it is the next expected title.

    Titles are matched in the order they appeared in the table of contents
    (``indexed_title``); ``curr_searching_title`` tracks how many have been
    consumed so far.  Non-title lines are returned unchanged.
    """
    global curr_searching_title
    # BUG FIX: guard against running past the end of the title list -- once
    # every indexed title had been matched, the original raised IndexError.
    if (curr_searching_title < len(indexed_title)
            and line.rstrip().lower() == indexed_title[curr_searching_title]):
        curr_searching_title += 1
        # CHAPTER headings are top-level; everything else is a sub-heading.
        level = 1 if line.startswith("CHAPTER") else 3
        return '<h{level}><a name="{anchor}"></a>{text}</h{level}>\n'.format(
            anchor=line.rstrip().lower(), text=line.rstrip(), level=level)
    return line
def resolve_normal_line(line):
sub_line = re.sub(ur'(pg. |page )(\d+)', ur'<a href="#p\2">\g<0></a>', line, re.M | re.I)
if line != sub_line:
print line,
print sub_line,
sub_line = "<p>%s</p>\n" % sub_line.rstrip()
return sub_line
def get_nav_content():
    """Render the navbar dropdown markup: one entry per indexed title."""
    items = ['<li><a href="#%s">%s</a></li>\n' % (title, title)
             for title in indexed_title]
    return nav_template.format(drop_down_content="".join(items))
if __name__ == '__main__':
    # fr = open("resource/bar_test.txt", 'r')
    fr = open("resource/nova praxis all.txt", 'r')
    lines = fr.readlines()
    # Pages 1..toc_page_num hold the table of contents; prev_page_num tracks
    # the page number most recently seen in the body text.
    toc_page_num = 5
    prev_page_num = 2
    body_content = []
    buffered = []
    for line in lines:
        if(prev_page_num+1 <= toc_page_num):
            # Still inside the table of contents: try to linkify the entry.
            ret = resolve_index_line(line)
            if ret != line:
                buffered.append(ret)
                continue
        elif(prev_page_num+1 >toc_page_num):
            # Body text: promote lines that match the next indexed title.
            ret = resovle_title_line(line)
            if ret != line:
                buffered.append(ret)
                continue
        # data = fr.read()
        # A line holding only digits is a candidate page-number marker.
        matchObj = re.match(ur'^(\d+)$', line, re.M | re.I)
        if matchObj:
            page_num = int(matchObj.group(1))
            if page_num < prev_page_num or page_num > prev_page_num + 2:
                # Out-of-sequence number: treat it as ordinary body text.
                line = resolve_normal_line(line)
                buffered.append(line)
                continue
            matched_tail = matchObj.group()
            print "#MATCH:", matched_tail
            # Close the current page: wrap the buffer in a .well div with a
            # page anchor, flush it, then start buffering the next page.
            buffered.append(matched_tail + "<br>\n")
            buffered.insert(0, '<div class="well">')
            buffered.append(r'</div>')
            buffered.insert(0, '<a name="p%s"></a>' % page_num)
            body_content.append("".join(buffered))
            buffered = []
            buffered.append(line[len(matched_tail):])
            prev_page_num = page_num
        else:
            line = resolve_normal_line(line)
            buffered.append(line)
    fw = codecs.open("resource/nova_praxis.html", 'w', encoding='utf-8')
    body_content.append("".join(buffered))
    fw.write(template.format(body_content="".join(body_content), nav_content=get_nav_content()))
    fr.close()
    fw.close()
|
4,419 | 80891a4c9703f91509d2c1b22304f33426dfb962 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/cypher/.eric6/eric6plugins/vcsGit/ConfigurationPage/GitPage.ui'
#
# Created by: PyQt5 UI code generator 5.8
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_GitPage(object):
    """pyuic5-generated form class for the eric6 Git configuration page.

    Auto-generated from GitPage.ui -- regenerate with pyuic5 rather than
    hand-editing (see the file header warning).
    """

    def setupUi(self, GitPage):
        """Build the widget tree, layouts and tab order for *GitPage*."""
        GitPage.setObjectName("GitPage")
        GitPage.resize(609, 751)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(GitPage)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.headerLabel = QtWidgets.QLabel(GitPage)
        self.headerLabel.setObjectName("headerLabel")
        self.verticalLayout_2.addWidget(self.headerLabel)
        self.line15 = QtWidgets.QFrame(GitPage)
        self.line15.setFrameShape(QtWidgets.QFrame.HLine)
        self.line15.setFrameShadow(QtWidgets.QFrame.Sunken)
        # NOTE(review): setFrameShape is called twice -- a pyuic generator
        # artifact; harmless.
        self.line15.setFrameShape(QtWidgets.QFrame.HLine)
        self.line15.setObjectName("line15")
        self.verticalLayout_2.addWidget(self.line15)
        # "Log" group: message count and subject width.
        self.groupBox = QtWidgets.QGroupBox(GitPage)
        self.groupBox.setObjectName("groupBox")
        self.gridLayout = QtWidgets.QGridLayout(self.groupBox)
        self.gridLayout.setObjectName("gridLayout")
        self.label = QtWidgets.QLabel(self.groupBox)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.logSpinBox = QtWidgets.QSpinBox(self.groupBox)
        self.logSpinBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.logSpinBox.setMaximum(999999)
        self.logSpinBox.setObjectName("logSpinBox")
        self.gridLayout.addWidget(self.logSpinBox, 0, 1, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(41, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 0, 2, 1, 1)
        self.label_7 = QtWidgets.QLabel(self.groupBox)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 1, 0, 1, 1)
        self.logWidthSpinBox = QtWidgets.QSpinBox(self.groupBox)
        self.logWidthSpinBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.logWidthSpinBox.setMinimum(10)
        self.logWidthSpinBox.setObjectName("logWidthSpinBox")
        self.gridLayout.addWidget(self.logWidthSpinBox, 1, 1, 1, 1)
        self.verticalLayout_2.addWidget(self.groupBox)
        # "Commit" group: history size and commit-ID length.
        self.groupBox_2 = QtWidgets.QGroupBox(GitPage)
        self.groupBox_2.setObjectName("groupBox_2")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox_2)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.label_2 = QtWidgets.QLabel(self.groupBox_2)
        self.label_2.setObjectName("label_2")
        self.gridLayout_2.addWidget(self.label_2, 0, 0, 1, 1)
        self.commitSpinBox = QtWidgets.QSpinBox(self.groupBox_2)
        self.commitSpinBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.commitSpinBox.setMinimum(1)
        self.commitSpinBox.setMaximum(100)
        self.commitSpinBox.setObjectName("commitSpinBox")
        self.gridLayout_2.addWidget(self.commitSpinBox, 0, 1, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem1, 0, 2, 1, 1)
        self.label_4 = QtWidgets.QLabel(self.groupBox_2)
        self.label_4.setObjectName("label_4")
        self.gridLayout_2.addWidget(self.label_4, 1, 0, 1, 1)
        self.commitIdSpinBox = QtWidgets.QSpinBox(self.groupBox_2)
        self.commitIdSpinBox.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.commitIdSpinBox.setMinimum(1)
        self.commitIdSpinBox.setMaximum(40)
        self.commitIdSpinBox.setObjectName("commitIdSpinBox")
        self.gridLayout_2.addWidget(self.commitIdSpinBox, 1, 1, 1, 1)
        spacerItem2 = QtWidgets.QSpacerItem(269, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem2, 1, 2, 1, 1)
        self.verticalLayout_2.addWidget(self.groupBox_2)
        # "Cleanup" group: file-name patterns.
        self.groupBox_5 = QtWidgets.QGroupBox(GitPage)
        self.groupBox_5.setObjectName("groupBox_5")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.groupBox_5)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label_3 = QtWidgets.QLabel(self.groupBox_5)
        self.label_3.setObjectName("label_3")
        self.horizontalLayout.addWidget(self.label_3)
        self.cleanupPatternEdit = QtWidgets.QLineEdit(self.groupBox_5)
        self.cleanupPatternEdit.setObjectName("cleanupPatternEdit")
        self.horizontalLayout.addWidget(self.cleanupPatternEdit)
        self.verticalLayout_2.addWidget(self.groupBox_5)
        # "Repository Optimization" group.
        self.groupBox_3 = QtWidgets.QGroupBox(GitPage)
        self.groupBox_3.setObjectName("groupBox_3")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox_3)
        self.verticalLayout.setObjectName("verticalLayout")
        self.aggressiveCheckBox = QtWidgets.QCheckBox(self.groupBox_3)
        self.aggressiveCheckBox.setObjectName("aggressiveCheckBox")
        self.verticalLayout.addWidget(self.aggressiveCheckBox)
        self.verticalLayout_2.addWidget(self.groupBox_3)
        self.configButton = QtWidgets.QPushButton(GitPage)
        self.configButton.setObjectName("configButton")
        self.verticalLayout_2.addWidget(self.configButton)
        spacerItem3 = QtWidgets.QSpacerItem(388, 21, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem3)
        self.retranslateUi(GitPage)
        QtCore.QMetaObject.connectSlotsByName(GitPage)
        GitPage.setTabOrder(self.logSpinBox, self.logWidthSpinBox)
        GitPage.setTabOrder(self.logWidthSpinBox, self.commitSpinBox)
        GitPage.setTabOrder(self.commitSpinBox, self.commitIdSpinBox)
        GitPage.setTabOrder(self.commitIdSpinBox, self.cleanupPatternEdit)
        GitPage.setTabOrder(self.cleanupPatternEdit, self.aggressiveCheckBox)
        GitPage.setTabOrder(self.aggressiveCheckBox, self.configButton)

    def retranslateUi(self, GitPage):
        """Install the translated texts and tooltips on all widgets."""
        _translate = QtCore.QCoreApplication.translate
        self.headerLabel.setText(_translate("GitPage", "<b>Configure Git Interface</b>"))
        self.groupBox.setTitle(_translate("GitPage", "Log"))
        self.label.setText(_translate("GitPage", "No. of log messages shown:"))
        self.logSpinBox.setToolTip(_translate("GitPage", "Enter the number of log messages to be shown"))
        self.label_7.setText(_translate("GitPage", "No. of subject characters shown in list:"))
        self.logWidthSpinBox.setToolTip(_translate("GitPage", "Enter the number of characters of the commit subject to be shown in the list"))
        self.groupBox_2.setTitle(_translate("GitPage", "Commit"))
        self.label_2.setText(_translate("GitPage", "No. of commit messages to remember:"))
        self.commitSpinBox.setToolTip(_translate("GitPage", "Enter the number of commit messages to remember"))
        self.label_4.setText(_translate("GitPage", "Commit ID length:"))
        self.commitIdSpinBox.setToolTip(_translate("GitPage", "Enter the number of character to show for the commit ID"))
        self.groupBox_5.setTitle(_translate("GitPage", "Cleanup"))
        self.label_3.setText(_translate("GitPage", "Pattern:"))
        self.cleanupPatternEdit.setToolTip(_translate("GitPage", "Enter the file name patterns to be used for cleaning up (entries separated by a space character)"))
        self.groupBox_3.setTitle(_translate("GitPage", "Repository Optimization"))
        self.aggressiveCheckBox.setToolTip(_translate("GitPage", "Select this to use the \'--aggressive\' option for garbage collection"))
        self.aggressiveCheckBox.setText(_translate("GitPage", "Perform aggressive repository optimization"))
        self.configButton.setToolTip(_translate("GitPage", "Edit the Git configuration file"))
        self.configButton.setText(_translate("GitPage", "Edit configuration file"))
|
4,420 | 4bc61ae2fe6453819a5bbf9cf05976f7800fa7c1 | import cv2
import numpy as np
from matplotlib import pyplot as plt
import glob
def des_match(des_l, des_q):
    """Brute-force match two SIFT descriptor sets, nearest matches first."""
    matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    raw_matches = matcher.match(des_l, des_q)
    return sorted(raw_matches, key=lambda m: m.distance)
def check_match(matches, threshold, txt):
    """Count matches whose distance is within *threshold*.

    Returns the count when the best (first) match is under the threshold;
    otherwise prints "<txt> not found".
    NOTE(review): in that fall-through case the function implicitly returns
    None, and an empty *matches* list raises IndexError -- the caller later
    feeds these counts to max(), which only tolerates None on Python 2.
    """
    count=0
    if (matches[0].distance< threshold):
        for i in range (0,len(matches)):
            if (int(matches[i].distance) <=threshold):
                count+=1
        #print matches[i].distance
        #print txt
        return count
    else:
        print str(txt)+" not found"
'''
cap=cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FPS,30)
while (1):
ret,frame = cap.read()
cv2.imshow('Query Image',frame)
if (cv2.waitKey(112)==ord('p')):
query=frame
cap.release()
break
'''
#Ideal Logo
# Reference logo images, one per brand.
coke=cv2.imread('/home/raj/Downloads/CVproject/IdealLogos/C.jpg')
fanta=cv2.imread('/home/raj/Downloads/CVproject/IdealLogos/F.jpg')
star=cv2.imread('/home/raj/Downloads/CVproject/IdealLogos/S.jpg')
sprite=cv2.imread('/home/raj/Downloads/CVproject/IdealLogos/SP.jpg')
redbull=cv2.imread('/home/raj/Downloads/CVproject/IdealLogos/R.jpg')
pepsi=cv2.imread('/home/raj/Downloads/CVproject/IdealLogos/P.jpg')
heineken=cv2.imread('/home/raj/Downloads/CVproject/IdealLogos/H.jpg')
test_set=glob.glob('/home/raj/Downloads/CVproject/testset/*.jpg')
print len(test_set)
for j in range(0,len(test_set)):
    #Query Image
    query=cv2.imread(test_set[j])
    # NOTE(review): the grayscale conversions and logo SIFT descriptors
    # below are loop-invariant and could be hoisted out of the loop.
    C=cv2.cvtColor(coke,cv2.COLOR_BGR2GRAY)
    F=cv2.cvtColor(fanta,cv2.COLOR_BGR2GRAY)
    S=cv2.cvtColor(star,cv2.COLOR_BGR2GRAY)
    SP=cv2.cvtColor(sprite,cv2.COLOR_BGR2GRAY)
    R=cv2.cvtColor(redbull,cv2.COLOR_BGR2GRAY)
    P=cv2.cvtColor(pepsi,cv2.COLOR_BGR2GRAY)
    H=cv2.cvtColor(heineken,cv2.COLOR_BGR2GRAY)
    #Query
    gquery=cv2.cvtColor(query,cv2.COLOR_BGR2GRAY)
    #SIFT Implementation
    sift=cv2.xfeatures2d.SIFT_create()
    #Keypoint and Descriptor for Logos
    kpc,desc = sift.detectAndCompute(C,None)
    kpf,desf = sift.detectAndCompute(F,None)
    kps,dess = sift.detectAndCompute(S,None)
    kpsp,dessp = sift.detectAndCompute(SP,None)
    kpr,desr = sift.detectAndCompute(R,None)
    kpp,desp = sift.detectAndCompute(P,None)
    kph,desh = sift.detectAndCompute(H,None)
    # keypoint and Descriptor for Query
    kpq,desq = sift.detectAndCompute(gquery,None)
    #print temp1
    #img=cv2.drawKeypoints(img,kp4,img)
    #gray4=cv2.drawKeypoints(gray4,kp4,gray4)
    #cv2.imshow("DEs",gray4)
    # Score each brand by the number of descriptor matches under threshold.
    des_count=[]
    threshold=200
    matches=des_match(desc,desq)
    count=check_match(matches,threshold,txt="Coke")
    des_count.append((count,"Coke"))
    matches=des_match(desf,desq)
    count=check_match(matches,threshold,txt="Fanta")
    des_count.append((count,"Fanta"))
    matches=des_match(dess,desq)
    count=check_match(matches,threshold,txt="Starbucks")
    des_count.append((count,"Starbucks"))
    matches=des_match(dessp,desq)
    count=check_match(matches,threshold,txt="Sprite")
    des_count.append((count,"Sprite"))
    matches=des_match(desr,desq)
    count=check_match(matches,threshold,txt="Redbull")
    des_count.append((count,"Redbull"))
    matches=des_match(desp,desq)
    count=check_match(matches,threshold,txt="Pepsi")
    des_count.append((count,"Pepsi"))
    matches=des_match(desh,desq)
    count=check_match(matches,threshold,txt="Heineken")
    des_count.append((count,"Heineken"))
    print des_count
    # Best (count, name) pair wins; label and save the annotated image.
    x,i=max(des_count)
    print x,i
    cv2.putText(query,i,(10,30),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),2)
    cv2.imshow("Query",query)
    cv2.imwrite('/home/raj/Downloads/CVproject/result/'+str(j)+'.jpg',query)
    cv2.waitKey(0)
cv2.destroyAllWindows()
|
4,421 | d6574cacea693517f3eaa92b4b929c2ee73da2e4 | from .tc_gcc import *
class AndroidGccToolChain(GccToolChain):
    """GCC toolchain rooted in an Android NDK installation."""

    def __init__(self, name, ndkDir, gccVersionStr, platformVer, archStr, prefix = "", suffix = ""):
        # TODO: non-windows host platform
        hostPlatform = 'windows'
        installDir = os.path.join(ndkDir, 'toolchains', prefix + gccVersionStr, 'prebuilt', hostPlatform)
        super().__init__(name, installDir, False, prefix, suffix)
        self.ndkDir = ndkDir
        self.gccVersionStr = gccVersionStr
        self.platformVer = platformVer
        self.archStr = archStr
        self.ndkDirEsc = binutils_esc_path(self.ndkDir)
        # Every ARM variant maps onto the single "arch-arm" platform dir.
        if "arm" in archStr:
            shortArch = "arm"
        else:
            shortArch = archStr
        sysroot = "%s/platforms/android-%d/arch-%s" % (binutils_esc_path(ndkDir), platformVer, shortArch)

        def _add_sysroot_flags(option_list):
            # Point gcc at the NDK binaries and the platform sysroot.
            option_list.append('-B="%s"' % (binutils_esc_path(installDir)))
            option_list.append('--sysroot="%s"' % (sysroot))

        _add_sysroot_flags(self.defaultCppOptions)   # default options (c++)
        _add_sysroot_flags(self.defaultLinkOptions)  # default options (link)
        self.defaultLinkOptions.append('-Wl,-L"%s/usr/lib"' % (sysroot))
|
4,422 | 064f535b7ea0f1e4a09bdf830021f17d175beda7 | #coding=utf-8
from __future__ import division
import os
def judgeReported(evi, content):
    """Return 'Y' if *content* shows the experiment was reported, else 'N'.

    A paper counts as "reported" when it contains any reporting phrase from
    evi['reported'], or any negative "properly" phrase (which implies the
    work was at least reported).
    """
    # BUG FIX: str.find() returns 0 for a match at the very start of the
    # text; the original `> 0` test silently ignored such matches.
    for item in evi['reported']:
        if content.find(item) != -1:
            return 'Y'
    for item in evi['properly']['neg']:
        if content.find(item) != -1:
            return 'Y'
    return 'N'
def judgeConducted(evi, content):
    """Return 'Y' if the experiment appears to have actually been conducted.

    'N' when a "not conducted" phrase occurs, or when a negative "properly"
    phrase occurs without any reporting phrase to back it up.
    """
    # BUG FIX: use `!= -1` throughout -- str.find() returns 0 for matches
    # at offset 0, which the original `> 0` comparison treated as a miss.
    for item in evi['conducted']:
        if content.find(item) != -1:
            return 'N'
    ran_flag = 'N'
    for item in evi['reported']:
        if content.find(item) != -1:
            ran_flag = 'Y'
    for item in evi['properly']['neg']:
        if content.find(item) != -1 and ran_flag == 'N':
            return 'N'
    return 'Y'
def judgeDescribedOld(paper_id, evi, content):
    """Legacy scorer: 'Y' if any section's word-evidence product exceeds 2000.

    *content* maps section name -> text; *evi* maps word -> weight.  Each
    section's score multiplies the weights of its words (0.1 for unknown
    words).  *paper_id* is accepted but unused.
    NOTE(review): mutates *content* in place (punctuation is stripped), and
    *ranking* is built and sorted but never used.
    """
    score = {}
    for k in content.keys():
        score[k] = 1
        excld = ['.',',',':','#','!','(',')','"','?']
        for e in excld:
            content[k] = content[k].replace(e, '')
        for word in content[k].split():
            if word in evi.keys():
                score[k] *= evi[word]
            else:
                score[k] *= 0.1
    ranking = [(score[key], key) for key in score.keys()]
    ranking.sort()
    ranking.reverse()
    for kk in score.keys():
        if score[kk] > 2000:
            return 'Y'
    return 'N'
def judgeDescribed(evi, content):
    """Return 'Y' if any "properly" phrase (positive or negative) occurs."""
    # BUG FIX: `!= -1` instead of `> 0`, so matches at offset 0 count too
    # (str.find() returns the index, which is 0 at the start of the text).
    phrase = evi['properly']['pos'] + evi['properly']['neg']
    for item in phrase:
        if content.find(item) != -1:
            return 'Y'
    return 'N'
def judgeProperly(evi, content):
    """Return 'Y' only if a positive phrase occurs and no negative phrase does.

    Prints the phrase that decided the verdict.
    """
    # BUG FIX: `!= -1` instead of `> 0` (str.find returns 0 at offset 0).
    for p in evi['neg']:
        if content.find(p) != -1:
            print('Not done properly:\t' + p)
            return 'N'
    for q in evi['pos']:
        if content.find(q) != -1:
            print('Done properly:\t' + q)
            return 'Y'
    return 'N'
def getJudgement(paper_id, content_string, evidence):
    """Grade a paper A-E by walking the reported/conducted/described/properly
    checks in order; the first failing check fixes all remaining fields."""
    result = {}
    result['reported'] = judgeReported(evidence, content_string)
    if result['reported'] == 'N':
        result.update(conducted='N', described='N', properly='N', result='E')
        return result
    result['conducted'] = judgeConducted(evidence, content_string)
    if result['conducted'] == 'N':
        result.update(described='N', properly='N', result='D')
        return result
    result['described'] = judgeDescribed(evidence, content_string)
    if result['described'] == 'N':
        result.update(properly='N', result='C')
        return result
    result['properly'] = judgeProperly(evidence['properly'], content_string)
    result['result'] = 'B' if result['properly'] == 'N' else 'A'
    return result
|
4,423 | 53c5f298dbfb21d7688fef8f0312858e2fd73d79 | # Python 3 program - Currency Sum Validator
# def bill_count
def bill_count(amount_user, list_of_money_bills):
    """Greedily count the bills needed to make up *amount_user*.

    *list_of_money_bills* is assumed sorted in ascending order of value
    (as in the original -- TODO confirm callers sort their input).  Any
    remainder smaller than the smallest bill is silently ignored.  Prints
    the count and also returns it (the return value is new; the original
    returned None, so this is backward-compatible).
    """
    remaining = amount_user
    count = 0
    # Walk denominations from largest to smallest, taking as many of each
    # as fit.  (The original built a list, then recounted it via a dict;
    # the total is simply the number of bills taken.)
    for bill in reversed(list_of_money_bills):
        if bill <= 0:
            # Guard: a zero/negative denomination would loop forever.
            continue
        while remaining >= bill:
            remaining -= bill
            count += 1
    print("The minimum count of money bills required to equal the user money amount is:" + str(count))
    return count
# Driver Code
if __name__ == '__main__':
    # Read the target amount and the space-separated denominations from stdin.
    amount_user = int(input("Enter the total amount that the user has: "))
    list_of_money_bills = [int(x) for x in input("Enter the list of available money bills:").split()]
    bill_count(amount_user, list_of_money_bills)
# This code is contributed by
# Akanksha Bothe
|
4,424 | 02bec34b138d53235dc944adeae8ccb8d6b3d340 | from django.shortcuts import render, HttpResponse, redirect
from .models import Book, Author # This is the models.py Database
# Create your views here.
def main(request):
    """Render the index page with every Book."""
    context = {
        "the_books" : Book.objects.all(), #Book Class model.py
    }
    return render(request, "index.html", context)
def book(request):
    """Create a Book from the submitted form, then return to the index."""
    # NOTE(review): reads request.POST directly, so a GET request raises.
    Book.objects.create(title = request.POST['b_title'], desc = request.POST['b_desc'])
    return redirect('/')
def author(request):
    """Render the author page with every Author."""
    context = {
        "the_auths" : Author.objects.all(), #Author Class model.py
    }
    return render(request, "author.html", context)
def auth(request):
    """Create an Author from the submitted form, then return to the author list."""
    Author.objects.create(first_name = request.POST['a_first'], last_name = request.POST['a_last'], notes = request.POST['a_notes'])
    # newA = Author(first_name= "jlkj")
    # newA.save()
    return redirect('/author')
def authInfo(request, authorid):
    """Render the detail page for one Author."""
    context = {
        'selectedAuthor' : Author.objects.get(id=authorid)
    }
    return render(request, "author_info.html", context)
def bookInfo(request, bookid):
    """Render the detail page for one Book, with all Authors for the picker."""
    context = {
        'selectedBook' : Book.objects.get(id=bookid),
        'allAuthors' : Author.objects.all()
    }
    return render(request, "book_info.html", context)
def authUpdate(request, bookid):
    """Attach the author chosen in the form to the given book."""
    this_book = Book.objects.get(id=bookid)
    this_auth = Author.objects.get(id = request.POST['chosenAuth'])
    this_book.authors.add(this_auth)
    return redirect(f"/bookinfo/{bookid}")
4,425 | 01849a6bf5ce5eb75c549af28312f61711ad2494 | import smtplib
import subprocess
import time
class NotifyError(Exception):
    """Raised when a notification backend cannot deliver messages."""

    def __init__(self, message):
        # BUG FIX: forward the message to Exception so that str(exc),
        # repr(exc) and exc.args behave normally; previously
        # str(NotifyError(...)) was the empty string.
        super().__init__(message)
        self.message = message
class Notification(object):
    """Base class that queues messages per recipient and sends them in batches."""

    def __init__(self, config, dry_run):
        self.dry_run = dry_run
        self.notifications = {}

    def submit(self, recipient, message):
        """Queue *message* for later delivery to *recipient*."""
        self.notifications.setdefault(recipient, []).append(message)

    def notify_all(self):
        """Deliver every queued batch, one recipient at a time, then clear it."""
        separator = '\r\n\r\n-------------------\r\n\r\n'
        for recipient, pending in self.notifications.items():
            if pending:
                self.notify(recipient, separator.join(pending))
                # Throttle between recipients.
                time.sleep(5)
                self.notifications[recipient] = []

    def notify(self, recipient, message):
        raise NotImplementedError('Need to subclass Notification')

    def connect(self):
        raise NotImplementedError('Need to subclass Notification')

    def close(self):
        raise NotImplementedError('Need to subclass Notification')
class SendMail(Notification):
    """Notification backend that delivers mail via the local sendmail binary."""

    def __init__(self, config, dry_run):
        super().__init__(config, dry_run)
        self.address = config.sendmail.address
        self.contact_info = config.sendmail.contact_info
        # BUG FIX: the original list was missing a comma after the '{}'
        # message placeholder, so two adjacent literals were concatenated
        # and the blank line before the signature was lost.
        self.message_template = '\r\n'.join(['From: '+self.address,
                                             'To: {}',
                                             'Subject: ['+config.name+'] Notifications',
                                             '',
                                             'Greetings Human {},',
                                             '',
                                             '{}',
                                             '',
                                             '',
                                             'Beep boop,',
                                             config.name + ' Bot'])

    def notify(self, recipient, message):
        """Pipe the formatted message into sendmail for *recipient*."""
        # -i: do NOT treat a bare '.' line as end of input (the pre-existing
        # comment documented this flag but the original never passed it).
        # BUG FIX: '-f' and the envelope-from address must be separate argv
        # entries; the original passed a single "-f <addr>" string, which
        # sendmail would read as an address beginning with a space.
        cmd = ['/usr/sbin/sendmail', '-i', '-f', self.address, self.contact_info[recipient]['address']]
        msg = self.message_template.format(self.contact_info[recipient]['address'], self.contact_info[recipient]['name'], message)
        proc = subprocess.Popen(cmd, shell=False,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate(input=msg.encode('utf-8'))
        #TODO handle errors
        #print(f"ret: {proc.returncode}")
        #print("stdout:" + str(out))
        #print("stderr:" + str(err))

    def connect(self):
        """No persistent connection is needed for sendmail."""
        pass

    def close(self):
        pass
class SMTP(Notification):
    """Notification backend that delivers mail through an SMTP server."""

    def __init__(self, config, dry_run):
        super().__init__(config, dry_run)
        self.hostname = config.smtp.hostname
        self.username = config.smtp.username
        self.passwd = config.smtp.passwd
        self.address = config.smtp.address
        self.contact_info = config.smtp.contact_info
        self.connected = False
        # NOTE(review): there is a missing comma after the '{}' placeholder
        # below, so it is concatenated with the following '' literal and the
        # blank line before the signature is lost -- likely unintended.
        self.message_template = '\r\n'.join(['From: '+self.address,
                                             'To: {}',
                                             'Subject: ['+config.name+'] Notifications',
                                             '',
                                             'Greetings Human {},',
                                             '',
                                             '{}'
                                             '',
                                             '',
                                             'Beep boop,',
                                             config.name + ' Bot'])

    #TODO deal with smtplib exceptions
    def connect(self):
        """Open, upgrade (STARTTLS) and authenticate the SMTP session."""
        self.server = smtplib.SMTP(self.hostname)
        self.server.ehlo()
        self.server.starttls()
        self.server.login(self.username, self.passwd)
        self.connected = True

    #TODO implement saving messages to disk with timestamp if send fails
    #TODO deal with smtplib exceptions
    def notify(self, recipient, message):
        """Send *message* to *recipient*; connect() must have been called."""
        if not self.connected:
            raise NotifyError('Not connected to SMTP server; cannot send notifications')
        self.server.sendmail(self.address,
                             self.contact_info[recipient]['address'],
                             self.message_template.format(self.contact_info[recipient]['address'], self.contact_info[recipient]['name'], message)
                             )

    #TODO deal with smtplib exceptions
    def close(self):
        """Quit the SMTP session if one is open."""
        if self.connected:
            self.server.quit()
            self.connected = False
|
4,426 | 72f3ae476581ff5acd6c7101764f4764285a47bd | input_object = open("input.txt", "r")
input_data = input_object.readlines()
input_object.close()
cleaned_data = []
for line in input_data:
cleaned_data.append(int(line.strip()))
input_size = len(cleaned_data)
for i in range(0, input_size):
for j in range(i, input_size):
for k in range(j, input_size):
if cleaned_data[i] + cleaned_data[j] + cleaned_data[k] == 2020:
ans = cleaned_data[i]*cleaned_data[j]*cleaned_data[k]
print(ans)
break |
4,427 | 571636be9d213d19bddfd1d04688bc0955c9eae5 | print('SYL_2整型数组_12 合并排序数组') |
4,428 | 7d6e8e6142184a1540daa29dac802fe75bd93d8e |
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__="""
The Canvas object is the primary interface for creating PDF files. See
doc/reportlab-userguide.pdf for copious examples.
"""
__all__ = ['Canvas']
ENABLE_TRACKING = 1 # turn this off to do profile testing w/o tracking
import os
import sys
import re
import hashlib
from string import digits
import tempfile
from math import sin, cos, tan, pi, ceil
from reportlab import rl_config, ascii, xrange
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase import pdfdoc
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import pdfgeom, pathobject
from reportlab.pdfgen.textobject import PDFTextObject, _PDFColorSetter
from reportlab.lib.colors import black, _chooseEnforceColorSpace, Color, CMYKColor, toColor
from reportlab.lib.utils import import_zlib, ImageReader, isSeq, isStr, isUnicode, _digester
from reportlab.lib.rl_accel import fp_str, escapePDF
from reportlab.lib.boxstuff import aspectRatioFix
from reportlab.pdfgen import canvas
c = canvas.Canvas("essai.pdf")
from reportlab.lib.units import inch
# move the origin up and to the left
c.translate(inch, inch)
# define a large font
c.setFont("Helvetica", 80)
# choose some colors
c.setStrokeColorRGB(0.2, 0.5, 0.3)
c.setFillColorRGB(1, 0, 1)
# draw a rectangle
c.rect(inch, inch, 6 * inch, 9 * inch, fill=1)
# make text go straight up
c.rotate(90)
# change color
c.setFillColorRGB(0, 0, 0.77)
# say hello (note after rotate the y coord needs to be negative!)
c.drawString(6 * inch, -6 * inch, "welcome my project pharmacie")
c.showPage()
c.save() |
4,429 | 1522ebb52504f7f27a526b597fe1e262bbcbfbb0 | #!/usr/bin/python3
def add_tuple(tuple_a=(), tuple_b=()):
if len(tuple_a) < 1:
a_x = 0
else:
a_x = tuple_a[0]
if len(tuple_a) < 2:
a_y = 0
else:
a_y = tuple_a[1]
if len(tuple_b) < 1:
b_x = 0
else:
b_x = tuple_b[0]
if len(tuple_b) < 2:
b_y = 0
else:
b_y = tuple_b[1]
a = a_x + b_x
b = a_y + b_y
tuple_c = (a, b)
return tuple_c
|
4,430 | 9f02313b6f91f83e3a8b4af8d9447b1d8f3558f6 | import socket
from threading import Thread
from ast import literal_eval
clients = {}      # socket -> display name of each connected client
addresses = {}    # socket -> (host, port) the client connected from
host = '127.0.0.1'
port = 5678
active = []       # list of {'Address': ..., 'Name': ...} for connected users
addr = (host, port)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(addr)
groups = []       # unused placeholder for future group-chat support
def broadcast(msg, prefix=""):  # prefix is for name identification.
    """Broadcasts a message to all the clients."""
    encoded_prefix = bytes(prefix, "utf8")
    for client_sock in clients:
        client_sock.send(encoded_prefix + msg)
def broadcast_file(msg):
    """Send raw bytes *msg* (e.g. a file payload) to every connected client."""
    for sock in clients:
        sock.send(msg)
def private_message(address, message):
    """Send *message*, tagged <private>, to the client matching *address*."""
    message = '<private>' + message
    # NOTE(review): matches by substring of the socket object's repr --
    # fragile, and raises IndexError when no connected client matches.
    receiver = list(filter(lambda x: address in str(x), clients))[0]
    receiver.send(bytes(message,'utf-8'))
def accept_incoming_connections():
    """Sets up handling for incoming clients."""
    # Accept forever; each client gets its own handler thread.
    while True:
        client, client_address = server.accept()
        print(str(client_address[0]) + ":" + str(client_address[1]) + " has connected.")
        addresses[client] = client_address
        Thread(target=handle_client, args=(client,)).start()
def handle_client(client):  # Takes client socket as argument.
    """Handles a single client connection."""
    # The first message from a client is its display name.
    name = client.recv(2048).decode("utf8")
    welcome = 'Welcome %s! Enter {quit} to exit.' % name
    try:
        client.send(bytes(welcome, "utf8"))
        msg = "%s: has joined the chat!" % name
        broadcast(bytes(msg, "utf8"))
        clients[client] = name
        temp_client = {'Address':addresses[client],'Name':clients[client]}
        active.append(temp_client)
        # Push the updated roster to everyone.
        broadcast(bytes(str(active),'utf-8'))
        while True:
            msg = client.recv(2048)
            try:
                # A message containing '(' and ')' is "(addr)text": a private
                # message addressed by socket repr.
                if '(' in msg.decode('utf-8') and ')' in msg.decode('utf-8'):
                    temp = msg.decode('utf-8').split(')')
                    address = temp[0] + ')'
                    private_message(address,temp[1])
                elif msg != bytes("{quit}", "utf8"):
                    broadcast(msg, "<global>" + name + ": ")
                    print(client)
                else:
                    # {quit}: close the socket, drop the client from the
                    # roster, and announce the departure.
                    #client.send(bytes("{quit}", "utf8"))
                    client.close()
                    active.remove({'Address':addresses[client],'Name':clients[client]})
                    del clients[client]
                    broadcast(bytes("%s has left the chat." % name, "utf8"))
                    broadcast(bytes(str(active),'utf-8'))
                    break
            except:
                # NOTE(review): bare except used as control flow -- a payload
                # that fails to decode as UTF-8 is rebroadcast as raw bytes.
                print(msg)
                broadcast_file(msg)
    except Exception as e:
        print(e)
if __name__ == "__main__":
    server.listen(5)  # Listens for 5 connections at max.
    print("Waiting for connection...")
    # The accept loop never returns, so join() blocks the main thread forever.
    accept_clients_thread = Thread(target=accept_incoming_connections)
    accept_clients_thread.start()  # Starts the infinite loop.
    accept_clients_thread.join()
    server.close()
4,431 | 341fb4442ba1d1bb13dbbe123e1051e1ceeb91e7 | import pymongo
import pandas as pd
import re
from pymongo import MongoClient
from nltk.corpus import stopwords
from nltk import word_tokenize
from gensim import corpora
import pickle
client = MongoClient()
db = client.redditCrawler
collection = db.data_test1
def remove_posts(data, index_list):
    """Drop the rows at *index_list* and renumber the index from 0."""
    trimmed = data.drop(index_list)
    return trimmed.reset_index(drop=True)
# Pull every crawled document into a DataFrame.
data = pd.DataFrame(list(collection.find()))
# Indices of moderator posts, identified by their recurring title text.
mod_posts = [i for i in range(len(data)) if 'moronic Monday' in data['title'][i]]
#remove all the mod posts that include 'moronic Monday'
data = remove_posts(data, mod_posts)
titles = data['title']
content = data['post']
comments = data['comments']
# collect only the comments without vote scores, dates, etc
# assumes each comment dict has a 'comment_reply' field -- TODO confirm schema
comments_in_thread = []
for index, thread in enumerate(comments):
    aggregate = []
    for comment in thread:
        if type(comment['comment_reply']) == str:
            aggregate.append(comment['comment_reply'].lower())
    comments_in_thread.append(aggregate)
comments = comments_in_thread
#number of titles and post need to be the same
assert len(titles) == len(content)
assert len(comments) == len(content)
# Build the stop-word list: NLTK's English defaults plus domain-specific noise.
stop_words = stopwords.words('english')
stop_words.extend(['would',
                   'people',
                   'money',
                   'think',
                   'thinks',
                   'thanks',
                   'thing',
                   'things',
                   'ok',
                   'nt',
                   'actually',
                   'like',
                   'get',
                   'even',
                   'could',
                   'also',
                   ])
#Function to clean off each dataset item; stop words (what, if, is, where, how, I, she)
def preprocess(text):
    """Lowercase `text`, turn separator characters into spaces, tokenize,
    then drop stop words and non-alphabetic tokens."""
    # One-pass character substitution instead of chained str.replace calls.
    separators = str.maketrans({'$': ' ', '-': ' ', '/': ' ', '.': ' '})
    tokens = word_tokenize(text.lower().translate(separators))
    return [tok for tok in tokens if tok not in stop_words and tok.isalpha()]
#pass titles and comments through pre-processor
titles = [preprocess(title) for title in titles]
posts = [preprocess(text) for text in content]
# Merge each thread's title tokens with its preprocessed comment tokens.
##comments = [[preprocess(comment) for comment in thread] for thread in comments]
temp = []
for i, thread in enumerate(comments):
    temp_thread = []
    temp_thread.extend(titles[i])
    for comment in thread:
        temp_thread.extend(preprocess(comment))
    temp.append(temp_thread)
comments = temp
# form a list of dictionaries for each title, compile
# each word and its corresponding frequencies in the post's comment section
list_of_dict = []
for index, title in enumerate(titles):
    text = ''
    bag_of_words = set(title)
    # Count title words inside the raw (unprocessed) comment text.
    text = ' '.join(comments_in_thread[index])
##    text = comments[index]
    dictionary = {word:text.count(word) for word in bag_of_words if text.count(word) > 0}
    list_of_dict.append(dictionary)
# Flatten per-thread keyword dicts into one global keyword set (0 marks empties).
title_keywords = [list(Dict.keys()) if len(Dict) > 0 else [0] for Dict in list_of_dict]
title_keywords = [word for sublist in title_keywords for word in sublist if word != 0 ]
title_keywords = set(title_keywords)
##title_keywords = set(title_keywords)
##count the number of keywords in the comment section
def count_keywords(comments, keywords):
    """Count how often each keyword occurs in `comments` (a token list);
    keywords that never occur are omitted from the result."""
    result = {}
    for word in keywords:
        # Count once instead of twice: the original called comments.count(word)
        # both in the filter and in the value expression (each is O(len(comments))).
        occurrences = comments.count(word)
        if occurrences > 0:
            result[word] = occurrences
    return result
# Per-thread keyword counts over the merged title+comment token lists.
keyword_dict = [count_keywords(comment, title_keywords) for comment in comments]
for index, thread in enumerate(keyword_dict):
    #normalize each keyword by the number of words present
    df = pd.DataFrame()
    df['word'] = thread.keys()
    df['count'] = thread.values()
    df = df.sort_values('count', ascending = False)
    #dividing by number of words in each thread (+1 guards empty threads)
##    df['frequency'] = df['count']/(len(comments[index]))
    df['frequency'] = df['count']/(1+len(comments_in_thread[index]))
    df['count'] = df['count']/(len(comments[index]))**0.5
    keyword_dict[index] = df.reset_index(drop=True)
# Persist every intermediate structure for later analysis.
variables = [data['title'], titles, posts, comments, comments_in_thread,
             list_of_dict, title_keywords, keyword_dict]
with open('variables.txt', 'wb') as fp:
    pickle.dump(variables, fp)
|
4,432 | 5287bd1847848aa527df8ce57e896bc30c70b43c | from django.test import TestCase
from stack_it.models import Image
class TextPageContentModelTest(TestCase):
    """Smoke test for the Image model's creation helpers."""
    def test_instance(self):
        # Build an in-memory image file and persist it with alt text.
        empty_file = Image.create_empty_image_file(name='hello.jpg')
        instance = Image.objects.create(image=empty_file, alt="World")
        self.assertEqual(Image.objects.count(), 1)
        # The storage-prefixed string form still contains the original stem.
        self.assertEqual(str(instance)[16:21], 'hello')
|
4,433 | ef5d235f09eea827b240290218c397f880f1046d | import re
text = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'
# Compile once so the pattern object can be reused by all calls below.
search_pattern = re.compile('nuts')
# search() scans the whole string and returns the first match (or None).
search_match_object = search_pattern.search(text)
if search_match_object:
    print(search_match_object.span())   # (start, end) offsets of the match
    print(search_match_object.start())
    print(search_match_object.end())
    print(search_match_object.group())  # the matched text itself
# Other methods of pattern
print(search_pattern.findall(text))
print(search_pattern.fullmatch('nuts')) # The entire string must match
print(search_pattern.match('nuts...')) # Start of the string must match
|
4,434 | 6cc23e370d1ec1e3e043c3fa6819f9166b6e3b40 | #!/usr/bin/python
class Symbol(object):
    """Abstract base for all symbol-table entries."""
    pass
class Fundef(Symbol):
    """Symbol-table entry for a function: its name, return type and
    argument list. (`type` keeps the original parameter name even though
    it shadows the builtin.)"""
    def __init__(self, name, type, args):
        self.name, self.type, self.args = name, type, args
class VariableSymbol(Symbol):
    """Symbol-table entry for a variable: its name and declared type."""
    def __init__(self, name, type):
        self.name, self.type = name, type
class Scope(object):
    """A single lexical scope: a name -> Symbol mapping plus a link to the
    enclosing (parent) scope, or None for the root."""
    def __init__(self, parent, name):
        self.parent = parent
        self.name = name
        self.entries = dict()
    def put(self, name, symbol):
        """Define (or overwrite) `name` in this scope."""
        self.entries[name] = symbol
    def get(self, name):
        """Return the symbol bound to `name`; raises KeyError if absent."""
        return self.entries[name]
    def has_entry(self, name):
        # dict.has_key() is Python-2-only (removed in 3); `in` works in both.
        return name in self.entries
    def name(self):
        # NOTE(review): dead code -- the instance attribute `self.name` set in
        # __init__ shadows this method, so it is unreachable on instances.
        return self.name
class SymbolTable(object):
    """A flat registry of named Scopes plus a pointer to the active one.

    Ported off Python-2-only dict APIs: has_key() -> `in`,
    iteritems() -> items() (both forms also work under Python 2.7).
    """
    def __init__(self, scope_name):
        root_scope = Scope(None, scope_name)
        self.scopes = dict()               # scope name -> Scope
        self.scopes[scope_name] = root_scope
        self.scope = root_scope            # currently active scope
    def push_scope(self, scope_name):
        """Enter the child scope `scope_name`, creating it if needed."""
        if scope_name not in self.scopes:
            self.scopes[scope_name] = Scope(self.scope, scope_name)
        self.set_scope(scope_name)
    def pop_scope(self):
        """Return to the parent of the current scope."""
        self.set_scope(self.scope.parent.name)
    def set_scope(self, scope_name):
        # Unknown names are silently ignored, matching the original behaviour.
        if scope_name in self.scopes:
            self.scope = self.scopes[scope_name]
    def put(self, name, symbol):
        """Define `name` in the current scope."""
        self.scopes[self.scope.name].put(name, symbol)
    def get(self, name, scope=None):
        """Look up `name` in `scope` (default: current); None if absent."""
        scope_name = scope.name if scope is not None else self.scope.name
        if self.exists(name, scope=scope):
            return self.scopes[scope_name].get(name)
    def exists(self, name, scope=None):
        """True iff `name` is defined directly in `scope` (default: current)."""
        scope_name = scope.name if scope is not None else self.scope.name
        return self.scopes[scope_name].has_entry(name)
    def scope_exists(self, scope_name):
        return scope_name in self.scopes
    def current_scope(self):
        return self.scope.name
    def find(self, name):
        """Resolve `name` by walking up the scope chain; None if not found."""
        scope = self.scope
        while scope is not None:
            if self.exists(name, scope=scope):
                return self.get(name, scope=scope)
            scope = scope.parent
    def __str__(self):
        s = ""
        for scope_name, scope in self.scopes.items():
            s += str(scope_name) + ':\n'
            for entry in scope.entries:
                s += '\t' + str(entry) + ': ' + str(scope.entries[entry])
        return s
|
4,435 | 2168d10a1b4796576cc7ebb6893e0dc8b58085ca |
""" view.py: Contains the View class. """
import random
import config
from graphics import *
class View:
    """ The view class which handles the visual component of the application.

    Rendering is state driven: update() draws whatever the current game
    state (config.GS_*) requires, while change_state() pre-builds the
    static surfaces (backgrounds, buttons, sub-screens) for that state.
    """
    def __init__(self, pygame, master):
        """ Set up and initialise the view. Does not start the display. """
        self._pygame = pygame
        self._master = master
        self._display = self._pygame.display
        self._interface = None              # current Menu/Game interface object
        self._state = None                  # current config.GS_* state id
        self._cycle_colour = (200, 0, 0)    # hue-cycled accent colour
        self._white = (255, 255, 255)
    def start(self):
        """ Start the display. """
        self._screen = self._display.set_mode((640, 480))
        self._display.set_caption('PolyominOhs!')
        self._pygame.mouse.set_visible(0)
    def update(self):
        """ Update the screen. """
        # Constantly cycle through a colour
        # (rgb2hsv/hsv2rgb come from the graphics star-import)
        h, s, v = rgb2hsv(self._cycle_colour)
        h += 1
        self._cycle_colour = hsv2rgb((h, s, v))
        if self._state == config.GS_LOADING:
            self._screen.blit(self._background, (0, 0))
        elif self._state in [config.GS_MENU, config.GS_MENU_ENTER_HIGHSCORE,
                             config.GS_MENU_HIGHSCORES, config.GS_MENU_HELP]:
            # Get current selections
            selected = self._interface.get_selection()
            settings = {config.MENU_LEVEL: str(self._interface.get_level()),
                        config.MENU_ORDER: str(self._interface.get_order()),
                        config.MENU_SFX: self._interface.get_sfx(),
                        config.MENU_MUSIC: self._interface.get_music()}
            # Background and title
            self._screen.blit(self._background, (0, 0))
            draw_text(self._screen, (120, 25), 'PolyominOhs!', 36,
                      self._cycle_colour, self._pygame, True)
            # Buttons (highlight the one under the selection cursor)
            for button in self._buttons.items():
                if button[0] == selected:
                    button[1].draw(self._screen, config.TXT_HOVER,
                                   self._pygame, self._cycle_colour)
                else:
                    button[1].draw(self._screen, config.TXT_NORMAL,
                                   self._pygame)
            # Radio Selections
            for radio in self._radios.items():
                if radio[0] == selected:
                    radio[1].draw(self._screen, settings[radio[0]],
                                  config.TXT_HOVER, self._cycle_colour,
                                  self._pygame)
                else:
                    radio[1].draw(self._screen, settings[radio[0]],
                                  config.TXT_NORMAL, self._cycle_colour,
                                  self._pygame)
            # Random polyomino preview for the currently selected order
            order = self._interface.get_order()
            ominoes = self._master._ominoes[order - 1]
            n = self._interface.get_random_omino()
            shape = ominoes[0][n]
            draw_polyomino(self._screen, (400, 160), shape, 21,
                           self._cycle_colour, self._pygame)
            # Highscores
            if self._state == config.GS_MENU_HIGHSCORES:
                draw_border(self._highscores, self._cycle_colour, self._pygame)
                for i, highscore in enumerate(self._master.get_highscores()):
                    name, score = highscore
                    name = name.replace('_', ' ')
                    if self._interface.get_highscore_highlight() == i:
                        colour = self._cycle_colour
                    else:
                        colour = self._white
                    draw_text(self._highscores, (20, 10 + (i + 1) * 25), name,
                              10, colour, self._pygame)
                    draw_text(self._highscores, (175, 10 + (i + 1) * 25),
                              str(score), 10, colour, self._pygame)
                self._screen.blit(self._highscores, (200, 100))
            # Enter highscore
            if self._state == config.GS_MENU_ENTER_HIGHSCORE:
                self._enterhighscore.fill((0, 0, 0))
                draw_border(self._enterhighscore, self._cycle_colour,
                            self._pygame)
                draw_text(self._enterhighscore, (60, 20), 'Highscore!', 14,
                          self._white, self._pygame)
                draw_text(self._enterhighscore, (20, 60),
                          'Please enter your name:', 10, self._white,
                          self._pygame)
                draw_text(self._enterhighscore, (70, 170), 'Press return', 10,
                          self._white, self._pygame)
                self._name_entry.update(self._interface.get_highscore_name())
                self._name_entry.draw(self._enterhighscore,
                                      self._interface.get_name_selected(),
                                      self._cycle_colour, self._pygame)
                self._screen.blit(self._enterhighscore, (200, 120))
            # Help
            if self._state == config.GS_MENU_HELP:
                draw_border(self._help, self._cycle_colour, self._pygame)
                self._screen.blit(self._help, (115, 120))
        elif self._state in [config.GS_GAME, config.GS_GAME_PAUSED,
                             config.GS_GAME_OVER]:
            # Get current information
            score = str(self._interface.get_score())
            lines = str(self._interface.get_lines_cleared())
            next_omino = self._interface.get_next_omino()
            self._screen.blit(self._background, (0, 0))
            # Score and number of lines cleared
            draw_text(self._screen, (445, 155), score, 10, self._white,
                      self._pygame)
            draw_text(self._screen, (445, 215), lines, 10, self._white,
                      self._pygame)
            # Draw next polyomino
            if self._state == config.GS_GAME:
                draw_polyomino(self._screen, (440, 290), next_omino.get_shape(0),
                               21, next_omino.get_colour(), self._pygame)
            # Draw grid of blocks (or pause or game over screen)
            grid = self._interface.get_field().get_complete_grid()
            self._grid.fill((0, 0, 0))
            draw_border(self._grid, self._cycle_colour, self._pygame)
            if self._state == config.GS_GAME:
                size = config.sizes[self._interface.get_order()]
                draw_grid(self._grid, (5, 5), grid, size, self._pygame)
            elif self._state == config.GS_GAME_PAUSED:
                draw_text(self._grid, (30, 115), 'Game Paused', 14,
                          self._cycle_colour, self._pygame, True)
                draw_text(self._grid, (40, 185), 'Press y to quit', 10,
                          self._white, self._pygame)
                draw_text(self._grid, (30, 215), 'or esc to resume', 10,
                          self._white, self._pygame)
            elif self._state == config.GS_GAME_OVER:
                draw_text(self._grid, (42, 115), 'Game Over', 14,
                          self._cycle_colour, self._pygame, True)
                draw_text(self._grid, (47, 185), 'Press return', 10,
                          self._white, self._pygame)
            self._screen.blit(self._grid, (60, 30))
        self._display.flip()
    def change_state(self, state, interface=None):
        """ Change the state of the application and get the new interface
            (if given). Set up graphics for the new state if required.
            change_state(int, Menu/Game) -> void
        """
        self._state = state
        if interface != None:
            self._interface = interface
        if self._state == config.GS_LOADING:
            # Background with loading text
            self._background = self._pygame.Surface(self._screen.get_size())
            self._background = self._background.convert()
            self._background.fill((0, 0, 0))
            draw_text(self._background, (180, 180), 'Loading...', 36,
                      self._white, self._pygame)
        elif self._state == config.GS_GAME:
            # Background with static text
            self._background = self._pygame.Surface(self._screen.get_size())
            self._background = self._background.convert()
            self._background.fill((0, 0, 0))
            draw_text(self._background, (410, 130), 'Score:', 10,
                      self._white, self._pygame)
            draw_text(self._background, (410, 190), 'Lines Cleared:', 10,
                      self._white, self._pygame)
            next_text = 'Next ' + \
                        config.names[self._interface.get_order()].title() + ':'
            draw_text(self._background, (410, 250), next_text, 10,
                      self._white, self._pygame)
            # Grid surface sized to the playing field
            w = 210 + 10 - self._interface.get_field().get_size()[0] + 1
            h = 420 + 10 - self._interface.get_field().get_size()[1] + 1
            self._grid = self._pygame.Surface((w, h))
            self._grid = self._grid.convert()
            self._grid.fill((0, 0, 0))
            self._grid.set_colorkey((0, 0, 0))
        elif self._state in [config.GS_MENU, config.GS_MENU_ENTER_HIGHSCORE,
                             config.GS_MENU_HIGHSCORES]:
            # Background with static text
            self._background = self._pygame.Surface(self._screen.get_size())
            self._background = self._background.convert()
            self._background.fill((0, 0, 0))
            draw_text(self._background, (110, 300), 'Settings:', 10,
                      self._white, self._pygame)
            draw_text(self._background, (130, 340), 'Difficulty Level:', 10,
                      self._white, self._pygame)
            draw_text(self._background, (130, 400), 'Polyomino Order:', 10,
                      self._white, self._pygame)
            draw_text(self._background, (370, 300), 'Audio:', 10,
                      self._white, self._pygame)
            draw_text(self._background, (400, 340), 'Sound Effects:', 10,
                      self._white, self._pygame)
            draw_text(self._background, (400, 400), 'Music:', 10,
                      self._white, self._pygame)
            # Buttons
            self._buttons = {}
            start_game_button = Button('Start Game', 10, (90, 150))
            self._buttons.update({config.MENU_START: start_game_button})
            view_highscores_button = Button('View Highscores', 10, (90, 180))
            self._buttons.update({config.MENU_HIGHSCORES: view_highscores_button})
            help_button = Button('Help', 10, (90, 210))
            self._buttons.update({config.MENU_HELP: help_button})
            quit_button = Button('Quit', 10, (90, 240))
            self._buttons.update({config.MENU_QUIT: quit_button})
            # Radio Selections
            self._radios = {}
            level_selection = Radio_Selection([str(n + 1) for n in range(9)],
                                             10, (160, 365))
            self._radios.update({config.MENU_LEVEL: level_selection})
            order_selection = Radio_Selection([str(n + 1) for n in range(6)],
                                             10, (160, 425))
            self._radios.update({config.MENU_ORDER: order_selection})
            sfx_selection = Radio_Selection(['On', 'Off'], 10, (435, 365))
            self._radios.update({config.MENU_SFX: sfx_selection})
            music_selection = Radio_Selection(['On', 'Off'], 10, (435, 425))
            self._radios.update({config.MENU_MUSIC: music_selection})
            # Highscores Screen
            self._highscores = self._pygame.Surface((250, 300))
            self._highscores = self._highscores.convert()
            self._highscores.fill((0, 0, 0))
            draw_text(self._highscores, (15, 10), 'Highscores:', 10,
                      self._white, self._pygame)
            # Enter highscore name screen
            self._enterhighscore = self._pygame.Surface((250, 210))
            self._enterhighscore = self._enterhighscore.convert()
            self._enterhighscore.fill((0, 0, 0))
            self._name_entry = Text_Entry(3, ['A', 'A', 'A'], 20, (85, 105))
            # Help Screen
            self._help = self._pygame.Surface((410, 240))
            self._help = self._help.convert()
            self._help.fill((0, 0, 0))
            draw_text(self._help, (15, 10), 'Controls:', 10, self._white,
                      self._pygame)
            draw_text(self._help, (205, 10), 'Instructions:', 10,
                      self._white, self._pygame)
            draw_text(self._help, (20, 45), 'Up - Rotate', 10, self._white,
                      self._pygame)
            draw_text(self._help, (20, 75), 'Left - Move Left', 10,
                      self._white, self._pygame)
            draw_text(self._help, (20, 105), 'Right - Move Right', 10,
                      self._white, self._pygame)
            draw_text(self._help, (20, 135), 'Down - Move Down', 10,
                      self._white, self._pygame)
            draw_text(self._help, (20, 165), 'Space - Drop', 10, self._white,
                      self._pygame)
            draw_text(self._help, (20, 195), 'Esc - Pause', 10, self._white,
                      self._pygame)
            text = config.instructions
            rect = self._pygame.Rect(0, 0, 190, 190)
            instructions = render_textrect(text, 8, rect, self._white,
                                           (0, 0, 0), 0, self._pygame)
            self._help.blit(instructions, (210, 45))
|
4,436 | d75187ed435c3d3aeeb31be4a0a4ed1754f8d160 | from temp_conversion_script import convert_c_to_f
from temp_conversion_script import fever_detection
def test_convert_c_to_f():
    """20 degrees Celsius should convert to 68 degrees Fahrenheit."""
    assert convert_c_to_f(20.0) == 68.0
def test2():
    """-40 is the point where the Celsius and Fahrenheit scales agree."""
    assert convert_c_to_f(-40.0) == -40.0
def test_fever_detection():
    """fever_detection should report the max temp and flag the fever."""
    temp_list = [93.0, 98.0, 100.0, 105.0, 101.0]
    max_temp, is_fever = fever_detection(temp_list)
    expected_max = 105.0
    # Bug fix: the original did `is_fever = True`, overwriting the returned
    # flag instead of asserting it -- the fever result was never checked.
    # (105 F is well above any fever threshold -- TODO confirm the cutoff.)
    expected_fever = True
    assert max_temp == expected_max
    assert is_fever == expected_fever
|
4,437 | dd902f99ee8dc23f56641b8e75544a2d4576c19a | """
Given two strings A and B of lowercase letters, return true
if and only if we can swap two letters in A so that the result
equals B.
Example 1:
Input: A = "ab", B = "ba"
Output: true
"""
class Solution:
    def buddyStrings(self, A: str, B: str) -> bool:
        """Return True iff swapping exactly two letters of A yields B."""
        if len(A) != len(B):
            return False
        if A == B:
            # The swap must change nothing, which requires a repeated letter.
            return len(set(A)) < len(A)
        # Positions where the strings disagree: a valid swap means exactly
        # two mismatches that mirror each other.
        mismatches = [(a, b) for a, b in zip(A, B) if a != b]
        return len(mismatches) == 2 and mismatches[0] == mismatches[1][::-1]
|
4,438 | 5c61ec549a3e78da4ea8a18bb4f8382f2b5c2cfa | #!/usr/bin/env python
# encoding: utf-8
# -*- coding: utf-8 -*-
# @contact: ybsdeyx@foxmail.com
# @software: PyCharm
# @time: 2019/3/6 9:59
# @author: Paulson●Wier
# @file: 5_词向量.py
# @desc:
# (1)Word2Vec
from gensim.models import Word2Vec
import jieba
# Define the stop-word / punctuation token list to filter out.
punctuation = ['、',')','(',',',",", "。", ":", ";", ".", "'", '"', "’", "?", "/", "-", "+", "&", "(", ")"]
sentences = [
    "长江是中国第一大河,干流全长6397公里(以沱沱河为源),一般称6300公里。流域总面积一百八十余万平方公里,年平均入海水量约九千六百余亿立方米。以干流长度和入海水量论,长江均居世界第三位。",
    "黄河,中国古代也称河,发源于中华人民共和国青海省巴颜喀拉山脉,流经青海、四川、甘肃、宁夏、内蒙古、陕西、山西、河南、山东9个省区,最后于山东省东营垦利县注入渤海。干流河道全长5464千米,仅次于长江,为中国第二长河。黄河还是世界第五长河。",
    "黄河,是中华民族的母亲河。作为中华文明的发祥地,维系炎黄子孙的血脉.是中华民族民族精神与民族情感的象征。",
    "黄河被称为中华文明的母亲河。公元前2000多年华夏族在黄河领域的中原地区形成、繁衍。",
    "在兰州的“黄河第一桥”内蒙古托克托县河口镇以上的黄河河段为黄河上游。",
    "黄河上游根据河道特性的不同,又可分为河源段、峡谷段和冲积平原三部分。 ",
    "黄河,是中华民族的母亲河。"
]
# Segment each Chinese sentence into word tokens with jieba.
sentences = [jieba.lcut(sen) for sen in sentences]
print('sentences:\n',sentences)
# Remove punctuation tokens.
tokenized = []
for sentence in sentences:
    words = []
    for word in sentence:
        if word not in punctuation:
            words.append(word)
    tokenized.append(words)
print('tokenized:\n',tokenized)
# Train the Word2Vec model (parameter notes in the string block below).
model = Word2Vec(tokenized,sg=1,size=100,window=5,min_count=2,negative=1,sample=0.001,hs=1,workers=4)
'''
参数解释如下:
sg=1 是 skip-gram 算法,对低频词敏感;默认 sg=0 为 CBOW 算法。
size 是输出词向量的维数,值太小会导致词映射因为冲突而影响结果,值太大则会耗内存并使算法计算变慢,一般值取为100到200之间。
window 是句子中当前词与目标词之间的最大距离,3表示在目标词前看3-b 个词,后面看 b 个词(b 在0-3之间随机)。
min_count 是对词进行过滤,频率小于 min-count 的单词则会被忽视,默认值为5。
negative 和 sample 可根据训练结果进行微调,sample 表示更高频率的词被随机下采样到所设置的阈值,默认值为 1e-3。
hs=1 表示层级 softmax 将会被使用,默认 hs=0 且 negative 不为0,则负采样将会被选择使用。
'''
model.save('model') # save the model
model = Word2Vec.load('model') # reload the model
# Similarity queries over the trained vectors.
print(model.wv.similarity('黄河','长江'))
print(model.wv.most_similar(positive=['黄河','母亲河'],negative=['长江']))
# (2)Doc2Vec
# from gensim.models.doc2vec import Doc2Vec,LabeledSentence
# doc_labels = ["长江", "黄河", "黄河", "黄河", "黄河", "黄河", "黄河"]
# class LabeledLineSentence(object):
# def __init__(self,doc_list,labels_list):
# self.labels_list = labels_list
# self.doc_list = doc_list
#
# def __iter__(self):
# for idx ,doc in enumerate(self.doc_list):
# yield LabeledSentence(words=doc,tags=[self.labels_list[idx]])
#
# # model = Doc2Vec(documents, dm=1, size=100, window=8, min_count=5, workers=4)
# model = Doc2Vec(documents, dm=1, size=100, window=8, min_count=5, workers=4)
# model.save('model1')
# model = Doc2Vec.load('model1')
#
# iter_data = LabeledLineSentence(tokenized, doc_labels)
# model = Doc2Vec(dm=1, size=100, window=8, min_count=5, workers=4)
# model.build_vocab(iter_data) |
4,439 | c0adc0032a2647a19d3540c057fa9762906e5f62 | from __future__ import division
import random as rnd
import math
from collections import Counter
from matplotlib import pyplot as plt
import ds_library
import ds_algebra
import ds_probability
import ds_gradient_descent
def normal_pdfs_visualization():
    """Plot several normal pdfs with varying mu/sigma on one figure."""
    xs = [x / 10.0 for x in range(-50, 50)]
    curves = [
        ({'sigma': 1}, '-', 'mu=0-sigma=1'),
        ({'sigma': 2}, '--', 'mu=0-sigma=2'),
        ({'sigma': 0.5}, ':', 'mu=0-sigma=0.5'),
        ({'mu': -1}, '-.', 'mu=-1-sigma=1'),
    ]
    for kwargs, line_style, label in curves:
        plt.plot(xs, [ds_probability.normal_pdf(x, **kwargs) for x in xs],
                 line_style, label=label)
    plt.legend()
    plt.title('Various Normals pdfs')
    plt.show()
def normal_cdfs_visualization():
    """Plot several normal cdfs with varying mu/sigma on one figure."""
    xs = [x / 10.0 for x in range(-50, 50)]
    curves = [
        ({'sigma': 1}, '-', 'mu=0-sigma=1'),
        ({'sigma': 2}, '--', 'mu=0-sigma=2'),
        ({'sigma': 0.5}, ':', 'mu=0-sigma=0.5'),
        ({'mu': -1}, '-.', 'mu=-1-sigma=1'),
    ]
    for kwargs, line_style, label in curves:
        plt.plot(xs, [ds_probability.normal_cdf(x, **kwargs) for x in xs],
                 line_style, label=label)
    plt.legend()
    plt.title('Various Normals cdfs')
    plt.show()
def random_kid():
    """Return 'boy' or 'girl' uniformly at random (order preserved so the
    RNG stream maps to the same outcomes)."""
    return rnd.choice(('boy', 'girl'))
def girl_probability():
    """Monte-Carlo demo of the two-child problem: estimate
    P(both girls | older is a girl) and P(both girls | either is a girl)."""
    both_g = older_g = either_g = 0
    for _ in range(10000):
        # Draw younger first, then older (same RNG order as always).
        younger = random_kid()
        older = random_kid()
        older_is_girl = older == 'girl'
        younger_is_girl = younger == 'girl'
        if older_is_girl:
            older_g += 1
        if older_is_girl and younger_is_girl:
            both_g += 1
        if older_is_girl or younger_is_girl:
            either_g += 1
    print("P(both/older): ", both_g/older_g)
    print("P(both/either): ", both_g/either_g)
def compare_binomial_dist_to_normal_approx(p, n, nb_points):
    """Sample Binomial(n, p) nb_points times, bar-chart the empirical
    distribution, and overlay its normal approximation N(np, np(1-p))."""
    data = [ds_probability.binomial(n, p) for _ in range(nb_points)]
    #showing actual binomial samples on bar chart
    histogram = Counter(data)
    plt.bar([x - 0.4 for x in histogram.keys()],
            [v / nb_points for v in histogram.values()],
            0.8, color='0.7')
    mu_px = p * n
    sigma_px = math.sqrt(n*p*(1 - p))
    #line chart that shows the normal approximation of the binomial variable
    # P(X = i) is approximated by the cdf difference over [i-0.5, i+0.5]
    # (continuity correction).
    xs = range(min(data), max(data)+1)
    ys = [ds_probability.normal_cdf(i+0.5, mu_px, sigma_px) - ds_probability.normal_cdf(i-0.5, mu_px, sigma_px) for i in xs]
    plt.plot(xs, ys)
    plt.title('Binomial Dist vs Normal approximation')
    plt.show()
if __name__ == '__main__':
    # print('5/2: ' + str(5/2))
    # print('5//2: ' + str(5//2))
    # A=[[1,2,3], [1,1,1], [2,2,3]]
    # print(ds_algebra.get_col(A,1))
    # girl_probability()
    #normal_cdfs_visualization()
    # print(ds_probability.inverse_normal_cdf(0.98))
    # compare_binomial_dist_to_normal_approx(0.75, 100, 100000)
    #Gradient Descent example
    #random starting point
    v = [rnd.randint(-100, 100) for _ in range(3)]
    tolerance = 0.000001
    # Take fixed-size steps against the gradient of sum(v_i^2) until the
    # position stops moving by more than `tolerance`.
    while True:
        gradient = ds_gradient_descent.square_gradient(v)
        next_v = ds_gradient_descent.step(v, gradient, -0.01)
        if ds_algebra.distance(next_v, v) < tolerance:
            print('final resting point: ', v)
            break
        v = next_v
|
4,440 | db1b6c545555116a334061440614e83e62994838 | from flask import Flask, render_template
serious12 = Flask(__name__)
@serious12.route("/")
def home():
    """Landing page: returns a plain-text placeholder."""
    greeting = "HOME"
    return greeting
@serious12.route("/user/<username>")
def user(username):
    """Render user.html with a hard-coded profile table.

    NOTE(review): `username` from the URL is currently unused; every request
    renders the same data -- confirm whether a per-user lookup was intended.
    """
    # Renamed local (was `user`, shadowing this view function).
    profiles = {
        "trung": {
            "name": "Trung",
            "age": 19,
            "birthplace": "Hanoi"
        },
        "nguyenvana": {
            "name": "A",
            "age": 69,
            "birthplace": "Trai Dat"
        }
    }
    return render_template("user.html", user = profiles)
if __name__ == "__main__":
    # Development server with auto-reload/debugger; not for production use.
    serious12.run(debug=True)
|
4,441 | 7378f76b4c1f67d8a549aa2a88db8caa9b05338e | # -*- coding:utf-8 -*-
import time
import random
import numpy as np
from collections import defaultdict
class Simulator(object):
    """Discrete-time ad-budget simulator.

    To keep the implementation single-threaded, time is divided into slots:
    requests arriving in the same slot are treated as simultaneous, and the
    budget is charged once per slot as well. (Translated from the original
    Korean comments.)

    Ported to run on Python 3 as well as 2: ``xrange`` -> ``range`` and
    ``print`` statements -> ``print()`` calls.
    """
    # Gaussian (mean, stdev) for per-slot allocation request counts.
    ALLOCATION_INTERVAL_MEAN = 150
    ALLOCATION_INTERVAL_STDEV = 30
    # Distribution used once 90% of the budget has been spent.
    AFTER_ALLOCATION_INTERVAL_MEAN = 150
    AFTER_ALLOCATION_INTERVAL_STDEV = 30
    # Delay (in slots) between an allocation and its click/charge.
    CLICK_INTERVAL_MEAN = 30
    CLICK_INTERVAL_STDEV = 20
    OUTPUT_NAME = 'output.csv'

    def __init__(self, budget, unitPrice):
        self.budget = budget
        self.unitPrice = unitPrice
        self.spent = 0
        self.depositRequest = defaultdict(int)     # slot -> pending click charges
        self.allocationRequest = defaultdict(int)  # slot -> granted allocations
        self.timeSpent = defaultdict(int)          # slot -> cumulative spend
        self.now = 0
        self.past_spent = 0

    def _sample_request_count(self, mean, stdev):
        # Draw a request count from N(mean, stdev); anything below 1 means 0.
        sample = np.random.normal(mean, stdev, 1)[0]
        return int(sample) if sample >= 1 else 0

    def AdAllocation(self):
        """Generate this slot's allocation requests and schedule their clicks."""
        requestCount = self._sample_request_count(
            self.ALLOCATION_INTERVAL_MEAN, self.ALLOCATION_INTERVAL_STDEV)
        if self.spent > self.budget * 0.9:
            # Past 90% of the budget: re-sample from the "after" distribution.
            requestCount = self._sample_request_count(
                self.AFTER_ALLOCATION_INTERVAL_MEAN,
                self.AFTER_ALLOCATION_INTERVAL_STDEV)
        if requestCount == 0:
            return
        for _ in range(requestCount):
            # NOTE(review): the base IsCap() returns NotImplemented, which is
            # truthy, so every request is skipped unless a subclass overrides it.
            if self.IsCap():
                continue
            self.allocationRequest[self.now] += 1
            self.ClickAd()

    def DepositBudget(self):
        """Charge this slot's pending clicks against the budget, record the
        cumulative spend, and advance to the next slot."""
        self.past_spent = self.spent
        self.spent += self.depositRequest[self.now] * self.unitPrice
        self.timeSpent[self.now] = self.spent
        self.now += 1

    def ClickAd(self):
        """Schedule a click (charge) at a normally-distributed future slot."""
        interval = np.random.normal(self.CLICK_INTERVAL_MEAN, self.CLICK_INTERVAL_STDEV, 1)[0]
        if interval >= 0:
            clickTime = self.now + int(interval)
        else:
            clickTime = self.now
        self.depositRequest[clickTime] += 1

    def IsCap(self):
        # Hook for subclasses: return True to drop (cap) an allocation request.
        return NotImplemented

    def PrintSpent(self):
        """Print 'slot,cumulative_spent' lines."""
        for i in range(self.now):
            print(str(i) + ',' + str(self.timeSpent[i]))

    def PrintAllocation(self):
        """Print 'slot,cumulative_allocations' lines."""
        allocationSum = 0
        for i in range(self.now):
            allocationSum += self.allocationRequest[i]
            print(str(i) + ',' + str(allocationSum))

    def OutputResult(self):
        """Write 'time,spent,allocation' CSV rows to OUTPUT_NAME."""
        allocationSum = 0
        # with-block guarantees the file is closed even on error.
        with open(self.OUTPUT_NAME, 'w') as f:
            f.write('time,spent,allocation\n')
            for i in range(self.now):
                allocationSum += self.allocationRequest[i]
                f.write(str(i) + ',' + str(self.timeSpent[i]) + ',' + str(allocationSum) + '\n')
|
4,442 | dfae1007adc557a15d03b78f2bf790fb5b06141a | from distributions.zero_inflated_poisson import ZeroInflatedPoisson
from distributions.negative_binomial import NegativeBinomial
from distributions.zero_inflated_negative_binomial import ZeroInflatedNegativeBinomial
from distributions.zero_inflated import ZeroInflated
from distributions.categorized import Categorized
from distributions.pareto import Pareto
|
4,443 | 6a3fd3323ed8792853afdf5af76161f3e20d4896 | '''
The while statement allows you to repeatedly execute a block of statements as long as a condition is true.
A while statement is an example of what is called a looping statement. A while statement can have an optional else clause.
'''
# Add 100 to every element of the list in place (enumerate instead of the
# original manual while-loop index).
l1: list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print("The original list: " , l1)
for idx, val in enumerate(l1):
    l1[idx] = val + 100
print("The modified new list is: ", l1)
#Guessing game: loop until the user guesses the number, giving hints.
number = 23
while True:
    guess = int(input('Enter an integer : ')) #input statement to enter data from console
    if guess == number:
        print('Congratulations, you guessed it.')
        break
    elif guess < number:
        print('No, it is a little higher than that.')
        continue
    else:
        print('No, it is a little lower than that.')
        continue
        # Do anything else you want to do here
print('Done')
|
4,444 | d2fce15636e43ca618c39c5c963bbf0c3a6a3886 | # this is just to test with ilp_polytope
import polytope
# Smoke test: run ilp_polytope's self-test (defined in the polytope package).
polytope.ilp_polytope.test2()
|
4,445 | 533154fe58511ac9c9c693bf07f076146b0c6136 | import os
from PIL import Image
import urllib
import json
import math
def download_images(a,b):
    # Download `b` non-silhouette Facebook profile pictures, scanning ids
    # upward from `a`. Python 2 only (urllib.urlopen, print statements).
    image_count = 0
    k = a
    no_of_images = b
    baseURL='https://graph.facebook.com/v2.2/'
    imgURL='/picture?type=large'
    sil_check='/picture?redirect=false'
    while image_count<no_of_images:
        # Ask the Graph API whether this id has a real photo or a placeholder.
        obj=urllib.urlopen(baseURL+str(k)+sil_check)
        data=obj.read()
        jsondata=json.loads(data)
        if not jsondata['data']['is_silhouette']:
            img=urllib.urlopen(baseURL+str(k)+imgURL)
            image=img.read()
            f=open(str(k)+'.jpg','wb')
            f.write(image)
            f.close()
            print 'Image written to '+str(k)+'.jpg'
            image_count+=1
        else:
            print str(k)+' is Silhouette.'
        k+=1
def resize_images():
    # Convert every downloaded .jpg in the CWD into a 100x100 .png,
    # deleting the original jpg afterwards.
    files=[f for f in os.listdir('.') if os.path.isfile(f) and '.jpg' in f]
    print 'Resizing images ...'
    for i in files:
        img=Image.open(i)
        j = i.replace('jpg','png')
        img.resize((100,100)).save(j)
        img.close()
        os.remove(i)
def create_mosaic(b):
    # Tile the N x N resized 100x100 .pngs into one mosaic.png, N = sqrt(b).
    files=[f for f in os.listdir('.') if os.path.isfile(f) and '.png' in f]
    no_of_images = b
    N = int(math.sqrt(no_of_images))
    mosaic=Image.new('RGB',(N*100,N*100))
    mpixels=mosaic.load()
    mX,mY = 0,0
    counter=0
    print 'Combining images ...'
    for img in files:
        # Top-left corner of this tile; `counter/N` relies on Python 2
        # integer division.
        mX = (counter%N)*100
        mY = (counter/N)*100
        image=Image.open(img)
        pixels=image.load()
        # Copy the tile pixel by pixel, resetting mX at each row.
        for iY in range(100):
            mX = (counter%N)*100
            for iX in range(100):
                try:
                    mpixels[mX,mY] = pixels[iX,iY]
                except:
                    print mX,mY
                mX+=1
            mY+=1
        counter+=1
        image.close()
        os.remove(img)
    mosaic.save('mosaic.png')
# Script entry: prompt for a starting id and a (square) image count, then
# download, resize, and combine.
a = int(raw_input('Enter the fb-id from where to begin:'))
b = int(raw_input('Enter the number of images to download (a square):'))
download_images(a,b)
resize_images()
create_mosaic(b)
|
4,446 | 166a8cd0e09fbec739f43019659eeaf98b1d4fa4 | import argparse
def wrong_subtraction(n, k):
    """Apply the 'wrong subtraction' k times (Codeforces 977A): if the last
    digit is 0 drop it, otherwise subtract 1. Returns the final value."""
    output = n
    for _ in range(k):
        # Arithmetic check replaces the original per-step str()/int()
        # round trip; behaviour is identical.
        if output % 10 == 0:
            output //= 10
        else:
            output -= 1
    return output
# d = "Do the wrong subtraction as per https://codeforces.com/problemset/problem/977/A"
#
# parser = argparse.ArgumentParser(description=d)
#
# parser.add_argument("n", type=int, help="input value for n")
# parser.add_argument("k", type=int, help="input value for k")
#
# args = parser.parse_args()
#
# n = args.n
# k = args.k
# Read "n k" from stdin and print the result of k wrong-subtraction steps.
a = list(map(int, input().split()))
n = a[0]
k = a[1]
print(wrong_subtraction(n, k))
|
4,447 | 7a6d5309580b673413f57047e631a08e61e837cf | from django.core.exceptions import ValidationError
from django.utils import timezone
def year_validator(value):
    """Raise ValidationError unless 1 <= value <= the current year."""
    if value >= 1 and value <= timezone.now().year:
        return
    raise ValidationError(
        ('%s is not a correct year!' % value)
    )
def raiting_validator(value):
    """Raise ValidationError unless 1 <= value <= 10."""
    if value < 1 or value > 10:
        raise ValidationError(
            # Fixed message typos: "caorrect raiting" -> "correct rating".
            ('%s is not a correct rating!' % value)
        )
|
4,448 | 43a23958b8c8779e3292f0f523a37b6d712fdbac | import time
class Block:
    """A single block in a simple blockchain: an index, a payload of
    transactions, a creation timestamp, the previous block's hash, and a
    nonce. `hash` starts as None until the block is mined/hashed."""
    def __init__(self, index, transactions, previous_hash, nonce=0):
        self.index = index
        # Stored under the singular name `transaction`, as in the original.
        self.transaction = transactions
        self.timestamp = time.time()
        self.previous_hash = previous_hash
        self.nonce = nonce
        self.hash = None
|
4,449 | 913ff9b811d3abbe43bda0554e40a6a2c87053be | from abc import ABC, abstractmethod
from raspberry_home.view.geometry import *
from raspberry_home.view.renderable import Renderable
class View(Renderable, ABC):
    """Abstract base for renderable views that report a preferred size."""
    @abstractmethod
    def content_size(self, container_size: Size) -> Size:
        # Return the size this view wants when laid out in `container_size`.
        pass
|
4,450 | 3179c13968f7bcdccbd00ea35b9f098dc49b42d8 | from functools import reduce
# Advent of Code 2020 day 10: read adapter joltages and count arrangements.
with open("input.txt") as f:
    numbers = f.read().split("\n")
n = sorted(list(map(lambda x: int(x), numbers)))
n.insert(0, 0)        # the charging outlet (joltage 0)
n.append(n[-1] + 3)   # the device's built-in adapter (+3 over the max)
target = n[-1]
memoize = {}          # joltage -> number of distinct paths to target
def part2(number):
    """Count the distinct adapter chains from `number` up to the module-level
    `target`, memoizing results in `memoize` (AoC 2020 day 10 part 2)."""
    if number == target:
        return 1
    # `in memoize` instead of `in memoize.keys()`: same semantics, idiomatic.
    if number in memoize:
        return memoize[number]
    paths = 0
    # An adapter can bridge a joltage gap of 1, 2 or 3.
    for step in (1, 2, 3):
        if number + step in n:
            paths += part2(number + step)
    memoize[number] = paths
    print(number, paths)
    return paths
# Kick off the memoized count from the outlet (joltage 0).
print("Total:", part2(0))
|
4,451 | d2c9ee64472c74767812d842d2c49eec962e28c6 | from utils import *
from wordEmbedding import *
print("bat dau")
def predict(text, phobert, tokenizer):
    # Classify `text` with the saved Keras model and print the 1-based label.
    # NOTE(review): the model is re-loaded from disk on every call -- consider
    # loading it once if latency matters.
    model = load_model('model.h5')
    X_test = word2vec(text, phobert, tokenizer)
    x_test_tensor = tf.convert_to_tensor(X_test)
    X_tests = []
    X_tests.append(x_test_tensor)
    X_tests = tf.convert_to_tensor(X_tests)
    y = model.predict(X_tests)
    # argmax over the class axis gives a 0-based index; +1 makes it 1-based.
    y_predict = np.argmax(y, axis=-1)
    print(y_predict+1)
if __name__ == "__main__":
    # Load PhoBERT once, then run a few sample queries through the classifier.
    print("1 Chỗ này hơi lâu bạn đợi tí")
    phobert = AutoModel.from_pretrained("vinai/phobert-base")
    print("2")
    tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base", use_fast=False)
    print("3")
    predict("tôi làm giấy X ở đâu", phobert, tokenizer)
    print("4")
    predict("tôi làm giấy X ở đâu", phobert, tokenizer)
    print("5")
    predict("tôi làm giấy X cần những gì", phobert, tokenizer)
|
4,452 | 1dcea61908753777604d99235407981e89c3b9d4 | import sys
sys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')

from numpy import sin, linspace
# Bug fix: `plt` was used throughout without ever being imported (NameError).
import matplotlib.pyplot as plt

# Sample sin(x) on [0, 4].
x = linspace(0, 4, 101)
y = sin(x)

# Taylor partial sums of sin(x): y1 = x, y2 = x - x^3/3!.
# Bug fix: these must be defined before plotting -- the original plotted y2
# before it existed (and imported numpy twice).
y1 = x
y2 = y1 - x*x*x/(1*2*3)

plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Funkcija $sin(x)$ un tās izvitzījums rindā')
plt.plot(x, y)
plt.plot(x, y1, color = "#530000")
plt.plot(x, y2, color = "#530000")
plt.show()
|
4,453 | 6b647dc2775f54706a6c18ee91145ba60d70be21 | import konlpy
import nltk
# POS tag a sentence
sentence = u'만 6세 이하의 초등학교 취학 전 자녀를 양육하기 위해서는'
words = konlpy.tag.Twitter().pos(sentence)  # list of (morpheme, POS) pairs; Twitter was renamed Okt in newer konlpy — TODO confirm version
# Define a chunk grammar, or chunking rules, then chunk
grammar = """
NP: {<N.*>*<Suffix>?} # Noun phrase
VP: {<V.*>*} # Verb phrase
AP: {<A.*>*} # Adjective phrase
"""
parser = nltk.RegexpParser(grammar)
chunks = parser.parse(words)
print("# Print whole tree")
print(chunks.pprint())  # NOTE(review): pprint() appears to print and return None, so this likely also prints 'None' — confirm
print("\n# Print noun phrases only")
for subtree in chunks.subtrees():
    if subtree.label()=='NP':
        # Join the morphemes of this noun phrase back into one string.
        print(' '.join((e[0] for e in list(subtree))))
        print(subtree.pprint())
# Display the chunk tree
chunks.draw() |
4,454 | ff8ffeb418bf4f9bc7d5dadd126ebc7c34c5c2cd | speed, lic_plate = input().split()
# Accumulated fines; reading plates until the sentinel "A999AA" arrives.
salary = 0
while lic_plate != "A999AA":
    if int(speed) > 60:
        # Only the three digit characters (positions 1..3) matter.
        d1, d2, d3 = lic_plate[1], lic_plate[2], lic_plate[3]
        if d1 == d2 == d3:
            # All three digits identical: the premium fine.
            salary += 1000
        elif d1 == d2 or d1 == d3 or d2 == d3:
            # Exactly one matching pair of digits.
            salary += 500
        else:
            # Speeding with an unremarkable plate.
            salary += 100
    speed, lic_plate = input().split()
print(salary)
|
4,455 | 382cb55a6b849f0240276d8f45746e995b16d714 | import pandas as pd
import folium

# Input data lives next to the script in ../data/.
ctx = '../data/'
json = ctx + 'us-states.json'  # NOTE(review): shadows the stdlib `json` module name
csv = ctx + 'US_Unemployment_Oct2012.csv'

data = pd.read_csv(csv)  # columns: State, Unemployment

# US-centered map with a state-level unemployment choropleth.
m = folium.Map(location=[37, -102], zoom_start=5)
m.choropleth(
    geo_data=json,
    name='choropleth',
    data=data,
    columns=['State', 'Unemployment'],
    # was `Key_on=...`: folium's parameter is lowercase `key_on`, the
    # capitalised name raised a TypeError (unexpected keyword argument).
    key_on='feature.id',
    fill_color='YlGn',
    fill_opacity=0.7,
    line_opacity=0.2,
    legend_name='Unemployment Rate (%)'
)
folium.LayerControl().add_to(m)
m.save(ctx + 'result.html')
|
4,456 | 5eab41a2ef536365bab6f6b5ad97efb8d26d7687 | import numpy as np
import initialization as init
import evaluation as eval  # NOTE(review): shadows the builtin `eval` in this module
import selection as sel
import recombination as rec
import mutation as mut
# Evolutionary-algorithm operator bindings — swap these to change behaviour.
initialize = init.permutation
evaluate = eval.custom
select = sel.rank_based
mutate = mut.swap
reproduce = rec.pairwise
crossover = rec.order
replace = sel.rank_based
# gens: generations; n_off: offspring per generation; n_pars: parents picked;
# pop_size: survivors kept per generation; len_gene: genome length.
params = {'gens': 100,
          'n_off': 50,
          'n_pars': 100,
          'n_objs': 1,
          'pop_size': 150,
          'len_gene': 100,
          'mut_rate': 0.5}
# mu+lambda style loop: select parents, recombine, mutate, evaluate, then
# rank-truncate parents+offspring back down to pop_size.
population = initialize(params)
population = evaluate(params, population)
for gen in range(params['gens']):
    parents = select(population, params['n_pars'])
    offspring = reproduce(params, parents, crossover)
    offspring = mutate(params, offspring)
    offspring = evaluate(params, offspring)
    population = replace(np.concatenate((population, offspring), axis=0), params['pop_size'])
    print(gen)
|
4,457 | d178818faf5fb18f5da48c1e2cf7991600731d06 | # -*- coding: utf-8 -*-
class Bot(dict):
    """Minimal robot stub for the GraphView test harness below.

    The values normally produced by getRayon()/getPosition() live under
    dict keys of the same name, so test code can tweak them with plain
    item assignment (bot["getRayon"] = 200).
    """

    def __init__(self):
        self.update(getRayon=0, getPosition=(-1000, -1000))
        self.traj = []

    def getTrajectoires(self):
        """Return the (initially empty) trajectory list."""
        return self.traj

    def getRayon(self):
        """Return the robot radius stored under the 'getRayon' key."""
        return self["getRayon"]

    def getPosition(self):
        """Return the (x, y) position stored under the 'getPosition' key."""
        return self["getPosition"]
if __name__ == "__main__":
    import sys
    import os

    # Make the ia/ and libs/ packages importable relative to this file.
    FILE_DIR = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(FILE_DIR, "../../ia"))
    sys.path.append(os.path.join(FILE_DIR, "../../libs"))

    import time
    from graphview import GraphView
    from event.goals import navigation
    from event import collision

    filename = os.path.join(FILE_DIR, "../../ia/event/goals/navigation/map.xml")
    # Optional CLI offset (unused below, kept for compatibility).
    try:
        offset = sys.argv[1]
    except IndexError:  # was a bare except; only a missing argv entry is expected
        offset = 0

    start = time.time()
    # Two "our" robots and two opponents with positions/radii for the map.
    other_bot = Bot()
    other_bot.name = 'other'
    other_bot["getRayon"] = 200
    used_bot = Bot()
    used_bot.name = 'used'
    used_bot["getRayon"] = 120
    ennemy1 = Bot()
    ennemy1.name = 'en1'
    ennemy2 = Bot()
    ennemy2.name = 'en2'
    ennemy1["getPosition"] = (1800, 1500)
    ennemy1["getRayon"] = 200
    ennemy2["getPosition"] = (2200, 500)
    # was `ennemy1["getRayon"] = 120`, which silently overwrote ennemy1's
    # radius and left ennemy2 at the default 0.
    ennemy2["getRayon"] = 120

    ng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2], filename)
    col = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])
    print("init time : %s" % (time.time() - start))
    v = GraphView(ng, col, other_bot, used_bot)
    v.mainloop()
|
4,458 | 963499e071873083dc942486b9a5b094393cd99e | from db_upgrader.Repositories.store import Store, StoreException
from db_upgrader.Models.product import *
class ProductStore(Store):
    """Data-access object for the `product` table."""

    table = 'product'

    def add_product(self, product):
        """Insert `product` and return the generated primary key.

        Args:
            product: object exposing .name, .customerId and .is_enable.

        Raises:
            StoreException: wrapping any underlying DB error, with the
                original exception chained as the cause.
        """
        c = None
        try:
            c = self.conn.cursor()
            c.execute(
                'INSERT INTO product (`name`,customerId,is_enable) VALUES(%s,%s,%s)',
                (product.name, product.customerId, product.is_enable))
            return c.lastrowid
        except Exception as e:
            raise StoreException('error storing product: {}'.format(e)) from e
        finally:
            # The cursor was previously leaked on every call.
            if c is not None:
                c.close()
|
4,459 | efe5df4005dbdb04cf4e7da1f350dab483c94c92 | from django.db import models
# Create your models here.
class person(models.Model):
    """A registered person with unique name/email/phone contact details."""
    # NOTE(review): class name breaks PEP 8 (`Person`), but renaming would
    # change migrations/references, so it is left as-is.
    name=models.CharField(max_length=20,unique=True)
    age=models.IntegerField()
    email=models.CharField(max_length=20,unique=True)  # 20 chars is tight for real e-mail addresses — TODO confirm
    phone=models.CharField(max_length=10, unique=True)
    gender=models.CharField(max_length=10)
    # (stored value, display label) pairs for the `location` choices field.
    locations=[('ind',"india"),('aus',"AUS")]
    location=models.CharField(max_length=10,choices=locations)
    marital_status=models.CharField(max_length=10)
    # NOTE(review): __unicode__ is the Python 2 idiom; under Python 3 Django
    # uses __str__, so the admin would show the default repr — confirm the
    # target Python version.
    def __unicode__(self):
        return self.name
|
4,460 | 85d1069d85e285bc5c36811f569dabd793b5064b | config = {'numIndividuals': 50, 'maxNumGen':20, 'eliteProp':0.1, 'mutantProp':0.2, 'inheritanceProb':0.7}
|
4,461 | 229d7378695f7e00176eb7c3962519af3db1b7e1 | # encoding: utf-8
from GlyphsApp.plugins import *
from outlineTestPenGlyphs import OutlineTestPenGlyphs
from string import strip
plugin_id = "de.kutilek.RedArrow"
class RedArrow(ReporterPlugin):
    """Glyphs.app reporter plugin: draws red arrows and labels at outline
    problems (missing extrema, fractional coordinates, kinks, ...) found
    by OutlineTestPenGlyphs. Targets Glyphs 2 / Python 2 (see iteritems
    in _drawArrows and the py2-only imports at the top of the file).
    """

    def settings(self):
        self.menuName = "Red Arrows"
        self.keyboardShortcut = 'a'
        self.keyboardShortcutModifier = NSCommandKeyMask | NSShiftKeyMask | NSAlternateKeyMask
        self.generalContextMenus = [
            {"name": Glyphs.localize({'en': u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}), "action": self.toggleLabels},
        ]

    def start(self):
        self.addMenuItem()
        # Thresholds/toggles forwarded verbatim to the outline test pen.
        self.options = {
            "extremum_calculate_badness": False,
            "extremum_ignore_badness_below": 0,
            "smooth_connection_max_distance": 4,
            "fractional_ignore_point_zero": True,
            "collinear_vectors_max_distance": 2,
            "test_closepath": False,
        }
        self.run_tests = [
            "test_extrema",
            "test_fractional_coords",
            "test_fractional_transform",
            "test_smooth",
            "test_empty_segments",
            "test_collinear",
            "test_semi_hv",
            #"test_closepath",
            "test_zero_handles",
        ]
        self.errors = []
        # Load the stored preference, invert it, then call toggleLabels()
        # once so the flag and the context-menu wording end up in sync.
        self.show_labels = Glyphs.defaults["%s.showLabels" % plugin_id]
        self.show_labels = not(self.show_labels)
        self.toggleLabels()

    def addMenuItem(self):
        """Insert 'Select Glyphs With Outline Errors' into the app menu."""
        mainMenu = NSApplication.sharedApplication().mainMenu()
        s = objc.selector(self.selectGlyphsWithErrors, signature='v@:')
        newMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            Glyphs.localize({
                'en': u"Select Glyphs With Outline Errors",
                'de': u'Glyphen mit Outlinefehlern auswählen'
            }),
            s,
            ""
        )
        newMenuItem.setTarget_(self)
        mainMenu.itemAtIndex_(2).submenu().insertItem_atIndex_(newMenuItem, 11)

    def foreground(self, Layer):
        """Reporter callback: run the outline checks and draw the arrows."""
        try:
            self._updateOutlineCheck(Layer)
        except Exception as e:
            self.logToConsole( "drawForegroundForLayer_: %s" % str(e) )

    def toggleLabels(self):
        """Flip label visibility, swap the context-menu wording, persist."""
        if self.show_labels:
            self.show_labels = False
            self.generalContextMenus = [
                {
                    "name": Glyphs.localize(
                        {
                            'en': u'Show Error Labels',
                            'de': u'Fehlerbeschriftung anzeigen'
                        }
                    ),
                    "action": self.toggleLabels
                },
            ]
        else:
            self.show_labels = True
            self.generalContextMenus = [
                {
                    "name": Glyphs.localize(
                        {
                            'en': u'Hide Error Labels',
                            'de': u'Fehlerbeschriftung ausblenden'
                        }
                    ),
                    "action": self.toggleLabels
                },
            ]
        Glyphs.defaults["%s.showLabels" % plugin_id] = self.show_labels

    def selectGlyphsWithErrors(self):
        """
        Selects all glyphs with errors in the active layer
        """
        font = NSApplication.sharedApplication().font
        if font is None:
            return None
        font.disableUpdateInterface()
        mid = font.selectedFontMaster.id
        selection = []
        glyphlist = font.glyphs.keys()
        for glyph_name in glyphlist:
            glyph = font.glyphs[glyph_name]
            layer = glyph.layers[mid]
            if layer is not None:
                # Was `OutlineTestPen(...)` + `layer.draw(...)`: OutlineTestPen
                # is never imported in this file (NameError at runtime) — only
                # OutlineTestPenGlyphs is. Use it with drawPoints(), exactly as
                # _updateOutlineCheck() below does.
                outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent, self.options, self.run_tests)
                layer.drawPoints(outline_test_pen)
                if len(outline_test_pen.errors) > 0:
                    glyph.selected = True
                    selection.append(glyph_name)
                else:
                    glyph.selected = False
        font.enableUpdateInterface()

    def _updateOutlineCheck(self, layer):
        """Run all configured tests on `layer` and draw arrows for errors."""
        self.current_layer = layer
        self.errors = []
        if layer is not None:
            outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent, self.options, self.run_tests)
            layer.drawPoints(outline_test_pen)
            self.errors = outline_test_pen.errors
        if self.errors:
            self._drawArrows()

    def _drawArrow(self, position, kind, size, width):
        """Draw one red arrow at `position`; label it when labels are on."""
        x, y = position
        NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.9, 0.1, 0.0, 0.85 ).set()
        myPath = NSBezierPath.alloc().init()
        myPath.setLineWidth_( width )
        myPath.moveToPoint_( (x, y-size) )
        myPath.lineToPoint_( (x, y) )
        myPath.lineToPoint_( (x+size, y) )
        myPath.moveToPoint_( (x, y) )
        myPath.lineToPoint_( (x+size, y-size) )
        myPath.stroke()
        if self.show_labels:
            myString = NSString.string().stringByAppendingString_(kind)
            myString.drawAtPoint_withAttributes_(
                (position[0] + 1.8 * size, position[1] - 1.8 * size),
                {
                    NSFontAttributeName: NSFont.systemFontOfSize_(size),
                    NSForegroundColorAttributeName: NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.4, 0.4, 0.6, 0.7 ),
                }
            )

    def _drawUnspecified(self, position, kind, size, width):
        """Draw a red circle for an error that has no concrete position."""
        circle_size = size * 1.3
        width *= 0.8
        x, y = position
        NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.9, 0.1, 0.0, 0.85 ).set()
        myPath = NSBezierPath.alloc().init()
        myPath.setLineWidth_( width )
        myPath.appendBezierPathWithOvalInRect_( NSMakeRect( x - 0.5 * circle_size, y - 0.5 * circle_size, circle_size, circle_size ) )
        myPath.stroke()
        # FIXME: labels here are always shown; should honour self.show_labels.
        if True:  # show labels
            myString = NSString.string().stringByAppendingString_(kind)
            myString.drawAtPoint_withAttributes_(
                (position[0] + 1.8 * size, position[1] - 1.8 * size),
                {
                    NSFontAttributeName: NSFont.systemFontOfSize_(size),
                    NSForegroundColorAttributeName: NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.4, 0.4, 0.6, 0.7 ),
                }
            )

    def _drawArrows(self, debug=False):
        """Group errors by position and draw one arrow/circle per location."""
        scale = self.getScale()
        size = 10.0 / scale
        width = 3.0 / scale
        errors_by_position = {}
        for e in self.errors:
            if e.position is not None:
                if (e.position[0], e.position[1]) in errors_by_position:
                    errors_by_position[(e.position[0], e.position[1])].extend([e])
                else:
                    errors_by_position[(e.position[0], e.position[1])] = [e]
            else:
                if None in errors_by_position:
                    errors_by_position[None].extend([e])
                else:
                    errors_by_position[None] = [e]
        # .iteritems(): this file targets Python 2 (see `from string import strip`).
        for pos, errors in errors_by_position.iteritems():
            message = ""
            for e in errors:
                if e.badness is None or not debug:
                    message += "%s, " % (e.kind)
                else:
                    message += "%s (Severity %0.1f), " % (e.kind, e.badness)
            if pos is None:
                # Position-less errors are shown to the right of the glyph.
                pos = (self.current_layer.width + 20, -10)
                self._drawUnspecified(pos, message.strip(", "), size, width)
            else:
                self._drawArrow(pos, message.strip(", "), size, width)
|
4,462 | 6d4950ca61cd1e2ee7ef8b409577e9df2d65addd | from disaggregation import DisaggregationManager
import numpy as np
from more_itertools import windowed
# Smoke test: window a random signal (1024-sample windows, 128-sample
# stride), then overlap-average back; shape must match the original.
# Assumes 10*32*1024 divides evenly into windows so no None fill values
# end up in the array — TODO confirm.
x = np.random.random_sample(10 * 32 * 1024)
w = windowed(x, n=1024, step=128)
z = DisaggregationManager._overlap_average(np.array(list(w)), stride=128)
print(z.shape)
print(x.shape)
assert z.shape == x.shape |
4,463 | a5ef2adbf85b5ab80c59697340f94bc57d60952e | """
Code for Alexa skill to check PB tracking
"""
from __future__ import print_function
import traceback
import requests
import os
import json
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble the Alexa `response` payload: speech, card and reprompt."""
    speech = {'type': 'PlainText', 'text': output}
    reprompt = {'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}}
    card = {
        'type': 'Simple',
        'title': "SessionSpeechlet - " + title,
        'content': "SessionSpeechlet - " + output,
    }
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': reprompt,
        'shouldEndSession': should_end_session,
    }
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet response in the top-level Alexa envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """Greeting: ask the user for the first ten tracking-number digits.

    Session attributes start empty; the session is kept open so the user
    can answer.
    """
    prompt = "Please give first 10 digits of tracking number"
    # The reprompt repeats the same wording when the user stays silent or
    # says something that is not understood.
    speechlet = build_speechlet_response(
        "Welcome to PB Parcel Tracker", prompt, prompt, False)
    return build_response({}, speechlet)
def handle_session_end_request():
    """Goodbye message; shouldEndSession=True makes the skill exit."""
    goodbye = ("Thank you for trying the Alexa Skills Kit sample. "
               "Have a nice day! ")
    speechlet = build_speechlet_response("Session Ended", goodbye, None, True)
    return build_response({}, speechlet)
#----- get tracking ------
def setFirstEleven(intent, session):
    """First half of tracking-number capture: stash digits 1-10 in the
    session attributes under 'first_ten'.

    Despite the name, this collects the first TEN digits (slots One..Ten);
    the remaining twelve are handled by getParcelStatus().

    Returns:
        An Alexa response asking for the remaining digits (or to retry
        the first ten on error). The session stays open either way.
    """
    session_attributes = {}
    should_end_session = False
    speech_output = "Now give remaining digits"
    reprompt_text = "Now give the next eleven numbers"
    try:
        digits = [intent['slots'][slot]['value'] for slot in
                  ('One', 'Two', 'Three', 'Four', 'Five',
                   'Six', 'Seven', 'Eight', 'Nine', 'Ten')]
        first_ten = "".join(digits)
        session_attributes['first_ten'] = first_ten
        print("session after adding first ten--->")
        print(session_attributes)
    except Exception:
        # Was `traceback.print_tb` — a bare attribute reference that never
        # ran (and print_tb needs a traceback argument anyway); print_exc()
        # actually logs the current exception.
        traceback.print_exc()
        speech_output = "There was some problem, Please provide first ten digits of the tracking number"
        reprompt_text = "Please say first ten digits of the tracking number"
    return build_response(session_attributes, build_speechlet_response(
        intent['name'], speech_output, reprompt_text, should_end_session))
#----- get tracking ------
def getParcelStatus(intent, session):
    """Second half: join digits 11-22 with the stored first ten, query the
    Pitney Bowes sandbox tracking API, and speak the parcel status.

    Expects session['attributes']['first_ten'] (set by setFirstEleven) and
    session['access_token'] (set by oauth_request). On error the session
    stays open and the user is re-prompted for the appropriate digits.
    """
    session_attributes = {}
    should_end_session = True
    speech_output = "There was some problem in taking your input"
    reprompt_text = "Please say remaining digits of the tracking number"
    try:
        tracking_number = "".join(
            intent['slots'][slot]['value'] for slot in
            ('Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen',
             'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen', 'Twenty',
             'TwentyOne', 'TwentyTwo'))
        print("'first_ten' not in session['attributes']--->")
        print('first_ten' not in session['attributes'])
        full_tracking_number = "%s%s" % (session['attributes']['first_ten'], tracking_number)
        bearer = "Bearer %s" % (session['access_token'])
        print("USPS FULL Tracking Number ----> %s" % (full_tracking_number))
        url = "https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS" % (full_tracking_number)
        r = requests.get(url, headers={"Authorization": bearer})
        tracking_response = json.loads(r.content)
        if r.status_code == 200:
            speech_output = "The status of the parcel is " + tracking_response['status']
            reprompt_text = "The status of the parcel is " + tracking_response['status']
        else:
            speech_output = tracking_response['errors'][0]['errorDescription']
            reprompt_text = tracking_response['errors'][0]['errorDescription']
        print(r.content)
    except Exception:
        # Was `traceback.print_tb` — a no-op attribute reference; print_exc()
        # actually logs the current exception.
        traceback.print_exc()
        should_end_session = False
        # Distinguish "first ten never captured" from a failure in this step.
        if ('attributes' not in session or ('attributes' in session and 'first_ten' not in session['attributes'])):
            speech_output = "Please provide only first ten digits of the tracking number"
            reprompt_text = "Please provide only first ten digits of the tracking number"
        else:
            speech_output = "There was some problem, Please say remaining digits of the tracking number"
            reprompt_text = "Please say remaining digits of the tracking number"
    return build_response(session_attributes, build_speechlet_response(
        intent['name'], speech_output, reprompt_text, should_end_session))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
    """Called when the session starts; logs request and session ids."""
    req_id = session_started_request['requestId']
    print("on_session_started requestId=" + req_id
          + ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
    """Skill launched without an intent: log it, return the welcome prompt."""
    req_id = launch_request['requestId']
    print("on_launch requestId=" + req_id
          + ", sessionId=" + session['sessionId'])
    return get_welcome_response()
def oauth_request(session):
    """Fetch an OAuth token from the PB sandbox and stash it on `session`.

    Reads the base64 client credential from the `key` environment variable.
    NOTE(review): on a non-200 response the session is left untouched, so
    callers expecting session['access_token'] will hit KeyError — confirm
    this is handled upstream.
    """
    access_key = os.environ['key']
    access_key_value = "Basic "+access_key
    url = 'https://api-sandbox.pitneybowes.com/oauth/token'
    r = requests.post(url, headers={"Authorization": access_key_value,
                       "Content-Type": "application/x-www-form-urlencoded"},
                       data={"grant_type": "client_credentials"})
    print(r.status_code)
    if(r.status_code == 200):
        j = json.loads(r.content)
        # NOTE(review): this prints the whole token response (a secret) to
        # the logs — consider redacting.
        print(j)
        session['access_token'] = j['access_token']
def on_intent(intent_request, session):
    """Route an IntentRequest to its handler, fetching an OAuth token first.

    Raises:
        ValueError: for an intent name this skill does not know.
    """
    print("on_intent requestId=" + intent_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    intent = intent_request['intent']
    intent_name = intent['name']
    # Lazily acquire the API token once per session.
    if 'access_token' not in session:
        oauth_request(session)
    print(session['access_token'])
    # Dispatch with guard clauses instead of an if/elif ladder.
    if intent_name == "Tracking":
        return setFirstEleven(intent, session)
    if intent_name == "TrackingSecond":
        return getParcelStatus(intent, session)
    if intent_name == "AMAZON.HelpIntent":
        return get_welcome_response()
    if intent_name in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
        return handle_session_end_request()
    raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
    """User ended the session (not via should_end_session=true); log only."""
    req_id = session_ended_request['requestId']
    print("on_session_ended requestId=" + req_id
          + ", sessionId=" + session['sessionId'])
    # add cleanup logic here
# --------------- Main handler ------------------
def lambda_handler(event, context):
    """ Route the incoming request based on type (LaunchRequest, IntentRequest,
    etc.) The JSON body of the request is provided in the event parameter.
    """
    # AWS Lambda entry point wired to the Alexa trigger; `context` is unused.
    print("event.session.application.applicationId=" +
          event['session']['application']['applicationId'])
    """
    Uncomment this if statement and populate with your skill's application ID to
    prevent someone else from configuring a skill that sends requests to this
    function.
    """
    # if (event['session']['application']['applicationId'] !=
    #         "amzn1.echo-sdk-ams.app.[unique-value-here]"):
    #     raise ValueError("Invalid Application ID")
    # New sessions get the on_session_started hook before dispatch.
    if event['session']['new']:
        on_session_started({'requestId': event['request']['requestId']},
                           event['session'])
    # Unknown request types fall through and implicitly return None.
    if event['request']['type'] == "LaunchRequest":
        return on_launch(event['request'], event['session'])
    elif event['request']['type'] == "IntentRequest":
        return on_intent(event['request'], event['session'])
    elif event['request']['type'] == "SessionEndedRequest":
        return on_session_ended(event['request'], event['session'])
|
4,464 | 48cef0377087d9245aad1fb759adf8ff07d2b66f | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from PIL import Image, ImageDraw
import torch
from torchvision import transforms
import cfg
from label import point_inside_of_quad
from model_VGG import advancedEAST
from preprocess import resize_image
from nms import nms
def sigmoid(x):
    """Element-wise logistic function: `y = 1 / (1 + exp(-x))`."""
    z = np.exp(-x)
    return 1 / (1 + z)
def cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array, img_path, s):
    """Crop one detected text quad out of the original image and save it.

    Args:
        geo: (4, 2) quad vertices in resized-image coordinates; divided
            back to original-image coordinates IN PLACE (mutates caller's array).
        scale_ratio_w: resized width / original width.
        scale_ratio_h: resized height / original height.
        im_array: HxWx3 array of the original image.
        img_path: output prefix; '_subim<s>.jpg' is appended.
        s: index of this quad among all detections.
    """
    geo /= [scale_ratio_w, scale_ratio_h]
    p_min = np.amin(geo, axis=0)
    p_max = np.amax(geo, axis=0)
    min_xy = p_min.astype(int)
    max_xy = p_max.astype(int) + 2
    sub_im_arr = im_array[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0], :].copy()
    # Whiten every bounding-box pixel that falls outside the quad itself.
    for m in range(min_xy[1], max_xy[1]):
        for n in range(min_xy[0], max_xy[0]):
            if not point_inside_of_quad(n, m, geo, p_min, p_max):
                sub_im_arr[m - min_xy[1], n - min_xy[0], :] = 255
    sub_im = Image.fromarray(sub_im_arr.astype('uint8')).convert('RGB')
    sub_im.save(img_path + '_subim%d.jpg' % s)
def predict(east_detect, img_path, pixel_threshold, quiet=False):
    """Run AdvancedEAST on one image and save visual/text outputs beside it.

    Writes '<img>_act.jpg' (activation-pixel overlay), '<img>_predict.jpg'
    (detected quads), optional per-quad crops, and optionally a '.txt' of
    quad coordinates, depending on cfg flags.

    Args:
        east_detect: the AdvancedEAST torch module (run on CPU here).
        img_path: path to the input image.
        pixel_threshold: inside-text score threshold for activation pixels.
        quiet: suppress warnings about invalid quads.
    """
    img = Image.open(img_path)  # PIL image object, RGB by default
    d_wight, d_height = resize_image(img, cfg.max_predict_img_size)
    img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
    x = transforms.ToTensor()(img)
    x = torch.unsqueeze(x, 0)  # add a batch dimension
    y = east_detect(x)
    y = torch.squeeze(y, 0)  # drop the batch dimension
    print(y.shape)
    y = y.detach().numpy()  # 7*64*64
    if y.shape[0] == 7:
        y = y.transpose((1, 2, 0))  # CHW->HWC
    # Channels 0..2 are scores (inside-text, side-vertex, head/tail) — squash
    # them to probabilities; channels 3..6 are vertex geometry offsets.
    y[:, :, :3] = sigmoid(y[:, :, :3])
    cond = np.greater_equal(y[:, :, 0], pixel_threshold)
    activation_pixels = np.where(cond)
    quad_scores, quad_after_nms = nms(y, activation_pixels)
    with Image.open(img_path) as im:
        im_array = np.array(im.convert('RGB'))  # image as a numpy array
        d_wight, d_height = resize_image(im, cfg.max_predict_img_size)
        scale_ratio_w = d_wight / im.width
        scale_ratio_h = d_height / im.height
        im = im.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
        quad_im = im.copy()
        draw = ImageDraw.Draw(im)
        # Outline every activation pixel; colour encodes vertex information
        # (yellow = head side-vertex, green = tail side-vertex, red = body).
        for i, j in zip(activation_pixels[0], activation_pixels[1]):
            px = (j + 0.5) * cfg.pixel_size
            py = (i + 0.5) * cfg.pixel_size
            line_width, line_color = 1, 'red'
            if y[i, j, 1] >= cfg.side_vertex_pixel_threshold:
                if y[i, j, 2] < cfg.trunc_threshold:
                    line_width, line_color = 2, 'yellow'
                elif y[i, j, 2] >= 1 - cfg.trunc_threshold:
                    line_width, line_color = 2, 'green'
            draw.line([(px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size),
                       (px + 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size),
                       (px + 0.5 * cfg.pixel_size, py + 0.5 * cfg.pixel_size),
                       (px - 0.5 * cfg.pixel_size, py + 0.5 * cfg.pixel_size),
                       (px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size)],
                      width=line_width, fill=line_color)
        im.save(img_path + '_act.jpg')
        quad_draw = ImageDraw.Draw(quad_im)
        txt_items = []
        # A quad is kept only if all of its vertex scores are positive.
        for score, geo, s in zip(quad_scores, quad_after_nms,
                                 range(len(quad_scores))):
            if np.amin(score) > 0:
                quad_draw.line([tuple(geo[0]),
                                tuple(geo[1]),
                                tuple(geo[2]),
                                tuple(geo[3]),
                                tuple(geo[0])], width=2, fill='red')
                if cfg.predict_cut_text_line:
                    cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array,
                                  img_path, s)
                rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]  # (N, 4, 2) label coordinates
                rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()
                txt_item = ','.join(map(str, rescaled_geo_list))
                txt_items.append(txt_item + '\n')
            elif not quiet:
                print('quad invalid with vertex num less then 4.')
        quad_im.save(img_path + '_predict.jpg')
        if cfg.predict_write2txt and len(txt_items) > 0:
            with open(img_path[:-4] + '.txt', 'w') as f_txt:
                f_txt.writelines(txt_items)
def predict_txt(east_detect, img_path, txt_path, pixel_threshold, quiet=False):
    """Run AdvancedEAST on one image and write the detected quads to a file.

    Same pipeline as predict() but with no image output: one line per quad
    in `txt_path`, eight comma-separated original-image coordinates.

    Args:
        east_detect: the AdvancedEAST torch module.
        img_path: path to the input image.
        txt_path: output path for the quad-coordinate lines.
        pixel_threshold: inside-text score threshold for activation pixels.
        quiet: suppress warnings about invalid quads.
    """
    img = Image.open(img_path)  # PIL image object, RGB by default
    d_wight, d_height = resize_image(img, cfg.max_predict_img_size)
    scale_ratio_w = d_wight / img.width
    scale_ratio_h = d_height / img.height
    transform = transforms.Compose([
        # torchvision's Resize takes (height, width); the original passed
        # (d_wight, d_height), swapping the axes for non-square images.
        transforms.Resize((d_height, d_wight), interpolation=2),
        transforms.ToTensor()
    ])
    x = transform(img)
    x = torch.unsqueeze(x, 0)  # add a batch dimension
    y = east_detect(x)
    y = torch.squeeze(y, 0)  # drop the batch dimension
    print(y.shape)
    y = y.detach().numpy()  # 7*64*64
    if y.shape[0] == 7:
        y = y.transpose((1, 2, 0))  # CHW->HWC
    # Channels 0..2 are scores — squash them to probabilities.
    y[:, :, :3] = sigmoid(y[:, :, :3])
    cond = np.greater_equal(y[:, :, 0], pixel_threshold)
    activation_pixels = np.where(cond)
    quad_scores, quad_after_nms = nms(y, activation_pixels)
    txt_items = []
    # Keep a quad only if all of its vertex scores are positive.
    for score, geo in zip(quad_scores, quad_after_nms):
        if np.amin(score) > 0:
            rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]
            rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()
            txt_item = ','.join(map(str, rescaled_geo_list))
            txt_items.append(txt_item + '\n')
        elif not quiet:
            print('quad invalid with vertex num less then 4.')
    if cfg.predict_write2txt and len(txt_items) > 0:
        with open(txt_path, 'w') as f_txt:
            f_txt.writelines(txt_items)
if __name__ == '__main__':
    # Ensure the demo output directory exists (makedirs(exist_ok=True)
    # makes the exists() check redundant but harmless).
    if not os.path.exists('demo'):
        os.makedirs('./demo', exist_ok=True)
    img_path = cfg.img_path
    threshold = float(cfg.predict_threshold)
    # Fall back to the bundled checkpoint when cfg does not name one.
    pth_path = cfg.pth_path if cfg.pth_path else 'saved_model/3T736_latest.pth'
    print(img_path, threshold)
    east = advancedEAST()
    # Strip the 'module.' prefix that torch.nn.DataParallel adds to keys.
    state_dict = {k.replace('module.', ''): v for k, v in torch.load(pth_path, map_location='cpu').items()}
    east.load_state_dict(state_dict)
    predict(east, img_path, threshold)
|
4,465 | da41f26489c477e0df9735606457bd4ee4e5a396 | import kubernetes.client
from kubernetes.client.rest import ApiException
from pprint import pprint
from kubeops_api.models.cluster import Cluster
class ClusterMonitor():
    """Thin wrapper around the Kubernetes CoreV1 API for one cluster.

    Builds an authenticated client against the first master node's API
    server (port 6443) using the cluster's bearer token.
    """
    def __init__(self,cluster):
        self.cluster = cluster
        self.token = self.cluster.get_cluster_token()
        self.cluster.change_to()
        # First host of the 'master' group provides the API-server address.
        master = self.cluster.group_set.get(name='master').hosts.first()
        configuration = kubernetes.client.Configuration()
        configuration.api_key_prefix['authorization'] = 'Bearer'
        configuration.api_key['authorization'] = self.token
        print('---token----')
        # NOTE(review): prints the bearer token to stdout — consider removing.
        print(self.token)
        # NOTE(review): debug mode logs full HTTP requests incl. auth headers.
        configuration.debug = True
        configuration.host = 'https://'+master.ip+":6443"
        # NOTE(review): TLS verification disabled — confirm this is intended.
        configuration.verify_ssl = False
        print('https://'+master.ip+":6443")
        self.api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient(configuration))
    def list_pods(self):
        """Return the pods of all namespaces (a V1PodList, per the client API)."""
        pods = self.api_instance.list_pod_for_all_namespaces()
        return pods
|
4,466 | 6abfd6c0a644356ae0bc75d62472b5c495118a8e | import time
from bitfinex_trade_client import BitfinexClient,BitfinexTradeClient
KEY = "nBi8YyJZZ9ZhSOf2jEpMAoBpzKt2Shh6IoLdTjFRYvb"
SECRET = "XO6FUYbhFYqBflXYSaKMiu1hGHLhGf63xsOK0Pf7osA"
class EMA:
    """Exponential moving average with a warm-up phase.

    Until `duration` samples have been seen, each update uses a widening
    smoothing factor 2/(count+1); after that, the fixed factor
    2/(duration+1) applies.
    """

    def __init__(self, duration):
        self.value = 0
        self.duration = duration
        self.count = 0
        self.multiplier = 2.0 / (self.duration + 1)

    def update(self, px):
        """Fold the new price `px` into the running average."""
        m = self.multiplier
        if self.count < self.duration:
            self.count += 1
            m = 2.0 / (self.count + 1)
        self.value = m * px + (1 - m) * self.value

    def ready(self):
        """True once at least 5% of the warm-up window has been seen."""
        return self.count >= self.duration * 0.05
class Order:
    """Plain record for a single exchange order plus its fill state."""

    def __init__(self, amount, price, side, ord_type, symbol):
        self.amount = amount
        self.price = price
        self.side = side
        self.ord_type = ord_type
        self.symbol = symbol
        # Fill tracking: quantity executed so far and its average price.
        self.traded_amount = 0
        self.traded_px = 0
        # Exchange-assigned id; -1 until the order is acknowledged.
        self.id = -1

    def __str__(self):
        amount, price, side = (str(self.amount), str(self.price), str(self.side))
        return "Order => Amount:" + amount + "|Price:" + price + "|Side:" + side
class BitfinexMMTrader:
def __init__(self, key, secret, symbol, single_order_amount, total_amount, interval, duration, threshold, stop_loss):
self.symbol = symbol
self.sym1 = symbol[:3]
self.sym2 = symbol[3:]
self.trade_client = BitfinexTradeClient(key, secret)
self.client = BitfinexClient()
self.total_amount = total_amount
self.single_order_amount = single_order_amount
self.interval = interval
self.duration = int(duration / interval)
self.threshold = threshold
self.fees_per = self.get_fees()
self.stop_loss = stop_loss
self.ema = EMA(self.duration)
self.buy_order = None
self.sell_order = None
self.buy_position = 0
self.buy_px = 0
self.sell_position = 0
self.sell_px = 0
self.last_email_time = 0
self.run = True
self.ticker = None
def get_fees(self):
account_info = self.trade_client.account_info()
return float(account_info[0]["maker_fees"])
def get_pnl(self):
pos = max(self.buy_position, self.sell_position)
if(pos == 0):
return 0
bid = float(self.ticker['bid'])
ask = float(self.ticker['ask'])
buy_avg_px = ((pos - self.buy_position) * ask + self.buy_position * self.buy_px) / pos
sell_avg_px = ((pos - self.sell_position) * bid + self.sell_position * self.sell_px) / pos
cost = (buy_avg_px + sell_avg_px) * pos * self.fees_per / 100.0
return pos * (sell_avg_px - buy_avg_px) - cost
def buy_sell_px(self):
position_offset = self.net_position() / self.total_amount / 2.0
buy_threshold = self.threshold * (1 + position_offset)
sell_threshold = self.threshold * (1 - position_offset)
buy_px = round(self.ema.value * (1 - buy_threshold / 100.0))
sell_px = round(self.ema.value * (1 + sell_threshold / 100.0))
return (buy_px, sell_px)
def can_buy(self, buy_px, bid):
return (buy_px >= bid and self.net_position() < self.total_amount)
def can_sell(self, sell_px, ask):
return (sell_px <= ask and -self.net_position() < self.total_amount)
def net_position(self):
return (self.buy_position - self.sell_position)
def update_order_status(self):
try:
if(self.buy_order != None):
status = self.trade_client.status_order(self.buy_order.id)
executed_amount = float(status["executed_amount"])
executed_px = float(status["avg_execution_price"])
new_executed_amount = executed_amount - self.buy_order.traded_amount
if (new_executed_amount > 0):
print(status)
new_executed_px = (executed_amount * executed_px - self.buy_order.traded_amount * self.buy_order.traded_px) / new_executed_amount
self.buy_order.traded_amount = executed_amount
self.buy_order.traded_px = executed_px
self.buy_px = (self.buy_px * self.buy_position + new_executed_amount * new_executed_px) / (self.buy_position + new_executed_amount)
self.buy_position += new_executed_amount
if(not status["is_live"]):
self.buy_order = None
if (self.sell_order != None):
status = self.trade_client.status_order(self.sell_order.id)
executed_amount = float(status["executed_amount"])
executed_px = float(status["avg_execution_price"])
new_executed_amount = executed_amount - self.sell_order.traded_amount
if(new_executed_amount > 0):
print(status)
new_executed_px = (executed_amount * executed_px - self.sell_order.traded_amount * self.sell_order.traded_px) / new_executed_amount
self.sell_order.traded_amount = executed_amount
self.sell_order.traded_px = executed_px
self.sell_px = (self.sell_px * self.sell_position + new_executed_amount * new_executed_px) / (self.sell_position + new_executed_amount)
self.sell_position += new_executed_amount
print("PNL => " + str(self.get_pnl()))
if (not status["is_live"]):
self.sell_order = None
print("Position => BuyPos:" + str(self.buy_position) + "|BuyPx:" + str(self.buy_px) + "|SellPos:" + str(self.sell_position) + "|SellPx:" + str(self.sell_px))
except Exception as e:
print("Update Order Status Exception: " + str(e))
pass
def sqoff(self):
pnl = self.get_pnl()
if(pnl < -self.stop_loss):
position = self.net_position()
if(position > 0):
self.sell_order = Order(position, float(self.ticker['bid']), "sell", "exchange fill-or-kill", self.symbol)
print(self.buy_order)
status = self.trade_client.place_order(str(position), str(self.ticker['bid']), "sell", "exchange fill-or-kill", False,
self.symbol)
print(status)
if ("order_id" in status):
self.sell_order.id = status["order_id"]
else:
self.sell_order = None
return True
elif(position < 0):
self.buy_order = Order(-position, float(self.ticker['ask']), "buy", "exchange fill-or-kill", self.symbol)
print(self.buy_order)
status = self.trade_client.place_order(str(-position), str(self.ticker['ask']), "buy", "exchange fill-or-kill", False,
self.symbol)
print(status)
if ("order_id" in status):
self.buy_order.id = status["order_id"]
else:
self.buy_order = None
return True
return False
def email(self, message, sub, emails):
curr_time = time.time()
if (curr_time - self.last_email_time > 1800):
print(message)
def trade(self):
while (self.run):
time.sleep(self.interval)
try:
self.ticker = self.client.ticker(self.symbol)
except Exception as e:
print("Ticker Exception: " + str(e))
continue
self.update_order_status()
self.ema.update(self.ticker['mid'])
if(self.ema.ready()):
if (not self.sqoff()):
buy_px, sell_px = self.buy_sell_px()
bid = self.ticker['bid']
ask = self.ticker['ask']
buy_px = min(bid, buy_px)
sell_px = max(ask, sell_px)
print("Market => EMA: " + str(round(self.ema.value)) + "|BuyPx:" + str(buy_px) + "|SellPx:" + str(sell_px) + "|Bid:" + str(bid) + "|Ask:" + str(ask))
if(self.can_buy(buy_px, bid)):
if(self.buy_order == None):
amount = min(self.single_order_amount, self.total_amount - max(0, self.net_position()))
self.buy_order = Order(amount, buy_px, "buy", "exchange limit", self.symbol)
print(self.buy_order)
status = self.trade_client.place_order(str(amount), str(buy_px), "buy", "exchange limit", True, self.symbol)
print(status)
if ("order_id" in status):
self.buy_order.id = status["order_id"]
else:
self.buy_order = None
else:
if(abs(self.buy_order.price - buy_px) / buy_px * 100.0 > self.threshold / 10.0 ):
self.trade_client.delete_order(self.buy_order.id)
if (self.can_sell(sell_px, ask)):
if (self.sell_order == None):
amount = min(self.single_order_amount, self.total_amount - max(0, -self.net_position()))
self.sell_order = Order(amount, sell_px, "sell", "exchange limit", self.symbol)
print(self.sell_order)
status = self.trade_client.place_order(str(amount), str(sell_px), "sell", "exchange limit", True, self.symbol)
print(status)
if("order_id" in status):
self.sell_order.id = status["order_id"]
else:
self.sell_order = None
else:
if (abs(self.sell_order.price - sell_px) / sell_px * 100.0 > self.threshold / 10.0 or self.sell_order.amount < amount):
self.trade_client.delete_order(self.sell_order.id)
|
4,467 | d122267e1da2d9cf68d245148bb496dfba3e7d19 | #!/usr/bin/env python
"""
Load API client for a Tool Registry Service (TRS) endpoint based
either on the GA4GH specification or an existing client library.
"""
import logging
from bravado.requests_client import RequestsClient
from ga4ghtest.core.config import trs_config
from .client import TRSClient
logger = logging.getLogger(__name__)
def _get_trs_opts(service_id):
    """
    Fetch the stored connection parameters for one tool registry service.
    """
    opts_by_service = trs_config()
    return opts_by_service[service_id]
def _init_http_client(service_id=None, opts=None):
    """
    Build a RequestsClient configured with the selected service's API key.

    When ``service_id`` is given, its stored options take precedence over
    any explicitly supplied ``opts``.
    """
    if service_id:
        opts = _get_trs_opts(service_id)
    client = RequestsClient()
    client.set_api_key(
        host=opts['host'],
        api_key=opts['auth'],
        param_in='header',
    )
    return client
class TRSInterface:
    """
    Abstract surface of the GA4GH Tool Registry Service (TRS) API.

    Concrete adapters must override every operation; each default
    implementation raises ``NotImplementedError``.
    """

    def toolsGet(self):
        """List all tools."""
        raise NotImplementedError

    def metadataGet(self):
        """Return service metadata."""
        raise NotImplementedError

    def toolsIdGet(self, tool_id):
        """Fetch one tool by id."""
        raise NotImplementedError

    def toolsIdVersionGet(self, tool_id, tool_version):
        """Fetch a specific version of a tool."""
        raise NotImplementedError

    def toolsIdVersionsGet(self, tool_id):
        """List all versions of a tool."""
        raise NotImplementedError

    def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):
        """Fetch a tool version's descriptor of the given type."""
        raise NotImplementedError

    def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):
        """Fetch a descriptor file at a relative path."""
        raise NotImplementedError

    def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):
        """Fetch a tool version's test files."""
        raise NotImplementedError

    def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):
        """List files associated with a tool version."""
        raise NotImplementedError

    def toolsIdVersionsContainerGet(self, tool_id, tool_version):
        """Fetch container specifications for a tool version."""
        raise NotImplementedError
class TRSAdapter(TRSInterface):
    """
    Adapter class for TRS client functionality.

    Maps the generated-API style operation names onto an existing
    ``TRSClient`` instance.

    Args:
        trs_client: client object implementing the TRS operations.
    """
    def __init__(self, trs_client):
        self.trs_client = trs_client

    def toolsGet(self):
        return self.trs_client.get_tools()

    def metadataGet(self):
        # BUG FIX: this was `raise self.trs_client.get_tool_types()`, which
        # tries to raise the returned value (a TypeError at runtime for any
        # non-exception result) instead of returning it.
        return self.trs_client.get_tool_types()

    def toolsIdGet(self, tool_id):
        return self.trs_client.get_tool(tool_id)

    def toolsIdVersionGet(self, tool_id, tool_version):
        return self.trs_client.get_tool_version(tool_id, tool_version)

    def toolsIdVersionsGet(self, tool_id):
        return self.trs_client.get_tool_versions(tool_id)

    def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):
        return self.trs_client.get_tool_descriptor(tool_id, tool_version, descriptor_type)

    def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):
        return self.trs_client.get_relative_tool_descriptor(tool_id, tool_version, descriptor_type, rel_path)

    def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):
        return self.trs_client.get_tool_tests(tool_id, tool_version, descriptor_type, rel_path)

    def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):
        return self.trs_client.get_tools_with_relative_path(tool_id, tool_version, descriptor_type)

    def toolsIdVersionsContainerGet(self, tool_id, tool_version):
        return self.trs_client.get_tool_container_specs(tool_id, tool_version)
def load_trs_client(service_id, http_client=None):
    """Return an API client for the selected workflow execution service."""
    # NOTE(review): `http_client` is accepted but never used here — TRSClient
    # is constructed purely from the stored service options; confirm whether
    # the parameter should be forwarded or removed.
    trs_client = TRSClient(service=_get_trs_opts(service_id))
    return TRSAdapter(trs_client)
|
4,468 | 45750152313fd3670867c61d0173e4cb11a806ba | T = int(input())
# For each test case: read a string of 3-character card tokens (suit letter +
# 2-digit rank), report ERROR on duplicates, otherwise print how many cards
# per suit are still missing from a full 13-card suit.
for cnt in range(1, T + 1):
    S = input()
    S_list = []
    # Remaining cards per suit: Spades, Diamonds, Hearts, Clubs.
    card = {'S': 13, 'D': 13, 'H': 13, 'C': 13}
    print('#' + str(cnt), end=' ')
    # Split the input into consecutive 3-character card tokens.
    for i in range(0, len(S), 3):
        S_list.append(S[i:i + 3])
    if len(set(S_list)) != len(S_list):
        # At least one card appeared twice.
        print('ERROR')
    else:
        for i in S_list:
            card[i[0]] -=1
        print(*card.values())
|
4,469 | e2e5ca388d67f2a13eaef6067fc19e2dfe284a55 | import json
import sys
import os
# Change to Singularity working directory.
# NOTE: paths below are hard-coded to the container's /mnt bind mounts.
os.chdir('/mnt/cwd')
# Take subset index as argument
subset_index = sys.argv[1]
# Open up subset matching this.
with open('/mnt/scripts/outputs/instcat_list_subset'+str(subset_index)+'.json', 'r') as f:
    instcat_list_subset = json.load(f)
# Import instcat trimmer
sys.path.append('/mnt/scripts')
import instcat_trimmer as ict
# Write the per-subset work list next to the input subset file.
ict.determine_instcat_work(instcat_list_subset, '/mnt/scripts/outputs/worklist_subset'+str(subset_index)+'.json')
|
4,470 | 6c27f70e820202f6cc4348de3c9198e7b20ec7d9 | from zExceptions import Unauthorized
# Zope/ERP5 restricted script: `context`, `REQUEST`, `source_reference` and
# `consumption_xml` are bound by the container, and a top-level `return` is valid.
if REQUEST is not None:
    # The script must not be callable through the web.
    raise Unauthorized
portal = context.getPortalObject()
compute_node = context
# Unique consumption-report reference for this compute node / source document.
reference = "TIOCONS-%s-%s" % (compute_node.getReference(), source_reference)
# Monotonically increasing version per reference, via the portal id generator.
version = "%s" % context.getPortalObject().portal_ids.generateNewId(
    id_group=('slap_tioxml_consumption_reference', reference), default=1)
document = portal.consumption_document_module.newContent(
    portal_type="Computer Consumption TioXML File",
    source_reference=source_reference,
    title="%s consumption (%s)" % (compute_node.getReference(), source_reference),
    reference=reference,
    version=version,
    data=consumption_xml,
    classification="personal",
    publication_section="other",
    contributor_value=compute_node,
)
# Move the document into the submitted workflow state and return its URL.
document.submit()
return document.getRelativeUrl()
|
4,471 | 00c6899b9d49cbbd0f1980eada77ad91562211a0 | import requests
import json
class Parser:
    """Scrape product data for Joom category links.

    ``links_list`` is ``[count, url1, url2, ...]``: the first element is the
    number of products to request per category, the rest are category page
    URLs. Results accumulate in ``product_info_dict`` keyed by product id.

    NOTE: all network I/O happens eagerly in ``__init__``.
    """
    init_url = r'https://www.joom.com/tokens/init'
    # BUG FIX: the query string had been HTML-entity mangled — "&curren;cy"
    # collapsed to the "¤" character — breaking the currency parameter.
    products_url = r'https://api.joom.com/1.1/search/products?language=ru-RU&currency=RUB'

    def __init__(self, links_list):
        self.links = links_list
        self.product_info_dict = {}
        # Obtain an anonymous API access token.
        access_token = json.loads(requests.post(self.init_url).text)['accessToken']
        count = int(links_list[0])
        for url in self.links[1:]:
            # Strip the fixed site prefix to obtain the category id.
            id_link = url[33:]
            headers = {'Authorization': 'Bearer ' + access_token}
            data = {
                'count': count,
                'filters': [{
                    'id': 'categoryId',
                    'value': {
                        'type': 'categories',
                        'items': [
                            {'id': id_link}
                        ]
                    }
                }
                ]
            }
            res = requests.post(self.products_url, json=data, headers=headers)
            for product in json.loads(res.text)['payload']['items']:
                # Download the second (medium-size) image variant.
                content = requests.get(product['mainImage']['images'][1]['url']).content
                self.product_info_dict.update({
                    product['id']:
                        dict(
                            price=product['price'],
                            name=product['name'],
                            image=content,
                            description=self.get_description(product['id'], headers)
                        )
                })

    def get_description(self, id_str, headers):
        """Fetch the long description for a single product id."""
        # BUG FIX: same "&currency" entity mangling as products_url.
        link = 'https://api.joom.com/1.1/products/' + id_str + '?language=ru-RU&currency=RUB'
        res = requests.get(link, headers=headers)
        return json.loads(res.text)['payload']['description']
|
4,472 | 7dce240a891e807b1f5251a09a69368f4e513973 | # Advent of Code: Day 4
"""A new system policy has been put in place that requires all accounts to
use a passphrase instead of simply a password. A passphrase consists of a
series of words (lowercase letters) separated by spaces.
To ensure security, a valid passphrase must contain no duplicate words.
"""
def valid(filename):
    """Count passphrases in *filename* that contain no duplicate words.

    Each line is a passphrase of space-separated words; a line is valid
    when every word is unique.
    """
    # Context manager guarantees the file is closed even on error
    # (the original left it open if readlines() raised).
    with open(filename, 'r') as f:
        lines = f.readlines()
    result = 0
    for line in lines:
        words = line.rstrip().split(' ')
        if len(words) == len(set(words)):
            result += 1
    return result
"""For added security, yet another system policy has been put in place.
Now, a valid passphrase must contain no two words that are anagrams of
each other - that is, a passphrase is invalid if any word's letters can
be rearranged to form any other word in the passphrase.
"""
def valid_anagram(filename):
    """Count passphrases where no two words are anagrams of each other."""
    # Context manager guarantees the file is closed even on error.
    with open(filename, 'r') as f:
        lines = f.readlines()
    result = 0
    for line in lines:
        # Two words are anagrams iff their sorted letters match, so a line
        # is valid when all sorted-letter keys are distinct. Using a set
        # replaces the original O(n^2) list.count() scan.
        keys = [tuple(sorted(w)) for w in line.rstrip().split(' ')]
        if len(keys) == len(set(keys)):
            result += 1
    return result
if __name__ == '__main__':
    # Part 1: count passphrases with no duplicate words.
    print(valid('day4-input.txt'))
print(valid_anagram('day4-input.txt')) |
4,473 | a52e0dde47d7df1b7b30887a690b201733ac7592 | from trytond.pool import Pool
from .reporte import MyInvoiceReport
def register():
    """Register this module's report classes in the Tryton pool."""
    Pool.register(
        MyInvoiceReport,
        module='cooperar-reporte-factura', type_='report')
|
4,474 | 0d28ab54f08301d9788ca9a5e46d522e043e9507 | from django.test import TestCase, Client
from pdf_crawler.models import Document
from rest_framework.reverse import reverse
class TestCase(TestCase):
    # NOTE(review): this class name shadows the django.test.TestCase it
    # subclasses; a distinct name (e.g. PdfCrawlerEndpointTests) would be
    # clearer — renaming is left to a separate change.
    client = Client()
    def setUp(self):
        # One Document fixture; .save() after create() is redundant since
        # create() already persists, but is harmless.
        Document.objects.create(name='First').save()
    def test_endpoints(self):
        """
        Smoke-test the list and detail endpoints return HTTP 200.
        """
        # NOTE(review): the detail test assumes the fixture gets pk=1 —
        # holds on a fresh test database; verify if tests are reordered.
        self.assertEqual(self.client.get(reverse('pdf_crawler:document-list')).status_code, 200)
        self.assertEqual(self.client.get(reverse('pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)
        self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).status_code, 200)
|
4,475 | 5456fb2938ae4d0f69414c153390f86437088114 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jul 18 2016
@author: mandd
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import abc
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from BaseClasses import BaseType
from utils import utils
#Internal Modules End--------------------------------------------------------------------------------
class Metric(utils.metaclass_insert(abc.ABCMeta,BaseType)):
  """
    This is the general interface to any RAVEN metric object.
    It contains an initialize, a _readMoreXML, and an evaluation (i.e., distance) methods
  """
  def __init__(self):
    """
      This is the basic method initialize the metric object
      @ In, none
      @ Out, none
    """
    BaseType.__init__(self)
    # type/name default to the concrete subclass's name
    self.type = self.__class__.__name__
    self.name = self.__class__.__name__
    self.acceptsProbability = False #If True the metric needs to be able to handle (value,probability) where value and probability are lists
    self.acceptsDistribution = False #If True the metric needs to be able to handle a passed in Distribution
  def initialize(self,inputDict):
    """
      This method initialize each metric object
      @ In, inputDict, dict, dictionary containing initialization parameters
      @ Out, none
    """
    pass
  def _readMoreXML(self,xmlNode):
    """
      Method that reads the portion of the xml input that belongs to this specialized class
      and initialize internal parameters
      @ In, xmlNode, xml.etree.Element, Xml element node
      @ Out, None
    """
    # delegates to the subclass hook; subclasses must define _localReadMoreXML
    self._localReadMoreXML(xmlNode)
  def distance(self,x,y,**kwargs):
    """
      This method actually calculates the distance between two dataObjects x and y
      @ In, x, dict, dictionary containing data of x
      @ In, y, dict, dictionary containing data of y
      @ In, kwargs, dictionary of parameters characteristic of each metric (e.g., weights)
      @ Out, value, float, distance between x and y
    """
    # NOTE(review): base implementation returns None rather than raising;
    # subclasses are expected to override (not enforced via @abstractmethod).
    pass
|
4,476 | b7d3af29e024b0b2cf5d2c054290f799eae7fed1 | import pymysql
pymysql.install_as_MySQLdb()
# from keras.models import load_model
# from keras.models import Model
# from ai import settings
#
# print('load model ...')
# model = load_model(settings.MODEL_PATH)
# model = Model(inputs=model.input, outputs=model.get_layer('dnsthree').output)
# print('load done.')
|
4,477 | 08f0b261b5a9b0f5133c468b3f92dc00285eda6a | import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.grid_search import GridSearchCV
import matplotlib.pyplot as plt
def loadTrainSet(filepath):
    """Load a label-first CSV and return an array with the label moved to
    the last column.

    The file's first row is a header (skipped); column 0 is the label.
    Returns an array of strings of shape (n_samples, n_features + 1).
    """
    # BUG FIX: dtype=np.str — the np.str alias was deprecated in NumPy 1.20
    # and removed in 1.24; the builtin str is the documented replacement.
    raw = np.loadtxt(filepath, delimiter=',', dtype=str, skiprows=1)
    X, y = raw[:, 1:], raw[:, 0]
    trainSet = np.hstack((X, y.reshape(-1, 1)))
    return trainSet
def out(trainset):
    # Split a combined [features | label] table into X (features) and y (labels).
    trainset = trainset
    # All columns except the last are features.
    X=trainset[:,:(trainset.shape[1]-1)]
    y=trainset[:,(trainset.shape[1]-1)]
    X=np.asarray(X)
    # NOTE(review): np.asarray(y.T)[0] only yields the full label vector when
    # `trainset` is an np.matrix (where y stays 2-D); with a plain ndarray this
    # indexes a single element — confirm the intended input type.
    y=np.asarray(y.T)[0]
    return X,y
def gridsearchcv(X,y):
    """Grid-search RandomForest n_estimators and plot accuracy/std curves.

    NOTE(review): relies on the pre-0.20 scikit-learn API
    (``sklearn.grid_search`` module and the ``grid_scores_`` attribute),
    both removed in modern releases — migrate to
    ``sklearn.model_selection.GridSearchCV`` / ``cv_results_`` when updating.
    """
    accuracy=[]
    stdlist=[]
    classifier = RandomForestClassifier(verbose=2, n_jobs=1,oob_score=1)
    # Active search: only n_estimators; alternatives below were explored manually.
    param_grid={'n_estimators':np.arange(1, 100, 10)}
    # param_grid={'n_estimators':np.arange(1, 202, 10)}
    # param_grid={'n_estimators':[200], 'criterion':['gini', 'entropy']}
    # param_grid={'n_estimators':[200], 'max_features':np.append(np.arange(28-20, 28, 1), np.arange(28, 28+20, 1))}
    # param_grid={'n_estimators':[200], 'max_depth':np.arange(40, 40+20, 1)}
    # param_grid={'n_estimators':[200], 'min_samples_split':np.arange(2, 2+10, 1)}
    # param_grid={'n_estimators':[200], 'min_samples_leaf':np.arange(1, 1+10, 1)}
    # param_grid={'n_estimators':[200], 'max_leaf_nodes':np.arange(3000, 3000+1000, 100)}
    grid = GridSearchCV(classifier , param_grid=param_grid)
    grid.fit(X,y)
    # Side-by-side plots: mean CV accuracy and its std, per n_estimators value.
    fig=plt.figure(1, figsize=(16, 12))
    plt.clf()
    ax1=fig.add_subplot(1,2,1)
    ax2=fig.add_subplot(1,2,2)
    scores=grid.grid_scores_
    for i in range(len(scores)):
        # grid_scores_ entries are (params, mean_score, cv_scores) tuples.
        accu=scores[i][1]
        stdnum=np.std(scores[i][2])
        accuracy.append(accu)
        stdlist.append(stdnum)
    ax1.plot(np.arange(1, 100, 10),accuracy, linewidth=2)
    ax2.plot(np.arange(1, 100, 10),stdlist, linewidth=2)
    plt.axis('tight')
    ax1.set_xlabel('n_estimators')
    ax1.set_ylabel('accuracy')
    ax2.set_xlabel('n_estimators')
    ax2.set_ylabel('std_accuracy')
4,478 | 8ae6630ccd2f2b5a10401cadb4574772f6ecbc4a | import numpy as np
from math import inf
"""
Strategy made by duckboycool for carykh's Prisoner's Dilemma Tournament. (https://youtu.be/r2Fw_rms-mA)
It is a nice Tit for Tat based strategy that attempts to detect when the opponent is not changing their actions based
off of ours so we can maximize with defects, and attempts to cooperate extra in certain scenarios to avoid defection chains.
"""
# Non-responsive factor sensitivities
deafThresh = 0.37
deafThresh2 = 0.13
patterns = [
(np.array([[],[]], dtype=int), (True, None)), # Start (cooperate)
(np.array([[True, False, True], # Defection chain
[False, True, False]], dtype=int), (True, None)), # Cooperate extra to try to get out (max of 9)
(np.array([[True, False, True, False, True], # Longer defection chain (if first is at max)
[False, True, False, True, False]], dtype=int), (True, None)), # Cooperate extra to try to get out (max of 4)
(np.array([[True, False, False, True, False, False, True, False], # Other defection chain
[False, False, True, False, False, True, False, False]], dtype=int), (True, [True, True])), # Cooperate a lot extra to try to get out (max of 3)
(np.array([[True, False, True], # They didn't respond to retaliation (possibly forgiving despite also testing?)
[False, True, True]], dtype=int), (False, None)), # Defect again to see how they react
(np.array([[True, False, True, False, False, False], # Forgiving, but retaliating after 2
[True, True, True, True, True, False]], dtype=int), (True, [False, True])), # Defect every other (and don't detect random, max of 6)
(np.array([[True, False, True], # No response to defection (probably forgiving)
[True, True, True]], dtype=int), (False, None)), # Defect to take advantage of forgiveness
(np.array([[False, False, False], # No response to defection
[True, True, True]], dtype=int), (False, [False])), # Defect a few times to take advantage
(np.array([[False, False, False, False], # Inconsistent defection
[True, True, False, True]], dtype=int), (False, [False])), # Defect (expected of >3, max of 6)
(np.array([[False, False, False, False], # Defected inconsistently to defection
[False, True, True, True]], dtype=int), (False, [False])), # Defect (expected of >3, max of 2)
(np.array([[True, True, False, True, True, True], # Tried defecting
[True, False, True, False, True, True]], dtype=int), (False, None)), # Defect to test (since they're non-nice anyway, max of 2)
(np.array([[True, False, True, True, True, False], # Alternating
[False, True, False, True, False, True]], dtype=int), (False, [False])), # Defect (non-responsive potentially, max of 7)
]
counts = []
maxes = [inf, 9, 4, 3, inf, 6, inf, inf, 6, 2, 2, 7]
defects = np.array([[False, False, False, False],
[False, False, False, False]], dtype=int)
def strategy(history, memory):
    """Decide the next iterated-prisoner's-dilemma move.

    history: 2xN int array — row 0 our past moves, row 1 opponent's
             (True/1 = cooperate, False/0 = defect), newest in the last column.
    memory:  optional list of pre-planned moves to play out first.
    Returns (move, memory) where move is truthy to cooperate.
    Relies on module globals: patterns, counts, maxes, defects, deafThresh(2).
    """
    if not(history.size):
        # New game: reset the per-pattern usage counters.
        counts.clear()
        for x in patterns:
            counts.append(0)
    if memory: # Do sequence given in memory
        return memory.pop(0), memory
    # First matching recent-history pattern (under its usage cap) dictates the move.
    for i, (pattern, response) in enumerate(patterns):
        if counts[i] < maxes[i]:
            if history.size >= pattern.size and np.array_equal(pattern, history[:,-pattern.shape[1]:]):
                counts[i] += 1
                return response
    # Non-responsive detection
    # Pair each of our moves with the opponent's reply one turn later.
    responses = np.array([history[0,:-1], history[1,1:]], int)
    Ts = np.where(responses[0] == True)[0]
    Fs = np.where(responses[0] == False)[0]
    # Counts of opponent replies (T/F) after our cooperations (T*) / defections (F*).
    TtT = np.count_nonzero(responses[1][Ts])
    TtF = np.count_nonzero(np.logical_not(responses[1][Ts]))
    FtT = np.count_nonzero(responses[1][Fs])
    FtF = np.count_nonzero(np.logical_not(responses[1][Fs]))
    if (
        len(Ts) > 2 and len(Fs) > 3 and counts[5] == 0 and # Enough sample (and haven't already tried against forgiving)
        (((abs(TtT - TtF)/len(Ts)) < deafThresh and (abs(FtT - FtF)/len(Fs)) < deafThresh) or # Sensed not responding (low ratios)
        abs(TtF/len(Ts) - FtF/len(Fs)) < deafThresh2) # Sensed not responding (similar ratios)
    ): # Probably non-responsive (random, or following a set pattern)
        return False, None # Defect since they won't retaliate
    if np.array_equal(history[:,-4:], defects) and (True in history[1,-22:]): # All defections recently and player has cooperated somewhat recently (or currently close to start)
        return True, [True, True] # Cooperate now and next 2 turns, try to avoid all defections if possible
    return history[1,-1], None # Tit for Tat
|
4,479 | 149ac778a552fac4499d7146db8600c91c68c60e | from time import sleep
import RPi.GPIO as gpio
buzzer_pin = 18
gpio.setmode(gpio.BCM)
gpio.setup(buzzer_pin, gpio.OUT)
def buzz(pitch, duration):
    """Drive the buzzer with a square wave at `pitch` Hz for `duration` seconds."""
    # Half of one wave period: time spent high, then low, per cycle.
    half_period = 1.0 / pitch / 2.0
    for _ in range(int(duration * pitch)):
        gpio.output(buzzer_pin, True)
        sleep(half_period)
        gpio.output(buzzer_pin, False)
        sleep(half_period)
pitch = float(1000)
duration = float(2)
buzz(pitch, duration)
|
4,480 | 5ed34ada35dfb2f783af4485bf9d31aa42712b9a | """
Django settings for hauki project.
"""
import logging
import os
import subprocess
import environ
import sentry_sdk
from django.conf.global_settings import LANGUAGES as GLOBAL_LANGUAGES
from django.core.exceptions import ImproperlyConfigured
from sentry_sdk.integrations.django import DjangoIntegration
CONFIG_FILE_NAME = "config_dev.env"
# This will get default settings, as Django has not yet initialized
# logging when importing this file
logger = logging.getLogger(__name__)
def get_git_revision_hash() -> str:
    """
    Return the current git commit hash, or a placeholder string.

    Sentry release tagging needs some identifier even when git (or a
    repository) is unavailable, so each failure mode maps to a fixed
    marker value instead of raising.
    """
    try:
        revision = subprocess.check_output(
            ["git", "rev-parse", "HEAD"],
            stderr=subprocess.DEVNULL,
            encoding="utf8",
        )
    except FileNotFoundError:
        # The git executable itself is missing.
        revision = "git_not_available"
    except subprocess.CalledProcessError:
        # git ran but failed, e.g. we are not inside a repository.
        revision = "no_repository"
    return revision.rstrip()
root = environ.Path(__file__) - 2 # two levels back in hierarchy
env = environ.Env(
DEBUG=(bool, False),
DJANGO_LOG_LEVEL=(str, "INFO"),
CONN_MAX_AGE=(int, 0),
SYSTEM_DATA_SOURCE_ID=(str, "hauki"),
LANGUAGES=(list, ["fi", "sv", "en"]),
DATABASE_URL=(str, "postgres:///hauki"),
TEST_DATABASE_URL=(str, ""),
TOKEN_AUTH_ACCEPTED_AUDIENCE=(str, ""),
TOKEN_AUTH_SHARED_SECRET=(str, ""),
SECRET_KEY=(str, ""),
ALLOWED_HOSTS=(list, []),
ADMINS=(list, []),
SECURE_PROXY_SSL_HEADER=(tuple, None),
MEDIA_ROOT=(environ.Path(), root("media")),
STATIC_ROOT=(environ.Path(), root("static")),
MEDIA_URL=(str, "/media/"),
STATIC_URL=(str, "/static/"),
TRUST_X_FORWARDED_HOST=(bool, False),
SENTRY_DSN=(str, ""),
SENTRY_ENVIRONMENT=(str, "development"),
COOKIE_PREFIX=(str, "hauki"),
INTERNAL_IPS=(list, []),
INSTANCE_NAME=(str, "Hauki"),
EXTRA_INSTALLED_APPS=(list, []),
ENABLE_DJANGO_EXTENSIONS=(bool, False),
MAIL_MAILGUN_KEY=(str, ""),
MAIL_MAILGUN_DOMAIN=(str, ""),
MAIL_MAILGUN_API=(str, ""),
RESOURCE_DEFAULT_TIMEZONE=(str, None),
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = root()
# Django environ has a nasty habit of complanining at level
# WARN about env file not being preset. Here we pre-empt it.
env_file_path = os.path.join(BASE_DIR, CONFIG_FILE_NAME)
if os.path.exists(env_file_path):
# Logging configuration is not available at this point
print(f"Reading config from {env_file_path}")
environ.Env.read_env(env_file_path)
DEBUG = env("DEBUG")
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
ADMINS = env("ADMINS")
INTERNAL_IPS = env("INTERNAL_IPS", default=(["127.0.0.1"] if DEBUG else []))
DATABASES = {"default": env.db()}
DATABASES["default"]["CONN_MAX_AGE"] = env("CONN_MAX_AGE")
if env("TEST_DATABASE_URL"):
DATABASES["default"]["TEST"] = env.db("TEST_DATABASE_URL")
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
AUTH_USER_MODEL = "users.User"
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/v1/"
LOGOUT_REDIRECT_URL = "/v1/"
RESOURCE_DEFAULT_TIMEZONE = env("RESOURCE_DEFAULT_TIMEZONE")
DJANGO_ORGHIERARCHY_DATASOURCE_MODEL = "hours.DataSource"
SYSTEM_DATA_SOURCE_ID = env("SYSTEM_DATA_SOURCE_ID")
SITE_ID = 1
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"timestamped_named": {
"format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "timestamped_named",
},
# Just for reference, not used
"blackhole": {
"class": "logging.NullHandler",
},
},
"loggers": {
"": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
"django": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
},
}
# Application definition
INSTALLED_APPS = [
"helusers.apps.HelusersConfig",
"modeltranslation",
"helusers.apps.HelusersAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.humanize",
"simple_history",
# disable Django’s development server static file handling
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework.authtoken",
"django_filters",
"django_orghierarchy",
"timezone_field",
"mptt",
# Apps within this repository
"users",
"hours",
# OpenAPI
"drf_spectacular",
] + env("EXTRA_INSTALLED_APPS")
if env("SENTRY_DSN"):
sentry_sdk.init(
dsn=env("SENTRY_DSN"),
environment=env("SENTRY_ENVIRONMENT"),
release=get_git_revision_hash(),
integrations=[DjangoIntegration()],
)
MIDDLEWARE = [
# CorsMiddleware should be placed as high as possible and above WhiteNoiseMiddleware
# in particular
"corsheaders.middleware.CorsMiddleware",
# Ditto for securitymiddleware
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"simple_history.middleware.HistoryRequestMiddleware",
]
# django-extensions is a set of developer friendly tools
if env("ENABLE_DJANGO_EXTENSIONS"):
INSTALLED_APPS.append("django_extensions")
ROOT_URLCONF = "hauki.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "hauki.wsgi.application"
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
# Map language codes to the (code, name) tuples used by Django
# We want to keep the ordering in LANGUAGES configuration variable,
# thus some gyrations
language_map = {x: y for x, y in GLOBAL_LANGUAGES}
try:
LANGUAGES = tuple((lang, language_map[lang]) for lang in env("LANGUAGES"))
except KeyError as e:
raise ImproperlyConfigured(f'unknown language code "{e.args[0]}"')
LANGUAGE_CODE = env("LANGUAGES")[0]
TIME_ZONE = "Europe/Helsinki"
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [root("locale")]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = env("STATIC_URL")
MEDIA_URL = env("MEDIA_URL")
STATIC_ROOT = env("STATIC_ROOT")
MEDIA_ROOT = env("MEDIA_ROOT")
# Whether to trust X-Forwarded-Host headers for all purposes
# where Django would need to make use of its own hostname
# fe. generating absolute URLs pointing to itself
# Most often used in reverse proxy setups
# https://docs.djangoproject.com/en/3.0/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = env("TRUST_X_FORWARDED_HOST")
# Specifies a header that is trusted to indicate that the request was using
# https while traversing over the Internet at large. This is used when
# a proxy terminates the TLS connection and forwards the request over
# a secure network. Specified using a tuple.
# https://docs.djangoproject.com/en/3.0/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = env("SECURE_PROXY_SSL_HEADER")
CORS_ORIGIN_ALLOW_ALL = True
CSRF_COOKIE_NAME = "%s-csrftoken" % env("COOKIE_PREFIX")
SESSION_COOKIE_NAME = "%s-sessionid" % env("COOKIE_PREFIX")
# DRF Settings
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": [
"rest_framework.renderers.JSONRenderer",
"hours.renderers.BrowsableAPIRendererWithoutForms",
],
"DEFAULT_FILTER_BACKENDS": [
"rest_framework.filters.OrderingFilter",
"django_filters.rest_framework.DjangoFilterBackend",
],
"DEFAULT_AUTHENTICATION_CLASSES": [
"hours.authentication.HaukiSignedAuthentication",
"hours.authentication.HaukiTokenAuthentication",
"rest_framework.authentication.SessionAuthentication",
],
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.IsAuthenticatedOrReadOnly",
],
"DEFAULT_METADATA_CLASS": "hours.metadata.TranslatedChoiceNamesMetadata",
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
}
# shown in the browsable API
INSTANCE_NAME = env("INSTANCE_NAME")
#
# Anymail
#
if env("MAIL_MAILGUN_KEY"):
ANYMAIL = {
"MAILGUN_API_KEY": env("MAIL_MAILGUN_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAIL_MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAIL_MAILGUN_API"),
}
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
elif not env("MAIL_MAILGUN_KEY") and DEBUG is True:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
#
# Django spectacular (OpenAPI) settings
#
SPECTACULAR_SETTINGS = {
    "TITLE": "Hauki API",
    "DESCRIPTION": """
API for the City of Helsinki opening hours database
# Introduction
To do.
# Authentication methods
<SecurityDefinitions />
""",
    "VERSION": "0.0.1",
    "EXTERNAL_DOCS": {
        "description": "Hauki API in GitHub",
        "url": "https://github.com/City-of-Helsinki/hauki",
    },
}

# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
local_settings_path = os.path.join(BASE_DIR, "local_settings.py")
if os.path.exists(local_settings_path):
    with open(local_settings_path) as fp:
        code = compile(fp.read(), local_settings_path, "exec")
    # Here, we execute local code on the server. Luckily, local_settings.py and BASE_DIR
    # are hard-coded above, so this cannot be used to execute any other files.
    exec(code, globals(), locals())  # nosec

# Django SECRET_KEY setting, used for password reset links and such
# NOTE(review): this assignment runs AFTER local_settings.py is executed, so a
# SECRET_KEY defined there is overwritten by the environment value — confirm
# that ordering is intended.
SECRET_KEY = env("SECRET_KEY")
if not DEBUG and not SECRET_KEY:
    raise Exception("In production, SECRET_KEY must be provided in the environment.")
# If a secret key was not supplied elsewhere, generate a random one and print
# a warning (logging is not configured yet?). This means that any functionality
# expecting SECRET_KEY to stay same will break upon restart. Should not be a
# problem for development.
if not SECRET_KEY:
    logger.warning(
        "SECRET_KEY was not defined in configuration."
        " Generating a temporary key for dev."
    )
    import random

    # SystemRandom draws from the OS entropy source, so the fallback key is
    # still cryptographically random even though it changes on every restart.
    system_random = random.SystemRandom()
    SECRET_KEY = "".join(
        [
            system_random.choice("abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")
            for i in range(64)
        ]
    )
|
4,481 | 04938e14f22c44437188469b53dfb05d2ecd4a5c | '''
tag名だけで変えてもいいよね??
ただし,置換するときは,「代表参照表現(参照表現)」のように,元の参照表現が分かるように配慮せよ.→何を言いたい!?
もしかして:
<mention> -> <mention representative="false"> 欲しい??「元の参照表現が分かる」とは <mention representative="true"> と区割りできればいいよね?
'''
from bs4 import BeautifulSoup, element
soup = BeautifulSoup(open("nlp.txt.xml"),"lxml")
mentions = soup.find_all('mention')
'''
<coreference>
<mention representative="true">
<sentence>1</sentence>
<start>33</start>
<end>34</end>
<head>33</head>
<text>computers</text>
</mention>
<mention>
<sentence>3</sentence>
<start>14</start>
<end>15</end>
<head>14</head>
<text>computers</text>
</mention>
</coreference>
'''
# Tag every non-representative <mention> explicitly, so representative and
# ordinary referring expressions can be told apart after the rewrite.
for m in mentions:
    # idiomatic negation instead of `has_attr(...) == False`
    if not m.has_attr('representative'):
        print("before: =>\n{}".format(m))
        # add new attribute
        m["representative"] = "false"
        print("after: =>\n{}".format(m))
|
4,482 | c173c4673fd716a8b88faf751639d52e9ea4ffab | '''
Copyright 2014-2015 Reubenur Rahman
All Rights Reserved
@author: reuben.13@gmail.com
'''
import XenAPI
# Connection and migration parameters — edit these before running.
inputs = {'xenserver_master_ip': '15.22.18.17',
          'xenserver_password': 'reuben',
          'xenserver_user': 'root',
          'vm_name': 'SLES11SP2x64',
          'target_host': 'xenserver-2'
          }

"""
NB: You need a shared storage to perform this action
"""
def main():
    """Connect to the XenServer pool master and live-migrate `vm_name`
    to `target_host` via XenMotion (shared storage required)."""
    try:
        print "Aquiring session with the provided xenserver IP..."
        session = XenAPI.Session('http://' + inputs['xenserver_master_ip'])
        print "Trying to connect to xenserver %s ..." % inputs['xenserver_master_ip']
        session.xenapi.login_with_password(inputs['xenserver_user'], inputs['xenserver_password'])
        print "Connected to xenserver !"
        # Resolve the VM and target host by (possibly non-unique) name label;
        # if several match, the loop leaves the LAST match bound.
        for vm_ref in session.xenapi.VM.get_by_name_label(inputs['vm_name']):
            vm_uuid = session.xenapi.VM.get_uuid(vm_ref)
            vm = session.xenapi.VM.get_by_uuid(vm_uuid)
        for host_ref in session.xenapi.host.get_by_name_label(inputs['target_host']):
            host_uuid = session.xenapi.host.get_uuid(host_ref)
            target_host = session.xenapi.host.get_by_uuid(host_uuid)
        print "Migrating VM using XenMotion..."
        try:
            # live migration: the VM keeps running while moving hosts
            session.xenapi.VM.pool_migrate(vm, target_host, {"live": "true"})
            msg = "Successfully migrated VM %s to %s" % (inputs['vm_name'], inputs['target_host'])
            print msg
        except Exception, e:
            print e
            msg = "Failed to Migrate VM %s to %s " % (inputs['vm_name'], inputs['target_host'])
            print msg
    except Exception, e:
        print "Caught exception: %s" % str(e)
    # NOTE(review): if Session() construction itself raised, `session` is
    # undefined here and logout() will raise NameError — confirm.
    session.logout()

# Start program
if __name__ == "__main__":
    main()
|
4,483 | 1ac3630e6433a2d11c716b558640cab7c559f6ba | # coding: utf8
from __future__ import unicode_literals
from nltk.tag import stanford
from .SequenceTagger import SequenceTagger
class POSTagger(SequenceTagger):
    """Persian part-of-speech tagger backed by the project's SequenceTagger.

    >>> tagger = POSTagger(model='resources/postagger.model')
    >>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])
    [('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]
    """
class StanfordPOSTagger(stanford.StanfordPOSTagger):
    """Thin wrapper around NLTK's Stanford POS tagger for Persian models.

    >>> tagger = StanfordPOSTagger(model_filename='resources/persian.tagger', path_to_jar='resources/stanford-postagger.jar')
    >>> tagger.tag(['من', 'به', 'مدرسه', 'رفته_بودم', '.'])
    [('من', 'PRO'), ('به', 'P'), ('مدرسه', 'N'), ('رفته_بودم', 'V'), ('.', 'PUNC')]
    """

    def __init__(self, model_filename, path_to_jar, *args, **kwargs):
        # '/' separates token from tag in Stanford's output format
        self._SEPARATOR = '/'
        # NOTE(review): super() is anchored at stanford.StanfordPOSTagger, so
        # the MRO lookup SKIPS that class's own __init__ — confirm intentional.
        super(stanford.StanfordPOSTagger, self).__init__(model_filename=model_filename, path_to_jar=path_to_jar, *args, **kwargs)

    def tag(self, tokens):
        # tag one sentence by delegating to the batch API
        return self.tag_sents([tokens])[0]

    def tag_sents(self, sentences):
        # Stanford treats spaces as token boundaries, so in-token spaces are
        # replaced with '_' before tagging
        refined = map(lambda s: [w.replace(' ', '_') for w in s], sentences)
        # NOTE(review): same MRO skip as in __init__ — confirm intentional.
        return super(stanford.StanfordPOSTagger, self).tag_sents(refined)
|
4,484 | 3da82bcff0a4f91c1245892bc01e9f743ea354a8 | import sys
# Reads n points `a`, and H = [m, hmin, hmax]; searches for a starting
# position such that consecutive gaps stay within [hmin, hmax] and the total
# span equals m.  (Indentation reconstructed — original whitespace was lost.)
n=int(input().strip())
a=list(input().strip().split(' '))
H=list(input().strip().split(' '))
a = [int(i) for i in a]
m=int(H[0])      # required total span
hmin=int(H[1])   # minimum allowed gap
hmax=int(H[2])   # maximum allowed gap
pos=0
found = 0
d=a[-1]-a[0]     # overall span of the given points
if(d==m):
    # the points already span exactly m: answer is the first point
    print(a[0])
elif(0<d<m):
    # try shifting the start left by i so that start + m lands within a
    # valid gap of the last point
    for i in range(hmin, hmax+1):
        fin1 = a[0]-i+m
        if(hmin<=fin1-a[-1]<=hmax or fin1==a[-1]):
            print(a[0]-i)
            found = 1
            break
if(found == 0):
    # scan every candidate start index i for a window of valid gaps
    i = 0
    while(i<(n-1)):
        found = 0
        invalid = 0
        d = a[i+1]-a[i]
        print(a[i], a[i+1], d)   # NOTE(review): looks like leftover debug output
        if(d<hmin or d>hmax):
            i=i+1
            continue
        for j in range(i+1, n):
            d = a[j]-a[j-1]
            print(a[i], a[j], d)   # NOTE(review): leftover debug output?
            if(d<hmin or d>hmax):
                i = j-1
                invalid = 1
                break
            if(a[j]-a[i]>m):
                invalid = 1
                break
            if(a[j]-a[i]==m):
                found = 1
                invalid = 0
                break
        if(invalid == 1):
            i = i+1
            continue
        # accept if an exact window was found, or the tail can be padded to m
        if(found == 1 or (a[-1]-a[i]+hmin<=m and a[-1]-a[i]+hmax>=m)):
            print(a[i])
            break
        i = i+1
if(n == 1):
    # single point: place the start hmax before position a[0]+hmax-m+m
    print(a[0]+hmax-m)
|
4,485 | 3956d4cdb0a8654b6f107975ac003ce59ddd3de1 | import random
def generatePassword(length=None):
    """Generate a random password guaranteed to contain at least one
    lowercase letter, one uppercase letter, one digit and one special
    character.

    :param length: desired password length; when None (the default, the
        original behaviour) a random length in [10, 25] is chosen once and
        reused for every retry.
    :return: the generated password string
    """
    randomChars = "-|@.,?/!~#%^&*(){}[]\=*"
    if length is None:
        length = random.randint(10, 25)
    while True:
        password = ""
        numLowerCase = numUpperCase = numSpecialCase = numNumber = 0
        for _ in range(length):
            charType = random.randint(0, 3)
            if charType == 0:
                # lowercase letters 'a'..'z' — upper bound is 122 ('z');
                # the original used 121, silently excluding 'z'
                password += chr(random.randint(97, 122))
                numLowerCase += 1
            elif charType == 1:
                # uppercase letters 'A'..'Z'
                password += chr(random.randint(65, 90))
                numUpperCase += 1
            elif charType == 2:
                # digits '0'..'9'
                password += chr(random.randint(48, 57))
                numNumber += 1
            else:
                # one of the allowed special characters
                password += randomChars[random.randint(0, len(randomChars) - 1)]
                numSpecialCase += 1
        # retry until every character class is represented at least once
        if numLowerCase and numUpperCase and numNumber and numSpecialCase:
            return password
def main():
    """Entry point: generate and print a single password."""
    print(generatePassword())

main()
|
4,486 | fe3584dd858c06d66215b4a182adf87d35324975 | from pyecharts import options as opts
from pyecharts.charts import *
import pandas as pd
import namemap
from pyecharts.globals import ThemeType
#
import time
import json
import requests
from datetime import datetime
import pandas as pd
import numpy as np
def read_country_code():
    """
    Return a Chinese-name -> English-name country dictionary built by
    inverting namemap.nameMap.
    :return:
    """
    # swap keys and values of nameMap in a single comprehension
    return {chinese: english for english, chinese in namemap.nameMap.items()}
def read_csv():
    """
    Read 2019-nCoV.csv and return (english_country_names, confirmed_counts),
    keeping only rows whose name can be mapped through namemap.
    :return:
    """
    country_dict = read_country_code()
    data = pd.read_csv("2019-nCoV.csv", index_col=False)
    countrys_names = list()
    confirmed_count = list()
    for x in range(len(data.index)):
        if data['name'].iloc[x] in country_dict.keys():
            countrys_names.append(country_dict[data['name'].iloc[x]])
            confirmed_count.append(data['confirm'].iloc[x])
        else:
            # unmapped country names are only reported, not included
            print(data['name'].iloc[x])
    return countrys_names, confirmed_count
def catch_data():
    """Fetch the latest epidemic snapshot from Tencent's disease_h5 API
    and return it as a dict."""
    api = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
    payload = requests.get(url=api).json()
    # the interesting part is a JSON string nested under 'data'
    return json.loads(payload['data'])
# Helper extractors for the nested per-city stat dicts.
def confirm(x):
    """Return the 'confirm' count from a cell that is a dict or its repr.

    ast.literal_eval replaces eval so text coming from an external API
    cannot execute arbitrary code.
    """
    return ast.literal_eval(str(x))['confirm']
def suspect(x):
    """Return the 'suspect' count from a cell that is a dict or its repr.

    ast.literal_eval replaces eval so external text cannot execute code.
    """
    return ast.literal_eval(str(x))['suspect']
def dead(x):
    """Return the 'dead' count from a cell that is a dict or its repr.

    ast.literal_eval replaces eval so external text cannot execute code.
    """
    return ast.literal_eval(str(x))['dead']
def heal(x):
    """Return the 'heal' count from a cell that is a dict or its repr.

    ast.literal_eval replaces eval so external text cannot execute code.
    """
    return ast.literal_eval(str(x))['heal']
def draw_map():
    """
    Build the China dashboard (two pies + province map) and a world map,
    then render everything into one HTML page.
    """
    data = catch_data()
    dict_keys = data.keys()
    # China
    lastUpdateTime = data['lastUpdateTime']
    chinaTotal = data['chinaTotal']
    chinaAdd = data['chinaAdd']
    # example snapshot: {'confirm': 84970, 'heal': 79963, 'dead': 4645, 'nowConfirm': 362, 'suspect': 11,
    # 'nowSevere': 13, 'importedCase': 1868, 'noInfect': 108}
    areaTree = data['areaTree']
    # areaTree[0] is China; flatten province -> city into a list of dicts
    china_data = areaTree[0]['children']
    china_list = []
    for a in range(len(china_data)):
        province = china_data[a]['name']
        province_list = china_data[a]['children']
        for b in range(len(province_list)):
            city = province_list[b]['name']
            total = province_list[b]['total']
            today = province_list[b]['today']
            china_dict = {}
            china_dict['province'] = province
            china_dict['city'] = city
            china_dict['total'] = total
            china_dict['today'] = today
            china_list.append(china_dict)
    china_data = pd.DataFrame(china_list)
    china_data.head()
    # map the extractor helpers over the nested per-city dict columns
    china_data['confirm'] = china_data['total'].map(confirm)
    china_data['suspect'] = china_data['total'].map(suspect)
    china_data['dead'] = china_data['total'].map(dead)
    china_data['heal'] = china_data['total'].map(heal)
    china_data['addconfirm'] = china_data['today'].map(confirm)
    #['addsuspect'] = china_data['today'].map(suspect)
    #china_data['adddead'] = china_data['today'].map(dead)
    #china_data['addheal'] = china_data['today'].map(heal)
    china_data = china_data[["province","city","confirm","suspect","dead","heal","addconfirm"]]
    china_data.head()
    total_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width = '900px',height ='350px')) # theme and canvas size
    total_pie.add("",[list(z) for z in zip(chinaTotal.keys(), chinaTotal.values())],
            center=["50%", "70%"], # pie position
            radius=[50, 80]) # inner/outer radius
    total_pie.set_global_opts(
        title_opts=opts.TitleOpts(title="全国总量",subtitle=("截止"+lastUpdateTime)))
    total_pie.set_series_opts(label_opts=opts.LabelOpts(formatter="{c}")) # label format
    total_pie.render_notebook()
    totaladd_pie = Pie(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS,width = '900px',height ='350px')) # theme and canvas size
    totaladd_pie.add("",[list(z) for z in zip(chinaAdd.keys(), chinaAdd.values())],
            center=["50%", "50%"],
            radius=[50, 80])
    totaladd_pie.set_global_opts(
        title_opts=opts.TitleOpts(title="昨日新增"))
    totaladd_pie.set_series_opts(label_opts=opts.LabelOpts(formatter="{c}")) # label format
    totaladd_pie.render_notebook()
    # aggregate city rows into per-province confirmed totals
    area_data = china_data.groupby("province")["confirm"].sum().reset_index()
    area_data.columns = ["province","confirm"]
    area_map = Map(init_opts=opts.InitOpts(theme=ThemeType.WESTEROS))
    area_map.add("",[list(z) for z in zip(list(area_data["province"]), list(area_data["confirm"]))], "china",is_map_symbol_show=False)
    area_map.set_global_opts(title_opts=opts.TitleOpts(title="2019_nCoV中国疫情地图"),visualmap_opts=opts.VisualMapOpts(is_piecewise=True,
                pieces = [
                    {"min": 1001 , "label": '>1000',"color": "#893448"}, # omitting "max" means unbounded above
                    {"min": 500, "max": 1000, "label": '500-1000',"color": "#ff585e"},
                    {"min": 101, "max": 499, "label": '101-499',"color": "#fb8146"},
                    {"min": 10, "max": 100, "label": '10-100',"color": "#ffb248"},
                    {"min": 0, "max": 9, "label": '0-9',"color" : "#fff2d1" }]))
    area_map.render_notebook()
    page = Page()
    page.add(total_pie)
    page.add(totaladd_pie)
    page.add(area_map)
    """
    绘制世界地图
    遇到一个很神奇的问题:
    两个列表必须写死数据地图才会渲染数据,如果数据是从方法中获得,则地图不渲染数据
    :return:
    """
    # Fix for the issue noted in the string above: confirmed_count held numpy
    # ints, which must be converted to plain Python ints before pyecharts
    # will render them (thanks to reader @李康伟 for reporting).
    countrys_names, confirmed_count = read_csv()
    confirmed_count_list = []
    for item in confirmed_count:
        confirmed_count_list.append(int(item))
    # (previously: two very long commented-out hard-coded fallback lists for
    #  countrys_names / confirmed_count; dropped here for readability)
    c = (
        Map()
        .add(
            "确诊人数",
            [list(z) for z in zip(countrys_names, confirmed_count_list)],
            is_map_symbol_show=False,
            maptype="world",
            label_opts=opts.LabelOpts(is_show=False),
            itemstyle_opts=opts.ItemStyleOpts(color="rgb(49,60,72)")
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            title_opts=opts.TitleOpts(title="全球 2019-nCoV 地图"),
            visualmap_opts=opts.VisualMapOpts(max_=1700000),
        )
        #.render("map_world.html")
    )
    page.add(c)
    page.render('covid-19 中国和世界数据.html')
|
4,487 | 90a220775efcc8ff9e83f1a1f011f424ddc3476d | import tensorflow as tf
from model import CabbageModel
import numpy as np
from krx import KrxCrawler
from naver_stock import StockModel as sm
from scattertest import scattertest as st
class CabbageController:
    """Restores a saved TensorFlow 1.x linear-model checkpoint to predict a
    value (presumably cabbage price) from four weather features, and
    dispatches a few crawler/plot utilities via `exec`."""

    def __init__(self):
        #def __init__(self, avg_temp, min_temp, max_temp, rain_fall):
        #self._avg_temp = avg_temp
        #self._min_temp = min_temp
        #self._max_temp= max_temp
        #self._rain_fall = rain_fall
        # hard-coded placeholder features (see commented-out parametrized ctor)
        self._avg_temp = 1
        self._min_temp = 2
        self._max_temp = 3
        self._rain_fall = 4

    def service(self):
        """Restore the checkpoint and return the model's scalar prediction."""
        # None -> batch (row) dimension, 4 -> feature (column) count
        X = tf.placeholder(tf.float32, shape=[None,4])
        # Y is the future value being predicted, so it has no placeholder here
        W = tf.Variable(tf.random_normal([4,1]), name='weight')
        b = tf.Variable(tf.random_normal([1]), name='bias')
        saver = tf.train.Saver()
        # TensorFlow session scope
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # the restored checkpoint overwrites the random W and b above
            saver.restore(sess, 'cabbage/saved_model/saved.ckpt')
            # single-row matrix-shaped input
            data = [[self._avg_temp, self._min_temp, self._max_temp, self._rain_fall],]
            arr = np.array(data, dtype = np.float32)
            # NOTE(review): arr[0:4] slices ROWS (there is only one row), not
            # the four features — presumably `arr` alone was meant; behaves the
            # same for a single-row array.
            dict = sess.run(tf.matmul(X,W) +b,{X: arr[0:4]})
            return dict[0]

    def exec(self, flag):
        """Dispatch: 'd' -> KRX disclosure crawl, 'e' -> Naver stock scrape
        for ticker 005930, 'f' -> scatter test plot."""
        if flag == 'd':
            url = "http://kind.krx.co.kr/disclosureSimpleSearch.do?method=disclosureSimpleSearchMain"
            d = KrxCrawler(url)
            d.scrap()
        elif flag == 'e':
            url = ''
            e = sm('005930')
            e.selWeb()
            #e.scrap()
        elif flag == 'f':
            scat = st()
            scat.test()
4,488 | 5e6bbb10ec82e566c749dd4d794eabd2e8f7a648 | #!/usr/bin/env python3
import numpy as np
from DMP.PIDMP import RLDMPs
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
np.random.seed(50)
# start / goal joint angles for the pouring DMP (three dimensions,
# plotted below as raw/yaw/pitch)
dmp_y0 = np.array([-1.52017496, 0.04908739, 1.41433029])
dmp_goal = np.array([-1.50848603, 0.0591503 , 1.44347592])
# weight-file name encodes run parameters, '_'-separated:
# w_<ep>_<numofball>_<arm>_<n_dmps>_<n_bfs>_<decay>_<dt>_<...>
load_file_name = "w_0_2_right_3_100_1000.0_0.01_4"
#load_file_name = raw_input('file name: ')
load_file_name_list = load_file_name.split('_')
### learning ep
ep = int(load_file_name_list[1])
### pouring number of ball to the other tube
numofball = int(load_file_name_list[2])
### which arm do the pouring motion
pour_arm = load_file_name_list[3]
n_dmps = int(load_file_name_list[4])
n_bfs = int(load_file_name_list[5])
decay = float(load_file_name_list[6])
dt = float(load_file_name_list[7])
### initial DMP
rl = RLDMPs(n_dmps = n_dmps , n_bfs = n_bfs , decay = decay, y0 = dmp_y0 , goal = dmp_goal,ay=np.ones(n_dmps)*10.0,dt = dt)
rl.load_weight(load_file_name)
traj_init = rl.predict().y   # trajectory from the loaded weights, no noise
track = rl.rollout()         # sampled rollouts (presumably with exploration noise)
print(rl.w)
x = np.linspace(0,1,len(traj_init[0][:,0]))   # normalized time axis
# One figure per DMP dimension: five sampled rollouts in blue against the
# unperturbed initial trajectory in red.  ("raw" label kept verbatim.)
for dim, axis_label in enumerate(("raw (rad)", "yaw (rad)", "pitch (rad)")):
    for rollout_idx in range(5):
        plt.scatter(x, track.y[rollout_idx][:, dim], c='b',
                    label="random" if rollout_idx == 0 else None)
    plt.scatter(x, traj_init[0][:, dim], c='r', label="initial")
    plt.xlabel("time(s)")
    plt.ylabel(axis_label)
    plt.legend(loc = 4)
    plt.show()
|
4,489 | af80cb4d4ce5c071efc39e85f89bb412cff6bf6e | IMAGE_SIZE=(640, 480)
|
4,490 | eee60a6f46549ededfbc7b0b294ab723e2e73f7e | from .base import *
RAVEN_CONFIG = {}  # Sentry/raven reporting disabled in this settings profile
# NOTE(review): '*' accepts any Host header — acceptable only behind a proxy
# that validates hosts; confirm for this deployment.
ALLOWED_HOSTS = ['*']
|
4,491 | f996dffcb9650663278ec1e31d9f88d50142f4ea | class TrieNode:
    def __init__(self):
        # children auto-vivify on access thanks to defaultdict(TrieNode)
        self.children: Dict[str, TrieNode] = collections.defaultdict(TrieNode)
        # the complete word terminating at this node, or None
        self.word: Optional[str] = None
class Solution:
    def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
        """Return every word from `words` traceable on `board` by moves
        between 4-adjacent cells, each cell used at most once per word
        (LeetCode 212).

        The trie is built from plain nested dicts, which makes the class
        self-contained and drops the redundant child-existence check the
        defaultdict-based TrieNode version carried.  The board is mutated
        ('*' marks visited cells) but fully restored before returning.
        """
        m = len(board)
        n = len(board[0])
        ans: List[str] = []
        # Trie as nested dicts; WORD_KEY marks a terminal node and stores
        # the full word (cleared after first report so each word appears once).
        WORD_KEY = '$'
        root: dict = {}
        for word in words:
            node = root
            for ch in word:
                node = node.setdefault(ch, {})
            node[WORD_KEY] = word

        def dfs(i: int, j: int, node: dict) -> None:
            # out of bounds
            if i < 0 or i == m or j < 0 or j == n:
                return
            ch = board[i][j]
            # '*' marks a cell already used on the current path
            if ch == '*':
                return
            child = node.get(ch)
            if child is None:
                return
            found = child.get(WORD_KEY)
            if found:
                ans.append(found)
                child[WORD_KEY] = None  # dedupe: report each word only once
            board[i][j] = '*'
            dfs(i + 1, j, child)
            dfs(i - 1, j, child)
            dfs(i, j + 1, child)
            dfs(i, j - 1, child)
            board[i][j] = ch  # restore the cell on backtrack

        for i in range(m):
            for j in range(n):
                dfs(i, j, root)
        return ans
|
4,492 | 8fedaeb13fde117cf6b7ace23b59c26e4aab2bc2 | a = input()
# Reverse only the letters of `a`, keeping non-letter characters at their
# original positions.
b = [ch for ch in a if ch.isalpha()]
# Record positions with enumerate: the original `a.index(i)` returned only
# the FIRST occurrence, misplacing repeated non-letter characters.
ind = [pos for pos, ch in enumerate(a) if not ch.isalpha()]
c = list(reversed(b))
for pos in ind:
    c.insert(pos, a[pos])
print(''.join(c))
|
4,493 | b216c0f92bcf91fd538eabf0239cf149342ef2eb | from django.shortcuts import render
from django.views.generic import ListView
from auth_person.models import Post_news, User
# Create your views here.
def blog(request, foo):
    """Render the blog page for the user whose login is `foo`."""
    return render(request, 'blog/blog.html', context={'login': foo})
class feed(ListView):
template_name = 'blog/feed.html'
model = Post_news
paginate_by = 10
    def get_queryset(self):
        """Posts by the user whose login is the URL kwarg `foo`, newest first."""
        user_name = self.kwargs['foo']
        print(user_name)  # NOTE(review): looks like leftover debug output
        return Post_news.objects.all().order_by('-date_post').filter(user__login=user_name)
def get_context_data(self, *, object_list=None, **kwargs):
|
4,494 | 9b715fb95e89804a57ea77a98face673b57220c6 | import socket
import struct
def parsing_ethernet_header(data):
    """Unpack a 14-byte Ethernet header and print dst/src MAC and EtherType."""
    ethernet_header=struct.unpack("!6c6c2s",data)
    ether_dest = convert_ethernet_address(ethernet_header[0:6])   # destination MAC
    ether_src = convert_ethernet_address(ethernet_header[6:12])   # source MAC
    # index 12 is the 2-byte EtherType field (0x0800 = IPv4); the variable
    # name is misleading — this is not an IP version
    ip_header="0x"+ethernet_header[12].hex()
    print("=========ethernet header==========")
    print("src_mac_address:", ether_src)
    print("dest_mac_address:",ether_dest)
    print("ip_version",ip_header)
def convert_ethernet_address(data):
    """Join a sequence of single-byte fields into a colon-separated
    lowercase-hex MAC string, e.g. '00:1a:ff'."""
    return ":".join(octet.hex() for octet in data)
def parsing_ip_header(data):
    """Unpack a 20-byte IPv4 header (no options) and pretty-print its fields."""
    ip_header=struct.unpack("!1c1c2s2s2s1c1c2s4c4c",data)
    print("============ip header=============")
    # byte 0: version in the high nibble, IHL in the low nibble
    ip_ver_len= int(ip_header[0].hex(), 16)
    print("ip_version:",ip_ver_len // 16)
    # NOTE(review): this is the IHL (header length in 32-bit words), not a
    # packet length as the label suggests
    print("ip_length:", ip_ver_len % 16)
    # byte 1: DSCP/ECN — note the //16 split uses nibbles, while DSCP is
    # actually the high 6 bits and ECN the low 2
    differ_expli=int(ip_header[1].hex(),16)
    print("differentiated_service_codepoint:",differ_expli//16)
    print("explicit_congestion_notification:",differ_expli%16)
    total_length=int(ip_header[2].hex(),16)
    print("total_length:",total_length)
    identification=ip_header[3].hex()
    print("identification:0x",identification)
    # bytes 6-7: flags (top 3 bits) + fragment offset (low 13 bits)
    flags=ip_header[4].hex()
    print("flags:0x",flags)
    flags_int=int(ip_header[4].hex(),16)
    print(">>>reserved_bit:",flags_int>>15)
    print(">>>fragments:",(flags_int>>13)& 0x0001)
    print(">>>fragments_offset:",flags_int & 0x1fff)
    time_to_live=int(ip_header[5].hex(),16)
    print("Time to live:",time_to_live)
    protocol=ip_header[6].hex()
    print("protocol:0x",protocol)
    header_check=ip_header[7].hex()
    print("header checksum:0x",header_check)
    source_addr=convert_ip_address(ip_header[8:12])
    print("source_ip_address:",source_addr)
    dest_addr=convert_ip_address(ip_header[12:16])
    print("dest_ip_address:",dest_addr)
def ch_UDP_TCP(data):
    """Return the IP protocol number carried in a single-byte field
    (6 = TCP, 17 = UDP)."""
    (raw,) = struct.unpack("1c", data)
    return int(raw.hex(), 16)
def convert_ip_address(data):
    """Join a sequence of single-byte fields into dotted-decimal IPv4
    notation, e.g. '192.168.0.1'."""
    return ".".join(str(int(octet.hex(), 16)) for octet in data)
def parsing_TCP_header(data):
    """Unpack a 20-byte TCP header and pretty-print its fields and flags."""
    print("=============tcp header==============")
    TCP_header=struct.unpack("!2s2s1I1I2s2s2s2s",data)
    src_port=int(TCP_header[0].hex(),16)
    print("src_port:",src_port)
    dec_port=int(TCP_header[1].hex(),16)
    print("dec_port:",dec_port)
    seq_num=TCP_header[2]
    print("seq_num:",seq_num)
    ack_num=TCP_header[3]
    print("ack_num:",ack_num)
    # data offset: high 4 bits of the 16-bit offset/flags word
    header_len=(int(TCP_header[4].hex(),16)>>12)&0x000f
    print("header_len:",header_len)
    # low 12 bits: reserved + 9 flag bits
    flags=int(TCP_header[4].hex(),16)&0x0fff
    print("flags:",flags)
    reserved=flags>>9
    print(">>>reserved",reserved)
    nonce=(flags>>8)&0x001
    print(">>>nonce:",nonce)
    cwr=(flags>>7)&0x001
    print(">>>cwr:",cwr)
    # NOTE(review): bit 6 (ECE) is never extracted or printed — confirm
    # whether that omission is intentional
    urgent=(flags>>5)&0x001
    print(">>>urgent:",urgent)
    ack=(flags>>4)&0x001
    print(">>>ack:",ack)
    push=(flags>>3)&0x001
    print(">>>push:",push)
    reset=(flags>>2)&0x001
    print(">>>reset:",reset)
    syn=(flags>>1)&0x001
    print(">>>syn:",syn)
    fin=flags&0x001
    print(">>>fin:",fin)
    window_size=int(TCP_header[5].hex(),16)
    print("Window_size_value:",window_size)
    checksum=int(TCP_header[6].hex(),16)
    print("checksum:",checksum)
    urgent_pointer=int(TCP_header[7].hex(),16)
    print("urgent_pointer:",urgent_pointer)
def parsing_UDP_header(data):
    """Unpack an 8-byte UDP header and pretty-print its four 16-bit fields."""
    src, dst, length, checksum = struct.unpack("2s2s2s2s", data)
    print("=============udp_header=============")
    print("src_port:", int(src.hex(), 16))
    print("dst_port:", int(dst.hex(), 16))
    print("leng:", int(length.hex(), 16))
    print("header_checksum:0x", checksum.hex())
# AF_PACKET raw socket filtered to EtherType 0x0800, i.e. IPv4 frames only
# (requires Linux and root privileges)
recv_socket = socket.socket(socket.AF_PACKET,socket.SOCK_RAW,socket.ntohs(0x0800))
print("<<<<<<Packet Capture Start>>>>>>>")
while True:
    data = recv_socket.recvfrom(20000)
    parsing_ethernet_header(data[0][0:14])   # bytes 0-13: Ethernet header
    parsing_ip_header(data[0][14:34])        # bytes 14-33: IPv4 header (no options)
    # the IP protocol byte (offset 23) selects the transport parser
    flag =ch_UDP_TCP(data[0][23:24])
    if flag==6:
        parsing_TCP_header(data[0][34:54])   # TCP
    elif flag==17:
        parsing_UDP_header(data[0][34:42])   # UDP
|
4,495 | 7a243f5e24d81d3395cc790dface5e795b9c04e6 | """Distribution script for unitreport."""
import setuptools
# Read the long description; pin UTF-8 so builds don't depend on the
# platform's default locale encoding (e.g. cp1252 on Windows would fail on
# non-ASCII characters in README.md).
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setuptools.setup(
    name="unitreport",
    version="0.1.1",
    author="annahadji",
    author_email="annahadji@users.noreply.github.com",
    description="A small unittest-based tool for generating single page html reports in Python.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    keywords="static unittest report generator Markdown plots tables",
    url="https://github.com/annahadji/unitreport",
    packages=["unitreport"],
    # ship the bundled jinja2 templates inside the wheel/sdist
    package_data={"unitreport": ["templates/**"]},
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3 :: Only",
        "Topic :: Scientific/Engineering :: Visualization",
    ],
    python_requires=">=3.6",
    install_requires=["jinja2", "markdown", "matplotlib"],
)
|
4,496 | b63ed9e09b9e8c539aff765d719f3610283663fe | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from .utils import parse_query_parameters
class CollectionMixin(with_metaclass(ABCMeta, object)):
    """Mixin adding cursor-based iteration on top of an abstract `list` API."""

    @abstractmethod
    def list(self, size=100, offset=None, **filter_fields):
        """
        :param size: A limit on the number of objects to be returned.
        :type size: int
        :param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.
        :type offset: uuid.UUID
        :param filter_fields: Dictionary containing values to filter for
        :type filter_fields: dict
        :rtype: dict
        :return: Dictionary containing dictionaries
        """

    def iterate(self, window_size=10, **filter_fields):
        """Yield items one page at a time until the response has no 'next' URL."""
        current_offset = None
        while True:
            response = self.list(size=window_size, offset=current_offset, **filter_fields)
            for item in response['data']:
                yield item
            next_url = response.get('next', None)
            if next_url is None:
                return
            # pull the next page's cursor out of the 'next' URL's query string
            current_offset = parse_query_parameters(next_url).get('offset')[0]
|
4,497 | ba13bcf9e89ae96e9a66a42fc4e6ae4ad33c84b4 | import mclient
from mclient import instruments
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
#from pulseseq import sequencer, pulselib
mpl.rcParams['figure.figsize']=[6,4]   # default figure size for this session
# handles to shared instrument / measurement-info objects
qubit_info = mclient.get_qubit_info('qubit_info')
qubit_ef_info = mclient.get_qubit_info('qubit_ef_info')
vspec = instruments['vspec']
awg1 = instruments['AWG1']
qubit_brick = instruments['qubit_brick']
qubit_ef_brick = instruments['qubit_ef_brick']
va_lo = instruments['va_lo']
funcgen = instruments['funcgen']
alazar = instruments['alazar']
spec_brick = instruments['spec_brick']
spec_info = mclient.get_qubit_info('spec_info')
cavity_info = mclient.get_qubit_info('cavity_info')
field = 0.0    # recorded as 'field_current' attribute on datasets (see do_T1)
temp = 'cd'    # recorded as 'temperature' attribute on datasets (see do_T1)
#voltage = laser_info.get_DCOffset()
################################################################################################################################################
from scripts.single_qubit import T1measurement, T2measurement
# from scripts.single_qubit import T1measurement_QP, T2measurement_QP
# from scripts.single_qubit import FT1measurement, EFT2measurement, GFT2measurement
# from scripts.single_qubit import efrabi
# from scripts.single_qubit import efrabi_QP
# from scripts.single_qubit import QPdecay
from scripts.single_qubit import rabi
def try_twice(func, N=2, **kwargs):
    """Call `func(**kwargs)` up to N times and return the first successful
    result.  Exceptions are printed and swallowed; returns None (implicitly)
    if every attempt fails."""
    for i in range(N):
        try:
            return func(**kwargs)
        except Exception, e:
            print 'Error %s' % (e,)
            pass
    print 'Failed to do %s %s times...' % (func, N)
# work in progress. For looping over multiple qubits
# def T1T2Loop(qubit_params):
# # from scripts.single_qubit.t1t2_plotting import do_T1_plot, do_T2_plot, do_T2echo_plot
# T1s={}
# T2s={}
# T2Es={}
# rep_rates = [500]
# for qubit in enumerate(qubit_params)
# T1s[qubit] = {'t1s':[], 't1s_err':[], 'ofs':[], 'ofs_err':[], 'amps':[], 'amps_err':[],}
# T2s[qubit] = {'t2s':[], 't2s_err':[], 't2freqs':[], 't2freqs_err':[], 'amps':[], 'amps_err':[], 't22s':[], 't22s_err':[], 't22freqs':[], 't22freqs_err':[], 'amp2s':[], 'amp2s_err':[],}
# T2Es[qubit] = {'t2es':[], 't2es_err':[]}
# for i in range(1000): #set number of repetitions.
# for qubit, params in enumerate(qubit_params)
# qubit_info = params[1]
# qubit_freq = params[2]
# if 1:
# for rep_rate in rep_rates:
# funcgen.set_frequency(rep_rate)
# do_T1_plot(qubit_info, 500, np.concatenate((np.linspace(0, 10e3, 21), np.linspace(11e3, 60e3, 50))), T1s[qubit_info], 300*(qubit_ind+1))
# do_T2_plot(qubit_info, 500, np.linspace(0, 10e3, 101), 1000e3, T2s[qubit_info], 301*(qubit_ind+1), double_freq=False)
# do_T2echo_plot(qubit_info, 500, np.linspace(1e3, 20e3, 101), 500e3, T2Es[qubit_info], 302*(qubit_ind+1))
def do_ROspec_plot(qubit_info, n_avg, freqs, ro_powers, ro_fits, fig_num, var=None):
    """Run a readout-cavity spectroscopy sweep, append the fit results to
    `ro_fits`, and refresh the running summary figure `fig_num`.

    `ro_fits` accumulates across calls: lists under 'x0s', 'As', 'ws'
    (each with a matching '_err' list) and optionally 'vars' when `var`
    is supplied; 'vars' then becomes the x-axis of all three panels.
    Returns the spectroscopy measurement object.
    """
    from scripts.single_cavity import rocavspectroscopy
    alazar.set_naverages(n_avg)
    rospec = rocavspectroscopy.ROCavSpectroscopy(qubit_info, ro_powers, freqs) #qubit_pulse=np.pi/2
    rospec.measure()
    plt.close()
    # fit_params[0] holds the fitted values, fit_params[1] their errors
    ro_fits['x0s'].append(rospec.fit_params[0][2])
    ro_fits['x0s_err'].append(rospec.fit_params[1][2])
    ro_fits['As'].append(rospec.fit_params[0][1])
    ro_fits['As_err'].append(rospec.fit_params[1][1])
    ro_fits['ws'].append(rospec.fit_params[0][3])
    ro_fits['ws_err'].append(rospec.fit_params[1][3])
    if var!=None:
        ro_fits['vars'].append(var)
    plt.figure(fig_num)
    plt.clf()
    # x-axis is the iteration index unless per-point 'vars' were supplied
    if ro_fits['vars']==[]:
        plt.subplot(311).axis(xmin=-len(ro_fits['x0s'])*0.10, xmax=len(ro_fits['x0s'])*1.10)
        plt.errorbar(range(len(ro_fits['x0s'])),ro_fits['x0s'],ro_fits['x0s_err'],fmt='go')
    else:
        xmin=min(ro_fits['vars'])
        xmax=max(ro_fits['vars'])
        plt.subplot(311).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax))
        plt.errorbar(ro_fits['vars'],ro_fits['x0s'],ro_fits['x0s_err'],fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("Center frequency(MHz)")
    if ro_fits['vars']==[]:
        plt.subplot(312).axis(xmin=-len(ro_fits['As'])*0.10, xmax=len(ro_fits['As'])*1.10)
        plt.errorbar(range(len(ro_fits['As'])),ro_fits['As'],ro_fits['As_err'],fmt='go')
    else:
        xmin=min(ro_fits['vars'])
        xmax=max(ro_fits['vars'])
        plt.subplot(312).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax))
        plt.errorbar(ro_fits['vars'],ro_fits['As'],ro_fits['As_err'],fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("Amplitude")
    if ro_fits['vars']==[]:
        plt.subplot(313).axis(xmin=-len(ro_fits['ws'])*0.10, xmax=len(ro_fits['ws'])*1.10)
        plt.errorbar(range(len(ro_fits['ws'])),ro_fits['ws'],ro_fits['ws_err'],fmt='go')
    else:
        xmin=min(ro_fits['vars'])
        xmax=max(ro_fits['vars'])
        plt.subplot(313).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax))
        plt.errorbar(ro_fits['vars'],ro_fits['ws'],ro_fits['ws_err'],fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("Width")
    return rospec
def do_spec_plot(qubit_info, n_avg, freqs, spec_params, spec_fits, fig_num, plen=50000, amp=0.01,var=None):
    """Run a qubit spectroscopy sweep, append the fit results to
    `spec_fits`, and refresh the running summary figure `fig_num`.

    Mirrors do_ROspec_plot: `spec_fits` accumulates 'x0s', 'ofs', 'ws'
    (each with a '_err' list) and optionally 'vars'.  Returns the
    spectroscopy measurement object.
    """
    from scripts.single_qubit import spectroscopy as spectroscopy
    alazar.set_naverages(n_avg)
    s = spectroscopy.Spectroscopy(qubit_info, freqs, spec_params,
                    plen, amp, plot_seqs=False,subtraction = False) #1=1ns5
    s.measure()
    plt.close()
    spec_fits['x0s'].append(s.fit_params['x0'].value)
    spec_fits['x0s_err'].append(s.fit_params['x0'].stderr)
    spec_fits['ofs'].append(s.fit_params['ofs'].value)
    spec_fits['ofs_err'].append(s.fit_params['ofs'].stderr)
    spec_fits['ws'].append(s.fit_params['w'].value)
    spec_fits['ws_err'].append(s.fit_params['w'].stderr)
    if var!=None:
        spec_fits['vars'].append(var)
    plt.figure(fig_num)
    plt.clf()
    # x-axis is the iteration index unless per-point 'vars' were supplied
    if spec_fits['vars']==[]:
        plt.subplot(311).axis(xmin=-len(spec_fits['x0s'])*0.10, xmax=len(spec_fits['x0s'])*1.10)
        plt.errorbar(range(len(spec_fits['x0s'])),spec_fits['x0s'],spec_fits['x0s_err'],fmt='go')
    else:
        xmin=min(spec_fits['vars'])
        xmax=max(spec_fits['vars'])
        plt.subplot(311).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax))
        plt.errorbar(spec_fits['vars'],spec_fits['x0s'],spec_fits['x0s_err'],fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("Center frequency(MHz)")
    if spec_fits['vars']==[]:
        plt.subplot(312).axis(xmin=-len(spec_fits['ofs'])*0.10, xmax=len(spec_fits['ofs'])*1.10)
        plt.errorbar(range(len(spec_fits['ofs'])),spec_fits['ofs'],spec_fits['ofs_err'],fmt='go')
    else:
        xmin=min(spec_fits['vars'])
        xmax=max(spec_fits['vars'])
        plt.subplot(312).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax))
        plt.errorbar(spec_fits['vars'],spec_fits['ofs'],spec_fits['ofs_err'],fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("Offset")
    if spec_fits['vars']==[]:
        plt.subplot(313).axis(xmin=-len(spec_fits['ws'])*0.10, xmax=len(spec_fits['ws'])*1.10)
        plt.errorbar(range(len(spec_fits['ws'])),spec_fits['ws'],spec_fits['ws_err'],fmt='go')
    else:
        xmin=min(spec_fits['vars'])
        xmax=max(spec_fits['vars'])
        plt.subplot(313).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax))
        plt.errorbar(spec_fits['vars'],spec_fits['ws'],spec_fits['ws_err'],fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("Width")
    return s
def do_T1(qubit_info, delays, double_exp = False):
    """Run one T1 (relaxation) measurement over `delays`, tag the dataset
    with the module-level `field` and `temp`, and return the measurement
    object (its figure is closed immediately).

    NOTE(review): `double_exp` is accepted but never forwarded — confirm.
    """
    from scripts.single_qubit import T1measurement
    t1 = T1measurement.T1Measurement(qubit_info, delays)
    t1.data.set_attrs(field_current=field)
    t1.data.set_attrs(temperature=temp)
#    t1.data.set_attrs(laser_power=voltage)
    t1.measure()
    plt.close()
    return t1
def do_T1_plot(qubit_info, n_avg, delays, t1_fits, fig_num, double_exp = False, var=None):
    """Run a T1 measurement, accumulate its fit results, and refresh the
    running summary plot.

    qubit_info -- qubit settings object forwarded to do_T1
    n_avg      -- averages programmed into the alazar digitizer (module global)
    delays     -- T1 delay sweep
    t1_fits    -- dict of lists ('t1s', 't1s_err', 'ofs', 'ofs_err', 'amps',
                  'amps_err', 'vars') updated in place
    fig_num    -- matplotlib figure number of the summary plot
    double_exp -- forwarded to do_T1 (fix: it used to be silently dropped)
    var        -- optional abscissa value for this iteration; when given it is
                  appended to t1_fits['vars'] and used as the x axis, otherwise
                  the iteration index is used
    """
    alazar.set_naverages(n_avg)
    t1 = do_T1(qubit_info, delays, double_exp)
    t1_fits['t1s'].append(t1.fit_params['tau'].value)
    t1_fits['t1s_err'].append(t1.fit_params['tau'].stderr)
    t1_fits['ofs'].append(t1.fit_params['ofs'].value)
    t1_fits['ofs_err'].append(t1.fit_params['ofs'].stderr)
    t1_fits['amps'].append(t1.fit_params['A'].value)
    t1_fits['amps_err'].append(t1.fit_params['A'].stderr)
    if var is not None:
        t1_fits['vars'].append(var)
    plt.figure(fig_num)
    plt.clf()
    # Top panel: fitted T1 values.
    if t1_fits['vars'] == []:
        plt.subplot(211).axis(xmin=-len(t1_fits['t1s'])*0.10, xmax=len(t1_fits['t1s'])*1.10)
        plt.errorbar(range(len(t1_fits['t1s'])), t1_fits['t1s'], t1_fits['t1s_err'], fmt='go')
    else:
        xmin = min(t1_fits['vars'])
        xmax = max(t1_fits['vars'])
        plt.subplot(211).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax))
        plt.errorbar(t1_fits['vars'], t1_fits['t1s'], t1_fits['t1s_err'], fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("T1(us)")
    # Bottom panel: fitted amplitudes (axis range now keyed to 'amps', the
    # series actually plotted, instead of 't1s').
    if t1_fits['vars'] == []:
        plt.subplot(212).axis(xmin=-len(t1_fits['amps'])*0.10, xmax=len(t1_fits['amps'])*1.10)
        plt.errorbar(range(len(t1_fits['amps'])), t1_fits['amps'], t1_fits['amps_err'], fmt='go')
    else:
        xmin = min(t1_fits['vars'])
        xmax = max(t1_fits['vars'])
        plt.subplot(212).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax))
        plt.errorbar(t1_fits['vars'], t1_fits['amps'], t1_fits['amps_err'], fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("Amplitude")
def do_T1_phonon(qubit_info, delays, amp, piLength, sigma = 10):
    """Measure phonon T1 via a stark-swap sequence and return the measurement
    object (fit results in .fit_params).

    delays   -- wait-time sweep
    amp      -- stark drive amplitude for the qubit-phonon swap
    piLength -- phonon pi-pulse length forwarded as phonon_pi
    sigma    -- pulse edge width forwarded to phonon_T1
    """
    from scripts.single_qubit import stark_swap
    t1 = stark_swap.phonon_T1(qubit_info,
                              delays, phonon_pi = piLength, amp = amp,
                              sigma = sigma,
                              )
    t1.measure()
    plt.close()  # close the per-run figure; summary plotting is done by callers
    return t1
def do_T1_phonon_plot(qubit_info, n_avg, delays, amp, piLength, t1_fits, fig_num, sigma = 10, var=None):
    """Run a phonon T1 measurement, accumulate its fit results, and refresh
    the running summary plot.

    Parameters mirror do_T1_phonon; t1_fits is a dict of lists ('t1s',
    't1s_err', 'ofs', 'ofs_err', 'amps', 'amps_err', 'vars') updated in
    place, fig_num the summary figure, and var an optional abscissa value
    for this iteration (iteration index is used when absent).
    """
    alazar.set_naverages(n_avg)
    t1 = do_T1_phonon(qubit_info, delays, amp, piLength, sigma)
    t1_fits['t1s'].append(t1.fit_params['tau'].value)
    t1_fits['t1s_err'].append(t1.fit_params['tau'].stderr)
    t1_fits['ofs'].append(t1.fit_params['ofs'].value)
    t1_fits['ofs_err'].append(t1.fit_params['ofs'].stderr)
    t1_fits['amps'].append(t1.fit_params['A'].value)
    t1_fits['amps_err'].append(t1.fit_params['A'].stderr)
    if var is not None:
        t1_fits['vars'].append(var)
    plt.figure(fig_num)
    plt.clf()
    # Top panel: fitted T1 values.
    if t1_fits['vars'] == []:
        plt.subplot(211).axis(xmin=-len(t1_fits['t1s'])*0.10, xmax=len(t1_fits['t1s'])*1.10)
        plt.errorbar(range(len(t1_fits['t1s'])), t1_fits['t1s'], t1_fits['t1s_err'], fmt='go')
    else:
        xmin = min(t1_fits['vars'])
        xmax = max(t1_fits['vars'])
        plt.subplot(211).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax))
        plt.errorbar(t1_fits['vars'], t1_fits['t1s'], t1_fits['t1s_err'], fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("T1(us)")
    # Bottom panel: fitted amplitudes (axis range now keyed to 'amps', the
    # series actually plotted, instead of 't1s').
    if t1_fits['vars'] == []:
        plt.subplot(212).axis(xmin=-len(t1_fits['amps'])*0.10, xmax=len(t1_fits['amps'])*1.10)
        plt.errorbar(range(len(t1_fits['amps'])), t1_fits['amps'], t1_fits['amps_err'], fmt='go')
    else:
        xmin = min(t1_fits['vars'])
        xmax = max(t1_fits['vars'])
        plt.subplot(212).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax))
        plt.errorbar(t1_fits['vars'], t1_fits['amps'], t1_fits['amps_err'], fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("Amplitude")
def do_T2(qubit_info, delays, detune, fix_freq=None, fit_type='exp_decay_sine',):
    """Run one Ramsey (T2*) measurement and return the measurement object.

    delays   -- free-evolution time sweep
    detune   -- deliberate drive detuning (Hz) producing the Ramsey fringes
    fix_freq -- optional fixed fringe frequency for the fit
    fit_type -- fit model name forwarded to T2Measurement
    """
    from scripts.single_qubit import T2measurement
    t2 = T2measurement.T2Measurement(qubit_info, delays, detune=detune, fix_freq = fix_freq, fit_type = fit_type)
    # Tag the dataset with the current fridge conditions (module globals).
    t2.data.set_attrs(field_current=field)
    t2.data.set_attrs(temperature=temp)
#    t2.data.set_attrs(laser_power=voltage)
    t2.measure()
    plt.close()  # close the per-run figure; summary plotting is done by callers
    return t2
def do_T2_plot(qubit_info, n_avg, delays, detune, t2_fits, fig_num, fix_freq=None, fit_type='exp_decay_sine', var=None):
    """Run a Ramsey measurement, accumulate its fit results, and refresh the
    running summary plot (only drawn for fit_type 'exp_decay_sine').

    t2_fits -- dict of lists ('t2s', 't2s_err', 't2freqs', 't2freqs_err',
               'amps', 'amps_err', 'vars') updated in place
    var     -- optional abscissa value for this iteration; iteration index
               is used when absent
    Other parameters are forwarded to do_T2.
    """
    alazar.set_naverages(n_avg)
    t2 = do_T2(qubit_info, delays, detune, fix_freq, fit_type)
    if t2 is not None:
        t2_fits['t2s'].append(t2.fit_params['tau'].value)
        t2_fits['t2s_err'].append(t2.fit_params['tau'].stderr)
        # Ramsey frequency in MHz relative to the programmed detuning.
        t2_fits['t2freqs'].append(t2.fit_params['f'].value*1000 - detune/1e6)
        t2_fits['t2freqs_err'].append(t2.fit_params['f'].stderr*1000.0)
        t2_fits['amps'].append(t2.fit_params['A'].value)
        t2_fits['amps_err'].append(t2.fit_params['A'].stderr)
    if var is not None:
        t2_fits['vars'].append(var)
    if fit_type == 'exp_decay_sine':
        plt.figure(fig_num)
        plt.clf()
        # Top panel: fitted T2 values.
        if t2_fits['vars'] == []:
            plt.subplot(211).axis(xmin=-len(t2_fits['t2s'])*0.10, xmax=len(t2_fits['t2s'])*1.10, ymin=min(t2_fits['t2s'])*0.7, ymax=max(t2_fits['t2s'])*1.3)
            plt.errorbar(range(len(t2_fits['t2s'])), t2_fits['t2s'], t2_fits['t2s_err'], fmt='rs')
        else:
            xmin = min(t2_fits['vars'])
            xmax = max(t2_fits['vars'])
            plt.subplot(211).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax), ymin=min(t2_fits['t2s'])*0.7, ymax=max(t2_fits['t2s'])*1.3)
            plt.errorbar(t2_fits['vars'], t2_fits['t2s'], t2_fits['t2s_err'], fmt='rs')
        plt.xlabel("Measurement iterations")
        plt.ylabel("T2(us)")
        # Bottom panel: fitted Ramsey frequencies.
        if t2_fits['vars'] == []:
            plt.subplot(212).axis(xmin=-len(t2_fits['t2freqs'])*0.10, xmax=len(t2_fits['t2freqs'])*1.10, ymin=min(t2_fits['t2freqs'])-0.02, ymax=max(t2_fits['t2freqs'])+0.02)
            plt.errorbar(range(len(t2_fits['t2freqs'])), t2_fits['t2freqs'], t2_fits['t2freqs_err'], fmt='b^')
        else:
            xmin = min(t2_fits['vars'])
            xmax = max(t2_fits['vars'])
            plt.subplot(212).axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax), ymin=min(t2_fits['t2freqs'])-0.02, ymax=max(t2_fits['t2freqs'])+0.02)
            plt.errorbar(t2_fits['vars'], t2_fits['t2freqs'], t2_fits['t2freqs_err'], fmt='b^')
        plt.xlabel("Measurement iterations")
        plt.ylabel("Ramsey Freq.(MHz) (= Actual Qubit Freq. - Drive Freq.)")
def do_T2echo(qubit_info, delays, detune, fix_freq=None, fit_type='exp_decay_sine'):
    """Run one T2-echo measurement and return the measurement object.

    Uses ECHO_CPMG (the older ECHO_HAHN call is kept below, commented out).
    delays/detune/fix_freq/fit_type are forwarded to T2Measurement.
    """
#    t2e = T2measurement.T2Measurement(qubit_info, delays, detune, echotype=T2measurement.ECHO_HAHN, title='T2 Echo')
    from scripts.single_qubit import T2measurement
    t2e = T2measurement.T2Measurement(qubit_info, delays, detune, echotype=T2measurement.ECHO_CPMG, fix_freq = fix_freq, fit_type = fit_type, title='T2 Echo')
    # Tag the dataset with the current fridge conditions (module globals).
    t2e.data.set_attrs(field_current=field)
    t2e.data.set_attrs(temperature=temp)
#    t2e.data.set_attrs(laser_power=voltage)
    t2e.measure()
    plt.close()  # close the per-run figure; summary plotting is done by callers
    return t2e
def do_T2echo_plot(qubit_info, n_avg, delays, detune, t2E_fits, fig_num, fix_freq=None, fit_type='exp_decay_sine', var=None):
    """Run a T2-echo measurement, accumulate its fit results, and refresh the
    running summary plot.

    t2E_fits -- dict of lists ('t2es', 't2es_err', 'vars') updated in place
    var      -- optional abscissa value for this iteration; iteration index
                is used when absent
    Other parameters are forwarded to do_T2echo.
    """
    alazar.set_naverages(n_avg)
    t2e = do_T2echo(qubit_info, delays, detune, fix_freq, fit_type)
    # A gaussian decay fit reports its time constant as 'sigma', the
    # exponential fits as 'tau'.
    if fit_type == 'gaussian_decay':
        tname = 'sigma'
    else:
        tname = 'tau'
    if t2e is not None:
        t2E_fits['t2es'].append(t2e.fit_params[tname].value)
        t2E_fits['t2es_err'].append(t2e.fit_params[tname].stderr)
    if var is not None:
        t2E_fits['vars'].append(var)
    plt.figure(fig_num)
    plt.clf()
    if t2E_fits['vars'] == []:
        plt.axis(xmin=-len(t2E_fits['t2es'])*0.10, xmax=len(t2E_fits['t2es'])*1.10, ymin=min(t2E_fits['t2es'])*0.8, ymax=max(t2E_fits['t2es'])*1.2)
        plt.errorbar(range(len(t2E_fits['t2es'])), t2E_fits['t2es'], t2E_fits['t2es_err'], fmt='mv') # magenta color and v-shape markers
    else:
        xmin = min(t2E_fits['vars'])
        xmax = max(t2E_fits['vars'])
        plt.axis(xmin=xmin-0.1*abs(xmin), xmax=xmax+0.1*abs(xmax), ymin=min(t2E_fits['t2es'])*0.8, ymax=max(t2E_fits['t2es'])*1.2)
        plt.errorbar(t2E_fits['vars'], t2E_fits['t2es'], t2E_fits['t2es_err'], fmt='mv') # magenta color and v-shape markers
    plt.xlabel("Measurement iterations")
    plt.ylabel("T2Echo(us)")
def smart_T1_delays(T1_int=90e3, QPT1=1.5e6, half_decay_point=1e6, eff_T1_delay=800.0, probe_point=0.5, meas_per_QPinj=30, meas_per_reptime=5, rep_time=None):
    """Compute a T1_delay schedule that tracks the expected quasiparticle
    (QP) recovery after an injection pulse.

    T1_int           -- intrinsic T1 of the qubit (ns)
    QPT1             -- guessed quasiparticle lifetime (ns)
    half_decay_point -- QP delay at which the qubit relaxes halfway to the
                        ground state with T1_delay=0 (i.e. during readout)
    eff_T1_delay     -- effective delay of the readout pulse itself, usually
                        half the readout pulse length (ns)
    probe_point      -- target residual excited-state population to probe
    meas_per_QPinj   -- measurements per QP injection cycle
    meas_per_reptime -- measurements per generator repetition period
    rep_time         -- repetition period in ns; when None (the default, and
                        the original behavior) it is derived from the 'fg'
                        function generator's frequency

    Returns a numpy array of non-negative T1 delays (ns), one per point.
    """
    if rep_time is None:
        rep_time = 1.0e9/fg.get_frequency()
    n_points = meas_per_QPinj * meas_per_reptime
    step_time = rep_time / meas_per_reptime
    # T1 at the half-decay point equals eff_T1_delay/ln(2); strip the
    # intrinsic contribution to get the QP-limited reference T1.
    T1_QPref = 1/(np.log(2)/eff_T1_delay-1/T1_int)
    QP_times = np.linspace(0, (n_points-1)*step_time, n_points)
    # Expected T1 vs time: intrinsic rate plus an exponentially recovering
    # QP-induced rate.
    T1_est = 1/(1/T1_int+1/T1_QPref*np.exp(-(QP_times-half_decay_point)/QPT1))
    T1_delays = -np.log(probe_point)*T1_est-eff_T1_delay
    # Clamp negative delays to zero (vectorized replacement of the old loop).
    return np.maximum(T1_delays, 0.0)
def do_QPdecay(qubit_info, T1_delay, **kwargs):
    """Build (but do not run) a quasiparticle-decay measurement.

    Note: measure() is intentionally commented out here -- the caller
    (do_QPdecay_plot) runs it, so attributes can be set first.
    """
    rep_time = 1e9/fg.get_frequency()  # repetition period (ns) from the function generator
    qpd = QPdecay.QPdecay(qubit_info, T1_delay, rep_time, **kwargs)
    # Tag the dataset with the current conditions (module globals).
    qpd.data.set_attrs(field_current=field)
    qpd.data.set_attrs(temperature=temp)
#    qpd.data.set_attrs(T1_delay=T1_delay)
    qpd.data.set_attrs(inj_power=ag3.get_power())
#    qpd.data.set_attrs(laser_voltage=laser_info.get_DCOffset())
#    qpd.measure()
#    plt.close()
    return qpd
def do_QPdecay_plot(qubit_info, n_avg, T1_delay, qpd_fits, fig_num, **kwargs):
    """Run one QP-decay measurement (with the injection source gated on),
    accumulate its fit results in qpd_fits, and refresh the summary plot.

    qpd_fits -- dict of lists ('qpt1s', 'qpt1s_err', 'qpofs', 'qpofs_err')
                updated in place; times are stored in ms (fit returns us).
    Returns the measurement object.
    """
    alz.set_naverages(n_avg)
    ag3.set_rf_on(True)  # gate the QP injection source on for this run
    qpd = do_QPdecay(qubit_info, T1_delay, **kwargs)
    qpd.measure()
    plt.close()
    if qpd != None:
        qpd_fits['qpt1s'].append(qpd.fit_params['tau'].value/1000.0)
        qpd_fits['qpt1s_err'].append(qpd.fit_params['tau'].stderr/1000.0)
        qpd_fits['qpofs'].append(qpd.fit_params['ofs'].value)
        qpd_fits['qpofs_err'].append(qpd.fit_params['ofs'].stderr)
#        qpd_fits['amps'].append(qpd.fit_params['amplitude'].value)
    qpofs_array = np.array(qpd_fits['qpofs'])
    qpofs_err_array = np.array(qpd_fits['qpofs_err'])
    plt.figure(fig_num)
    plt.clf()
    # Top panel: quasiparticle lifetime.
    plt.subplot(211).axis(xmin=-len(qpd_fits['qpt1s'])*0.10, xmax=len(qpd_fits['qpt1s'])*1.10)#, ymin=0, ymax=1)
    plt.errorbar(range(len(qpd_fits['qpt1s'])), qpd_fits['qpt1s'], qpd_fits['qpt1s_err'], fmt='go')
    plt.ylabel("Tau QP(ms)")
    # Bottom panel: 1/ofs is the T1 floor; error propagated as err/ofs^2.
    plt.subplot(212).axis(xmin=-len(np.array(qpd_fits['qpofs']))*0.10, xmax=len(np.array(qpd_fits['qpofs']))*1.10)#, ymin=10, ymax=30)
    plt.errorbar(range(len(qpofs_array)), 1/qpofs_array, qpofs_err_array/qpofs_array/qpofs_array, fmt='b^')
    plt.xlabel("Measurement iterations")
    plt.ylabel("Qubit T1-floor(us)")
    ag3.set_rf_on(False)  # gate the injection source back off
    return qpd
def do_FT1(qubit_info, ef_info, delays):
    """Run one e-f transition T1 measurement and return the measurement
    object (FT1measurement is expected at module scope)."""
    ft1 = FT1measurement.FT1Measurement(qubit_info, ef_info, delays)
    # Tag the dataset with the current fridge conditions (module globals).
    ft1.data.set_attrs(field_current=field)
    ft1.data.set_attrs(temperature=temp)
    ft1.measure()
    plt.close()  # close the per-run figure; summary plotting is done by callers
    return ft1
def do_FT1_plot(qubit_info, ef_info, n_avg, delays, ft1_fits, fig_num):
    """Run an e-f T1 measurement (with the ef drive gated on), accumulate fit
    results in ft1_fits, and refresh the summary plot.

    ft1_fits -- dict of lists ('ft1s', 'ft1s_err', 'ofs', 'amps') updated in
                place; times stored in us (fit returns ns).  'ofs' and 'amps'
                are recorded but not plotted.
    """
    alz.set_naverages(n_avg)
    brick1.set_rf_on(True)  # gate the ef drive source on for this run
    ft1 = do_FT1(qubit_info, ef_info, delays)
    if ft1 != None:
        ft1_fits['ft1s'].append(ft1.fit_params['tau'].value/1000.0)
        ft1_fits['ft1s_err'].append(ft1.fit_params['tau'].stderr/1000.0)
        ft1_fits['ofs'].append(ft1.fit_params['ofs'].value)
        ft1_fits['amps'].append(ft1.fit_params['amplitude'].value)
    plt.figure(fig_num)
    plt.clf()
    plt.axis(xmin=-len(ft1_fits['ft1s'])*0.10, xmax=len(ft1_fits['ft1s'])*1.10, ymin= min(ft1_fits['ft1s'])*0.8, ymax=max(ft1_fits['ft1s'])*1.2)
    plt.errorbar(range(len(ft1_fits['ft1s'])), ft1_fits['ft1s'], ft1_fits['ft1s_err'], fmt='go')
    plt.xlabel("Measurement iterations")
    plt.ylabel("FT1(us)")
    brick1.set_rf_on(False)  # gate the ef drive back off
def do_EFT2(qubit_info, ef_info, delays, detune, double_freq=False, QP_injection_delay=None, QP_injection_length=10e3):
    """Run one e-f Ramsey (EFT2) measurement and return the measurement
    object.

    QP_injection_delay and QP_injection_length are accepted for API symmetry
    with callers but are NOT forwarded to EFT2Measurement here.
    """
    eft2 = EFT2measurement.EFT2Measurement(qubit_info, ef_info, delays, detune=detune, double_freq=double_freq)
    # Tag the dataset with the current fridge conditions (module globals).
    eft2.data.set_attrs(field_current=field)
    eft2.data.set_attrs(temperature=temp)
    eft2.measure()
    plt.close()  # close the per-run figure; summary plotting is done by callers
    return eft2
def do_EFT2_plot(qubit_info, ef_info, n_avg, delays, detune, ft2_fits, fig_num, double_freq=False, QP_injection_delay=None, QP_injection_length=10e3, laser_power = None):
    """Run an e-f Ramsey measurement (ef drive gated on), accumulate fit
    results in ft2_fits, and refresh the summary plot.

    Three mutually exclusive plot layouts are drawn depending on
    (double_freq, QP_injection_delay): single-frequency, single-frequency
    with QP injection, or double-frequency (3 panels).
    Times are stored in us, frequencies in MHz relative to the detuning.
    laser_power is accepted but unused here -- TODO confirm intent.
    """
    alz.set_naverages(n_avg)
    brick1.set_rf_on(True)  # gate the ef drive source on for this run
    eft2 = do_EFT2(qubit_info, ef_info, delays, detune, double_freq, QP_injection_delay, QP_injection_length)
    if (eft2!=None):
        ft2_fits['eft2s'].append(eft2.fit_params['tau'].value/1000)
        ft2_fits['eft2s_err'].append(eft2.fit_params['tau'].stderr/1000.0)
        ft2_fits['eft2freqs'].append(eft2.fit_params['freq'].value*1000 - detune/1e6)
        ft2_fits['eft2freqs_err'].append(eft2.fit_params['freq'].stderr*1000.0)
        ft2_fits['eft2amps'].append(eft2.fit_params['amp'].value)
        ft2_fits['eft2amps_err'].append(eft2.fit_params['amp'].stderr)
        if double_freq == True:
            # Second frequency component of the double-exponential-sine fit.
            ft2_fits['eft22s'].append(eft2.fit_params['tau2'].value/1000)
            ft2_fits['eft22s_err'].append(eft2.fit_params['tau2'].stderr/1000.0)
            ft2_fits['eft22freqs'].append(eft2.fit_params['freq2'].value*1000 -detune/1e6)
            ft2_fits['eft22freqs_err'].append(eft2.fit_params['freq2'].stderr*1000.0)
            ft2_fits['eft2amp2s'].append(eft2.fit_params['amp2'].value)
            ft2_fits['eft2amp2s_err'].append(eft2.fit_params['amp2'].stderr)
        if QP_injection_delay is not None:
            # Same fit, bookkept separately for QP-injection runs.
            ft2_fits['eft2s_QP'].append(eft2.fit_params['tau'].value/1000)
            ft2_fits['eft2s_QP_err'].append(eft2.fit_params['tau'].stderr/1000.0)
            ft2_fits['eft2freqs_QP'].append(eft2.fit_params['freq'].value*1000 -detune/1e6)
            ft2_fits['eft2freqs_QP_err'].append(eft2.fit_params['freq'].stderr*1000.0)
    if double_freq == False and QP_injection_delay is None:
        plt.figure(fig_num)
        plt.clf()
        plt.subplot(211).axis(xmin=-len(ft2_fits['eft2s'])*0.10, xmax=len(ft2_fits['eft2s'])*1.10, ymin= min(ft2_fits['eft2s'])*0.7, ymax=max(ft2_fits['eft2s'])*1.3)
        plt.errorbar(range(len(ft2_fits['eft2s'])), ft2_fits['eft2s'], ft2_fits['eft2s_err'], fmt='rs')
        plt.ylabel("EFT2(us)")
        plt.subplot(212).axis(xmin=-len(ft2_fits['eft2freqs'])*0.10, xmax=len(ft2_fits['eft2freqs'])*1.10, ymin=min(ft2_fits['eft2freqs'])-0.02, ymax=max(ft2_fits['eft2freqs'])+0.02)
        plt.errorbar(range(len(ft2_fits['eft2freqs'])), ft2_fits['eft2freqs'], ft2_fits['eft2freqs_err'], fmt='b^')
        plt.xlabel("Measurement iterations")
        plt.ylabel("Ramsey Freq.(MHz) (= Actual Qubit Freq. - Drive Freq.)")
    if double_freq == False and QP_injection_delay is not None:
        plt.figure(fig_num)
        plt.clf()
        plt.subplot(211).axis(xmin=-len(ft2_fits['eft2s_QP'])*0.10, xmax=len(ft2_fits['eft2s_QP'])*1.10, ymin= min(ft2_fits['eft2s_QP'])*0.7, ymax=max(ft2_fits['eft2s_QP'])*1.3)
        plt.errorbar(range(len(ft2_fits['eft2s_QP'])), ft2_fits['eft2s_QP'], ft2_fits['eft2s_QP_err'], fmt='rs')
        plt.ylabel("EFT2 with QP injection (us)")
        plt.subplot(212).axis(xmin=-len(ft2_fits['eft2freqs_QP'])*0.10, xmax=len(ft2_fits['eft2freqs_QP'])*1.10, ymin=min(ft2_fits['eft2freqs_QP'])-0.02, ymax=max(ft2_fits['eft2freqs_QP'])+0.02)
        plt.errorbar(range(len(ft2_fits['eft2freqs_QP'])), ft2_fits['eft2freqs_QP'], ft2_fits['eft2freqs_QP_err'], fmt='b^')
        plt.xlabel("Measurement iterations")
        plt.ylabel("Ramsey Freq.(MHz) (= Actual Qubit Freq. - Drive Freq.)")
    if double_freq is True:
        plt.figure(fig_num)
        plt.clf()
        plt.subplot(311).axis(xmin=-len(ft2_fits['eft2s'])*0.10, xmax=len(ft2_fits['eft2s'])*1.10, ymin= min(ft2_fits['eft2s'])*0.7, ymax=max(ft2_fits['eft22s'])*1.3)
        plt.errorbar(range(len(ft2_fits['eft2s'])), ft2_fits['eft2s'], ft2_fits['eft2s_err'], fmt='rs')
        plt.errorbar(range(len(ft2_fits['eft22s'])), ft2_fits['eft22s'], ft2_fits['eft22s_err'], fmt='b^')
        plt.ylabel("EFT2(us)")
        plt.subplot(312).axis(xmin=-len(ft2_fits['eft2freqs'])*0.10, xmax=len(ft2_fits['eft2freqs'])*1.10,ymin= min(min(ft2_fits['eft2freqs']),min(ft2_fits['eft22freqs']))-0.02, ymax=max(max(ft2_fits['eft2freqs']), max(ft2_fits['eft22freqs']))+0.02)
        plt.errorbar(range(len(ft2_fits['eft2freqs'])), ft2_fits['eft2freqs'], ft2_fits['eft2freqs_err'], fmt='rs')
        plt.errorbar(range(len(ft2_fits['eft22freqs'])), ft2_fits['eft22freqs'], ft2_fits['eft22freqs_err'], fmt='b^')
        plt.ylabel("Ramsey Freq.(MHz) (= Actual Qubit Freq. - Drive Freq.)")
        plt.subplot(313).axis(xmin=-len(ft2_fits['eft2amps'])*0.10, xmax=len(ft2_fits['eft2amps'])*1.10,ymin= min(ft2_fits['eft2amp2s'])*0.8, ymax=max(ft2_fits['eft2amps'])*1.2)
        plt.errorbar(range(len(ft2_fits['eft2amps'])), ft2_fits['eft2amps'], ft2_fits['eft2amps_err'], fmt='rs')
        plt.errorbar(range(len(ft2_fits['eft2amp2s'])), ft2_fits['eft2amp2s'], ft2_fits['eft2amp2s_err'], fmt='b^')
        plt.xlabel("Measurement iterations")
        plt.ylabel("Amplitudes (AU)")
    brick1.set_rf_on(False)  # gate the ef drive back off
def do_EFT2echo(qubit_info, ef_info, delays, detune, laser_power = None):
    """Run one e-f T2-echo (Hahn) measurement and return the measurement
    object.  laser_power is accepted but unused here."""
    eft2e = EFT2measurement.EFT2Measurement(qubit_info, ef_info, delays, detune, echotype=EFT2measurement.ECHO_HAHN, title='EFT2 Echo')
    # Tag the dataset with the current fridge conditions (module globals).
    eft2e.data.set_attrs(field_current=field)
    eft2e.data.set_attrs(temperature=temp)
#    t2e.data.set_attrs(laser_power=voltage)
    eft2e.measure()
    plt.close()  # close the per-run figure; summary plotting is done by callers
    return eft2e
def do_EFT2echo_plot(qubit_info, ef_info, n_avg, delays, detune, t2E_fits, fig_num, laser_power = None):
    """Run an e-f T2-echo measurement (ef drive gated on), accumulate the
    fitted decay time in t2E_fits ('eft2es'/'eft2es_err', stored in us),
    and refresh the summary plot."""
    alz.set_naverages(n_avg)
    brick1.set_rf_on(True)  # gate the ef drive source on for this run
    eft2e = do_EFT2echo(qubit_info, ef_info, delays, detune, laser_power = laser_power)
    if eft2e != None:
        t2E_fits['eft2es'].append(eft2e.fit_params['tau'].value/1000)
        t2E_fits['eft2es_err'].append(eft2e.fit_params['tau'].stderr/1000)
    plt.figure(fig_num)
    plt.clf()
    plt.axis(xmin=-len(t2E_fits['eft2es'])*0.10, xmax=len(t2E_fits['eft2es'])*1.10, ymin= min(t2E_fits['eft2es'])*0.8, ymax=max(t2E_fits['eft2es'])*1.2)
    plt.errorbar(range(len(t2E_fits['eft2es'])), t2E_fits['eft2es'], t2E_fits['eft2es_err'], fmt='mv') # magenta color and v-shape markers
    plt.xlabel("Measurement iterations")
    plt.ylabel("EFT2Echo(us)")
    brick1.set_rf_on(False)  # gate the ef drive back off
def do_GFT2(qubit_info, ef_info, delays, detune, double_freq=False, QP_injection_delay=None, QP_injection_length=10e3):
    """Run one g-f Ramsey (GFT2) measurement and return the measurement
    object.

    QP_injection_delay and QP_injection_length are accepted for API symmetry
    with callers but are NOT forwarded to GFT2Measurement here.
    """
    gft2 = GFT2measurement.GFT2Measurement(qubit_info, ef_info, delays, detune=detune, double_freq=double_freq)
    # Tag the dataset with the current fridge conditions (module globals).
    gft2.data.set_attrs(field_current=field)
    gft2.data.set_attrs(temperature=temp)
    gft2.measure()
    plt.close()  # close the per-run figure; summary plotting is done by callers
    return gft2
def do_GFT2_plot(qubit_info, ef_info, n_avg, delays, detune, ft2_fits, fig_num, double_freq=False, QP_injection_delay=None, QP_injection_length=10e3, laser_power = None):
    """Run a g-f Ramsey measurement (ef drive gated on), accumulate fit
    results in ft2_fits, and refresh the summary plot.

    Structure mirrors do_EFT2_plot with 'gft2*' keys and different marker
    colors.  Three mutually exclusive layouts depending on
    (double_freq, QP_injection_delay).  Times in us, frequencies in MHz
    relative to the detuning.  laser_power is accepted but unused here.
    """
    alz.set_naverages(n_avg)
    brick1.set_rf_on(True)  # gate the ef drive source on for this run
    gft2 = do_GFT2(qubit_info, ef_info, delays, detune, double_freq, QP_injection_delay, QP_injection_length)
    if (gft2!=None):
        ft2_fits['gft2s'].append(gft2.fit_params['tau'].value/1000)
        ft2_fits['gft2s_err'].append(gft2.fit_params['tau'].stderr/1000.0)
        ft2_fits['gft2freqs'].append(gft2.fit_params['freq'].value*1000 - detune/1e6)
        ft2_fits['gft2freqs_err'].append(gft2.fit_params['freq'].stderr*1000.0)
        ft2_fits['gft2amps'].append(gft2.fit_params['amp'].value)
        ft2_fits['gft2amps_err'].append(gft2.fit_params['amp'].stderr)
        if double_freq == True:
            # Second frequency component of the double-exponential-sine fit.
            ft2_fits['gft22s'].append(gft2.fit_params['tau2'].value/1000)
            ft2_fits['gft22s_err'].append(gft2.fit_params['tau2'].stderr/1000.0)
            ft2_fits['gft22freqs'].append(gft2.fit_params['freq2'].value*1000 -detune/1e6)
            ft2_fits['gft22freqs_err'].append(gft2.fit_params['freq2'].stderr*1000.0)
            ft2_fits['gft2amp2s'].append(gft2.fit_params['amp2'].value)
            ft2_fits['gft2amp2s_err'].append(gft2.fit_params['amp2'].stderr)
        if QP_injection_delay is not None:
            # Same fit, bookkept separately for QP-injection runs.
            ft2_fits['gft2s_QP'].append(gft2.fit_params['tau'].value/1000)
            ft2_fits['gft2s_QP_err'].append(gft2.fit_params['tau'].stderr/1000.0)
            ft2_fits['gft2freqs_QP'].append(gft2.fit_params['freq'].value*1000 -detune/1e6)
            ft2_fits['gft2freqs_QP_err'].append(gft2.fit_params['freq'].stderr*1000.0)
    if double_freq == False and QP_injection_delay is None:
        plt.figure(fig_num)
        plt.clf()
        plt.subplot(211).axis(xmin=-len(ft2_fits['gft2s'])*0.10, xmax=len(ft2_fits['gft2s'])*1.10, ymin= min(ft2_fits['gft2s'])*0.7, ymax=max(ft2_fits['gft2s'])*1.3)
        plt.errorbar(range(len(ft2_fits['gft2s'])), ft2_fits['gft2s'], ft2_fits['gft2s_err'], fmt='ks')
        plt.ylabel("GFT2(us)")
        plt.subplot(212).axis(xmin=-len(ft2_fits['gft2freqs'])*0.10, xmax=len(ft2_fits['gft2freqs'])*1.10, ymin=min(ft2_fits['gft2freqs'])-0.02, ymax=max(ft2_fits['gft2freqs'])+0.02)
        plt.errorbar(range(len(ft2_fits['gft2freqs'])), ft2_fits['gft2freqs'], ft2_fits['gft2freqs_err'], fmt='c^')
        plt.xlabel("Measurement iterations")
        plt.ylabel("Ramsey Freq.(MHz) (= Actual Qubit Freq. - Drive Freq.)")
    if double_freq == False and QP_injection_delay is not None:
        plt.figure(fig_num)
        plt.clf()
        plt.subplot(211).axis(xmin=-len(ft2_fits['gft2s_QP'])*0.10, xmax=len(ft2_fits['gft2s_QP'])*1.10, ymin= min(ft2_fits['gft2s_QP'])*0.7, ymax=max(ft2_fits['gft2s_QP'])*1.3)
        plt.errorbar(range(len(ft2_fits['gft2s_QP'])), ft2_fits['gft2s_QP'], ft2_fits['gft2s_QP_err'], fmt='ks')
        plt.ylabel("GFT2 with QP injection (us)")
        plt.subplot(212).axis(xmin=-len(ft2_fits['gft2freqs_QP'])*0.10, xmax=len(ft2_fits['gft2freqs_QP'])*1.10, ymin=min(ft2_fits['gft2freqs_QP'])-0.02, ymax=max(ft2_fits['gft2freqs_QP'])+0.02)
        plt.errorbar(range(len(ft2_fits['gft2freqs_QP'])), ft2_fits['gft2freqs_QP'], ft2_fits['gft2freqs_QP_err'], fmt='c^')
        plt.xlabel("Measurement iterations")
        plt.ylabel("Ramsey Freq.(MHz) (= Actual Qubit Freq. - Drive Freq.)")
    if double_freq is True:
        plt.figure(fig_num)
        plt.clf()
        plt.subplot(311).axis(xmin=-len(ft2_fits['gft2s'])*0.10, xmax=len(ft2_fits['gft2s'])*1.10, ymin= min(ft2_fits['gft2s'])*0.7, ymax=max(ft2_fits['gft22s'])*1.3)
        plt.errorbar(range(len(ft2_fits['gft2s'])), ft2_fits['gft2s'], ft2_fits['gft2s_err'], fmt='ks')
        plt.errorbar(range(len(ft2_fits['gft22s'])), ft2_fits['gft22s'], ft2_fits['gft22s_err'], fmt='c^')
        plt.ylabel("GFT2(us)")
        plt.subplot(312).axis(xmin=-len(ft2_fits['gft2freqs'])*0.10, xmax=len(ft2_fits['gft2freqs'])*1.10,ymin= min(min(ft2_fits['gft2freqs']),min(ft2_fits['gft22freqs']))-0.02, ymax=max(max(ft2_fits['gft2freqs']), max(ft2_fits['gft22freqs']))+0.02)
        plt.errorbar(range(len(ft2_fits['gft2freqs'])), ft2_fits['gft2freqs'], ft2_fits['gft2freqs_err'], fmt='ks')
        plt.errorbar(range(len(ft2_fits['gft22freqs'])), ft2_fits['gft22freqs'], ft2_fits['gft22freqs_err'], fmt='c^')
        plt.ylabel("Ramsey Freq.(MHz) (= Actual Qubit Freq. - Drive Freq.)")
        plt.subplot(313).axis(xmin=-len(ft2_fits['gft2amps'])*0.10, xmax=len(ft2_fits['gft2amps'])*1.10,ymin= min(ft2_fits['gft2amp2s'])*0.8, ymax=max(ft2_fits['gft2amps'])*1.2)
        plt.errorbar(range(len(ft2_fits['gft2amps'])), ft2_fits['gft2amps'], ft2_fits['gft2amps_err'], fmt='ks')
        plt.errorbar(range(len(ft2_fits['gft2amp2s'])), ft2_fits['gft2amp2s'], ft2_fits['gft2amp2s_err'], fmt='c^')
        plt.xlabel("Measurement iterations")
        plt.ylabel("Amplitudes (AU)")
    brick1.set_rf_on(False)  # gate the ef drive back off
def do_GFT2echo(qubit_info, ef_info, delays, detune, laser_power = None):
    """Run one g-f T2-echo (Hahn) measurement and return the measurement
    object.  laser_power is accepted but unused here.

    NOTE(review): the echotype constant is taken from EFT2measurement, not
    GFT2measurement -- presumably the two modules share the constant; verify.
    """
    gft2e = GFT2measurement.GFT2Measurement(qubit_info, ef_info, delays, detune, echotype=EFT2measurement.ECHO_HAHN, title='GFT2 Echo')
    # Tag the dataset with the current fridge conditions (module globals).
    gft2e.data.set_attrs(field_current=field)
    gft2e.data.set_attrs(temperature=temp)
#    t2e.data.set_attrs(laser_power=voltage)
    gft2e.measure()
    plt.close()  # close the per-run figure; summary plotting is done by callers
    return gft2e
def do_GFT2echo_plot(qubit_info, ef_info, n_avg, delays, detune, t2E_fits, fig_num, laser_power = None):
    """Run a g-f T2-echo measurement (ef drive gated on), accumulate the
    fitted decay time in t2E_fits ('gft2es'/'gft2es_err', stored in us),
    and refresh the summary plot."""
    alz.set_naverages(n_avg)
    brick1.set_rf_on(True)  # gate the ef drive source on for this run
    gft2e = do_GFT2echo(qubit_info, ef_info, delays, detune, laser_power = laser_power)
    if gft2e != None:
        t2E_fits['gft2es'].append(gft2e.fit_params['tau'].value/1000)
        t2E_fits['gft2es_err'].append(gft2e.fit_params['tau'].stderr/1000)
    plt.figure(fig_num)
    plt.clf()
    plt.axis(xmin=-len(t2E_fits['gft2es'])*0.10, xmax=len(t2E_fits['gft2es'])*1.10, ymin= min(t2E_fits['gft2es'])*0.8, ymax=max(t2E_fits['gft2es'])*1.2)
    plt.errorbar(range(len(t2E_fits['gft2es'])), t2E_fits['gft2es'], t2E_fits['gft2es_err'], fmt='yv') # yellow color and v-shape markers
    plt.xlabel("Measurement iterations")
    plt.ylabel("GFT2Echo(us)")
    brick1.set_rf_on(False)  # gate the ef drive back off
def do_FT2echo_plot(qubit_info, ef_info, n_avg, delays, detune, t2E_fits, fig_num, laser_power = None):
    """Run both EF and GF T2-echo measurements back to back and overlay their
    fitted decay times on one summary plot.

    NOTE(review): the figure is drawn twice -- once after the EF run (when
    'gft2es' still holds only previous iterations) and again after the GF
    run; the second draw clears and supersedes the first.
    """
    alz.set_naverages(n_avg)
    brick1.set_rf_on(True)  # gate the ef drive source on for both runs
    eft2e = do_EFT2echo(qubit_info, ef_info, delays, detune, laser_power = laser_power)
    if eft2e != None:
        t2E_fits['eft2es'].append(eft2e.fit_params['tau'].value/1000)
        t2E_fits['eft2es_err'].append(eft2e.fit_params['tau'].stderr/1000)
    plt.figure(fig_num)
    plt.clf()
    plt.axis(xmin=-len(t2E_fits['eft2es'])*0.10, xmax=len(t2E_fits['eft2es'])*1.10, ymin= min(t2E_fits['eft2es'])*0.8, ymax=max(t2E_fits['eft2es'])*1.2)
    plt.errorbar(range(len(t2E_fits['eft2es'])), t2E_fits['eft2es'], t2E_fits['eft2es_err'], fmt='mv', label='EFT2echo') # magenta color and v-shape markers
    plt.errorbar(range(len(t2E_fits['gft2es'])), t2E_fits['gft2es'], t2E_fits['gft2es_err'], fmt='yv', label='GFT2echo') # yellow color and v-shape markers
    plt.xlabel("Measurement iterations")
    plt.ylabel("FT2Echo(us)")
    gft2e = do_GFT2echo(qubit_info, ef_info, delays, detune, laser_power = laser_power)
    if gft2e != None:
        t2E_fits['gft2es'].append(gft2e.fit_params['tau'].value/1000)
        t2E_fits['gft2es_err'].append(gft2e.fit_params['tau'].stderr/1000)
    plt.figure(fig_num)
    plt.clf()
    plt.axis(xmin=-len(t2E_fits['gft2es'])*0.10, xmax=len(t2E_fits['gft2es'])*1.10, ymin= min(t2E_fits['eft2es'])*0.8, ymax=max(t2E_fits['gft2es'])*1.2)
    plt.errorbar(range(len(t2E_fits['eft2es'])), t2E_fits['eft2es'], t2E_fits['eft2es_err'], fmt='mv', label='EFT2echo') # magenta color and v-shape markers
    plt.errorbar(range(len(t2E_fits['gft2es'])), t2E_fits['gft2es'], t2E_fits['gft2es_err'], fmt='yv', label='GFT2echo') # yellow color and v-shape markers
    plt.xlabel("Measurement iterations")
    plt.ylabel("FT2Echo(us)")
    brick1.set_rf_on(False)  # gate the ef drive back off
def do_rabiup(qubit_info, ef_info, amps, QP_injection_delay=None, laser_power = None):
    """Run an EF Rabi with a leading g-e pi pulse ("rabi up") and return the
    measurement object.

    amps               -- EF drive amplitude sweep
    QP_injection_delay -- when given, run the QP-injection variant instead
    laser_power        -- recorded as a dataset attribute and forwarded
    """
    if QP_injection_delay is None:
        rabiup = efrabi.EFRabi(qubit_info, ef_info, amps, laser_power = laser_power)
    else:
        rabiup = efrabi_QP.EFRabi_QP(qubit_info, ef_info, amps, QP_injection_delay, laser_power = laser_power)
    # Tag the dataset with the run conditions (field/temp are module globals).
    rabiup.data.set_attrs(QP_delay=QP_injection_delay)
    rabiup.data.set_attrs(field_current=field)
    rabiup.data.set_attrs(temperature=temp)
    rabiup.data.set_attrs(laser_power=laser_power)
    rabiup.measure()
    plt.close()
    return rabiup
def do_rabinoup(qubit_info, ef_info, amps, force_period, QP_injection_delay=None, laser_power=None):
    """Run an EF Rabi WITHOUT the leading g-e pi pulse ("rabi no-up"),
    with the Rabi period pinned to force_period, and return the measurement
    object.  Its fitted amplitude, compared against do_rabiup's, yields the
    excited-state population.

    QP_injection_delay -- when given, run the QP-injection variant instead
    laser_power        -- recorded as a dataset attribute and forwarded
                          (fix: previously dropped in the QP branch,
                          inconsistent with do_rabiup)
    """
    if QP_injection_delay is None:
        rabinoup = efrabi.EFRabi(qubit_info, ef_info, amps, first_pi=False, force_period=force_period,laser_power = laser_power)
    else:
        rabinoup = efrabi_QP.EFRabi_QP(qubit_info, ef_info, amps, first_pi=False, force_period=force_period, QP_delay=QP_injection_delay, laser_power=laser_power)
    # Tag the dataset with the run conditions (field/temp are module globals).
    rabinoup.data.set_attrs(QP_delay=QP_injection_delay)
    rabinoup.data.set_attrs(field_current=field)
    rabinoup.data.set_attrs(temperature=temp)
    rabinoup.data.set_attrs(laser_power=laser_power)
    rabinoup.measure()
#    population = 100*rabinoup.fit_params['amp'].value/(rabiup.fit_params['amp'].value+rabinoup.fit_params['amp'].value)
    plt.close()
    return rabinoup
def do_population_plot(qubit_info, ef_info, n_avg_rabiup, n_avg_rabinoup, amps, pops_fits, fig_num, QP_injection_delay=None, laser_power = None):
    """Run the rabi-up / rabi-no-up pair used for excited-state population
    extraction and plot both fitted amplitudes over iterations.

    pops_fits -- dict of lists ('rabiupAmp', 'rabiupAmp_err', 'rabinoupAmp',
                 'rabinoupAmp_err') updated in place.
    The no-up run reuses the Rabi period fitted by the up run.
    """
    brick1.set_rf_on(True)  # gate the ef drive source on for both runs
    alz.set_naverages(n_avg_rabiup)
    rabiup = do_rabiup(qubit_info, ef_info, amps, QP_injection_delay, laser_power = laser_power)
    if rabiup != None:
        pops_fits['rabiupAmp'].append(abs(rabiup.fit_params['amp'].value))
        pops_fits['rabiupAmp_err'].append(rabiup.fit_params['amp'].stderr)
        plt.figure(fig_num).show()
#        plt.clf()
        plt.subplot(211).axis(xmin=-len(pops_fits['rabiupAmp'])*0.10, xmax=len(pops_fits['rabiupAmp'])*1.10, ymin=min(pops_fits['rabiupAmp'])*0.7, ymax=max(pops_fits['rabiupAmp'])*1.3)
        plt.errorbar(range(len(pops_fits['rabiupAmp'])), pops_fits['rabiupAmp'], pops_fits['rabiupAmp_err'], fmt='b^')
        #plt.xlabel("Measurement iterations")
        plt.ylabel("Rabiup")
    alz.set_naverages(n_avg_rabinoup)
    # Pin the no-up Rabi period to the one just fitted by the up run.
    rabinoup = do_rabinoup(qubit_info, ef_info, amps, force_period=rabiup.fit_params['period'].value, QP_injection_delay=QP_injection_delay, laser_power = laser_power)
    if rabinoup != None:
        pops_fits['rabinoupAmp'].append(abs(rabinoup.fit_params['amp'].value))
        pops_fits['rabinoupAmp_err'].append(rabinoup.fit_params['amp'].stderr)
        #population.append(population)
        plt.figure(fig_num).show()
        plt.subplot(212).axis(xmin=-len(pops_fits['rabinoupAmp'])*0.10, xmax=len(pops_fits['rabinoupAmp'])*1.10, ymin=0.0, ymax=max(pops_fits['rabinoupAmp'])*2.0)
        plt.errorbar(range(len(pops_fits['rabinoupAmp'])), pops_fits['rabinoupAmp'], pops_fits['rabinoupAmp_err'], fmt='go')
        plt.xlabel("Measurement iterations")
        plt.ylabel("Rabinoup")
    brick1.set_rf_on(False)  # gate the ef drive back off
'''
def do_qubitSSBspec()
from scripts.single_qubit import ssbspec
qubitSSBspec = ssbspec.SSBSpec(qubit_info, np.linspace(-3e6, 3e6, 51), plot_seqs=False)
qubitSSBspec.measure()
return qubitSSBspec
'''
|
4,498 | 2bc20f3410d068e0592c8a45e3c13c0559059f24 | #Use bisection search to determine square root
def square_calculator(user_input):
    """Approximate the square root of user_input by bisection.

    user_input -- the non-negative number to take the square root of

    Returns (guess, counter): the approximated root and the number of
    bisection iterations used.

    Raises ValueError for negative input (the original looped forever).

    Fix: the search bracket is [0, max(user_input, 1)].  The original
    bracket [0, user_input] fails for inputs in (0, 1), whose square root
    is LARGER than the input and therefore lies outside the bracket.
    """
    if user_input < 0:
        raise ValueError("cannot take the square root of a negative number")
    precision = .000000000001
    counter = 0
    low = 0
    high = max(user_input, 1.0)  # guarantees sqrt(user_input) is in [low, high]
    guess = (low + high) / 2.0
    while abs(guess**2 - user_input) >= precision:
        if guess**2 > user_input:
            high = guess
        if guess**2 < user_input:
            low = guess
        guess = (low + high) / 2.0
        counter += 1
    return (guess, counter)
# Interactive driver: repeatedly prompt for a number and report its square
# root.  float() (was int()) accepts fractional input, which the bisection
# routine handles; int() silently truncated it.
while True:
    user_input = float(input("Enter a number: "))
    answer, counter = square_calculator(user_input)
    print("The square root of", user_input, "is", round(answer,6))
    print("It took", counter, "guesses to figure it out.")
4,499 | 4f0a0089ad128edca3052da58a4c71f935592e25 | import sys
from arguments_parser import parse_args
from open_ldap import OpenLdap
from csv_parser import parse_csv, random_password
from smtp_mail import SmtpServer
def create_user(open_ldap, smtp, entries):
    """Insert one user into LDAP and, on success, email the credentials.

    open_ldap -- object exposing ldap_insert(entries) -> bool
    smtp      -- object exposing send_email(entries)
    entries   -- dict with the account fields (name, lastname, email, password)

    Returns True when the user was created and the mail sent, False
    otherwise.  Fix: the exception path previously returned bare None;
    it now returns False so the return type is consistently bool.
    """
    try:
        if open_ldap.ldap_insert(entries):
            smtp.send_email(entries)
            return True
        else:
            return False
    except Exception as e:
        # Broad catch is deliberate: one bad row must not abort the batch.
        print('ERROR - ', e)
        return False
def run(args):
    """Create an LDAP account for every row of the input .csv file.

    Builds one OpenLdap connection and one SMTP client from *args*, then
    walks the csv rows: each row gets a freshly generated password, is
    inserted via create_user, and counts toward the total on success.

    Returns a human-readable summary string, or an error string as soon
    as a required csv header is missing.
    """
    ldap_client = OpenLdap(args.user,
                           args.password,
                           args.address)
    mailer = SmtpServer(args.smtp_host,
                        args.port,
                        args.email,
                        args.email_password)
    record = {}
    created = 0
    for row in parse_csv(args.file):
        try:
            record['name'] = row['name']
            record['lastname'] = row['lastname']
            record['email'] = row['email']
        except KeyError as missing:
            return "ERROR - Missing '{}' csv header".format(missing)
        record['password'] = random_password()
        created += 1 if create_user(ldap_client, mailer, record) else 0
    return "INFO - Finished. Total of {} user(s) created".format(created)
def main():
    """Entry point: parse the CLI arguments, run the import, print the
    summary, and exit with status 0."""
    print(run(parse_args()))
    return 0


if __name__ == "__main__":
    sys.exit(main())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.