text stringlengths 38 1.54M |
|---|
from flask import Flask, render_template, request, redirect, url_for, session,\
send_file
from forms import DateForm, InstructorForm, StudentForm
import datetime
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import os
app = Flask(__name__)
# NOTE(review): hard-coded secret key checked into source; move to
# configuration/environment before deploying.
app.config['SECRET_KEY'] = 'manchas solares'
# Absolute directory of this module; used to locate static files and data.
fpath = os.path.dirname(os.path.realpath(__file__))
# Uncomment at deploy
'''
class WebFactionMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = '/projects/sunspot'
return self.app(environ, start_response)
app.wsgi_app = WebFactionMiddleware(app.wsgi_app)
'''
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: show the sunspot image for the selected date.

    The chosen date is stored in the session as an ordinal.  GET clears
    the session and renders today's image (or yesterday's when today's
    has not been published yet).
    """
    stored = session.get('date')
    date = datetime.date.fromordinal(stored) if stored else datetime.date.today()
    form = DateForm(year=date.year, month=date.month, day=date.day)
    if request.method == 'POST':
        # NOTE(review): the form data is used without validate_on_submit();
        # behavior preserved here -- confirm DateForm guarantees valid ints.
        date = datetime.date(form.year.data,
                             form.month.data,
                             form.day.data)
        session['date'] = date.toordinal()
        return redirect(url_for('index'))
    # GET: start from a clean session on every visit.
    session.clear()
    img = get_img(date)
    if not img:
        # Today's drawing may not exist yet; fall back to the day before.
        img = get_img(date - datetime.timedelta(days=1))
    return render_template('index.html', form=form, date=date, img=img)
@app.route('/student', methods=['GET', 'POST'])
def student():
    """Student page: collect observation dates and show matching images."""
    form = StudentForm()
    if request.method == 'POST':
        if not form.validate_on_submit():
            return render_template('student.html', form=form)
        session['number_of_dates'] = form.number_of_dates.data
        if form.dates:
            ordinals = []
            images = []
            for entry in form.dates:
                chosen = datetime.date(entry.year.data,
                                       entry.month.data,
                                       entry.day.data)
                ordinals.append(chosen.toordinal())
                images.append(get_img(chosen))
            session["dates"] = ordinals
            session["images"] = images
        return redirect(url_for('student'))
    # GET: rebuild the form and image list from the session.
    if session.get('number_of_dates'):
        form.add_dates(session.get('number_of_dates'))
        form.number_of_dates.data = session.get('number_of_dates')
    stored = session.get('dates')
    dates = [datetime.date.fromordinal(o) for o in stored] if stored else None
    images = session.get('images')
    if images:
        images = zip(dates, images)
    return render_template('student.html', form=form, images=images)
@app.route('/instructor', methods=['GET', 'POST'])
def instructor():
    """Instructor page: generate random observation dates with official
    sunspot numbers (SSN), plot them, and offer the data as a zip.

    POST with generate_csv set: write instructor/student CSVs, bundle
    them with the saved figure via zip/zipnote, and send the archive.
    POST otherwise: draw random dates, look up SSNs, plot, and stash
    everything in the session.  GET: re-render from the session.
    """
    form = InstructorForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            if form.generate_csv.data:
                dates = [datetime.date.fromordinal(d)
                         for d in session.get('dates')]
                ssn = session.get('ssn')
                # One row per (student, date); students repeated to line
                # up with the dates list.
                students = session.get('students') * \
                    session.get('dates_per_student')
                instructor_rows = ["{0},{1},{2}\n".format(d, st, s)
                                   for d, s, st in zip(dates, students, ssn)]
                student_rows = ["{0},{1}\n".format(d, s)
                                for d, s in zip(dates, students)]
                fname = session.get('fname').split('.')[0]
                ifname = os.path.join(fpath, 'static', 'files', fname + 'i.csv')
                sfname = os.path.join(fpath, 'static', 'files', fname + 's.csv')
                with open(ifname, 'w') as out:
                    out.write('Date,Official SSN,Student\n' + ''.join(instructor_rows))
                with open(sfname, 'w') as out:
                    out.write('Date,Student\n' + ''.join(student_rows))
                figname = os.path.join(fpath, 'static', 'files', fname + '.png')
                zname = os.path.join(fpath, 'static', 'files', fname + '.zip')
                # NOTE: fname is a server-generated timestamp, so no
                # user-controlled input reaches the shell; the stdlib
                # zipfile module would still be safer than os.system.
                os.system("zip -j {0} {1} {2} {3}".
                          format(zname, ifname, sfname, figname))
                os.system("printf '@ " + fname +
                          "i.csv\n@=instructor.csv\n' | zipnote -w " + zname)
                os.system("printf '@ " + fname +
                          "s.csv\n@=students.csv\n' | zipnote -w " + zname)
                os.system("printf '@ " + fname +
                          ".png\n@=date_vs_ssn.png\n' | zipnote -w " + zname)
                # attachment_filename was renamed download_name in Flask 2.0;
                # kept for compatibility with the installed Flask version.
                return send_file(zname, as_attachment=True,
                                 attachment_filename="sunspot_project.zip")
            ns = form.number_of_students.data
            dps = form.dates_per_student.data
            # Jitter the start by up to a week either way.  randint's upper
            # bound is exclusive (hence 8); replaces the deprecated/removed
            # np.random.random_integers(-7, 7).
            nd = int(np.random.randint(-7, 8))
            xi = datetime.date(2001, 10, 1 + 7) + datetime.timedelta(days=nd)
            xf = datetime.date(2013, 9, 30)
            dd = (xf - xi) / (ns * dps)
            dates = [xi + i * dd for i in range(ns * dps)]
            ssn_file = os.path.join(fpath, 'ISSN_D_tot.csv')
            ssd = np.genfromtxt(ssn_file, delimiter=',').transpose()
            ssn = [str(get_ssn(dates[i], ssd)) for i in range(ns * dps)]
            plt.figure(figsize=(5, 4))
            plt.plot(dates, ssn, 'bo')
            plt.xlabel('Date')
            plt.ylabel('Sunspot number')
            plt.ylim(0., 200.)
            plt.tight_layout()
            fname = datetime.datetime.now().strftime("%Y%m%d-%H%M%S.png")
            ssn_fig = os.path.join(fpath, 'static', 'files', fname)
            plt.savefig(ssn_fig)
            plt.close()
            session['number_of_students'] = ns
            session['students'] = [str(i + 1) for i in range(ns)]
            session['dates_per_student'] = dps
            session['ssn_fig'] = 'static/files/' + fname
            session['fname'] = fname
            session['dates'] = [date.toordinal() for date in dates]
            session['ssn'] = ssn
            return redirect(url_for('instructor'))
        else:
            return render_template('instructor.html', form=form, ssn_fig=None)
    elif request.method == 'GET':
        form.number_of_students.data = session.get('number_of_students')
        form.dates_per_student.data = session.get('dates_per_student')
        return render_template('instructor.html', form=form,
                               ssn_fig=session.get('ssn_fig'))
@app.route('/help')
def help():
    """Render the static help page.

    The name shadows the builtin ``help`` but doubles as the Flask
    endpoint name, so it is kept.
    """
    return render_template('help.html')
import httplib
def exists(site, path):
    """Return True if ``HEAD http://<site><path>`` answers with status 200.

    The connection is always closed, even when the request raises
    (the original leaked the connection on any network error).
    """
    conn = httplib.HTTPConnection(site)
    try:
        conn.request('HEAD', path)
        response = conn.getresponse()
    finally:
        conn.close()
    return response.status == 200
def get_img(date):
    """Return the URL of the SOHO sunspot drawing for ``date``, or None.

    Pictures exist from 2001-10-01 to 2011-01-13 and from 2011-03-07 on;
    images from the later period use the 512px naming scheme.
    """
    site = "sohowww.nascom.nasa.gov"
    location = "/data/synoptic/sunspots/"
    stamp = str(date).replace("-", "")
    # Written as (2011, 3, 7): the original (2011, 03, 07) relies on
    # Python 2 leading-zero literals and is a SyntaxError on Python 3.
    if date >= datetime.date(2011, 3, 7):
        fname = "sunspots_512_" + stamp + ".jpg"
    else:
        fname = "sunspots_" + stamp + ".jpg"
    if exists(site, location + fname):
        return "http://" + site + location + fname
    return None
def get_ssn(date, ssd):
    """Look up the official sunspot number for ``date`` in the SSN table.

    ``ssd`` is the transposed ISSN_D_tot array: row 0 is the year, row 1
    the month, row 2 the day, and row 4 the sunspot number.  Returns
    None when the date is not present in the table.
    """
    years, months, days = ssd[0], ssd[1], ssd[2]
    mask = (years == date.year) & (months == date.month) & (days == date.day)
    return ssd[4][mask][0] if mask.any() else None
|
#! /usr/bin/env python3
import xml.etree.ElementTree as ET
import sys
import argparse
import json
# Command-line interface: `age` is the threshold on the relative connection
# timestamp; `file` is a path to the input, or '-' for stdin.
# NOTE(review): the help text says "Xml file" but read_json parses JSON --
# confirm which format is actually expected.
arg_parse = argparse.ArgumentParser()
arg_parse.add_argument('age', help='The maximum uptime to filter',type=int)
arg_parse.add_argument('file', help='Xml file to read. Use - for stdin')
args = arg_parse.parse_args()
# Read from stdin if - is used
def read_json(file_path):
    """Parse JSON from ``file_path``, or from stdin when it is '-'.

    Returns the decoded object, or None when the file is missing or the
    input is not valid JSON; errors are reported on stderr.  (The
    original used a bare ``except`` around the stdin path and printed
    the error to stdout.)
    """
    try:
        if file_path == '-':
            return json.load(sys.stdin)
        with open(file_path) as content:
            return json.load(content)
    except FileNotFoundError as e:
        sys.stderr.write('Error opening file: {}\n'.format(e))
    except json.JSONDecodeError as e:
        sys.stderr.write('Invalid JSON input: {}\n'.format(e))
    return None
def find_longrunning(json_data, age):
    """Print ``pid relative_timestamp`` for clients connected less than
    ``age`` (relative-timestamp units).

    Tolerates ``json_data`` being None (read_json failure).  The original
    referenced ``thread``/``stats`` in the except handler before they
    were ever bound, raising NameError when json_data itself was bad.
    """
    thread = stats = None
    try:
        for thread, stats in (json_data or {}).items():
            for client in stats['active_clients'].values():
                ts = client['connected_at']['relative_timestamp']
                if ts < age:
                    print(stats['pid'], ts)
    except (AttributeError, TypeError) as e:
        sys.stderr.write('Key/Val not iterable {}:{} : {}'.format(thread, stats, e))
# Entry point: parse the input file (or stdin) and report clients whose
# relative connection timestamp is below the requested threshold.
find_longrunning(read_json(args.file), args.age)
# tree = ET.parse(source)
# root = tree.getroot()
#
# for child in root.findall('supergroups/supergroup/group/processes/process'):
# print(child.find('pid').text, child.find('uptime').text)
# data = json.load(source)
#
# for thread in data.items():
# for thing in thread:
# print(k, v)
# # for attribute, value in thread.iter():
# # print(attribute, value)
|
# modified from https://pypi.org/project/full-width-to-half-width
# Translation tables between printable ASCII (0x21-0x7E) and the
# corresponding full-width code points (offset by 0xFEE0).
FULL_TO_HALF_TABLE = {code + 0xFEE0: code for code in range(0x21, 0x7F)}
HALF_TO_FULL_TABLE = {code: code + 0xFEE0 for code in range(0x21, 0x7F)}


def f2h(string: str) -> str:
    """Convert full-width characters in ``string`` to half-width."""
    return string.translate(FULL_TO_HALF_TABLE)


def h2f(string: str) -> str:
    """Convert half-width characters in ``string`` to full-width."""
    return string.translate(HALF_TO_FULL_TABLE)
|
from django.db.models.signals import post_save, pre_delete, post_delete
from django.dispatch import receiver
from problems.models import *
import logging
import functools

logger = logging.getLogger(__name__)


def skip_signal_if_required(func):
    """Decorator for Django signal handlers.

    Skips the handler when the instance carries a truthy ``skip_signals``
    attribute, and logs (rather than propagates) any exception the
    handler raises.  The original ``logger.error(e, e)`` passed the
    exception as a %-format argument with no placeholder, which itself
    raised a logging error; ``functools.wraps`` preserves the handler's
    metadata for dispatch_uid/debugging.
    """
    @functools.wraps(func)
    def wrapper(sender, instance, **kwargs):
        if getattr(instance, "skip_signals", False):
            return
        try:
            func(sender, instance, **kwargs)
        except Exception:
            logger.exception("signal handler %s failed", func.__name__)
    return wrapper
@receiver(post_save, sender=Solution, dispatch_uid="invalidate_testcase_solution")
@receiver(pre_delete, sender=Solution, dispatch_uid="invalidate_testcase_solution_delete")
@skip_signal_if_required
def invalidate_testcase_on_solution_change(sender, instance, **kwargs):
    """Invalidate every testcase of the problem that uses this solution."""
    affected = [tc for tc in instance.problem.testcase_set.all()
                if tc.solution == instance]
    for tc in affected:
        tc.invalidate()
@receiver(post_save, sender=Validator, dispatch_uid="invalidate_testcase_validator")
@skip_signal_if_required
def invalidate_testcase_on_validator_change(sender, instance, **kwargs):
    """Invalidate every testcase attached to the changed validator."""
    for tc in instance.testcases:
        tc.invalidate()
# TODO: We can only invalidate validation results here. Should we?
@receiver(post_save, sender=InputGenerator, dispatch_uid="invalidate_testcase_generator")
@receiver(pre_delete, sender=InputGenerator, dispatch_uid="invalidate_testcase_generator_delete")
@skip_signal_if_required
def invalidate_testcase_on_generator_change(sender, instance, **kwargs):
    """Invalidate testcases whose input comes from the changed generator."""
    matching = instance.problem.testcase_set.filter(
        _input_generator_name=instance.name
    )
    for tc in matching:
        tc.invalidate()
@receiver(post_save, sender=Resource, dispatch_uid="invalidate_file_compilation_resource")
@receiver(pre_delete, sender=Resource, dispatch_uid="invalidate_file_compilation_resource_delete")
@skip_signal_if_required
def invalidate_compiled_on_resource_change(sender, instance, **kwargs):
    """Reset the compilation state of all compilable files of the revision."""
    revision = instance.problem
    reset = dict(compiled_file=None, compilation_task_id=None,
                 compilation_finished=False)
    revision.validator_set.update(**reset)
    revision.checker_set.update(**reset)
    revision.inputgenerator_set.update(**reset)
@receiver(post_save, sender=Grader, dispatch_uid="invalidate_problem_judge_grader")
@receiver(pre_delete, sender=Grader, dispatch_uid="invalidate_problem_judge_grader_delete")
@receiver(post_save, sender=ProblemData, dispatch_uid="invalidate_problem_judge_data")
@skip_signal_if_required
def invalidate_problem_initialization(sender, instance, **kwargs):
    """Drop the cached judge initialization of the affected revision."""
    instance.problem.invalidate_judge_initialization()
@receiver(post_delete, sender=ProblemBranch, dispatch_uid="delete_branch_working_copy")
@skip_signal_if_required
def delete_working_copy_on_branch_delete(sender, instance, **kwargs):
    """Remove the branch's working copy (if any) when the branch is deleted."""
    if instance.has_working_copy():
        instance.working_copy.delete()
|
#!/usr/bin/env python
import logging
class SendPacket():
    """Helper that forwards a packet received from a switch back out."""

    def __init__(self):
        logging.info("Controller is configured to handle packets coming from switch.")

    def send(self, datapath, msg, port, action):
        """Build an OFPPacketOut for ``msg`` and send it via ``datapath``.

        Raw packet data is attached only when the switch did not buffer
        the packet (OFP_NO_BUFFER); otherwise the buffer id suffices.
        """
        parser = datapath.ofproto_parser
        payload = None
        if msg.buffer_id == datapath.ofproto.OFP_NO_BUFFER:
            payload = msg.data
        packet_out = parser.OFPPacketOut(datapath=datapath,
                                         buffer_id=msg.buffer_id,
                                         in_port=port, actions=action,
                                         data=payload)
        datapath.send_msg(packet_out)
import contextlib
@contextlib.contextmanager
def test_context():
    """Context manager printing 'a' on entry and 'b' on exit."""
    print('a')
    yield
    print('b')
def run_context(c):
    """Generator that yields once while holding context manager ``c`` open.

    Prints markers around each stage so the interleaving of generator
    resumption and context entry/exit can be observed.
    """
    print('one')
    with c:
        print('two')
        yield
        print('three')
    print('four')
def test_it():
    """Drive run_context step by step, printing progress markers.

    Demonstrates that the generator body (and the context it opens) only
    runs when next() is called, and that exhaustion raises StopIteration.
    """
    print('U')
    stepper = run_context(test_context())
    print('V')
    next(stepper)
    print('W')
    try:
        next(stepper)
    except StopIteration:
        print('X')
    else:
        print('Y!!!')


test_it()
|
import numpy as np
def min_pooling(img, out_size):
    """Downsample a binary mask to ``out_size`` (height, width).

    An output cell becomes 1 only when more than 95% of the pixels in
    the corresponding input window are set.  The last row/column of
    windows absorbs any leftover pixels so the whole image is covered.
    """
    out_h, out_w = out_size
    src_h, src_w = img.shape[:2]
    stride_y = round(src_h / out_h)
    stride_x = round(src_w / out_w)
    fill_ratio = 1 - 0.05  # required fraction of set pixels per window
    pooled = np.zeros(out_size)
    for row in range(out_h):
        y0 = row * stride_y
        # Edge windows extend to the image border.
        y1 = src_h if row == out_h - 1 else y0 + stride_y
        for col in range(out_w):
            x0 = col * stride_x
            x1 = src_w if col == out_w - 1 else x0 + stride_x
            window = img[y0:y1, x0:x1]
            if window.sum() > fill_ratio * (y1 - y0) * (x1 - x0):
                pooled[row, col] = 1
    return pooled
|
class gbXMLServiceType(Enum, IComparable, IFormattable, IConvertible):
    """Generated stub: the gbXML ``systemType`` attribute, specifying the
    HVAC service for a building or space.

    enum gbXMLServiceType, values: ActiveChilledBeams (22), CentralHeatingConvectors (1), CentralHeatingHotAir (3), CentralHeatingRadiantFloor (2), CentralHeatingRadiators (0), ConstantVolumeDualDuct (20), ConstantVolumeFixedOA (16), ConstantVolumeTerminalReheat (18), ConstantVolumeVariableOA (17), FanCoilSystem (14), ForcedConvectionHeaterFlue (8), ForcedConvectionHeaterNoFlue (9), InductionSystem (15), MultizoneHotDeckColdDeck (19), NoOfServiceTypes (28), NoServiceType (-1), OtherRoomHeater (4), RadiantCooledCeilings (21), RadiantHeaterFlue (5), RadiantHeaterMultiburner (7), RadiantHeaterNoFlue (6), SplitSystemsWithMechanicalVentilation (26), SplitSystemsWithMechanicalVentilationWithCooling (27), SplitSystemsWithNaturalVentilation (25), VariableRefrigerantFlow (24), VAVDualDuct (11), VAVIndoorPackagedCabinet (12), VAVSingleDuct (10), VAVTerminalReheat (13), WaterLoopHeatPump (23)
    """

    def __eq__(self, *args):
        """x.__eq__(y) <==> x == y"""
        pass

    def __format__(self, *args):
        """__format__(formattable: IFormattable, format: str) -> str"""
        pass

    def __ge__(self, *args):
        pass

    def __gt__(self, *args):
        pass

    def __init__(self, *args):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        pass

    def __le__(self, *args):
        pass

    def __lt__(self, *args):
        pass

    def __ne__(self, *args):
        pass

    def __reduce_ex__(self, *args):
        pass

    def __str__(self, *args):
        pass

    # Enum members; values are assigned by the runtime, not this stub.
    ActiveChilledBeams = None
    CentralHeatingConvectors = None
    CentralHeatingHotAir = None
    CentralHeatingRadiantFloor = None
    CentralHeatingRadiators = None
    ConstantVolumeDualDuct = None
    ConstantVolumeFixedOA = None
    ConstantVolumeTerminalReheat = None
    ConstantVolumeVariableOA = None
    FanCoilSystem = None
    ForcedConvectionHeaterFlue = None
    ForcedConvectionHeaterNoFlue = None
    InductionSystem = None
    MultizoneHotDeckColdDeck = None
    NoOfServiceTypes = None
    NoServiceType = None
    OtherRoomHeater = None
    RadiantCooledCeilings = None
    RadiantHeaterFlue = None
    RadiantHeaterMultiburner = None
    RadiantHeaterNoFlue = None
    SplitSystemsWithMechanicalVentilation = None
    SplitSystemsWithMechanicalVentilationWithCooling = None
    SplitSystemsWithNaturalVentilation = None
    value__ = None
    VariableRefrigerantFlow = None
    VAVDualDuct = None
    VAVIndoorPackagedCabinet = None
    VAVSingleDuct = None
    VAVTerminalReheat = None
    WaterLoopHeatPump = None
|
from flask import Flask, request
from flask_restful import Resource, Api
from similarity.normalized_levenshtein import NormalizedLevenshtein
from flask_cors import CORS
from fuzzywuzzy import fuzz
from operator import itemgetter
app = Flask(__name__)
CORS(app)
api = Api(app)
# NOTE(review): mid-file import; kept in place because companies_arr must
# exist before the Resource classes below reference it.
from krs import get_krs_obj
# Company records are loaded once at startup; all endpoints share this list.
companies_arr = get_krs_obj()
class Company(Resource):
    """REST resource: fetch a single company by numeric id."""

    def get(self, company_id):
        """Return the company dict with the given id, or a 404 error.

        Resolves the old TODO: an unknown id now yields an explicit 404
        instead of an empty 200 response.  (Debug print removed.)
        """
        for obj in companies_arr:
            if int(obj['id']) == company_id:
                return obj
        return {'message': 'company {} not found'.format(company_id)}, 404
class Companies(Resource):
    """REST resource: list every known company."""

    def get(self):
        """Return the full, unfiltered company list."""
        return companies_arr
class Search(Resource):
    """REST resource: fuzzy-search companies by name."""

    def get(self, company_name):
        """Return the five companies whose names best match ``company_name``,
        ranked by fuzzy token-set similarity."""
        scored = [(obj, fuzz.token_set_ratio(company_name, obj['nazwa']))
                  for obj in companies_arr]
        scored.sort(key=itemgetter(1), reverse=True)
        return [obj for obj, _score in scored[:5]]
api.add_resource(Company, '/companies/<int:company_id>')
api.add_resource(Companies, '/companies')
api.add_resource(Search, '/search/<string:company_name>')

if __name__ == '__main__':
    # Development server only; debug must stay off in production.
    app.run(debug=True)
|
#!/usr/bin/env python
"""
QC Heads Up Display (HUD)
Displays sensor information from and commands sent to the QC
Created by: Josh Saunders
Date Created: 4/2/2016
Date Modified: 5/2/2016
"""
# Python libraries
from __future__ import print_function
import sys
import cv2
import math
import numpy as np
# We're using ROS here
import rospy
import roslib
from cv_bridge import CvBridge, CvBridgeError
# ROS messages
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
from ardrone_autonomy.msg import Navdata, navdata_altitude
class HUD:
    """Heads-up display for the AR.Drone quadcopter (QC).

    Overlays navdata telemetry, commanded velocities, tag tracking and
    battery state on the camera stream, shows it in an OpenCV window and
    republishes the annotated image on the ``heads_up`` topic.
    """

    def __init__(self, box_top_left, box_bottom_right):
        self.image_pub = rospy.Publisher("heads_up", Image, queue_size=1000)
        self.bridge = CvBridge()
        # Camera stream plus the telemetry/command topics.
        self.image_sub = rospy.Subscriber("ardrone/image_raw", Image,
                                          self.cv_callback)
        self.navdata_sub = rospy.Subscriber("ardrone/navdata", Navdata,
                                            self.navdata_callback)
        self.twist_sub = rospy.Subscriber("cmd_vel", Twist,
                                          self.twist_callback)
        self.sub_altitude = rospy.Subscriber('/ardrone/navdata_altitude',
                                             navdata_altitude,
                                             self.altitude_cb)
        # Tag tracking state, in image coordinates (scaled to 640x360).
        self.tag_acquired = False
        self.tag_x = 0
        self.tag_y = 0
        self.tag_length = 0
        self.tag_width = 0
        self.tag_theta = 0
        # Telemetry shown on the HUD.
        self.altitude = 0
        self.vx = 0
        self.vy = 0
        self.vz = 0
        self.i = 0
        self.twist = Twist()
        self.battery = 0
        self.mode = 0
        self.pwm1 = 0
        self.pwm2 = 0
        self.pwm3 = 0
        self.pwm4 = 0
        self.time = 0
        # Bounding box corners of the target area (tuples).
        self.box_top_left = box_top_left
        self.box_bottom_right = box_bottom_right

    def cv_callback(self, data):
        """Callback for the images streamed from the QC; draws the HUD
        information onto each frame."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        if self.tag_acquired:
            # Crosshairs over the tracked tag.
            self.crosshair(cv_image)
        # Write the telemetry text.
        self.hud_info(cv_image)
        # Draw the target bounding box.
        red = (0, 0, 255)
        cv2.rectangle(cv_image, self.box_top_left, self.box_bottom_right,
                      red, 1)
        cv2.imshow("QC HUD", cv_image)
        cv2.waitKey(3)
        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)

    def navdata_callback(self, data):
        """Callback for the navdata subscriber: cache telemetry and tag
        tracking state for the next frame."""
        self.vx = data.vx
        self.vy = data.vy
        self.vz = data.vz
        # data.tm is in microseconds; convert to seconds.
        self.time = data.tm / 1000000.0
        self.battery = data.batteryPercent
        self.mode = data.state
        self.pwm1 = data.motor1
        self.pwm2 = data.motor2
        self.pwm3 = data.motor3
        self.pwm4 = data.motor4
        if data.tags_count > 0:
            self.tag_acquired = True
            # Tag data arrives on a 1000x1000 grid; scale to the actual
            # 640x360 camera resolution.
            self.tag_x = int(data.tags_xc[0] * 640/1000)
            self.tag_y = int(data.tags_yc[0] * 360/1000)
            self.tag_theta = data.tags_orientation[0]
            self.tag_length = data.tags_height[0] * 360/1000
            self.tag_width = data.tags_width[0] * 640/1000
        else:
            self.tag_acquired = False

    def twist_callback(self, msg):
        """Callback for the twist command subscriber: copy the commanded
        velocities field by field."""
        self.twist.linear.x = msg.linear.x
        self.twist.linear.y = msg.linear.y
        self.twist.linear.z = msg.linear.z
        self.twist.angular.x = msg.angular.x
        self.twist.angular.y = msg.angular.y
        self.twist.angular.z = msg.angular.z

    def altitude_cb(self, msg):
        """Store the raw altitude, converted from mm to m."""
        self.altitude = msg.altitude_raw / 1000.0

    def hud_info(self, cv_image):
        """Display current info (direction of travel, altitude, PWM
        values, tag position, velocity commands, mode, time running)
        about the QC onto the HUD."""
        font = cv2.FONT_HERSHEY_PLAIN
        font_color = (0, 255, 0)
        # From Mike Hamer's ardrone_tutorials package and the
        # ardrone_autonomy package documentation.
        mode = [
            'Emergency', 'Inited', 'Landed', 'Flying', 'Hovering', 'Test',
            'Taking Off', 'Flying', 'Landing', 'Looping'
        ]
        # Build the HUD strings.
        altd = "Altitude: %.3f m" % self.altitude
        tag_pos = "Tag: (%d, %d) px" % (self.tag_x, self.tag_y)
        tag_theta = "Tag Theta: %.1f" % self.tag_theta
        vx_est = "Vx: %.2f mm/s" % self.vx
        vy_est = "Vy: %.2f mm/s" % self.vy
        # vz_est = "Vz: %.2f mm/s" % self.vz
        time = "Time: %f " % self.time
        info = "Sent Velocities"
        linear_x = "Vx: %.3f mm/s" % (self.twist.linear.x / 2500.0)
        linear_y = "Vy: %.3f mm/s" % (self.twist.linear.y / 2500.0)
        linear_z = "Vz: %.3f mm/s" % self.twist.linear.z
        angular_x = "Rx: %.3f rad/s" % self.twist.angular.x
        angular_y = "Ry: %.3f rad/s" % self.twist.angular.y
        angular_z = "Rz: %.3f rad/s" % self.twist.angular.z
        pwm1 = "PWM1: %d" % self.pwm1
        pwm2 = "PWM2: %d" % self.pwm2
        pwm3 = "PWM3: %d" % self.pwm3
        pwm4 = "PWM4: %d" % self.pwm4
        battery = "Battery: %.1f%%" % self.battery
        state = "Mode: %s" % mode[self.mode]
        battery_font_color = self.set_battery_font(60, 30)
        # Top left column.
        cv2.putText(cv_image, altd, (0, 15), font, 1.25, font_color)
        cv2.putText(cv_image, tag_pos, (0, 32), font, 1.25, font_color)
        cv2.putText(cv_image, tag_theta, (0, 48), font, 1.25, font_color)
        cv2.putText(cv_image, vx_est, (0, 64), font, 1.25, font_color)
        cv2.putText(cv_image, vy_est, (0, 80), font, 1.25, font_color)
        # cv2.putText(cv_image, vz_est, (0, 96), font, 1.25, font_color)
        cv2.putText(cv_image, time, (0, 96), font, 1.25, font_color)
        # Bottom left column.
        cv2.putText(cv_image, info, (0, 265), font, 1.25, font_color)
        cv2.putText(cv_image, linear_x, (0, 280), font, 1.25, font_color)
        cv2.putText(cv_image, linear_y, (0, 295), font, 1.25, font_color)
        cv2.putText(cv_image, linear_z, (0, 310), font, 1.25, font_color)
        cv2.putText(cv_image, angular_x, (0, 325), font, 1.25, font_color)
        cv2.putText(cv_image, angular_y, (0, 340), font, 1.25, font_color)
        cv2.putText(cv_image, angular_z, (0, 355), font, 1.25, font_color)
        # Top right column.
        cv2.putText(cv_image, pwm1, (520, 15), font, 1.25, font_color)
        cv2.putText(cv_image, pwm2, (520, 32), font, 1.25, font_color)
        cv2.putText(cv_image, pwm3, (520, 48), font, 1.25, font_color)
        cv2.putText(cv_image, pwm4, (520, 64), font, 1.25, font_color)
        # Bottom right column.
        cv2.putText(cv_image, battery, (440, 340), font, 1.25,
                    battery_font_color)
        cv2.putText(cv_image, state, (440, 355), font, 1.25, font_color)
        # Draw the commanded-velocity vector.
        self.heading(cv_image)

    def crosshair(self, cv_image):
        """Draw a crosshair over the center of the bounding box of the tag."""
        # Vertical line, horizontal line, then the circle.
        cv2.line(cv_image, (self.tag_x, self.tag_y + 25),
                 (self.tag_x, self.tag_y - 25), (255, 255, 0), 2)
        cv2.line(cv_image, (self.tag_x - 25, self.tag_y),
                 (self.tag_x + 25, self.tag_y), (255, 255, 0), 2)
        cv2.circle(cv_image, (self.tag_x, self.tag_y), 10, (255, 255, 0), 2)

    # work in progress
    def heading(self, cv_image):
        """Draw an arrow in the direction the QC is being told to go,
        determined from the Twist command."""
        vx = self.twist.linear.x
        vy = self.twist.linear.y
        # Angle between the commanded velocities.
        # TODO fix this if no correction to heading
        angle = math.atan2(-vx, -vy)
        color = (255, 2, 255)
        center = (520, 270)
        radius = 50
        thickness = 1
        vel_end = (center[0] + int(radius * math.cos(angle)),
                   center[1] + int(radius * math.sin(angle)))
        heading = "Heading"
        font = cv2.FONT_HERSHEY_PLAIN
        # NOTE(review): source indentation was lost; the arrow is drawn
        # only while moving, the label and circle always -- confirm.
        if not (vx == 0 and vy == 0):
            cv2.line(cv_image, center, vel_end, color, thickness)
        cv2.putText(cv_image, heading, (480, 210), font, 1.25, color)
        cv2.circle(cv_image, center, radius, color, thickness)

    def set_battery_font(self, medium, low):
        """Return the battery text color based on charge level:
        green > medium, yellow > low, red otherwise."""
        if self.battery > medium:
            battery_font_color = (0, 255, 0)
        elif self.battery > low:
            battery_font_color = (0, 255, 255)
        else:
            battery_font_color = (0, 0, 255)
        return battery_font_color
def main(args):
    """Start the HUD node and display frames until shutdown."""
    rospy.init_node('hud', anonymous=True)
    # Bounding box of the target area in image coordinates.
    top_left = (280, 157)
    bottom_right = (360, 202)
    hud = HUD(top_left, bottom_right)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main(sys.argv)
|
# Common helper class for dataset preprocessing.
class Dataset(object):
    """Shared normalization helpers for daily stock data.

    Users are expected to provide the column indices (open_idx,
    high_idx, ..., amount_idx) as attributes on this class; the base
    class only implements the arithmetic.  The original normalize_datas
    referenced StockDailySvmModelEvaluator for all but the first column
    -- an apparent copy-paste leftover from the class this was extracted
    from; every call now goes through Dataset consistently.
    """

    # Column-index attribute names normalized by normalize_datas, in the
    # original call order.
    _NORMALIZED_COLUMNS = ('open_idx', 'high_idx', 'low_idx', 'close_idx',
                           'pre_close_idx', 'amt_chg_idx', 'pct_chg_idx',
                           'vol_idx', 'amount_idx')

    @staticmethod
    def normalize_data(datas, idx, mus, stds):
        """Z-score column ``idx`` of ``datas`` in place:
        (x - mean) / standard deviation."""
        datas[:, idx:idx + 1] = (datas[:, idx:idx + 1] - mus[idx]) / stds[idx]

    @staticmethod
    def normalize_datas(datas, mus, stds):
        """Normalize the open/high/low/close/pre-close/amount-change/
        percent-change/volume/amount columns of ``datas`` in place."""
        for attr in Dataset._NORMALIZED_COLUMNS:
            Dataset.normalize_data(datas, getattr(Dataset, attr), mus, stds)
from typing import List
class Solution:
    """LeetCode 741 (Cherry Pickup).

    Two walkers move simultaneously from (0, 0) to (n-1, n-1); since
    both take the same number of steps, the state is fully described by
    (r1, c1, r2) with c2 = r1 + c1 - r2, which is what the memo keys on.
    """

    def __init__(self):
        self.cache = {}

    def cherryPickup(self, grid: List[List[int]]) -> int:
        """Maximum cherries collectable on the round trip (0 if blocked)."""
        return max(0, self.F(grid, 0, 0, 0))

    def F(self, grid, r1, c1, r2):
        """Best cherries obtainable from walker states (r1,c1)/(r2,c2)
        to the corner; -inf when either cell is a thorn or out of bounds."""
        key = (r1, c1, r2)
        if key in self.cache:
            return self.cache[key]
        n = len(grid)
        c2 = r1 + c1 - r2  # both walkers took r1+c1 steps so far
        best = float("-inf")
        if (0 <= r1 < n and 0 <= c1 < n and 0 <= r2 < n and 0 <= c2 < n
                and grid[r1][c1] != -1 and grid[r2][c2] != -1):
            best = grid[r1][c1]
            if r1 != r2:
                # Distinct cells: the second walker's cherry also counts.
                best += grid[r2][c2]
            if not (r1 == n - 1 and c1 == n - 1):
                best += max(
                    self.F(grid, r1 + 1, c1, r2 + 1),
                    self.F(grid, r1 + 1, c1, r2),
                    self.F(grid, r1, c1 + 1, r2 + 1),
                    self.F(grid, r1, c1 + 1, r2),
                )
        self.cache[key] = best
        return best
if __name__ == "__main__":
assert Solution().cherryPickup(
[[0, 1, -1],
[1, 0, -1],
[1, 1, 1]]
) == 5
assert Solution().cherryPickup(
[[1, 1, -1],
[1, -1, 1],
[-1, 1, 1]]
) == 0
|
# Generated by Django 2.2.4 on 2019-08-16 19:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Set the default starting cash for users to 5000.0."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='current_cash',
            field=models.FloatField(default=5000.0),
        ),
    ]
|
from django import forms
import pandas as pd
import numpy as np
from multiselectfield import MultiSelectField
from .models import EnrollmentApplication,VizInfoModel
from django import forms
class UploadFileForm(forms.Form):
    """Simple upload form: a short title plus the file itself."""
    title = forms.CharField(max_length=50)
    file = forms.FileField()
class VizInfoForm(forms.ModelForm):
    """ModelForm over VizInfoModel whose 'tog' choices are supplied by the
    caller at construction time.

    ``choice`` is an iterable of (value, label) pairs.  The debug prints
    the original ran on every instantiation have been removed.
    """

    class Meta:
        model = VizInfoModel
        fields = '__all__'

    def __init__(self, choice, *args, **kwargs):
        super(VizInfoForm, self).__init__(*args, **kwargs)
        self.fields['tog'].choices = choice
        # self.fields['vis'].choices = choice
class CountryForm(forms.ModelForm):
    """Enrollment form exposing only the childcare-reasons field, whose
    choices are provided by the caller."""

    class Meta:
        model = EnrollmentApplication
        fields = [
            'reasons_for_childcare',
        ]

    def __init__(self, choice, *args, **kwargs):
        super(CountryForm, self).__init__(*args, **kwargs)
        self.fields['reasons_for_childcare'].choices = choice
class CountryForm2(forms.Form):
    """Experimental multi-select form for childcare reasons.

    The various draft choice tuples and the DataFrame scratch data are
    kept as class attributes for reference; only CHILDCARE_REASONS feeds
    the actual field.  The import-time debug prints the original ran
    when this class was defined have been removed.
    """
    OPTIONS = (
        ("abc","abc"),
        ("abc", "abc"),
    )
    # (label, value) order -- reversed relative to what Django expects.
    CHILDCARE_REASONS1 = (('Working', 'working'), ('Training', 'training'), ('Teen Parent', 'teen_parent'),
                          ('Working W/Child With A Disability', 'child_disability'),
                          ('Adult W/Disability', 'adult_disability'))
    # (value, label) order, as Django expects.
    CHILDCARE_REASONS = (('working', 'Working'),
                         ('training', 'Training'),
                         ('teen_parent', 'Teen Parent'),
                         ('child_disability', 'Working W/Child With A Disability'),
                         ('adult_disability', 'Adult W/Disability'))
    # Scratch data for building choices from a DataFrame; unused by the field.
    df = pd.DataFrame({'a': ['AUT', 'DEU', 'NLD', 'IND', 'JPN', 'CHN'],
                       'b': ['Austria', 'Germany', 'Netherland', 'India', 'Japan', 'China']})
    lstOptions = str(df.values.tolist())
    strOptions = (str(lstOptions).replace('[', '(')).replace(']', ')')
    Countries = MultiSelectField(choices=CHILDCARE_REASONS)
class CountryFormold(forms.Form):
    """Legacy form: builds a checkbox multiple-choice field at init time."""

    def __init__(self, *args, **kwargs):
        # Bug fix: super() previously named CountryForm, which raises
        # TypeError because CountryFormold is not a CountryForm subclass.
        super(CountryFormold, self).__init__(*args, **kwargs)
        self.fields['my_choice_field'] = forms.MultipleChoiceField(
            widget=forms.CheckboxSelectMultiple, choices=get_my_choices())
class CountryFormold1(forms.Form):
    """Legacy experiment kept for reference; see CountryForm instead."""

    OPTIONS = (
        # NOTE(review): these are the builtin dict's unbound method objects,
        # almost certainly not the intended choices -- the original shadowed
        # the builtin `dict` with get_my_choices() (a list, which has no
        # .keys/.values).  Left as-is pending a real choices source.
        (dict.keys, dict.values),
        ("abc","abc"),
    )
    Countries = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,choices=OPTIONS)

    def __init__(self, *args, **kwargs):
        choices = get_my_choices()  # renamed from `dict` (shadowed builtin)
        # Bug fix: super() previously named CountryForm (wrong class),
        # raising TypeError on instantiation.
        super(CountryFormold1, self).__init__(*args, **kwargs)
class CountryFormold(forms.Form):
    """Duplicate legacy definition; shadows the earlier CountryFormold."""

    def __init__(self, *args, **kwargs):
        # Bug fix: super() previously named CountryForm (wrong class).
        super(CountryFormold, self).__init__(*args, **kwargs)
        self.fields['my_choice_field'] = forms.MultipleChoiceField(
            widget=forms.CheckboxSelectMultiple, choices=get_my_choices())
#
# def get_my_choices():
# df = pd.DataFrame({'a': ['AUT', 'DEU', 'NLD', 'IND', 'JPN', 'CHN'],
# 'b': ['Austria', 'Germany', 'Netherland', 'India', 'Japan', 'China']})
# lstOptions = str(df.values.tolist())
# strOptions = (str(lstOptions).replace('[', '(')).replace(']]', '),)').replace('],', '),')
# #print("df")
# #print(df)
# #return df
class CountryForm1(forms.Form):
    """Static example: checkbox selection over three hard-coded countries."""
    OPTIONS = (
        ("AUT", "Austria"),
        ("DEU", "Germany"),
        ("NLD", "Neitherlands"),
    )
    Countries = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,
                                          choices=OPTIONS)
def get_my_choices():
    """Placeholder choice source; returns a single dummy entry."""
    return ["abc"]
class CountryFormold(forms.Form):
    """Final legacy duplicate; uses MultiSelectField for the choices."""

    def __init__(self, *args, **kwargs):
        # Bug fix: super() previously named CountryForm (wrong class).
        super(CountryFormold, self).__init__(*args, **kwargs)
        self.fields['my_choice_field'] = MultiSelectField(choices=get_my_choices())
|
'''
The purpose of this function is to simulate and assess the performance of a 4 stock portfolio
Inputs:
- Start Date
- End Date
- Symbols for the equities (eg. GOOG, AAPL, GLD, XOM)
- Allocations to the equities at the beginning of the simulation (e.g. 0.2, 0.3, 0.4, 0.1)
Outputs:
- Standard deviation of daily returns of the total portfolio
- Average daily return of the total portfolio
- Sharpe ratio (252 trading days in a year at risk free rate = 0) of the total portfolio
- Cumulative return of the total portfolio
Example execution:
vol, daily_ret, sharpe, cum_ret = simulate(startdate, enddate, ['GOOG', 'AAPL', 'GLD', 'XOM'], [0.2, 0.3, 0.4, 0.1])
'''
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd

# Equities being analyzed.  "APPL" was almost certainly a typo for Apple's
# ticker "AAPL" (the module docstring uses AAPL); fixed here.
ls_symbols = ["GOOG", "AAPL", "GLD", "XOM"]
dt_start = dt.datetime(2006, 1, 1)    # simulation start date
dt_end = dt.datetime(2010, 12, 31)    # simulation end date
dt_timeofday = dt.timedelta(hours=16)  # 16:00 = data as of the daily close
# NYSE trading days (at closing time) between the start and end dates.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
c_dataobj = da.DataAccess('Yahoo')  # read from the Yahoo data source
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
ldf_data = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))  # key -> DataFrame for easy access
na_price = d_data["close"].values  # 2D array of closing prices
plt.clf()  # clear any previous matplotlib figure
plt.plot(ldt_timestamps, na_price)
plt.legend(ls_symbols)
plt.ylabel('Adjusted Close')
plt.xlabel('Date')
plt.savefig('adjustedclose.pdf', format='pdf')
# Normalize every day's prices by the first day's prices.  The original
# divided row 0 by itself, which always yields an array of ones.
na_normalized_price = na_price / na_price[0, :]
for row in na_normalized_price:
    print(row)  # print() works on both Python 2 and 3
# daily returns for day t: ret(t) = price(t)/price(t-1) - 1
na_rets = na_normalized_price.copy()
returns = tsu.returnize0(na_rets)
print(returns)
|
"""The base Controller API
Provides the BaseController class for subclassing.
"""
import logging
from pylons.controllers import WSGIController
from pylons.templating import render_mako as render
from paste.deploy.converters import aslist
from ppdi.model.meta import Session
from ppdi.model import ModeratorPin
from pylons import session, url, config
from pylons.controllers.util import redirect, abort
log = logging.getLogger(__name__)
class BaseController(WSGIController):
    """Base controller: optional login enforcement and per-request DB session cleanup."""

    # Subclasses set this to True to force a login before any action runs.
    requires_auth = False

    def __before__(self):
        """Redirect unauthenticated users to the login page, remembering where they came from."""
        if self.requires_auth and not session.get('logged_in'):
            if session.get('after_login') is None:
                session.clear()
                # Avoid a redirect loop: never store the login page itself
                # as the post-login return target.
                if url.current() != url(controller='auth/login', action='index'):
                    session['after_login'] = url.current()
                else:
                    session['after_login'] = url('/')
                session.save()
            redirect(url(controller='auth/login', action='index'))

    def __call__(self, environ, start_response):
        """Invoke the Controller"""
        # WSGIController.__call__ dispatches to the Controller method
        # the request is routed to. This routing information is
        # available in environ['pylons.routes_dict']
        try:
            return WSGIController.__call__(self, environ, start_response)
        finally:
            # Always release the scoped SQLAlchemy session, even on error.
            Session.remove()

    def is_my_pin(self, pin):
        """Checks if this is my pin, if not, raise a 403"""
        if self.checkAdmin(asbool=True):
            return  # admins may view any pin
        obj = Session.query(ModeratorPin).get(pin)
        if obj is not None and session.get('username') != obj.username:
            abort(403, 'You are not allowed to view this page.')

    def checkAdmin(self, asbool=False):
        """Check if user is really an admin"""
        # Bug fix: '+' concatenation raised TypeError when either session key
        # was missing (None); %-formatting tolerates None safely.
        user_at_domain = '%s@%s' % (session.get('username'), session.get('domain'))
        if user_at_domain in aslist(config.get('adminList'), ','):
            if asbool:
                return True
            return
        elif asbool:
            return False
        # Typo fix in log message: 'acess' -> 'access'.
        log.warning('User %s tried to access the admin site without authorization' % session.get('username'))
        abort(403)
|
import logging
from DMS.ipredictor.models import ANN, ANNI
from DMS.ipredictor import tools
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    data = tools.data_reader('WTI.xlsx', intervals=True, resample=False)
    # training set, validation set, testing set partition (6/2/2)
    train, valid, test = data[:-120], data[-120:-60], data[-60:]
    X = data['values'].values.tolist()
    # Each sample holds an interval pair [low, high]; flatten to 1-D.
    mixed = []
    for i in range(len(X)):
        mixed.append(X[i][0])
        mixed.append(X[i][1])
    X = np.array(mixed)
    temp = np.array(X).reshape((len(X), 1))
    # Standardization: fit only on the training portion (2 values per sample).
    scaler = StandardScaler()
    _ = scaler.fit(temp[:2 * len(train)])
    X = scaler.transform(temp)
    # Re-pack standardized values into per-sample pairs, indexed like the source.
    norm_X = []
    for i in range(0, len(X), 2):
        norm_X.append(X[i:i + 2, 0])
    norm_X = pd.DataFrame.from_dict({'values': norm_X})
    norm_X = norm_X.set_index(data.index)
    norm_train, norm_valid, norm_test = norm_X[:len(train)], norm_X[len(train):len(train) + len(valid)], \
        norm_X[-len(test):]
    lb = [6, 12, 24]  # Hyper-parameter "lookback" range setting
    h_n = [10, 20, 30, 40, 50]  # Hyper-parameter "hidden_neurons" range setting
    seed = [7, 12, 5, 20, 28]  # Seed setting, used for experiment replication
    # result[j][l*len(lb)+k] = validation MAPE for (hidden_neurons j, seed l, lookback k).
    result = np.zeros((len(h_n), (len(lb) * len(seed))))
    for l in range(len(seed)):
        np.random.seed(seed[l])
        for j in range(len(h_n)):
            for k in range((len(lb))):
                lookback = lb[k]
                model = ANNI(norm_train, lookback=lookback, hidden_neurons=h_n[j])
                prediction1 = model.predict(steps=1)  # Training
                # Validation window needs `lookback` trailing training samples as context.
                testX = norm_train[-lookback:]
                testX = testX.append(norm_valid)
                # Transform dataframe to array (flatten interval pairs again).
                testX = testX['values'].values.tolist()
                mixed = []
                for i in range(len(testX)):
                    mixed.append(testX[i][0])
                    mixed.append(testX[i][1])
                testX = np.array(mixed)
                testX = np.array(testX).reshape((len(testX), 1))
                # Sliding windows of 2*lookback flattened values.
                testingX = []
                for i in range(0, len(testX) - lookback * 2, 2):
                    shift = i + lookback * 2
                    testingX.append(testX[i:shift, 0])
                testingX = np.array(testingX)
                # Input testingX must be an array
                prediction = model.model.predict(testingX)  # Testing
                prediction = scaler.inverse_transform(prediction)
                prediction_df = valid.copy(deep=True)
                for i in range(len(prediction_df)):
                    # NOTE(review): assigning to Series.values looks suspect —
                    # confirm this actually writes back into the dataframe.
                    prediction_df.iloc[i].values = np.array([[prediction[i][0]], [prediction[i][1]]])
                result[j][l * len(lb) + k] = ANNI.mape(valid, prediction_df)
    print(result)
    data = pd.DataFrame(result)
    writer = pd.ExcelWriter('ANNhpRaw.xlsx')
    data.to_excel(writer, 'page_1', float_format='%.5f')
    writer.save()
    # Bug fix: 'writer.close' referenced the bound method without calling it,
    # so the writer was never closed.
    writer.close()
# fast IO
import sys

input = sys.stdin.readline


def print(x, end='\n'):
    """Shadow the builtin: a single direct stdout write is faster for judges."""
    text = str(x) + end
    sys.stdout.write(text)
# IO helpers
def get_int():
    """Parse the next input line as a single integer."""
    line = input()
    return int(line)
def get_list_ints():
    """Parse the next input line as a list of whitespace-separated ints."""
    return [int(tok) for tok in input().split()]
def get_char_list():
    """Read a line and return its characters, dropping the trailing newline."""
    line = input()
    return list(line[:-1])
def get_tuple_ints():
    """Parse the next input line as a tuple of whitespace-separated ints."""
    return tuple(int(tok) for tok in input().split())
def print_iterable(p):
    """Print the elements of *p* space-separated on a single line."""
    joined = " ".join(str(item) for item in p)
    print(joined)
def main():
    # code goes here
    # example:
    x, y = get_tuple_ints()
    results = [x + y, x - y]
    print(x + y)
    print_iterable(results)


if __name__ == '__main__':
    main()
|
#!/usr/bin/python
import logging
import time
from kafka.client import KafkaClient, FetchRequest, ProduceRequest, OffsetRequest
DEBUG = True
def debug(var):
    # Emit *var* only while the module-level DEBUG flag is literally True.
    if DEBUG is True:
        print(var)
class KfkClient(object):
    """Kafka producer/consumer with file-persisted consumer offsets.

    The current offset for (topic, partition) is stored in a local
    "<topic>-<partition>.offset" file so a restart resumes where it left off.
    """

    def __init__(self, ip):
        # 9092 is the standard Kafka broker port.
        self.client = KafkaClient(ip, 9092)
        self.fd = None         # handle of the offset file for the current topic/partition
        self.topic = None
        self.partition = None
        self.offset = None     # next offset to fetch

    def send(self, topic, partition, data):
        """Publish a single message to topic/partition."""
        message = self.client.create_message(data)
        request = ProduceRequest(topic, partition, [message])
        self.client.send_message_set(request)

    def _check_offset(self, topic, partition):
        # (Re)load the persisted offset whenever the target topic/partition changes.
        if (self.topic != topic or self.partition != partition):
            self.topic = topic
            self.partition = partition
            self._get_new_offset()

    def receive(self, topic, partition):
        """Block until at least one message is available; return the batch."""
        self._check_offset(topic, partition)
        while True:
            request = FetchRequest(topic, partition, self.offset, 2048)
            debug(request)
            try:
                (messages, nextRequest) = self.client.get_message_set(request)
            except Exception:
                # Bug fix: the original 'except e:' evaluated the undefined
                # name 'e' and raised a NameError instead of catching.
                # On any fetch error, re-validate the offset and retry.
                self._check_offset(topic, partition)
                continue
            if len(messages) > 0:
                self.offset = nextRequest.offset
                self._write_offset()
                return messages
            else:
                time.sleep(1)

    def get_line(self, topic, partition):
        """Generator that yields message payloads forever."""
        while True:
            messages = self.receive(topic, partition)
            for message in messages:
                yield message.payload

    def close(self):
        """Close the offset file (if open) and the broker connection."""
        if self.fd is not None:
            self.fd.close()
        self.client.close()

    def _get_new_offset(self):
        """Load the persisted offset, clamped to the broker's valid range."""
        file_name = "%s-%s.offset" % (self.topic, self.partition)
        if self.fd is not None:
            self.fd.close()
        try:
            self.fd = open(file_name, 'r+')
            file_offset = self.fd.readline()
        except IOError:
            # No offset file yet: create one and start from the oldest message.
            self.fd = open(file_name, 'w+')
            file_offset = -1
        self.fd.seek(0, 0)
        self.fd.truncate()
        try:
            file_offset = int(file_offset)
        except (TypeError, ValueError):
            # Bug fix: narrowed the bare 'except:' so unrelated errors surface.
            file_offset = 0
        # Ask the broker for the oldest (-2) and newest (-1) available offsets.
        minoffsetreq = OffsetRequest(self.topic, self.partition, -2, 1)
        results = self.client.get_offsets(minoffsetreq)
        minoffset = results[0]
        maxoffsetreq = OffsetRequest(self.topic, self.partition, -1, 1)
        results = self.client.get_offsets(maxoffsetreq)
        maxoffset = results[0]
        if file_offset == -1:
            self.offset = minoffset
        elif file_offset >= minoffset and file_offset <= maxoffset:
            self.offset = file_offset
        else:
            # Persisted offset is out of range (e.g. the log was truncated).
            self.offset = maxoffset
        debug("file%d min%d max%d using%d" % (file_offset, minoffset, maxoffset, self.offset))
        self._write_offset()

    def _write_offset(self):
        # Overwrite the offset file in place with the current offset.
        self.fd.seek(0, 0)
        self.fd.write("%d" % self.offset)
def main():
    """Smoke test: consume lines from the va_result topic and print them."""
    # Bug fix: the original 'DEBUG = True' only bound a local variable and
    # never affected the module-level flag, so it was removed.
    client = KfkClient("10.110.0.40")
    #client.send("module_log", 0, "big dog")
    for message in client.get_line("va_result", 0):
        # print() call form works under both Python 2 and 3 for one argument.
        print(message)
    client.close()


if __name__ == '__main__':
    main()
|
import pickle
def dump(obj, file_name, *args, **kwargs):
    """Pickle *obj* and write the result to the file at *file_name*.

    Extra positional/keyword arguments are forwarded to ``pickle.dump``.
    """
    with open(file_name, "wb") as handle:
        pickle.dump(obj, handle, *args, **kwargs)
def dumps(obj, *args, **kwargs):
    """Return the pickled bytes of *obj* (thin wrapper over ``pickle.dumps``)."""
    return pickle.dumps(obj, *args, **kwargs)
def load(file_name, *args, **kwargs):
    """Read and return the object pickled in the file at *file_name*."""
    with open(file_name, 'rb') as handle:
        return pickle.load(handle, *args, **kwargs)
def loads(bytes_object, *args, **kwargs):
    """Reconstruct and return the object pickled in *bytes_object*."""
    return pickle.loads(bytes_object, *args, **kwargs)
class Pickable(object):
    """Mixin adding pickle persistence helpers to any subclass."""

    def dump(self, file_name, *args, **kwargs):
        """Writes the pickled representation of self to a file."""
        dump(self, file_name, *args, **kwargs)

    def dumps(self, *args, **kwargs):
        """Returns the pickled representation of self."""
        # Bug fix: extra arguments (e.g. protocol) were silently dropped.
        return dumps(self, *args, **kwargs)

    @staticmethod
    def load(file_name, *args, **kwargs):
        """Returns a Pickable object loaded from a file."""
        # Bug fix: forward *args/**kwargs instead of ignoring them.
        return load(file_name, *args, **kwargs)

    @staticmethod
    def loads(bytes_object, *args, **kwargs):
        """Returns a Pickable object loaded from bytes."""
        # Bug fix: forward *args/**kwargs instead of ignoring them.
        return loads(bytes_object, *args, **kwargs)
# Tests
import unittest
from os import remove
from os.path import isfile
class TestPickable(unittest.TestCase):
    """Unit tests for the Pickable mixin."""

    def setUp(self):
        # Fresh instance before each test.
        self.pickable = Pickable()

    def test_isinstance(self):
        # assertIsInstance gives a clearer failure message than assertTrue(isinstance(...)).
        self.assertIsInstance(self.pickable, Pickable)

    def test_dump_load(self):
        self.pickable.dump('test.pickle')
        # Ensure the temp file is removed even if an assertion below fails
        # (the original leaked the file on failure).
        self.addCleanup(remove, 'test.pickle')
        self.assertTrue(isfile('test.pickle'))
        self.assertIsInstance(Pickable.load('test.pickle'), Pickable)

    def test_dumps_loads(self):
        self.assertIsInstance(Pickable.loads(self.pickable.dumps()), Pickable)
# Run the unit tests when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 8 17:54:44 2017
@author: Mebius
"""
def powerset_recursion_comp(l):
    """Return the powerset of *l* as a list of lists (comprehension version)."""
    # Base case: the empty set's only subset is the empty set itself.
    if not l:
        return [[]]
    # Perf fix: compute the sub-powerset ONCE. The original recursed twice,
    # doubling the work at every level (O(4^n) instead of O(2^n)).
    rest = powerset_recursion_comp(l[1:])
    # Subsets without l[0], followed by subsets with l[0] prepended.
    return rest + [[l[0]] + x for x in rest]
def powerset_recursion(l):
    """Return the powerset of *l* as a list of lists (explicit-loop version)."""
    if not l:
        return [[]]
    # Perf fix: compute the sub-powerset once and reuse it; the original made
    # two identical recursive calls per level (exponential duplication).
    rest = powerset_recursion(l[1:])
    subset = []
    for x in rest:
        subset.append([l[0]] + x)
    return rest + subset
def main():
    """Demo: print the powerset of [0..3] computed both ways."""
    items = list(range(4))
    print(items)
    print("using recursion:\n", powerset_recursion(items))
    print("using recursion:\n", powerset_recursion_comp(items))


main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-03-07 20:53:18
# @Author : mutudeh (josephmathone@gmail.com)
# @Link : ${link}
# @Version : $Id$
import os
class Solution(object):
    """Word search: decide whether *word* can be traced through *board* by
    stepping between horizontally/vertically adjacent cells, using each cell
    at most once."""

    def exist(self, board, word):
        """Return True if *word* exists as a path of adjacent, unused cells."""
        if not board or not word:
            return False
        rows, cols = len(board), len(board[0])
        # 0 = free, 1 = already used on the current path.
        visit = [[0] * cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                if board[r][c] != word[0]:
                    continue
                visit[r][c] = 1
                if self.searchDFS(board, r, c, word[1:], visit):
                    return True
                visit[r][c] = 0  # backtrack
        return False

    def searchDFS(self, board, i, j, word, visit):
        """Depth-first search for the rest of *word* from neighbours of (i, j)."""
        if not word:
            return True  # every character has been matched

        def neighbour(r, c):
            # Yield the in-bounds cells up/right/down/left of (r, c).
            for nr, nc in ((r - 1, c), (r, c + 1), (r + 1, c), (r, c - 1)):
                if 0 <= nr < len(board) and 0 <= nc < len(board[0]):
                    yield nr, nc

        for nr, nc in neighbour(i, j):
            if visit[nr][nc] == 0 and board[nr][nc] == word[0]:
                visit[nr][nc] = 1
                if self.searchDFS(board, nr, nc, word[1:], visit):
                    return True
                visit[nr][nc] = 0  # backtrack
        return False
solver = Solution()
grid = [
    ['A', 'B', 'C', 'E'],
    ['S', 'F', 'C', 'S'],
    ['A', 'D', 'E', 'E']
]
# Expect True: "ABCCED" traces through adjacent cells of the sample grid.
print(solver.exist(grid, "ABCCED"))
|
import os
import re
try: # use C-compiled module for python 2.7 (3.3 will do that by default)
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
__AUTHOR__='Lifecell OSS group'
__COPYRIGHT__='Lifecell UA Company, 2018 Kiev, Ukraine'
__version__ = '1.2'
__license__ = "GPL"
__email__ = "oss_xxxx@lifexxxm.ua"
__status__ = "Production"
def savetoFILE(out_file, header, list_my):
    """Write *header* and then each element of *list_my*, one per line."""
    lines = [header] + ["%s" % item for item in list_my]
    with open(out_file, 'w+') as handle:
        handle.write("\n".join(lines) + "\n")
def parseXML(xmlfile, target_parameters):
    """ XML parser function.

    Walks the fixed 9-level hierarchy of an Ericsson CM export, collects the
    attributes named in *target_parameters* for every cell, derives the PCI
    as 3 * physicalLayerCellIdGroup + physicalLayerSubCellId, and returns the
    fully-populated cells as ';'-separated csv lines.
    """
    with open(xmlfile, 'rt') as f:  ## open xml file for parsing
        try:
            tree = ET.parse(f)
            root = tree.getroot()
        except Exception:
            print('It is unknown exception raised during xml parsing by ET module. The failed xml file: ', xmlfile)
            # Bug fix: without this early return, 'root' was referenced while
            # unbound below and the function died with a NameError.
            return []
    list_my = []
    for child_of_root in root:
        for level1 in child_of_root:
            for level2 in level1:
                for level3 in level2:
                    for level4 in level3:
                        for level5 in level4:
                            for level6 in level5:
                                for level7 in level6:
                                    for level8 in level7:
                                        # Reset every per-cell field.
                                        cell_name = None
                                        physicalLayerSubCellId = None
                                        crsGain = None
                                        tac = None
                                        mobCtrlAtPoorCovActive = None
                                        physicalLayerCellIdGroup = None
                                        rachRootSequence = None
                                        cellId = None
                                        earfcndl = None
                                        pci = None
                                        for level9 in level8:
                                            # Strip the Ericsson namespace prefix from the tag.
                                            key, value = level9.tag.replace('{EricssonSpecificAttributes.17.28.xsd}', ''), level9.text
                                            if key in target_parameters:
                                                if key == 'physicalLayerSubCellId':
                                                    physicalLayerSubCellId = value
                                                elif key == 'crsGain':
                                                    crsGain = value
                                                elif key == 'tac':
                                                    tac = value
                                                elif key == 'mobCtrlAtPoorCovActive':
                                                    mobCtrlAtPoorCovActive = value
                                                elif key == 'sectorCarrierRef':
                                                    # Bug fix: preset 'matched', otherwise a TypeError
                                                    # below left it unbound (NameError at 'if matched').
                                                    matched = None
                                                    try:
                                                        matched = re.search(r'.+?vsDataSectorCarrier=([ERBS_]*?\w{2}\d{4}L\d{2})', value)
                                                    except TypeError as e:
                                                        print('TypeError occurs2', e)
                                                        print('Exception2!')
                                                    if matched:
                                                        cell_name = matched.group(1)
                                                        cell_name = cell_name.replace('ERBS_', '').strip()  # in case if we have to delete ERBS_ from the cell name
                                                elif key == 'physicalLayerCellIdGroup':
                                                    physicalLayerCellIdGroup = value
                                                elif key == 'rachRootSequence':
                                                    rachRootSequence = value
                                                elif key == 'cellId':
                                                    cellId = value
                                                elif key == 'earfcndl':
                                                    earfcndl = value
                                        try:
                                            pci = int(physicalLayerCellIdGroup)*3 + int(physicalLayerSubCellId)
                                        except (TypeError, ValueError) as e:
                                            # Bug fix: int(None) raises TypeError, which the original
                                            # 'except ValueError' did not catch (crash on missing params).
                                            print('Error in convertation: ', str(e))
                                        print('list_my2: ', list_my)
                                        print('len2: ', len(list_my))
                                        # Only emit the cell when every target parameter was found.
                                        if (cell_name is not None) and (physicalLayerSubCellId is not None) and (crsGain is not None) \
                                                and (tac is not None) and (mobCtrlAtPoorCovActive is not None) and (physicalLayerCellIdGroup is not None) \
                                                and (rachRootSequence is not None) and (cellId is not None) and (earfcndl is not None) and (pci is not None):
                                            whole_line = cell_name + ';' + tac + ';' + cellId + ';' + earfcndl + ';' + physicalLayerCellIdGroup + ';' \
                                                + physicalLayerSubCellId + ';' + str(pci) + ';' + rachRootSequence + ';' + crsGain + ';' + mobCtrlAtPoorCovActive
                                            list_my.append(whole_line)
                                            print('whole_line: ', whole_line)
    #print('list_my3: ', list_my)
    return list_my
def main():
    """Parse the CM export and write the resulting csv for sqlloader."""
    in_file = 'vsDataEUtranCellFDD.xml'
    out_file = 'new_cm_exp4.csv'
    abs_out_file = os.getcwd() + os.sep + 'out' + os.sep + out_file
    if os.name == 'posix':
        abs_in_file = '/opt/optima/Interfaces/Configuration/ftp/in/' + in_file
    elif os.name == 'nt':
        abs_in_file = os.getcwd() + os.sep + 'in' + os.sep + in_file
    else:
        # Bug fix: 'abs_in_file' was unbound (NameError) on any other platform.
        abs_in_file = in_file
    #below which exactly parameters must be found and parsed from XML file
    target_parameters = ['physicalLayerSubCellId', 'crsGain', 'tac', 'mobCtrlAtPoorCovActive', 'sectorCarrierRef', 'physicalLayerCellIdGroup', 'rachRootSequence', 'cellId', 'earfcndl']
    header = 'Name;TAC;CellId;earfcn;pci1;pci2;pci;prach;power;mobCtrlAtPoorCovActive' #csv file header for sqlloader
    #sectorCarrierRef 260 mobCtrlAtPoorCovActive 244 physicalLayerCellIdGroup 276 rachRootSequence 278
    # Bug fix: 'list_my' was unbound below if parseXML raised TypeError.
    list_my = []
    try:
        list_my = parseXML(abs_in_file, target_parameters)
    except TypeError as e:
        print('TypeError occurs', e)
        print('Exception!')
    savetoFILE(abs_out_file, header, list_my)


if __name__ == "__main__":
    main()
|
# coding: utf-8
import json
import urllib
from helper import http
from helper import utils
# Identifier and credentials for the "lewan" payment platform integration.
PLATFORM_NAME = 'lewan'
APP_ID = '11111'
PAY_KEY = 'xxxxxxxxxxxxxxxxxxxxxx'
# Endpoint used to validate a client session token during login.
GET_USERINFO_URL = 'http://www.lewanduo.com/mobile/user/verifyToken.html'
# Platform RSA public key: used to verify payment-notification signatures.
PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC+GY2/8wJuINxzJo9uWoMRUDcx
ONuK/48Fikze8EFpKWLLr6mBpqeoDVvZQoqGhGKn5wdtHujiCUYSn6pcWKY2Fz2R
xw6/1uA1gzKcLE36KLUkqvFbA3gItSiO3ADNCwJ1ochhdfcEnH2dtbiv5+f7m+xv
5B1aEP142v2CtYKFFQIDAQAB
-----END PUBLIC KEY-----"""
# Merchant RSA private key: used to decrypt the AES session key in callbacks.
PRIVATE_KEY = """-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAIX/cV2vLpxqBEFm
uPpH+c+kLUCYihy2rWbKFSi4RmsRp9adEevTju7EeGWLLLPibz3RAnwCBmeMTAIl
A/ltfIDdNMYN4dZEuZ+m+AlprXeS53hP7f8ie9iA//r4aOzhtbwU+wecYuw++JBV
85eUNbVrkALBnkDazjlWnv0A+EfFAgMBAAECgYBHUGbGRFibODUhlYj28t1569eF
nGlM1NA+d2iBbmlTzGa16oxCJSrZ2kh1Sne1GNq5XIZk9zLvYxSEw6x00BdFNSTL
ufvMhhCGvdhevdhC828UZ7vgehArZv78FSj0cSERoj5IfcCXfPlsMlj0agKKLeq5
xMHsSZEGdBkKA4e4IQJBAPSVsB5Zt4iiAx5Qun2QwtdYc0aO4sxk3cchI6H9RJqX
59Cm2BpN68tDhssODoc53u2/cjuc38W4H9lbC2jOq0kCQQCMQHTWztG8QNq8FOv7
bDItUNqbfqeuH847WLkVGsjX33VewnOEZLdO4J5xpacXmsT7p2QwOMGytAbh43aM
U1ydAkAwmSWbgjwjm/1+oo/Lr13nqB2PoYiTEF+4127bGxXsmc5n+R7raxw1ET/R
TQO5/te66dVq3urfwIwjhiGoO5hxAkBvkIZgqTwlZ+GXY30kDrkLWxnKP0HbPOms
Q7NWmmvRbKvMqRmC4yr9z6e592+nUzIGjO0hfsR2BsbCwVH35gfxAkEAy3oNygRr
Y85yheqg45lJ5gYjB9cpq8qwluCbJepLUmqOWSYovX//JmK/W/sSDiWdqHN37d0J
frObNh0xxEMB5g==
-----END PRIVATE KEY-----"""
# Response payloads: index 0 is the success reply, 1 the failure reply.
RETURN_DATA = {
    0: 'success',
    1: 'failure',
}
def login_verify(req, params=None):
    """Login verification against the lewan platform.

    Args:
        req: request wrapper; the parameters below are read from it.
            session_id, user_id, nickname, new_channel
        params: for tests only — overrides the request arguments.

    Returns:
        Platform user info dict ('openid' is mandatory), or None on any failure.
    """
    if not params:
        params = {
            'session_id': req.get_argument('session_id', ''),
            'user_id': req.get_argument('user_id', ''),
            'nickname': req.get_argument('nickname', ''),
            'channelId': req.get_argument('new_channel', ''),
        }
    token = params['session_id']
    code = params['user_id']
    password = params['nickname']
    channelId = params['channelId']
    # The platform expects the whole payload as one url-encoded JSON blob.
    query_data = urllib.quote(json.dumps({
        'code': code,
        'password': password,
        'token': token,
        'channelId': channelId,
        'app_id': APP_ID,
    }))
    url = '%s?notifyData=%s' % (GET_USERINFO_URL, query_data)
    try:
        # The remote server is unstable; treat any error as a failed login.
        http_code, content = http.get(url)
    except:
        return None
    #print http_code, content
    if http_code != 200:
        return None
    obj = json.loads(content)
    # Response fields: userId, code, app_id, success, msg
    # NOTE(review): 'success' has been observed as both the string 'true' and
    # the boolean True, hence the two-member set.
    if obj['success'] not in {'true', True}:
        return None
    #openid = obj['code']
    return {
        'openid': obj['userId'],  # platform user id
        'openname': obj['code'],  # platform user name
    }
def payment_verify(req, params=None):
    """Payment-callback verification for the lewan platform.

    Args:
        req: request wrapper; the parameters below are read from it.
            encryptkey: merchant AES key, encrypted with RSA
            data: ciphertext of the signed business-data JSON payload
            gameId / gameOrderId / gameUserId: game, order and user identifiers
            payState: payment state (2 means paid successfully)
            errorCode / errorMsg / expandMsg: error details
            paySuccessMoney: amount paid (in fen, 1/100 yuan)
            lewanOrderId: order id inside the lewan system
            serverId: game-server id
            balanceAmt: remaining balance
            sign: signature over the other fields
        params: for tests only — overrides the request arguments.

    Returns:
        (RETURN_DATA, pay_data); pay_data is None on any validation failure.
    """
    if not params:
        params = {
            'encryptkey': req.get_argument('encryptkey', ''),
            'data': req.get_argument('data', ''),
        }
    encryptkey = params['encryptkey']
    data = params['data']
    if not encryptkey or not data:
        return RETURN_DATA, None
    # Decrypt the AES key with our RSA private key, then the payload with it.
    aes_key = utils.rsa_private_decrypt(PRIVATE_KEY, encryptkey)
    aes_data = utils.aes_decrypt(aes_key, data)
    aes_dict = json.loads(aes_data)
    sign = aes_dict.pop('sign')
    # The signature covers the remaining field values concatenated in key order.
    result = sorted(aes_dict.iteritems())
    result = ('%s' % v for k, v in result)
    result_str = ''.join(result)
    if isinstance(result_str, unicode):
        result_str = result_str.encode('utf-8')
    if not utils.rsa_verify_signature(PUBLIC_KEY, result_str, sign):
        return RETURN_DATA, None
    # payState == 2 is the only "payment succeeded" state.
    if aes_dict.get('payState', -1) != 2:
        return RETURN_DATA, None
    pay_data = {
        'app_order_id': aes_dict['gameOrderId'],  # our own order id
        'order_id': aes_dict['lewanOrderId'],  # platform order id
        'order_money': float(aes_dict['paySuccessMoney']) / 100,  # amount actually paid, in yuan
        'uin': '',  # platform user id
        'platform': PLATFORM_NAME,  # platform name tag
    }
    return RETURN_DATA, pay_data
# Manual smoke test with captured sample ciphertexts (Python 2 print syntax).
if __name__ == '__main__':
    encryptkey = 'l4vpFR0Xq9GJTJJvfgvbVNWh741UM8TFIyh8CWDJjBTktVd0AmE4BqpXI+s3xcwvZl+UNRO+gcTqUiWj8qOUBHNynASKrYiAladt9F22S51S3mTe5xTiW5MfAd0SlXjVq7cD8Zo2XlS6pLC6XOzG4a+9LPS7kHTxlVRsYtgOeLg='
    data = 'lDAMn2UBHunBwjqLOrJ4TPFPavixMv4H+CoLsZh7JIz31KQOCiiS/Mgwm32rMROl0hCARHBKzjjafxuMADwxDP1maligHu/cXq3VKPywPQjWUDVavLU9ZsJcsfA+vBg8ypRf20lLircirE3LZO76dBO0hMzVICtZaXDM8Cgh95+plE/Jv+9YOjypD0SLK7D1XX1jFXuCSZkz/lkVTraD96cKyX9yHhZJlFsIJo4gOfCRREsUfej3NykICd6qTTBI36LSQax8PjimH+86hnaAvM4lv7E4QNt0D7Ir5NBp1Sq4t4eKt0FmidqarkEvGH2PwrPNfKeW8Slg4CX0jIidaIvgg3c6lEPOrqcykW4364VpRa/IIrYNu5ggvcsAEEus8yckExqGzJnNAPHOt3G6njsMSOanNnJJRcQMNywB3V/feMQ1FcBDEqMHrKgThi9wh7meu+Uq4xglq4gJV/gQB8dW5hbpry1ux8Z5B8UrPjs='
    print payment_verify('', {'encryptkey': encryptkey, 'data': data})
|
#!/usr/bin/env python
PKG = 'seabotix'
import roslib; roslib.load_manifest(PKG)
import sys
import numpy as np
from math import *
import std_msgs.msg
import rospy
from resources import tools
from kraken_msgs.msg import thrusterData6Thruster
from kraken_msgs.msg import thrusterData4Thruster
from kraken_msgs.msg import imuData
from kraken_msgs.msg import absoluteRPY
from resources import topicHeader
# Importing messages generated by setyaw actionfiles to publish data on a topic
# for Visualization
from kraken_msgs.msg import setYawFeedback
from FuzzyControl.fuzzy import Fuzzy
from FuzzyControl import fuzzyParams as Fparam
# Abort unless the desired yaw was supplied on the command line (Python 2 script).
if(len(sys.argv) < 2):
    print "Enter yaw, to run this script."
    exit()
# Global variables (shared with the IMU callback below)
yaw = 0.0            # NOTE(review): never updated afterwards; appears unused
goal = float(sys.argv[1])   # desired yaw in degrees, relative to the first IMU reading
base_yaw = 0.0       # yaw captured on the first callback invocation
FIRST_ITERATION = True      # True until the first IMU sample arrives
prevError = 0.0      # previous yaw error, feeds the fuzzy delta-error input
def imuCB(dataIn):
    """
    This is imu call back function.
    1. Updates the Current value of yaw - Current_yaw
    2. Calculates error and delta_error based on 4 quadrant tangent = arctan2()
    3. Debug messages
    """
    global base_yaw
    global prevError
    global yaw
    global FIRST_ITERATION
    Current_yaw = dataIn.yaw
    if FIRST_ITERATION:
        # Remember the starting orientation; 'goal' is interpreted relative to it.
        base_yaw = Current_yaw
        FIRST_ITERATION = False
    # Keep last cycle's error before overwriting it (used for the delta term).
    prevError = YAW.error
    # Wrap the raw error into (-180, 180] via atan2 of its sine/cosine.
    # NOTE(review): 3.14 is used in place of math.pi — introduces a small
    # wrap error near +/-180 degrees; confirm whether this is intentional.
    error = (base_yaw + goal - Current_yaw)* 3.14 / 180
    YAW.error = np.arctan2(sin(error),cos(error))*180/3.14
    YAW.delta_error = YAW.error - prevError
    # Feedback message published for visualization.
    yawData.Desired_yaw = (goal + base_yaw)%360
    yawData.Current_yaw = Current_yaw
    yawData.Error = YAW.error
    yawData.header = std_msgs.msg.Header()
    yawData.header.stamp = rospy.Time.now()
    # Note you need to call rospy.init_node() before this will work
    # Debug messages
    rospy.logdebug("--------")
    rospy.logdebug("Current Yaw : %s",round(Current_yaw,3))
    rospy.logdebug("Error : %s",round(YAW.error,3))
    rospy.logdebug("Delta_error : %s",round(YAW.delta_error ,3))
    rospy.logdebug("Goal : %s",goal)
    rospy.logdebug("Thruster data L : %s",round(thruster6Data.data[4],3))
    rospy.logdebug("Thruster data R : %s",round(thruster6Data.data[5],3))
if __name__ == '__main__':
    """
    1. Declare YAW as a Fuzzy object and Declare it's membership function and it's Range.
    2. Declares messages types for thruster4Data and thruster6Data
    3. calculate the thrust from fuzzy control and send to the thruster converter.
    """
    YAW = Fuzzy(Fparam.mf_types, Fparam.f_ssets)
    YAW.io_ranges = Fparam.io_ranges
    thruster4Data=thrusterData4Thruster();
    thruster6Data=thrusterData6Thruster();
    yawData = setYawFeedback();
    # Verbose flag on the command line switches the node to DEBUG logging.
    rospy.init_node('main', log_level=(rospy.DEBUG if tools.getVerboseTag(sys.argv) else rospy.INFO))
    rospy.Subscriber(topicHeader.ABSOLUTE_RPY, absoluteRPY, imuCB)
    pub_thrusters4 = rospy.Publisher(topicHeader.CONTROL_PID_THRUSTER4, thrusterData4Thruster, queue_size = 2)
    pub_thrusters6 = rospy.Publisher(topicHeader.CONTROL_PID_THRUSTER6, thrusterData6Thruster, queue_size = 2)
    pub_FuzzyPlot = rospy.Publisher('FuzzyPlot',setYawFeedback, queue_size = 2)
    r = rospy.Rate(10)  # 10 Hz control loop
    while not rospy.is_shutdown():
        # Fuzzy controller output drives a differential left/right yaw pair.
        thrust = YAW.run()
        thruster6Data.data[0] = 0.0
        thruster6Data.data[1] = 0.0
        thruster6Data.data[2] = 0.0
        thruster6Data.data[3] = 0.0
        thruster6Data.data[4] = thrust # Left Thruster
        thruster6Data.data[5] = -1 * thrust # Right Thruster
        # The 4-thruster message mirrors the yaw pair of the 6-thruster layout.
        thruster4Data.data[0] = thruster6Data.data[0]
        thruster4Data.data[1] = thruster6Data.data[1]
        thruster4Data.data[2] = thruster6Data.data[4]
        thruster4Data.data[3] = thruster6Data.data[5]
        pub_thrusters4.publish(thruster4Data)
        pub_thrusters6.publish(thruster6Data)
        pub_FuzzyPlot.publish(yawData)
        r.sleep()
|
'''
HACKTASK 2
# I want to write a short Python script that uses regular expressions to extract information from the full transcript of the Ninth Democratic debate in Las Vegas, from February 19, 2020.
# First, turn this script from https://www.nbcnews.com/politics/2020-election/full-transcript-ninth-democratic-debate-las-vegas-n1139546 into a text file
# Find the number of instances of crosstalk for each person, displayed on csv file.
'''
import re

# Matches a speaker label at the start of a line, e.g. "HOLT:".
NAME_RE = re.compile(r'^([A-Z]+):', re.M)


def count_crosstalk(text):
    """Return {speaker: number of (CROSSTALK) markers in that speaker's dialogue}.

    The transcript is split on "NAME:" labels at line starts; each chunk of
    dialogue is attributed to the label immediately preceding it.
    """
    parts = NAME_RE.split(text)
    counts = {}
    # parts looks like [preamble, name1, speech1, name2, speech2, ...]
    for i in range(1, len(parts) - 1, 2):
        name = parts[i]
        speech = parts[i + 1]
        counts[name] = counts.get(name, 0) + speech.count('(CROSSTALK)')
    return counts


def write_csv(counts, path):
    """Write a two-row csv: a header of speaker names, then their crosstalk counts."""
    names = sorted(counts)
    with open(path, 'w') as wf:
        # Write a header:
        wf.write(','.join(['Title'] + names) + '\n')
        # write the data
        wf.write(','.join(['frequency of crosstalk'] + [str(counts[n]) for n in names]) + '\n')


if __name__ == '__main__':
    # Load the transcript and emit the per-speaker crosstalk counts.
    # (Original draft had multiple syntax errors and could not run; this is
    # a working implementation of the plan described in the comments above.)
    with open('debate.txt', 'r') as rf:
        text = rf.read()
    write_csv(count_crosstalk(text), 'candidatecrosstalk.csv')
from search.model.impl.local.HillClimbingSearch import HillClimbing
from search.model.impl.local.HillClimbingSearch import StochasticHillClimber
from problems.model.impl.EightQueensProblem import EigthQueensProblem
from problems.model.impl.EightQueensProblem import EightQueensHeuristic
# Build the problem and its heuristic.
p = EigthQueensProblem()
h = EightQueensHeuristic()
# s = HillClimbing(p, h, 100)
# sol = s.solve()
# print(sol, print(p.goal_test(sol)))
# NOTE(review): 'p' is rebuilt with argument 10 (presumably the board size),
# discarding the instance above — confirm the first construction is needed.
p = EigthQueensProblem(10)
# Stochastic hill climbing: up to 100000000 iterations, restart budget 2000.
s = StochasticHillClimber(p, h, 100000000, 2000)
sol = s.solve()
print(sol, "\n", p.goal_test(sol), "\n")
p.print_board(sol)
|
from pointTransform import *
import subprocess
import glob
#files = glob.glob('image_*.png')
groundtruth=50  # expected marker id for the first (commented-out) image set
#files=glob.glob('images/smaller/image*.png')
#groundtruth = 36
#files = glob.glob('images/larger/image*.png')
groundtruth=36  # NOTE(review): overrides the 50 above; only this value is used
#files=glob.glob('images/newfullres/fullres*.png')
# Files were actually mislabelled. The door files are actually wall, i.e. top
files_top =glob.glob('images/frommemory/door*.png')
files_middle = glob.glob('images/frommemory/center*.png')
files_bottom = glob.glob('images/frommemory/wall*.png')
allfiles = zip(files_top, files_middle, files_bottom)
total_detected = 0
# For each (top, middle, bottom) image triple, run the external './coordinates'
# detector and keep the first output line whose third field equals groundtruth.
for top, middle, bottom in allfiles:
    p = subprocess.Popen(['./coordinates', top], stdout=subprocess.PIPE)
    txt = p.communicate()[0]
    detected_top = None
    for line in txt.split('\n'):
        if line.strip() == '': continue
        parts = line.strip().split()
        if int(parts[2]) == groundtruth:
            print 'Top: ' + line
            detected_top = line
            break
    p = subprocess.Popen(['./coordinates', middle], stdout=subprocess.PIPE)
    txt = p.communicate()[0]
    detected_middle = None
    for line in txt.split('\n'):
        if line.strip() == '': continue
        parts = line.strip().split()
        if int(parts[2]) == groundtruth:
            print 'Middle: ' + line
            detected_middle = line
            break
    p = subprocess.Popen(['./coordinates', bottom], stdout=subprocess.PIPE)
    txt = p.communicate()[0]
    detected_bottom = None
    for line in txt.split('\n'):
        if line.strip() == '': continue
        parts = line.strip().split()
        if int(parts[2]) == groundtruth:
            print 'Bottom: ' + line
            detected_bottom = line
            break
    if detected_top is None and detected_middle is None and detected_bottom is None:
        detected = False
    else:
        detected = True
        total_detected += 1
    # Prefer the middle detection, then top, then bottom.
    if detected_middle:
        coords = inMiddle(readFile(middle))
    elif detected_top:
        coords = inTop(readFile(top))
    elif detected_bottom:
        coords = inBottom(readFile(bottom))
    # NOTE(review): if nothing was detected, 'coords' is unbound here (NameError
    # on the first triple) or stale from a previous iteration.
    print bottom, detected, coords
    # NOTE(review): exit(1) stops after the first triple, so the summary line
    # below is unreachable — looks like a debugging leftover; confirm.
    exit(1)
print 'Detected {}/{}'.format(total_detected, len(allfiles))
|
import datetime
from QA.AppDatabaseTester import AppDatabaseTester
class AppMergeTest(AppDatabaseTester):
    """Schema checker for the merged (main) application database.

    Configures the base AppDatabaseTester against the 'DATABASE' target and
    declares the expected column definition (nullability + SQL data type)
    for every table in the merged database.
    """

    def __init__(self, config):
        """Build the tester from *config*.

        Only the end of the tested date range comes from
        config['DATES']['END_DATE'] (format YYYYMMDD); the start is pinned
        to 1976-01-01 so the merge check covers all history.
        """
        end_date = datetime.datetime.strptime(config['DATES']['END_DATE'], '%Y%m%d')
        # NOTE(review): end_date is a datetime while the start is a date --
        # confirm AppDatabaseTester accepts the mixed types.
        super().__init__(config, 'DATABASE', datetime.date(year=1976, month=1, day=1), end_date)
        # Expected schema: table name -> column name -> {null_allowed, data_type}.
        self.table_config = {"application": {"id": {"null_allowed": False, "data_type": "varchar"},
                                             "document_number": {"null_allowed": False, "data_type": "bigint"},
                                             "type": {"null_allowed": True, "data_type": "varchar"},
                                             "application_number": {"null_allowed": True, "data_type": "varchar"},
                                             "date": {"null_allowed": True, "data_type": "date"},
                                             "country": {"null_allowed": True, "data_type": "varchar"},
                                             "kind": {"null_allowed": True, "data_type": "varchar"},
                                             "series_code": {"null_allowed": True, "data_type": "int"},
                                             "invention_title": {"null_allowed": True, "data_type": "mediumtext"},
                                             "invention_abstract": {"null_allowed": True, "data_type": "mediumtext"},
                                             "rule_47_flag": {"null_allowed": True, "data_type": "varchar"},
                                             "filename": {"null_allowed": True, "data_type": "varchar"},
                                             "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                             "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "botanic": {"id": {"null_allowed": False, "data_type": "varchar"},
                                         "document_number": {"null_allowed": False, "data_type": "bigint"},
                                         "latin_name": {"null_allowed": True, "data_type": "varchar"},
                                         "variety": {"null_allowed": True, "data_type": "varchar"},
                                         "filename": {"null_allowed": True, "data_type": "varchar"},
                                         "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                         "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "brf_sum_text": {"id": {"null_allowed": False, "data_type": "varchar"},
                                              "document_number": {"null_allowed": False, "data_type": "bigint"},
                                              "text": {"null_allowed": False, "data_type": "mediumtext"},
                                              "filename": {"null_allowed": True, "data_type": "varchar"},
                                              "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                              "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "claim": {"id": {"null_allowed": False, "data_type": "varchar"},
                                       "document_number": {"null_allowed": False, "data_type": "bigint"},
                                       "text": {"null_allowed": False, "data_type": "mediumtext"},
                                       "sequence": {"null_allowed": False, "data_type": "int"},
                                       "dependent": {"null_allowed": True, "data_type": "varchar"},
                                       "filename": {"null_allowed": True, "data_type": "varchar"},
                                       "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                       "updated_date": {"null_allowed": True, "data_type": "timestamp"},
                                       "num": {"null_allowed": True, "data_type": "varchar"}},
                             "cpa": {"id": {"null_allowed": False, "data_type": "varchar"},
                                     "document_number": {"null_allowed": False, "data_type": "bigint"},
                                     "data": {"null_allowed": False, "data_type": "varchar"},
                                     "filename": {"null_allowed": True, "data_type": "varchar"},
                                     "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                     "updated_date": {"null_allowed": True, "data_type": "timestamp"},
                                     "num": {"null_allowed": True, "data_type": "varchar"}},
                             "cpc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                     "document_number": {"null_allowed": False, "data_type": "bigint"},
                                     "sequence": {"null_allowed": False, "data_type": "int"},
                                     "version": {"null_allowed": True, "data_type": "date"},
                                     "section": {"null_allowed": True, "data_type": "varchar"},
                                     "class": {"null_allowed": True, "data_type": "varchar"},
                                     "subclass": {"null_allowed": True, "data_type": "varchar"},
                                     "main_group": {"null_allowed": True, "data_type": "varchar"},
                                     "subgroup": {"null_allowed": True, "data_type": "varchar"},
                                     "symbol_position": {"null_allowed": True, "data_type": "varchar"},
                                     "value": {"null_allowed": True, "data_type": "varchar"},
                                     "category": {"null_allowed": True, "data_type": "varchar"},
                                     "action_date": {"null_allowed": True, "data_type": "date"},
                                     "filename": {"null_allowed": True, "data_type": "varchar"},
                                     "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                     "updated_date": {"null_allowed": True, "data_type": "timestamp"},
                                     "num": {"null_allowed": True, "data_type": "varchar"}},
                             "detail_desc_text": {"id": {"null_allowed": False, "data_type": "varchar"},
                                                  "document_number": {"null_allowed": False, "data_type": "bigint"},
                                                  "text": {"null_allowed": False, "data_type": "mediumtext"},
                                                  "length": {"null_allowed": False, "data_type": "bigint"},
                                                  "filename": {"null_allowed": True, "data_type": "varchar"},
                                                  "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                                  "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "draw_desc_text": {"id": {"null_allowed": False, "data_type": "varchar"},
                                                "document_number": {"null_allowed": False, "data_type": "bigint"},
                                                "text": {"null_allowed": False, "data_type": "mediumtext"},
                                                "filename": {"null_allowed": True, "data_type": "varchar"},
                                                "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                                "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "foreign_priority": {"id": {"null_allowed": False, "data_type": "varchar"},
                                                  "document_number": {"null_allowed": False, "data_type": "bigint"},
                                                  "country": {"null_allowed": True, "data_type": "varchar"},
                                                  "date": {"null_allowed": True, "data_type": "date"},
                                                  "foreign_doc_number": {"null_allowed": True, "data_type": "varchar"},
                                                  "filename": {"null_allowed": True, "data_type": "varchar"},
                                                  "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                                  "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "further_cpc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                             "document_number": {"null_allowed": False, "data_type": "bigint"},
                                             "sequence": {"null_allowed": False, "data_type": "int"},
                                             "version": {"null_allowed": True, "data_type": "date"},
                                             "section": {"null_allowed": True, "data_type": "varchar"},
                                             "class": {"null_allowed": True, "data_type": "varchar"},
                                             "subclass": {"null_allowed": True, "data_type": "varchar"},
                                             "main_group": {"null_allowed": True, "data_type": "varchar"},
                                             "subgroup": {"null_allowed": True, "data_type": "varchar"},
                                             "symbol_position": {"null_allowed": True, "data_type": "varchar"},
                                             "value": {"null_allowed": True, "data_type": "varchar"},
                                             "category": {"null_allowed": True, "data_type": "varchar"},
                                             "action_date": {"null_allowed": True, "data_type": "date"},
                                             "filename": {"null_allowed": True, "data_type": "varchar"},
                                             "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                             "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "ipcr": {"id": {"null_allowed": False, "data_type": "varchar"},
                                      "document_number": {"null_allowed": False, "data_type": "bigint"},
                                      "sequence": {"null_allowed": False, "data_type": "int"},
                                      "version": {"null_allowed": True, "data_type": "date"},
                                      "class_level": {"null_allowed": True, "data_type": "varchar"},
                                      "section": {"null_allowed": True, "data_type": "varchar"},
                                      "class": {"null_allowed": True, "data_type": "varchar"},
                                      "subclass": {"null_allowed": True, "data_type": "varchar"},
                                      "main_group": {"null_allowed": True, "data_type": "varchar"},
                                      "subgroup": {"null_allowed": True, "data_type": "varchar"},
                                      "symbol_position": {"null_allowed": True, "data_type": "varchar"},
                                      "class_value": {"null_allowed": True, "data_type": "varchar"},
                                      "category": {"null_allowed": True, "data_type": "varchar"},
                                      "action_date": {"null_allowed": True, "data_type": "date"},
                                      "class_status": {"null_allowed": True, "data_type": "varchar"},
                                      "class_data_source": {"null_allowed": True, "data_type": "varchar"},
                                      "filename": {"null_allowed": True, "data_type": "varchar"},
                                      "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                      "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "lawyer": {"id": {"null_allowed": False, "data_type": "varchar"},
                                        "document_number": {"null_allowed": False, "data_type": "bigint"},
                                        "name_first": {"null_allowed": False, "data_type": "varchar"},
                                        "name_last": {"null_allowed": False, "data_type": "varchar"},
                                        "organization": {"null_allowed": False, "data_type": "varchar"},
                                        "sequence": {"null_allowed": False, "data_type": "int"},
                                        "rawlocation_id": {"null_allowed": False, "data_type": "varchar"},
                                        "city": {"null_allowed": False, "data_type": "varchar"},
                                        "state": {"null_allowed": False, "data_type": "varchar"},
                                        "country": {"null_allowed": False, "data_type": "varchar"},
                                        "filename": {"null_allowed": True, "data_type": "varchar"},
                                        "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                        "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "main_cpc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                          "document_number": {"null_allowed": False, "data_type": "bigint"},
                                          "sequence": {"null_allowed": False, "data_type": "int"},
                                          "version": {"null_allowed": True, "data_type": "date"},
                                          "section": {"null_allowed": True, "data_type": "varchar"},
                                          "class": {"null_allowed": True, "data_type": "varchar"},
                                          "subclass": {"null_allowed": True, "data_type": "varchar"},
                                          "main_group": {"null_allowed": True, "data_type": "varchar"},
                                          "subgroup": {"null_allowed": True, "data_type": "varchar"},
                                          "symbol_position": {"null_allowed": True, "data_type": "varchar"},
                                          "value": {"null_allowed": True, "data_type": "varchar"},
                                          "category": {"null_allowed": True, "data_type": "varchar"},
                                          "action_date": {"null_allowed": True, "data_type": "date"},
                                          "filename": {"null_allowed": True, "data_type": "varchar"},
                                          "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                          "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "pct_data": {"id": {"null_allowed": False, "data_type": "varchar"},
                                          "document_number": {"null_allowed": False, "data_type": "bigint"},
                                          "pct_doc_number": {"null_allowed": True, "data_type": "varchar"},
                                          "country": {"null_allowed": True, "data_type": "varchar"},
                                          "date": {"null_allowed": True, "data_type": "date"},
                                          "us_371c124_date": {"null_allowed": True, "data_type": "date"},
                                          "us_371c12_date": {"null_allowed": True, "data_type": "date"},
                                          "kind": {"null_allowed": True, "data_type": "varchar"},
                                          "doc_type": {"null_allowed": True, "data_type": "varchar"},
                                          "filename": {"null_allowed": True, "data_type": "varchar"},
                                          "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                          "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "publication": {"id": {"null_allowed": False, "data_type": "varchar"},
                                             "document_number": {"null_allowed": False, "data_type": "bigint"},
                                             "date": {"null_allowed": True, "data_type": "date"},
                                             "country": {"null_allowed": True, "data_type": "varchar"},
                                             "kind": {"null_allowed": True, "data_type": "varchar"},
                                             "filing_type": {"null_allowed": True, "data_type": "varchar"},
                                             "filename": {"null_allowed": True, "data_type": "varchar"},
                                             "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                             "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "rawassignee": {"id": {"null_allowed": False, "data_type": "varchar"},
                                             "document_number": {"null_allowed": False, "data_type": "bigint"},
                                             "sequence": {"null_allowed": False, "data_type": "int"},
                                             "name_first": {"null_allowed": False, "data_type": "varchar"},
                                             "name_last": {"null_allowed": False, "data_type": "varchar"},
                                             "organization": {"null_allowed": False, "data_type": "varchar"},
                                             "type": {"null_allowed": False, "data_type": "int"},
                                             "rawlocation_id": {"null_allowed": False, "data_type": "varchar"},
                                             "city": {"null_allowed": False, "data_type": "varchar"},
                                             "state": {"null_allowed": False, "data_type": "varchar"},
                                             "country": {"null_allowed": False, "data_type": "varchar"},
                                             "filename": {"null_allowed": True, "data_type": "varchar"},
                                             "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                             "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "rawinventor" : {"id": {"null_allowed": False, "data_type": "varchar"},
                                              "document_number": {"null_allowed": False, "data_type": "bigint"},
                                              "name_first": {"null_allowed": False, "data_type": "varchar"},
                                              "name_last": {"null_allowed": False, "data_type": "varchar"},
                                              "sequence": {"null_allowed": False, "data_type": "int"},
                                              "designation": {"null_allowed": False, "data_type": "varchar"},
                                              "deceased": {"null_allowed": False, "data_type": "varchar"},
                                              "rawlocation_id": {"null_allowed": False, "data_type": "varchar"},
                                              "city": {"null_allowed": False, "data_type": "varchar"},
                                              "state": {"null_allowed": False, "data_type": "varchar"},
                                              "country": {"null_allowed": False, "data_type": "varchar"},
                                              "filename": {"null_allowed": True, "data_type": "varchar"},
                                              "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                              "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "rawlocation": {"id": {"null_allowed": False, "data_type": "varchar"},
                                             "city": {"null_allowed": False, "data_type": "varchar"},
                                             "state": {"null_allowed": False, "data_type": "varchar"},
                                             "country": {"null_allowed": False, "data_type": "varchar"},
                                             "latitude": {"null_allowed": True, "data_type": "float"},
                                             "longitude": {"null_allowed": True, "data_type": "float"},
                                             "filename": {"null_allowed": True, "data_type": "varchar"},
                                             "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                             "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "rawuspc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                         "document_number": {"null_allowed": False, "data_type": "bigint"},
                                         "classification": {"null_allowed": False, "data_type": "varchar"},
                                         "sequence": {"null_allowed": False, "data_type": "int"},
                                         "filename": {"null_allowed": True, "data_type": "varchar"},
                                         "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                         "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "rel_app_text": {"id": {"null_allowed": False, "data_type": "varchar"},
                                              "document_number": {"null_allowed": False, "data_type": "bigint"},
                                              "text": {"null_allowed": False, "data_type": "mediumtext"},
                                              "filename": {"null_allowed": True, "data_type": "varchar"},
                                              "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                              "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "us_parties": {"id": {"null_allowed": False, "data_type": "varchar"},
                                            "document_number": {"null_allowed": False, "data_type": "bigint"},
                                            "name_first": {"null_allowed": False, "data_type": "varchar"},
                                            "name_last": {"null_allowed": False, "data_type": "varchar"},
                                            "sequence": {"null_allowed": False, "data_type": "int"},
                                            "rawlocation_id": {"null_allowed": False, "data_type": "varchar"},
                                            "city": {"null_allowed": False, "data_type": "varchar"},
                                            "state": {"null_allowed": False, "data_type": "varchar"},
                                            "country": {"null_allowed": False, "data_type": "varchar"},
                                            "filename": {"null_allowed": True, "data_type": "varchar"},
                                            "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                            "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "uspc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                      "document_number": {"null_allowed": False, "data_type": "bigint"},
                                      "mainclass_id": {"null_allowed": False, "data_type": "varchar"},
                                      "subclass_id": {"null_allowed": False, "data_type": "varchar"},
                                      "sequence": {"null_allowed": False, "data_type": "int"},
                                      "filename": {"null_allowed": True, "data_type": "varchar"},
                                      "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                      "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "usreldoc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                          "document_number": {"null_allowed": False, "data_type": "bigint"},
                                          "related_doc_number": {"null_allowed": False, "data_type": "varchar"},
                                          "country": {"null_allowed": False, "data_type": "varchar"},
                                          "doc_type": {"null_allowed": False, "data_type": "varchar"},
                                          "date": {"null_allowed": False, "data_type": "date"},
                                          "filename": {"null_allowed": True, "data_type": "varchar"},
                                          "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                          "updated_date": {"null_allowed": True, "data_type": "timestamp"}}
                             }
        # Accumulators populated elsewhere (base class / reporting code).
        self.count_data = []
        self.floating_entities = []
        self.floating_patent = []

    def test_yearly_count(self):
        """Yearly-count check is intentionally a no-op for this target."""
        pass
class AppUploadTest(AppDatabaseTester):
    """Schema checker for the temporary upload database ('TEMP_UPLOAD_DB').

    Declares the same expected per-table column definitions (nullability +
    SQL data type) as the merge test, but restricts the tested range to a
    single date.
    """

    def __init__(self, config):
        """Build the tester from *config*.

        Both ends of the tested range are parsed from
        config['DATES']['END_DATE'] (format YYYYMMDD), so only the newest
        upload batch is checked.
        """
        # NOTE(review): start_date is parsed from END_DATE -- if config also
        # defines a START_DATE key this may be a copy-paste slip; confirm
        # the single-date range is intentional.
        start_date = datetime.datetime.strptime(config['DATES']['END_DATE'], '%Y%m%d')
        end_date = datetime.datetime.strptime(config['DATES']['END_DATE'], '%Y%m%d')
        super().__init__(config, 'TEMP_UPLOAD_DB', start_date, end_date)
        # Expected schema: table name -> column name -> {null_allowed, data_type}.
        self.table_config = {"application": {"id": {"null_allowed": False, "data_type": "varchar"},
                                             "document_number": {"null_allowed": False, "data_type": "bigint"},
                                             "type": {"null_allowed": True, "data_type": "varchar"},
                                             "application_number": {"null_allowed": True, "data_type": "varchar"},
                                             "date": {"null_allowed": True, "data_type": "date"},
                                             "country": {"null_allowed": True, "data_type": "varchar"},
                                             "kind": {"null_allowed": True, "data_type": "varchar"},
                                             "series_code": {"null_allowed": True, "data_type": "int"},
                                             "invention_title": {"null_allowed": True, "data_type": "mediumtext"},
                                             "invention_abstract": {"null_allowed": True, "data_type": "mediumtext"},
                                             "rule_47_flag": {"null_allowed": True, "data_type": "varchar"},
                                             "filename": {"null_allowed": True, "data_type": "varchar"},
                                             "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                             "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "botanic": {"id": {"null_allowed": False, "data_type": "varchar"},
                                         "document_number": {"null_allowed": False, "data_type": "bigint"},
                                         "latin_name": {"null_allowed": True, "data_type": "varchar"},
                                         "variety": {"null_allowed": True, "data_type": "varchar"},
                                         "filename": {"null_allowed": True, "data_type": "varchar"},
                                         "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                         "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "brf_sum_text": {"id": {"null_allowed": False, "data_type": "varchar"},
                                              "document_number": {"null_allowed": False, "data_type": "bigint"},
                                              "text": {"null_allowed": False, "data_type": "mediumtext"},
                                              "filename": {"null_allowed": True, "data_type": "varchar"},
                                              "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                              "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "claim": {"id": {"null_allowed": False, "data_type": "varchar"},
                                       "document_number": {"null_allowed": False, "data_type": "bigint"},
                                       "text": {"null_allowed": False, "data_type": "mediumtext"},
                                       "sequence": {"null_allowed": False, "data_type": "int"},
                                       "dependent": {"null_allowed": True, "data_type": "varchar"},
                                       "filename": {"null_allowed": True, "data_type": "varchar"},
                                       "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                       "updated_date": {"null_allowed": True, "data_type": "timestamp"},
                                       "num": {"null_allowed": True, "data_type": "varchar"}},
                             "cpa": {"id": {"null_allowed": False, "data_type": "varchar"},
                                     "document_number": {"null_allowed": False, "data_type": "bigint"},
                                     "data": {"null_allowed": False, "data_type": "varchar"},
                                     "filename": {"null_allowed": True, "data_type": "varchar"},
                                     "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                     "updated_date": {"null_allowed": True, "data_type": "timestamp"},
                                     "num": {"null_allowed": True, "data_type": "varchar"}},
                             "cpc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                     "document_number": {"null_allowed": False, "data_type": "bigint"},
                                     "sequence": {"null_allowed": False, "data_type": "int"},
                                     "version": {"null_allowed": True, "data_type": "date"},
                                     "section": {"null_allowed": True, "data_type": "varchar"},
                                     "class": {"null_allowed": True, "data_type": "varchar"},
                                     "subclass": {"null_allowed": True, "data_type": "varchar"},
                                     "main_group": {"null_allowed": True, "data_type": "varchar"},
                                     "subgroup": {"null_allowed": True, "data_type": "varchar"},
                                     "symbol_position": {"null_allowed": True, "data_type": "varchar"},
                                     "value": {"null_allowed": True, "data_type": "varchar"},
                                     "category": {"null_allowed": True, "data_type": "varchar"},
                                     "action_date": {"null_allowed": True, "data_type": "date"},
                                     "filename": {"null_allowed": True, "data_type": "varchar"},
                                     "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                     "updated_date": {"null_allowed": True, "data_type": "timestamp"},
                                     "num": {"null_allowed": True, "data_type": "varchar"}},
                             "detail_desc_text": {"id": {"null_allowed": False, "data_type": "varchar"},
                                                  "document_number": {"null_allowed": False, "data_type": "bigint"},
                                                  "text": {"null_allowed": False, "data_type": "mediumtext"},
                                                  "length": {"null_allowed": False, "data_type": "bigint"},
                                                  "filename": {"null_allowed": True, "data_type": "varchar"},
                                                  "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                                  "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "draw_desc_text": {"id": {"null_allowed": False, "data_type": "varchar"},
                                                "document_number": {"null_allowed": False, "data_type": "bigint"},
                                                "text": {"null_allowed": False, "data_type": "mediumtext"},
                                                "filename": {"null_allowed": True, "data_type": "varchar"},
                                                "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                                "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "foreign_priority": {"id": {"null_allowed": False, "data_type": "varchar"},
                                                  "document_number": {"null_allowed": False, "data_type": "bigint"},
                                                  "country": {"null_allowed": True, "data_type": "varchar"},
                                                  "date": {"null_allowed": True, "data_type": "date"},
                                                  "foreign_doc_number": {"null_allowed": True, "data_type": "varchar"},
                                                  "filename": {"null_allowed": True, "data_type": "varchar"},
                                                  "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                                  "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "further_cpc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                             "document_number": {"null_allowed": False, "data_type": "bigint"},
                                             "sequence": {"null_allowed": False, "data_type": "int"},
                                             "version": {"null_allowed": True, "data_type": "date"},
                                             "section": {"null_allowed": True, "data_type": "varchar"},
                                             "class": {"null_allowed": True, "data_type": "varchar"},
                                             "subclass": {"null_allowed": True, "data_type": "varchar"},
                                             "main_group": {"null_allowed": True, "data_type": "varchar"},
                                             "subgroup": {"null_allowed": True, "data_type": "varchar"},
                                             "symbol_position": {"null_allowed": True, "data_type": "varchar"},
                                             "value": {"null_allowed": True, "data_type": "varchar"},
                                             "category": {"null_allowed": True, "data_type": "varchar"},
                                             "action_date": {"null_allowed": True, "data_type": "date"},
                                             "filename": {"null_allowed": True, "data_type": "varchar"},
                                             "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                             "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "ipcr": {"id": {"null_allowed": False, "data_type": "varchar"},
                                      "document_number": {"null_allowed": False, "data_type": "bigint"},
                                      "sequence": {"null_allowed": False, "data_type": "int"},
                                      "version": {"null_allowed": True, "data_type": "date"},
                                      "class_level": {"null_allowed": True, "data_type": "varchar"},
                                      "section": {"null_allowed": True, "data_type": "varchar"},
                                      "class": {"null_allowed": True, "data_type": "varchar"},
                                      "subclass": {"null_allowed": True, "data_type": "varchar"},
                                      "main_group": {"null_allowed": True, "data_type": "varchar"},
                                      "subgroup": {"null_allowed": True, "data_type": "varchar"},
                                      "symbol_position": {"null_allowed": True, "data_type": "varchar"},
                                      "class_value": {"null_allowed": True, "data_type": "varchar"},
                                      "category": {"null_allowed": True, "data_type": "varchar"},
                                      "action_date": {"null_allowed": True, "data_type": "date"},
                                      "class_status": {"null_allowed": True, "data_type": "varchar"},
                                      "class_data_source": {"null_allowed": True, "data_type": "varchar"},
                                      "filename": {"null_allowed": True, "data_type": "varchar"},
                                      "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                      "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "lawyer": {"id": {"null_allowed": False, "data_type": "varchar"},
                                        "document_number": {"null_allowed": False, "data_type": "bigint"},
                                        "name_first": {"null_allowed": False, "data_type": "varchar"},
                                        "name_last": {"null_allowed": False, "data_type": "varchar"},
                                        "organization": {"null_allowed": False, "data_type": "varchar"},
                                        "sequence": {"null_allowed": False, "data_type": "int"},
                                        "rawlocation_id": {"null_allowed": False, "data_type": "varchar"},
                                        "city": {"null_allowed": False, "data_type": "varchar"},
                                        "state": {"null_allowed": False, "data_type": "varchar"},
                                        "country": {"null_allowed": False, "data_type": "varchar"},
                                        "filename": {"null_allowed": True, "data_type": "varchar"},
                                        "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                        "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "main_cpc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                          "document_number": {"null_allowed": False, "data_type": "bigint"},
                                          "sequence": {"null_allowed": False, "data_type": "int"},
                                          "version": {"null_allowed": True, "data_type": "date"},
                                          "section": {"null_allowed": True, "data_type": "varchar"},
                                          "class": {"null_allowed": True, "data_type": "varchar"},
                                          "subclass": {"null_allowed": True, "data_type": "varchar"},
                                          "main_group": {"null_allowed": True, "data_type": "varchar"},
                                          "subgroup": {"null_allowed": True, "data_type": "varchar"},
                                          "symbol_position": {"null_allowed": True, "data_type": "varchar"},
                                          "value": {"null_allowed": True, "data_type": "varchar"},
                                          "category": {"null_allowed": True, "data_type": "varchar"},
                                          "action_date": {"null_allowed": True, "data_type": "date"},
                                          "filename": {"null_allowed": True, "data_type": "varchar"},
                                          "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                          "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "pct_data": {"id": {"null_allowed": False, "data_type": "varchar"},
                                          "document_number": {"null_allowed": False, "data_type": "bigint"},
                                          "pct_doc_number": {"null_allowed": True, "data_type": "varchar"},
                                          "country": {"null_allowed": True, "data_type": "varchar"},
                                          "date": {"null_allowed": True, "data_type": "date"},
                                          "us_371c124_date": {"null_allowed": True, "data_type": "date"},
                                          "us_371c12_date": {"null_allowed": True, "data_type": "date"},
                                          "kind": {"null_allowed": True, "data_type": "varchar"},
                                          "doc_type": {"null_allowed": True, "data_type": "varchar"},
                                          "filename": {"null_allowed": True, "data_type": "varchar"},
                                          "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                          "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "publication": {"id": {"null_allowed": False, "data_type": "varchar"},
                                             "document_number": {"null_allowed": False, "data_type": "bigint"},
                                             "date": {"null_allowed": True, "data_type": "date"},
                                             "country": {"null_allowed": True, "data_type": "varchar"},
                                             "kind": {"null_allowed": True, "data_type": "varchar"},
                                             "filing_type": {"null_allowed": True, "data_type": "varchar"},
                                             "filename": {"null_allowed": True, "data_type": "varchar"},
                                             "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                             "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "rawassignee": {"id": {"null_allowed": False, "data_type": "varchar"},
                                             "document_number": {"null_allowed": False, "data_type": "bigint"},
                                             "sequence": {"null_allowed": False, "data_type": "int"},
                                             "name_first": {"null_allowed": False, "data_type": "varchar"},
                                             "name_last": {"null_allowed": False, "data_type": "varchar"},
                                             "organization": {"null_allowed": False, "data_type": "varchar"},
                                             "type": {"null_allowed": False, "data_type": "int"},
                                             "rawlocation_id": {"null_allowed": False, "data_type": "varchar"},
                                             "city": {"null_allowed": False, "data_type": "varchar"},
                                             "state": {"null_allowed": False, "data_type": "varchar"},
                                             "country": {"null_allowed": False, "data_type": "varchar"},
                                             "filename": {"null_allowed": True, "data_type": "varchar"},
                                             "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                             "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "rawinventor" : {"id": {"null_allowed": False, "data_type": "varchar"},
                                              "document_number": {"null_allowed": False, "data_type": "bigint"},
                                              "name_first": {"null_allowed": False, "data_type": "varchar"},
                                              "name_last": {"null_allowed": False, "data_type": "varchar"},
                                              "sequence": {"null_allowed": False, "data_type": "int"},
                                              "designation": {"null_allowed": False, "data_type": "varchar"},
                                              "deceased": {"null_allowed": False, "data_type": "varchar"},
                                              "rawlocation_id": {"null_allowed": False, "data_type": "varchar"},
                                              "city": {"null_allowed": False, "data_type": "varchar"},
                                              "state": {"null_allowed": False, "data_type": "varchar"},
                                              "country": {"null_allowed": False, "data_type": "varchar"},
                                              "filename": {"null_allowed": True, "data_type": "varchar"},
                                              "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                              "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "rawlocation": {"id": {"null_allowed": False, "data_type": "varchar"},
                                             "city": {"null_allowed": False, "data_type": "varchar"},
                                             "state": {"null_allowed": False, "data_type": "varchar"},
                                             "country": {"null_allowed": False, "data_type": "varchar"},
                                             "latitude": {"null_allowed": True, "data_type": "float"},
                                             "longitude": {"null_allowed": True, "data_type": "float"},
                                             "filename": {"null_allowed": True, "data_type": "varchar"},
                                             "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                             "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "rawuspc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                         "document_number": {"null_allowed": False, "data_type": "bigint"},
                                         "classification": {"null_allowed": False, "data_type": "varchar"},
                                         "sequence": {"null_allowed": False, "data_type": "int"},
                                         "filename": {"null_allowed": True, "data_type": "varchar"},
                                         "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                         "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "rel_app_text": {"id": {"null_allowed": False, "data_type": "varchar"},
                                              "document_number": {"null_allowed": False, "data_type": "bigint"},
                                              "text": {"null_allowed": False, "data_type": "mediumtext"},
                                              "filename": {"null_allowed": True, "data_type": "varchar"},
                                              "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                              "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "us_parties": {"id": {"null_allowed": False, "data_type": "varchar"},
                                            "document_number": {"null_allowed": False, "data_type": "bigint"},
                                            "name_first": {"null_allowed": False, "data_type": "varchar"},
                                            "name_last": {"null_allowed": False, "data_type": "varchar"},
                                            "sequence": {"null_allowed": False, "data_type": "int"},
                                            "rawlocation_id": {"null_allowed": False, "data_type": "varchar"},
                                            "city": {"null_allowed": False, "data_type": "varchar"},
                                            "state": {"null_allowed": False, "data_type": "varchar"},
                                            "country": {"null_allowed": False, "data_type": "varchar"},
                                            "filename": {"null_allowed": True, "data_type": "varchar"},
                                            "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                            "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "uspc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                      "document_number": {"null_allowed": False, "data_type": "bigint"},
                                      "mainclass_id": {"null_allowed": False, "data_type": "varchar"},
                                      "subclass_id": {"null_allowed": False, "data_type": "varchar"},
                                      "sequence": {"null_allowed": False, "data_type": "int"},
                                      "filename": {"null_allowed": True, "data_type": "varchar"},
                                      "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                      "updated_date": {"null_allowed": True, "data_type": "timestamp"}},
                             "usreldoc": {"id": {"null_allowed": False, "data_type": "varchar"},
                                          "document_number": {"null_allowed": False, "data_type": "bigint"},
                                          "related_doc_number": {"null_allowed": False, "data_type": "varchar"},
                                          "country": {"null_allowed": False, "data_type": "varchar"},
                                          "doc_type": {"null_allowed": False, "data_type": "varchar"},
                                          "date": {"null_allowed": False, "data_type": "date"},
                                          "filename": {"null_allowed": True, "data_type": "varchar"},
                                          "created_date": {"null_allowed": True, "data_type": "timestamp"},
                                          "updated_date": {"null_allowed": True, "data_type": "timestamp"}}
                             }
        # Accumulators populated elsewhere (base class / reporting code).
        self.count_data = []
        self.floating_entities = []
        self.floating_patent = []

    def test_yearly_count(self):
        """Yearly-count check is intentionally a no-op for this target."""
        pass
|
from setuptools import setup, find_packages

from exchanges import __version__

# Pinned runtime dependencies of the exchange adapters package.
REQUIREMENTS = [
    'Flask==0.12.2',
    'pandas==0.20.1',
    'requests==2.18.4',
    'aj_sns==0.0.56',
    'networkx==2.1',
    'ethereum==2.3.1',
    'rlp==0.6.0',
    'python-binance',
    'ccxt==1.12.10',
    'selenium==3.12.0',
    'pusher==2.0.1',
    'python-quoine==0.1.4',
    'bittrex-websocket==1.0.6.2',
    'python-bittrex==0.3.0',
    'websocket-client==0.48.0',
    'pycrypto',
    'matplotlib',
]

# Package metadata; the version is sourced from the package itself so it
# is defined in exactly one place.
setup(
    name='exchanges',
    version=__version__,
    description='exchange adapters',
    author='Aye-Jay',
    include_package_data=True,
    packages=find_packages(),
    install_requires=REQUIREMENTS,
)
|
# Fetch Tencent's COVID-19 snapshot and render a China choropleth map.
import requests
# pyecharts chart library (Map choropleth, Line chart, Bar chart)
from pyecharts import Map, Line, Bar
import json

# Tencent endpoint serving the epidemic data wrapped in a JSONP callback.
mapUrl = "https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5&callback=jQuery34100282751706540052_1583633749228&_=1583633749229"
# Download and de-mangle the payload: the inner JSON is double-encoded and
# wrapped in a jQuery callback, so strip the quoting and escapes first.
mapData = requests.get(mapUrl).text.replace('"{', '{').replace('}"})', '}})').replace("\\", "")
# Drop the "callback(" prefix and the trailing ")" to leave bare JSON.
mapData = mapData[mapData.index("(") + 1:-1]
#print(type(mapData))
#print(mapData)
# Parse the cleaned-up payload into a Python dict.
tempMapData = json.loads(mapData)
# Per-province entries (each itself a dict) live under data.areaTree[0].children.
chain_provinces = tempMapData["data"]["areaTree"][0]["children"]
print(chain_provinces)
province_names = []  # province display names
province_data = []   # confirmed-case total per province
for province in chain_provinces:
    province_names.append(province["name"])
    province_data.append(province["total"]["confirm"])
# Renamed from `map` so the builtin map() is no longer shadowed.
china_map = Map("全国疫情分布图", width=1200, height=600)
# add(): series name, province list, data list; visual_range bounds the color
# bar, is_visualmap shows it, is_label_show prints province labels.
china_map.add("", province_names, province_data, maptype='china', visual_range=[0, 1000],
              is_visualmap=True,
              visual_text_color='#000', is_label_show=True)
# Dump the chart's configuration for debugging.
china_map.show_config()
# Render the map to an HTML file.
china_map.render(path="output/全国疫情分布图.html")
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonpickle
from rqalpha.interface import AbstractBroker, Persistable
from rqalpha.utils import get_account_type
from rqalpha.utils.i18n import gettext as _
from rqalpha.events import EVENT
from rqalpha.const import MATCHING_TYPE, ORDER_STATUS
from rqalpha.const import ACCOUNT_TYPE
from rqalpha.environment import Environment
from rqalpha.model.account import BenchmarkAccount, StockAccount, FutureAccount
from .matcher import Matcher
def init_accounts(env):
    """Build the account map configured for this backtest run.

    Creates one account per entry in ``config.base.account_list`` (stock
    and/or future); any other entry raises NotImplementedError.  When a
    benchmark is configured, a benchmark account seeded with the combined
    starting cash of the real accounts is added as well.
    """
    config = env.config
    start_date = config.base.start_date
    accounts = {}
    total_cash = 0
    for kind in config.base.account_list:
        if kind == ACCOUNT_TYPE.STOCK:
            cash = config.base.stock_starting_cash
            accounts[ACCOUNT_TYPE.STOCK] = StockAccount(env, cash, start_date)
        elif kind == ACCOUNT_TYPE.FUTURE:
            cash = config.base.future_starting_cash
            accounts[ACCOUNT_TYPE.FUTURE] = FutureAccount(env, cash, start_date)
        else:
            raise NotImplementedError
        total_cash += cash
    if config.base.benchmark is not None:
        accounts[ACCOUNT_TYPE.BENCHMARK] = BenchmarkAccount(env, total_cash, start_date)
    return accounts
class Broker(AbstractBroker, Persistable):
    """Simulated broker that matches orders against bar data via Matcher.

    Depending on ``config.base.matching_type``, orders are matched
    immediately on the current bar's close or deferred to the next bar's
    open.  Order lifecycle updates are published on the env's event bus.
    """

    def __init__(self, env):
        self._env = env
        if env.config.base.matching_type == MATCHING_TYPE.CURRENT_BAR_CLOSE:
            # Match on the current bar's close, as soon as the order arrives.
            self._matcher = Matcher(lambda bar: bar.close, env.config.validator.bar_limit)
            self._match_immediately = True
        else:
            # Match on the (next) bar's open.
            self._matcher = Matcher(lambda bar: bar.open, env.config.validator.bar_limit)
            self._match_immediately = False
        self._accounts = None          # built lazily by get_accounts()
        self._open_orders = []         # [(account, order)] currently matchable
        self._board = None
        self._turnover = {}
        self._delayed_orders = []      # orders deferred to the next trading day
        self._frontend_validator = {}
        # This event triggers the strategy's before_trading function.
        self._env.event_bus.add_listener(EVENT.BEFORE_TRADING, self.before_trading)
        # This event triggers the strategy's handle_bar function.
        self._env.event_bus.add_listener(EVENT.BAR, self.bar)
        # This event triggers the strategy's handle_tick function.
        self._env.event_bus.add_listener(EVENT.TICK, self.tick)
        # This event triggers the strategy's after_trading function.
        self._env.event_bus.add_listener(EVENT.AFTER_TRADING, self.after_trading)

    def get_accounts(self):
        """Return the account map, creating it on first use."""
        if self._accounts is None:
            self._accounts = init_accounts(self._env)
        return self._accounts

    def get_open_orders(self):
        return self._open_orders

    def get_state(self):
        """Persist only the ids of delayed orders (Persistable contract)."""
        return jsonpickle.dumps([o.order_id for _, o in self._delayed_orders]).encode('utf-8')

    def set_state(self, state):
        """Rebuild open/delayed order lists from each account's daily orders,
        classifying non-final orders by whether their id was persisted as
        delayed."""
        delayed_orders = jsonpickle.loads(state.decode('utf-8'))
        for account in self._accounts.values():
            for o in account.daily_orders.values():
                if not o._is_final():
                    if o.order_id in delayed_orders:
                        self._delayed_orders.append((account, o))
                    else:
                        self._open_orders.append((account, o))

    def _get_account_for(self, order_book_id):
        # Map the instrument to its owning account (stock vs future).
        account_type = get_account_type(order_book_id)
        return self._accounts[account_type]

    def submit_order(self, order):
        """Accept a new order; queue, delay, or match it per configuration."""
        account = self._get_account_for(order.order_book_id)
        self._env.event_bus.publish_event(EVENT.ORDER_PENDING_NEW, account, order)
        account.append_order(order)
        if order._is_final():
            # Order already reached a final state (e.g. rejected) — done.
            return
        # account.on_order_creating(order)
        if self._env.config.base.frequency == '1d' and not self._match_immediately:
            # Daily bars with next-open matching: hold until the next day.
            self._delayed_orders.append((account, order))
            return
        self._open_orders.append((account, order))
        order._active()
        self._env.event_bus.publish_event(EVENT.ORDER_CREATION_PASS, account, order)
        if self._match_immediately:
            self._match()

    def cancel_order(self, order):
        """Cancel a user order and drop it from whichever pending list holds it."""
        account = self._get_account_for(order.order_book_id)
        self._env.event_bus.publish_event(EVENT.ORDER_PENDING_CANCEL, account, order)
        # account.on_order_cancelling(order)
        order._mark_cancelled(_("{order_id} order has been cancelled by user.").format(order_id=order.order_id))
        self._env.event_bus.publish_event(EVENT.ORDER_CANCELLATION_PASS, account, order)
        # account.on_order_cancellation_pass(order)
        try:
            self._open_orders.remove((account, order))
        except ValueError:
            try:
                self._delayed_orders.remove((account, order))
            except ValueError:
                pass

    def before_trading(self):
        # Activate orders carried over into this trading day.
        for account, order in self._open_orders:
            order._active()
            self._env.event_bus.publish_event(EVENT.ORDER_CREATION_PASS, account, order)

    def after_trading(self):
        # Orders still open at the close are rejected; delayed orders become
        # tomorrow's open orders.
        for account, order in self._open_orders:
            order._mark_rejected(_("Order Rejected: {order_book_id} can not match. Market close.").format(
                order_book_id=order.order_book_id
            ))
            self._env.event_bus.publish_event(EVENT.ORDER_UNSOLICITED_UPDATE, account, order)
        self._open_orders = self._delayed_orders
        self._delayed_orders = []

    def bar(self, bar_dict):
        # Feed the new bar to the matcher, then try to fill open orders.
        env = Environment.get_instance()
        self._matcher.update(env.calendar_dt, env.trading_dt, bar_dict)
        self._match()

    def tick(self, tick):
        # TODO support tick matching
        pass
        # env = Environment.get_instance()
        # self._matcher.update(env.calendar_dt, env.trading_dt, tick)
        # self._match()

    def _match(self):
        self._matcher.match(self._open_orders)
        # Split finished orders out of the open list and notify listeners of
        # rejections/cancellations produced by the matcher.
        final_orders = [(a, o) for a, o in self._open_orders if o._is_final()]
        self._open_orders = [(a, o) for a, o in self._open_orders if not o._is_final()]
        for account, order in final_orders:
            if order.status == ORDER_STATUS.REJECTED or order.status == ORDER_STATUS.CANCELLED:
                self._env.event_bus.publish_event(EVENT.ORDER_UNSOLICITED_UPDATE, account, order)
|
from collections import deque
import numpy
class LinearFeedbackShiftRegister(object):
    """A linear feedback shift register (LFSR).

    Emits the configured initial values first; afterwards each new term is
    the coefficient-weighted sum of the previous terms, reduced modulo
    ``base``.
    """

    def __init__(self, initial_values, coeffs, base=2):
        """Create an LFSR from initial values and recurrence coefficients.

        Example:
            >>> IV = numpy.array([0, 0, 1, 1, 0])
            >>> coeffs = numpy.array([1, 1, 0, 0, 1])
            >>> lfsr = LinearFeedbackShiftRegister(IV, coeffs)
            >>> next(lfsr)
            0
        """
        self.initial_values = deque(initial_values)
        self.current_values = initial_values
        self.coeffs = coeffs
        self.base = base

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next element; replays the seed values first."""
        if not self.initial_values:
            # Seeds exhausted: apply the recurrence and slide the state
            # window forward by one position.
            term = numpy.mod(numpy.dot(self.coeffs, self.current_values), self.base)
            self.current_values = numpy.append(self.current_values[1:], term)
            return term
        return self.initial_values.popleft()
|
# -*- coding:utf-8 -*-
class Solution:
    def maxInWindows(self, num, size):
        """Return the maximum of every contiguous window of ``size`` in ``num``.

        Preserves the original contract: returns [] when size is 0 and None
        when num is empty (inconsistent, but kept for existing callers).
        Windows larger than num also yield [].
        """
        if size == 0:
            return []
        if not num:
            return None
        # max() per window is O(k) instead of sorting a copy (O(k log k)).
        return [max(num[i:i + size]) for i in range(len(num) - size + 1)]
a = Solution()
# Parenthesized print of a single value works under both Python 2 and 3;
# the original `print x` statement is a SyntaxError on Python 3.
print(a.maxInWindows([1, 3, 5, 7, 9, 11, 13, 15], 4))
# Demonstration of basic pandas Series construction and indexing.
import pandas as pd
import numpy as np

# Default RangeIndex 0..3.
a = pd.Series([1, 2, 3, 4])
print(a)
# Explicit integer index labels.
b = pd.Series([1, 2, 3, 4], index=(10, 20, 30, 40))
print(b)
print(a[2])
print()
# Built from a dict: the keys become the index.
c = pd.Series({"a": 1, "b": 2, "c": 3})
print(c)
print(c["b"])
print()
print()
# A scalar value is broadcast to every index label.
d = pd.Series(3, index=(1, 2, 3, 4, 5))
print(d)
print()
print()
|
def FindPeaks(self, norm=-1, dograph=False):
    """Returns number of 'significative peaks' in an image.

    Detects local maxima of a band-pass-filtered, sky-subtracted stamp and
    stores the boolean maxima map in self['MAXIMA'], the detection image in
    self['DETECTIMG'] and the peak count in self['M_NPEAKS'].  Always
    returns None.  `norm` is currently unused (normalization is commented
    out below).
    """
    # IMPORT STUFF
    import numpy as num
    from pdb import set_trace as stop
    # NOTE(review): numpy.nd_image does not exist in modern numpy — this is
    # numarray-era code (today: scipy.ndimage.shift / uniform_filter), as are
    # the `type=` keyword and `.getshape()` below.  Confirm runtime env.
    from numpy.nd_image import shift
    from numpy.nd_image.filters import uniform_filter
    import pyfits
    import os
    #from Moments.algorithms import get_stat
    # END IMPORT
    # INPUTS
    sky = self['BACKGROUND']
    image = self['STAMP'].copy() - sky      # sky-subtracted working copy
    try: mask = self['MASK'].copy()
    except AttributeError: mask = self['MASK']
    sigma_sky = self.execpars['sigma_sky'][0]  # sky noise level
    # END INPUTS
    ## gauss33 = num.array([[0.54,0.73,0.54],[0.73,1.,0.73],[0.54,0.73,0.54]])
    ## ngauss33 = gauss33.sum()
    ## gauss55 = num.array([[0.09,0.21,0.29,0.21,0.09],\
    ## [0.21,0.54,0.73,0.54,0.21],\
    ## [0.29,0.73,1.,0.73,0.29],\
    ## [0.21,0.54,0.73,0.54,0.21],\
    ## [0.09,0.21,0.29,0.21,0.09]])
    ## ngauss55 = gauss55.sum()
    ## gauss77 = num.array([[0.004,0.02,0.05,0.06,0.05,0.02,0.004],\
    ## [0.02,0.09,0.21,0.29,0.21,0.09,0.02],\
    ## [0.05,0.21,0.54,0.73,0.54,0.21,0.05],\
    ## [0.06,0.29,0.73,1.0,0.73,0.29,0.06],\
    ## [0.05,0.21,0.54,0.73,0.54,0.21,0.05],\
    ## [0.02,0.09,0.21,0.29,0.21,0.09,0.02],\
    ## [0.004,0.02,0.05,0.06,0.05,0.02,0.004]])
    ## ngauss77 = gauss77.sum()
    # No mask provided (sentinel -1): use an all-zeros (nothing masked) mask.
    # NOTE(review): `is -1` relies on CPython's small-int caching — a
    # dedicated sentinel object would be safer; `== -1` would be ambiguous
    # for array masks.
    if mask is -1: mask = num.zeros(shape=image.getshape(), type='Int8')
    # Zero out masked pixels so they cannot contribute to peaks.
    image[num.where(mask != 0)] = 0.
    # active = num.where((mask == 0) & (image > 0.))
    #sigma = get_stat(image,'stddev',minimum=-99)
    #if norm != -1 : image /= norm
    # Box-filtered versions of the image at three aperture scales.
    filtered3 = num.zeros(shape=image.shape, type='Float32')
    filtered7 = num.zeros(shape=image.shape, type='Float32')
    filtered15 = num.zeros(shape=image.shape, type='Float32')
    uniform_filter(image, (3, 3), output=filtered3,\
        mode='constant', cval=0)
    uniform_filter(image, (7, 7), output=filtered7,\
        mode='constant', cval=0)
    uniform_filter(image, (15, 15), output=filtered15,\
        mode='constant', cval=0)
    #detect = 100. * (filtered3 - filtered7) / num.abs(filtered3)
    #+num.abs(filtered7))
    # detect = (9 * filtered3 - 49 * filtered7) / sigma
    #bigapper = 49*filtered7
    #smallapper = 9 * filtered3
    #detect = smallapper / bigapper
    #detect = filtered3.copy()
    #detect = filtered3 - filtered7
    # Detection image: 7x7 aperture flux minus the local background estimated
    # from the 15x15 annulus around it.
    detect = 49. * filtered7 - (49.*(225.*filtered15-49.*filtered7)/176.)
    #detsigma = get_stat(detect,'stddev',minimum=-99)
    #print sigma, detsigma
    maxima = num.ones(shape=image.shape, type='Bool')
    #gaussianity = num.zeros(shape=image.shape,type='Float32')
    # A pixel is a local maximum if it beats all 24 neighbors in a 5x5 box.
    for i in range(-2, 3, 1):
        for j in range(-2, 3, 1):
            if i == 0 and j == 0:
                #gaussianity += image.copy()
                pass
            else:
                tmpshift1 = detect.copy() * 0.
                #tmpshift2 = image.copy() * 0.
                shift(detect, (i, j), output=tmpshift1)
                #shift(image,(i,j),output = tmpshift2)
                maxima = maxima & (detect > tmpshift1)
                #if num.abs(i) <= 1 and num.abs(j) <=1:
                # maxima = maxima & (tmpshift2 > sigma)
                #maxima = maxima & (num.abs(detect - tmpshift1)< \
                #0.50 * num.abs(detect))
                # maxima = maxima & (image > tmpshift2)
                # gaussianity += (tmpshift2.copy() / gauss33[i+1,j+1])
                #gaussianity += tmpsshift2.copy() / gauss55[i+2,j+2]
    #effsigma = 8.
    #gaussianity = (gaussianity / 9.) / filtered3
    #flux3 = 9. * filtered3 - (9.*(49.*filtered7-9.*filtered3)/(49.-9.))
    #relevance = (flux3 / (num.sqrt(9. * effsigma**2.))) >= 3
    #fquot = (9.*filtered3 / (49.*filtered7))
    #relevance = fquot >= 0.2
    #relevance = relevance & (fquot <= 0.5)
    # Significance cuts: 3x3 mean above sky noise and the detection image
    # above an empirically scaled threshold (13.13 factor — origin unclear,
    # presumably calibrated; TODO confirm).
    relevance = filtered3 / sigma_sky > 1.
    relevance2 = detect > 3. * 13.13 * sigma_sky #detsigma
    maxima = maxima & relevance & relevance2
    #maxima = maxima & (num.abs(gaussianity-1.5)/1.5 < 0.1)
    # maxima = maxima & (detect > sigma)
    # maxima = maxima & (detect < -10.0)
    # maxima = maxima & (detect > -100.0)
    self['MAXIMA'] = maxima
    self['DETECTIMG'] = detect
    self['M_NPEAKS'] = len(num.where(maxima)[0])
    if dograph:
        self.FindPeaks_graph()
    return None
|
# Import the modules
from pathlib import Path
from datetime import datetime
import time
from textwrap import wrap
from functions import *

# Read the tweet schedule from a spreadsheet and post each row at its
# scheduled time; messages over 200 chars are split into numbered parts.
data_folder = Path("in/")
file_to_open = data_folder/"Python_exercise1.xlsx"
data = obtain_excel_data(file_to_open, "data_table")

for i in range(0, len(data)):
    msg = data.at[i, 'post_text']
    t = datetime.now()
    # Parse 'YYYY-MM-DD HH:MM:SS' from the sheet into a datetime.
    next_date = str(data.at[i, 'post_datetime'])
    date_str, hour_str = next_date.split(" ")
    year, month, day = date_str.split("-")
    hour, m, sec = hour_str.split(":")
    next_t = datetime(int(year), int(month), int(day), int(hour), int(m), int(sec))
    if next_t > datetime.now():
        # Sleep until the scheduled time (+1s margin).
        # NOTE(review): timedelta.seconds ignores the .days component, so a
        # tweet scheduled more than a day ahead fires early — confirm the
        # intended scheduling horizon.
        delta_t = next_t - datetime.now()
        time.sleep(delta_t.seconds+1)
        print(datetime.now())
        if len(msg) > 200:
            # Long tweet: split into chunks, leaving room for " Part N".
            img = data.at[i, 'post_img']
            chunk_size = 200 - 7
            msgs = wrap(msg, chunk_size)
            if not ('nan' in str(data.at[i, 'post_img'])):
                # Image available: attach it to every part.
                for j, m in enumerate(msgs):
                    index = " Part " + str(j + 1)
                    text = str(m) + str(index)
                    tweet_image(img, text, i)
            else:
                # No image ('nan' cell): post text-only parts.
                for j, m in enumerate(msgs):
                    index = " Part " + str(j + 1)
                    text = str(m) + str(index)
                    print("Warning!!! Tweet with ID." + str(i + 1) + " Unable to download image")
                    tweet_woimage(text)
        else:
            if not ('nan' in str(data.at[i, 'post_img'])):
                tweet_image(data.at[i, 'post_img'], data.at[i, 'post_text'], i)
            else:
                print("Warning!!! Tweet with ID."+str(i+1)+" Unable to download image")
                tweet_woimage(data.at[i, 'post_text'])
        print("Info: Tweet with ID." + str(i+1) + " Sent")
    else:
        # Scheduled time already in the past: skip the row.
        print("Error!!! Tweet with ID." + str(i+1) + ". It wasn't sent!!!")
        print("Time Pass Out "+str(next_t))
|
class ExitStatus:
    """Numeric process exit codes: 0 for success, non-zero per failure mode
    (names are self-descriptive; used as plain int class attributes)."""
    OK = 0
    INVALID_RLI_CONFIG = 1
    NO_RLI_CONFIG = 2
    GITHUB_EXCEPTION_RAISED = 3
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
#
# Copyright (C) 2021-TODAY Cybrosys Technologies(<https://www.cybrosys.com>)
# Author: Cybrosys Techno Solutions(<https://www.cybrosys.com>)
#
# You can modify it under the terms of the GNU LESSER
# GENERAL PUBLIC LICENSE (LGPL v3), Version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE (LGPL v3) for more details.
#
# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE
# (LGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
from werkzeug.exceptions import NotFound
from odoo.addons.http_routing.models.ir_http import slug
from odoo.addons.website.controllers.main import QueryURL
from odoo.addons.website_sale.controllers.main import TableCompute, WebsiteSale
from odoo import http
from odoo.http import request
from odoo import fields
import datetime
class WebsiteProduct(http.Controller):
    """JSON routes rendering the Silon theme's product snippets
    (featured / popular / trending)."""

    @http.route('/get_featured_product', auth='public', type='json',
                website=True)
    def get_featured_products(self):
        """Render the featured-products snippet.

        Products come from the theme configuration record's
        ``featured_product_ids``.  Each product's combination info is merged
        into its read() dict, and a static star-rating widget is pre-rendered
        when product comments are enabled on the website.
        """
        silon_configuration = request.env.ref(
            'theme_silon.silon_configuration_data')
        product_id = silon_configuration.featured_product_ids
        # Ratings are shown only when the comments view is active.
        rating = request.website.viewref('website_sale.product_comment').active
        res = {'products': []}
        for product in product_id:
            # NOTE(review): this route uses the variant-level helper
            # (_get_combination_info_variant) while the other two use
            # _get_combination_info — presumably featured_product_ids are
            # product.product records; confirm.
            combination_info = product._get_combination_info_variant()
            res_product = product.read(['id', 'name', 'website_url'])[0]
            res_product.update(combination_info)
            if rating:
                res_product['rating'] = request.env[
                    "ir.ui.view"]._render_template(
                    'portal_rating.rating_widget_stars_static', values={
                        'rating_avg': product.rating_avg,
                        'rating_count': product.rating_count,
                    })
            else:
                res_product['rating'] = 0
            res['products'].append(res_product)
        products = res['products']
        values = {'products': products}
        response = http.Response(
            template='theme_silon.featured_product_snippet', qcontext=values)
        return response.render()

    @http.route('/get_popular_product', auth='public', type='json',
                website=True)
    def get_popular_products(self):
        """Render the top-selling-products snippet.

        Recomputes per-product sales counts over the last 7 days of website
        sale orders, flags the top 4 published products as top_selling, and
        renders them.  NOTE(review): this resets and rewrites qty_sold /
        top_selling on every product on each request — expensive on large
        catalogs.
        """
        products = request.env['product.template'].sudo().search([])
        for each in products:
            # Reset counters before recounting.
            each.qty_sold = 0
            each.top_selling = False
        date = fields.Datetime.now()
        date_before = date - datetime.timedelta(days=7)
        # Confirmed website orders from the last 7 days.
        orders = request.env['sale.order'].sudo().search([
            ('date_order', '<=', date),
            ('date_order', '>=',
             date_before),
            ('website_id', '!=', False),
            ('state', 'in', (
                'sale', 'done'))])
        for order in orders:
            order_line = order.order_line
            for product in order_line:
                # One increment per order line (not per quantity).
                product.product_id.qty_sold = product.product_id.qty_sold + 1
        website_product_ids = request.env['product.template'].sudo().search(
            [('is_published', '=', True),
             ('qty_sold', '!=', 0)],
            order='qty_sold desc', limit=4)
        website_product_ids.top_selling = True
        rating = request.website.viewref('website_sale.product_comment').active
        res = {'products': []}
        for product in website_product_ids:
            combination_info = product._get_combination_info()
            res_product = product.read(['id', 'name', 'website_url'])[0]
            res_product.update(combination_info)
            if rating:
                res_product['rating'] = request.env[
                    "ir.ui.view"]._render_template(
                    'portal_rating.rating_widget_stars_static', values={
                        'rating_avg': product.rating_avg,
                        'rating_count': product.rating_count,
                    })
            else:
                res_product['rating'] = 0
            res['products'].append(res_product)
        products = res['products']
        values = {'website_product_ids': products}
        response = http.Response(
            template='theme_silon.popular_snippet', qcontext=values)
        return response.render()

    @http.route('/get_trending_product', auth='public', type='json',
                website=True)
    def get_trending_product(self):
        """Render the most-viewed-products snippet.

        Same pattern as get_popular_products but driven by website.track
        page-visit records over the last 7 days; flags the top 8 published
        products as most_viewed.
        """
        products = request.env['product.template'].sudo().search([])
        for each in products:
            each.views = 0
            each.most_viewed = False
        date = fields.Datetime.now()
        date_before = date - datetime.timedelta(days=7)
        # Product-page visits in the last 7 days.
        products = request.env['website.track'].sudo().search(
            [('visit_datetime', '<=', date),
             ('visit_datetime', '>=', date_before),
             ('product_id', '!=', False)])
        for pro in products:
            pro.product_id.views = pro.product_id.views + 1
        product_ids = request.env['product.template'].sudo().search(
            [('is_published', '=', True),
             ('views', '!=', 0)],
            order='views desc', limit=8)
        product_ids.most_viewed = True
        rating = request.website.viewref('website_sale.product_comment').active
        res = {'products': []}
        for product in product_ids:
            combination_info = product._get_combination_info()
            res_product = product.read(['id', 'name', 'website_url'])[0]
            res_product.update(combination_info)
            if rating:
                res_product['rating'] = request.env[
                    "ir.ui.view"]._render_template(
                    'portal_rating.rating_widget_stars_static', values={
                        'rating_avg': product.rating_avg,
                        'rating_count': product.rating_count,
                    })
            else:
                res_product['rating'] = 0
            res['products'].append(res_product)
        products = res['products']
        values = {'product_ids': products}
        response = http.Response(
            template='theme_silon.trending_snippet', qcontext=values)
        return response.render()
class PriceFilter(WebsiteSale):
    """Extends the standard /shop controller with a min/max price filter."""

    @http.route()
    def shop(self, page=0, category=None, search='', ppg=False, **post):
        """Override WebsiteSale shop for Price Filter.

        Mirrors the upstream shop() flow (category resolution, pagination,
        attribute filtering, layout mode) and additionally computes the
        catalog-wide min/max list price and narrows the search domain when
        the user posts 'minimum'/'maximum' bounds.
        """
        maximum = minimum = 0
        add_qty = int(post.get('add_qty', 1))
        Category = request.env['product.public.category']
        if category:
            category = Category.search([('id', '=', int(category))], limit=1)
            if not category or not category.can_access_from_current_website():
                raise NotFound()
        else:
            category = Category
        if ppg:
            try:
                ppg = int(ppg)
                post['ppg'] = ppg
            except ValueError:
                ppg = False
        if not ppg:
            # Fall back to the website's configured products-per-page/row.
            ppg = request.env['website'].get_current_website().shop_ppg or 20
        ppr = request.env['website'].get_current_website().shop_ppr or 4
        # Full catalog price range, used to initialize the slider bounds.
        product_ids = request.env['product.template'].search(['&', ('sale_ok', '=', True), ('active', '=', True)])
        if product_ids and product_ids.ids:
            # Parameterized query (tuple binding) — safe from injection.
            request.cr.execute(
                'select min(list_price),max(list_price) from product_template where id in %s',
                (tuple(product_ids.ids),))
            list_prices = request.cr.fetchall()
            minimum = list_prices[0][0]
            maximum = list_prices[0][1]
        # Attribute filters arrive as "attribute-value" pairs in the URL.
        attrib_list = request.httprequest.args.getlist('attrib')
        attrib_values = [[int(x) for x in v.split("-")] for v in attrib_list if v]
        attributes_ids = {v[0] for v in attrib_values}
        attrib_set = {v[1] for v in attrib_values}
        domain = self._get_search_domain(search, category, attrib_values)
        # Price filter: constrain list_price to the user-selected range.
        if post.get('minimum') and post.get('maximum'):
            domain = domain + [('list_price', '>=', float(post.get('minimum'))),
                               ('list_price', '<=', float(post.get('maximum')))]
        # Keep the price bounds in pagination/sort links.
        keep = QueryURL('/shop', category=category and int(category), search=search, attrib=attrib_list,
                        order=post.get('order'), minimum=post.get('minimum'), maximum=post.get('maximum'))
        pricelist_context, pricelist = self._get_pricelist_context()
        request.context = dict(request.context, pricelist=pricelist.id, partner=request.env.user.partner_id)
        url = "/shop"
        if search:
            post["search"] = search
        if attrib_list:
            post['attrib'] = attrib_list
        Product = request.env['product.template'].with_context(bin_size=True)
        search_product = Product.search(domain, order=self._get_search_order(post))
        website_domain = request.website.website_domain()
        categs_domain = [('parent_id', '=', False)] + website_domain
        if search:
            search_categories = Category.search(
                [('product_tmpl_ids', 'in', search_product.ids)] + website_domain).parents_and_self
            categs_domain.append(('id', 'in', search_categories.ids))
        else:
            search_categories = Category
        categs = Category.search(categs_domain)
        if category:
            url = "/shop/category/%s" % slug(category)
        product_count = len(search_product)
        pager = request.website.pager(url=url, total=product_count, page=page, step=ppg, scope=7, url_args=post)
        offset = pager['offset']
        products = search_product[offset: offset + ppg]
        ProductAttribute = request.env['product.attribute']
        if products:
            # get all products without limit
            attributes = ProductAttribute.search([('product_tmpl_ids', 'in', search_product.ids)])
        else:
            attributes = ProductAttribute.browse(attributes_ids)
        layout_mode = request.session.get('website_sale_shop_layout_mode')
        if not layout_mode:
            if request.website.viewref('website_sale.products_list_view').active:
                layout_mode = 'list'
            else:
                layout_mode = 'grid'
        values = {
            'search': search,
            'category': category,
            'attrib_values': attrib_values,
            'attrib_set': attrib_set,
            'pager': pager,
            'pricelist': pricelist,
            'add_qty': add_qty,
            'products': products,
            'search_count': product_count,  # common for all searchbox
            'bins': TableCompute().process(products, ppg, ppr),
            'ppg': ppg,
            'ppr': ppr,
            'categories': categs,
            'attributes': attributes,
            'keep': keep,
            'search_categories_ids': search_categories.ids,
            'layout_mode': layout_mode,
            # Extra keys for the price-filter widget.
            'minimum': minimum,
            'maximum': maximum,
        }
        if category:
            values['main_object'] = category
        return request.render("website_sale.products", values)
|
# Read a figure type and its dimensions from stdin; print the area rounded
# to three decimals.
from math import pi  # hoisted out of the circle branch (imports go on top)

figure_type = input()  # input() already returns str — the str() was redundant
if figure_type == "square":
    side = float(input())
    result = side * side
elif figure_type == "rectangle":
    side_a = float(input())
    side_b = float(input())
    result = side_a * side_b
elif figure_type == "circle":
    radius = float(input())
    result = pi * (radius * radius)
elif figure_type == "triangle":
    base = float(input())
    height = float(input())
    result = (base * height) / 2
# Unknown figure types still raise NameError here, same as the original.
print(f"{result:.3f}")
|
import csv
import psycopg2
import os
"""
This is a ONE TIME run script to enter in the SREW station details into the database.
Enter all SREW stations into database for which we have a lat long for
In other words enter all valid SREW stations into the local database
Valid station IDs are contained within a static file.
SREW stations details are contained within a second file (this is a dump from the MO Midas database)
"""
#Open csv file that contains the ids for all the valid SREW stations
#Read in the ids for all the valid stations
def read_in_stations(validstationsfile, stationdetailsfile):
    """Load valid SREW station details into the local database.

    ``validstationsfile`` is a csv whose first column lists the valid
    station ids; ``stationdetailsfile`` is the MO Midas dump with one row
    per station.  Exits (SystemExit) with a message if either file is
    missing or not a .csv, or if the database is unreachable.  Stations
    already present in the table are skipped via IntegrityError/rollback.
    """
    if not os.path.isfile(validstationsfile):
        exit('Valid stations file not found.')
    if not os.path.isfile(stationdetailsfile):
        exit('Stations details file not found')
    if not validstationsfile.endswith('.csv'):
        exit('Valid stations file must be a .csv')
    if not stationdetailsfile.endswith('.csv'):
        exit('Stations details file must be a .csv')
    # Collect the id (first column) of every valid station; a set gives
    # O(1) membership tests below (was a list, O(n) per row).
    with open(validstationsfile) as valid_srew_stations:
        ids = {row[0] for row in csv.reader(valid_srew_stations, delimiter=',')}
    # NOTE(review): credentials are hard-coded; consider env vars/config.
    connection = 'dbname=trout user=postgres password=67a256 host=localhost port=5432'
    try:
        dbconn = psycopg2.connect(connection)
        cur = dbconn.cursor()
    except psycopg2.Error:
        # Narrowed from a bare except: only database errors mean "no
        # connection" — a bare except also swallowed KeyboardInterrupt etc.
        exit('Connection to the database could not be established')
    try:
        # Read the Midas dump and insert every valid station's details.
        with open(stationdetailsfile) as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                if row[4] not in ids:
                    continue
                data = {
                    'id': row[4],
                    'id_type': None,
                    'met_domain_name': row[7],
                    'src_id': None,
                    'lat': row[1],
                    'long': row[2],
                    'src_name': row[0],
                }
                try:
                    srewSQL = (
                        "INSERT INTO SrewStations"
                        "(id, id_type, met_domain_name, src_id, lat, long, src_name)"
                        "VALUES (%(id)s, %(id_type)s, %(met_domain_name)s, %(src_id)s, %(lat)s, %(long)s, %(src_name)s);"
                    )
                    cur.execute(srewSQL, data)
                    dbconn.commit()
                except psycopg2.IntegrityError:
                    # Station already entered: undo and move on.
                    dbconn.rollback()
    finally:
        # Always release the cursor and connection (the original leaked both).
        cur.close()
        dbconn.close()
if __name__ == '__main__':
    # CLI entry point: takes the paths to the two input csv files.
    import argparse
    parser = argparse.ArgumentParser(description='argument handler')
    parser.add_argument('validstationsfile', help='Path to the csv file containing valid srew station ids')
    parser.add_argument('srewstationdetails', help='Path to the csv file which contains the srew station details')
    args = parser.parse_args()
    read_in_stations(args.validstationsfile, args.srewstationdetails)
|
# -*- coding: utf-8 -*-
"""
MathSlider.py
Created on Wed Dec 28 07:45:24 2016
@author: slehar
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from matplotlib.widgets import RadioButtons
from matplotlib import animation
import numpy as np
import sys
from collections import deque
# Simulation state and histories for the scrolling trace plot.
x = 0.001
t = 0.
lastX = 0.
lastT = 0.
dt = .5                    # NOTE(review): re-bound to .4 further down
dArrayPos = deque([0.])    # position history (newest first)
dArrayVel = deque([0.])    # velocity history (newest first)
dArrayAcc = deque([0.])    # acceleration history (newest first)
tArray = deque([0.])       # matching time stamps
plotHeight = 30            # visible time span of the trace plot

# Open figure window
winXSize = 10
winYSize = 6
winAspect = winXSize/winYSize
plt.close('all')
fig = plt.figure(figsize=(winXSize, winYSize))
fig.canvas.set_window_title('MathSlider')
# Keypress 'q' to quit callback function
def press(event):
    """Keypress handler: close the figure when 'q' is pressed."""
    global ptList, data
    sys.stdout.flush()
    if event.key != 'q':
        return
    plt.close()
# Connect keypress event to callback function
fig.canvas.mpl_connect('key_press_event', press)
ySpace = np.linspace(.05, .24, 3) # Vertical spacing sliders

# sliders — three horizontal slider axes stacked at the bottom of the figure
axSlider1 = fig.add_axes([0.2, ySpace[0], 0.7, 0.05])
axSlider1.set_xticks([])
axSlider1.set_yticks([])
axSlider2 = fig.add_axes([0.2, ySpace[1], 0.7, 0.05])
axSlider2.set_xticks([])
axSlider2.set_yticks([])
axSlider3 = fig.add_axes([0.2, ySpace[2], 0.7, 0.05])
axSlider3.set_xticks([])
axSlider3.set_yticks([])
posSlider = Slider(axSlider1, 'position', -1., 1., valinit=0.)
velSlider = Slider(axSlider2, 'velocity', -1., 1., valinit=0.)
accSlider = Slider(axSlider3, 'accel', -1., 1., valinit=0.)
# Color-code each slider to match its trace line in the plot below.
posSlider.poly.set_facecolor('red')
velSlider.poly.set_facecolor('green')
accSlider.poly.set_facecolor('blue')
(pos, vel, acc) = (posSlider.val, velSlider.val, accSlider.val)

# Radio buttons to select Pos Vel Acc
rax = plt.axes([0.01, .05, 0.1, 0.25])
radio = RadioButtons(rax, ('Acc', 'Vel', 'Pos'), active=2)
#radio.circles[2].set_fc('red')
#radio.circles[1].set_fc('green')
#radio.circles[0].set_fc('blue')
#def radioFunc(label):
# print 'Radio button = %s'%radio.value_selected
# selection = radio.value_selected
# if selection == 'pos':
# radio.activecolor = 'red'
# radio.circles[2].set_fc('red')
# elif selection == 'vel':
# radio.activecolor = 'green'
# radio.circles[1].set_fc('green')
# if selection == 'acc':
# radio.activecolor = 'blue'
# radio.circles[0].set_fc('blue')
#
#radio.on_clicked(radioFunc)

# Global
time = 0.                 # NOTE(review): shadows the stdlib `time` module name
delT = 0.1                # timestep used by the 'Vel' branch in animate()
lastPos, lastVel, lastAcc = 0, 0, 0
lastTime = time
# Add axes 2 for plot trace
axTime = fig.add_axes([.1,.4,.8,.5])
axTime.set_ylim(0, 1)
axTime.set_xlim(-1, 1)
t = 0.
dt = .4                   # timestep used by the 'Acc' branch and the trace
x = .1
# Set up plot lines in axes 2 (one trace per quantity, color-matched)
linePos, = axTime.plot(t, pos, color='red', linewidth=1,
                       linestyle='-', alpha=1.0)
lineVel, = axTime.plot(t, vel, color='green', linewidth=1,
                       linestyle='-', alpha=1.0)
lineAcc, = axTime.plot(t, acc, color='blue', linewidth=1,
                       linestyle='-', alpha=1.0)
def animate(i):
    """FuncAnimation callback: integrate the selected quantity, then redraw.

    The radio selection decides which slider drives the system: 'Pos' sets
    position directly; 'Vel' integrates velocity into position; 'Acc'
    integrates acceleration into velocity and velocity into position.
    Histories are capped to the visible window (plotHeight/dt samples).
    """
    global time, t, pos, vel, acc, lastPos, lastVel, lastAcc, lastUpdated
    # time += delT
    if radio.value_selected == 'Pos':
        pos = posSlider.val
        lastPos, lastVel, lastAcc = pos, vel, acc
    # lastUpdated = None
    elif radio.value_selected == 'Vel':
        vel = velSlider.val
        # NOTE(review): this branch integrates with delT (0.1) while the
        # 'Acc' branch uses dt (0.4) — confirm whether that is intentional.
        pos += vel * delT
        pos = np.clip(pos, -1., 1.)
        posSlider.set_val(pos)
        lastPos, lastVel, lastAcc = pos, vel, acc
    # lastUpdated = None
    elif radio.value_selected == 'Acc':
        acc = accSlider.val
        # NOTE(review): no timestep factor on `vel += acc` — confirm.
        vel += acc
        vel = np.clip(vel, -1., 1.)
        velSlider.set_val(vel)
        pos += vel * dt
        pos = np.clip(pos, -1., 1.)
        posSlider.set_val(pos)
        lastPos, lastVel, lastAcc = pos, vel, acc
    t += dt
    # Push the new samples and trim each history to the plot window.
    dArrayPos.appendleft(pos)
    if len(dArrayPos) >= plotHeight/dt:
        dArrayPos.pop()
    dArrayVel.appendleft(vel)
    if len(dArrayVel) >= plotHeight/dt:
        dArrayVel.pop()
    dArrayAcc.appendleft(acc)
    if len(dArrayAcc) >= plotHeight/dt:
        dArrayAcc.pop()
    tArray.appendleft(t)
    if len(tArray) >= plotHeight/dt:
        tArray.pop()
    # Traces are plotted value-vs-time with time on the y axis (scrolling).
    lineAcc.set_data(dArrayAcc, tArray)
    lineVel.set_data(dArrayVel, tArray)
    linePos.set_data(dArrayPos, tArray)
    axTime.axis((-1, 1., t, t-plotHeight))
    plt.pause(.001)
# Start the animation loop (keep a reference so it isn't garbage-collected).
anim = animation.FuncAnimation(fig, animate)

# Pop fig window to top]]
# NOTE(review): window.raise_()/geometry() are Qt-backend specific — this
# tail will fail on other matplotlib backends.
figmgr=plt.get_current_fig_manager()
figmgr.canvas.manager.window.raise_()
geom=figmgr.window.geometry()
(xLoc,yLoc,dxWidth,dyHeight)=geom.getRect()
figmgr.window.setGeometry(10,10,dxWidth,dyHeight)
|
import unittest
from common import read_file
from ele_operation.FMS.sys_manage import cust_payway_set
from ele_operation import py_operation
import start_program
import time
import os
from HTMLTestReportYIF import HTMLTestRunner
from ddt import ddt,data,unpack
from common.log_decorator import *
# Module-level fixture setup: load the spreadsheet-driven test data and
# navigate the UI to the page under test before FMSTest is collected.
o = py_operation.operation()
excel_name = o.testdata  # test-data workbook name
rf = read_file.read_excel(excel_name)
testdata = rf.data_to_list("财务系统")
for i in testdata:  # find the target module's case rows
    if i == "系统管理/客户支付方式设置/查询":
        # NOTE(review): rebinding `testdata` while iterating it only works
        # because the loop iterator still references the original list;
        # `i` also deliberately leaks to module scope (used in FMSTest).
        testdata = testdata[i]
        print("用例模块路径" + i + "用例模块路径结束")
        op = cust_payway_set.query()
        start_program.login("admin", "123456")  # log in
        op.enter_page()  # navigate to the module under test
#print(testdata)
@ddt
class FMSTest(unittest.TestCase):
    """Data-driven checks for the payment-way query page; one test run per
    spreadsheet row (via ddt's @data)."""

    def setUp(self):
        pass

    @data(*testdata)
    @log_testnow("正在执行运单查询:")
    def test_paywayquery(self, value):
        # `value` is one spreadsheet row; value[1] == "是" marks it enabled.
        print(value)
        if value[1] == "是":
            print("测试")
            # NOTE(review): `i` is the module-level loop variable left over
            # from the setup above.
            print("用例模块路径" + i + "用例模块路径结束")
            print("用例名称" + value[0] + "用例名称结束" )
            op.query_list(value)
            # Compare a screenshot against the stored baseline image
            # (1 == match).  NOTE(review): this local `data` shadows the
            # ddt `data` decorator imported at module level.
            data = o.assert_pic(value[3])
            self.assertEqual(data, 1, msg="图片不一致")

    def tearDown(self):
        pass

print("end")
# fixed: the guard compared against 'main' instead of '__main__', so this
# block could never run.
if __name__ == '__main__':
    # Discover the FMS test modules and write a timestamped HTML report.
    now = time.strftime("%Y_%m_%d_%H_%M_%S")
    print(now)
    reportdir = r"C:\Users\Administrator\PycharmProjects\untitled\report"
    casedir = r"C:\Users\Administrator\PycharmProjects\untitled\testcase"
    discover = unittest.defaultTestLoader.discover(casedir, pattern="test_fms*.py")
    print(discover)
    filename = now + ".html"
    print(filename)
    # Close the report file when the run finishes (the original leaked it).
    with open(os.path.join(reportdir, filename), "wb") as fp:
        runner = HTMLTestRunner(stream=fp, title="test")
        runner.run(discover)
    #unittest.main()
import ksensors
import time
import messageboard
# Module-level setup for the collision monitor loop below.
k = ksensors.ksensors()  # hardware sensor interface (project wrapper)
state = False  # False = STOP!; True = GO
mb = messageboard.MessageBoard("collision")  # pub/sub channel for messages
# print(str(mb))
def goodPosition(x, y):
    """Return True when pixel position (x, y) lies inside one of the three
    rectangular measurement areas, False otherwise.

    Areas (bottom-left / top-right corners, pixels):
      area1: (108, 85) / (132, 67)
      area2: (180, 85) / (206, 68)
      area3: (251, 85) / (281, 67)
    """
    # BUG FIX: the original chained conditions with `else if`, which is a
    # SyntaxError in Python — rewritten with `elif`.
    if 67 <= y <= 85:
        if 108 <= x <= 132:    # area1
            return True
        elif 180 <= x <= 206:  # area2
            return True
        elif 251 <= x <= 281:  # area3
            return True
    return False
# Main monitor loop: watch position updates and, when the robot sits inside a
# measurement area, publish an angle correction read from the sensors.
while True:
    target = ["state", "updated_state"]  # message types to read from State
    # begin_ts = 1200
    msg_list = mb.readMsg(target) # is ts necessary?
    if len(msg_list) <= 0:
        continue
    # Only the most recent position message matters.
    msg = msg_list[-1]
    # first, check if in "good" place to calculate angle
    x = msg['x_pos_pixels']
    y = msg['y_pos_pixels']
    if goodPosition(x, y):
        angle = k.get_data()
        # BUG FIX: the original published the undefined name `theta`
        # (NameError on first hit); publish the angle just read instead.
        mb.postMsg("angle_correction", {"angle": angle})
    # Throttle the polling loop.
    time.sleep(0.1)
# Expected message schema (for reference):
# "x_pos_meters": float,
# "y_pos_meters": float,
# "x_pos_pixels": float,
# "y_pos_pixels": float,
# "orientation": x / -x,
# "angle": float (radians),
# "status": delivering / idle / dead
|
import logging
import time
from multiprocessing.dummy import RLock
from operator import itemgetter
import psycopg2
from decorators import synchronized
class Database:
    """Synchronized helper for the local `users` PostgreSQL database.

    Wraps a single autocommit connection/cursor pair; public methods are
    serialized with an RLock via the @synchronized decorator. Bulk row
    values are escaped with cursor.mogrify before being spliced into VALUES
    lists; scalar LIMIT/OFFSET values are passed as bound query parameters.
    """

    # Column order used for bulk inserts into each table.
    PROFILES_FIELDS = ['owner_id', 'first_name', 'last_name', 'sex',
                       'screen_name', 'last_seen', 'bdate', 'verified',
                       'followers_count', 'country', 'city', 'processed']
    PHOTOS_FIELDS = ['owner_id', 'photo_id', 'likes', 'date',
                     'face_boundary', 'photo_path', 'photo_url', 'embedding']

    def __init__(self):
        self._conn = psycopg2.connect(user='postgres', password='password',
                                      database='users', host='localhost')
        self._conn.autocommit = True
        self._cursor = self._conn.cursor()
        self._lock = RLock()

    def __del__(self):
        # Best-effort cleanup: during interpreter shutdown the connection may
        # already be unusable, and __del__ must never raise.
        try:
            self._conn.commit()
            self._cursor.close()
            self._conn.close()
        except Exception:
            pass

    @synchronized
    def profiles_pagination(self, offset, limit, columns=None,
                            skip_processed_ids=False):
        """Return `limit` profile rows starting at `offset`, ordered by owner_id.

        columns: optional indices to project out of each row.
        skip_processed_ids: when True, only rows with processed = FALSE.
        """
        # FIX: LIMIT/OFFSET are bound parameters instead of str.format — the
        # driver quotes them, closing an SQL-injection hole for these values.
        if skip_processed_ids:
            self._cursor.execute(
                'SELECT * FROM profiles '
                'WHERE processed = FALSE '
                'ORDER BY owner_id '
                'LIMIT %s OFFSET %s',
                (limit, offset)
            )
        else:
            self._cursor.execute(
                'SELECT * FROM profiles '
                'ORDER BY owner_id '
                'LIMIT %s OFFSET %s',
                (limit, offset)
            )
        rows = self._cursor.fetchall()
        if columns is None:
            return rows
        return [itemgetter(*columns)(row) for row in rows]

    @synchronized
    def get_all_photos(self, columns=None):
        """Return all photo rows whose embedding is not the ARRAY[0] placeholder."""
        self._cursor.execute(
            'SELECT * FROM photos '
            'WHERE embedding != CAST(ARRAY[0] as double precision[]) '
            'ORDER BY photo_id'
        )
        rows = self._cursor.fetchall()
        if columns is None:
            return rows
        return [itemgetter(*columns)(row) for row in rows]

    @synchronized
    def remove_profiles(self, remove_ids):
        """Delete the profiles whose owner_id is in remove_ids."""
        self._conn.commit()
        if len(remove_ids) == 0:
            return
        # Escape each id via mogrify, then splice into a VALUES list.
        rows = [self._mogrify((owner_id,)).decode('utf-8')
                for owner_id in remove_ids]
        self._cursor.execute(
            'DELETE FROM profiles '
            'WHERE owner_id IN (VALUES {remove_ids})'.format(
                remove_ids=','.join(rows)
            )
        )
        logging.info('Deleted {} profiles'.format(self._cursor.rowcount))

    @synchronized
    def clean_wrong_photos(self):
        """Delete photos whose owner no longer exists in profiles."""
        self._cursor.execute(
            'DELETE FROM photos photo WHERE '
            'NOT EXISTS (SELECT owner_id FROM profiles profile '
            'WHERE profile.owner_id=photo.owner_id)'
        )
        logging.info('Deleted {} photos'.format(self._cursor.rowcount))

    @synchronized
    def mark_processed_profiles(self, mark_ids):
        """Set processed = TRUE for every owner_id in mark_ids."""
        if len(mark_ids) == 0:
            return
        rows = [self._mogrify((owner_id,)).decode('utf-8')
                for owner_id in mark_ids]
        self._cursor.execute(
            'UPDATE profiles SET processed = TRUE '
            'WHERE owner_id IN (VALUES {mark_ids})'.format(
                mark_ids=','.join(rows)
            )
        )

    @synchronized
    def get_photos_without_embeddings(self, limit):
        """Return up to `limit` (photo_id, photo_path) rows that still carry
        the ARRAY[0] embedding placeholder."""
        self._cursor.execute(
            'SELECT photo_id, photo_path FROM photos '
            'WHERE embedding = CAST(ARRAY[0] as double precision[]) '
            'LIMIT %s',
            (limit,)
        )
        return self._cursor.fetchall()

    @synchronized
    def update_embeddings(self, embeddings):
        """Bulk-update photo embeddings from (photo_id, embedding) rows."""
        embeddings = self._transform_input_data(embeddings)
        self._cursor.execute(
            'UPDATE photos AS photo '
            'SET embedding = photo_new.embedding '
            'FROM (VALUES {embeddings}) '
            'AS photo_new(photo_id, embedding) '
            'WHERE photo_new.photo_id = photo.photo_id'.format(
                embeddings=','.join(embeddings)
            )
        )

    def _mogrify(self, params):
        """Escape `params` into a parenthesized SQL VALUES tuple (bytes)."""
        return self._cursor.mogrify(
            '({})'.format(','.join(['%s'] * len(params))), params)

    def _transform_input_data(self, data):
        """Escape each row of `data` into a '(v1,v2,...)' SQL string."""
        return [self._mogrify(row).decode('utf-8') for row in data]

    @synchronized
    def insert_photos(self, rows):
        """Insert photo rows (ordered per PHOTOS_FIELDS); existing photo_ids
        are skipped."""
        rows = self._transform_input_data(rows)
        start_time = time.time()
        self._cursor.execute(
            'WITH new_rows ({fields}) AS (VALUES {rows}) '
            'INSERT INTO photos ({fields}) '
            'SELECT {fields} '
            'FROM new_rows '
            'WHERE NOT EXISTS (SELECT photo_id FROM photos up '
            'WHERE up.photo_id=new_rows.photo_id)'.format(
                fields=u','.join(self.PHOTOS_FIELDS), rows=u','.join(rows)
            )
        )
        elapsed_time = time.time() - start_time
        # FIX: this log line previously said "profiles" (copy-paste error).
        logging.info('New photos inserted in {} ms'
                     .format(int(elapsed_time * 1000)))

    @synchronized
    def insert_profiles(self, rows):
        """Insert profile rows (ordered per PROFILES_FIELDS); existing
        owner_ids are skipped."""
        rows = self._transform_input_data(rows)
        start_time = time.time()
        self._cursor.execute(
            'WITH new_rows ({fields}) AS (VALUES {rows}) '
            'INSERT INTO profiles ({fields}) '
            'SELECT {fields} '
            'FROM new_rows '
            'WHERE NOT EXISTS (SELECT owner_id FROM profiles up '
            'WHERE up.owner_id=new_rows.owner_id)'.format(
                fields=u','.join(self.PROFILES_FIELDS), rows=u','.join(rows)
            )
        )
        elapsed_time = time.time() - start_time
        logging.info('New profiles inserted in {} ms'
                     .format(int(elapsed_time * 1000)))
|
import sys
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn import metrics
from utils import *
if __name__ == "__main__":
    # Require both the dataset and the reduction-method arguments.
    if len(sys.argv) <= 2:
        print ("Usage: python kmean.py [creditCard|MNIST] [nonReduced|PCA|ICA|RP|MI]")
        # FIX: sys.exit instead of the interactive-only exit() builtin.
        sys.exit(1)
    # Dataset / reduction-method -> input CSV lookup (replaces the nested
    # if/elif ladder; behaviour for valid argument pairs is unchanged).
    DATA_FILES = {
        "creditCard": {
            "nonReduced": "data/creditCard/size-5000_porp-0.1.csv",
            "PCA": "data/creditCard/PCA.csv",
            "ICA": "data/creditCard/ICA.csv",
            "RP": "data/creditCard/RP.csv",
            "MI": "data/creditCard/MI.csv",
        },
        "MNIST": {
            "nonReduced": "data/MNIST/MNIST_4_9_size-1000.csv",
            "PCA": "data/MNIST/PCA.csv",
            "ICA": "data/MNIST/ICA.csv",
            "RP": "data/MNIST/RP.csv",
            "MI": "data/MNIST/MI.csv",
        },
    }
    # Unknown arguments fall back to "", matching the original behaviour.
    file_path = DATA_FILES.get(sys.argv[1], {}).get(sys.argv[2], "")
    X, y, _, _ = load_data(file_path, is_shuffle=True, is_split=False)
    # Full PCA fitted only to report how much variance the first two
    # components preserve (printed diagnostic below).
    pca_full = PCA(random_state=10)
    pca_full.fit(X)
    print("Precentage of covarence preserved: %0.03f" % np.sum(pca_full.explained_variance_ratio_[:2]))
    # 2-component PCA used purely for 2-D cluster visualisation.
    pca = PCA(n_components=2, random_state=10)
    pca.fit(X)
    X_vis = pca.transform(X)
    print (X_vis.shape, X.shape)
    range_n_clusters = [2, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 24, 28, 32, 36, 40, 45, 50]
    # Per-k score accumulators for the summary plots at the end.
    sse_score, h_score, c_score, v_score = [], [], [], []
    ari_score, ami_score, nmi_score, fms_score, sil_score, chi_score, dbi_score = [], [], [], [], [], [], []
    for n_clusters in range_n_clusters:
        print ("============")
        # Fixed random_state keeps each k-means run reproducible.
        clusterer = KMeans(n_clusters=n_clusters, random_state=10)
        cluster_labels = clusterer.fit_predict(X)
        sse_score.append(clusterer.inertia_)
        # figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename=("%d.png" % n_clusters))
        # silhouette_analysis(X, cluster_labels, n_clusters, figname)
        # Project cluster centres into the same 2-D PCA space for plotting.
        centers = pca.transform(clusterer.cluster_centers_)
        figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename=("%d_vis.png" % n_clusters))
        visualize_cluster(X_vis, cluster_labels, n_clusters, centers, figname)
        # External metrics (against true labels y) and internal metrics
        # (silhouette / CH / DB computed on X itself).
        ari = metrics.adjusted_rand_score(y, cluster_labels)
        ami = metrics.adjusted_mutual_info_score(y, cluster_labels)
        nmi = metrics.normalized_mutual_info_score(y, cluster_labels)
        fms = metrics.fowlkes_mallows_score(y, cluster_labels)
        sil = metrics.silhouette_score(X, cluster_labels, metric='euclidean')
        # NOTE(review): renamed to calinski_harabasz_score in newer
        # scikit-learn; this spelling only exists on older versions.
        chi = metrics.calinski_harabaz_score(X, cluster_labels)
        dbi = metrics.davies_bouldin_score(X, cluster_labels)
        print ("Adjusted Rand index: %.6f" % ari)
        print ("Adjusted Mutual Information: %.6f" % ami)
        print ("Normalized Mutual Information: %.6f" % nmi)
        print ("Fowlkes-Mallows score: %.6f" % fms)
        print ("Silhouette Coefficient: %.6f" % sil)
        print ("Calinski-Harabaz Index: %.6f" % chi)
        print ("Davies-Bouldin Index: %.6f" % dbi)
        ari_score.append(ari)
        ami_score.append(ami)
        nmi_score.append(nmi)
        fms_score.append(fms)
        sil_score.append(sil)
        chi_score.append(chi)
        dbi_score.append(dbi)
        print ("SSE score: %.6f" % clusterer.inertia_)
        print ("V Measure for n_clusters = %d: " % n_clusters)
        h, c, v = v_measure(cluster_labels, y)
        h_score.append(h)
        c_score.append(c)
        v_score.append(v)
    # One plot per metric family across the whole k range.
    figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename="kmeans_ari")
    plot_and_save(range_n_clusters,
                  [ari_score],
                  [],
                  "KMeans Adjusted Rand index", "n_clusters", "score",
                  fig_path=figname, format='png')
    figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename="kmeans_mi")
    plot_and_save(range_n_clusters,
                  [ami_score, nmi_score],
                  ["Adjusted Mutual Information", "Normalized Mutual Information"],
                  "KMeans Mutual Information", "n_clusters", "score",
                  fig_path=figname, format='png')
    figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename="kmeans_fms")
    plot_and_save(range_n_clusters,
                  [fms_score],
                  [],
                  "KMeans Fowlkes-Mallows score", "n_clusters", "score",
                  fig_path=figname, format='png')
    figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename="kmeans_sil")
    plot_and_save(range_n_clusters,
                  [sil_score],
                  [],
                  "KMeans Silhouette Coefficient", "n_clusters", "score",
                  fig_path=figname, format='png')
    figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename="kmeans_chi")
    plot_and_save(range_n_clusters,
                  [chi_score],
                  [],
                  "KMeans Calinski-Harabaz Index", "n_clusters", "score",
                  fig_path=figname, format='png')
    figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename="kmeans_dbi")
    plot_and_save(range_n_clusters,
                  [dbi_score],
                  [],
                  "KMeans Davies-Bouldin Index", "n_clusters", "score",
                  fig_path=figname, format='png')
    figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename="kmeans_score")
    plot_and_save(range_n_clusters,
                  [sse_score],
                  ["SSE"],
                  "KMeans Score", "n_clusters", "score",
                  fig_path=figname, format='png')
    figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename="kmeans_v_measure")
    plot_and_save(range_n_clusters,
                  [h_score, c_score, v_score],
                  ["Homogeneity", "Completeness", "V Measure"],
                  "KMeans V Measure", "n_clusters", "score",
                  fig_path=figname, format='png')
    # Ground-truth labels plotted in the same PCA space for comparison.
    figname = create_path("fig", sys.argv[1], "KMeans", sys.argv[2], filename="true.png")
    visualize_cluster(X_vis, y, 2, [], figname)
|
# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import torch
import torch.nn as nn
from torchvision.datasets import mnist
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
from torch.autograd import Variable
import os
# Directory where training artefacts (e.g. the disabled loss log) are written.
curdir = "./weights/"
# Number of MNIST output classes; used by the hand-rolled CrossEntropy below.
num_classes = 10
class h_sigmoid(nn.Module):
    """Hard sigmoid activation: relu6(x + 3) / 6.

    A cheap piecewise-linear approximation of the sigmoid, saturating at
    exactly 0 below -3 and 1 above 3. `inplace` is forwarded to ReLU6.
    """
    def __init__(self, inplace=True):
        super(h_sigmoid, self).__init__()
        self.relu = nn.ReLU6(inplace=inplace)

    def forward(self, x):
        shifted = self.relu(x + 3)
        return shifted / 6


class h_swish(nn.Module):
    """Hard swish activation: x * hard_sigmoid(x)."""
    def __init__(self, inplace=True):
        super(h_swish, self).__init__()
        self.sigmoid = h_sigmoid(inplace=inplace)

    def forward(self, x):
        gate = self.sigmoid(x)
        return x * gate
class Model(nn.Module):
    """Tiny MNIST classifier: 1x1 conv (1 -> 2 channels) + h_sigmoid + linear.

    Forward maps (N, 1, 28, 28) images to (N, 10) class probabilities
    (softmax output, consumed by the hand-rolled CrossEntropy below).
    """
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(1, 2, 1, 1, 0)
        self.hsigm = h_sigmoid()
        # 2 channels * 28 * 28 = 1568 flattened features.
        self.fc = nn.Linear(1568, 10)
        # FIX: pass dim explicitly — nn.Softmax() with implicit dim is
        # deprecated; class scores live on dim 1 of the (N, 10) fc output,
        # which is the dimension the implicit behaviour picked anyway.
        self.softmax = nn.Softmax(dim=1)
        nn.init.xavier_uniform_(self.conv1.weight)
        nn.init.xavier_uniform_(self.fc.weight)
        nn.init.zeros_(self.conv1.bias)
        nn.init.zeros_(self.fc.bias)

    def forward(self, x):
        out = self.conv1(x)
        out = self.hsigm(out)
        # Flatten everything but the batch dimension for the linear layer.
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        out = self.softmax(out)
        return out
def CrossEntropy(y, target):
    """Mean cross-entropy over softmax probabilities.

    y: (N, num_classes) probabilities; target: (N,) integer class labels.
    Returns (loss, y) — y is handed back unchanged for the caller's use.
    """
    # FIX: torch.eye replaces the undocumented torch.sparse.torch.eye alias
    # (same dense identity matrix; the alias is not part of the public API).
    ones = torch.eye(num_classes)
    # One-hot row per target label, cast to y's dtype.
    t = ones.index_select(0, target).type(y.data.type())
    t = Variable(t)
    loss = (-t * torch.log(y)).sum() / y.size(0)
    return loss, y
def predict(test_loader, model):
    """Print the classification accuracy of `model` over `test_loader`.

    test_loader yields (images, labels) batches with integer class labels.
    Returns None; the accuracy is only printed.
    """
    correct = 0
    total = 0
    # FIX: inference only — no_grad avoids building an autograd graph for
    # every batch (the original had this commented out).
    with torch.no_grad():
        for images, labels in test_loader:
            outputs = model(images)
            # Predicted class = argmax over the class dimension.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # Guard against an empty loader (ZeroDivisionError in the original).
    accuracy = 100 * correct / total if total else 0.0
    print(
        "Accuracy of the network on the 10000 test images: {:.2f} %".format(
            accuracy
        )
    )
def printModel(model, file):
    """Serialize every tensor in model.state_dict() to the open binary file.

    Per tensor: 4 big-endian bytes holding len(tensor) — i.e. the size of the
    FIRST dimension only, not the total element count (NOTE(review): confirm
    this is what the consumer of dump.bin expects) — followed by the tensor's
    raw values via ndarray.tofile (binary mode; the format arg is ignored
    when sep is empty per numpy's tofile documentation).
    """
    for i in model.state_dict():
        file.write(len(model.state_dict()[i]).to_bytes(4, byteorder="big"))
        np.ndarray.tofile(model.state_dict()[i].detach().numpy(), file, format="%f")
if __name__ == "__main__":
    # Train the tiny model for one epoch on MNIST, reporting test accuracy
    # before and after, then dump the weights to dump.bin via printModel.
    batch_size = 50
    train_dataset = mnist.MNIST(root="./train", train=True, transform=ToTensor())
    test_dataset = mnist.MNIST(root="./test", train=False, transform=ToTensor())
    train_loader = DataLoader(train_dataset, batch_size=batch_size)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)
    model = Model()
    sgd = SGD(model.parameters(), lr=1e-2)
    # NOTE(review): cross_error is created but never used — training uses the
    # hand-rolled CrossEntropy on the softmax output instead.
    cross_error = CrossEntropyLoss()
    epoch = 1
    # Baseline accuracy with freshly initialized weights.
    predict(test_loader, model)
    for _epoch in range(epoch):
        for i, (images, labels) in enumerate(train_loader):
            outputs = model(images)
            loss, lossInput = CrossEntropy(outputs, labels)
            sgd.zero_grad()
            loss.backward()
            sgd.step()
            """if i % 100 == 0:
                with open(curdir + 'loss.txt', 'a') as outfile:
                    print(loss.item(), file=outfile)"""
            if i % 100 == 0:
                print("Step [{:4d}], Loss: {:.6f}".format(i, loss.item()))
        print("Epocha: ", _epoch)
        predict(test_loader, model)
    # Persist the trained weights in the custom binary layout.
    with open("dump.bin", "wb") as file:
        printModel(model, file)
|
import psycopg2 as pg2, psycopg2.extras as pg2_extras
import web
# Fetch Eastmoney stories and persist them; FIX: cursor and connection are
# now closed via try/finally even when fetching or writing raises.
conn = pg2.connect(host="localhost", port=5432, dbname="test_db", user="b")
try:
    cur = conn.cursor(cursor_factory=pg2_extras.DictCursor)
    try:
        stories = web.get_eastmoney_stories()
        web.write_to_db_china_news(stories, cur, conn)
    finally:
        cur.close()
finally:
    conn.close()
|
import unittest
import anuga
import numpy
import os
# 100 x 100 square test domain used by all tests below.
boundaryPolygon=[ [0., 0.], [0., 100.], [100.0, 100.0], [100.0, 0.0]]
# Set True for per-timestep diagnostics from the tests below.
verbose=False
class Test_boundary_flux_integral_operator(unittest.TestCase):
    """Checks that the accumulated boundary flux integral equals the water
    volume for each DE flow algorithm (euler / rk2 / rk3 timestepping).

    The three per-algorithm tests previously duplicated the same evolve-and-
    assert body; it now lives in _check_boundary_flux.
    """
    def setUp(self):
        pass

    def tearDown(self):
        # Remove mesh/output artefacts; they may not exist if a test failed
        # before creating them (bare except narrowed to OSError).
        for filename in ('test_boundaryfluxintegral.msh',
                         'test_boundaryfluxintegral.sww'):
            try:
                os.remove(filename)
            except OSError:
                pass

    def create_domain(self, flowalg):
        """Build a small sloping domain with reflective walls and one
        Dirichlet boundary, using the given flow algorithm."""
        domain = anuga.create_domain_from_regions(boundaryPolygon,
                                   boundary_tags={'left': [0],
                                                  'top': [1],
                                                  'right': [2],
                                                  'bottom': [3]},
                                   mesh_filename='test_boundaryfluxintegral.msh',
                                   maximum_triangle_area = 200.,
                                   minimum_triangle_angle = 28.0,
                                   use_cache=False,
                                   verbose=verbose)
        # 05/05/2014 -- riverwalls only work with DE0 and DE1
        domain.set_flow_algorithm(flowalg)
        domain.set_name('test_boundaryfluxintegral')
        domain.set_store_vertices_uniquely()
        def topography(x, y):
            # Planar bed sloping down along x.
            return -x / 150.
        # NOTE: Setting quantities at centroids is important for exactness of tests
        domain.set_quantity('elevation', topography, location='centroids')
        domain.set_quantity('friction', 0.03)
        domain.set_quantity('stage', topography, location='centroids')
        # Boundary conditions: reflective except a zero-stage Dirichlet side.
        Br = anuga.Reflective_boundary(domain)
        Bd = anuga.Dirichlet_boundary([0., 0., 0.])
        domain.set_boundary({'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})
        return domain

    def _check_boundary_flux(self, flowalg):
        """Shared test body: evolve an initially dry domain and assert that
        water volume matches the boundary flux integral."""
        domain = self.create_domain(flowalg)
        for t in domain.evolve(yieldstep=1.0, finaltime=5.0):
            if verbose:
                domain.print_timestepping_statistics()
                print(domain.get_water_volume(), domain.get_boundary_flux_integral())
        # The domain was initially dry, so all water entered via boundaries.
        vol = domain.get_water_volume()
        boundaryFluxInt = domain.get_boundary_flux_integral()
        if verbose:
            print(flowalg, vol, boundaryFluxInt)
        assert numpy.allclose(vol, boundaryFluxInt)

    def test_boundary_flux_operator_DE0(self):
        # euler timestepping
        self._check_boundary_flux('DE0')

    def test_boundary_flux_operator_DE1(self):
        # rk2 timestepping
        self._check_boundary_flux('DE1')

    def test_boundary_flux_operator_DE2(self):
        # rk3 timestepping
        self._check_boundary_flux('DE2')
if __name__ == "__main__":
    # FIX: unittest.makeSuite was deprecated and removed in Python 3.13; the
    # loader call below is the documented equivalent (default prefix 'test').
    suite = unittest.TestLoader().loadTestsFromTestCase(Test_boundary_flux_integral_operator)
    runner = unittest.TextTestRunner(verbosity=1)
    runner.run(suite)
|
import cv2
def redim(img, largura):
    """Resize `img` to width `largura` pixels, preserving the aspect ratio."""
    proporcao = img.shape[0] / img.shape[1]
    nova_altura = int(proporcao * largura)
    redimensionada = cv2.resize(img, (largura, nova_altura), interpolation=cv2.INTER_AREA)
    return redimensionada
# Create the face detector from the Haar-cascade XML file
df = cv2.CascadeClassifier('haarcascade/haarcascade_frontalface_default.xml')
# Open a video recorded on disk
camera = cv2.VideoCapture('video.mp4')
# It is also possible to open the system webcam
# instead; see the line below
#camera = cv2.VideoCapture(0)
while True:
    # read() returns 1-whether it succeeded and 2-the frame itself
    (sucesso, frame) = camera.read()
    if not sucesso: # end of video (or camera unavailable)
        print("nao achou a camera")
        break
    # shrink the frame to speed up processing
    frame = redim(frame, 320)
    # convert to grayscale for the cascade detector
    frame_pb = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces in the frame
    faces = df.detectMultiScale(frame_pb, scaleFactor=1.1, minNeighbors=3, minSize=(20, 20), flags=cv2.CASCADE_SCALE_IMAGE)
    # draw on a copy so the original frame stays untouched
    frame_temp = frame.copy()
    for (x, y, lar, alt) in faces:
        # (lar, alt) = detected box width and height in pixels
        cv2.rectangle(frame_temp, (x, y), (x + lar, y + alt), (0, 255, 255), 2)
    # Show an upscaled frame (with some quality loss)
    cv2.imshow("Encontrando faces...", redim(frame_temp, 640))
    # Exit when the 's' key is pressed
    if cv2.waitKey(1) & 0xFF == ord("s"):
        break
# close the stream and windows
camera.release()
cv2.destroyAllWindows()
|
import unittest
import main
class TestMain(unittest.TestCase):
    def test_2(self):
        """Table-driven checks of main.calc and main.calcAdv password rules."""
        calc_cases = [
            (111111, True),
            (223450, False),
            (123789, False),
        ]
        for number, expected in calc_cases:
            self.assertEqual(main.calc(number), expected)
        calc_adv_cases = [
            (112233, True),
            (123444, False),
            (111122, True),
            (221111, False),
            (222111, False),
            (222222, False),
            (123456, False),
            (123455, True),
            (123555, False),
            (112222, True),
            (222222, False),
            (222333, False),
            (446665, False),
        ]
        for number, expected in calc_adv_cases:
            self.assertEqual(main.calcAdv(number), expected)


if __name__ == "__main__":
    unittest.main()
|
class Solution:
    # @param A, a list of integers
    # @return an integer
    def firstMissingPositive(self, A):
        """Return the smallest missing positive integer; mutates A in place.

        Each value v with 1 <= v <= len(A) is chain-placed into slot v-1 via
        self.sort; afterwards, the first slot not holding its own index+1
        reveals the answer. O(n) time, O(1) extra space.
        """
        n = len(A)
        for i in range(n):
            if A[i] < 1:
                continue
            value = A[i]
            # Mark this slot as vacated before relocating the value.
            A[i] = -1
            if value <= n:
                self.sort(A, value)
        for i, v in enumerate(A):
            if v < 1:
                return i + 1
        return n + 1

    def sort(self, A, startIdx):
        """Place startIdx into slot startIdx-1, then keep relocating each
        displaced positive value until a dead end is reached."""
        current = startIdx
        while True:
            slot = current - 1
            displaced = A[slot]
            A[slot] = current
            # Stop when the slot already held its value or held a non-positive.
            if displaced == current or displaced <= 0:
                return
            current = displaced
|
import hashlib
import md5
from sys import exit
class Position(object):
    """A search state in the MD5 maze: coordinates in 1..4 plus the door-move
    string ('U'/'D'/'L'/'R') taken to reach this cell."""

    # A door is open when its hash character falls in this set.
    OPEN = ('b', 'c', 'd', 'e', 'f')

    def __init__(self, x, y, path_taken):
        self.x = x
        self.y = y
        self.path_taken = path_taken
        # Passcode prefix + moves so far; hashed below to find open doors.
        self.hash = "pgflpeqp" + path_taken

    def test_new_direction(self):
        """If at the goal (4, 4), return the solution path; otherwise append
        every legal next Position to the module-level `legal_next_steps` list
        and return None."""
        if self.x == 4 and self.y == 4:
            # Need to return here to prevent from continuing on from (4, 4)
            return self.path_taken
        # FIX: hashlib replaces the Python-2-only `md5` module; the explicit
        # encode keeps the digest identical on both Python 2 and 3.
        hash_test = hashlib.md5(self.hash.encode('utf-8')).hexdigest()
        # Hash chars 0..3 gate the U, D, L, R doors respectively.
        if hash_test[0] in self.OPEN and self.y > 1:
            legal_next_steps.append(Position(self.x, self.y - 1, self.path_taken + 'U'))
        if hash_test[1] in self.OPEN and self.y < 4:
            legal_next_steps.append(Position(self.x, self.y + 1, self.path_taken + 'D'))
        if hash_test[2] in self.OPEN and self.x > 1:
            legal_next_steps.append(Position(self.x - 1, self.y, self.path_taken + 'L'))
        if hash_test[3] in self.OPEN and self.x < 4:
            legal_next_steps.append(Position(self.x + 1, self.y, self.path_taken + 'R'))
        return None
# Breadth-first search over maze states, one frontier generation per round.
# `solution` is overwritten each time a goal state is seen, so when the
# frontier finally empties it holds the LAST (longest) path found — this
# puzzle asks for the longest route to (4, 4). (Python 2 print statement.)
starting_position = Position(1, 1, '')
current_positions = [starting_position]
solution = ''
step = 0
while True:
    # Shared accumulator that test_new_direction appends into.
    legal_next_steps = []
    for position in current_positions:
        solution_check = position.test_new_direction()
        if solution_check:
            solution = solution_check
    if legal_next_steps == []:
        print "Final step reached! It is %s: length is %d" % (
            solution, len(solution))
        exit()
    current_positions = legal_next_steps
    step += 1
|
def readline(filepath):
    """Return every line of the file at `filepath` (trailing newlines kept).

    Behaviour matches the original append-loop: returns [] for an empty file
    and keeps '\n' terminators. The unused line counter was dropped and the
    loop replaced by the file object's own readlines().
    """
    with open(filepath) as fp:
        return fp.readlines()
def main():
    """Bag-containment puzzle: count bag colours that can transitively
    contain a 'shiny gold' bag, using rules read from input.txt."""
    # Words stripped from each contents phrase so only the colour remains.
    stopwords = ['bags.','no','other',',','bag.','1','2','3','4','5','6','7','8','9','0','bag','bags']
    input = readline("input.txt")
    bags = {}          # outer colour -> list of colours it directly contains
    visited = set()
    ret = set()        # every colour that can (transitively) hold shiny gold
    queue = []
    for line in input:
        valuebags = []
        # NOTE(review): `input` is rebound to the current line's split here;
        # iteration still works (the for loop holds its own iterator), but
        # the name reuse is confusing.
        input = line.split("contain")
        key = input[0].strip()[:-5]   # drop trailing " bags" from the colour
        values = input[1].split(',')
        for value in values:
            splitval = value.split()
            # Remove counts/filler words; what remains is the contained colour.
            resvalue = [word for word in splitval if word.lower() not in stopwords]
            result = ' '.join(resvalue)
            #print(result)
            valuebags.append(result)
        bags[key] = valuebags
    # Seed with bags that hold shiny gold directly...
    for i in bags:
        if 'shiny gold' in bags[i]:
            ret.add(i)
            queue.append(i)
    #print(ret)
    #print(queue)
    # ...then walk outward: any bag containing a qualifying bag qualifies too.
    while len(queue) > 0 :
        key = queue.pop()
        if key not in visited:
            visited.add(key)
            for i in bags:
                #print(key)
                if key in bags[i]:
                    ret.add(i)
                    if i not in visited:
                        queue.append(i)
    print(ret)
    print(len(ret))
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 30 00:32:03 2017
@author: mmr
"""
import bs4
import sys
import requests
def get_result():
    """Scrape ketqua.net for the current lottery results.

    Returns (result, list_lo): `result` maps prize tier (0 = special prize,
    1..7) to the list of winning number strings for that tier; `list_lo` is
    the list of distinct two-digit tails across all winning numbers.
    """
    df = requests.get('http://ketqua.net').text
    soup = bs4.BeautifulSoup(df, 'lxml')
    # Prize tier -> how many numbers that tier has on the page.
    number_award = {0: 1, 1: 1, 2: 2, 3: 6, 4: 4, 5: 6, 6: 3, 7: 4}
    result = {}
    for number, value in number_award.items():
        temp = []
        for i in range(value):
            # Result cells carry ids of the form rs_<tier>_<index>.
            id_award = "rs_{}_{}".format(number, i)
            td = soup.findAll('td', {'id': id_award})
            temp.append(td[0].contents[0])
        result[number] = temp
    list_lo = []
    for number, value in result.items():
        for element in value:
            lo = element[-2:]  # two-digit tail of the winning number
            if lo not in list_lo:
                list_lo.append(lo)
    return result, list_lo
def main():
    """Check the numbers given on the command line against today's lottery
    tails; when none hit, print every prize tier instead."""
    result, list_lo = get_result()
    lucky_hits = 0
    for number in sys.argv[1:]:
        if str(number) in list_lo:
            print('You are so lucky with number: {}'.format(number))
            lucky_hits += 1
    prize_names = {0: "Dac Biet", 1: "Nhat", 2: "Nhi",
                   3: "Ba", 4: "Tu", 5: "Nam", 6: "Sau", 7: "Bay"}
    if lucky_hits == 0:
        for tier, numbers in result.items():
            print('Giai {} la: {}'.format(prize_names[tier], '-'.join(numbers)))
|
import random
def quick_sort(arr):
    """Sort `arr` in place with a recursive Hoare-style quicksort.

    Returns None (mirrors list.sort); the pivot is the middle element of
    each sub-range.
    """
    def partition(lo, hi):
        # Advance both cursors toward each other, swapping out-of-place pairs.
        pivot = arr[(lo + hi) // 2]
        while lo <= hi:
            while arr[lo] < pivot:
                lo += 1
            while arr[hi] > pivot:
                hi -= 1
            if lo <= hi:
                arr[lo], arr[hi] = arr[hi], arr[lo]
                lo += 1
                hi -= 1
        return lo

    def sort(lo, hi):
        if hi <= lo:
            return
        split = partition(lo, hi)
        sort(lo, split - 1)
        sort(split, hi)

    return sort(0, len(arr) - 1)
def binary_search(random_list, wanted_data):
    """Return an index of `wanted_data` in the sorted `random_list`,
    or None when it is absent."""
    lo, hi = 0, len(random_list) - 1
    while lo <= hi:
        center = (lo + hi) // 2
        candidate = random_list[center]
        if candidate == wanted_data:
            return center
        if candidate < wanted_data:
            lo = center + 1
        else:
            hi = center - 1
    return None
if __name__ == '__main__':
    # Demo: sort ten random ints, then binary-search for the value 4.
    # FIXES vs original: no longer shadows the builtin `list`; `if index:`
    # treated a hit at index 0 as "not found" (now `is not None`); and the
    # trailing call to the undefined `binary_search_recursive` (guaranteed
    # NameError) was removed.
    numbers = [random.randint(1, 10) for _ in range(10)]
    print('<정렬 전>')
    print(numbers)
    print('<정렬 후>')
    quick_sort(numbers)
    print(numbers)
    index = binary_search(numbers, 4)
    if index is not None:
        print(numbers[index])
    else:
        print('찾는 숫자가 없어요')
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 21:17:10 2021
@author: HP
"""
import pickle
import math
import numpy as np
import json
import numpy as np
from scipy.stats import entropy
from math import log, e
import pandas as pd
# Build, for every member, the list of attendance counts of the groups they
# belong to, then pickle the mapping with protocol 2 (Python-2 readable).
# FIX: pickle files are opened via `with` so the handles are always closed.
with open("mem_event_1", "rb") as infile:
    mem_event_1 = pickle.load(infile)   # key -> sequence of group ids
with open("hcount", "rb") as infile:
    hcount = pickle.load(infile)        # group id -> attendance count
dict1 = {}
for key in mem_event_1:
    group_ids = list(mem_event_1[key])
    counts = []
    for group_id in group_ids:
        try:
            counts.append(hcount[group_id])
        except KeyError:
            # Preserve the original behaviour of printing a blank line for
            # group ids missing from hcount (was a bare `except`).
            print("")
    dict1[key] = counts
with open("m10_attendance_distribution_of_members_protocol_2", "wb") as outfile:
    pickle.dump(dict1, outfile, protocol=2)
from typing import Union
from probability.custom_types.external_custom_types import AnyFloatMap
from probability.custom_types.internal_custom_types import AnyBetaMap, \
AnyDirichletMap
from probability.distributions import Beta, Dirichlet
class BayesRuleMixin(object):
    """Mixin exposing the prior and likelihood inputs of a Bayes-rule
    calculation as read-only properties.

    The underscored attributes are expected to be set by the concrete class;
    this mixin only reads them.
    """
    _prior: Union[float, Beta, AnyFloatMap, Dirichlet]
    _likelihood: Union[float, AnyFloatMap, AnyBetaMap, AnyDirichletMap]
    @property
    def prior(self) -> Union[float, Beta, AnyFloatMap, Dirichlet]:
        """The prior value, distribution, or mapping supplied to the calculation."""
        return self._prior
    @property
    def likelihood(self) -> Union[
        float, AnyFloatMap, AnyBetaMap, AnyDirichletMap
    ]:
        """The likelihood value, distribution mapping, or float mapping."""
        return self._likelihood
|
# Lowest common ancestor in binary tree or BST
# https://www.geeksforgeeks.org/lowest-common-ancestor-binary-tree-set-1/ - For python BST code
# https://www.youtube.com/watch?v=13m9ZCB8gjw&t=15s - Video
# Key learnings :
# 1: Implement Binary tree fast in python
# 2: Two approaches for this problem
# 1: Print path and then find latest common for two nodes (How will you print path from node to root)
# 2: Return null or return node to its parent as explained in Video
# 3: Remember why we added return in find_path function. In True case you need to return all the way to previous callers
# 4: To get address of any object in python use id(object) function
class Node(object):
    """A binary-tree node holding a value and optional left/right children."""
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

    def get_value(self):
        return self.value

    def set_value(self, value):
        self.value = value

    def get_children(self):
        """Return the non-None children as a list, left child first."""
        return [child for child in (self.left, self.right) if child is not None]
class BST(object):
    """Unbalanced binary search tree; values <= a node go to its left subtree.

    (Python 2: _print_tree_inorder uses the print statement.)
    """
    def __init__(self):
        self.root = None
    def _set_root(self, value):
        self.root = Node(value)
    def insert(self, value):
        """Insert `value`, creating the root on the first call."""
        if self.root is None:
            self._set_root(value)
        else:
            self._insert_node(self.root, value)
    def _insert_node(self, curr_node, value):
        # Walk down comparing against each node; duplicates go left.
        if curr_node.value >= value:
            if (curr_node.left):
                self._insert_node(curr_node.left, value)
            else:
                curr_node.left = Node(value)
        else:
            if (curr_node.right):
                self._insert_node(curr_node.right, value)
            else:
                curr_node.right = Node(value)
    def print_tree(self):
        """Print all values in sorted (inorder) order on one line."""
        self._print_tree_inorder(self.root)
    def _print_tree_inorder(self, curr_node):
        # printing tree in inorder fashion
        # call using root node
        if curr_node is not None:
            self._print_tree_inorder(curr_node.left)
            print curr_node.value,
            self._print_tree_inorder(curr_node.right)
def find_path(node, val, path):
    """Walk the BST from `node` toward `val`, appending each visited value
    (including val itself) to `path`.

    Returns True when val is found; False when the walk falls off the tree.
    The early `return` propagates the result back up the recursion.
    """
    if node is None:
        return False
    path.append(node.value)
    if node.value == val:
        return True
    # BST ordering decides which subtree can contain val.
    next_node = node.left if val < node.value else node.right
    return find_path(next_node, val, path)
# Run time O(n)
# Space O(n)
def LCA_1(tree, val1, val2):
    """Lowest common ancestor via root-to-node paths.

    Returns the LCA *value* of val1 and val2, or None when either value is
    absent from the tree (the original's implicit-None behaviour is kept).

    BUG FIX: the original raised Exception('Invalid input') as soon as the
    two paths diverged — i.e. whenever neither node is an ancestor of the
    other — instead of stopping there and returning the last common value.
    """
    path1 = []
    path2 = []
    if find_path(tree.root, val1, path1) and find_path(tree.root, val2, path2):
        # The paths share a prefix ending at the LCA; stop at the first
        # divergence and return the last value both paths agreed on.
        lca = None
        for a, b in zip(path1, path2):
            if a != b:
                break
            lca = a
        return lca
    return None
# Run time O(n)
# Space O(constant)
def LCA_2(tree, val1, val2):
# this function returns Node object
return _LCA_2(tree.root, val1, val2)
def _LCA_2(node, val1, val2):
if node is None:
return None
if node.value == val1 or node.value == val2:
return node
left = _LCA_2(node.left, val1, val2)
right = _LCA_2(node.right, val1, val2)
if left != None and right != None:
return node
if left == None and right == None:
return None
if left != None:
return left
elif right != None:
return right
if __name__ == '__main__':
    # Demo: build a sample BST, print it inorder, then show the LCA of 7 and
    # 11 via the constant-space approach. (Python 2 print statements.)
    tree = BST()
    tree.insert(3)
    tree.insert(6)
    tree.insert(2)
    tree.insert(11)
    tree.insert(9)
    tree.insert(5)
    tree.insert(8)
    tree.insert(13)
    tree.insert(7)
    tree.print_tree()
    print ''
    print LCA_2(tree, 7, 11).value
|
from django.contrib import admin
from qbeats_home.models import mrnStream
# Expose the mrnStream model in the Django admin with the default ModelAdmin.
admin.site.register(mrnStream)
|
import re
import subprocess
ROOT_FILENAME = 'throwback'

# Splice the contents of <root>.lua into the __lua__ section of <root>.p8.
# FIX: both files are opened via `with` so the handles are closed even when
# the substitution or write fails (the .lua handle previously leaked).
with open(ROOT_FILENAME + '.lua', 'r', newline='\n') as f_lua:
    lua = f_lua.read()
with open(ROOT_FILENAME + '.p8', 'r+', newline='\n') as f_p8:
    p8 = f_p8.read()
    # DOTALL lets `.*` span the whole multi-line lua section.
    new_p8 = re.sub(r'__lua__\n.*\n__gfx__', '__lua__\n{}\n__gfx__'.format(lua), p8, flags=re.DOTALL)
    # Rewrite in place: seek to start, write, then drop any leftover bytes.
    f_p8.seek(0)
    f_p8.write(new_p8)
    f_p8.truncate()
# subprocess.run(['C:\Program Files (x86)\PICO-8\pico8.exe', '-run', ROOT_FILENAME + '.p8'])
|
# Generated by Django 2.0.2 on 2018-02-16 19:28
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the rango UserProfile model: avatar image, phone fields and a
    one-to-one link to the configured auth user model.

    NOTE(review): auto-generated migration — only edit if it has not been
    applied to any database yet.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('rango', '0002_auto_20180215_1656'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(upload_to='profile_images')),
                ('country_code', models.CharField(max_length=5)),
                ('phone_number', models.CharField(max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
                ('date', models.DateField()),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
# Read a space-separated word list and a target palindrome from stdin, print
# every palindromic word, then report how often the target occurs among them.
words = input().split()
pal_word = input()
pal_list = [el for el in words if el == el[::-1]]
# FIX: list.count replaces the manual counter loop; the `found_word`
# comprehension from the original was computed but never used, so it is gone.
counter = pal_list.count(pal_word)
print(pal_list)
print(f"Found palindrome {counter} times")
|
__author__="Aurelija"
__date__ ="$2010-07-15 12.27.32$"
import re
from os.path import join
from Utilities.ReleaseScripts.cmsCodeRules.pathToRegEx import pathsToRegEx, pathToRegEx
def getFilePathsFromWalk(osWalkResult, file, exceptPaths=None):
    """Collect full paths of files matching *file* from an os.walk result.

    osWalkResult: iterable of (root, dirs, files) tuples as yielded by os.walk.
    file: file-name pattern, converted to a regular expression via pathToRegEx.
    exceptPaths: optional path patterns; any full path matching one is skipped.
    Returns: list of full paths of the matching files.
    """
    # Avoid the mutable-default-argument pitfall of the original signature.
    if exceptPaths is None:
        exceptPaths = []
    listOfFiles = []
    file = pathToRegEx(file)
    # Convert the exclusion patterns once, not once per visited file.
    exceptRegExs = pathsToRegEx(exceptPaths)
    for root, dirs, files in osWalkResult:
        for name in files:
            fullPath = join(root, name)
            # Skip the file when any exclusion pattern matches its full path.
            excepted = any(re.match(path, fullPath) for path in exceptRegExs)
            if not excepted and re.match(file, name):
                listOfFiles.append(fullPath)
    return listOfFiles
|
from django.conf import settings
from django.utils.hashcompat import md5_constructor
def get_anticaptcha_token():
    """Return a site-specific anticaptcha token.

    The purpose of this anticaptcha token is just to generate a random
    value, so we simply hash something that's always available but
    different in most django installs (MEDIA_ROOT).
    """
    # Local import: django.utils.hashcompat was removed in Django 1.6,
    # so hash directly with the standard library instead.
    import hashlib

    # md5 requires bytes on Python 3; MEDIA_ROOT is a text path.
    return hashlib.md5(str(settings.MEDIA_ROOT).encode('utf-8')).hexdigest()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import opal.models
from django.conf import settings
class Migration(migrations.Migration):
    """Create the anaesthetic app's clinical subrecord and lookup-list models.

    Adds the AnaestheticPlan, PreOPbloods and PreOPvisit subrecord tables
    (opal mixin bases) plus several name-only lookup lists (ASA, Dentition,
    FrailtyScale, Malampati, PreviousAnaesthetics, ProposedProcedure, Risks),
    then wires up the remaining foreign keys with AddField operations.
    """
    dependencies = [
        ('anaesthetic', '0021_auto_20171022_1651'),
    ]
    operations = [
        migrations.CreateModel(
            name='AnaestheticPlan',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(null=True, blank=True)),
                ('updated', models.DateTimeField(null=True, blank=True)),
                ('consistency_token', models.CharField(max_length=8)),
                ('Procedure_Risks', models.TextField(null=True, blank=True)),
                # *_ft fields hold free-text values alongside the *_fk lookup-list FKs.
                ('Proposed_Procedure_ft', models.CharField(default=b'', max_length=255, null=True, blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(opal.models.UpdatesFromDictMixin, opal.models.ToDictMixin, models.Model),
        ),
        migrations.CreateModel(
            name='ASA',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
            ],
            options={
                'ordering': ['name'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Dentition',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
            ],
            options={
                'ordering': ['name'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='FrailtyScale',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
            ],
            options={
                'ordering': ['name'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Malampati',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
            ],
            options={
                'ordering': ['name'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PreOPbloods',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(null=True, blank=True)),
                ('updated', models.DateTimeField(null=True, blank=True)),
                ('consistency_token', models.CharField(max_length=8)),
                ('Hb', models.FloatField(null=True, blank=True)),
                ('Plt', models.FloatField(null=True, blank=True)),
                ('WBC', models.FloatField(null=True, blank=True)),
                ('INR', models.FloatField(null=True, blank=True)),
                ('CRP', models.FloatField(null=True, blank=True)),
                ('Urea', models.FloatField(null=True, blank=True)),
                ('Creat', models.FloatField(null=True, blank=True)),
                ('Na', models.FloatField(null=True, blank=True)),
                ('K', models.FloatField(null=True, blank=True)),
                ('created_by', models.ForeignKey(related_name='created_anaesthetic_preopbloods_subrecords', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('episode', models.ForeignKey(to='opal.Episode')),
                ('updated_by', models.ForeignKey(related_name='updated_anaesthetic_preopbloods_subrecords', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(opal.models.UpdatesFromDictMixin, opal.models.ToDictMixin, models.Model),
        ),
        migrations.CreateModel(
            name='PreOPvisit',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(null=True, blank=True)),
                ('updated', models.DateTimeField(null=True, blank=True)),
                ('consistency_token', models.CharField(max_length=8)),
                ('Assessment', models.TextField(null=True, blank=True)),
                ('General_Risks', models.TextField(null=True, blank=True)),
                ('AdditionalRisks', models.TextField(null=True, blank=True)),
                ('TimeSeen', models.DateTimeField(null=True, blank=True)),
                ('previous_anaesthetics_ft', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('ASA_ft', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('Frailty_ft', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('Malampati_ft', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('Dentition_ft', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('ASA_fk', models.ForeignKey(blank=True, to='anaesthetic.ASA', null=True)),
                ('Dentition_fk', models.ForeignKey(blank=True, to='anaesthetic.Dentition', null=True)),
                ('Frailty_fk', models.ForeignKey(blank=True, to='anaesthetic.FrailtyScale', null=True)),
                ('Malampati_fk', models.ForeignKey(blank=True, to='anaesthetic.Malampati', null=True)),
                ('created_by', models.ForeignKey(related_name='created_anaesthetic_preopvisit_subrecords', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('episode', models.ForeignKey(to='opal.Episode')),
            ],
            options={
                'abstract': False,
            },
            bases=(opal.models.UpdatesFromDictMixin, opal.models.ToDictMixin, models.Model),
        ),
        migrations.CreateModel(
            name='PreviousAnaesthetics',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
            ],
            options={
                'ordering': ['name'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ProposedProcedure',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
            ],
            options={
                'ordering': ['name'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Risks',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
            ],
            options={
                'ordering': ['name'],
                'abstract': False,
            },
        ),
        # Remaining FKs added after the models above exist.
        migrations.AddField(
            model_name='preopvisit',
            name='previous_anaesthetics_fk',
            field=models.ForeignKey(blank=True, to='anaesthetic.PreviousAnaesthetics', null=True),
        ),
        migrations.AddField(
            model_name='preopvisit',
            name='updated_by',
            field=models.ForeignKey(related_name='updated_anaesthetic_preopvisit_subrecords', blank=True, to=settings.AUTH_USER_MODEL, null=True),
        ),
        migrations.AddField(
            model_name='anaestheticplan',
            name='Proposed_Procedure_fk',
            field=models.ForeignKey(blank=True, to='anaesthetic.ProposedProcedure', null=True),
        ),
        migrations.AddField(
            model_name='anaestheticplan',
            name='created_by',
            field=models.ForeignKey(related_name='created_anaesthetic_anaestheticplan_subrecords', blank=True, to=settings.AUTH_USER_MODEL, null=True),
        ),
        migrations.AddField(
            model_name='anaestheticplan',
            name='episode',
            field=models.ForeignKey(to='opal.Episode'),
        ),
        migrations.AddField(
            model_name='anaestheticplan',
            name='updated_by',
            field=models.ForeignKey(related_name='updated_anaesthetic_anaestheticplan_subrecords', blank=True, to=settings.AUTH_USER_MODEL, null=True),
        ),
    ]
|
#:1 Video to pictures converter
# Cycle the Windows desktop wallpaper through frames exported from a
# video (files named 000<frame>.png in C:\CuratedWallpaper).
import ctypes

frame = 1
# BUG FIX: the original assigned `FrameNumber` here but then used the
# undefined name `FrameText` on the next path line, raising NameError.
FrameText = str(frame)
# Double the backslash so it is not treated as a (potential) escape.
directory = "C:\\CuratedWallpaper" + "\\"
imagename = "000"
imageformat = ".png"
imagePath = directory + imagename + FrameText + imageformat


def changeBG(imagePath):
    """Set *imagePath* as the desktop wallpaper.

    20 == SPI_SETDESKWALLPAPER, 3 == SPIF_UPDATEINIFILE | SPIF_SENDCHANGE
    (per the Win32 SystemParametersInfo API).
    """
    ctypes.windll.user32.SystemParametersInfoW(20, 0, imagePath, 3)
    # Log the frame currently being shown (the original printed the stale
    # initial frame number on every iteration).
    print(imagePath, '\n', FrameText)


while frame < 21:
    changeBG(imagePath)
    frame += 1
    FrameText = str(frame)
    imagePath = directory + imagename + FrameText + imageformat
    # Wrap around so the slideshow repeats indefinitely.
    if frame == 20:
        frame -= 20
#*- coding: utf-8 -*- #
"""Tickets System
Usage:
tickets [-dgkzt] <from> <to> <date>
Options:
-h --help Show this screen.
-d 动车
-g 高铁
-k 快速
-z 直达
-t 特快
"""
import requests
import colorama
from docopt import docopt
from stations import Stations
import stations
from prettytable import PrettyTable
from colorama import Fore
def cli():
    """Query the 12306 left-ticket API and print available trains as a table.

    Reads <from>, <to> and <date> from the docopt arguments, translates
    the station names via the Stations map, and renders one row per train.
    """
    arguments = docopt(__doc__, version='Tickets System 1.0')
    from_station = Stations.get(arguments.get('<from>'), None)
    to_station = Stations.get(arguments.get('<to>'), None)
    date = arguments.get('<date>')
    url = '''https://kyfw.12306.cn/otn/leftTicket/queryO?leftTicketDTO.train_date={}&leftTicketDTO.from_station={}&leftTicketDTO.to_station={}&purpose_codes=ADULT'''.format(date, from_station, to_station)
    r = requests.get(url)
    raw_trains = r.json()['data']['result']
    pt = PrettyTable(["车次", "车站", "时间", "历时", "商务座", "一等座", "二等座", "高级软卧", "软卧", "硬卧", "软座", "硬座", "无座"])
    pt.align["车次"] = "l"
    for raw_train in raw_trains:
        # Each record is a '|'-separated string; the seat-class counts sit
        # at fixed positional indices in that record.
        data_list = raw_train.split('|')
        train_no = data_list[3]
        start_station = stations.get_station(data_list[6])
        end_station = stations.get_station(data_list[7])
        start_time = data_list[8]
        arrive_time = data_list[9]
        lishi = data_list[10]
        swz_num = data_list[32]
        ydz_num = data_list[31]
        edz_num = data_list[30]
        gjrw_num = data_list[21]
        rw_num = data_list[23]
        yw_num = data_list[28]
        rz_num = data_list[24]
        yz_num = data_list[29]
        wz_num = data_list[26]
        # (The original also extracted tdz/dw/qt counts that were never
        # displayed; those unused locals have been dropped.)
        pt.add_row([
            train_no,
            '\n'.join((Fore.GREEN + start_station + Fore.RESET, Fore.RED + end_station + Fore.RESET)),
            '\n'.join((Fore.GREEN + start_time + Fore.RESET, Fore.RED + arrive_time + Fore.RESET)),
            lishi,
            swz_num,
            ydz_num,
            edz_num,
            gjrw_num,
            rw_num,
            yw_num,
            rz_num,
            yz_num,
            wz_num])
    # Initialise colorama so the ANSI colours render on Windows consoles.
    colorama.init()
    print(pt)


if __name__ == '__main__':
    cli()
|
import pygame
from plane_sprites import *
class PlaneGame(object):
    """Main class for the plane-shooter game."""

    def __init__(self):  # set up the window, clock and sprite groups
        print("游戏初始化")
        # 1. Create the game window (480x700).
        self.screen = pygame.display.set_mode((480, 700))
        # 2. Create the game clock used to cap the frame rate.
        self.clock = pygame.time.Clock()
        # 3. Call the private helper that creates sprites and sprite groups.
        self.__create_sprites()

    def start_game(self):
        # Main game loop (event handling/drawing not implemented yet).
        print("开始游戏...")
        while True:
            pass

    def __create_sprites(self):
        # Placeholder: sprite creation to be implemented.
        pass
if __name__ == '__main__':
    # Create the game object
    game = PlaneGame()
    # Start the game loop
    game.start_game()
import geopandas as gpd
import pandas as pd
import requests
import click
@click.command()
@click.option('--shapefile', prompt='Please point to associative shapefile',
              default='BoundaryShapefiles/Ecological Sub-sections/tx_subsection.shp',
              help='Original Shapefile Geometries.')
def find_bounding_box(shapefile):
    """Write per-forest ecoregion bounding boxes to output.json."""
    gdf = gpd.read_file(shapefile)
    # Drop rows that have no forest name.
    gdf = gdf[~pd.isnull(gdf['FORESTNAME'])]
    # Duplicated map-unit names get their subsection code appended so the
    # resulting ecoregion labels stay unique.
    x = gdf[gdf['MAP_UNIT_N'].duplicated(keep=False)].copy()
    x['ecoregion'] = x['MAP_UNIT_N'].str.cat(x['MAP_UNIT_S'], sep=" ")
    y = gdf[~gdf['MAP_UNIT_N'].duplicated(keep=False)].copy()
    y['ecoregion'] = y['MAP_UNIT_N']
    z = pd.concat([y, x])
    # BUG FIX: shapely geometries expose the bounding box as `.bounds`
    # (minx, miny, maxx, maxy); `.bbox` raises AttributeError.
    z['bbox'] = z['geometry'].apply(
        lambda a: str(a.bounds).replace(" ", "").replace("(", "").replace(")", ""))
    # (A dead `z[[...]].copy()` statement whose result was discarded has
    # been removed.)
    z.groupby(by='FORESTNAME').apply(
        lambda b: b[['ecoregion', 'bbox']].sort_values(by='ecoregion').to_json(orient="records")
    ).to_json('output.json', orient='index')
    # after this you'll have to un-escape the inner json arrays


if __name__ == '__main__':
    find_bounding_box()
|
import logging
import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
def init_sentry_sdk(dsn, environment):
    """Initialise the Sentry SDK with the integrations this service uses.

    dsn: the Sentry project DSN to report to.
    environment: deployment environment label attached to every event.
    """
    # Report ERROR-level log records both as breadcrumbs and as events.
    log_integration = LoggingIntegration(
        level=logging.ERROR,
        event_level=logging.ERROR,
    )
    enabled_integrations = [
        FlaskIntegration(),
        log_integration,
        SqlalchemyIntegration(),
        RedisIntegration(),
        CeleryIntegration(),
    ]
    sentry_sdk.init(
        dsn=dsn,
        integrations=enabled_integrations,
        environment=environment,
        release="epay-version-v1.0",
    )
|
# Generated by Django 3.1 on 2020-10-23 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a one-letter `type` choice field (age vs. type) to RugGroup."""
    dependencies = [
        ('api', '0086_auto_20201023_1002'),
    ]
    operations = [
        migrations.AddField(
            model_name='ruggroup',
            name='type',
            # 'a' = grouped by age (e.g. Vintage), 't' = grouped by type
            # (e.g. Runner); defaults to age.
            field=models.CharField(choices=[('a', 'Age (e.g. Vintage)'), ('t', 'Type (e.g. Runner)')], default='a', max_length=1),
        ),
    ]
|
# Local-development settings layered on top of the shared base settings.
from .base import *
DEBUG = True
# NOTE(review): hard-coded SECRET_KEY committed to source control — fine
# for local development only; never reuse this value in production.
SECRET_KEY = '-5og^w3c^tcsp^n)9wk+2bvb(2j_vm=8o38j8t8@r4q%b&j=y_'
# Print outgoing e-mail to the console instead of actually sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ALLOWED_HOSTS = ['localhost', 'crypstation.test']
# to execute tasks locally synchronous
# CELERY_ALWAYS_EAGER = True
|
"""model utils"""
from PIL import Image
import numpy as np
import keras.backend as K
def resize_image(image, size):
    """Resize *image* to fit inside *size* while keeping its aspect ratio.

    image: PIL Image to resize.
    size: tuple of int, (w, h) target size.
    Return: a new RGB image of exactly *size*, grey-padded, with the
    scaled original centred on it.
    """
    target_w, target_h = size
    src_w, src_h = image.size
    # Scale by the tighter dimension so the whole image fits.
    scale = min(target_w / src_w, target_h / src_h)
    scaled_w = int(src_w * scale)
    scaled_h = int(src_h * scale)
    scaled = image.resize((scaled_w, scaled_h), Image.BICUBIC)
    # Grey canvas with the scaled image pasted in the middle.
    canvas = Image.new('RGB', size, (128, 128, 128))
    canvas.paste(scaled, ((target_w - scaled_w) // 2, (target_h - scaled_h) // 2))
    return canvas
def serial_apply(*funcs):
    """Compose a sequence of functions into one left-to-right pipeline.

    funcs: one or more callables; the first may take any signature, each
        subsequent one receives the previous result.
    Returns: a function that applies *funcs* in order.
    Raises: ValueError if called with no functions.
    """
    # Reject an empty pipeline up front.
    if not funcs:
        raise ValueError('funcs can not be empty!')

    def squeezed(*args, **kwargs):
        # Seed with the first function, then thread the result through
        # the remaining ones.
        result = funcs[0](*args, **kwargs)
        for fn in funcs[1:]:
            result = fn(result)
        return result

    return squeezed
def rand(a=0, b=1):
    """Draw one uniform random value scaled into [a, b].

    a: lower bound of the range.
    b: upper bound of the range.
    return: random value in [a, b]
    """
    # Single draw from [0, 1), affinely mapped onto the requested range.
    return a + (b - a) * np.random.rand()
def sigmoid_focal_loss(_sentinel=None, y=None, y_true=None, gama=0.0):
    """ Calculate focal loss, element wise focal loss

    param _sentinel: Used to prevent positional parameters. Internal, do not use.
    param y: tensor, the predict, value should be in (0, 1), shape=(N1, N2, ..., 1)
    param y_true: tensor, ground truth, value should be 0 or 1, has the same shape with y
    param gama: float, focal factor; gama == 0 reduces this to plain
        binary cross-entropy
    return: tensor, focal loss, has the same shape with y and y_true
    """
    # NOTE(review): _sentinel is never actually checked, so positional
    # misuse (passing y first) is not rejected — confirm intent.
    # Match dtypes so the arithmetic below does not mix precisions.
    y_true = K.cast(y_true, dtype=K.dtype(y))
    # Clip predictions away from 0/1 to keep the logs finite.
    clipped_y = K.clip(y, K.epsilon(), 1 - K.epsilon())
    loss = y_true * K.pow(1.0 - clipped_y, gama) * K.log(clipped_y) + \
        (1.0 - y_true) * K.pow(clipped_y, gama) * K.log(1 - clipped_y)
    # Negate: the expression above is the (negative) log-likelihood term.
    return -loss
|
from datetime import datetime
import factory
import pytz
from factory import fuzzy
from factory.django import DjangoModelFactory
from .models import Temperature
# Defining a factory
class TemperatureFactory(DjangoModelFactory):
    """Factory producing Temperature rows populated with fuzzed test data."""
    class Meta:
        model = Temperature
    # Random UTC timestamp from 2020-01-01 onwards, truncated to the hour.
    time = factory.fuzzy.FuzzyDateTime(
        datetime(2020, 1, 1, 0, 0, 0, 0, pytz.UTC),
        force_minute=0,
        force_second=0,
        force_microsecond=0,
    )
    # Random temperature between 0 and 40 with one decimal place.
    temperature = factory.fuzzy.FuzzyDecimal(0, 40, 1)
|
# Create your models here.
from __future__ import unicode_literals
from django.db import models
from django_mailbox.signals import message_received
from django.dispatch import receiver
class MailStorage(models.Model):
    """Stores a received e-mail message (sender, subject, date, body)."""
    sender = models.CharField(max_length=255)
    subject = models.CharField(max_length=255)
    # Verbose name keeps the original (misspelled) label 'date_recieved'.
    date = models.DateTimeField('date_recieved')
    body = models.TextField('Body')
    def __str__(self):
        # Display messages by subject in the admin and shell.
        return self.subject
class Login(models.Model):
    """Stored mailbox credentials.

    NOTE(review): the password is kept in plain text — consider hashing
    or encrypting it before any production use.
    """
    username = models.EmailField(max_length=254)
    password = models.CharField(max_length=255)
    def __str__(self):
        return self.username
|
# File-carving script: read a raw binary image given on the command line
# and extract embedded files into a Sayles/ subdirectory.
import sys
import os
import hashlib
#read from terminal
# NOTE(review): this shadows the builtin `input`; harmless here but worth
# renaming (e.g. to `argv`).
input = sys.argv
binaryFile = input[1]
#convert to hex: search for "FFD8FFE0"
print("type", type(binaryFile))
# Read the entire image into memory for scanning.
f = open(binaryFile, "rb")
data = f.read()
# print(data)
path = os.getcwd()
print ("The current working directory is %s" % path)
try:
    os.mkdir(path + "/Sayles")
except OSError:
    # Directory already exists (or cannot be created) — continue either way.
    print ("Creation of the directory %s failed" % path)
# else:
#     print ("Successfully created the directory %s " % path)
# Counter used to give carved files unique names.
count = 0
def writeToFolder(filetype, data, offset):
    """Write carved *data* to Sayles/<filetype><count>.<filetype> and log stats.

    filetype: label/extension for the carved file (e.g. "pdf").
    data: raw bytes of the carved file.
    offset: byte offset in the source image where the file started.
    Relies on the module-level `count` counter for unique names.
    """
    name = filetype + str(count) + "." + filetype
    fullpath = os.path.join(os.getcwd(), "Sayles", name)
    # Context manager guarantees the handle is closed even on write errors.
    with open(fullpath, "wb") as f:
        f.write(data)
    print("File Type Found: " + filetype)
    # BUG FIX: stat the file we just wrote; the original passed the bare
    # name, which points at the working directory rather than Sayles/,
    # raising FileNotFoundError.
    size = os.path.getsize(fullpath)
    print("File size: ", size)
    print("Location offset: ", offset)
def createHash(data):
    """Append the MD5 hex digest of *data* to Sayles/hashes.txt.

    data: raw bytes of a carved file.
    Uses the module-level `path` (the working directory at startup).
    """
    md5_returned = hashlib.md5(data).hexdigest()
    # BUG FIX: the original ended with `f.close` (no parentheses), so the
    # file handle was never explicitly closed; a context manager closes
    # and flushes it deterministically.
    with open(path + "/Sayles/hashes.txt", "a") as f:
        f.write(md5_returned + "\n")
# Carve the first PDF: PDF files start with "%PDF" and (roughly) end at "EOF".
pdfStart = data.find(b'%PDF')
# print(pdfStart)
if(pdfStart != -1):
    # NOTE(review): find(b'EOF') stops at the FIRST occurrence, so PDFs
    # with multiple %%EOF markers (incremental saves) will be truncated —
    # confirm this is acceptable.
    pdfEnd = data.find(b'EOF')
    # print("pdfEnd", pdfEnd)
    writeToFolder("pdf", data[pdfStart:pdfEnd], pdfStart)
    createHash(data[pdfStart:pdfEnd])
    count += 1
#loop till end of file
|
# Generated by Django 2.2.7 on 2020-02-18 11:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the MatricResult table for exam results."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='MatricResult',
            fields=[
                ('s_id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('email', models.EmailField(max_length=254)),
                ('dob', models.DateField()),
                ('img', models.ImageField(upload_to='media/')),
                ('roll_no', models.IntegerField()),
                ('roll_code', models.IntegerField()),
                # Per-subject marks.
                ('sst', models.IntegerField()),
                ('sci', models.IntegerField()),
                ('math', models.IntegerField()),
                ('hindi', models.IntegerField()),
                ('eng', models.IntegerField()),
            ],
        ),
    ]
|
import asyncio
from utils.vars import *
from utils.helper import get_nonce
from web3 import Web3, exceptions
from utils.notification import send_notification
from utils.helper import check_balance, check_slp_balance
import json
# Connect to the free Ronin RPC endpoint and load the SLP token ABI
# (the same transfer ABI is reused for the WETH contract below).
w3 = Web3(Web3.HTTPProvider(RONIN_PROVIDER_FREE))
with open("entity/abis/slp_abi.json") as f:
    slb_abi = json.load(f)
async def produce_transaction(txtype):
    """Build, sign and broadcast a full-balance token transfer on Ronin.

    txtype: 'SLP' sends the SLP balance via the SLP contract; any other
        value sends the WETH balance via the WETH contract.
    Returns: (signed, amount, txtype) when the broadcast succeeds.
        NOTE(review): on a broadcast exception a failure notification is
        sent and the function implicitly returns None — callers that
        unpack the tuple will then raise; confirm this is intended.
    """
    if txtype == 'SLP':
        use_contract = SLP_CONTRACT
        amount = check_slp_balance()
    else:
        use_contract = WETH_CONTRACT
        amount = check_balance()
    slp_contract = w3.eth.contract(
        address=Web3.toChecksumAddress(use_contract),
        abi=slb_abi
    )
    transaction = slp_contract.functions.transfer(
        Web3.toChecksumAddress(TO_ADDR),
        amount
    ).buildTransaction({
        # gasPrice 0 — presumably relying on Ronin's free-gas allowance;
        # confirm against the chain's current policy.
        "gas": 500000,
        "gasPrice": w3.toWei("0", "gwei"),
        "nonce": get_nonce(FROM_ADDR)
    })
    signed = w3.eth.account.sign_transaction(
        transaction,
        private_key=PRIV_KEY
    )
    try:
        w3.eth.send_raw_transaction(signed.rawTransaction)
        return signed, amount, txtype
    except Exception as e:
        send_notification(amount, txtype, failed=True, desc=str(e))
async def execute_signed_transaction(signed, amount, txtype):
    """Poll until the broadcast transfer is mined, then notify the result.

    signed: the signed transaction object (used to derive the tx hash).
    amount: token amount transferred, passed through to the notification.
    txtype: token label ('SLP'/WETH), passed through to the notification.
    """
    tx_hash = w3.toHex(w3.keccak(signed.rawTransaction))
    print("https://explorer.roninchain.com/txs/" + str(tx_hash))
    while True:
        try:
            receipt = w3.eth.get_transaction_receipt(tx_hash)
            if receipt["status"] == 1:
                send_notification(amount, txtype, tx_hash=tx_hash, failed=False)
            # BUG FIX: the original only broke out of the loop on failure,
            # so a successful transaction was polled (and re-notified)
            # forever. Leave the loop once a receipt exists, either way.
            break
        except exceptions.TransactionNotFound:
            # Not mined yet — wait and poll again.
            print(f"Waiting for transfer '{tx_hash}' to finish")
            await asyncio.sleep(5)
|
# Benchmark a perceptron-only (no hidden layer) network on the pen data
# five times and record accuracy statistics to Pen0.csv.
from Testing import testPenData, testCarData, average, stDeviation
import csv
# NOTE(review): 'wb' mode for csv writers is Python 2 style; Python 3
# requires open(..., 'w', newline='').
with open('Pen0.csv', 'wb') as csvfile:
    spamwriter = csv.writer(csvfile, delimiter=',',
                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
    spamwriter.writerow(['Data Type', 'Perceptrons', 'Average Accuracy', 'Standard Deviation', 'Maximum Accuracy'])
    penAccuracyList = []
    carAccuracyList = []
    # Five independent training runs; [1] is the accuracy of each run.
    for k in range(5):
        penAccuracyList.append(testPenData(hiddenLayers=[])[1])
        #carAccuracyList.append(testCarData(hiddenLayers=[])[1])
    avgPen = average(penAccuracyList)
    stDevPen = stDeviation(penAccuracyList)
    maxPen = max(penAccuracyList)
    spamwriter.writerow(['PenData', '0', str(avgPen), str(stDevPen), str(maxPen)])
    # The car-data run below is currently disabled.
    """
    avgCar = average(carAccuracyList)
    stDevCar = stDeviation(carAccuracyList)
    maxCar = max(carAccuracyList)
    print "writing"
    """
    #spamwriter.writerow(['Car Data', '0', str(avgCar), str(stDevCar), str(maxCar)])
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
class SearchForm(FlaskForm):
    """Single-field search form: a required query box plus a submit button."""
    searchfield = StringField('Search field', validators=[DataRequired()])
    searchbutton = SubmitField('Search')
from django.shortcuts import render, redirect
#from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm,ContactsUpdateForm
from .models import Profile
import re
# Create your views here.
def register(request):
    """Handle user sign-up: validate the form, create the user, redirect to login."""
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            # (The original extracted `username` from cleaned_data but
            # never used it; dropped.)
            messages.success(request, 'your Account has been created! you can now login!')
            return redirect('login')
    else:
        form = UserRegisterForm()
    # Invalid POST falls through here with the bound form so field errors render.
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Display and update the logged-in user's account and profile forms."""
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST,
                                   request.FILES, instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, 'your Account has been updated!')
            # Redirect after a successful POST to avoid resubmission on refresh.
            return redirect('profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'u_form': u_form,
        'p_form': p_form,
    }
    return render(request, 'users/profile.html', context)
@login_required
def Contacts(request):
    """Render the logged-in user's contacts parsed from their profile text.

    The profile's free-text `contacts_list` is split with a regex that
    captures runs of letters/colons/spaces followed by an optional '+'
    and digits, i.e. "Name : +123456"-style entries.
    """
    user_obj = str(request.user.profile.contacts_list)
    # NOTE(review): the character class [A-Za-z : ] matches spaces and
    # colons anywhere in the name part — confirm this matches the format
    # users actually store.
    clist = re.split('([A-Za-z : ]+[+]?[\d]+)', user_obj)
    user_contacts_list = []
    for i in clist:
        # Drop the empty/CRLF fragments re.split leaves between matches.
        if(i!='\r\n' and i!=""):
            user_contacts_list.append(i)
    print(user_contacts_list)  # debug output — consider removing
    user_info = request.user
    contacts_dict = { "user_contacts_list":user_contacts_list,"user_info":user_info, }
    return render(request,'users/contacts_list.html',context=contacts_dict)
@login_required
def updateContacts(request):
    """Let the logged-in user edit the raw contacts list on their profile.

    BUG FIX: added @login_required — the view dereferences
    request.user.profile, which raises for anonymous users, and every
    sibling profile view in this module is already protected.
    """
    if request.method == 'POST':
        p_form = ContactsUpdateForm(request.POST,
                                    request.FILES, instance=request.user.profile)
        if p_form.is_valid():
            p_form.save()
            messages.success(request, 'your Account has been updated!')
            return redirect('my-contacts')
    else:
        p_form = ContactsUpdateForm(instance=request.user.profile)
    context = {
        'p_form': p_form,
    }
    return render(request, 'users/contacts_update.html', context)
|
"""
Module for Linear N-Dimensional Interpolation
"""
import numpy as np
from scipy.interpolate import LinearNDInterpolator as LinearNDInterp
from scipy.interpolate import interp1d
from .approximation import Approximation
class Linear(Approximation):
    """
    Multidimensional linear interpolator.

    :param float fill_value: value used to fill in for requested points outside
        of the convex hull of the input points. If not provided, then the
        default is numpy.nan.
    """

    def __init__(self, fill_value=np.nan):
        self.fill_value = fill_value
        self.interpolator = None  # set by fit()

    def fit(self, points, values):
        """
        Construct the interpolator given `points` and `values`.

        :param array_like points: the coordinates of the points.
        :param array_like values: the values in the points.
        """
        # the first dimension is the list of parameters, the second one is
        # the dimensionality of each tuple of parameters (we look for
        # parameters of dimensionality one)
        as_np_array = np.array(points)
        if not np.issubdtype(as_np_array.dtype, np.number):
            raise ValueError('Invalid format or dimension for the argument'
                             '`points`.')
        if as_np_array.shape[-1] == 1:
            as_np_array = np.squeeze(as_np_array, axis=-1)
        if as_np_array.ndim == 1 or (as_np_array.ndim == 2
                                     and as_np_array.shape[1] == 1):
            # BUG FIX: honour `fill_value` in the one-dimensional branch
            # too; the original let interp1d raise for out-of-bounds
            # queries, inconsistent with the N-dimensional path below.
            self.interpolator = interp1d(as_np_array, values, axis=0,
                                         bounds_error=False,
                                         fill_value=self.fill_value)
        else:
            self.interpolator = LinearNDInterp(points,
                                               values,
                                               fill_value=self.fill_value)

    def predict(self, new_point):
        """
        Evaluate interpolator at given `new_points`.

        :param array_like new_points: the coordinates of the given points.
        :return: the interpolated values.
        :rtype: numpy.ndarray
        """
        return self.interpolator(new_point)
|
from test_sort import Tests
# Three-way quicksort: values below the pivot go left, values above go
# right, and values equal to the pivot stay in the middle.
def quicksort(l):
    """Return a sorted copy of *l* (lists of length <= 1 come back as-is)."""
    if len(l) <= 1:
        return l
    pivot = l[0]
    smaller = [item for item in l if item < pivot]
    larger = [item for item in l if item > pivot]
    same = [item for item in l if item == pivot]
    # Recurse on the strict partitions; the equal run is already in place.
    return quicksort(smaller) + same + quicksort(larger)
# Run the shared randomised-input sorting test harness against quicksort.
test = Tests(quicksort)
test.sort_random_inputs()
test.show_results()
|
import os, uuid
class TempFile:
    """A self-deleting temporary file inside *directory*.

    The file gets a random (uuid4) name, is opened in 'w+b' mode, and is
    closed and removed from disk when the instance is garbage-collected.
    """

    def __init__(self, directory):
        self.filename = os.path.join(directory, uuid.uuid4().hex)
        self.f = open(self.filename, 'w+b')

    def __del__(self):
        # Robustness fix: guard against a partially-constructed instance
        # (open() may have raised in __init__) and an already-removed
        # file, so garbage collection never emits spurious errors.
        f = getattr(self, 'f', None)
        if f is not None:
            f.close()
        try:
            os.remove(self.filename)
        except (AttributeError, FileNotFoundError):
            pass

    def get(self):
        """Return the underlying file object."""
        return self.f

    def read(self):
        """Return the full contents of the file, reading from the start."""
        self.f.seek(0)
        return self.f.read()
'''Convert a grammar to CNF and print it to stdout.'''
from cfg import core, cnf
# Short aliases for the grammar classes.
CFG = core.ContextFreeGrammar
CNF = cnf.ChomskyNormalForm
# Example grammar; B's second alternative is the empty production.
G = CFG('''
S -> ASA | aB
A -> B | S
B -> b |
''')
# Python 2 print statements: show the original grammar, then its CNF form.
print 'G:'
print G
print
print 'G\':'
print CNF(G)
|
"""
Title: Linked list random node
Problem:
Given a singly linked list, return a random node's value from the linked
list. Each node must have the same probability of being chosen.
Follow up: What if the linked list is extremely large and its length is
unknown to you? Could you solve this efficiently without using extra space?
Execution: python random_node.py
"""
from random import random
import unittest
class ListNode:
    """A node in a singly linked list."""

    def __init__(self, x: int) -> None:
        # Payload value and pointer to the following node (None == tail).
        self.val = x
        self.next = None
class LinkedList():
    """Singly linked list supporting uniform random node selection."""

    def __init__(self, head: ListNode) -> None:
        self.head = head

    def get_random(self) -> int:
        """Reservoir-sample one node value with equal probability.

        Walks the list once; the i-th node replaces the current choice
        with probability 1/i, which makes every node equally likely
        without knowing the length in advance or using extra space.
        """
        choice = -1
        seen = 1
        node = self.head
        while node is not None:
            if random() < 1 / seen:
                choice = node.val
            node = node.next
            seen += 1
        return choice

    def check_random_node(self, node_val: int) -> bool:
        """Return True if *node_val* belongs to the fixed test list [1, 2, 3]."""
        return node_val in [1, 2, 3]
class TestRandomNode(unittest.TestCase):
    """Unit test for get_random."""

    def test_1(self):
        # Build the fixed list 1 -> 2 -> 3 and verify the sampled value
        # is one of its members.
        nodes = ListNode(1)
        nodes.next = ListNode(2)
        nodes.next.next = ListNode(3)
        linked = LinkedList(nodes)
        self.assertEqual(linked.check_random_node(linked.get_random()), True)
        print("Explanation: .")
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import shutil
import synthtool as s
import synthtool.gcp as gcp
from synthtool.languages import python
from synthtool.sources import git
GOOGLEAPIS_REPO = "googleapis/googleapis"
# ----------------------------------------------------------------------------
# Get gapic metadata proto from googleapis
# ----------------------------------------------------------------------------
# Clean up googleapis
shutil.rmtree('googleapis', ignore_errors=True)
# Clone googleapis
googleapis_url = git.make_repo_clone_url(GOOGLEAPIS_REPO)
subprocess.run(["git", "clone", googleapis_url])
# This is required in order for s.copy() to work
s._tracked_paths.add("googleapis")
# Gapic metadata proto needed by gapic-generator-python
# Desired import is "from google.gapic.metadata import gapic_metadata_pb2"
s.copy("googleapis/gapic", "google/gapic", excludes=["lang/", "packaging/", "**/BUILD.bazel"],)
s.copy("googleapis/google/api/*.proto", "google/api")
s.copy("googleapis/google/cloud/extended_operations.proto", "google/cloud")
s.copy("googleapis/google/cloud/location/locations.proto", "google/cloud/location")
s.copy("googleapis/google/logging/type/*.proto", "google/logging/type")
s.copy("googleapis/google/longrunning/*.proto", "google/longrunning")
s.copy("googleapis/google/rpc/*.proto", "google/rpc")
s.copy("googleapis/google/rpc/context/*.proto", "google/rpc/context")
s.copy("googleapis/google/type/*.proto", "google/type")
# Clean up googleapis
shutil.rmtree('googleapis')
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
common = gcp.CommonTemplates()
templated_files = common.py_library()
# TODO: use protoc-docs-plugin to add docstrings to protos
s.move(templated_files / ".kokoro", excludes=["docs/**/*", "publish-docs.sh"])
s.move(templated_files / "setup.cfg")
s.move(templated_files / "LICENSE")
s.move(templated_files / "MANIFEST.in")
s.move(templated_files / "renovate.json")
s.move(templated_files / ".github", excludes=["workflows"])
# Generate _pb2.py files and format them
s.shell.run(["nox", "-s", "generate_protos"], hide_output=False)
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
# Add license headers
python.fix_pb2_headers()
LICENSE = """
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
PB2_GRPC_HEADER = r"""(\# Generated by the gRPC Python protocol compiler plugin\. DO NOT EDIT!$)
(.*?$)"""
s.replace(
"**/*_pb2_grpc.py",
PB2_GRPC_HEADER,
fr"{LICENSE}\n\n\g<1>\n\n\g<2>", # add line breaks to avoid stacking replacements
)
|
# Standard imports
import os.path
import datetime
import time
# Django imports
from django.test import TestCase
from django.test import Client
from django.conf import settings
from django.utils import timezone
# App imports
from imageboard.models import Board, Thread, Post
import imageboard.exceptions as i_ex
from captcha.models import Captcha
from moderation.models import Ban, BanReason, ImageFilter, WordFilter
import moderation.exceptions as m_ex
class PostingExceptionsTestCase(TestCase):
    """Integration tests for error handling of the /create/ posting endpoint.

    Each test submits a deliberately broken payload (or wrong request type)
    and verifies that the view responds with HTTP 403, renders the posting
    error template, and places the expected exception instance into the
    template context.
    """
    def setUp(self):
        """Create a board, a thread, a solved captcha and a valid base payload."""
        # Init testing client
        self.client = Client()
        # Create a board
        self.board = Board.objects.create(
            hid='t',
            name='testing',
            default_max_posts_num=100,
        )
        # Create a thread
        self.thread = Thread.objects.create(
            hid=0,
            board=self.board,
            max_posts_num=self.board.default_max_posts_num,
        )
        # Create a captcha
        Captcha.objects.create(
            public_id='100500',
            solution='swordfish',
            image='null',
        )
        # Update session with captcha info with this request
        self.client.get('/captcha/')
        # Base post content dict
        # Tests copy this and override single fields to trigger specific errors.
        self.base_post_content = {
            'form_type': 'new_post',
            'board_id': '1',
            'thread_id': '1',
            'captcha_0': 'swordfish',
            'captcha_1': '100500',
            'title': 'Test title',
            'author': 'Tester',
            'email': '',
            'text': 'Test test test test',
            'password': 'swordfish',
        }
        # Prepare upload dirs
        (settings.STORAGE_DIR / 'test').mkdir(parents=True, exist_ok=True)
        (settings.STORAGE_DIR / 'test' / 'images').mkdir(parents=True, exist_ok=True)
        (settings.STORAGE_DIR / 'test' / 'thumbs').mkdir(parents=True, exist_ok=True)
    def make_bad_request(self, post_content_mixin, exception, **extra_client_kwargs):
        """POST base payload updated with `post_content_mixin` and assert failure.

        Asserts a 403 response rendered with the posting error template whose
        context exception is an instance of `exception`. `extra_client_kwargs`
        are forwarded to the test client (e.g. REMOTE_ADDR for ban tests).
        """
        post_data = self.base_post_content.copy()
        post_data.update(post_content_mixin)
        response = self.client.post('/create/', post_data, **extra_client_kwargs)
        # Error template will be used with 403 status code
        self.assertEqual(response.status_code, 403)
        self.assertTemplateUsed(response, 'imageboard/posting_error_page.html')
        # Get exception from context
        e = response.context.get('exception')
        self.assertIsInstance(e, exception)
    def test_form_validation(self):
        """Malformed form fields raise FormValidationError."""
        self.make_bad_request({'form_type': 'bad'}, i_ex.FormValidationError)
        self.make_bad_request({'board_id': 'bad'}, i_ex.FormValidationError)
        self.make_bad_request({'thread_id': 'bad'}, i_ex.FormValidationError)
    def test_board_not_found(self):
        """Unknown board id raises BoardNotFound."""
        self.make_bad_request({'board_id': '100500'}, i_ex.BoardNotFound)
    def test_thread_not_found(self):
        """Unknown thread id raises ThreadNotFound."""
        self.make_bad_request({'thread_id': '100500'}, i_ex.ThreadNotFound)
    def test_board_is_locked(self):
        """Posting to a locked board raises BoardIsLocked."""
        locked_board = Board.objects.create(
            hid='b',
            name='random',
            default_max_posts_num=100,
            is_locked=True,
        )
        # NOTE(review): this thread is created on the original (unlocked) board;
        # presumably only the locked board_id matters for this test — confirm.
        locked_thread = Thread.objects.create(
            hid=1,
            board=self.board,
            max_posts_num=1,
            is_locked=True,
        )
        self.make_bad_request({'board_id': '2', 'thread_id': '2'}, i_ex.BoardIsLocked)
    def test_thread_is_locked(self):
        """Posting to a locked thread raises ThreadIsLocked."""
        locked_thread = Thread.objects.create(
            hid=1,
            board=self.board,
            max_posts_num=1,
            is_locked=True,
        )
        self.make_bad_request({'thread_id': '2'}, i_ex.ThreadIsLocked)
    def test_make_get_request(self):
        """GET on the posting endpoint raises BadRequestType."""
        post_data = self.base_post_content.copy()
        response = self.client.get('/create/', post_data)
        # Error template will be used with 403 status code
        self.assertEqual(response.status_code, 403)
        self.assertTemplateUsed(response, 'imageboard/posting_error_page.html')
        # Get exception from context
        e = response.context.get('exception')
        self.assertIsInstance(e, i_ex.BadRequestType)
    def test_attached_non_image(self):
        """Attaching a non-image file raises BadFileType."""
        filename = os.path.join(os.path.dirname(__file__), 'not_image.txt')
        with self.settings(MEDIA_ROOT=str(settings.STORAGE_DIR / 'test')):
            with open(filename, 'rb') as fp:
                self.make_bad_request({'images': fp}, i_ex.BadFileType)
    def test_attached_large_image(self):
        """Attaching an oversized image raises FileIsTooLarge."""
        filename = os.path.join(os.path.dirname(__file__), 'noise_big.png')
        with self.settings(MEDIA_ROOT=str(settings.STORAGE_DIR / 'test')):
            with open(filename, 'rb') as fp:
                self.make_bad_request({'images': fp}, i_ex.FileIsTooLarge)
    def test_attached_too_many_images(self):
        """Attaching eight images at once raises TooManyFiles."""
        filename = os.path.join(os.path.dirname(__file__), 'noise.png')
        with self.settings(MEDIA_ROOT=str(settings.STORAGE_DIR / 'test')):
            with open(filename, 'rb') as fp1, open(filename, 'rb') as fp2, open(filename, 'rb') as fp3, open(filename, 'rb') as fp4, open(filename, 'rb') as fp5, open(filename, 'rb') as fp6, open(filename, 'rb') as fp7, open(filename, 'rb') as fp8:
                self.make_bad_request({'images': [fp1, fp2, fp3, fp4, fp5, fp6, fp7, fp8]}, i_ex.TooManyFiles)
    def test_wordfilter(self):
        """Text containing filtered words raises BadMessage."""
        WordFilter.objects.create(expression='nomad')
        WordFilter.objects.create(expression='huita')
        self.make_bad_request({'text': 'nomad huita'}, m_ex.BadMessage)
    def test_advanced_wordfilter(self):
        """Regex word filters (anchors, alternation) also raise BadMessage."""
        WordFilter.objects.create(expression='^huit(a|ariy)')
        WordFilter.objects.create(expression='^nomad')
        self.make_bad_request({'text': 'huitariy'}, m_ex.BadMessage)
        self.make_bad_request({'text': 'nomadia'}, m_ex.BadMessage)
    def test_imagefilter(self):
        """An image matching a stored checksum+size filter raises BadImage."""
        # Use noise.png
        ImageFilter.objects.create(checksum='023943b7771ab11604a64ca306cc0ec4', size='82633')
        filename = os.path.join(os.path.dirname(__file__), 'noise.png')
        with self.settings(MEDIA_ROOT=str(settings.STORAGE_DIR / 'test')):
            with open(filename, 'rb') as fp:
                self.make_bad_request({'images': fp}, m_ex.BadImage)
    def test_ban_ip(self):
        """A request from an IP with an active ban raises Banned."""
        reason = BanReason.objects.create(description='Trolling')
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        banned_ip = '93.184.216.34'
        Ban.objects.create(type=Ban.BAN_TYPE_IP, value=banned_ip, reason=reason, active_until=tomorrow)
        self.make_bad_request({}, m_ex.Banned, REMOTE_ADDR=banned_ip)
    def test_ban_session(self):
        """A request from a banned session key raises Banned."""
        reason = BanReason.objects.create(description='Trolling')
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        banned_session = self.client.session.session_key
        Ban.objects.create(type=Ban.BAN_TYPE_SESSION, value=banned_session, reason=reason, active_until=tomorrow)
        self.make_bad_request({}, m_ex.Banned)
    def test_ban_network(self):
        """A request from an IP inside a banned network raises a moderation error."""
        reason = BanReason.objects.create(description='Trolling')
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        banned_network = '93.184.216.0/24'
        banned_ip = '93.184.216.34'
        Ban.objects.create(type=Ban.BAN_TYPE_NET, value=banned_network, reason=reason, active_until=tomorrow)
        self.make_bad_request({}, m_ex.ModerationError, REMOTE_ADDR=banned_ip)
    def test_rapid_posting(self):
        """Posting twice in quick succession raises NotSoFast."""
        post_data = self.base_post_content.copy()
        self.client.post('/create/', post_data)
        self.make_bad_request({}, i_ex.NotSoFast)
|
"""
This module contains tools for handling dataset specifications.
"""
import copy
from typing import Union
import platform
import sys

# This module relies on insertion-ordered dicts, guaranteed from Python 3.7.
version = platform.python_version()
# Bug fix: the previous check parsed float(version[:3]), which wrongly rejects
# Python 3.10+ because '3.1' parses as 3.1 <= 3.6. Compare the version tuple.
if sys.version_info < (3, 7):
    raise EnvironmentError('At least Python 3.7 is needed for ordered dict functionality.')
from ruamel.yaml import YAML
class DatasetSpec(object):
    """
    This class creates a dataset specification from a YAML specification file, so properties
    in the specification are easily accessed. Moreover, it provides defaults and specification checking.
    Specification attribute fields:
    - l: list of str, the names of the scene-level semantic classes
    - l_things: list of str, the names of the scene-level things classes
    - l_stuff: list of str, the names of the scene-level stuff classes
    - l_parts: list of str, the names of the scene-level classes with parts
    - l_noparts: list of str, the names of the scene-level classes without parts
    - scene_class2part_classes: dict, mapping for scene-level class name to part-level class names,
      the ordering of elements in scene_class2part_classes.keys() and scene_class2part_classes.values()
      implicitly defines the sid and pid respectively, which can be retrieved with the functions below
    - sid2scene_class: dict, mapping from sid to scene-level semantic class name
    - sid2scene_color: dict, mapping from sid to scene-level semantic class color
    - sid_pid2scene_class_part_class: dict, mapping from sid_pid to a tuple of
      (scene-level class name, part-level class name)
    Specification attribute functions:
    - scene_class_from_sid(sid)
    - sid_from_scene_class(name)
    - part_classes_from_sid(sid)
    - part_classes_from_scene_class(name)
    - scene_color_from_scene_class(name)
    - scene_color_from_sid(sid)
    - scene_class_part_class_from_sid_pid(sid_pid)
    - sid_pid_from_scene_class_part_class(scene_name, part_name)
    Examples (from Cityscapes Panoptic Parts):
    - for the 'bus' scene-level class and the 'wheel' part-level class it holds:
      - 'bus' in l_things → True
      - 'bus' in l_parts → True
      - sid_from_scene_class('bus') → 28
      - scene_color_from_scene_class('bus') → [0, 60, 100]
      - part_classes_from_scene_class('bus') → ['UNLABELED', 'window', 'wheel', 'light', 'license plate', 'chassis']
      - sid_pid_from_scene_class_part_class('bus', 'wheel') → 2802
    Experimental (format/API may change):
    - l_allparts: list of str, a list of all parts in str with format f"{scene_class}-{part_class}",
      contains at position 0 the special 'UNLABELED' class
    Notes:
    - A special 'UNLABELED' semantic class is defined for the scene-level and part-level abstractions.
      This class must have sid/pid = 0 and is added by default to the attributes of this class if
      it does not exist in yaml specification.
    - It holds that:
      - the special 'UNLABELED' class ∈ l, l_stuff, l_noparts
      - l = l_things ∪ l_stuff
      - l = l_parts ∪ l_noparts
      - sids are continuous and zero-based
      - iids do not need to be continuous
      - pids are continuous and zero-based per sid
    """
    def __init__(self, spec_path):
        """
        Args:
          spec_path: a YAML panoptic parts dataset specification
        """
        with open(spec_path) as fd:
            spec = YAML().load(fd)
        self._spec_version = spec['version']
        self._dataset_name = spec['name']
        # describes the semantic information layer
        self._scene_class2part_classes = spec['scene_class2part_classes']
        # describes the instance information layer
        self._scene_classes_with_instances = spec['scene_classes_with_instances']
        self._scene_class2color = spec.get('scene_class2color')
        if self._scene_class2color is None:
            raise ValueError(
                '"scene_class2color" in dataset_spec must be provided for now. '
                'In the future random color assignment will be implemented.')
        self._countable_pids_groupings = spec.get('countable_pids_groupings')
        self._extract_attributes()
    def _extract_attributes(self):
        """Derive all public attributes (l, l_things, sid/pid mappings, …) from the raw spec.

        The derivations below rely on insertion order of the spec dicts: the
        position of a key in scene_class2part_classes defines its sid, and the
        position of a part name in its list defines its pid.
        """
        self.dataset_name = self._dataset_name
        def _check_and_append_unlabeled(seq: Union[dict, list], unlabeled_dct=None):
            # Ensure 'UNLABELED' exists at position 0 of seq (dict key or list
            # element); returns a shallow copy with 'UNLABELED' prepended when
            # it was missing. NOTE(review): for a dict input without
            # unlabeled_dct this would fail on None.update — presumably dict
            # callers always pass unlabeled_dct; confirm.
            seq = copy.copy(seq)
            if 'UNLABELED' not in seq:
                if isinstance(seq, dict):
                    seq_new = unlabeled_dct
                    seq_new.update(seq)
                elif isinstance(seq, list):
                    seq_new = ['UNLABELED'] + seq
            if list(seq_new)[0] != 'UNLABELED':
                raise ValueError(
                    f'"UNLABELED" class exists in seq but not at position 0. seq: {seq}')
            return seq_new
        # check and append (if doesn't exist) the special UNLABELED key to
        # scene_class2part_classes and scene_class2color attributes
        self.scene_class2part_classes = _check_and_append_unlabeled(self._scene_class2part_classes,
                                                                    {'UNLABELED': []})
        self.scene_class2part_classes = dict(
            zip(self.scene_class2part_classes.keys(),
                map(_check_and_append_unlabeled,
                    self.scene_class2part_classes.values())))
        self.scene_class2color = _check_and_append_unlabeled(self._scene_class2color,
                                                             {'UNLABELED': [0, 0, 0]})
        # self.sid_pid2scene_class_part_class is a coarse mapping (not all 0-99_99 keys are present)
        # from sid_pid to Tuple(str, str), it contains sid_pid with format S, SS, S_PP, SS_PP
        # where S >= 0, SS >= 0, S_PP >= 1_01, SS_PP >= 10_01, and PP >= 1
        self.sid_pid2scene_class_part_class = dict()
        for sid, (scene_class, part_classes) in enumerate(self.scene_class2part_classes.items()):
            for pid, part_class in enumerate(part_classes):
                # pid 0 is the part-level UNLABELED: encoded as the bare sid
                sid_pid = sid if pid == 0 else sid * 100 + pid
                self.sid_pid2scene_class_part_class[sid_pid] = (scene_class, part_class)
        self.scene_class_part_class2sid_pid = {
            v: k for k, v in self.sid_pid2scene_class_part_class.items()}
        self.l = list(self.scene_class2part_classes)
        self.l_things = self._scene_classes_with_instances
        self.l_stuff = list(set(self.l) - set(self.l_things))
        # classes with parts: those whose part list holds more than just UNLABELED
        self.l_parts = list(filter(lambda k: len(self.scene_class2part_classes[k]) >= 2,
                                   self.scene_class2part_classes))
        self.l_noparts = list(set(self.l) - set(self.l_parts))
        self.l_allparts = ['UNLABELED']
        for scene_class, part_classes in self.scene_class2part_classes.items():
            if scene_class == 'UNLABELED':
                continue
            for part_class in part_classes:
                if part_class == 'UNLABELED':
                    continue
                self.l_allparts.append(f'{scene_class}-{part_class}')
        self.sid2scene_class = dict(enumerate(self.l))
        self.sid2scene_color = {sid: self.scene_class2color[name] for sid, name in self.sid2scene_class.items()}
        self.sid2part_classes = {sid: part_classes
                                 for sid, part_classes in enumerate(self.scene_class2part_classes.values())}
        # self._sid_pid_file2sid_pid is a sparse mapping (not all 0-99_99 keys are present), with
        # sid_pid s in the annotation files mapped to the official sid_pid s of the dataset.
        # This can be used to remove the part-level instance information layer
        # from the uids in the annotation files (this only applies to PASCAL Panoptic Parts for now).
        if self._countable_pids_groupings is not None:
            self._sid_pid_file2sid_pid = {k: k for k in self.sid_pid2scene_class_part_class}
            for scene_class, part_class2pids_grouping in self._countable_pids_groupings.items():
                sid = self.sid_from_scene_class(scene_class)
                for part_class, pids_file in part_class2pids_grouping.items():
                    for pid_file in pids_file:
                        assert pid_file != 0, 'Unhandled case (pid_file = 0), raise an issue to maintainers.'
                        sid_pid_file = sid if pid_file == 0 else sid * 100 + pid_file
                        self._sid_pid_file2sid_pid[sid_pid_file] = self.scene_class_part_class2sid_pid[(scene_class, part_class)]
    def sid_from_scene_class(self, name):
        """Return the sid of scene-level class `name`."""
        return self.l.index(name)
    def scene_class_from_sid(self, sid):
        """Return the scene-level class name for `sid`."""
        return self.l[sid]
    def scene_color_from_scene_class(self, name):
        """Return the color of scene-level class `name` (from the raw spec mapping)."""
        return self._scene_class2color[name]
    def scene_color_from_sid(self, sid):
        """Return the color of the scene-level class with id `sid`."""
        return self.sid2scene_color[sid]
    def part_classes_from_sid(self, sid):
        """Return the part-level class names of the scene-level class with id `sid`."""
        return self.sid2part_classes[sid]
    def part_classes_from_scene_class(self, name):
        """Return the part-level class names of scene-level class `name`."""
        return self.scene_class2part_classes[name]
    def scene_class_part_class_from_sid_pid(self, sid_pid):
        """Return the (scene class, part class) tuple encoded by `sid_pid`."""
        return self.sid_pid2scene_class_part_class[sid_pid]
    def sid_pid_from_scene_class_part_class(self, scene_name, part_name):
        """Return the sid_pid encoding of (`scene_name`, `part_name`)."""
        return self.scene_class_part_class2sid_pid[(scene_name, part_name)]
if __name__ == '__main__':
    # Ad-hoc sanity check: print only the sid_pid keys that the countable
    # pids groupings actually remap.
    spec = DatasetSpec('panoptic_parts/specs/dataset_specs/ppp_datasetspec.yaml')
    print(*sorted(filter(lambda t: t[0] != t[1],
                         spec._sid_pid_file2sid_pid.items())), sep='\n')
    # spec = DatasetSpec('panoptic_parts/specs/dataset_specs/cpp_datasetspec.yaml')
    # Bug fix: removed a leftover breakpoint() call that dropped the script
    # into the interactive debugger on every run.
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from datetime import datetime
import argparse
from os import path
import sys
import tensorflow as tf
import captcha_model as captcha
from tensorflow.python.client import device_lib
FLAGS = None
def run_train():
    """Train CAPTCHA for a number of steps.

    Builds the TF1-style graph (input queue, inference, loss, train op),
    restores the latest checkpoint from FLAGS.checkpoint_dir if one exists,
    then runs the training loop until the queue coordinator stops, saving a
    checkpoint every 100 steps and once more on any exception.
    """
    with tf.Graph().as_default():
        # Graph construction: queued inputs -> logits -> loss -> train op.
        images, labels = captcha.inputs(train=True, batch_size=FLAGS.batch_size)
        logits = captcha.inference(images, keep_prob=0.5)
        loss = captcha.loss(logits, labels)
        train_op = captcha.training(loss)
        saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
        init_op = tf.group(tf.compat.v1.global_variables_initializer(),
                           tf.compat.v1.local_variables_initializer())
        sess = tf.compat.v1.Session()
        sess.run(init_op)
        initial_step = 0
        print(device_lib.list_local_devices())
        if path.exists(FLAGS.checkpoint_dir):
            # Resume from the most recent checkpoint in the directory.
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.checkpoint_dir))
            print('')
            print('')
            print('=======================================================')
            print('=======================================================')
            print('=                                                     =')
            print('=                                                     =')
            print('=                                                     =')
            print('=  Loading from ' +FLAGS.checkpoint_dir + '  =')
            print('=                                                     =')
            print('=                                                     =')
            print('=                                                     =')
            print('=======================================================')
            print('=======================================================')
            print('')
            print('')
            print(tf.train.latest_checkpoint(FLAGS.checkpoint_dir))
            last_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
            # Checkpoint paths end in '-<global_step>'; parse it to resume the
            # step counter where the previous run stopped.
            initial_step = int(last_checkpoint[last_checkpoint.rfind('-') + 1:])
        else:
            os.mkdir(FLAGS.checkpoint_dir)
        # Start the input-pipeline queue runner threads.
        coord = tf.train.Coordinator()
        threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)
        try:
            step = initial_step
            while not coord.should_stop():
                start_time = time.time()
                _, loss_value = sess.run([train_op, loss])
                duration = time.time() - start_time
                if step % 10 == 0:
                    print('>> Step %d run_train: loss = %.2f (%.3f sec)' % (step, loss_value,
                                                                            duration))
                # Periodic checkpoint (skip the very first resumed step).
                if step != initial_step and step % 100 == 0:
                    print('>> %s Saving in %s' % (datetime.now(), FLAGS.checkpoint))
                    saver.save(sess, FLAGS.checkpoint, global_step=step)
                step += 1
        except Exception as e:
            # Save progress before propagating the stop reason to the coordinator.
            print('>> %s Saving in %s' % (datetime.now(), FLAGS.checkpoint))
            saver.save(sess, FLAGS.checkpoint, global_step=step)
            coord.request_stop(e)
        finally:
            # Always stop queue threads and release the session.
            coord.request_stop()
            coord.join(threads)
            sess.close()
def main(_):
    """tf.app entry point: reset the event-log directory, then train.

    Note: only FLAGS.train_dir (event logs) is wiped; checkpoints live in
    FLAGS.checkpoint_dir and survive restarts.
    """
    if tf.io.gfile.exists(FLAGS.train_dir):
        tf.io.gfile.rmtree(FLAGS.train_dir)
    tf.io.gfile.makedirs(FLAGS.train_dir)
    run_train()
if __name__ == '__main__':
    # Parse command-line flags into the module-level FLAGS and hand any
    # unparsed arguments back to tf.app.run.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--batch_size',
        type=int,
        default=128,
        help='Batch size.'
    )
    parser.add_argument(
        '--train_dir',
        type=str,
        default='./captcha_train',
        help='Directory where to write event logs.'
    )
    parser.add_argument(
        '--checkpoint',
        type=str,
        default='./captcha_train/captcha',
        help='Directory where to write checkpoint.'
    )
    parser.add_argument(
        '--checkpoint_dir',
        type=str,
        default='./captcha_train',
        help='Directory where to restore checkpoint.'
    )
    FLAGS, unparsed = parser.parse_known_args()
    tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
from ..generate_data_structures import *
from ..main import *
from algorithm import *
import argparse
import re
import os
import subprocess  # bug fix: subprocess.call below was used without this import

# Parse the three required file paths from the command line.
parser = argparse.ArgumentParser('Multiple sections extension')
parser.add_argument('constraints')
parser.add_argument('students')
parser.add_argument('schedule')
args = parser.parse_args()
constraints_txt = args.constraints
students_txt = args.students
schedule_txt = args.schedule
# Load the input files into their in-memory representations.
student_preferences = extract_info(students_txt)
constraints = extract_info(constraints_txt)
ADTs = generate(student_preferences, constraints) # this generates all data structures
schedule = main(ADTs) # returns the schedule which we will write
write_info(schedule, schedule_txt)
# Validate the produced schedule with the project's Perl checker
# (argument list form, no shell involved).
subprocess.call(['perl', 'cs340_project/sections-ext/is_valid.pl', constraints_txt, students_txt, schedule_txt])
|
import unittest
from html_page import HtmlPage
class TestHtmlPage(unittest.TestCase):
    """Unit tests for HtmlPage.render()."""
    def test_default(self):
        """render() without arguments emits empty paragraphs and an empty img src."""
        html_page = HtmlPage()
        rendered = html_page.render()
        self.assertEqual(rendered, "<p></p><p><img src=\"\"/></p><p></p>")
    def test_with_content(self):
        """render(url, text) interpolates the url into the message/img and appends the text."""
        html_page = HtmlPage()
        rendered = html_page.render("img_url", "text")
        self.assertEqual(rendered, "<p>Your image url is img_url</p><p><img src=\"img_url\"/></p><p>text</p>")
# Allow running this test module directly: python <this_file>.py
if __name__ == "__main__":
    unittest.main()
|
import requests
import time
import json
def SendMsg(host: str, current_qq: int, send_content: str, to_user: int):
    """Send a private text message via the LuaApiCaller HTTP API, with throttling.

    Args:
        host: base URL of the API server.
        current_qq: QQ number of the sending account.
        send_content: message text.
        to_user: QQ number of the recipient.
    Returns:
        0 on success, 1 on failure (the error is printed, not raised).
    """
    url = "%s/v1/LuaApiCaller?qq=%d&funcname=SendMsg&timeout=10" % (host, current_qq)
    # Bug fix: the payload was assembled by %-formatting a JSON template, so a
    # quote/newline/backslash in send_content produced invalid JSON. Build the
    # dict and let json.dumps do the escaping.
    payload = json.dumps({
        "toUser": to_user,
        "sendToType": 1,
        "sendMsgType": "TextMsg",
        "content": send_content,
        "groupid": 0,
        "atUser": 0,
        "replayInfo": None,
    })
    headers = {'Content-Type': 'application/json'}
    try:
        time.sleep(1)  # simple rate limiting before each send
        requests.request("POST", url, headers=headers, data=payload.encode('utf-8'))
        return 0
    except Exception as e:  # narrowed from BaseException: keep Ctrl-C working
        print(e)
        return 1
def SendMsg_nowait(host: str, current_qq: int, send_content: str, to_user: int):
    """Send a private text message immediately (no throttling sleep).

    Same contract as SendMsg: returns 0 on success, 1 on failure.
    """
    url = "%s/v1/LuaApiCaller?qq=%d&funcname=SendMsg&timeout=10" % (host, current_qq)
    # Bug fix: build the JSON with json.dumps instead of %-formatting a
    # template, so special characters in send_content cannot break the payload.
    payload = json.dumps({
        "toUser": to_user,
        "sendToType": 1,
        "sendMsgType": "TextMsg",
        "content": send_content,
        "groupid": 0,
        "atUser": 0,
        "replayInfo": None,
    })
    headers = {'Content-Type': 'application/json'}
    try:
        requests.request("POST", url, headers=headers, data=payload.encode('utf-8'))
        return 0
    except Exception as e:  # narrowed from BaseException: keep Ctrl-C working
        print(e)
        return 1
def GetGroupUserList_nowait(host: str, current_qq: int, group_id: int):
    """Fetch the member list of a group via the LuaApiCaller HTTP API.

    Returns:
        The requests Response on success, or the int 1 on failure
        (kept for backward compatibility with existing callers).
    """
    url = "%s/v1/LuaApiCaller?qq=%d&funcname=GetGroupUserList&timeout=10" % (host, current_qq)
    # Build the JSON body with json.dumps for correctness/consistency.
    payload = json.dumps({"GroupUin": group_id, "LastUin": 0})
    headers = {
        'Content-Type': 'application/json'
    }
    try:
        response = requests.request("POST", url, headers=headers, data=payload.encode('utf-8'))
        return response
    except Exception as e:  # narrowed from BaseException: keep Ctrl-C working
        print(e)
        return 1
def GetFileUrl(host: str, current_qq: int, file_id: str):
    """Request the download URL of an offline file by its FileID.

    Returns:
        The requests Response on success, or the int 1 on failure.
    """
    url = "%s/v1/LuaApiCaller?qq=%d&funcname=OfflineFilleHandleSvr.pb_ftn_CMD_REQ_APPLY_DOWNLOAD-1200&timeout=10" % (
        host, current_qq)
    # Bug fix: the payload was built by string concatenation, so quotes or
    # backslashes in file_id produced invalid JSON; use json.dumps.
    payload = json.dumps({"FileID": file_id})
    headers = {'Content-Type': 'application/json'}
    try:
        response = requests.request("POST", url, headers=headers, data=payload.encode('utf-8'))
        return response
    except Exception as e:  # narrowed from BaseException: keep Ctrl-C working
        print(e)
        return 1
def send_private_msg_v2(host: str, current_qq: int, to_usr_id: int, group_id: int, send_msg: str):
    """Send a private message through the SendMsgV2 endpoint, with throttling.

    Returns:
        The requests Response on success, or the int 1 on failure.
    """
    time.sleep(1)  # simple rate limiting before each send
    url = "%s/v1/LuaApiCaller?qq=%d&funcname=SendMsgV2" % (host, current_qq)
    payload = {"ToUserUid": to_usr_id, "GroupID": group_id, "SendToType": 3, "SendMsgType": "TextMsg",
               "Content": send_msg}
    try:
        data = json.dumps(payload)
        headers = {
            'Content-Type': 'application/json'
        }
        response = requests.request("POST", url, headers=headers, data=data)
        return response
    except Exception as e:  # bug fix: was BaseException, which swallowed Ctrl-C/SystemExit
        print(e)
        return 1
|
# Change making
coins = [25,10,5,2,1]  # available coin denominations
n = 10  # target amount to make change for
def make_change(coins, target):
    """Return all distinct coin combinations that sum exactly to `target`.

    Args:
        coins: iterable of distinct positive coin denominations.
        target: non-negative amount to reach.
    Returns:
        A list of dicts mapping each denomination to its count (zero counts
        included); each distinct multiset of coins appears exactly once.
    """
    paths = []
    memo = set()  # serialized combos already explored, to avoid duplicates

    def explore_paths(total, combo=None):
        # Bug fix: the original used a mutable default argument (combo={});
        # use None and build a fresh zero-count dict per top-level call.
        if combo is None:
            combo = {coin: 0 for coin in coins}
        # Base cases
        if total > target:
            return
        if total == target:
            paths.append(combo)
            return  # adding any further coin can only overshoot
        # Recursion: try extending the combo with one more of each coin.
        for coin in coins:
            new_combo = combo.copy()
            new_combo[coin] += 1
            # Canonical string key for the combo, e.g. "25x0:10x1:..."
            combo_code = ":".join(
                "{}x{}".format(c, q) for c, q in new_combo.items()
            )
            if combo_code not in memo:
                memo.add(combo_code)
                explore_paths(total + coin, new_combo)

    explore_paths(0)
    return paths
if __name__ == '__main__':
    combos = make_change(coins, n)
    # Report how many distinct combinations sum to the target amount.
    print(len(combos))
|
import utime
import machine
import json
import ubinascii
from app.motor import motor
from app.halleffect import halleffect
class train():
    """MicroPython model-train controller.

    Drives a motor between hall-effect checkpoints, reads an MPU6050 over I2C,
    monitors battery voltage via the ADC, and publishes state over MQTT.
    """
    def __init__(self, mqtt):
        #variables
        self.status = None          # current state: None/"stopped"/"moving"/"homing"
        self.hops = 1               # checkpoints left to pass before stopping
        self.speed = -0.3           # motor speed; sign encodes direction
        #self.direction = 0
        self.on_checkpoint = False
        self.i2c = machine.I2C(scl = machine.Pin(5) ,sda = machine.Pin(4))
        #setup mpu6050
        # Write 0x00 to register 107 of device 104 (0x68) — presumably clears
        # the MPU6050 sleep bit (PWR_MGMT_1); confirm against the datasheet.
        self.i2c.writeto_mem(104,107,b'\x00')
        self.mqtt = mqtt
        self.m = motor()
        self.h = halleffect()
        self.battery = machine.ADC(0)
        self.battery_scalar = 4.3 #set by voltage divider at ADC input (Rb+Rt)/Rb
    def read_battery(self):
        """Return the estimated battery voltage in volts."""
        #TODO enable some battery measurement circuit?
        return self.battery_scalar * self.battery.read() / 1024 * 3.3 #wemos1D has internal divider......
    def calibrate(self, message = "all"):
        """Run sensor calibration; publishes the resulting thresholds via MQTT."""
        if message in ["all", "hall-effect"]:
            self.h.calibrate()
            self.mqtt.pub("calibration", "hall-effect low: {}".format(self.h.sensor_low))
            self.mqtt.pub("calibration", "hall-effect high: {}".format(self.h.sensor_high))
    def set_status(self, status):
        """Update the state and announce it on the 'status' topic."""
        self.mqtt.pub("status", status)
        self.status = status
    def read_mpu(self):
        """Read 14 bytes of MPU6050 data and return the raw 16-bit fields.

        NOTE(review): values are big-endian and left unsigned — negative
        readings are not sign-converted here; confirm callers expect raw data.
        """
        data = self.i2c.readfrom_mem(104,0x3b,14)
        return {
            "x" : data[0]<<8|data[1],
            "y" : data[2]<<8|data[3],
            "z" : data[4]<<8|data[5],
            "temperature" : data[6]<<8|data[7],
            "roll" : data[8]<<8|data[9],
            "pitch" : data[10]<<8|data[11],
            "yaw" : data[12]<<8|data[13]
        }
    def update(self):
        """Publish the full telemetry snapshot over MQTT."""
        self.mqtt.pub("status",self.status)
        self.mqtt.pub("hops",self.hops)
        self.mqtt.pub("speed",self.speed)
        self.mqtt.pub("checkpoint",[False,True][self.h.trigger.value()])
        self.mqtt.pub("battery",self.read_battery())
        self.mqtt.pub("timestamp",utime.ticks_ms())
    def set_speed(self, message):
        """Set the target speed from an MQTT message (stringified float)."""
        self.speed = float(message)
        #update movement speed if moving
        if self.status == "moving":
            self.move("")
    def move(self, message):
        """Handle a 'move' command: 'stop' halts, a number sets hop count."""
        if message == "stop":
            self.set_status("stopped")
            self.m.move(0)
        else:
            #try to set hops in case a value was given in the message
            try:
                self.hops = int(message)
            except:
                pass
            self.set_status("moving")
            # If currently resting on a checkpoint, it will immediately
            # re-trigger, so count one extra hop to compensate.
            if self.on_checkpoint:
                self.hops += 1
            self.m.move(self.speed)
    def statemachine(self, level = None):
        """Advance the movement state when the hall-effect sensor fires.

        moving: decrement hops; when exhausted, reverse to home in on the
        checkpoint. homing: stop on the checkpoint and reset hops.
        """
        #check for stop
        if self.h.sensor_triggered:
            self.mqtt.pub("info", "checkpoint")
            #states
            if self.status == "moving":
                self.hops -= 1
                if self.hops < 1:
                    #stop and reverse back to checkpoint
                    self.set_status("homing")
                    self.m.move(0)
                    utime.sleep(1)
                    self.m.move(-self.speed)
            elif self.status == "homing":
                #stop on checkpoint
                self.m.move(0)
                self.hops = 1
                self.set_status("stopped")
            #clear flag
            utime.sleep_ms(100)
            self.h.sensor_triggered = False
# Module-level MQTT handle, populated by run() so interrupt handlers can use it.
mqtt = None
def run(mqtt_obj, parameters):
    """Entry point: wire up the train controller and loop forever.

    Subscribes the train's command handlers, then polls MQTT and publishes
    telemetry once per second while running the state machine every pass.
    """
    #Make mqtt object global, so it can be called from interrupts
    global mqtt
    mqtt = mqtt_obj
    #Set project name as prefix so we can easily filter topics
    #Final topic will be in form:
    #UID/prefix/user_topic
    mqtt.set_prefix("train")
    t = train(mqtt)
    t.set_status("stopped")
    mqtt.sub("move", t.move)
    mqtt.sub("calibrate", t.calibrate)
    mqtt.sub("speed", t.set_speed)
    next_message = utime.ticks_ms()
    #Main loop
    while True:
        #Call periodicaly to check if we have recived new messages.
        loop_time = utime.ticks_ms()
        # Throttle MQTT polling/telemetry to roughly once per second.
        if next_message < loop_time:
            mqtt.check_msg()
            t.update()
            next_message = loop_time + 1000
        t.statemachine()
|
from worldbankapp import app
import json, plotly
from flask import render_template, request, Response, jsonify
from scripts.data import return_figures
@app.route('/', methods=['POST', 'GET'])
@app.route('/index', methods=['POST', 'GET'])
def index():
country_codes = [['Lithuania', 'LTU'], ['Estonia', 'EST'], ['Latvia', 'LVA'],
['Euro Area', 'XC'], ['Central Europe and the Baltics', 'B8']]
if (request.method == 'POST') and request.form:
figures = return_figures(request.form)
countries_selected = []
for country in request.form.lists():
countries_selected.append(country[1][0])
else:
figures = return_figures()
countries_selected = []
for country in country_codes:
countries_selected.append(country[1])
ids = ['figure-{}'.format(i) for i, _ in enumerate(figures)]
figuresJSON = json.dumps(figures, cls=plotly.utils.PlotlyJSONEncoder)
return render_template('index.html', ids=ids,
figuresJSON=figuresJSON,
all_countries=country_codes,
countries_selected=countries_selected) |
# -*- coding: utf-8 -*-
from flask_assets import Environment
# CDN-hosted stylesheets: Leaflet, awesome-markers, jQuery UI, Font Awesome.
css_cdnjs = ('https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.css',
             'https://cdnjs.cloudflare.com/ajax/libs/Leaflet.awesome-markers/2.0.2/leaflet.awesome-markers.css',
             'https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.11.2/jquery-ui.min.css',
             'https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.11.2/jquery-ui.structure.min.css',
             'https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.11.2/jquery-ui.theme.min.css',
             'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.3.0/css/font-awesome.css')
# Locally served application stylesheet.
css_main = ('css/fogspoon.css',)
# CDN-hosted scripts: jQuery, jQuery UI, Leaflet, awesome-markers.
js_cdnjs = ('https://cdnjs.cloudflare.com/ajax/libs/jquery/1.9.1/jquery.min.js',
            'https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.11.2/jquery-ui.js',
            'https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.js',
            'https://cdnjs.cloudflare.com/ajax/libs/Leaflet.awesome-markers/2.0.2/leaflet.awesome-markers.min.js',)
# Locally served application script.
js_main = ('js/main.js',)
def init_app(app):
    """Register the 'css' and 'js' webassets bundles on the Flask app."""
    webassets = Environment(app)
    webassets.register('css', *(css_cdnjs + css_main))
    webassets.register('js', *(js_cdnjs + js_main))
    # In production use the cache manifest; in debug serve unprocessed assets.
    webassets.manifest = 'cache' if not app.debug else False
    webassets.cache = not app.debug
    webassets.debug = app.debug
|
import random

import utils
# quicksort
def swap(A, i, j):
    """Swap A[i] and A[j] in place (idiomatic tuple assignment, no temp var)."""
    A[i], A[j] = A[j], A[i]
def quicksort(A,p,q):
    """Sort A[p..q] (inclusive bounds) in place, recursively, using partition()."""
    if p<q:
        i=partition(A,p,q)
        quicksort(A,p,i-1)
        quicksort(A,i+1,q)
def quicksort_iterative(A, p, q):
    """Sort A[p..q] (inclusive bounds) in place using an explicit stack.

    Bug fixes: Python lists have no .push() method, and the original pushed
    bare arguments instead of (lo, hi) tuples — the function crashed on its
    first line. Uses list.append/pop with range tuples instead.
    """
    S = [(p, q)]  # stack of pending (lo, hi) sub-ranges
    while len(S) > 0:
        p, q = S.pop()
        if p < q:
            i = partition(A, p, q)
            S.append((i + 1, q))
            # Pushed last so it is popped next, mirroring the recursive order.
            S.append((p, i - 1))
    # NB stack depth may be O(n) if we push the smallest one first
    # if we push the largest one first, stack depth is O(log n)
# maintain A[p+1...i] [i+1...j] [...rest]
#            <=x        >=x
def partition(A, p, q):
    """Lomuto partition of A[p..q] (inclusive) around a randomly chosen pivot.

    Returns the pivot's final index i; afterwards every element of A[p..i-1]
    is <= A[i] and every element of A[i+1..q] is >= A[i].

    Bug fixes: `randint` was used without any import and `xrange` does not
    exist on Python 3; swaps are now inline tuple assignments.
    """
    r = random.randint(p, q)
    A[p], A[r] = A[r], A[p]  # move a random element to the front as the pivot
    x = A[p]
    i = p
    for j in range(p + 1, q + 1):
        if A[j] <= x:
            i += 1
            A[i], A[j] = A[j], A[i]
    A[p], A[i] = A[i], A[p]  # put the pivot in its right place
    return i
# randomized select
def randselect(A,p,q,i): # ith smallest in A[p..q]
    """Quickselect: return the i-th smallest (1-based) element of A[p..q].

    Partitions A in place and recurses into the side containing rank i;
    expected linear time. Relies on partition() defined above.
    """
    if (p==q): return A[p]
    r=partition(A,p,q)
    k=r-p+1 # rank of A[r] in A[p..q]
    if (i==k): return A[r]
    if (i<k): return randselect(A,p,r-1,i)
    if (i>k): return randselect(A,r+1,q,(i-k))
|
import logging
from loader import db
# Configure logging so the bot's activity can be inspected while debugging.
logging.basicConfig(level=logging.INFO)
async def on_startup(dp):
    """Aiogram startup hook: install filters/middlewares and init the database."""
    import filters
    import middlewares
    filters.setup(dp)  # Install the bot's filters
    middlewares.setup(dp)  # Install the middlewares
    await db.create_table()  # Create the database table
# Start long polling
if __name__ == '__main__':
    from aiogram import executor
    from handlers import dp
    executor.start_polling(dp, on_startup=on_startup, skip_updates=True)
    # dp = the dispatcher that handles incoming messages,
    # on_startup = function executed when the bot starts,
    # skip_updates = drop updates that arrived while the bot was offline
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.