index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
17,700 | e7f1a1dcd42c22345522ad3162a05769dcd78e0f | /home/runner/.cache/pip/pool/21/ba/d4/9081c03433cfa7a8c6f9469405b08168172c6eff9f6aae0bf3ab9ee7fb |
def main():
    """Print the last ten digits of sum(i**i, i=1..1000) (Project Euler #48)."""
    # A generator expression avoids materialising 1000 huge integers at once;
    # slicing the decimal string keeps only the final 10 digits.
    total = sum(i ** i for i in range(1, 1001))
    print(str(total)[-10:])


if __name__ == '__main__':
    main()
17,702 | e718b690cee27f5e93b1bc863734ad0f7525c5de | import pytest
from tests.conftest import day_02
@pytest.mark.parametrize('rule,password,expected', [
    ('1-3 a', 'abcde', True),
    ('1-3 b', 'cdefg', False),
    ('2-9 c', 'ccccccccc', True),
])
def test_is_valid_by_count(rule: str, password: str, expected: bool):
    # Presumably AoC 2020 day 2 part 1 semantics: 'lo-hi ch' means the password
    # must contain ch between lo and hi times inclusive — confirm in day_02.
    assert day_02.is_valid_by_count(rule, password) == expected
@pytest.mark.parametrize('rule,password,expected', [
    ('1-3 a', 'abcde', True),
    ('1-3 b', 'cdefg', False),
    ('2-9 c', 'ccccccccc', False),
    ('16-17 k', 'nphkpzqswcltkkbkk', False),
    ('8-11 l', 'qllllqllklhlvtl', True),
])
def test_is_valid_by_existence(rule: str, password: str, expected: bool):
    # Presumably AoC 2020 day 2 part 2 semantics: exactly one of positions
    # lo and hi (1-indexed) holds ch — confirm against day_02's implementation.
    assert day_02.is_valid_by_existence(rule, password) == expected
|
# Emit the five banner lines, one per line, exactly as before.
for _line in ("You", "are", "_Super", "_143awesome", "sumONe"):
    print(_line)
|
17,704 | 91f6a4d995846301bd66e733186a853f56825787 | # -*- coding:UTF-8 -*-
from . import api
from flask_httpauth import HTTPBasicAuth
from flask import jsonify
from flask import g
from flask_login import login_required
from app.models import User
from app import db
auth = HTTPBasicAuth()


@auth.verify_password
def verify_password(email_or_token, password):
    """Accept either an API token (sent with an empty password) or email+password.

    On token login the resolved user is stashed on flask.g for later handlers.
    """
    if len(email_or_token) == 0:
        return False
    if len(password) == 0:
        # Token-based login: the "username" field carries the API token.
        g.current_user = User.check_api_token(email_or_token)
        return g.current_user is not None
    # Classic email/password login.
    candidate = User.query.filter_by(email=email_or_token).first()
    if candidate is None:
        return False
    return candidate.check_user_password(password)
@auth.error_handler
def error_handler():
    """Return a JSON payload when HTTP basic/token authentication fails."""
    # Bug fix: the key was misspelled 'static'; every other endpoint in this
    # module reports its result under the 'status' key.
    return jsonify({'status': 401})
@api.route('/')
@auth.login_required
def test_devices():
    # Authenticated smoke-test endpoint.
    return 'devices'


@api.route('/token')
@auth.login_required
def token():
    # Return the caller's API token; g.current_user is set by verify_password.
    # NOTE(review): the attribute read is `api`, not e.g. `api_token` — confirm
    # against the User model.
    return jsonify({'token': g.current_user.api})


@api.route('/devices')
@auth.login_required
def devices():
    # List every device owned by the authenticated user as JSON.
    ds = g.current_user.devices
    ds_list = []
    for d in ds:
        ds_list.append(d.to_json())
    return jsonify({'status': 200, 'devices' : ds_list})


@api.route('/device/<int:id>')
def device(id):
    # TODO: unimplemented stub.
    pass


@api.route('/device/<int:id>/sensors')
def sensors(id):
    # TODO: unimplemented stub.
    pass


@api.route('/device/<int:device_id>/sensor/<int:sensor_id>')
def sensor(device_id,sensor_id):
    # TODO: unimplemented stub.
    pass


@api.route('/device/<int:device_id>/sensor/<int:sensor_id>/datas')
def datas(device_id,sensor_id):
    # TODO: unimplemented stub.
    pass
from app.models import Data,Device,Sensor
from flask import request


@api.route('/device/<int:device_id>/sensor/<int:sensor_id>/data',methods=['GET','POST'])
def data(device_id, sensor_id):
    """POST: store one data point for the given device/sensor; GET: status only.

    Returns a JSON payload with a 'status' key (200 on success, 404 when the
    device, sensor, or posted 'data' field is missing).
    """
    # Bug fix: the original filtered by `device.id`, where `device` is the view
    # function defined above (so this raised AttributeError on every request);
    # the URL parameter is `device_id`.
    dev = Device.query.filter_by(id=device_id).first()
    if dev is None:
        return jsonify({'status': 404,'info':'device not found'})
    sen = dev.sensors.filter_by(id=sensor_id).first()
    if sen is None:
        return jsonify({'status': 404, 'info': 'sensor not found'})
    if request.method == 'POST':
        upload_data = request.json
        if 'data' in upload_data:
            record = Data()
            record.data = upload_data['data']
            record.sensor_id = sen.id
            db.session.add(record)
            db.session.commit()
            # TODO: alarm handling (original note: "baojingchuli")
            return jsonify({'status':200})
        return jsonify({'status':404,'info':'data not found'})
    return jsonify({'status': 200})
|
17,705 | 9a1e084598cf7b774a452f2a30de49aaebdaf3f5 | print("Robot object \n________________________________________________________________________________\n \n name: <{}> |color: <{}> |language: <{}>| is mobile?: <{}> \n________________________________________________________________________________")
|
17,706 | 1ebfd36d84262e0037aea2d83fcf13402a55b79a | from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import RequestContext
from imghost.models import Picture
from imghost.models import Document
import time
# Create your views here.
def index(request):
    """Render the landing page with the most recent picture and document."""
    newest_picture = Picture.objects.latest('id')
    newest_document = Document.objects.latest('id')
    context = {'latest_picture': newest_picture, 'latest_document': newest_document}
    return render_to_response('index.html', context, context_instance=RequestContext(request))
def picture_detail(request, picture_id):
    """Show one picture, or 404 if the id is unknown."""
    return render_to_response('image_detail.html',
                              {'picture': get_object_or_404(Picture, id=picture_id)})


def document_detail(request, document_id):
    """Show one document, or 404 if the id is unknown."""
    return render_to_response('document_detail.html',
                              {'document': get_object_or_404(Document, id=document_id)})
def upload(request):
    """Store an uploaded file as a Picture (image extensions) or a Document.

    Redirects to the matching detail page, or back to the index when no file
    was posted.
    """
    if 'file' in request.FILES:
        uploaded = request.FILES['file']
        # Hoisted: the original re-split the file name three separate times.
        parts = uploaded.name.split(".")
        extension = parts[-1]
        # Timestamp keeps concurrent uploads of the same name distinct.
        filename = parts[0] + "_" + str(int(time.time())) + "." + parts[-1]
        # NOTE(review): the check is case-sensitive, so 'photo.JPG' is stored
        # as a Document — confirm whether extensions should be lower-cased.
        if extension in ['bmp', 'jpg', 'jpeg', 'png']:
            picture = Picture()
            picture.image.save(filename, uploaded)
            return HttpResponseRedirect(reverse('imghost.views.picture_detail', args=[picture.id]))
        else:
            document = Document()
            document.document.save(filename, uploaded)
            return HttpResponseRedirect(reverse('imghost.views.document_detail', args=[document.id]))
    return HttpResponseRedirect(reverse('imghost.views.index', args=[]))
def gallery(request):
    """Render every picture alongside the most recent one."""
    newest = Picture.objects.latest('id')
    pictures = Picture.objects.all()
    return render_to_response('gallery.html', {'album': pictures, 'latest_picture': newest})


def gallery_doc(request):
    """Render every document alongside the most recent one."""
    newest = Document.objects.latest('id')
    documents = Document.objects.all()
    return render_to_response('gallery_doc.html', {'album': documents, 'latest_document': newest})
17,707 | 1d71d39afc2bc0c7f674ecc9ed37c3aa558b55b4 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 10:28:54 2017
@author: kb202
"""
import tensorflow as tf
import numpy as np
import time
import sys
#sys.path.append('/home/kb202/code/python/tensorflow/cifar10/')
sys.path.append('cifar10')
import cifar10, cifar10_input
max_steps = 3000
batch_size = 128
data_dir = 'cifar10/cifar10_data/cifar-10-batches-bin'
def weight(shape, stddev, w1):
    """Truncated-normal weight variable; optionally registers an L2 penalty.

    When w1 is not None, w1 * l2_loss(var) is added to the 'losses' collection
    so loss() can fold it into the total loss.
    """
    w = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if w1 is not None:
        penalty = tf.multiply(tf.nn.l2_loss(w), w1, name='weigth_loss')
        tf.add_to_collection('losses', penalty)
    return w
def bias(shape):
    """Bias variable initialised to a constant 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv_2d(x, w):
    """2-D convolution with stride 1 and SAME padding."""
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
# Bug fix: the original line read `cifar10.maybe_download_and_extract` without
# parentheses — a no-op attribute access, so the dataset was never downloaded.
cifar10.maybe_download_and_extract()
images_train, labels_train = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)

# Placeholders for a batch of 24x24 RGB crops and their integer class labels.
image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])
label_holder = tf.placeholder(tf.int32, [batch_size])

# conv1 -> max pool -> local response normalisation
w_conv1 = weight([5,5,3,64], 5e-2, 0)
b_conv1 = bias([64])
h_conv1 = tf.nn.relu(conv_2d(image_holder, w_conv1)+b_conv1)
pool1 = tf.nn.max_pool(h_conv1, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME')
norm1 = tf.nn.lrn(pool1, 4, bias=1,alpha=0.001/9,beta=0.75)

# conv2 -> max pool -> local response normalisation
w_conv2 = weight([5,5,64,64], 5e-2, 0)
b_conv2 = bias([64])
h_conv2 = tf.nn.relu(conv_2d(norm1, w_conv2)+b_conv2)
pool2 = tf.nn.max_pool(h_conv2, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME')
norm2 = tf.nn.lrn(pool2, 4, bias=1, alpha=0.001/9, beta=0.75)

# Flatten, then three fully connected layers: dim -> 384 -> 192 -> 10 logits.
reshape = tf.reshape(norm2, [batch_size, -1])
dim = reshape.get_shape()[1].value
w_fc1 = weight([dim, 384], 0.04, 0.004)
b_fc1 = bias([384])
h_fc1 = tf.nn.relu(tf.matmul(reshape, w_fc1)+b_fc1)
w_fc2 = weight([384, 192], 0.04, 0.004)
b_fc2 = bias([192])
h_fc2 = tf.nn.relu(tf.matmul(h_fc1, w_fc2)+b_fc2)
w_fc3 = weight([192,10], 1/192.0, 0)
b_fc3 = bias([10])
h_fc3 = tf.add(tf.matmul(h_fc2, w_fc3),b_fc3)
def loss(logits, labels):
    """Mean cross-entropy over sparse labels plus all registered L2 penalties."""
    labels64 = tf.cast(labels, tf.int64)
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels64, name='reoss_entropy_per_example')
    mean_ce = tf.reduce_mean(per_example, name='cross_entropy')
    tf.add_to_collection('losses', mean_ce)
    # Total loss = data term + every weight penalty added by weight().
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
loss = loss(h_fc3, label_holder)
train = tf.train.AdamOptimizer(1e-4).minimize(loss)
top_k_op = tf.nn.in_top_k(h_fc3, label_holder, 1)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.train.start_queue_runners()

# Training loop: fetch a batch, run one optimisation step, log every 10 steps.
for step in range(max_steps):
    start_time = time.time()  # typo fix: was `strat_time`
    image_batch, label_batch = sess.run([images_train, labels_train])
    _, loss_value = sess.run([train, loss], feed_dict={image_holder:image_batch, label_holder:label_batch})
    duration = time.time() - start_time
    if step % 10 == 0:
        examples_per_sec = batch_size / duration
        sec_per_batch = float(duration)
        format_str = ('step %d, loss=%.2f (%.1f examples/sec; %.3f sec/batch)')
        # print() with a single argument is valid under both Python 2 and 3.
        print(format_str % (step, loss_value, examples_per_sec, sec_per_batch))

# Evaluation on the 10k test images.
num_examples = 10000
import math
# Bug fix: under Python 2, `num_examples/batch_size` truncated before ceil,
# silently dropping the final partial batch; divide in float first.
num_iter = int(math.ceil(num_examples / float(batch_size)))
true_count = 0
total_sample_count = num_iter * batch_size
step = 0
while step < num_iter:
    image_batch, label_batch = sess.run([images_test, labels_test])
    predictions = sess.run([top_k_op], feed_dict={image_holder:image_batch, label_holder:label_batch})
    # Bug fix: the original used `==`, which compared and discarded the count;
    # accumulate the number of correct predictions with `+=`.
    true_count += np.sum(predictions)
    step += 1
# Bug fix: integer division always produced precision 0 under Python 2.
precision = true_count / float(total_sample_count)
print('precision @ 1= %.3f' % precision)
|
a = 1
b = 2
# Bug fix: Python's boolean literals are capitalised; `true`/`false` raised
# NameError at runtime.
c = True
d = False
# a > b is False, so `and` short-circuits and the expression falls through to d.
print(a > b and c or d)
|
17,709 | 9f5c665997253a2245c76d4efc6d807e22481250 | #!/usr/bin/env python3
import argparse
import logging
import matplotlib.pyplot as plt
import numpy as np
import pickle
def plot(ax, path, label):
    """Plot mean and best-mean episode reward curves stored in a pickle file.

    Only timesteps strictly below 5e6 are drawn; two lines are added to `ax`,
    labelled '<label>_mean' and '<label>_best'.
    """
    max_steps = 5e6
    with open(path, 'rb') as f:
        data = pickle.load(f)

    timestep = np.array(data['timestep'])
    mean_reward = np.array(data['mean_episode_reward'])
    best_reward = np.array(data['best_mean_episode_reward'])

    keep = np.where(timestep < max_steps)
    timestep = timestep[keep]
    ax.plot(timestep, mean_reward[keep], label=label + '_mean')
    ax.plot(timestep, best_reward[keep], label=label + '_best')
def main():
    """Parse CLI flags, plot DQN and/or DDQN learning curves, save a PDF."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('--dqn', type=str, default='')
    parser.add_argument('--ddqn', type=str, default='')
    args = parser.parse_args()

    logging.basicConfig(
        format='%(levelname)s: %(filename)s, %(lineno)d: %(message)s',
        level=logging.DEBUG if args.verbose else logging.INFO,
    )

    fig, ax = plt.subplots()
    for path, name in ((args.dqn, 'DQN'), (args.ddqn, 'DDQN')):
        if path:
            plot(ax, path, name)

    ax.set_xlabel('Timestep')
    ax.set_ylabel('Reward')
    # Title reflects which curve(s) were requested.
    if args.dqn and args.ddqn:
        ax.set_title('DQN vs Double DQN')
    elif args.dqn:
        ax.set_title('DQN')
    else:
        ax.set_title('Double DQN')
    ax.ticklabel_format(style='sci', scilimits=(-3, 4), axis='x')
    ax.legend()
    fig.savefig('data/dqn.pdf')
|
17,710 | 795ee8276a2deb8a164397c78c27e0236dfe7d7d | from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse
from django.template.loader import render_to_string
from .models import Job
from .forms import JobForm
def job_list(request):
    """Render the full list of jobs."""
    return render(request, "jobs/job_list.html", {"jobs": Job.objects.all()})
def save_job_form(request, form, template_name):
    """Validate/save `form` on POST and return the AJAX modal payload as JSON.

    The payload carries `form_is_valid`, the re-rendered form HTML, and — on a
    successful save — the refreshed job list HTML.
    """
    data = {}
    if request.method == "POST":
        if form.is_valid():
            form.save()
            data["form_is_valid"] = True
            data["html_job_list"] = render_to_string(
                "jobs/includes/partial_job_list.html", {"jobs": Job.objects.all()}
            )
        else:
            data["form_is_valid"] = False
    # Always re-render the form itself so the client can redisplay it.
    data["html_form"] = render_to_string(template_name, {"form": form}, request=request)
    return JsonResponse(data)
def job_create(request):
    """AJAX view: create a new job via the shared form handler."""
    form = JobForm(request.POST) if request.method == "POST" else JobForm()
    return save_job_form(request, form, "jobs/includes/partial_job_create.html")


def job_update(request, pk):
    """AJAX view: edit an existing job via the shared form handler."""
    job = get_object_or_404(Job, pk=pk)
    if request.method == "POST":
        form = JobForm(request.POST, instance=job)
    else:
        form = JobForm(instance=job)
    return save_job_form(request, form, "jobs/includes/partial_job_update.html")
def job_delete(request, pk):
    """AJAX view: confirm (GET) or perform (POST) deletion of a job."""
    job = get_object_or_404(Job, pk=pk)
    data = {}
    if request.method == "POST":
        job.delete()
        data["form_is_valid"] = True
        data["html_job_list"] = render_to_string(
            "jobs/includes/partial_job_list.html", {"jobs": Job.objects.all()}
        )
    else:
        # GET: render the confirmation dialog only.
        data["html_form"] = render_to_string(
            "jobs/includes/partial_job_delete.html", {"job": job}, request=request
        )
    return JsonResponse(data)
|
17,711 | ac7633fe9304268adb9f84a1d866d618e807ff49 | #First = 5
#Second = 3
#print First + Second
#Third = First + Second
#Third
#MyTeacher = "Mrs. Goodyear"
#YourTeacher = MyTeacher
#print MyTeacher
#print YourTeacher
first = '5'
second = '5'
# Fix: `print first + second` is Python-2-only syntax; the call form below
# behaves identically under both Python 2 and 3 (string concatenation -> '55').
print(first + second)
17,712 | 0095efe9effde7caf4b4676df27779accfab44aa | import numpy as np
import matplotlib.pyplot as plt
import pylab
from mpl_toolkits.mplot3d import Axes3D
from PIL import Image
import glob
# Load every PGM image under Images/ as one flattened float32 row: X is
# (n_samples, n_pixels).
X = np.array([np.array(Image.open(im)).flatten() for im in glob.glob('Images/*.pgm')], 'f')
n_samples, n_features = X.shape

# PCA computation
X -= np.mean(X, axis = 0)
# NOTE(review): this second step also removes each sample's own mean after the
# per-feature centering above — confirm the double centering is intentional.
X -= X.mean(axis=1).reshape(n_samples,-1)
cov = np.dot(X.T, X) / X.shape[0] # get the data covariance matrix
U,S,V = np.linalg.svd(cov)
print(U.shape)
Xrot_reduced = np.dot(X, U[:,:3]) # Xrot_reduced becomes [N x 100]

# 3-D scatter of the data projected onto the top three principal components.
fig = pylab.figure()
ax = Axes3D(fig)
sequence_x_vals = Xrot_reduced[:,0]
sequence_y_vals = Xrot_reduced[:,1]
sequence_z_vals = Xrot_reduced[:,2]
ax.scatter(sequence_x_vals, sequence_y_vals, sequence_z_vals)
plt.title('pca projection')
#np.savetxt('pca_eigen_vector.txt', U[:,:3].T, delimiter = '\n\n')
#plt.show()
#w,h = 64, 60
#data = U[:, 2].reshape((60,64))
#rescaled = (255.0 / data.max() * (data - data.min())).astype(np.uint8)
#img = Image.fromarray(rescaled)
#img.save('fig2.png')

# Bar chart of the data's projection onto each of the first three eigenvectors.
width = 1
fig1 = plt.figure()
eigen1 = np.dot(X, U[:,0])
x = np.arange(eigen1.shape[0])
ax = fig1.add_subplot(111)
rects = ax.bar(x, eigen1.T, width)
plt.title('pca eigen vector 1')
fig2 = plt.figure()
eigen2 = np.dot(X, U[:,1])
x = np.arange(eigen2.shape[0])
ax = fig2.add_subplot(111)
rects = ax.bar(x, eigen2.T, width)
plt.title('pca eigen vector 2')
fig3 = plt.figure()
eigen3 = np.dot(X, U[:,2])
x = np.arange(eigen3.shape[0])
ax = fig3.add_subplot(111)
rects = ax.bar(x, eigen3.T, width)
plt.title('pca eigen vector 3')
plt.show()
|
# U.S. state facts keyed by state name.
# Bug fixes: the capitals of Florida/Georgia/Hawaii and of New York/North
# Carolina/North Dakota were shifted between neighbouring entries; Connecticut
# had a stray '<br>' HTML tag; Mississippi's flower was inconsistently
# lower-cased.
states = {
    'Alabama' : {'Capital': 'Montgomery', 'Flower': 'Camellia', 'Population': '4903185'},
    'Alaska' : {'Capital': 'Juneau', 'Flower': 'Forget Me Not', 'Population': '731545'},
    'Arizona' : {'Capital': 'Phoenix', 'Flower': 'Saguaro cactus blossom', 'Population': '7278717'},
    'Arkansas' : {'Capital': 'Little Rock', 'Flower': 'Apple blossom', 'Population': '3017804'},
    'California' : {'Capital': 'Sacramento', 'Flower': 'Golden poppy', 'Population': '39512223'},
    'Colorado' : {'Capital': 'Denver', 'Flower': 'Rocky Mountain columbine', 'Population': '5758736'},
    'Connecticut' : {'Capital': 'Hartford', 'Flower': 'Mountain laurel', 'Population': '3565287'},
    'Delaware' : {'Capital': 'Dover', 'Flower': 'Peach blossom', 'Population': '973764'},
    'Florida' : {'Capital': 'Tallahassee', 'Flower': 'Orange blossom', 'Population': '21477737'},
    'Georgia' : {'Capital': 'Atlanta', 'Flower': 'Cherokee rose', 'Population': '10617423'},
    'Hawaii' : {'Capital': 'Honolulu', 'Flower': 'Hibiscus', 'Population': '1415872'},
    'Idaho' : {'Capital': 'Boise', 'Flower': 'Syringa', 'Population': '1787065'},
    'Illinois' : {'Capital': 'Springfield', 'Flower': 'Violet', 'Population': '12671821'},
    'Indiana' : {'Capital': 'Indianapolis', 'Flower': 'Peony', 'Population': '6732219'},
    'Iowa' : {'Capital': 'Des Moines', 'Flower': 'Wild rose', 'Population': '3155070'},
    'Kansas' : {'Capital': 'Topeka', 'Flower': 'Sunflower', 'Population': '2913314'},
    'Kentucky' : {'Capital': 'Frankfort', 'Flower': 'Goldenrod', 'Population': '4467673'},
    'Louisiana' : {'Capital': 'Baton Rouge', 'Flower': 'Magnolia', 'Population': '4648794'},
    'Maine' : {'Capital': 'Augusta', 'Flower': 'White pine cone and tassel', 'Population': '1344212'},
    'Maryland' : {'Capital': 'Annapolis', 'Flower': 'Black-eyed susan', 'Population': '6045680'},
    'Massachusetts' : {'Capital': 'Boston', 'Flower': 'Mayflower', 'Population': '6892503'},
    'Michigan' : {'Capital': 'Lansing', 'Flower': 'Apple blossom', 'Population': '9986857'},
    'Minnesota' : {'Capital': 'St. Paul', 'Flower': 'Lady slipper', 'Population': '5639632'},
    'Mississippi' : {'Capital': 'Jackson', 'Flower': 'Magnolia', 'Population': '2976149'},
    'Missouri' : {'Capital': 'Jefferson City', 'Flower': 'Hawthorn', 'Population': '6137428'},
    'Montana' : {'Capital': 'Helena', 'Flower': 'Bitterroot', 'Population': '1068778'},
    'Nebraska' : {'Capital': 'Lincoln', 'Flower': 'Goldenrod', 'Population': '1934408'},
    'Nevada' : {'Capital': 'Carson City', 'Flower': 'Sagebrush', 'Population': '3080156'},
    'New Hampshire' : {'Capital': 'Concord', 'Flower': 'Purple lilac', 'Population': '1359711'},
    'New Jersey' : {'Capital': 'Trenton', 'Flower': 'Purple violet', 'Population': '8882190'},
    'New Mexico' : {'Capital': 'Santa Fe', 'Flower': 'Yucca', 'Population': '2096829'},
    'New York' : {'Capital': 'Albany', 'Flower': 'Rose', 'Population': '19453561'},
    'North Carolina' : {'Capital': 'Raleigh', 'Flower': 'Dogwood', 'Population': '10488084'},
    'North Dakota' : {'Capital': 'Bismarck', 'Flower': 'Wild prairie rose', 'Population': '762062'},
    'Ohio' : {'Capital': 'Columbus', 'Flower': 'Scarlet carnation', 'Population': '11689100'},
    'Oklahoma' : {'Capital': 'Oklahoma City', 'Flower': 'Mistletoe', 'Population': '3956971'},
    'Oregon' : {'Capital': 'Salem', 'Flower': 'Oregon grape', 'Population': '4217737'},
    'Pennsylvania' : {'Capital': 'Harrisburg', 'Flower': 'Mountain laurel', 'Population': '12801989'},
    'Rhode Island' : {'Capital': 'Providence', 'Flower': 'Violet', 'Population': '1059361'},
    'South Carolina' : {'Capital': 'Columbia', 'Flower': 'Carolina yellow jessamine', 'Population': '5148714'},
    'South Dakota' : {'Capital': 'Pierre', 'Flower': 'American pasqueflower', 'Population': '884659'},
    'Tennessee' : {'Capital': 'Nashville', 'Flower': 'Iris', 'Population': '6829174'},
    'Texas' : {'Capital': 'Austin', 'Flower': 'Bluebonnet', 'Population': '28995881'},
    'Utah' : {'Capital': 'Salt Lake City', 'Flower': 'Sego lily', 'Population': '3205958'},
    'Vermont' : {'Capital': 'Montpelier', 'Flower': 'Red clover', 'Population': '623989'},
    'Virginia' : {'Capital': 'Richmond', 'Flower': 'American dogwood', 'Population': '8535519'},
    'Washington' : {'Capital': 'Olympia', 'Flower': 'Coast rhododendron', 'Population': '7614893'},
    'West Virginia' : {'Capital': 'Charleston', 'Flower': 'Rhododendron', 'Population': '1792147'},
    'Wisconsin' : {'Capital': 'Madison', 'Flower': 'Wood violet', 'Population': '5822434'},
    'Wyoming' : {'Capital': 'Cheyenne', 'Flower': 'Indian paintbrush', 'Population': '578759'},
}
17,714 | 4e4c2b3bd0e006292b04cdb0216d79a568cf3295 | import random, csv, copy
from solution import Solution
class Hill_climber(object):
    """Stochastic hill climber that swaps house->battery assignments to reduce cable cost.

    Runs immediately on construction: `number_of_runs` independent restarts of
    `number_of_times` swap attempts each. Accepted-swap costs accumulate in
    self.results; each run's final cost is appended to self.multi_results.
    """

    def __init__(self, houses, batteries, number_of_times, number_of_runs):
        # Deep copies so the climber never mutates the caller's objects.
        self.houses = copy.deepcopy(houses)
        self.batteries = copy.deepcopy(batteries)
        self.number_of_times = number_of_times  # swap attempts per run
        self.n = number_of_runs                 # independent restarts
        self.results = []        # cost after every accepted swap
        self.multi_results = []  # final cost of each run
        self.hill_climbers()     # start climbing immediately

    def hill_climbers(self):
        ''' Perform self.n independent runs of self.number_of_times swap attempts each. '''
        swaps_made = 0
        loopcounter = 0
        for count in range(self.n):
            # Fresh working copies for every restart.
            temp_batteries = copy.deepcopy(self.batteries)
            temp_houses = copy.deepcopy(self.houses)
            swap_counter = 0
            while True:
                # Pick one capacity-feasible pair of houses at random.
                swap_list = self.possible_swaps(temp_houses, temp_batteries)
                random_swap = random.choice(swap_list)
                random_house_in_battery = random_swap[0]
                random_house_in_battery2 = random_swap[1]
                random_battery = random_house_in_battery.connected
                random_battery2 = random_house_in_battery2.connected
                solution = Solution(temp_houses, temp_batteries)
                # calc old distance
                old_distance = solution.distance_calc(random_house_in_battery, random_battery)
                old_distance2 = solution.distance_calc(random_house_in_battery2, random_battery2)
                # calc new distance
                new_distance = solution.distance_calc(random_house_in_battery, random_battery2)
                new_distance2 = solution.distance_calc(random_house_in_battery2, random_battery)
                # Accept only swaps that strictly lower total cable length.
                if old_distance + old_distance2 > new_distance + new_distance2:
                    print("Swap reduces cable costs!")
                    # sort battery list
                    temp_batteries = sorted(temp_batteries, key=lambda battery: battery.id)
                    # remove house from old battery
                    temp_batteries[random_battery.id].remove(random_house_in_battery)
                    temp_batteries[random_battery2.id].remove(random_house_in_battery2)
                    # add house to new battery
                    temp_batteries[random_battery.id].add(random_house_in_battery2)
                    temp_batteries[random_battery2.id].add(random_house_in_battery)
                    # reconnect to propper objects
                    random_house_in_battery.connect(temp_batteries[random_battery2.id])
                    random_house_in_battery2.connect(temp_batteries[random_battery.id])
                    # reassign values
                    temp_houses[random_house_in_battery.id] = random_house_in_battery
                    temp_houses[random_house_in_battery2.id] = random_house_in_battery2
                    # Save solution & append costs to self.results
                    solution = Solution(temp_houses, temp_batteries)
                    self.results.append(solution.calculate_costs(loopcounter))
                    swaps_made += 1
                else:
                    #print("Swap does not reduce cable costs!")
                    pass
                swap_counter += 1
                print("Run: ", loopcounter, ", Iteration: ", swap_counter)
                if swap_counter == self.number_of_times:
                    print(swap_counter)
                    break
            loopcounter += 1
            # End of this run's hill climb: record the final solution cost.
            eind_oplossing = Solution(temp_houses, temp_batteries)
            self.multi_results.append(eind_oplossing.calculate_costs(count))

    def possible_swaps(self, houses, batteries):
        ''' Return all [house, swap_house] pairs whose exchange respects battery capacity. '''
        # list of connected houses for each battery (assumes exactly five batteries)
        b0, b1, b2, b3, b4 = batteries[0].connected, batteries[1].connected, \
                             batteries[2].connected, batteries[3].connected, \
                             batteries[4].connected
        battery_lists = [b0, b1, b2, b3, b4]
        possible_swaps = []
        for index, battery in enumerate(batteries):
            for house in battery.connected:
                counter = 0
                # NOTE(review): the slice below stops before the LAST battery list,
                # so pairs involving the final battery are never proposed —
                # confirm whether [(index + 1):] was intended.
                for list in battery_lists[(index + 1):-1]:
                    counter += 1
                    for swap_house in list:
                        # append swap if enough capacity
                        if (battery.check_amp() + house.amp) >= swap_house.amp and (batteries[index + counter].check_amp() + swap_house.amp) >= house.amp:
                            possible_swaps.append([house, swap_house])
        return possible_swaps
|
17,715 | cad963198843241b0e9d506ef01ab9efed43646b | import pygame
from pygame import *
from random import *
class Crepes:
    """A randomly generated crêpe recipe; difficulty controls the ingredient count.

    0 = easy (2 ingredients), 1 = normal (3), >=2 = hard (4). All ingredients
    are drawn from the same family (savoury or sweet) without repetition.
    """

    def __init__(self, difficulte):  # Build a crêpe from the difficulty; produces a list of ingredients
        ingredientsDisponibles = ["Oeuf","Fromage","Cornichon", "Champignons", "Tomate", "Nutella", "Sucre", "Miel", "Confiture", "Citron", "Chantilly"]
        # The first five entries are savoury, the remaining six are sweet.
        ingredientsSales = ingredientsDisponibles[:5]
        ingredientsSucres = ingredientsDisponibles[5:]
        self.ingredients = []
        self.difficulte = difficulte
        a = choice(ingredientsDisponibles)
        if a in ingredientsSales: # keep savoury crêpes savoury
            ingredientsDisponibles=ingredientsSales # and sweet crêpes sweet
        else: # no mixing of the two families
            ingredientsDisponibles=ingredientsSucres
        b = a
        self.ingredients.append(a)
        # Redraw until we get an ingredient not already in the recipe.
        while b in self.ingredients:
            b = choice(ingredientsDisponibles)
        self.ingredients.append(b)
        if difficulte >= 1: # normal or hard difficulty: 3 ingredients
            c = b
            while c in self.ingredients:
                c = choice(ingredientsDisponibles)
            self.ingredients.append(c)
            if difficulte>=2: # hard difficulty: 4 ingredients
                d = c
                while d in self.ingredients:
                    d = choice(ingredientsDisponibles)
                self.ingredients.append(d)

    def afficherCrepe(self):
        """Print the recipe (difficulty label plus ingredient list) to stdout."""
        if self.difficulte==1:
            difstr="normale"
        elif self.difficulte>=2:
            difstr="difficile"
        else:
            difstr="facile"
        print("Recette "+difstr + " :")
        print("Ingrédients : ")
        for i in range(0,len(self.ingredients)):
            print("\t- "+self.ingredients[i])
def rectangle(win, couleur, hitbox):
    # Draw a 2-pixel-wide rectangle outline of the given colour on the surface.
    pygame.draw.rect(win, couleur, hitbox, 2)
def AfficheRecette(win, x, y, crepe):
    """Draw the recipe card for `crepe` at (x, y) on the pygame surface `win`.

    Layout: a 150x50 header box showing the difficulty (colour-coded), then one
    50px-tall row per ingredient below it. Flips the display when done.
    """
    # COLOURS
    noir = (0,0,0)
    blanc = (255,255,255)
    rouge = (255,0,0)
    vert = (0,255,0)
    jaune = (255,215,0)
    # DIFFICULTY HANDLING: pick the header label and its colour
    if crepe.difficulte==1:
        difstr="Normal"
        couldif = jaune
    elif crepe.difficulte>=2:
        difstr="Difficile"
        couldif = rouge
    else:
        difstr="Facile"
        couldif = vert
    # DIMENSIONS + TEXT: header box, then one row per ingredient
    crect1 = (x,y,150,50)
    crect2 = (x, y+50, 150,50*len(crepe.ingredients))
    rectangle(win, blanc, crect1)
    rectangle(win, blanc, crect2)
    police = pygame.font.SysFont("freesans",20)
    texte = police.render(difstr, True, couldif)
    positDiff = texte.get_rect()
    # Centre the difficulty label inside the header box.
    positDiff.centerx = x+crect1[2]/2
    positDiff.centery = y+crect1[3]/2
    win.blit(texte, positDiff)
    nb = 25
    for ing in crepe.ingredients:
        """
        if update[0] and update[1]==ing :
            couling = vert
        else:
            couling = blanc
        """
        texte = police.render(ing, True, blanc)
        position = texte.get_rect()
        # Centre each ingredient horizontally, stepping 50px down per row.
        position.centerx = crect2[0]+crect2[2]/2
        position.centery = crect2[1]+nb
        win.blit(texte, position)
        nb += 50
    pygame.display.flip()
|
17,716 | 10926877cab76073354b355a79d62c0bfa4352b8 | # BEGIN: Copyright
# Copyright (C) 2020 - 2021 Rector and Visitors of the University of Virginia
# All rights reserved
# END: Copyright
# BEGIN: License
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# END: License
import os
import json
import sys
from jsonschema import validate
class Configuration:
    """Loads JSON configuration files from a fixed directory.

    The directory's absolute path is exported via the
    REDDIEGO_ConfigurationDirectory environment variable for child processes.
    """

    def __init__(self, configurationDirectory):
        self.configurationDirectory = os.path.abspath(configurationDirectory)
        os.environ['REDDIEGO_ConfigurationDirectory'] = self.configurationDirectory

    def loadJsonFile(self, fileName, schema = None):
        """Parse `fileName` from the configuration directory as JSON.

        Optionally validates the result against a jsonschema `schema`.
        Exits the process with an error message when the file cannot be opened.
        """
        path = os.path.join(self.configurationDirectory, fileName)
        # Fixes: the original bare `except:` swallowed every error (reporting
        # even permission problems as "does not exist"), and the file handle
        # leaked if json.load raised. `with` guarantees the handle is closed;
        # JSON parse errors still propagate to the caller unchanged.
        try:
            with open(path, "r") as jsonFile:
                dictionary = json.load(jsonFile)
        except OSError:
            sys.exit("ERROR: File '" + path + "' does not exist.")
        if schema is not None:
            validate(dictionary, schema)
        return dictionary
|
17,717 | 4e33dc11aa35d60548859c777b6460145bda9486 | import numpy as np
import matplotlib.pyplot as plt
import pandas
import gc
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import OrdinalEncoder
from sklearn import preprocessing
from sklearn import neural_network
def main():
    """Feature preparation for the group income prediction task.

    Reads the train/test CSVs, strips rental income out of the target, coerces
    numeric columns, target-encodes high-cardinality categoricals on the
    training rows only, one-hot-encodes low-cardinality ones, and writes the
    prepared train/test matrices to CSV. The model-fitting code is currently
    commented out.
    """
    df = pandas.read_csv("data/tcd-ml-1920-group-income-train.csv", index_col='Instance')
    trainingDataLength = len(df.index)
    y_train = df['Total Yearly Income [EUR]']
    # y_train = y_train[:trainingDataLength]
    # print(trainingDataLength)
    tdf = pandas.read_csv("data/tcd-ml-1920-group-income-test.csv", index_col='Instance')
    # Train and test are concatenated so every transform sees both frames.
    fulldf = pandas.concat([df, tdf], sort = True)
    # fulldf.to_csv("CombinedParams.csv")
    # Column holds strings like "123.4 EUR"; keep the numeric prefix only.
    RentalIncome = fulldf['Yearly Income in addition to Salary (e.g. Rental Income)']
    RentalIncome = RentalIncome.str.split(" ", n = 1, expand = True)[0].astype(float)
    RentalIncome.to_csv("RentalIncome.csv", header = True)
    # Predict salary-only income; rental income is added back after prediction.
    y_train = y_train - RentalIncome[:trainingDataLength ]
    y_train.to_csv("TrainingResults.csv", header = True)
    fulldf.drop('Yearly Income in addition to Salary (e.g. Rental Income)', axis = 1, inplace = True)
    gc.collect()
    #clear some memory used for df and tdf as they're no longer needed
    print("Read data")
    # Coerce numeric columns, imputing unparseable values with the column mean.
    fulldf['Year of Record'] = pandas.to_numeric(fulldf['Year of Record'], errors='coerce').fillna(fulldf['Year of Record'].mean())
    # fulldf['Housing Situation'] = pandas.to_numeric(fulldf['Housing Situation'], errors='coerce').fillna(0)
    fulldf['Crime Level in the City of Employement'] = pandas.to_numeric(fulldf['Crime Level in the City of Employement'], errors='coerce').fillna(fulldf['Crime Level in the City of Employement'].mean())
    fulldf['Age'] = pandas.to_numeric(fulldf['Age'], errors='coerce').fillna(fulldf['Age'].mean())
    fulldf['Work Experience in Current Job [years]'] = pandas.to_numeric(fulldf['Work Experience in Current Job [years]'], errors='coerce')
    fulldf['Work Experience in Current Job [years]'].fillna(fulldf['Work Experience in Current Job [years]'].mean(), inplace = True)
    fulldf['Size of City'] = pandas.to_numeric(fulldf['Size of City'], errors='coerce').fillna(fulldf['Size of City'].mean())
    fulldf['Body Height [cm]'] = pandas.to_numeric(fulldf['Body Height [cm]'], errors='coerce').fillna(fulldf['Body Height [cm]'].mean())
    print("Coerced Numeric data")
    # TODO Sanitized Work Experience
    # params = fulldf[['Year of Record', 'Crime Level in the City of Employement', 'Age', 'Body Height [cm]', 'Size of City', 'Wears Glasses']].copy()
    # One-hot encode gender into two indicator columns.
    gender_df = pandas.get_dummies(fulldf['Gender'])
    fulldf['Male'] = gender_df['male'].copy()
    fulldf['Female'] = gender_df['female'].copy()
    fulldf.drop('Gender', axis = 1, inplace = True)
    # Target-encode high-cardinality categoricals: replace each category with
    # the mean income of that category over the TRAINING rows only.
    fulldf['Profession'] = fulldf['Profession'].map(fulldf[:trainingDataLength].groupby('Profession')['Total Yearly Income [EUR]'].mean())
    fulldf["Country"] = fulldf["Country"].map(fulldf[:trainingDataLength].groupby("Country")["Total Yearly Income [EUR]"].mean())
    fulldf["Hair Color"] = fulldf["Hair Color"].map(fulldf[:trainingDataLength].groupby("Hair Color")["Total Yearly Income [EUR]"].mean())
    # Categories unseen in training map to NaN; impute with the column mean.
    fulldf['Profession'] = fulldf['Profession'].fillna(fulldf['Profession'].mean())
    fulldf['Country'] = fulldf['Country'].fillna(fulldf['Country'].mean())
    fulldf['Hair Color'] = fulldf['Hair Color'].fillna(fulldf['Hair Color'].mean())
    # fulldf['Profession'] = replaceWithMeans(fulldf[['Profession', 'Total Yearly Income [EUR]']].copy(), trainingDataLength)
    # fulldf['Country'] = replaceWithMeans(fulldf[['Country', 'Total Yearly Income [EUR]']].copy(), trainingDataLength)
    # fulldf['Hair Color'] = replaceWithMeans(fulldf[['Hair Color', 'Total Yearly Income [EUR]']].copy(), trainingDataLength)
    # Ordinal-encode satisfaction; anything unexpected becomes 0 (Average).
    fulldf['Satisfation with employer'].replace({'Unhappy': -1, 'Average': 0, 'Happy': 2, 'Somewhat Happy': 1}, inplace = True)
    fulldf['Satisfation with employer'] = pandas.to_numeric(fulldf['Satisfation with employer'], errors='coerce').fillna(0)
    fulldf.drop('Housing Situation', axis = 1, inplace = True)
    #iterate through all professions and replace them with the average for that profession
    #get all rows with profession X
    #get mean income for that professions
    #replace that profession with the mean in all instances in original DataFrame
    #reset, next profession
    # jobs_df = pandas.get_dummies(fulldf['Profession'])
    # fulldf = pandas.merge(fulldf, jobs_df, on = 'Instance', copy = False)
    # fulldf.drop('Profession', axis = 1, inplace = True)
    #
    # Country_df = pandas.get_dummies(fulldf['Country'])
    # fulldf = pandas.merge(fulldf, Country_df, on = 'Instance', copy = False)
    # fulldf.drop('Country', axis = 1, inplace = True)
    #
    # HairColor_df = pandas.get_dummies(fulldf['Hair Color'])
    # fulldf = pandas.merge(fulldf, HairColor_df, on = 'Instance', copy = False)
    # fulldf.drop('Hair Color', axis = 1, inplace = True)
    #
    # One-hot encode the degree level (no indicator for "no degree"/other).
    Degree_df = pandas.get_dummies(fulldf['University Degree'])
    fulldf['Bachelor'] = Degree_df['Bachelor'].copy()
    fulldf['Master'] = Degree_df['Master'].copy()
    fulldf['PhD'] = Degree_df['PhD'].copy()
    fulldf.drop('University Degree', axis = 1, inplace = True)
    print("Replaced Categorical Values")
    gc.collect()
    print(fulldf.head())
    print("Merged into params")
    # Normalizing is not providing enough of an improvement to justify the added run-time
    # min_max_scaler = preprocessing.MinMaxScaler()
    # scaled_values = min_max_scaler.fit_transform(params)
    # params.loc[:,:] = scaled_values
    # print("normalized")
    # stan_scaler = preprocessing.StandardScaler()
    # scaled_values = stan_scaler.fit_transform(params)
    # params.loc[:,:] = scaled_values
    # print("standardized")
    # Split the prepared features back into train/test and persist them.
    fulldf.drop('Total Yearly Income [EUR]', axis = 1, inplace = True)
    x_train = fulldf[:trainingDataLength]
    x_train.to_csv("TrainingSet.csv")
    x_test = fulldf[trainingDataLength:]
    x_test.to_csv("TestSet.csv")
    gc.collect()
    # x_data.to_csv("Sanitized.csv")
    # # print(x_train)
    #
    # # Create linear regression object
    # # regr = linear_model.MLPRegression()
    # regr = neural_network.MLPRegressor()
    #
    # # Train the model using the training sets
    # regr.fit(x_train, y_train)
    # print("Trained models")
    # # Make predictions using the testing set
    # x_test['Income'] = regr.predict(x_test)
    # print("Made predictions")
    # # y_test = tdf['Income']
    # # results = pd.DataFrame()
    # results = x_test['Income'].copy()
    # # results.columns = ['Income']
    #
    # # print(results)
    # results.to_csv("groupIncomePredSubmission.csv", header = "Instance, Income")
def replaceWithMeans(cols, trainingDataLength):
    """Target-encode the first (categorical) column of *cols*.

    Restricts to the first *trainingDataLength* rows (the labelled training
    portion), computes the mean 'Total Yearly Income [EUR]' per category of
    the first column, and returns that column with every category replaced
    by its mean income.

    Parameters:
        cols: DataFrame whose first column is the categorical feature and
            which also contains a 'Total Yearly Income [EUR]' column.
        trainingDataLength: number of leading rows with known income.

    Returns:
        pandas.Series of the encoded first column (training rows only).
    """
    # BUG FIX: the original used cols[0, trainingDataLength] (a tuple key,
    # which raises) instead of a row slice, and then mutated a temporary
    # copy via cols.iloc[:, 0].replace(..., inplace=True), so the
    # replacements never reached the returned Series.
    train = cols.iloc[:trainingDataLength]
    category = train.columns[0]
    # Mean income per category, computed in one vectorized pass instead of
    # one O(n) scan per unique value.
    means = train.groupby(category)['Total Yearly Income [EUR]'].mean()
    return train[category].map(means)
# Script entry point: run the preprocessing pipeline only when executed
# directly, not when imported.
if __name__ == '__main__':
    main()
|
17,718 | a632bff0016f9b82e4aa4e98ea98fa752fe79f74 | # Generated by Django 2.1.8 on 2020-12-09 09:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``users`` table."""

    # First migration for this app; nothing to depend on yet.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='users',
            fields=[
                # user_id doubles as the primary key, so Django adds no AutoField.
                ('user_id', models.CharField(max_length=15, primary_key=True, serialize=False, verbose_name='user_id')),
                ('name', models.CharField(max_length=15, verbose_name='name')),
                ('mails', models.EmailField(max_length=254, verbose_name='mails')),
                # NOTE(review): password is a plain CharField(15) — presumably
                # hashed elsewhere; confirm before relying on this column.
                ('password', models.CharField(max_length=15, verbose_name='password')),
                ('reason', models.TextField(max_length=100, verbose_name='reason')),
            ],
        ),
    ]
|
class Solution:
    def equationsPossible(self, equations: "List[str]") -> "bool":
        """Decide whether a set of "x==y" / "x!=y" equations over
        single-letter variables is satisfiable.

        First union all equality-connected letters into components, then
        verify that no inequality links two letters of one component.
        """
        # Disjoint-set over the 26 lowercase letters, with path halving.
        parent = list(range(26))

        def root(i):
            while parent[i] != i:
                parent[i] = parent[parent[i]]
                i = parent[i]
            return i

        for eq in equations:
            if eq[1:3] == "==":
                a = root(ord(eq[0]) - ord("a"))
                b = root(ord(eq[3]) - ord("a"))
                if a != b:
                    # Keep the numerically smaller root as representative.
                    if b < a:
                        a, b = b, a
                    parent[b] = a

        for eq in equations:
            if eq[1:3] == "!=":
                if root(ord(eq[0]) - ord("a")) == root(ord(eq[3]) - ord("a")):
                    return False
        return True
|
17,720 | 9ec837d67ec2be4a9be739582c6bad75f47c5adc | import discord, os
from discord.ext import commands
from discord.ext.commands import bot
# NOTE(review): owner_id is declared but never checked anywhere below —
# the cog management commands are open to every user; confirm intent.
owner_id = [181386549525479424, 744205759214256233]
# Bot with prefix "s?"; the built-in help command is removed so a custom
# one can be provided by a cog.
client = commands.Bot(command_prefix='s?')
client.remove_command('help')
@client.event
async def on_ready():
    # Fired once the gateway connection is established.
    print("Bot is online.")
@client.command()
async def load(ctx, extension):
    # Load the cog module cogs/<extension>.py at runtime.
    client.load_extension(f'cogs.{extension}')
    await ctx.send(f'> Loaded {extension}.')
@client.command()
async def unload(ctx, extension):
    # Unload a previously loaded cog.
    client.unload_extension(f'cogs.{extension}')
    await ctx.send(f'> Unloaded {extension}.')
@client.command()
async def reload(ctx, extension):
    # Convenience: unload + load in one step.
    client.reload_extension(f'cogs.{extension}')
    await ctx.send(f'> Reloaded {extension}.')
def load_cogs():
    """Load every *.py file in ./cogs as a bot extension at startup."""
    for filename in os.listdir('./cogs'):
        if filename.endswith('.py'):
            # Strip the ".py" suffix to get the module name.
            client.load_extension(f'cogs.{filename[:-3]}')
def is_admin(ctx):
    """Return True when the invoking member has the Administrator permission.

    Suitable as a predicate for commands.check().
    """
    # bool() normalizes the permission flag; the original re-implemented
    # truthiness with a four-line if/else returning True/False.
    return bool(ctx.message.author.guild_permissions.administrator)
load_cogs()
client.run('TOKEN HERE') |
17,721 | 7b3edd799a437ccf3e454accc4d4c77f3d9d01a9 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config, defaults, htmllib, pprint, time
from lib import *
import wato
# Python 2.3 does not have 'set' in normal namespace.
# But it can be imported from 'sets'
try:
set()
except NameError:
from sets import Set as set
loaded_with_language = False
builtin_dashboards = {}
# Declare constants to be used in the definitions of the dashboards
GROW = 0
MAX = -1
# These settings might go into the config module, sometime in future,
# in order to allow the user to customize this.
header_height = 60 # Distance from top of the screen to the lower border of the heading
screen_margin = 5 # Distance from the left border of the main-frame to the dashboard area
dashlet_padding = 21, 5, 5, 0 # Margin (N, E, S, W) between outer border of dashlet and its content
corner_overlap = 22
title_height = 0 # Height of dashlet title-box
raster = 10, 10 # Raster the dashlet choords are measured in
# Load plugins in web/plugins/dashboard and declare permissions,
# note: these operations produce language-specific results and
# thus must be reinitialized everytime a language-change has
# been detected.
def load_plugins():
    """(Re)load the dashboard plugins, once per active UI language.

    Plugin files in web/plugins/dashboard may register dashboards by
    adding entries to builtin_dashboards.
    """
    global loaded_with_language
    if loaded_with_language == current_language:
        return
    # Permissions are currently not being defined. That will be the
    # case as soon as dashboards become editable.
    # Load plugins for dashboards. Currently these files
    # just may add custom dashboards by adding to builtin_dashboards.
    load_web_plugins("dashboard", globals())
    # This must be set after plugin loading to make broken plugins raise
    # exceptions all the time and not only the first time (when the plugins
    # are loaded).
    loaded_with_language = current_language
    # In future there will be user editable dashboards just like
    # views which will be loaded. Currently we only use the builtin
    # dashboards.
    global dashboards
    dashboards = builtin_dashboards
# HTML page handler for generating the (a) dashboard. The name
# of the dashboard to render is given in the HTML variable 'name'.
# This defaults to "main".
def page_dashboard():
    """HTML page handler: render the dashboard named by the 'name' HTML
    variable (default "main")."""
    name = html.var("name", "main")
    if name not in dashboards:
        # NOTE(review): 'name' is user-supplied and interpolated into HTML
        # markup unescaped — confirm MKGeneralException output is escaped
        # upstream, otherwise this is an XSS vector.
        raise MKGeneralException("No such dashboard: '<b>%s</b>'" % name)
    render_dashboard(name)
def add_wato_folder_to_url(url, wato_folder):
    """Return *url* with a "wato_folder" query parameter appended.

    URLs containing a '/' are treated as non-Check_MK targets and come
    back untouched, as does any call without a folder.
    """
    if not wato_folder or '/' in url:
        return url
    # Pick the correct query-string separator for the existing URL.
    if '?' in url:
        separator = "&"
    else:
        separator = "?"
    return url + separator + "wato_folder=" + htmllib.urlencode(wato_folder)
# Actual rendering function
def render_dashboard(name):
    """Render the dashboard *name* as a full HTML page.

    Honors the optional "wato_folder" HTML variable to restrict the shown
    data, emits the Bootstrap page skeleton, renders every dashlet and
    finally installs the Javascript refresh scheduler.
    """
    board = dashboards[name]
    # The dashboard may be called with "wato_folder" set. In that case
    # the dashboard is assumed to restrict the shown data to a specific
    # WATO subfolder or file. This could be a configurable feature in
    # future, but currently we assume, that *all* dashboards are filename
    # sensitive.
    wato_folder = html.var("wato_folder")
    # When an empty wato_folder attribute is given a user really wants
    # to see only the hosts contained in the root folder. So don't ignore
    # the root folder anymore.
    #if not wato_folder: # ignore wato folder in case of root folder
    #    wato_folder = None
    # The title of the dashboard needs to be prefixed with the WATO path,
    # in order to make it clear to the user, that he is seeing only partial
    # data.
    title = board["title"]
    global header_height
    if title is None:
        # If the title is none, hide the header line
        html.set_render_headfoot(False)
        header_height = 0
        title = ''
    elif wato_folder is not None:
        title = wato.api.get_folder_title(wato_folder) + " - " + title
    # Theme stylesheets for the Bootstrap-based page chrome.
    stylesheets = ["pages", "dashboard", "status", "views"]
    stylesheets += ["theme/css/cloud-admin",
                "theme/css/themes/default",
                "theme/css/responsive",
                "theme/font-awesome/css/font-awesome.min",
                "theme/js/bootstrap-daterangepicker/daterangepicker-bs3",
                "theme/js/hubspot-messenger/css/messenger.min",
                "theme/js/hubspot-messenger/css/messenger-spinner.min",
                "theme/js/hubspot-messenger/css/messenger-theme-flat.min",
                "theme/js/jquery-ui-1.10.3.custom/css/custom-theme/jquery-ui-1.10.3.custom.min",
                "theme/js/bootstrap-switch/bootstrap-switch.min",
                "theme/css/flags/flags.min",
                "theme/css/fonts"
    ]
    javascripts = []
    javascripts += ["dashboard"]
    html.header(title, javascripts=javascripts, stylesheets=stylesheets)
    html.write("""<div class="col-lg-12 margin-top-50" id="content">""")
    # Static page header (breadcrumbs + title) emitted before the dashlets.
    result = """
    <!-- PAGE HEADER-->
    <div class="row">
        <div class="col-sm-12">
            <div class="page-header">
                <!-- STYLER -->
                <!-- /STYLER -->
                <!-- BREADCRUMBS -->
                <ul class="breadcrumb">
                    <li>
                        <i class="fa fa-home"></i>
                    </li>
                    <li>Dashboard</li>
                </ul>
                <!-- /BREADCRUMBS -->
                <div class="clearfix">
                    <h3 class="content-title pull-left">Dashboard</h3>
                </div>
                <div class="description">Blank Page</div>
            </div>
        </div>
    </div>
    <!-- /PAGE HEADER -->
    """
    html.write(result)
    html.write("""    <div class="row">
        <div class="col-sm-12">
    """)
    # html.write("<div id=dashboard class=\"dashboard_%s\">\n" % name) # Container of all dashlets
    refresh_dashlets = [] # Dashlets with automatic refresh, for Javascript
    for nr, dashlet in enumerate(board["dashlets"]):
        # dashlets using the 'urlfunc' method will dynamically compute
        # an url (using HTML context variables at their wish).
        if "urlfunc" in dashlet:
            dashlet["url"] = dashlet["urlfunc"]()
        # dashlets using the 'url' method will be refreshed by us. Those
        # dashlets using static content (such as an iframe) will not be
        # refreshed by us but need to do that themselves.
        if "url" in dashlet:
            refresh_dashlets.append([nr, dashlet.get("refresh", 0),
                str(add_wato_folder_to_url(dashlet["url"], wato_folder))])
        # Paint the dashlet's HTML code
        render_dashlet(nr, dashlet, wato_folder)
    html.write("</div>\n")
    html.write("""    </div>
    </div>
    """)
    # html.write("""</div>""")
    # Put list of all autorefresh-dashlets into Javascript and also make sure,
    # that the dashbaord is painted initially. The resize handler will make sure
    # that every time the user resizes the browser window the layout will be re-computed
    # and all dashlets resized to their new positions and sizes.
    html.javascript("""
    //var header_height = %d;
    //var screen_margin = %d;
    //var title_height = %d;
    //var dashlet_padding = Array%s;
    //var corner_overlap = %d;
    var refresh_dashlets = %r;
    //var dashboard_name = '%s';
    //set_dashboard_size();
    //window.onresize = function () { set_dashboard_size(); }
    //window.onload = function () { set_dashboard_size(); }
    dashboard_scheduler(1);
    """ % (header_height, screen_margin, title_height, dashlet_padding,
          corner_overlap, refresh_dashlets, name))
    html.body_end() # omit regular footer with status icons, etc.
# Create the HTML code for one dashlet. Each dashlet has an id "dashlet_%d",
# where %d is its index (in board["dashlets"]). Javascript uses that id
# for the resizing. Within that div there is an inner div containing the
# actual dashlet content. The margin between the inner and outer div is
# used for stylish layout stuff (shadows, etc.)
def render_dashlet(nr, dashlet, wato_folder):
    """Emit the HTML for dashlet number *nr*.

    The content container gets the DOM id "dashlet_<nr>". Content comes
    either from dashlet["content"] (fixed HTML) or from an iframe URL —
    taken from dashlet["iframe"], computed by dashlet["iframefunc"], or
    derived from the "view" shortcut.
    """
    html.write("""
    <div class="col-md-6">
    """)
    # html.write('<div class=dashlet id="dashlet_%d">' % nr)
    # # render shadow
    # # if dashlet.get("shadow", True):
    # #     for p in [ "nw", "ne", "sw", "se", "n", "s", "w", "e" ]:
    # #         html.write('<img id="dashadow_%s_%d" class="shadow %s" src="images/dashadow-%s.png">' %
    # #             (p, nr, p, p))
    # if dashlet.get("title"):
    #     url = dashlet.get("title_url", None)
    #     if url:
    #         title = '<a href="%s">%s</a>' % (url, dashlet["title"])
    #     else:
    #         title = dashlet["title"]
    #     html.write('<div class="title" id="dashlet_title_%d">%s</div>' % (nr, title))
    # if dashlet.get("background", True):
    #     bg = " background"
    # else:
    #     bg = ""
    # html.write('<div class="dashlet_inner%s" id="dashlet_inner_%d">' % (bg, nr))
    # html.write( "%s" % dashlet)
    # Optional way to render a dynamic iframe URL
    if "iframefunc" in dashlet:
        dashlet["iframe"] = dashlet["iframefunc"]()
    # The method "view" is a shortcut for "iframe" with a certain url
    if "view" in dashlet:
        dashlet["iframe"] = "view.py?view_name=%s&_display_options=HRSIXL&_body_class=dashlet" % dashlet["view"]
    # Dashlet frame: title box plus the chart/content container.
    # NOTE(review): dashlet["title"] is assumed to exist here — dashlets
    # without a title would raise a KeyError; confirm all declarations set it.
    html.write("""
    <div class="box border">
        <div class="box-title">
            <h4><i class="fa fa-adjust"></i>%s</h4>
            <div class="tools hidden-xs">
                <a class="config" data-toggle="modal" href="#box-config">
                    <i class="fa fa-cog"></i>
                </a>
            </div>
        </div>
        <div class="box-body">
            <div class="chart" id="dashlet_%s" style="padding: 0px; position: relative;">
    """ % (dashlet["title"], nr))
    # The content is rendered only if it is fixed. In the
    # other cases the initial (re)-size will paint the content.
    if "content" in dashlet: # fixed content
        html.write(dashlet["content"])
    elif "iframe" in dashlet: # fixed content containing iframe
        # Fix of iPad >:-P
        html.write('<div style="width: 100%; height: 100%; -webkit-overflow-scrolling:touch; overflow: auto;">')
        html.write('<iframe allowTransparency="true" frameborder="0" width="100%%" height="100%%" src="%s"></iframe>' %
            add_wato_folder_to_url(dashlet["iframe"], wato_folder))
        html.write('</div>')
    html.write('<div class="dashlet_inner" id="dashlet_inner_%d">' % (nr))
    html.write("</div>")
    html.write("""
            </div>
        </div>
    </div>
    """ )
    html.write("</div>\n")
# Here comes the brain stuff: An intelligent liquid layout algorithm.
# It is called via ajax, mainly because I was not eager to code this
# directly in Javascript (though this would be possible and probably
# more lean.)
# Compute position and size of all dashlets
def ajax_resize():
    """Compute position and size of all dashlets for the current screen.

    Called via AJAX with the HTML variables "name", "width" and "height".
    Dashlets are first placed at their declared raster positions; then
    elastic (GROW/MAX) dashlets are expanded one raster cell at a time,
    round-robin, until nothing can grow anymore. Writes the resulting
    list of [nr, visible, x, y, width, height] entries as a Python repr.
    """
    # computation with vectors
    class vec:
        def __init__(self, xy):
            self._data = xy
        def __div__(self, xy):
            return vec((self._data[0] / xy[0], self._data[1] / xy[1]))
        def __repr__(self):
            return repr(self._data)
        def __getitem__(self, i):
            return self._data[i]
        def make_absolute(self, size):
            n = []
            for i in [0, 1]:
                if self._data[i] < 0:
                    n.append(size[i] + self._data[i] + 1) # Here was a bug fixed by Markus Lengler
                else:
                    n.append(self._data[i] - 1) # make begin from 0
            return vec(n)
        # Compute the initial size of the dashlet. If MAX is used,
        # then the dashlet consumes all space in its growing direction,
        # regardless of any other dashlets.
        def initial_size(self, position, rastersize):
            n = []
            for i in [0, 1]:
                if self._data[i] == MAX:
                    n.append(rastersize[i] - abs(position[i]) + 1)
                elif self._data[i] == GROW:
                    n.append(1)
                else:
                    n.append(self._data[i])
            return n
        def compute_grow_by(self, size):
            n = []
            for i in [0, 1]:
                if size[i] != GROW: # absolute size, no growth
                    n.append(0)
                elif self._data[i] < 0:
                    n.append(-1) # grow direction left, up
                else:
                    n.append(1) # grow direction right, down
            return n
        def __add__(self, b):
            return vec((self[0] + b[0], self[1] + b[1]))
    board = dashboards[html.var("name")]
    screensize = vec((int(html.var("width")), int(html.var("height"))))
    rastersize = screensize / raster
    used_matrix = {} # keep track of used raster elements
    # first place all dashlets at their absolute positions
    positions = []
    for nr, dashlet in enumerate(board["dashlets"]):
        # Relative position is as noted in the declaration. 1,1 => top left origin,
        # -1,-1 => bottom right origin, 0 is not allowed here
        rel_position = vec(dashlet["position"]) # starting from 1, negative means: from right/bottom
        # Compute the absolute position, this time from 0 to rastersize-1
        abs_position = rel_position.make_absolute(rastersize)
        # The size in raster-elements. A 0 for a dimension means growth. No negative values here.
        size = vec(dashlet["size"])
        # Compute the minimum used size for the dashlet. For growth-dimensions we start with 1
        used_size = size.initial_size(rel_position, rastersize)
        # Now compute the rectangle that is currently occupied. The choords
        # of bottomright are *not* included.
        if rel_position[0] > 0:
            left = abs_position[0]
            right = left + used_size[0]
        else:
            right = abs_position[0]
            left = right - used_size[0]
        if rel_position[1] > 0:
            top = abs_position[1]
            bottom = top + used_size[1]
        else:
            bottom = abs_position[1]
            top = bottom - used_size[1]
        # Allocate used squares in matrix. If not all squares we need are free,
        # then the dashboard is too small for all dashlets (as it seems).
        # TEST: place the dashlet at 0/0 when there is no room for it.
        try:
            for x in range(left, right):
                for y in range(top, bottom):
                    if (x,y) in used_matrix:
                        raise Exception()
                    used_matrix[(x,y)] = True
            # Helper variable for how to grow, both x and y in [-1, 0, 1]
            grow_by = rel_position.compute_grow_by(size)
            positions.append((nr, True, left, top, right, bottom, grow_by))
        except:
            positions.append((nr, False, left, top, right, bottom, (0,0)))
    # now resize all elastic dashlets to the max, but only
    # by one raster at a time, in order to be fair
    # NOTE(review): try_resize is dead code — it returns False on its first
    # line, everything below is unreachable (and references undefined
    # xmax/ymax). try_allocate below is the live implementation.
    def try_resize(x, y, width, height):
        return False
        if x + width >= xmax or y + height >= ymax:
            return False
        for xx in range(x, x + width):
            for yy in range(y, y + height):
                if used_matrix[xx][yy]:
                    return False
        for xx in range(x, x + width):
            for yy in range(y, y + height):
                used_matrix[xx][yy] = True
        return True
    # This is WRONG! Actually only the *growth* area needs to be checked
    # for occupancy in the matrix; the current rectangle must be excluded,
    # since it is already allocated. (translated from the original German)
    def try_allocate(left, top, right, bottom):
        # Try if all needed squares are free
        for x in range(left, right):
            for y in range(top, bottom):
                if (x,y) in used_matrix:
                    return False
        # Allocate all needed squares
        for x in range(left, right):
            for y in range(top, bottom):
                used_matrix[(x,y)] = True
        return True
    # Now try to expand all elastic rectangles as far as possible
    at_least_one_expanded = True
    while at_least_one_expanded:
        at_least_one_expanded = False
        new_positions = []
        for (nr, visible, left, top, right, bottom, grow_by) in positions:
            if visible:
                # html.write(repr((nr, left, top, right, bottom, grow_by)))
                # try to grow in X direction by one
                if grow_by[0] > 0 and right < rastersize[0] and try_allocate(right, top, right+1, bottom):
                    at_least_one_expanded = True
                    right += 1
                elif grow_by[0] < 0 and left > 0 and try_allocate(left-1, top, left, bottom):
                    at_least_one_expanded = True
                    left -= 1
                # try to grow in Y direction by one
                if grow_by[1] > 0 and bottom < rastersize[1] and try_allocate(left, bottom, right, bottom+1):
                    at_least_one_expanded = True
                    bottom += 1
                elif grow_by[1] < 0 and top > 0 and try_allocate(left, top-1, right, top):
                    at_least_one_expanded = True
                    top -= 1
            new_positions.append((nr, visible, left, top, right, bottom, grow_by))
        positions = new_positions
    # Convert raster coordinates back to pixels for the Javascript side.
    resize_info = []
    for nr, visible, left, top, right, bottom, grow_by in positions:
        # html.write(repr((nr, left, top, right, bottom, grow_by)))
        # html.write("<br>")
        title = board["dashlets"][nr].get("title")
        if title:
            th = title_height
        else:
            th = 0
        resize_info.append([nr,
                            visible and 1 or 0,
                            left * raster[0],
                            top * raster[1] + th,
                            (right - left) * raster[0],
                            (bottom - top) * raster[1] - th])
    html.write(repr(resize_info))
def dashlet_overview():
    """Dashlet: static welcome text with logo and links to the
    Check_MK / Multisite homepage and documentation."""
    html.write(
        '<table class=dashlet_overview>'
        '<tr><td valign=top>'
        '<a href="http://mathias-kettner.de/check_mk.html"><img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>'
        '</td>'
        '<td><h2>Check_MK Multisite</h2>'
        'Welcome to Check_MK Multisite. If you want to learn more about Multsite, please visit '
        'our <a href="http://mathias-kettner.de/checkmk_multisite.html">online documentation</a>. '
        'Multisite is part of <a href="http://mathias-kettner.de/check_mk.html">Check_MK</a> - an Open Source '
        'project by <a href="http://mathias-kettner.de">Mathias Kettner</a>.'
        '</td>'
    )
    html.write('</tr></table>')
def dashlet_mk_logo():
    """Dashlet: the Check_MK logo linking to the project homepage."""
    link_open = '<a href="http://mathias-kettner.de/check_mk.html">'
    logo_img = '<img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>'
    html.write(link_open + logo_img)
def dashlet_hoststats():
    """Dashlet: pie chart of host states (Up / Down / Unreachable /
    In Downtime).

    Each table row is (label, color, view-URL suffix, Livestatus stats
    query); non-downtime states exclude hosts in scheduled downtime.
    """
    table = [
       ( _("Up"), "#0b3",
       "searchhost&is_host_scheduled_downtime_depth=0&hst0=on",
       "Stats: state = 0\n" \
       "Stats: scheduled_downtime_depth = 0\n" \
       "StatsAnd: 2\n"),
       ( _("Down"), "#f00",
       "searchhost&is_host_scheduled_downtime_depth=0&hst1=on",
       "Stats: state = 1\n" \
       "Stats: scheduled_downtime_depth = 0\n" \
       "StatsAnd: 2\n"),
       ( _("Unreachable"), "#f80",
       "searchhost&is_host_scheduled_downtime_depth=0&hst2=on",
       "Stats: state = 2\n" \
       "Stats: scheduled_downtime_depth = 0\n" \
       "StatsAnd: 2\n"),
       ( _("In Downtime"), "#0af",
       "searchhost&search=1&is_host_scheduled_downtime_depth=1",
       "Stats: scheduled_downtime_depth > 0\n" \
       )
    ]
    # Exclude pseudo-hosts that carry a _REALNAME custom variable.
    filter = "Filter: custom_variable_names < _REALNAME\n"
    render_statistics("hoststats", "hosts", table, filter, "Host Statistics")
def dashlet_servicestats():
    """Dashlet: pie chart of service states.

    Each table row is (label, color, view-URL suffix, Livestatus stats
    query). OK/Warning/Unknown/Critical only count services on checked,
    up hosts outside any downtime; downtimes and down hosts get their
    own slices.
    """
    table = [
       ( _("OK"), "#0b3",
       "searchsvc&hst0=on&st0=on&is_in_downtime=0",
       "Stats: state = 0\n" \
       "Stats: scheduled_downtime_depth = 0\n" \
       "Stats: host_scheduled_downtime_depth = 0\n" \
       "Stats: host_state = 0\n" \
       "Stats: host_has_been_checked = 1\n" \
       "StatsAnd: 5\n"),
       ( _("In Downtime"), "#0af",
       "searchsvc&is_in_downtime=1",
       "Stats: scheduled_downtime_depth > 0\n" \
       "Stats: host_scheduled_downtime_depth > 0\n" \
       "StatsOr: 2\n"),
       ( _("On Down host"), "#048",
       "searchsvc&hst1=on&hst2=on&hstp=on&is_in_downtime=0",
       "Stats: scheduled_downtime_depth = 0\n" \
       "Stats: host_scheduled_downtime_depth = 0\n" \
       "Stats: host_state != 0\n" \
       "StatsAnd: 3\n"),
       ( _("Warning"), "#ff0",
       "searchsvc&hst0=on&st1=on&is_in_downtime=0",
       "Stats: state = 1\n" \
       "Stats: scheduled_downtime_depth = 0\n" \
       "Stats: host_scheduled_downtime_depth = 0\n" \
       "Stats: host_state = 0\n" \
       "Stats: host_has_been_checked = 1\n" \
       "StatsAnd: 5\n"),
       ( _("Unknown"), "#f80",
       "searchsvc&hst0=on&st3=on&is_in_downtime=0",
       "Stats: state = 3\n" \
       "Stats: scheduled_downtime_depth = 0\n" \
       "Stats: host_scheduled_downtime_depth = 0\n" \
       "Stats: host_state = 0\n" \
       "Stats: host_has_been_checked = 1\n" \
       "StatsAnd: 5\n"),
       ( _("Critical"), "#f00",
       "searchsvc&hst0=on&st2=on&is_in_downtime=0",
       "Stats: state = 2\n" \
       "Stats: scheduled_downtime_depth = 0\n" \
       "Stats: host_scheduled_downtime_depth = 0\n" \
       "Stats: host_state = 0\n" \
       "Stats: host_has_been_checked = 1\n" \
       "StatsAnd: 5\n"),
    ]
    # Exclude pseudo-hosts that carry a _REALNAME custom variable.
    filter = "Filter: host_custom_variable_names < _REALNAME\n"
    render_statistics("servicestats", "services", table, filter, "Service Statistics")
def render_statistics(pie_id, what, table, filter, title=None):
    """Query Livestatus for the stats in *table* and draw a pie chart.

    pie_id: DOM id prefix for the chart container.
    what:   Livestatus table name ("hosts" or "services").
    table:  list of (label, color, view-url-suffix, stats-query) rows.
    filter: extra Livestatus filter text appended to the query.
    title:  chart title passed to draw_hchart().

    NOTE: the 'filter' parameter shadows the builtin of the same name.
    """
    html.write("<div class=stats>")
    pie_diameter     = 130
    pie_left_aspect  = 0.5
    pie_right_aspect = 0.8
    # Is the query restricted to a certain WATO-path?
    wato_folder = html.var("wato_folder")
    if wato_folder:
        # filter += "Filter: host_state = 0"
        # Newlines are stripped from the user value to keep the
        # Livestatus query well-formed.
        filter += "Filter: host_filename ~ ^/wato/%s/\n" % wato_folder.replace("\n", "")
    # Is the query restricted to a host contact group?
    host_contact_group = html.var("host_contact_group")
    if host_contact_group:
        filter += "Filter: host_contact_groups >= %s\n" % host_contact_group.replace("\n", "")
    # Is the query restricted to a service contact group?
    service_contact_group = html.var("service_contact_group")
    if service_contact_group:
        filter += "Filter: service_contact_groups >= %s\n" % service_contact_group.replace("\n", "")
    query = "GET %s\n" % what
    for entry in table:
        query += entry[3]
    query += filter
    result = html.live.query_summed_stats(query)
    # NOTE(review): 'pies' is consumed twice below (sum + loop); this
    # relies on zip() returning a list, i.e. Python 2 semantics.
    pies = zip(table, result)
    total = sum([x[1] for x in pies])
    html.write('<div class=pie width=%d height=%d id="%s_stats" style="float: left"></div>' %
            (pie_diameter, pie_diameter, pie_id))
    # html.write('<img src="images/globe.png" class="globe">')
    # html.write('<table class="hoststats%s" style="float:left">' % (
    #     len(pies) > 1 and " narrow" or ""))
    # table_entries = pies
    # while len(table_entries) < 6:
    #     table_entries = table_entries + [ (("", "#95BBCD", "", ""), " ") ]
    # table_entries.append(((_("Total"), "", "all%s" % what, ""), total))
    # for (name, color, viewurl, query), count in table_entries:
    #     url = "view.py?view_name=" + viewurl + "&filled_in=filter&search=1&wato_folder=" \
    #           + htmllib.urlencode(html.var("wato_folder", ""))
    #     if host_contact_group:
    #         url += '&opthost_contactgroup=' + host_contact_group
    #     if service_contact_group:
    #         url += '&optservice_contactgroup=' + service_contact_group
    #     html.write('<tr><th><a href="%s">%s</a></th>' % (url, name))
    #     style = ''
    #     if color:
    #         style = ' style="background-color: %s"' % color
    #     html.write('<td class=color%s>'
    #                '</td><td><a href="%s">%s</a></td></tr>' % (style, url, count))
    # html.write("</table>")
    # Build [label, count] pairs for the Javascript chart library.
    data = []
    for pie in pies:
        typeof = pie[0][0]
        count = pie[1]
        data.append([ typeof, count])
    html.write("</div>")
    html.javascript("""
    draw_hchart(id="%s", type="pie" , title="%s", name="%s", data=%s);
    """ % (pie_id, title, title, data))
def dashlet_pnpgraph():
    """Dashlet: render one PNP graph; site/host/service/source/view are
    taken from the HTML request variables."""
    render_pnpgraph(
        html.var("site"), html.var("host"), html.var("service"),
        int(html.var("source", 0)), int(html.var("view", 0)),
    )
def dashlet_nodata():
    """Dashlet: show a placeholder message (HTML variable 'message',
    default "No data available.") instead of a graph."""
    message = html.var("message", _("No data available."))
    for fragment in ("<div class=nograph><div class=msg>", message, "</div></div>"):
        html.write(fragment)
def render_pnpgraph(site, host, service = None, source = 0, view = 0):
    """Render one PNP4Nagios graph image, linked to its PNP graph page.

    site:    Multisite site id; empty/None means the local site.
    host:    mandatory host name; without it only an error message is shown.
    service: service description; defaults to the host graph ("_HOST_").
    source:  PNP graph source index.
    view:    PNP view (time range) index.
    """
    if not host:
        html.message("Invalid URL to this dashlet. Missing <tt>host</tt>")
        return
    if not service:
        service = "_HOST_"
    # Base URL of the PNP installation serving this site.
    if not site:
        base_url = defaults.url_prefix
    else:
        base_url = html.site_status[site]["site"]["url_prefix"]
    base_url += "pnp4nagios/index.php/"
    # BUG FIX: the original emitted "view=0" *and* "view=%d", producing a
    # duplicate query parameter that only worked because the last value
    # wins. The _t timestamp defeats browser caching.
    var_part = "?host=%s&srv=%s&source=%d&view=%d&theme=multisite&_t=%d" % \
               (pnp_cleanup(host), pnp_cleanup(service), source, view, int(time.time()))
    pnp_url = base_url + "graph" + var_part
    img_url = base_url + "image" + var_part
    html.write('<a href="%s"><img border=0 src="%s"></a>' % (pnp_url, img_url))
# load_plugins()
|
17,722 | 475eee5670ec37008cf3fe271f55545d5473611c |
from django.contrib import admin

from .models import Address, Order, FoodSeeker

# Register each model with the default admin site so it can be browsed
# and edited through the Django admin interface.
for _model in (Address, Order, FoodSeeker):
    admin.site.register(_model)
17,723 | 3bae659c806107d8369d2ef1734e33ec4c1576ec | from scenes.scenebase import sceneBase
from scenes.utils.scrolling_text import DynamicText
from scenes.utils.button import button
from scenes.utils.textbox import TextBox
from scenes.day1 import day1
import pygame
import os
class TitleScene(sceneBase):
    """Title screen: shows the game title and a start button that
    switches to the day1 scene when clicked."""

    def __init__(self):
        sceneBase.__init__(self)
        pygame.font.init()
        self.font = pygame.font.Font('fonts/retro.TTF', 48)
        self.delay = 1000
        self.titletext = 'The Innocent'
        self.title = self.font.render(self.titletext, False, (0, 0, 0))
        self.startbutton = button(400, 100, 0, 0, 'art/button.png')
        # The dialogue textbox is currently disabled; keep the attribute
        # as None so update()/render() can guard on it instead of raising
        # AttributeError.
        self.tb = None
        #self.tb = TextBox(DynamicText(self.font, ['YEEET', 'No uuuuuuuu'], (55, 76), (255, 0, 255)), 'art\dtb.png', 5, 500, 100)

    def update(self):
        #pygame.time.set_timer(pygame.USEREVENT, self.delay)
        self.startbutton.update()
        if self.startbutton.Lclicked:
            self.switchScene(day1())
        # BUG FIX: self.tb was never created (its constructor is commented
        # out) but was dereferenced unconditionally here and in render().
        if self.tb is not None:
            self.tb.update()

    def processInput(self, events, pressed_keys):
        # Title screen reacts only to the button, handled in update().
        pass

    def render(self, screen):
        screen.fill((255, 255, 255))
        # Center the start button and keep its clickable position in sync
        # with where it is drawn.
        screen.blit(self.startbutton.img, (screen.get_width() / 2 - self.startbutton.img.get_width() / 2, screen.get_height() / 2 - self.startbutton.img.get_height() / 2))
        self.startbutton.pos = (screen.get_width() / 2 - self.startbutton.img.get_width() / 2, screen.get_height() / 2 - self.startbutton.img.get_height() / 2)
        # Title text, centered horizontally, 200px above the center.
        screen.blit(self.title, (screen.get_width() / 2 - self.font.size(self.titletext)[0] / 2, screen.get_height() / 2 - self.font.size(self.titletext)[1] / 2 - 200 ))
        if self.tb is not None:
            self.tb.render(screen)
|
17,724 | b5e98b1fdd6eb347d15198bd5799bbb396d8019c | #!/usr/bin/env python
"""
convert dos linefeeds (crlf) to unix (lf)
usage: dos2unix.py
"""
import pickle
import numpy as np
import scipy.io
filename = '20180621_12_50000'
data = pickle.load(open(filename + '.p', 'rb'))
n = 46*92
key_list = range(n)
properties = ['temperature', 'pressure', 'humidity']
data_list = {}
# LatLon values
data_list['latlon'] = data.lonlat[:,::-1]
height = data.height.reshape([31, n]).T
temperature = data.temperature.reshape([31, n]).T
humidity = data.humidity.reshape([31, n]).T
# pressure = data.pressure.reshape([31, n])
wind_x = data.wind_x.reshape([31, n]).T
wind_y = data.wind_y.reshape([31, n]).T
# Storing weather data
data_list['temperature'] = temperature
data_list['humidity'] = humidity
# data_list['pressure'] = np.array([height, pressure]).T
data_list['wind_x'] = wind_x
data_list['wind_y'] = wind_y
data_list['height'] = height
data_list['noise'] = data.noise
data_list['elevation'] = np.array(data.elevation).flatten().T
# Storing noise data
scipy.io.savemat('./' + filename + '.mat', data_list)
|
17,725 | 5f40ce63dabc83dd531423f2e69860be3cc5211e | from threading import Thread
from queue import Queue
__all__ = ['ThreadPool', 'season_map', 'Course', 'CSCurriculumKey', 'ISCurriculumKey']
class CSCurriculumKey:
    """Integer indices for the curriculum-category buckets of the CS
    program; used to group courses by requirement area."""
    INTRODUCTORY = 0
    FOUNDATION = 1
    SOFTWARE_AND_SYSTEMS_DEVELOPMENT = 2
    THEORY = 3
    DATA_SCIENCE = 4
    DATABASE_SYSTEMS = 5
    ARTIFICIAL_INTELLIGENCE = 6
    SOFTWARE_ENGINEERING = 7
    MULTIMEDIA = 8
    RESEARCH_COLLOQUIUM = 9
    MASTERS_RESEARCH = 10
    MASTERS_THESIS = 11
    GRADUATE_INTERNSHIP = 12
    AREAS = 13
    RESEARCH_AND_THESIS_OPTIONS = 14
class ISCurriculumKey:
    """Integer indices for the curriculum-category buckets of the IS
    program; used to group courses by requirement area."""
    INTRODUCTORY = 0
    FOUNDATION = 1
    ADVANCED = 2
    MAJOR_ELECTIVES = 3
    CAPSTONE = 4
    # extra cdm elective
# Map from human-readable academic term names to short term codes: a
# two-letter season tag (Au/Wi/Sp/S0/S1/S2) plus a one-letter year tag
# (E for 2015-2016, O for 2016-2017).
season_map = {
    "Fall 2015-2016": "AuE",
    "Winter 2015-2016": "WiE",
    "Spring 2015-2016": "SpE",
    "Summer 10 week 2015-2016": "S0E",
    "Summer I 2015-2016": "S1E",
    "Summer II 2015-2016": "S2E",
    "Fall 2016-2017": "AuO",
    "Winter 2016-2017": "WiO",
    "Spring 2016-2017": "SpO",
    "Summer 10 week 2016-2017": "S0O",
    "Summer I 2016-2017": "S1O",
    "Summer II 2016-2017": "S2O"
}
class Course:
    """A single course offering plus slots for metadata filled in later
    by the scraper (description, prerequisites, history, priority)."""

    # __slots__ keeps per-instance memory small when many courses are held.
    __slots__ = 'subject', 'num', 'name', 'prerequisites', 'history', 'description', 'priority'

    def __init__(self, subject=None, num=None, name=None):
        # Identity fields supplied by the caller.
        self.subject = subject
        self.num = num
        self.name = name
        # Metadata slots start out empty and are populated later.
        for extra in ('description', 'prerequisites', 'history', 'priority'):
            setattr(self, extra, None)

    def __repr__(self):
        template = "<Course subject={}, num={}, name={}, prerequisites={}, history={}>"
        return template.format(self.subject, self.num, self.name,
                               self.prerequisites, self.history)

    def __str__(self):
        return "({} {})".format(self.subject, self.num)
class Worker(Thread):
    """Daemon thread that endlessly pulls (func, args, kwargs) tasks from
    a shared queue and executes them."""

    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        # Daemonize so workers never block interpreter shutdown.
        self.daemon = True
        self.start()

    def run(self):
        get_task = self.tasks.get
        mark_done = self.tasks.task_done
        while True:
            func, args, kwargs = get_task()
            try:
                func(*args, **kwargs)
            except Exception as err:
                # Report the failure and keep the worker alive for the
                # next task.
                print(err)
            finally:
                # Always acknowledge, so Queue.join() can complete even
                # when a task raised.
                mark_done()
class ThreadPool:
    """Fixed-size pool of Worker threads fed from one shared task queue."""

    def __init__(self, num_threads):
        # Bounding the queue at num_threads throttles producers while all
        # workers are busy (same behaviour as the original).
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Queue one (func, args, kwargs) task; blocks if the queue is full."""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Block until every queued task has been processed."""
        self.tasks.join()
17,726 | d57610f673ed1c9dcb242e9be838e987022374a4 | from selenium import webdriver
# Launch a local Chrome instance driven by Selenium.
browser = webdriver.Chrome()
url = 'https://dynamic2.scrape.cuiqingcai.com/'
browser.get(url)
# Locate the site's logo/title element and dump its basic properties.
# NOTE(review): the name 'input' shadows the builtin of the same name.
input = browser.find_element_by_class_name('logo-title')
print(input.id)
print(input.location)
print(input.tag_name)
print(input.size)
17,727 | 27d039125261bb5b3405c8db40fde57b3caffd0b | import time
import numpy as np
import modern_robotics as mrcl
# Module-level geometry/configuration for a 6-legged (hexapod) robot:
# per-leg joint angles, screw axes, and the fixed transforms between the
# body, hip, knee and end-effector frames. All angles are built in radians.
## base to HIP (mechanically fixed)
theta0 = (330)*(np.pi/180)
theta1 = (270)*(np.pi/180)
theta2 = (210)*(np.pi/180)
theta3 = (150)*(np.pi/180)
theta4 = (90)*(np.pi/180)
theta5 = (30)*(np.pi/180)
thetalist = np.array([theta0,theta1,theta2,theta3,theta4,theta5])
# Hip (phi) and knee (psi) joint angles for legs 0-5, all zeroed here.
phi0 = (0)*(np.pi/180)
phi1 = (0)*(np.pi/180)
phi2 = (0)*(np.pi/180)
phi3 = (0)*(np.pi/180)
phi4 = (0)*(np.pi/180)
phi5 = (0)*(np.pi/180)
psi0 = (0)*(np.pi/180)
psi1 = (0)*(np.pi/180)
psi2 = (0)*(np.pi/180)
psi3 = (0)*(np.pi/180)
psi4 = (0)*(np.pi/180)
psi5 = (0)*(np.pi/180)
## directional angle and magnitude (commanded step: heading + length)
omega = (0)*(np.pi/180)
magnitude = 0.01
## hip to KNEE (each leg 60 degrees from one another)
phinot = 0*((np.pi)/180)
## knee to LEG (30 degrees on servo ~ CHECK!!)
psinot = 0*((np.pi)/180) #140-90-50*((np.pi)/180)
## max rise height (this will go to servo angle 140)
riseheight = 0*((np.pi)/180)
## screw axes (space-frame S and body-frame B lists, stacked column-wise)
Ship = np.array([0, 0, 1, 0, 0, 0])
Sknee = np.array([0, 1, 0, 0, 0, 0.04923984])
Slist = (np.array([Ship, Sknee])).T
BhipTOknee = np.array([0, 0, 1, 0, 0.04923984 + 0.0737185, 0])
BkneeTOEE = np.array([0, 1, 0, 0, 0, 0.0737185])
Blist = (np.array([BhipTOknee, BkneeTOEE])).T
## home configuration for one leg:
legNORM = np.array([[-0.692965, 0.720971, 0, -0.128679],[-0.720971, -0.692965, 0, -0.00616296],[0,0,1,-0.0692727],[0,0,0,1]])
Mleg = np.array([[1, 0, 0, 0.04923984 + 0.0737185], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]);
## directional arrays: rotate by omega, then translate by `magnitude`
direcangle = np.array([[np.cos(omega),np.sin(omega),0,0],[-np.sin(omega),np.cos(omega),0,0],[0,0,1,0],[0,0,0,1]])
direcmagnitude = np.array([[1,0,0,magnitude],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
direcvec = np.dot(direcangle,direcmagnitude)
# Translation column of direcvec = the commanded step as a 3-vector.
direcvecPLOT = np.array([direcvec[0][3],direcvec[1][3],direcvec[2][3]])
## transformation matrices for world frame and initial body
Tworldframe = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
TbodyframeInit = Tworldframe
## home configuration from body to hip for each leg:
# fixed rotation from base frame to each hip joint
TbasecomROT0 = np.array([[np.sqrt(3)/2,1/2,0,0],[-1/2,np.sqrt(3)/2,0,0],[0,0,1,0],[0,0,0,1]])
TbasecomROT1 = np.array([[0,1,0,0],[-1,0,0,0],[0,0,1,0],[0,0,0,1]])
TbasecomROT2 = np.array([[-np.sqrt(3)/2,1/2,0,0],[-1/2,-np.sqrt(3)/2,0,0],[0,0,1,0],[0,0,0,1]])
TbasecomROT3 = np.array([[-np.sqrt(3)/2,-1/2,0,0],[1/2,-np.sqrt(3)/2,0,0],[0,0,1,0],[0,0,0,1]])
TbasecomROT4 = np.array([[0,-1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]])
TbasecomROT5 = np.array([[np.sqrt(3)/2,-1/2,0,0],[1/2,np.sqrt(3)/2,0,0],[0,0,1,0],[0,0,0,1]])
# translation from base frame to hip joint
TbasecomTOhipjoint = np.array([[1,0,0,0.0578104],[0,1,0,0],[0,0,1,-0.009611],[0,0,0,1]])
# transformation from base frame to each hip joint
TbaseTOhip0 = np.dot(TbasecomROT0,TbasecomTOhipjoint)
TbaseTOhip1 = np.dot(TbasecomROT1,TbasecomTOhipjoint)
TbaseTOhip2 = np.dot(TbasecomROT2,TbasecomTOhipjoint)
TbaseTOhip3 = np.dot(TbasecomROT3,TbasecomTOhipjoint)
TbaseTOhip4 = np.dot(TbasecomROT4,TbasecomTOhipjoint)
TbaseTOhip5= np.dot(TbasecomROT5,TbasecomTOhipjoint)
# rotation about hip joint (z-axis, angle phi_i) — original comment said
# "knee joint", but these use phi and the ThipjointROT* names
ThipjointROT0 = np.array([[np.cos(phi0),np.sin(phi0),0,0],[-np.sin(phi0),np.cos(phi0),0,0],[0,0,1,0],[0,0,0,1]])
ThipjointROT1 = np.array([[np.cos(phi1),np.sin(phi1),0,0],[-np.sin(phi1),np.cos(phi1),0,0],[0,0,1,0],[0,0,0,1]])
ThipjointROT2 = np.array([[np.cos(phi2),np.sin(phi2),0,0],[-np.sin(phi2),np.cos(phi2),0,0],[0,0,1,0],[0,0,0,1]])
ThipjointROT3 = np.array([[np.cos(phi3),np.sin(phi3),0,0],[-np.sin(phi3),np.cos(phi3),0,0],[0,0,1,0],[0,0,0,1]])
ThipjointROT4 = np.array([[np.cos(phi4),np.sin(phi4),0,0],[-np.sin(phi4),np.cos(phi4),0,0],[0,0,1,0],[0,0,0,1]])
ThipjointROT5 = np.array([[np.cos(phi5),np.sin(phi5),0,0],[-np.sin(phi5),np.cos(phi5),0,0],[0,0,1,0],[0,0,0,1]])
# transformation from hip joint to knee joint
ThipjointTOkneejoint = np.array([[1,0,0,0.04923984],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
# rotation about knee joint (y-axis, angle psi_i)
TkneejointROT0 = np.array([[np.cos(psi0),0,np.sin(psi0),0],[0,1,0,0],[-np.sin(psi0),0,np.cos(psi0),0],[0,0,0,1]])
TkneejointROT1 = np.array([[np.cos(psi1),0,np.sin(psi1),0],[0,1,0,0],[-np.sin(psi1),0,np.cos(psi1),0],[0,0,0,1]])
TkneejointROT2 = np.array([[np.cos(psi2),0,np.sin(psi2),0],[0,1,0,0],[-np.sin(psi2),0,np.cos(psi2),0],[0,0,0,1]])
TkneejointROT3 = np.array([[np.cos(psi3),0,np.sin(psi3),0],[0,1,0,0],[-np.sin(psi3),0,np.cos(psi3),0],[0,0,0,1]])
TkneejointROT4 = np.array([[np.cos(psi4),0,np.sin(psi4),0],[0,1,0,0],[-np.sin(psi4),0,np.cos(psi4),0],[0,0,0,1]])
TkneejointROT5 = np.array([[np.cos(psi5),0,np.sin(psi5),0],[0,1,0,0],[-np.sin(psi5),0,np.cos(psi5),0],[0,0,0,1]])
# translation from knee joint to end-effector
TkneejointTOEE = np.array([[1,0,0,0.0737185],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
## transformation matrices for new body frame (b')
# NOTE(review): TbodyframeNew aliases TbodyframeInit (which aliases
# Tworldframe), so the in-place column edits below mutate all three —
# confirm this sharing is intended.
TbodyframeNew = TbodyframeInit
TbodyframeNew[0][3] = (TbodyframeNew[0][3] + direcvecPLOT[0])
TbodyframeNew[1][3] = (TbodyframeNew[1][3] + direcvecPLOT[1])
TbodyframeNew[2][3] = (TbodyframeNew[2][3] + direcvecPLOT[2])
Tbprimeb = mrcl.TransInv(TbodyframeNew)
TbtoHIP = TbasecomTOhipjoint
TbprimeHIP = np.dot(Tbprimeb,TbtoHIP)
# representation of hip_ in (b')
TbodyTOhip0 = np.dot(TbasecomROT0,TbprimeHIP)
TbodyTOhip1 = np.dot(TbasecomROT1,TbprimeHIP)
TbodyTOhip2 = np.dot(TbasecomROT2,TbprimeHIP)
TbodyTOhip3 = np.dot(TbasecomROT3,TbprimeHIP)
TbodyTOhip4 = np.dot(TbasecomROT4,TbprimeHIP)
TbodyTOhip5 = np.dot(TbasecomROT5,TbprimeHIP)
# function for assigning all new joint angles
def newAnglesAssign(newphi0,newphi1,newphi2,newphi3,newphi4,newphi5,newpsi0,newpsi1,newpsi2,newpsi3,newpsi4,newpsi5):
    """Convert per-leg hip (phi) and knee (psi) angles from degrees to
    radians and pack them into one np.array([phi_i, psi_i]) per leg.

    Returns a 6-tuple of 2-element arrays (radians), one per leg 0-5.

    Bug fix: the original computed radian values (newPhi0..5) but then
    packed the *degree* inputs newphi0..5 into the angle lists, mixing a
    degree hip angle with a radian knee angle. Both entries are now in
    radians.
    """
    deg2rad = np.pi / 180.0
    phis = (newphi0, newphi1, newphi2, newphi3, newphi4, newphi5)
    psis = (newpsi0, newpsi1, newpsi2, newpsi3, newpsi4, newpsi5)
    # store new angles in a list per leg, all in radians
    angle_lists = tuple(np.array([phi * deg2rad, psi * deg2rad])
                        for phi, psi in zip(phis, psis))
    return angle_lists
# Forward kinematics (space frame) for all six legs at the given angles.
def FKallSpace(aList0,aList1,aList2,aList3,aList4,aList5):
    """Return the FK end-effector pose of each leg via mrcl.FKinSpace,
    using the shared home configuration Mleg and screw axes Slist."""
    all_angles = (aList0, aList1, aList2, aList3, aList4, aList5)
    return tuple(mrcl.FKinSpace(Mleg, Slist, angles)
                 for angles in all_angles)
# Express each leg's FK end-effector pose in the shifted body frame (b').
def convertFKtoBodyFrame(newP0,newP1,newP2,newP3,newP4,newP5):
    """Pre-multiply each hip-frame pose by its leg's body-to-hip
    transform (module-level TbodyTOhip0..5)."""
    hip_transforms = (TbodyTOhip0, TbodyTOhip1, TbodyTOhip2,
                      TbodyTOhip3, TbodyTOhip4, TbodyTOhip5)
    poses = (newP0, newP1, newP2, newP3, newP4, newP5)
    return tuple(np.dot(T, P) for T, P in zip(hip_transforms, poses))
# Subtract the commanded step vector from each end-effector pose (in b').
def calcStep(newP0b,newP1b,newP2b,newP3b,newP4b,newP5b,direcvecPLOT):
    """Shift every leg's end-effector translation by -direcvecPLOT.

    The six 4x4 pose matrices are mutated in place and returned in the
    same order — this matches the original, which aliased each input and
    wrote through the alias. The 24 copy-pasted assignments are
    collapsed into a loop.
    """
    poses = (newP0b, newP1b, newP2b, newP3b, newP4b, newP5b)
    for pose in poses:
        for axis in range(3):
            # column 3 holds the translation; step applied with unit gain
            pose[axis][3] = pose[axis][3] - direcvecPLOT[axis]
    return poses
# Map each stepped end-effector target from (b') back into its hip frame
# so it can be fed to inverse kinematics.
def convertPointBprimeFORIK(newP0bq,newP1bq,newP2bq,newP3bq,newP4bq,newP5bq):
    """Pre-multiply each target by the inverse body-to-hip transform."""
    hip_transforms = (TbodyTOhip0, TbodyTOhip1, TbodyTOhip2,
                      TbodyTOhip3, TbodyTOhip4, TbodyTOhip5)
    targets = (newP0bq, newP1bq, newP2bq, newP3bq, newP4bq, newP5bq)
    return tuple(np.dot(mrcl.TransInv(T), P)
                 for T, P in zip(hip_transforms, targets))
# Numerical inverse kinematics (space frame) for all six legs.
def IKallSpace(Tpl0,Tpl1,Tpl2,Tpl3,Tpl4,Tpl5,aList0,aList1,aList2,aList3,aList4,aList5):
    """Solve IK for each leg target, seeded with that leg's current
    joint angles; tolerances 0.01 (omega) and 0.01 (velocity)."""
    targets = (Tpl0, Tpl1, Tpl2, Tpl3, Tpl4, Tpl5)
    guesses = (aList0, aList1, aList2, aList3, aList4, aList5)
    return tuple(mrcl.IKinSpace(Slist, Mleg, target, guess, 0.01, 0.01)
                 for target, guess in zip(targets, guesses))
def main():
    """Run one gait step: FK -> body frame -> apply step offset -> IK,
    then print the old and new joint angles."""
    aList0,aList1,aList2,aList3,aList4,aList5 = newAnglesAssign(0,0,0,0,0,0,-100,-100,-100,-100,-100,-100)
    FKleg0,FKleg1,FKleg2,FKleg3,FKleg4,FKleg5 = FKallSpace(aList0,aList1,aList2,aList3,aList4,aList5)
    FKbprime0,FKbprime1,FKbprime2,FKbprime3,FKbprime4,FKbprime5 = convertFKtoBodyFrame(FKleg0,FKleg1,FKleg2,FKleg3,FKleg4,FKleg5)
    newPbpq0,newPbpq1,newPbpq2,newPbpq3,newPbpq4,newPbpq5 = calcStep(FKbprime0,FKbprime1,FKbprime2,FKbprime3,FKbprime4,FKbprime5,direcvecPLOT)
    Tpinleg0, Tpinleg1, Tpinleg2, Tpinleg3, Tpinleg4, Tpinleg5 = convertPointBprimeFORIK(newPbpq0,newPbpq1,newPbpq2,newPbpq3,newPbpq4,newPbpq5)
    newaList0,newaList1,newaList2,newaList3,newaList4,newaList5 = IKallSpace(Tpinleg0, Tpinleg1, Tpinleg2, Tpinleg3, Tpinleg4, Tpinleg5,aList0,aList1,aList2,aList3,aList4,aList5)
    # Python 3 print() calls — the original used Python 2 print
    # statements, which are a SyntaxError on Python 3.
    print(aList0, aList1, aList2, aList3, aList4, aList5)
    print(newaList0, newaList1, newaList2, newaList3, newaList4, newaList5)
# Script entry point: run one gait step, allowing a clean Ctrl-C exit.
try:
    main()
except KeyboardInterrupt:
    print('\nExiting.')
    exit()
17,728 | d6e648975040a6ecf869e8fa70b0739d0c025bfd | # Import celery when Django starts for shared_tasks
from .celery import app as celery_app
|
17,729 | bb0a87eda7c17d057247023df0982161e88c360a | from typing import List
def perform_operation(stack: list, num: int) -> list:
    """Fold *num* into *stack*, applying all pending operators.

    Used for equal '+'/'*' precedence (AoC 2020 day 18 part 1): pops
    (operator, left-operand) pairs off the top until the stack is empty
    or an opening bracket is reached, then pushes the accumulated value.

    Fix: the original annotated the return as List[str], but the stack
    holds ints mixed with operator/bracket strings.
    """
    while stack:
        if stack[-1] == '(':
            break
        operator, left_operand = stack[-1], stack[-2]
        stack = stack[:-2]
        if operator == '+':
            num = left_operand + num
        elif operator == '*':
            num = left_operand * num
    stack.append(num)
    return stack
def perform_advance_operation(stack: list, num: int, close_bracket: bool) -> list:
    """Fold *num* into *stack* with '+' binding tighter than '*'
    (AoC 2020 day 18 part 2).

    '+' is applied immediately; '*' is deferred on the stack. When
    close_bracket is True, all deferred '*' factors back to the most
    recent '(' (or the whole stack) are first multiplied into num.

    Fix: the original annotated the return as List[str], but the stack
    holds ints mixed with operator/bracket strings.
    """
    # Perform deferred "*" operations
    if close_bracket:
        ix = -1
        # Find the last position of an open bracket - "("
        for sx, s in enumerate(stack):
            if s == '(':
                ix = sx
        if ix != -1:
            target_stack = stack[ix+1:]
            stack = stack[:ix]
        else:
            target_stack = stack
            stack = []
        for s in target_stack:
            if s != '*':  # only "*" and digits are in the target stack
                num *= int(s)
    # Perform pending "+" operations (stop at "(" or a deferred "*")
    while stack:
        if stack[-1] in '(*':
            break
        operator, left_operand = stack[-1], stack[-2]
        stack = stack[:-2]
        if operator == '+':
            num = left_operand + num
    stack.append(num)
    return stack
def calculate(expression: str, advance: bool) -> int:
    """Evaluate *expression* (single-digit operands, '+', '*', brackets).

    advance=False: '+' and '*' share precedence (part 1).
    advance=True:  '+' binds tighter than '*' (part 2).
    Any other character (e.g. spaces) is ignored.
    """
    stack = []
    for ch in expression:
        if ch in '1234567890':
            value = int(ch)
            # Fold the digit into the stack via the pending operators.
            if advance:
                stack = perform_advance_operation(stack, value,
                                                  close_bracket=False)
            else:
                stack = perform_operation(stack, value)
        elif ch in '*+(':
            stack.append(ch)
        elif ch == ')':
            value = int(stack[-1])
            if advance:
                # Drop the value, then collapse back to the matching "(".
                stack = perform_advance_operation(stack[:-1], value,
                                                  close_bracket=True)
            else:
                # Drop the value and its matching "(" before folding.
                stack = perform_operation(stack[:-2], value)
    if advance and len(stack) > 1:
        # Flush any "*" operations still deferred at the top level.
        stack = perform_advance_operation(stack[:-1], int(stack[-1]),
                                          close_bracket=True)
    return int(stack[0])  # exactly one value remains at the end
def part1(expressions):
    """Sum every expression evaluated with equal '+'/'*' precedence."""
    return sum(calculate(expression, advance=False)
               for expression in expressions)
def part2(expressions):
    """Sum every expression evaluated with '+' binding tighter than '*'."""
    return sum(calculate(expression, advance=True)
               for expression in expressions)
# Read one expression per line. The original wrapped splitlines() in an
# identity list comprehension (flake8-comprehensions C416) — removed.
with open('input.txt') as f:
    inputs = f.read().splitlines()

print(part1(inputs))
print(part2(inputs))
17,730 | 26a225fbf6b4b6f1bc6d7e68fb2cfaeda361f33a | from django.shortcuts import HttpResponse, redirect, render
from django.contrib.auth.decorators import login_required, user_passes_test
from .models import *
from xlsxwriter.workbook import Workbook
import csv
# -------------- user functions---------------------
def is_sub_admin(user):
    """Return True iff *user* has a department mapping flagged as
    sub-admin; False when the lookup fails for any reason.

    Fix: the original used a bare `except:`, which also swallowed
    SystemExit/KeyboardInterrupt; `except Exception` keeps the intended
    best-effort behaviour (no/ambiguous mapping -> not a sub-admin).
    """
    try:
        return UserDepartmentMapping.objects.get(User=user).Is_Sub_Admin
    except Exception:
        return False
def get_user_dep(user):
    """Return the Department mapped to *user*, or the sentinel -1 when
    no mapping exists (callers test against -1, see is_authorized).

    Fix: bare `except:` narrowed to `except Exception` so interpreter
    exits are no longer swallowed.
    """
    try:
        return UserDepartmentMapping.objects.get(User=user).Department
    except Exception:
        return -1
def is_authorized(user):
    """A user is authorized iff they map to some department
    (get_user_dep returns -1 when there is no mapping)."""
    return get_user_dep(user) != -1
def get_lab_cost(lab):
    """Total cost of a lab: for each computer/equipment item located in
    the lab, add its invoice total split evenly across every item
    (computers + equipment + software) bought on that invoice."""
    lab_record = Laboratory.objects.get(id=lab)
    computers = Computer.objects.filter(Location=lab_record.id)
    equipment = Equipment.objects.filter(Location=lab_record.id)
    running_total = 0
    for queryset in (computers, equipment):
        for asset in queryset:
            invoice = asset.Invoice
            # Number of items sharing this invoice's total.
            item_count = (
                len(Computer.objects.filter(Invoice=invoice.id))
                + len(Equipment.objects.filter(Invoice=invoice.id))
                + len(Software.objects.filter(Invoice=invoice.id))
            )
            running_total += invoice.Total_Cost_With_VAT / item_count
    return running_total
# --------------csv---------------------
def writeRow(obj, last_row, row):
    """Write *row*'s values into worksheet *obj* on row index *last_row*,
    one value per column starting at column 0.

    Idiom fix: `for i in range(len(row))` replaced with enumerate.
    """
    for col, value in enumerate(row):
        obj.write(last_row, col, value)
@login_required
@user_passes_test(is_authorized, login_url='not_allowed')
def getlabRep(request, lab, lab_attrs, epq_l, epq_attrs, comp_l, comp_attrs, extra):
    """Stream an .xlsx report of a lab's computers and equipment.

    Rows that share an invoice get the invoice columns merged vertically
    (via `last_empty`, the row where the invoice first appeared).
    NOTE(review): `lab_attrs`, `comp_attrs` and `extra` are accepted but
    never used in this body — confirm whether callers rely on them.
    """
    # Invoice columns appended to the right of each item's own attributes.
    extra_attrs = ['Invoice', 'Supplier_Info', 'Date_YYYYMMDD',
                   'GI_No', 'Rate_With_VAT', 'Total_Cost_With_VAT']
    # Drop the last equipment attribute from the header/data rows.
    epq_attrs = epq_attrs[:-1]
    response = HttpResponse(
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = "attachment; filename=lab Details.xlsx"
    # Build the workbook directly into the HTTP response.
    book = Workbook(response, {'in_memory': True})
    sheet = book.add_worksheet('test')
    # sheet.write(0, 0, 'Hello, world!')
    cell_format = book.add_format({'bold': True})
    cell_format.set_align('center')
    cell_format.set_valign('center')
    c = 0
    # Title banner rows (xlsxwriter rows are 0-based; ranges are A1-style).
    sheet.merge_range(
        'A1:O1', 'K. J. SOMAIYA INSTITUTE OF ENGINEERING AND INFORMATION TECHNOLOGY Sion, Mumbai - 400022.', cell_format)
    sheet.merge_range(
        'A2:O2', str(lab.Department), cell_format)
    sheet.merge_range(
        'A3:O3', 'lab :' + lab.Name, cell_format)
    sheet.merge_range(
        'A4:O4', 'Epuipmets in Lab', cell_format)
    writeRow(sheet, 4, epq_attrs + extra_attrs)
    c = 5
    # looping for both comp and epq
    for one_ls in [comp_l, epq_l]:
        # `last` is reset per queryset, so the first row always takes the
        # else-branch and initialises `last_empty` before any merge.
        last = None
        for epq in one_ls:
            # create the data row from the item's own attributes
            ls = []
            for epq_attr in epq_attrs:
                ls.append(getattr(epq, epq_attr))
            # write data row; merge invoice columns when the invoice repeats
            if last == epq.Invoice:
                writeRow(sheet, c, [str(i) for i in ls])
                col = 9  # invoice columns start at column index 9
                for e in [getattr(epq.Invoice, attr) for attr in extra_attrs]:
                    sheet.merge_range(last_empty, col, c, col, e)
                    col += 1
            else:
                last_empty = c
                writeRow(sheet, c, [
                    str(i) for i in ls + [getattr(epq.Invoice, attr) for attr in extra_attrs]])
            c += 1
            last = epq.Invoice
        c += 1
    # summary rows at the bottom
    for row in [
        ['Total Rs:', get_lab_cost(lab.id)],
        ['Lab Incharge :', lab.Lab_Incharge],
        ['Lab Assistant 1 :', lab.Lab_Assistant_1],
        ['Lab Assistant 2 :', lab.Lab_Assistant_2]
    ]:
        writeRow(sheet, c, row)
        c += 1
    book.close()
    return response
def _write_csv_section(writer, title, attrs, items):
    """Write one titled CSV section: blank row, [title], blank row, a
    header row of attribute names, then one row of values per item."""
    writer.writerow('')
    writer.writerow([title])
    writer.writerow('')
    writer.writerow(attrs)
    for item in items:
        writer.writerow([getattr(item, attr) for attr in attrs])


@ login_required
@ user_passes_test(is_authorized, login_url='not_allowed')
def getPurchaseRep(request, purch, purch_attrs, epq_l, epq_attrs, comp_l, comp_attrs, soft_l, soft_attrs):
    """Stream a CSV report for one purchase invoice.

    Layout: invoice header, an attribute/value detail section, then one
    section each for the equipment, computers and software bought on the
    invoice. The three copy-pasted section loops of the original now
    share _write_csv_section.
    """
    filename = purch.Invoice + ' detail.csv'
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=' + filename
    writer = csv.writer(response)
    writer.writerow(['Invoice :', purch.Invoice])
    writer.writerow('')
    writer.writerow(['Invoice Detail'])
    writer.writerow('')
    # One [attribute, value] row per invoice attribute.
    for purch_attr in purch_attrs:
        writer.writerow([purch_attr, getattr(purch, purch_attr)])
    # Section titles kept byte-identical (including the 'Epuipmets' typo)
    # so downstream consumers of the CSV are unaffected.
    for title, attrs, items in (
            ('Epuipmets in Invoice', epq_attrs, epq_l),
            ('Computers in Invoice', comp_attrs, comp_l),
            ('Software in Invoice', soft_attrs, soft_l)):
        _write_csv_section(writer, title, attrs, items)
    return response
# -------------------common function --------------
@ login_required
@ user_passes_test(is_authorized, login_url='not_allowed')
def DataListView(request, Obj, attr_names, table_name, detail_url, create_url):
    """Render a generic table of *Obj* rows, newest first.

    Models with a Department field are filtered to the requesting user's
    department; models without one fall back to an unfiltered listing.

    Fixes: leftover debug `print(i)` loop removed; bare `except:`
    narrowed to Exception.
    """
    dep = get_user_dep(request.user)
    try:
        data_list = Obj.objects.filter(Department=dep).order_by('-id')
    except Exception:
        # Model has no Department field -> list everything.
        data_list = Obj.objects.order_by('-id')
    context = {
        'data_list': data_list,
        'attr_names': attr_names,
        'detail_url': detail_url,
        'create_url': create_url,
        'table_name': table_name,
    }
    return render(request, 'repo/common_table.html', context)
@ login_required
@ user_passes_test(is_authorized, login_url='not_allowed')
@ user_passes_test(is_sub_admin, login_url='not_allowed')
def DataCreateView(request, dataForm, redirect_url, initial=None):
    """Generic create view: GET renders an empty form (optionally
    pre-filled via *initial*), POST validates, saves and redirects.

    Fix: `initial={}` was a mutable default argument (shared across
    calls); it now defaults to None with an equivalent fallback, so
    caller-visible behaviour is unchanged.
    """
    if initial is None:
        initial = {}
    form = dataForm(initial=initial)
    if request.method == 'POST':
        form = dataForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect(redirect_url)
    context = {'form': form}
    return render(request, 'repo/create.html', context)
@ login_required
@ user_passes_test(is_authorized, login_url='not_allowed')
@ user_passes_test(is_sub_admin, login_url='not_allowed')
def DataUpdateView(request, num, Obj, dataForm, redirect_url):
    """Generic edit view for the *Obj* row whose id is *num*."""
    instance = Obj.objects.get(id=num)
    form = dataForm(instance=instance)
    if request.method == 'POST':
        form = dataForm(request.POST, instance=instance)
        if form.is_valid():
            form.save()
            return redirect(redirect_url, num)
    return render(request, 'repo/create.html', {'form': form})
@ login_required
@ user_passes_test(is_authorized, login_url='not_allowed')
@ user_passes_test(is_sub_admin, login_url='not_allowed')
def DataDeleteView(request, num, Obj, redirect_url):
    """Generic delete-confirmation view for the *Obj* row with id *num*:
    GET shows the confirmation page, POST deletes and redirects."""
    record = Obj.objects.get(id=num)
    if request.method == 'POST':
        record.delete()
        return redirect(redirect_url)
    return render(request, 'repo/delete.html', {'item': record})
17,731 | 80a68b772d7332db626d17884b5e225e16a2e85d | # Generated by Django 2.2.6 on 2019-10-09 18:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (header says 2.2.6):
    # adds a required FileField to the s3objects model.
    # preserve_default=False records that the default below was a one-off
    # value for existing rows, not a model-level default.
    # NOTE(review): default=None for a non-nullable FileField — confirm
    # existing rows were backfilled before this migration ran.
    dependencies = [
        ('s3objects', '0004_auto_20191009_1058'),
    ]
    operations = [
        migrations.AddField(
            model_name='s3objects',
            name='file',
            field=models.FileField(default=None, upload_to=''),
            preserve_default=False,
        ),
    ]
17,732 | a57228cc192e513ee98a3f20d862e27030585384 | import pytest
import numpy as np
from scipy import linalg
import pandas as pd
from linkalman.core import Filter
from copy import deepcopy
from linkalman.core.utils import *
# Test _joseph_form
def test_joseph_form(ft_ar1, theta_ar1, Yt_1d):
    """
    Test normal run
    """
    # Expected = L @ P @ L.T + KRK for the 2x2 inputs below
    # (hand-checkable: L P L^T = [[105,187],[187,333]], +1 everywhere).
    kf = Filter(ft_ar1, Yt_1d, for_smoother=True)
    L = np.array([[2, 3], [4, 5]])
    P = np.array([[3, 4], [4, 5]])
    KRK = np.ones([2, 2])
    result = kf._joseph_form(L, P, KRK=KRK)
    expected_result = np.array([[106, 188], [188, 334]])
    np.testing.assert_array_equal(result, expected_result)
# Test init
def test_init_attr_input(ft_mvar, theta_mvar, Yt_mvar, Xt_mvar):
    """
    Test normal run
    """
    # After init_attr, L_star_t has one entry per time step, sized to
    # the measurement dimension.
    kf = Filter(ft_mvar, Yt_mvar, Xt_mvar, for_smoother=True)
    kf.init_attr(theta_mvar)
    assert len(kf.L_star_t) == len(Yt_mvar) and \
        len(kf.L_star_t[0]) == Yt_mvar[0].shape[0]
def test_init_attr_diffuse(ft_mvar, theta_mvar_diffuse, Yt_mvar, Xt_mvar):
    """
    Test if init_attr for diffuse
    """
    # Diffuse initialisation: q counts diffuse states; L0_t is allocated.
    kf = Filter(ft_mvar, Yt_mvar, Xt_mvar, for_smoother=True)
    kf.init_attr(theta_mvar_diffuse)
    assert kf.q == 1 and \
        len(kf.L0_t[0]) == Yt_mvar[0].shape[0]
# Test _LDL: the filter's LDL' decomposition of R_t after reordering
# measurements so observed entries come first (missing entries moved to
# the back). Each test below checks the permuted R, its L factor, the
# transformed Y/H and the recorded partition index for one time step.
def test_LDL(ft_mvar, theta_mvar_diffuse, Yt_mvar, Xt_mvar):
    """
    Test normal run
    """
    # t=0: all three measurements observed -> identity permutation.
    kf = Filter(ft_mvar, Yt_mvar, Xt_mvar, for_smoother=True)
    kf.init_attr(theta_mvar_diffuse)
    Y_t, H_t, D_t, R_t, L_t, L_inv = kf._LDL(0)
    assert kf.n_t[0] == 3
    R_t_move = np.array([[3, 2, 1],
                         [2, 4, 3],
                         [1, 3, 6]])
    L_t_expected, R_t_expected, _ = linalg.ldl(R_t_move)
    L_inv_expected, _ = linalg.lapack.dtrtri(
        L_t_expected, lower=True)
    np.testing.assert_array_equal(L_t, L_t_expected)
    np.testing.assert_array_equal(R_t, R_t_expected)
    Y_t_expected = linalg.pinv(L_t_expected).dot(
        np.array([1, 2, 2.1]).reshape(-1, 1))
    np.testing.assert_array_almost_equal(Y_t, Y_t_expected)
    H_t_expected = L_inv_expected.dot(
        np.array([1, 2, 2.4]).reshape(-1, 1))
    np.testing.assert_array_almost_equal(H_t, H_t_expected)
    expected_partitioned_index = np.array([0, 1, 2])
    np.testing.assert_array_equal(kf.partitioned_index[0],
            expected_partitioned_index)
def test_LDL_first_missing(ft_mvar, theta_mvar_diffuse, Yt_mvar, Xt_mvar):
    """
    Test when first measurement is missing
    """
    # t=1: measurement 0 missing -> permutation [1, 2, 0], n_t == 2.
    kf = Filter(ft_mvar, Yt_mvar, Xt_mvar, for_smoother=True)
    kf.init_attr(theta_mvar_diffuse)
    Y_t, H_t, D_t, R_t, L_t, L_inv = kf._LDL(1)
    assert kf.n_t[1] == 2
    R_t_move = np.array([[4, 3, 2],
                         [3, 6, 1],
                         [2, 1, 3]])
    L_t_expected, R_t_expected, _ = linalg.ldl(R_t_move)
    L_inv_expected, _ = linalg.lapack.dtrtri(
        L_t_expected, lower=True)
    np.testing.assert_array_equal(L_t, L_t_expected)
    np.testing.assert_array_equal(R_t, R_t_expected)
    Y_t_expected = linalg.pinv(L_t_expected).dot(
        np.array([2.2, 3, 0]).reshape(-1, 1))
    np.testing.assert_array_almost_equal(Y_t, Y_t_expected)
    H_t_expected = L_inv_expected.dot(
        np.array([2, 2.4, 1]).reshape(-1, 1))
    np.testing.assert_array_almost_equal(H_t, H_t_expected)
    expected_partitioned_index = np.array([1, 2, 0])
    np.testing.assert_array_equal(kf.partitioned_index[1],
            expected_partitioned_index)
def test_LDL_full_missing(ft_mvar, theta_mvar_diffuse, Yt_mvar, Xt_mvar):
    """
    Test when all measurements are missing
    """
    # t=2: nothing observed -> n_t == 0, Y_t collapses to zeros.
    kf = Filter(ft_mvar, Yt_mvar, Xt_mvar, for_smoother=True)
    kf.init_attr(theta_mvar_diffuse)
    Y_t, H_t, D_t, R_t, L_t, L_inv = kf._LDL(2)
    assert kf.n_t[2] == 0
    R_t_move = np.array([[3, 2, 1],
                         [2, 4, 3],
                         [1, 3, 6]])
    L_t_expected, R_t_expected, _ = linalg.ldl(R_t_move)
    L_inv_expected, _ = linalg.lapack.dtrtri(
        L_t_expected, lower=True)
    np.testing.assert_array_equal(L_t, L_t_expected)
    np.testing.assert_array_equal(R_t, R_t_expected)
    Y_t_expected = linalg.pinv(L_t_expected).dot(
        np.array([0, 0, 0]).reshape(-1, 1))
    np.testing.assert_array_almost_equal(Y_t, Y_t_expected)
    H_t_expected = L_inv_expected.dot(
        np.array([1, 2, 2.4]).reshape(-1, 1))
    np.testing.assert_array_almost_equal(H_t, H_t_expected)
    expected_partitioned_index = np.array([0, 1, 2])
    np.testing.assert_array_equal(kf.partitioned_index[2],
            expected_partitioned_index)
def test_LDL_middle_missing(ft_mvar, theta_mvar_diffuse, Yt_mvar, Xt_mvar):
    """
    Test when middle measurement is missing
    """
    # t=3: measurement 1 missing -> permutation [0, 2, 1], n_t == 2.
    kf = Filter(ft_mvar, Yt_mvar, Xt_mvar, for_smoother=True)
    kf.init_attr(theta_mvar_diffuse)
    Y_t, H_t, D_t, R_t, L_t, L_inv = kf._LDL(3)
    assert kf.n_t[3] == 2
    R_t_move = np.array([[3, 1, 2],
                         [1, 6, 3],
                         [2, 3, 4]])
    L_t_expected, R_t_expected, _ = linalg.ldl(R_t_move)
    L_inv_expected, _ = linalg.lapack.dtrtri(
        L_t_expected, lower=True)
    np.testing.assert_array_equal(L_t, L_t_expected)
    np.testing.assert_array_equal(R_t, R_t_expected)
    Y_t_expected = linalg.pinv(L_t_expected).dot(
        np.array([2, 3.2, 0]).reshape(-1, 1))
    np.testing.assert_array_almost_equal(Y_t, Y_t_expected)
    H_t_expected = L_inv_expected.dot(
        np.array([1, 2.4, 2]).reshape(-1, 1))
    np.testing.assert_array_almost_equal(H_t, H_t_expected)
    expected_partitioned_index = np.array([0, 2, 1])
    np.testing.assert_array_equal(kf.partitioned_index[3],
            expected_partitioned_index)
# Test sequential_update: each test replays the filter up to time t and
# re-derives the expected one-step Kalman update (gain K, innovation v,
# updated state/covariance and their time-t+1 predictions) by hand from
# the model matrices, then compares against the filter's stored values.
def test_sequential_update_uni(ft_rw_1, theta_rw, Yt_1d, Xt_1d):
    """
    Test normal run in univariate case
    """
    # Scalar random walk: closed-form scalar Kalman update at t=0.
    t = 0
    index = 1
    ob = index - 1
    kf = Filter(ft_rw_1, Yt_1d, Xt_1d, for_smoother=True)
    kf.init_attr(theta_rw)
    kf._sequential_update(t)
    K = kf.P_star_t[t][ob] / (kf.P_star_t[t][ob] + kf.Rt[t][ob][ob])
    v = kf.Yt[t][ob] - kf.xi_t[t][ob] - kf.Dt[t][ob].dot(kf.Xt[t])
    expected_xi_t_11 = kf.xi_t[t][ob] + K * v
    expected_P_t_11 = kf.P_star_t[t][ob].dot(kf.Rt[t][ob][ob]) / (
        kf.P_star_t[t][ob] + kf.Rt[t][ob][ob])
    expected_P_t1_0 = kf.Ft[t].dot(expected_P_t_11).dot(
        kf.Ft[t]) + kf.Qt[t]
    expected_xi_t1_0 = kf.Ft[t].dot(expected_xi_t_11) + \
        kf.Bt[t].dot(kf.Xt[t])
    np.testing.assert_array_almost_equal(expected_xi_t_11,
            kf.xi_t[t][1])
    np.testing.assert_array_almost_equal(expected_P_t_11,
            kf.P_star_t[t][1])
    np.testing.assert_array_almost_equal(expected_P_t1_0,
            kf.P_star_t[t+1][0])
    np.testing.assert_array_almost_equal(expected_xi_t1_0,
            kf.xi_t[t+1][0])
def test_sequential_update_uni_missing(
        ft_rw_1, theta_rw, Yt_1d, Xt_1d):
    """
    Test run in univariate case with missing y
    """
    # Missing observation at t=1: no measurement update (NaN slots),
    # prediction uses the prior state/covariance directly.
    t = 1
    index = 1
    ob = index - 1
    kf = Filter(ft_rw_1, Yt_1d, Xt_1d, for_smoother=True)
    kf.init_attr(theta_rw)
    for t_ in range(t+1):
        kf._sequential_update(t_)
    K = kf.P_star_t[t][ob] / (kf.P_star_t[t][ob] + kf.Rt[t][ob][ob])
    v = kf.Yt[t][ob] - kf.xi_t[t][ob] - kf.Dt[t][ob].dot(kf.Xt[t])
    expected_xi_t_11 = np.array([[np.nan]])
    expected_P_t_11 = np.zeros((1, 1)) * np.nan
    expected_P_t1_0 = kf.Ft[t].dot(kf.P_star_t[t][0]).dot(
        kf.Ft[t]) + kf.Qt[t]
    expected_xi_t1_0 = kf.Ft[t].dot(kf.xi_t[t][0]) + \
        kf.Bt[t].dot(kf.Xt[t])
    np.testing.assert_array_equal(expected_xi_t_11, kf.xi_t[t][1])
    np.testing.assert_array_equal(expected_P_t_11, kf.P_star_t[t][1])
    np.testing.assert_array_almost_equal(expected_P_t1_0,
            kf.P_star_t[t+1][0])
    np.testing.assert_array_almost_equal(expected_xi_t1_0,
            kf.xi_t[t+1][0])
def test_sequential_update_mvar_full_obs(ft_ar2_mvar_kw, theta_ar2_mvar,
        Yt_ar2_mvar, Xt_ar2_mvar):
    """
    Test normal run in multi-variate case full measurements
    """
    # All measurements observed at t=0: standard matrix-form update.
    t = 0
    kf = Filter(ft_ar2_mvar_kw, Yt_ar2_mvar, Xt_ar2_mvar, for_smoother=True)
    kf.init_attr(theta_ar2_mvar)
    kf._sequential_update(t)
    Mt = kf.ft(kf.theta, kf.T, x_0=Xt_ar2_mvar[0])
    Ht = Mt['Ht'][t]
    Bt = Mt['Bt'][t]
    Dt = Mt['Dt'][t]
    Ft = Mt['Ft'][t]
    Qt = Mt['Qt'][t]
    Rt = Mt['Rt'][t]
    Upsilon = Ht.dot(kf.P_star_t[t][0]).dot(Ht.T) + Rt
    K = kf.P_star_t[t][0].dot(Mt['Ht'][t].T).dot(linalg.pinvh(Upsilon))
    v = kf.Yt[t] - Ht.dot(kf.xi_t[t][0]) - Dt.dot(kf.Xt[t])
    expected_xi_t1_0 = Ft.dot(kf.xi_t[t][0] + K.dot(v)) + Bt.dot(kf.Xt[t])
    P_t_0 = kf.P_star_t[t][0]
    P_t_t = P_t_0 - P_t_0.dot(Ht.T).dot(linalg.pinvh(Upsilon)).dot(
        Ht).dot(P_t_0)
    expected_P_t1_0 = Ft.dot(P_t_t).dot(Ft.T) + Qt
    np.testing.assert_array_almost_equal(expected_P_t1_0,
            kf.P_star_t[t+1][0])
    np.testing.assert_array_almost_equal(expected_xi_t1_0,
            kf.xi_t[t+1][0])
def test_sequential_update_mvar_missing_middle(ft_ar2_mvar_kw, theta_ar2_mvar,
        Yt_ar2_mvar, Xt_ar2_mvar):
    """
    Test normal run in multi-variate case missing middle measurements
    """
    # Measurement 1 missing at t=1: rows [0, 2] of Ht/Dt/Rt are used.
    t = 1
    kf = Filter(ft_ar2_mvar_kw, Yt_ar2_mvar, Xt_ar2_mvar, for_smoother=True)
    kf.init_attr(theta_ar2_mvar)
    for t_ in range(t+1):
        kf._sequential_update(t_)
    Mt = kf.ft(kf.theta, kf.T, x_0=Xt_ar2_mvar[0])
    Ht = Mt['Ht'][t][[0, 2]]
    Bt = Mt['Bt'][t]
    Dt = Mt['Dt'][t][[0, 2]]
    Ft = Mt['Ft'][t]
    Qt = Mt['Qt'][t]
    Rt = Mt['Rt'][t][[0, 2]][:,[0, 2]]
    Upsilon = Ht.dot(kf.P_star_t[t][0]).dot(Ht.T) + Rt
    K = kf.P_star_t[t][0].dot(Ht.T).dot(linalg.pinvh(Upsilon))
    v = kf.Yt[t][[0, 1]] - Ht.dot(kf.xi_t[t][0]) - Dt.dot(kf.Xt[t])
    expected_xi_t1_0 = Ft.dot(kf.xi_t[t][0] + K.dot(v)) + Bt.dot(kf.Xt[t])
    P_t_0 = kf.P_star_t[t][0]
    P_t_t = P_t_0 - P_t_0.dot(Ht.T).dot(linalg.pinvh(Upsilon)).dot(
        Ht).dot(P_t_0)
    expected_P_t1_0 = Ft.dot(P_t_t).dot(Ft.T) + Qt
    np.testing.assert_array_almost_equal(expected_P_t1_0,
            kf.P_star_t[t+1][0])
    np.testing.assert_array_almost_equal(expected_xi_t1_0,
            kf.xi_t[t+1][0])
def test_sequential_update_mvar_all_missing(ft_ar2_mvar_kw, theta_ar2_mvar,
        Yt_ar2_mvar, Xt_ar2_mvar):
    """
    Test normal run in multi-variate case missing all measurements
    """
    # Nothing observed at t=2: pure time update (no gain, P unchanged).
    t = 2
    kf = Filter(ft_ar2_mvar_kw, theta_ar2_mvar, Xt_ar2_mvar, for_smoother=True) if False else Filter(ft_ar2_mvar_kw, Yt_ar2_mvar, Xt_ar2_mvar, for_smoother=True)
    kf.init_attr(theta_ar2_mvar)
    for t_ in range(t+1):
        kf._sequential_update(t_)
    Mt = kf.ft(kf.theta, kf.T, x_0=Xt_ar2_mvar[0])
    Bt = Mt['Bt'][t]
    Ft = Mt['Ft'][t]
    Qt = Mt['Qt'][t]
    expected_xi_t1_0 = Ft.dot(kf.xi_t[t][0]) + Bt.dot(kf.Xt[t])
    P_t_0 = kf.P_star_t[t][0]
    P_t_t = P_t_0
    expected_P_t1_0 = Ft.dot(P_t_t).dot(Ft.T) + Qt
    np.testing.assert_array_almost_equal(expected_P_t1_0,
            kf.P_star_t[t+1][0])
    np.testing.assert_array_almost_equal(expected_xi_t1_0,
            kf.xi_t[t+1][0])
def test_sequential_update_mvar_missing_first(ft_ar2_mvar_kw, theta_ar2_mvar,
        Yt_ar2_mvar, Xt_ar2_mvar):
    """
    Test normal run in multi-variate case missing middle measurements
    """
    # Measurement 0 missing at t=3: rows [1, 2] used; here the filtered
    # (time-t) state/covariance at partition n_t is checked instead of
    # the t+1 prediction.
    t = 3
    kf = Filter(ft_ar2_mvar_kw, Yt_ar2_mvar, Xt_ar2_mvar, for_smoother=True)
    kf.init_attr(theta_ar2_mvar)
    for t_ in range(t+1):
        kf._sequential_update(t_)
    Mt = kf.ft(kf.theta, kf.T, x_0=Xt_ar2_mvar[0])
    Ht = Mt['Ht'][t][[1, 2]]
    Bt = Mt['Bt'][t]
    Dt = Mt['Dt'][t][[1, 2]]
    Ft = Mt['Ft'][t]
    Qt = Mt['Qt'][t]
    Rt = Mt['Rt'][t][[1, 2]][:,[1, 2]]
    Upsilon = Ht.dot(kf.P_star_t[t][0]).dot(Ht.T) + Rt
    K = kf.P_star_t[t][0].dot(Ht.T).dot(linalg.pinvh(Upsilon))
    v = kf.Yt[t][[0, 1]] - Ht.dot(kf.xi_t[t][0]) - Dt.dot(kf.Xt[t])
    expected_xi_t_nt = kf.xi_t[t][0] + K.dot(v)
    P_t_0 = kf.P_star_t[t][0]
    P_t_t = P_t_0 - P_t_0.dot(Ht.T).dot(linalg.pinvh(Upsilon)).dot(
        Ht).dot(P_t_0)
    expected_P_t_nt = P_t_t
    np.testing.assert_array_almost_equal(expected_P_t_nt,
            kf.P_star_t[t][kf.n_t[t]])
    np.testing.assert_array_almost_equal(expected_xi_t_nt,
            kf.xi_t[t][kf.n_t[t]])
# Test sequential_update_diffuse
def test_sequential_update_diffuse_missing(ft_rw_1_diffuse, theta_rw,
Yt_1d_missing, Xt_1d):
"""
Test first missing
"""
t = 0
kf = Filter(ft_rw_1_diffuse, Yt_1d_missing, Xt_1d, for_smoother=True)
kf.init_attr(theta_rw)
for t_ in range(t+1):
kf._sequential_update_diffuse(t_)
e_P_inf_t1_0 = np.array([[1]])
e_xi_t1_0 = np.array([[0]])
np.testing.assert_array_almost_equal(e_P_inf_t1_0, kf.P_inf_t[t+1][0])
np.testing.assert_array_almost_equal(e_xi_t1_0, kf.xi_t[t+1][0])
def test_sequential_update_diffuse(ft_rw_1_diffuse, theta_rw,
Yt_1d_missing, Xt_1d):
"""
Test normal run
"""
t = 1
kf = Filter(ft_rw_1_diffuse, Yt_1d_missing, Xt_1d, for_smoother=True)
kf.init_attr(theta_rw)
for t_ in range(t+1):
kf._sequential_update_diffuse(t_)
e_P_inf_t1_0 = np.array([[0]])
e_P_star_t1_0 = kf.Rt[0] + kf.Qt[0]
e_xi_t1_0 = kf.Yt[1]
np.testing.assert_array_almost_equal(e_P_inf_t1_0, kf.P_inf_t[t+1][0])
np.testing.assert_array_almost_equal(e_xi_t1_0, kf.xi_t[t+1][0])
np.testing.assert_array_almost_equal(e_P_star_t1_0, kf.P_star_t[t+1][0])
def test_sequential_update_diffuse_ll_1d(ft_ll_1d_diffuse,
        theta_ll_1d_diffuse, Yt_1d_full):
    """
    Test local linear models from chapter 5 of Koopman and Durbin (2012)
    """
    t = 3
    kf = Filter(ft_ll_1d_diffuse, Yt_1d_full, for_smoother=True)
    kf.init_attr(theta_ll_1d_diffuse)
    for t_ in range(t):
        kf._sequential_update_diffuse(t_)
    # Test period 0 result
    # q1, q2 are the signal-to-noise ratios of the two state disturbances.
    q1 = theta_ll_1d_diffuse[0] / theta_ll_1d_diffuse[2]
    q2 = theta_ll_1d_diffuse[1] / theta_ll_1d_diffuse[2]
    e_P_inf_t1_0 = np.ones([2, 2])
    e_P_star_t1_0 = np.array([[1 + q1, 0], [0, q2]]) * theta_ll_1d_diffuse[2]
    e_xi_t1_0 = np.array([[Yt_1d_full[0][0]], [0]])
    np.testing.assert_array_almost_equal(e_P_inf_t1_0, kf.P_inf_t[1][0])
    np.testing.assert_array_almost_equal(e_xi_t1_0, kf.xi_t[1][0])
    np.testing.assert_array_almost_equal(e_P_star_t1_0, kf.P_star_t[1][0])
    # Test period 1 result
    e_P_inf_t1_0 = np.zeros([2, 2])
    e_P_star_t1_0 = np.array([[5 + 2 * q1 + q2, 3 + q1 + q2],
                              [3 + q1 + q2, 2 + q1 + 2 * q2]]) * \
        theta_ll_1d_diffuse[2]
    y2 = Yt_1d_full[1][0][0]
    y1 = Yt_1d_full[0][0][0]
    e_xi_t1_0 = np.array([[2 * y2 - y1], [y2 - y1]])
    np.testing.assert_array_almost_equal(e_P_inf_t1_0, kf.P_inf_t[2][0])
    np.testing.assert_array_almost_equal(e_xi_t1_0, kf.xi_t[2][0])
    np.testing.assert_array_almost_equal(e_P_star_t1_0, kf.P_star_t[2][0])
    # Test period 2 result, should return same result as _sequential_update()
    P_inf_t1_0 = kf.P_inf_t[3][0].copy()
    P_star_t1_0 = kf.P_star_t[3][0].copy()
    xi_t1_0 = kf.xi_t[3][0].copy()
    kf._sequential_update(2)
    np.testing.assert_array_almost_equal(P_inf_t1_0, np.zeros([2, 2]))
    np.testing.assert_array_almost_equal(xi_t1_0, kf.xi_t[3][0])
    np.testing.assert_array_almost_equal(P_star_t1_0, kf.P_star_t[3][0])
def test_sequential_update_diffuse_ll_Upsilon_inf0(ft_ll_mvar_diffuse,
        theta_ll_mvar_diffuse, Yt_mvar_diffuse):
    """
    For ll model, only measurements across time can reduce rank of P_inf_t
    """
    t = 1
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse, for_smoother=True)
    kf.init_attr(theta_ll_mvar_diffuse)
    for t_ in range(t):
        kf._sequential_update_diffuse(t_)
    # Test period 0 result
    e_P_inf_t1_0 = np.ones([2, 2])
    expected_q = 1
    np.testing.assert_array_almost_equal(e_P_inf_t1_0, kf.P_inf_t[1][0])
    assert expected_q == kf.q
    # Test update when Upsilon_inf = 0
    index = 2
    ob = index - 1
    t = 0
    # Reproduce the LDL-whitening the filter applies to the measurement
    # equation, then compute the regular (non-diffuse) update by hand.
    l_t, _, _ = linalg.ldl(kf.Rt[t])
    l_inv, _ = linalg.lapack.dtrtri(l_t, lower=True)
    R_t = l_inv.dot(kf.Rt[t])
    H_t = (l_inv.dot(kf.Ht[t]))[ob:index]
    D_t = (l_inv.dot(kf.Dt[t]))[ob:index]
    Upsilon = H_t.dot(kf.P_star_t[t][ob]).dot(H_t.T) + R_t[ob][ob]
    K = kf.P_star_t[t][ob].dot(H_t.T) / Upsilon
    v = l_inv.dot(kf.Yt[t])[ob] - H_t.dot(kf.xi_t[t][ob]) - D_t.dot(kf.Xt[t])
    expected_xi_t_11 = kf.xi_t[t][ob] + K * v
    expected_P_t_11 = kf.P_star_t[t][ob] - kf.P_star_t[t][ob].dot(
        (K.dot(H_t)).T)
    expected_P_t1_0 = kf.Ft[t].dot(expected_P_t_11).dot(
        kf.Ft[t].T) + kf.Qt[t]
    expected_xi_t1_0 = kf.Ft[t].dot(expected_xi_t_11) + \
        kf.Bt[t].dot(kf.Xt[t])
    np.testing.assert_array_almost_equal(expected_xi_t_11,
                                         kf.xi_t[t][kf.n_t[t]])
    np.testing.assert_array_almost_equal(expected_P_t_11,
                                         kf.P_star_t[t][kf.n_t[t]])
    np.testing.assert_array_almost_equal(expected_P_t1_0,
                                         kf.P_star_t[t+1][0])
    np.testing.assert_array_almost_equal(expected_xi_t1_0,
                                         kf.xi_t[t+1][0])
def test_sequential_update_diffuse_update_multiple_q(ft_q,
        theta_ll_mvar_diffuse, Yt_q):
    """
    A single period with multiple measurements can reduce the diffuse
    rank q down to 0 in one diffuse update.
    """
    # NOTE(review): `t` is assigned but never used — looks like a leftover
    # from the copy-pasted test above.
    t = 1
    kf = Filter(ft_q, Yt_q, for_smoother=True)
    kf.init_attr(theta_ll_mvar_diffuse)
    kf._sequential_update_diffuse(0)
    assert kf.q == 0
def test_sequential_update_diffuse_ll_equivalent(ft_ll_mvar_diffuse,
        ft_ll_mvar_1d, Yt_mvar_diffuse_missing, Yt_mvar_1d,
        theta_ll_mvar_diffuse):
    """
    Test in the case of missing values such that at most 1 measurement is
    present at time t, whether we get the same result as the 1d case.
    """
    kf_mvar = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    kf_mvar.fit(theta_ll_mvar_diffuse)
    kf_1d = Filter(ft_ll_mvar_1d, Yt_mvar_1d, for_smoother=True)
    kf_1d.fit(theta_ll_mvar_diffuse)
    # The two filters must agree period by period (last period excluded).
    for t_ in range(kf_mvar.T-1):
        np.testing.assert_array_almost_equal(kf_1d.P_star_t[t_][0],
                                             kf_mvar.P_star_t[t_][0])
        np.testing.assert_array_almost_equal(kf_1d.P_inf_t[t_][0],
                                             kf_mvar.P_inf_t[t_][0])
        np.testing.assert_array_almost_equal(kf_1d.xi_t[t_][0],
                                             kf_mvar.xi_t[t_][0])
# Test get_filtered_val
def test_get_filtered_val_not_filtered(ft_ll_mvar_1d, Yt_mvar_diffuse_missing):
    """
    Test error message when fit is not run
    """
    kf = Filter(ft_ll_mvar_1d, Yt_mvar_diffuse_missing, for_smoother=True)
    with pytest.raises(TypeError) as error:
        kf.get_filtered_val()
    expected_result = 'The Kalman filter object is not fitted yet'
    result = str(error.value)
    assert result == expected_result
def test_get_filtered_val_missing(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,
        theta_ll_mvar_diffuse):
    """
    Test missing measurements handling
    """
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    kf.fit(theta_ll_mvar_diffuse)
    Yt_filtered, Yt_filtered_cov, _, _ = kf.get_filtered_val()
    # For a missing period the fitted value must equal H_t @ xi_t.
    np.testing.assert_array_equal(kf.Ht[2].dot(kf.xi_t[2][0]), Yt_filtered[2])
def test_get_filtered_val_all_xi(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,
        theta_ll_mvar_diffuse):
    """
    Test df with all xi
    """
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    kf.fit(theta_ll_mvar_diffuse)
    Yt_filtered, Yt_filtered_cov, xi_t, P_t = kf.get_filtered_val()
    np.testing.assert_array_equal(xi_t[2], kf.xi_t[2][0])
    # During the diffuse phase the covariance is reported as NaN.
    np.testing.assert_array_equal(P_t[2], np.nan * np.ones(P_t[2].shape))
    np.testing.assert_array_equal(P_t[3], kf.P_star_t[3][0])
def test_get_filtered_y_selected_xi(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,
        theta_ll_mvar_diffuse):
    """
    Test df with selected xi
    """
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    kf.fit(theta_ll_mvar_diffuse)
    Yt_filtered, Yt_filtered_cov, xi_t, P_t = kf.get_filtered_val(xi_col=[1])
    # Only state component 1 (and its covariance sub-block) is returned.
    np.testing.assert_array_equal(xi_t[2], kf.xi_t[2][0][[1]])
    np.testing.assert_array_equal(P_t[2], np.nan * np.ones(P_t[2].shape))
    np.testing.assert_array_equal(P_t[3], kf.P_star_t[3][0][[1]][:,[1]])
def test_get_filtered_y_no_xi(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,
        theta_ll_mvar_diffuse):
    """
    Test df without xi
    """
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    kf.fit(theta_ll_mvar_diffuse)
    Yt_filtered, Yt_filtered_cov, xi_t, P_t = kf.get_filtered_val(is_xi=False)
    assert np.isnan(xi_t[-1])
    assert np.isnan(P_t[-1])
# Test get_filtered_state
def test_get_filtered_state_t(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,
        theta_ll_mvar_diffuse):
    """
    get values at t < self.T
    """
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    kf.fit(theta_ll_mvar_diffuse)
    t = kf.T - 1
    result = kf.get_filtered_state(t)
    expected_result = {'P_star_t': kf.P_star_t[t][0],
                       'P_inf_t': kf.P_inf_t[t][0],
                       'xi_t': kf.xi_t[t][0],
                       'q': 0}
    for i in ['P_star_t', 'P_inf_t', 'xi_t']:
        np.testing.assert_array_equal(result[i], expected_result[i])
    assert result['q'] == expected_result['q']
def test_get_filtered_state_T(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,
        theta_ll_mvar_diffuse):
    """At t == self.T the one-step-ahead state (xi_T1, P_T1) is returned."""
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    kf.fit(theta_ll_mvar_diffuse)
    t = kf.T
    result = kf.get_filtered_state(t)
    expected_result = {'P_star_t': kf.P_T1,
                       'P_inf_t': np.zeros([2, 2]),
                       'xi_t': kf.xi_T1,
                       'q': 0}
    for i in ['P_star_t', 'P_inf_t', 'xi_t']:
        np.testing.assert_array_equal(result[i], expected_result[i])
    assert result['q'] == expected_result['q']
def test_get_filtered_state_diffuse(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,
        theta_ll_mvar_diffuse):
    """Requesting a state still in the diffuse phase raises ValueError."""
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    kf.fit(theta_ll_mvar_diffuse)
    t = 1
    with pytest.raises(ValueError) as error:
        result = kf.get_filtered_state(t)
    expected_result = 'Diffuse state at time 1'
    result = str(error.value)
    assert expected_result == result
def test_get_filtered_state_max(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,
        theta_ll_mvar_diffuse):
    """Requesting a state past self.T raises ValueError."""
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    kf.fit(theta_ll_mvar_diffuse)
    t = 6
    with pytest.raises(ValueError) as error:
        result = kf.get_filtered_state(t)
    expected_result = 'Maximum t allowed is 4'
    result = str(error.value)
    assert expected_result == result
def test_get_filtered_state_not_fitted(ft_ll_mvar_diffuse,
        Yt_mvar_diffuse_missing, theta_ll_mvar_diffuse):
    """Calling get_filtered_state before fit raises ValueError."""
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    t = 6
    with pytest.raises(ValueError) as error:
        result = kf.get_filtered_state(t)
    expected_result = 'Kalman filter is not fitted yet'
    result = str(error.value)
    assert expected_result == result
# Test override from init_state
def test_over_ride(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,
        theta_ll_mvar_diffuse):
    """
    Force a diffuse kalman filter to become regular filter
    """
    # q=0 with a fully-specified P_star/xi skips the diffuse initialization.
    init_val = {'P_star_t': np.zeros([2, 2]),
                'xi_t': 100 * np.ones([2, 1]), 'q': 0}
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    kf.fit(theta_ll_mvar_diffuse, init_state=init_val)
    t = 0
    result = kf.get_filtered_state(t)
    expected_result = {'P_star_t': np.zeros([2, 2]),
                       'P_inf_t': np.zeros([2, 2]),
                       'xi_t': 100 * np.ones([2, 1]),
                       'q': 0}
    for i in ['P_star_t', 'P_inf_t', 'xi_t']:
        np.testing.assert_array_equal(result[i], expected_result[i])
    assert result['q'] == expected_result['q']
def test_over_ride_error_input(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing,
        theta_ll_mvar_diffuse):
    """
    Raises exception when init_state wrong input
    """
    # P_star_t is 1-dimensional on purpose to trigger the shape check.
    init_val = {'P_star_t': np.zeros([2]),
                'xi_t': 100 * np.ones([2, 1]), 'q': 0}
    kf = Filter(ft_ll_mvar_diffuse, Yt_mvar_diffuse_missing, for_smoother=True)
    with pytest.raises(ValueError) as error:
        kf.fit(theta_ll_mvar_diffuse, init_state=init_val)
    expected_result = 'User-specified P_star_t does not have 2 dimensions'
    result = str(error.value)
    assert expected_result == result
|
17,733 | 471afae29f76ade6ae0d4f59dcc2bb6109d4fecc | import io
import logging
from pathlib import Path
import pkgutil
import torch
from torch import nn
import numpy as np
from .defaults import DEVICE
def text_to_id(char):
    """Map a character to a token id: ASCII maps to ord + 2, all other
    characters share the out-of-vocabulary id 1."""
    code_point = ord(char)
    if code_point <= 127:
        return code_point + 2
    return 1
def id_to_text(x):
    """Inverse of text_to_id: map a token id back to its character, or
    the placeholder "X" for ids outside the ASCII range (including 0/1)."""
    if 1 < x and x - 2 <= 127:
        return chr(x - 2)
    return "X"
def print_sentence_and_label(sentence, label):
    """Print one "<char> <token-flag> <sentence-flag>" line per position."""
    for char_id, flags in zip(sentence, label):
        token_flag = int(flags[0])
        sentence_flag = int(flags[1])
        print(id_to_text(char_id), token_flag, sentence_flag)
def label_tokens(tokenized_p):
    """Flatten tokenized sentences into raw text plus boundary labels.

    Every character gets label 0, except the final character of each token
    (column 0) and the final character of each sentence (column 1), which
    are set to 1.0. Returns (text, labels) with labels of shape
    (len(text), 2).
    """
    pieces = []
    token_labels = []
    sentence_labels = []
    for sentence in tokenized_p:
        for token in sentence:
            # assumes token exposes .text and .whitespace (trailing space)
            raw = token.text + token.whitespace
            pieces.append(raw)
            token_labels.extend([0] * len(raw))
            sentence_labels.extend([0] * len(raw))
            token_labels[-1] = 1.0
        sentence_labels[-1] = 1.0
    return "".join(pieces), np.stack([token_labels, sentence_labels], -1)
def store_model(model, store_directory):
    """Serialize *model* into *store_directory* in three formats.

    Writes ts_cpu.pt (8-bit dynamically quantized TorchScript),
    ts_cuda.pt (fp16 TorchScript, only when CUDA is available) and a
    TF.js export under tfjs_model/. The directory is created if missing.
    """
    store_directory = Path(store_directory)
    store_directory.mkdir(exist_ok=True, parents=True)
    # Dummy input used to trace the model graph.
    sample = torch.zeros([1, 100])
    # model is trained with fp16, so it can be safely quantized to 16 bit
    # CPU model is quantized to 8 bit, with minimal loss in accuracy
    # according to tests in train.ipynb
    quantized_model = torch.quantization.quantize_dynamic(
        model.float().cpu(), {nn.LSTM, nn.Linear}, dtype=torch.qint8
    )
    traced = torch.jit.trace(quantized_model, sample)
    traced.save(str(store_directory / "ts_cpu.pt"))
    if torch.cuda.is_available():
        traced = torch.jit.trace(model.half().cuda(), sample.cuda())
        traced.save(str(store_directory / "ts_cuda.pt"))
    else:
        # Fix: logging.warn is deprecated — logging.warning is the
        # documented API.
        logging.warning(
            "CUDA is not available. CUDA version of model could not be stored."
        )
    # Imported lazily so the package does not hard-depend on tensorflowjs.
    import tensorflowjs as tfjs  # noqa: F401
    tfjs.converters.save_keras_model(
        model.get_keras_equivalent(),
        str(store_directory / "tfjs_model"),
        quantization_dtype=np.uint8,
    )
def _get_filename(device):
filename = (
"ts_cpu.pt"
if device == torch.device("cpu") or not torch.cuda.is_available()
else "ts_cuda.pt"
)
return filename
def load_provided_model(name, device=DEVICE):
    """Load a model bundled inside the package under data/<name>/."""
    resource = f"data/{name}/{_get_filename(device)}"
    bin_data = pkgutil.get_data(__package__, resource)
    return torch.jit.load(io.BytesIO(bin_data))
def load_model(store_directory, device=DEVICE):
    """Load a TorchScript model previously written by store_model()."""
    artifact = Path(store_directory) / _get_filename(device)
    return torch.jit.load(str(artifact))
|
17,734 | c2462c0c5464ea3bb0d30f93f35bfd75e7ef9182 | """Support for Genius Hub water_heater devices."""
import logging
from homeassistant.components.water_heater import (
WaterHeaterDevice,
SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE)
from homeassistant.const import (
ATTR_TEMPERATURE, STATE_OFF, TEMP_CELSIUS)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN
STATE_AUTO = 'auto'
STATE_MANUAL = 'manual'
_LOGGER = logging.getLogger(__name__)
# Genius Hub zone types that are exposed as water heaters.
GH_HEATERS = ['hot water temperature']
GH_SUPPORT_FLAGS = \
    SUPPORT_TARGET_TEMPERATURE | \
    SUPPORT_OPERATION_MODE
# HA does not have SUPPORT_ON_OFF for water_heater
GH_MAX_TEMP = 80.0
GH_MIN_TEMP = 30.0
# Genius Hub HW supports only Off, Override/Boost & Timer modes
HA_OPMODE_TO_GH = {
    STATE_OFF: 'off',
    STATE_AUTO: 'timer',
    STATE_MANUAL: 'override',
}
# Hub states with no HA equivalent map to None.
GH_STATE_TO_HA = {
    'off': STATE_OFF,
    'timer': STATE_AUTO,
    'footprint': None,
    'away': None,
    'override': STATE_MANUAL,
    'early': None,
    'test': None,
    'linked': None,
    'other': None,
}
# Zone attributes surfaced via device_state_attributes.
GH_STATE_ATTRS = ['type', 'override']
async def async_setup_platform(hass, hass_config, async_add_entities,
                               discovery_info=None):
    """Set up the Genius Hub water_heater entities."""
    client = hass.data[DOMAIN]['client']
    heaters = []
    for zone in client.hub.zone_objs:
        if zone.type in GH_HEATERS:
            heaters.append(GeniusWaterHeater(client, zone))
    async_add_entities(heaters)
class GeniusWaterHeater(WaterHeaterDevice):
    """Representation of a Genius Hub water_heater device."""
    def __init__(self, client, boiler):
        """Initialize the water_heater device."""
        self._client = client
        self._boiler = boiler
        # Modes offered to HA: off / auto (timer) / manual (override).
        self._operation_list = list(HA_OPMODE_TO_GH)
    async def async_added_to_hass(self):
        """Run when entity about to be added."""
        # Refresh whenever the hub broadcasts an update on the DOMAIN signal.
        async_dispatcher_connect(self.hass, DOMAIN, self._refresh)
    @callback
    def _refresh(self):
        self.async_schedule_update_ha_state(force_refresh=True)
    @property
    def name(self):
        """Return the name of the water_heater device."""
        return self._boiler.name
    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        tmp = self._boiler.__dict__.items()
        return {'status': {k: v for k, v in tmp if k in GH_STATE_ATTRS}}
    @property
    def should_poll(self) -> bool:
        """Return False as the geniushub devices should not be polled."""
        return False
    @property
    def current_temperature(self):
        """Return the current temperature."""
        try:
            return self._boiler.temperature
        except AttributeError:
            # The zone has not reported a temperature (yet).
            return None
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._boiler.setpoint
    @property
    def min_temp(self):
        """Return min valid temperature that can be set."""
        return GH_MIN_TEMP
    @property
    def max_temp(self):
        """Return max valid temperature that can be set."""
        return GH_MAX_TEMP
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return GH_SUPPORT_FLAGS
    @property
    def operation_list(self):
        """Return the list of available operation modes."""
        return self._operation_list
    @property
    def current_operation(self):
        """Return the current operation mode."""
        return GH_STATE_TO_HA[self._boiler.mode]
    async def async_set_operation_mode(self, operation_mode):
        """Set a new operation mode for this boiler."""
        await self._boiler.set_mode(HA_OPMODE_TO_GH[operation_mode])
    async def async_set_temperature(self, **kwargs):
        """Set a new target temperature for this boiler."""
        temperature = kwargs[ATTR_TEMPERATURE]
        await self._boiler.set_override(temperature, 3600)  # 1 hour
|
17,735 | e42a8d9a0deff85a92c365d282633f3f6fec8266 | '''
Write a menu-driven program to accept a list of student names
and perform the following:
(a) Search an element using linear search/binary search.
(b) Sort the elements using bubble sort/insertion sort/
selection sort.
Written by Sudipto Ghosh
'''
def linearSearch(items, e):
    """Return True if e occurs in items, scanning left to right (O(n)).

    The parameter was renamed from `list`, which shadowed the builtin;
    all call sites in this file pass it positionally.
    """
    for item in items:
        if e == item:
            return True
    return False
def binarySearch(items, left, right, e):
    """Return the index of e within the sorted slice items[left..right],
    or -1 when absent.

    Classic recursive binary search; items must already be sorted.
    (Parameter renamed from `list`, which shadowed the builtin.)
    """
    if right < left:
        return -1
    mid = (left + right) // 2
    if items[mid] == e:
        return mid
    if items[mid] > e:
        # Target (if present) lies in the left half.
        return binarySearch(items, left, mid - 1, e)
    return binarySearch(items, mid + 1, right, e)
def bubbleSort(items):
    """Return a sorted copy of items using bubble sort (O(n^2)).

    The input list is left unmodified. (Parameter renamed from `list`,
    which shadowed the builtin.)
    """
    items = items.copy()
    n = len(items)
    for i in range(n - 1):
        # After pass i, the last i+1 positions hold their final values.
        for j in range(n - i - 1):
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
    return items
def insertionSort(items):
    """Return a sorted copy of items using insertion sort (O(n^2)).

    The input list is left unmodified. (Parameter renamed from `list`,
    which shadowed the builtin.)
    """
    items = items.copy()
    for i in range(1, len(items)):
        key = items[i]
        j = i - 1
        # Shift larger elements of the sorted prefix one slot right.
        while j >= 0 and key < items[j]:
            items[j + 1] = items[j]
            j -= 1
        items[j + 1] = key
    return items
def selectionSort(items):
    """Return a sorted copy of items using selection sort (O(n^2)).

    The input list is left unmodified. (Parameter renamed from `list`,
    which shadowed the builtin.)
    """
    items = items.copy()
    for i in range(len(items)):
        # Find the smallest remaining element and swap it into slot i.
        minimum = i
        for j in range(i + 1, len(items)):
            if items[minimum] > items[j]:
                minimum = j
        items[i], items[minimum] = items[minimum], items[i]
    return items
def main():
    """Interactive driver: read student names, then loop over the menu."""
    students = []
    r = int(input('Enter Number of Students: '))
    for i in range(1, r + 1, 1):
        name = input(f'Enter Name of Student {i}: ')
        students.append(name)
    while True:
        print('''
        MENU
        ========================
        (1) Linear Search
        (2) Binary Search
        (3) Bubble Sort
        (4) Insertion Sort
        (5) Selection Sort
        (0) Exit
        ''')
        choice = int(input('Enter Choice: '))
        if choice == 1:
            name = input("Enter Name to Search: ")
            if (linearSearch(students, name)):
                print("Student Found in List")
            else:
                print("Student Not Found in List")
        elif choice == 2:
            # Binary search needs sorted input, hence the bubbleSort call.
            name = input("Enter Name to Search: ")
            if (binarySearch(bubbleSort(students), 0, r - 1, name) >= 0):
                print("Student Found in List")
            else:
                print("Student Not Found in List")
        elif choice == 3:
            print('Original List:', students)
            print('Sorted List:', bubbleSort(students))
        elif choice == 4:
            print('Original List:', students)
            print('Sorted List:', insertionSort(students))
        elif choice == 5:
            print('Original List:', students)
            print('Sorted List:', selectionSort(students))
        elif choice == 0:
            break
        input('Press any key to continue...')
if __name__ == "__main__":
    main()
|
17,736 | ab5f0782c235ac1630fed984c39bd7c1ef1443eb | import getpass
import argparse
import string
import os
def load_data(filepath):
    """Return the file's lines (newlines stripped), or [] if it is absent."""
    if not os.path.isfile(filepath):
        return []
    with open(filepath, 'r') as data_file:
        return data_file.read().splitlines()
def check_digits(password):
    """True when the password contains at least one digit."""
    return any(map(str.isdigit, password))
def check_letters(password):
    """True when the password contains at least one alphabetic character."""
    return any(map(str.isalpha, password))
def check_case_sensitivity(password):
    """True when the password mixes lower-case and upper-case letters."""
    has_lower = any(c.islower() for c in password)
    has_upper = any(c.isupper() for c in password)
    return has_lower and has_upper
def check_special_symbols(password):
    """True when at least one ASCII punctuation character is present."""
    punctuation = set(string.punctuation)
    return any(c in punctuation for c in password)
def count_length_points(password):
    """Length bonus: -1 below 5 chars, +1 for 11-15, +2 above 15, else 0."""
    length = len(password)
    if length < 5:
        return -1
    if length > 15:
        return 2
    if length > 10:
        return 1
    return 0
def check_password_in_blacklist(password, blacklist):
    """True when the exact password occurs in the blacklist sequence."""
    return any(entry == password for entry in blacklist)
def get_password_strength(password, blacklist):
    """Score the password: base 2, plus bonuses for digits, letters,
    mixed case (x2), punctuation (x2) and length, minus 1 when the
    password appears in the blacklist."""
    score = 2
    score += check_digits(password)
    score += check_letters(password)
    score += 2 * check_case_sensitivity(password)
    score += 2 * check_special_symbols(password)
    score -= check_password_in_blacklist(password, blacklist)
    score += count_length_points(password)
    return score
def get_parser():
    """Build the CLI parser: a single positional blacklist-file path."""
    cli = argparse.ArgumentParser()
    cli.add_argument('filepath', help='Path to blacklist file')
    return cli
if __name__ == '__main__':
    # CLI entry point: load the blacklist, read the password without echo,
    # and print the resulting strength score.
    args = get_parser().parse_args()
    blacklist = load_data(args.filepath)
    if not blacklist:
        print('Warning: your blacklist is empty')
    password = getpass.getpass('Password:')
    print('Password strength is:', get_password_strength(password, blacklist))
|
17,737 | d5aea3b2ac872aadb10e941ec52a2b46c0057862 | # coding: utf-8
# a swift kernel for Jupyter
# copyright Tim Nugent, made available under the MIT License
# see the repository https://github.com/McJones/jupyter-swift-kernel/ for full details
import subprocess, os, shutil, tempfile, re
from ipykernel.kernelbase import Kernel
class SwiftKernel(Kernel):
    """A Jupyter kernel that evaluates Swift by replaying an ever-growing
    scratch source file through the `swift` interpreter and diffing out the
    output that is new since the previous successful run."""
    # Jupiter stuff
    implementation = 'Swift'
    implementation_version = '1.1.1'
    language = 'swift'
    language_version = '3.0.2'
    language_info = {'mimetype': 'text/plain', 'file_extension': 'swift', 'name': 'swift'}
    banner = "Swift kernel"
    # my stuff
    # Full stdout of the last successful run; used to slice off new output.
    output = ""
    # Scratch directory holding the accumulated Swift source.
    swiftDirectory = tempfile.mkdtemp()
    def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
        """Run one cell and send either its new stdout or its error lines."""
        errorCode, dump = self.runCode(code)
        if errorCode == 0:
            if not silent:
                stream = {'name': 'stdout', 'text': dump}
                self.send_response(self.iopub_socket, 'stream', stream)
            return {
                'status': 'ok',
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {}
            }
        else:
            # every example does it like this but this just feels weird
            # why does the execution_count increment?!
            if not silent:
                stream = {
                    'status': 'error',
                    'ename': 'ERROR',
                    'evalue': 'error',
                    'traceback': dump
                }
                self.send_response(self.iopub_socket, 'error', stream)
            return {
                'status': 'error',
                'execution_count': self.execution_count,
                'ename': 'ERROR',
                'evalue': 'error',
                'traceback': dump
            }
    def do_shutdown(self, restart):
        # delete the temporary swift file(s) and directory
        shutil.rmtree(self.swiftDirectory)
    # appends the new text to the swift file
    # runs the swift file
    # capture all output
    # returns the result
    def runCode(self, command):
        """Return (0, new_stdout) on success or (1, [error lines]) on failure."""
        swiftFileLocation = os.path.join(self.swiftDirectory, 'scratch.swift')
        canonicalFile = os.path.join(self.swiftDirectory, 'canonical.swift')
        # now copy everything from canonical into the scratch
        if os.path.isfile(canonicalFile):
            shutil.copyfile(canonicalFile, swiftFileLocation)
        # Bug fix: the file is opened in text mode, so write str — writing
        # UTF-8-encoded bytes raised TypeError on Python 3. The explicit
        # encoding preserves the original UTF-8 intent.
        with open(swiftFileLocation, 'a', encoding='UTF-8') as swiftFile:
            swiftFile.write(command + "\n")
        errorOutput = []
        # because who needs warnings, right?!
        # queue up mental picture of Holtzman while reading the above comment please
        cmd = 'swift -suppress-warnings {0}'.format(swiftFileLocation)
        # Bug fix: universal_newlines=True makes stdout/stderr str instead
        # of bytes, so the re.sub cleanup and the Jupyter text stream work
        # on Python 3.
        swift = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, universal_newlines=True)
        # handle all valid output
        newOutput = swift.stdout.read()
        # handle any errors
        for line in swift.stderr.readlines():
            # to clean up the default error message swift returns
            line = re.sub('^.*error: ', '', line)
            errorOutput.append(line.rstrip("\n\r"))
        retval = swift.wait()
        # ran without error
        if retval == 0:
            # putting the valid code back into the canonical file
            shutil.copyfile(swiftFileLocation, canonicalFile)
            # returning only the output produced by this cell
            diff = newOutput[len(self.output):]
            self.output = newOutput
            return 0, diff
        else:
            # dumping the dodgy file
            os.remove(swiftFileLocation)
            # returning the error(s)
            return 1, errorOutput
if __name__ == '__main__':
    # Standard Jupyter kernel entry point.
    from ipykernel.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(kernel_class=SwiftKernel)
|
17,738 | 92325c56d4a0c7cb025883c98dd48c961eb5127d | # coding: utf-8
__all__ = ['Touken']
__author__ = 't-black-msq <t.black.msq@gmail.com>'
from typing import Any, Dict, Tuple
from const import HIGEKIRI, HIZAMARU, TOKU, Status, ToukenInfoKey
from data import DataAccessor
from utils import KatanaInfo
class ToukenStatus(object):
    """Level and combat stats for a sword, with range validation.

    The level must be set first (1-99); every other stat is then checked
    against the initial/max bound tables installed via set_limit_status().
    """
    # "The entered {0} is {1} than the initial status" (Japanese UI text).
    __LG_MESSAGE = '入力された {0} が初期ステータスより{1}です'
    __LOWER = '低い'
    __GREATER = '高い'
    def __init__(self):
        # Everything starts unset; setting the level gates the rest.
        self.__level = None
        self.__hp_max = None
        self.__attack = None
        self.__defense = None
        self.__mobile = None
        self.__back = None
        self.__scout = None
        self.__loyalties = None
        self.__hide = None
    def set_all_status(
            self,
            hp_max: int,
            attack: int,
            defense: int,
            mobile: int,
            back: int,
            scout: int,
            loyalties: int,
            hide: int):
        """Bulk-assign every stat at once.

        Requires the level to be set already; note that, unlike
        set_status(), no range validation is performed here.
        """
        if self.__level:
            self.__hp_max = hp_max
            self.__attack = attack
            self.__defense = defense
            self.__mobile = mobile
            self.__back = back
            self.__scout = scout
            self.__loyalties = loyalties
            self.__hide = hide
        else:
            print('Set level before setting other status')
    def set_status(self, status: Status, value: int) -> bool:
        """Validate and assign a single stat; return True when accepted."""
        valid = False
        if self.__level:
            if status is Status.HP_MAX:
                # NOTE(review): hp_max is validated with the 1-99 *level*
                # check rather than the stat bound tables — confirm intent.
                if self.__check_level(value):
                    self.__hp_max = value
                    valid = True
            elif status is Status.ATTACK:
                if self.__check_status(
                        Status.ATTACK, Status.KEY_ATTACK, value):
                    self.__attack = value
                    valid = True
            elif status is Status.DEFENSE:
                if self.__check_status(
                        Status.DEFENSE, Status.KEY_DEFENSE, value):
                    self.__defense = value
                    valid = True
            elif status is Status.MOBILE:
                if self.__check_status(
                        Status.MOBILE, Status.KEY_MOBILE, value):
                    self.__mobile = value
                    valid = True
            elif status is Status.BACK:
                if self.__check_status(
                        Status.BACK, Status.KEY_BACK, value):
                    self.__back = value
                    valid = True
            elif status is Status.SCOUT:
                if self.__check_status(
                        Status.SCOUT, Status.KEY_SCOUT, value):
                    self.__scout = value
                    valid = True
            elif status is Status.LOYALTIES:
                # NOTE(review): ignores `value` and resets loyalties to the
                # initial-status table entry — confirm this is intentional.
                self.__loyalties = self.__status_i[Status.KEY_LOYALTIES.value]
                valid = True
            elif status is Status.HIDE:
                if self.__check_status(
                        Status.HIDE, Status.KEY_HIDE, value):
                    self.__hide = value
                    valid = True
        else:
            print('Set level before setting other status')
        return valid
    def set_level(self, level: int):
        """Set the level if it lies in 1-99; return True on success."""
        if self.__check_level(level):
            self.__level = level
            return True
        return False
    def set_limit_status(self, initial: Dict[str, int], max_: Dict[str, int]):
        """Install the per-stat initial and maximum bound tables."""
        self.__status_i = initial
        self.__status_m = max_
    def __check_level(self, level: int) -> bool:
        # Levels are valid in [1, 99]; out of range prints a notice.
        if level < 1:
            print(self.__LG_MESSAGE.format(Status.LEVEL.value, self.__LOWER))
        elif 99 < level:
            print(self.__LG_MESSAGE.format(Status.LEVEL.value, self.__GREATER))
        return 1 <= level <= 99
    def __check_status(self, status: Status, key: Status, val: int) -> bool:
        # Bug fix: the messages formatted `Status.value` (an attribute
        # lookup on the enum *class*, which raises AttributeError); the
        # member passed in as `status` was clearly intended.
        if val < self.__status_i[key.value]:
            print(self.__LG_MESSAGE.format(status.value, self.__LOWER))
        elif self.__status_m[key.value] < val:
            print(self.__LG_MESSAGE.format(status.value, self.__GREATER))
        return self.__status_i[key.value] <= val <= self.__status_m[key.value]
    @property
    def level(self) -> int:
        return self.__level
    @property
    def hp_max(self) -> int:
        """HP / survival (生存)."""
        return self.__hp_max
    @property
    def attack(self) -> int:
        """Attack (打撃)."""
        return self.__attack
    @property
    def defense(self) -> int:
        """Defense / leadership (統率)."""
        return self.__defense
    @property
    def mobile(self) -> int:
        """Mobility (機動)."""
        return self.__mobile
    @property
    def back(self) -> int:
        """Impulse (衝力)."""
        return self.__back
    @property
    def scout(self) -> int:
        """Scouting (偵察)."""
        return self.__scout
    @property
    def loyalties(self) -> int:
        """Certain kill (必殺)."""
        return self.__loyalties
    @property
    def hide(self) -> int:
        """Camouflage (隠蔽)."""
        return self.__hide
class Touken(ToukenStatus):
    """A concrete sword: wraps a KatanaInfo record and adds toku handling."""
    # Shared accessor for katana master-data records.
    accessor = DataAccessor()
    def __init__(self, info: KatanaInfo, level: int = None):
        super().__init__()
        self.__info = info
        if level:
            self.set_level(level)
        self.parse_info()
    def __str__(self) -> str:
        # Multi-line Japanese status card: id, name (+ " 特" when toku),
        # level and the four combat stats.
        return '\n'.join([f'■ 刀帳No: {self.__uid}',
                          f'■ 刀剣名: {self.__name}{" 特" if self.is_toku else ""}',
                          f'■ {Status.LEVEL.value}: {self.level}',
                          f'■ {Status.ATTACK.value}: {self.attack}',
                          f'■ {Status.DEFENSE.value}: {self.defense}',
                          f'■ {Status.MOBILE.value}: {self.mobile}',
                          f'■ {Status.BACK.value}: {self.back}'])
    def parse_info(self):
        """Unpack the KatanaInfo record and install the stat bound tables
        (toku tables when the toku level has been reached)."""
        self.__uid = self.__info[ToukenInfoKey.UID.value]
        self.__name = self.__info[ToukenInfoKey.NAME.value]
        self.__toku_level = self.__info[ToukenInfoKey.TOKU_LEVEL.value]
        if self.__check_toku():
            self.__initial_status = self.__info[ToukenInfoKey.INITIAL_STATUS_TOKU.value]
            self.__max_status = self.__info[ToukenInfoKey.MAX_STATUS_TOKU.value]
        else:
            self.__initial_status = self.__info[ToukenInfoKey.INITIAL_STATUS.value]
            self.__max_status = self.__info[ToukenInfoKey.MAX_STATUS.value]
        self.__faction = self.__info[ToukenInfoKey.FACTION.value]
        self.__kind = self.__info[ToukenInfoKey.KIND.value]
        self.__range = self.__info[ToukenInfoKey.RANGE.value]
        self.__rarelity = self.__info[ToukenInfoKey.RARELITY.value]
        self.__slot = self.__info[ToukenInfoKey.SLOT.value]
        self.set_limit_status(self.__initial_status, self.__max_status)
    def __check_toku(self) -> bool:
        # The Genji brothers (Higekiri/Hizamaru) change identity at certain
        # levels and are handled separately.
        if self.__uid in HIGEKIRI + HIZAMARU:
            return self.__check_genji_brothers()
        return self.__toku_level <= self.level
    def __check_genji_brothers(self):
        # Walk the Higekiri/Hizamaru evolution chain and, if the current
        # level is below some stage's toku level, re-point this object at
        # that stage's record.
        # NOTE(review): this method has no `return True` path — it always
        # returns False, so these swords never get the toku stat tables.
        # Confirm whether a success return was intended after the loop.
        if self.__uid in HIGEKIRI:
            for uid in HIGEKIRI:
                tmp = self.accessor.get_katana(uid)
                if self.level < tmp[ToukenInfoKey.TOKU_LEVEL.value]:
                    if self.__uid != uid:
                        self.__uid = uid
                        self.__name = tmp[ToukenInfoKey.NAME.value]
                        self.__toku_level = tmp[ToukenInfoKey.TOKU_LEVEL.value]
                    return False
                else:
                    continue
        elif self.__uid in HIZAMARU:
            for uid in HIZAMARU:
                tmp = self.accessor.get_katana(uid)
                if self.level < tmp[ToukenInfoKey.TOKU_LEVEL.value]:
                    if self.__uid != uid:
                        self.__uid = uid
                        self.__name = tmp[ToukenInfoKey.NAME.value]
                        self.__toku_level = tmp[ToukenInfoKey.TOKU_LEVEL.value]
                    return False
                else:
                    continue
        return False
    def make_avant_message(self, mode: int) -> str:
        """Encode uid + zero-padded level/stats + mode into one string."""
        return ''.join([self.__uid,
                        f'{self.level:02d}',
                        f'{self.attack:03d}',
                        f'{self.defense:03d}',
                        f'{self.mobile:03d}',
                        f'{self.back:03d}',
                        str(mode)])
    @property
    def uid(self) -> str:
        return self.__uid
    @property
    def name(self) -> str:
        return self.__name
    @property
    def toku_level(self) -> int:
        return self.__toku_level
    @property
    def rarelity(self) -> int:
        return self.__rarelity
    @property
    def initial_status(self) -> Dict[str, int]:
        return self.__initial_status
    @property
    def max_status(self) -> Dict[str, int]:
        return self.__max_status
    @property
    def is_toku(self) -> bool:
        # True once the sword's level reaches its toku threshold.
        return self.__toku_level <= self.level
17,739 | 87c59d12e810b616947c548d75cfada7e88a88b4 | #!/usr/bin/python3
from flask import Flask, render_template, jsonify, send_file
import json
app = Flask(__name__)
@app.route('/api/v1/get_image', methods=['GET','POST'],
        strict_slashes=False)
def get_image():
    """ Returns an image given id """
    # NOTE(review): `tempFileObj` is not defined anywhere in this module,
    # so this handler raises NameError when hit — the temp file must be
    # created (or obtained from the id) before calling send_file.
    response = send_file(tempFileObj, as_attachment=True, attachment_filename='marked_image.png')
    return response
if __name__ == "__main__":
    # Listen on all interfaces; the port is passed as a string.
    app.run(host="0.0.0.0", port='8080')
|
17,740 | 932ba5b5113aa0e3d990ee259e3a884ade5f3eb0 | # -*- encoding: utf-8 -*-
'''
@Filename : simulate_settings.py
@Datetime : 2020/09/24 10:21:18
@Author : Joe-Bu
@version : 1.0
'''
# Measurement item names and modelled index names — presumably water-quality
# sensor channels and lab indices; confirm against the consuming code.
NORMAL_ITEMS = ['watertemp', 'pH', 'DO', 'conductivity', 'turbidity']
NORMAL_INDEX = ['codmn', 'nh3n', 'tp', 'tn']
# Default configuration for model training / prediction runs.
model_params = dict(
    savedir = '../../model',
    modes = 'predict', # train/predict
    index = 'codmn', # TP/TN/NH3N/CODMn
    indexs = ['codmn','nh3n','tp','tn'], # TP+TN+NH3N+CODMn
    model = 'XGB', # RF/GBRT/XGB
    models = ['RF','GBRT','XGB'] # RF+GBRT+XGB
)
17,741 | e05d31488e3515840004ca664340e381035ff7d8 | class person(object):
species="home sapiens"
def __init__(self,name):
self.name=name
def __str__(self):
return self.name
def rename(self,renamed):
self.name=renamed
print("now my name is {}".format(self.name))
# Module-level demo instances.
kelly=person("kelly")
joseph=person("joseph")
john_doe=person("john_Doe")
class A(object):
    """Wraps a number and supports `+` between instances via __add__."""

    def __init__(self, num):
        self.num = num

    def __add__(self, other):
        # Adding two A's produces a fresh A holding the summed values.
        total = self.num + other.num
        return A(total)
17,742 | cb1d059584e1e3d67943430721a9859ec7c34aaf | import logging
# Module-level logger named after this module.
log = logging.getLogger(__name__)
# Fix: the message had an f-string prefix with no placeholders (F541);
# a plain literal is equivalent and avoids eager formatting.
log.debug(' module loaded')
17,743 | ca5f14efe004717fb948f8fc1677b6b1164ab8c1 | #!/usr/bin/python
# https://gist.github.com/shitalmule04/82d2091e2f43cb63029500b56ab7a8cc
import sqlite3 as sql
import csv
from sqlite3 import Error
import os
from test_folders import getDbDir, getTestDir
path = getTestDir()
db_path = getDbDir()
# Connect to database
conn = sql.connect(db_path + 'mydb.db')
try:
# Create Table into database
conn.execute('''CREATE TABLE IF NOT EXISTS Employee(Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\
Name TEXT NOT NULL, Salary INT NOT NULL
);''')
# Insert some values to database
conn.execute('''INSERT INTO Employee(Name, Salary) VALUES('Laxmi', 30000);''')
conn.execute('''INSERT INTO Employee(Name, Salary) VALUES('Prerna', 40000);''')
conn.execute('''INSERT INTO Employee(Name, Salary) VALUES('Shweta', 30000);''')
conn.execute('''INSERT INTO Employee(Name, Salary) VALUES('Soniya', 50000);''')
conn.execute('''INSERT INTO Employee(Name, Salary) VALUES('Priya', 60000);''')
conn.commit()
# To view table data in table format
print("******Employee Table Data*******")
cur = conn.cursor()
cur.execute('''SELECT * FROM Employee''')
rows = cur.fetchall()
for row in rows:
print(row)
# Export data into CSV file
print("Exporting data into CSV............")
cursor = conn.cursor()
cursor.execute("select * from Employee")
with open(path + "employee_data.csv", "w") as csv_file:
csv_writer = csv.writer(csv_file, delimiter="\t")
csv_writer.writerow([i[0] for i in cursor.description])
csv_writer.writerows(cursor)
dir_path = os.getcwd() + '/' + path + "employee_data.csv"
print("Data exported Successfully into {}".format(dir_path))
except Error as e:
print(e)
# Close database connection
finally:
conn.close()
'''
https://wellsr.com/python/convert-csv-to-sqlite-python-and-export-sqlite-to-csv/
https://www.adamsmith.haus/python/answers/how-to-insert-the-contents-of-a-csv-file-into-an-sqlite3-database-in-python
https://www.codegrepper.com/code-examples/sql/csv+to+sqlite+python
https://www.pythontutorial.net/python-basics/python-write-csv-file/
https://realpython.com/python-csv/
https://www.geeksforgeeks.org/python-os-path-isdir-method/
https://appdividend.com/2022/01/13/python-os-path-isdir-function/
'''
|
17,744 | 4628bf721dc2d0166c22e69c4160e0f6873d186c | import re
import click
from flask.cli import with_appcontext
from models import Pizza, catalog
from extensions import db
def upload_to_db(pizzas):
    """Persist a catalog of pizzas: one DB row per (pizza, size choice) pair.

    NOTE(review): assumes every choice title embeds exactly two integers
    (height in cm, then weight in grams, in that order); the tuple-unpack of
    re.findall raises ValueError otherwise -- confirm against the catalog data.
    """
    for pizza in pizzas:
        for pizza_choice in pizza['choices']:
            height_cm, weight_gr = re.findall(r'\d+', pizza_choice['title'])
            Pizza.create(
                title = pizza.get('title'),
                description = pizza.get('description'),
                height_cm = int(height_cm),
                weight_gr = int(weight_gr),
                price = pizza_choice.get('price'),
            )
@click.command()
@with_appcontext
def create():
    """CLI command: create all database tables for the current Flask app."""
    db.create_all()
    click.echo('created {db}'.format(db=db.get_binds()))
@click.command()
@with_appcontext
def drop():
    """CLI command: drop every table in the database (destructive)."""
    db.drop_all()
    click.echo('table was dropped')
@click.command()
@with_appcontext
def feed():
    """CLI command: seed the database from the hard-coded pizza catalog."""
    click.echo('from catalog got {count} pizzas'.format(count=len(catalog)))
    upload_to_db(catalog)
|
17,745 | 4da8b46adbbfd85298c3ac90df5335daa4c7cee0 | import os
import os.path as osp
import numpy as np
from time import strftime, localtime
from easydict import EasyDict as edict
import yaml
__C = edict()
cfg = __C
__C.stop_words_path = 'logs/stop_words_ch.txt'
__C.words_path = 'logs/yuliao.txt'
__C.model_output_path = 'model/train_w2v.model'
__C.train_df_path = 'data/train_data.csv'
__C.test_df_path = 'data/test_data.csv'
__C.top20_path = 'output/top.csv'
#w2v模型训练参数
__C.train_size = 150
__C.train_window = 3
__C.train_min_count = 3
__C.train_workers = 3
|
17,746 | dbcd91cdfa3b881fd82ea4221cfbadfd23c6f197 | ["__builtins__", "__cached__", "__doc__", "__file__", "__loader__", "__name__", "__package__", "__spec__", "convert2TransformMatrix", "datatypes", "getChainTransform", "getChainTransform2", "getClosestPolygonFromTransform", "getDistance2", "getFilteredTransform", "getInterpolateTransformMatrix", "getOffsetPosition", "getPositionFromMatrix", "getRotationFromAxis", "getSymmetricalTransform", "getTransform", "getTransformFromPos", "getTransformLookingAt", "getTranslation", "get_closes_transform", "get_orientation_from_polygon", "get_raycast_translation_from_mouse_click", "interpolate_rotation", "interpolate_scale", "matchWorldTransform", "math", "nodetypes", "om", "omui", "pm", "quaternionDotProd", "quaternionSlerp", "resetTransform", "setMatrixPosition", "setMatrixRotation", "setMatrixScale", "util", "vector"] |
17,747 | 8ed117d0d1cbf439171e65ef77ec4ec564de6107 | from module.camera_manager import CameraManager
from module.camera_manager import TriggerType
from module.camera_manager import AcquisitionMode
from module.camera_manager import AutoExposureMode
from module.camera_manager import AutoGainMode
import argparse
from pathlib import Path
import cv2
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("trigger", default="software", type=str, choices=["software", "hardware"], help="trigger type")
parser.add_argument("--exp", default=20000, type=int, help="exposure time [us]")
parser.add_argument("--gain", default=0, type=int, help="gain [dB]")
parser.add_argument("--save-dir", default=None, type=Path, help="directory to save images")
parser.add_argument("--num-imgs", default=100, type=int, help="number of images to save")
args = parser.parse_args()
if args.save_dir is not None:
args.save_dir.mkdir(parents=True, exist_ok=False)
cam_manager = CameraManager()
if args.trigger == "software":
cam_manager.choose_trigger_type(TriggerType.SOFTWARE)
elif args.trigger == "hardware":
cam_manager.choose_trigger_type(TriggerType.HARDWARE)
cam_manager.turn_on_trigger_mode()
cam_manager.choose_acquisition_mode(AcquisitionMode.CONTINUOUS)
cam_manager.choose_auto_exposure_mode(AutoExposureMode.OFF)
cam_manager.set_exposure_time(args.exp)
cam_manager.choose_auto_gain_mode(AutoGainMode.OFF)
cam_manager.set_gain(args.gain)
cam_manager.start_acquisition()
count = 0
while True:
if args.trigger == "software":
cam_manager.execute_software_trigger()
img = cam_manager.get_next_image()
if img is None:
continue
cv2.imshow("captured image", cv2.resize(img, (1024, 1024)))
if args.save_dir is not None:
cv2.imwrite(str(args.save_dir / "{:0>6}.png".format(count)), img)
if 0 <= cv2.waitKey(3):
break
count += 1
if args.save_dir is not None and args.num_imgs <= count:
break
cam_manager.stop_acquisition()
|
17,748 | 76e48297b1019227a39531da4542b81e1f1c50dd | nums = list(map(int, input().split()))
selection = int(input())
print(nums[selection-1]) |
17,749 | c884f4b0a4398ae32c28bd04ef8d21c9bc18b496 | import networkx as nx
from copy import deepcopy
def sortedtup(a, b):
    """Return the pair (a, b) as a tuple with the smaller element first."""
    return (a, b) if a <= b else (b, a)
# Modification of nx.contracted_edge to work with keys in MultiGraphs
def contracted_edge_multi(G, edge, self_loops=True):
    """Return a new graph with the two endpoints of ``edge`` contracted.

    Thin key-preserving wrapper over contracted_nodes_multi that first
    validates the edge exists in ``G`` (raises ValueError otherwise).
    """
    if not G.has_edge(*edge):
        raise ValueError('Edge {0} does not exist in graph G; cannot contract'
                         ' it'.format(edge))
    return contracted_nodes_multi(G, *edge, self_loops=self_loops)
# Modification of nx.contracted_nodes to work with keys in MultiGraphs
# Here G must be a nx.MultiGraph
# TODO: figure out what G.copy() does (aka deepcopy(G)), to figure out where the bug was!!
def contracted_nodes_multi(G, u, v, self_loops=True):
    """Return a copy of multigraph ``G`` with node ``v`` merged into ``u``.

    Every edge incident to ``v`` is re-attached to ``u``, preserving its
    key and data (this is the point of the modification vs. the stock
    nx.contracted_nodes). ``v``'s node data is stashed under ``u``'s
    'contraction' attribute. Self-loops produced by the merge are kept
    only when ``self_loops`` is true.
    """
    # BUGFIX: ``chain`` was referenced below but never imported anywhere in
    # this module, so contracting a directed multigraph raised NameError.
    from itertools import chain

    H = G.copy()
    if H.is_directed():
        # Redirect v's incoming and outgoing edges onto u separately.
        in_edges = ((w, u, k, d) for w, x, k, d in G.in_edges(v, keys=True, data=True)
                    if self_loops or w != u)
        out_edges = ((u, w, k, d) for x, w, k, d in G.out_edges(v, keys=True, data=True)
                     if self_loops or w != u)
        new_edges = chain(in_edges, out_edges)
    else:
        new_edges = ((u, w, k, d) for x, w, k, d in G.edges(v, keys=True, data=True)
                     if self_loops or w != u)
    # NOTE: uses the pre-2.0 networkx ``.node`` attribute API, consistent
    # with the rest of this module.
    v_data = H.node[v]
    H.remove_node(v)
    H.add_edges_from(new_edges)
    if 'contraction' in H.node[u]:
        H.node[u]['contraction'][v] = v_data
    else:
        H.node[u]['contraction'] = {v: v_data}
    return H
class MinorGraph(nx.MultiGraph):
    """A MultiGraph that records the keys of edges "hidden" from it.

    Edges removed via :meth:`remove_edge_hidden` (and edges contracted
    away by ``get_minor``) have their keys appended to ``hidden_edges``,
    so a minor can be mapped back to edges of the original graph.
    """

    def __init__(self, G=None):
        # BUGFIX: ``hidden_edges`` used to be a mutable *class* attribute, so
        # the same list was shared by every MinorGraph instance and
        # append_hidden() on one minor silently polluted all others (until a
        # set_hidden() call rebound an instance attribute). Make it
        # per-instance. Also use an explicit super() target instead of
        # super(self.__class__, ...), which recurses under subclassing.
        super(MinorGraph, self).__init__()
        self.hidden_edges = []
        if G is not None:
            self.add_nodes_from(G.nodes(data=True))
            # G may be a MultiGraph (edges take keys) or a plain Graph.
            try:
                self.add_edges_from(G.edges(keys=True, data=True))
            except TypeError:
                self.add_edges_from(G.edges(data=True))

    def add_edge(self, u, v, key=None, attr_dict=None, **attr):
        # Default the key to the order-normalized endpoint pair so every edge
        # carries a stable identifier derived from its original endpoints.
        if key is None:
            super(MinorGraph, self).add_edge(u, v, sortedtup(u, v), attr_dict, **attr)
        else:
            super(MinorGraph, self).add_edge(u, v, key, attr_dict, **attr)

    def remove_edge_hidden(self, u, v, key=None):
        """Remove edge (u, v, key) and remember its key as hidden."""
        super(MinorGraph, self).remove_edge(u, v, key=key)
        self.hidden_edges.append(key)

    def get_hidden(self):
        """Return the list of keys hidden so far."""
        return self.hidden_edges

    def set_hidden(self, hiddens):
        """Replace the hidden-key list wholesale (used when cloning minors)."""
        self.hidden_edges = hiddens

    def append_hidden(self, hidden):
        """Record one more hidden key."""
        self.hidden_edges.append(hidden)

    def to_string(self):
        """Debug representation: current edges plus the hidden-key list."""
        s = "{Edges : "
        s += str(self.edges(keys=True))
        s += ", Hidden edges: "
        s += str(self.hidden_edges) + "}"
        return s
# Here graph is a MinorGraph
# Precondition: i < j and (i,j) is an edge of G
def get_minor(graph,i,j,key):
    """Return a new MinorGraph with edge (i, j) contracted.

    Precondition (per the comment above): i < j and (i, j) is an edge of
    ``graph``. The contracted edge's ``key`` is recorded as hidden, on top
    of a deep copy of ``graph``'s existing hidden keys.
    """
    contracted_graph = contracted_edge_multi(graph,(i,j),False)
    G = MinorGraph(contracted_graph)
    G.set_hidden(deepcopy(graph.get_hidden()))
    G.append_hidden(key)
    return G
def get_bridges(graph):
    """Yield the bridges of ``graph`` as (u, v, key, data) tuples.

    A bridge is detected by temporarily deleting each edge and comparing
    connected-component counts before/after re-adding it. The graph is
    mutated transiently but left structurally unchanged by this generator
    itself (consumers may delete yielded edges between resumptions).
    """
    all_edges = graph.edges(keys=True,data=True)
    for e in all_edges:
        graph.remove_edge(*e[:-1])
        removed_comps = nx.number_connected_components(graph)
        graph.add_edge(*e) # Will maintain the original key associated with this edge
        if nx.number_connected_components(graph) < removed_comps:
            yield e
def remove_bridges(graph):
    """Delete every bridge from ``graph``, recording each key as hidden.

    NOTE(review): bridges are removed while the get_bridges generator is
    still being consumed, so component counts seen for later edges already
    reflect the removed bridges -- presumably intended (a bridge stays a
    bridge as other bridges are removed), but worth confirming.
    """
    all_bridges = get_bridges(graph)
    for bridge in all_bridges:
        graph.remove_edge_hidden(*bridge[:-1])
def get_spanningtrees(graph):
    """Enumerate spanning trees of ``graph`` by deletion/contraction.

    Repeatedly: strip bridges (they belong to every spanning tree and are
    recorded as hidden), then branch on an arbitrary remaining edge ``e``
    -- one branch contracts ``e`` (edge is in the tree), the other deletes
    it (edge is not). A minor with no edges left is a fully-decided tree.

    Returns a list of hidden-edge key lists, one per spanning tree.
    """
    graph_stack = [graph]
    spanning_trees = []
    while graph_stack:
        # print "***", [H.to_string() for H in graph_stack]
        G = graph_stack.pop()
        remove_bridges(G)
        edges_iter = G.edges_iter(data=True,keys=True)
        e = next(edges_iter,None) # Know that e will not be a branch
        # print "Next edge: ", e
        if e is None:
            # No undecided edges remain: this minor encodes one spanning tree.
            spanning_trees.append(G)
        else:
            G1 = get_minor(G,*e[:-1]) # Will automatically make deep copy of G
            G2 = G.copy()
            G2.remove_edge(*e[:-1])
            graph_stack += [G1, G2]
    return [T.get_hidden() for T in spanning_trees]
|
17,750 | 39ea6048988b828b44ca96fe942a2a4795106bc1 | # -*- coding: utf-8 -*-
# ## CHOICES FOR MODELS
# Choices for Passport model
BOTTLE = 'BOTTLE'
BRIQUETTE = 'BRIQ'
GEL = 'GEL'
GRANULES = 'GRANUL'
DUST = 'DUST'
FLUID = 'FLUID'
PENCIL = 'PENCIL'
GLUE = 'GLUE'
SNARE = 'SNARE'
C_EMULSION = 'C_EMULSION'
PAINT = 'PAINT'
LACQUER = 'LACQUER'
TRAP = 'TRAP'
PASTE = 'PASTE'
PLATES = 'PLATES'
POWDER = 'POWDER'
BAIT = 'BAIT'
CANDLES = 'CANDLES'
SPIRALS = 'SPIRALS'
SPRAY = 'SPRAY'
SUSPENSION = 'SUSPENSION'
PILLS = 'PILLS'
CHUNK = 'CHUNK'
EMULSION = 'EMULSION'
PASSPORT_FORM_CHOICES = (
(BOTTLE, u'Аэрозольные баллоны'),
(BRIQUETTE, u'Брикет'),
(GEL, u'Гель'),
(GRANULES, u'Гранулы'),
(DUST, u'Дуст'),
(FLUID, u'Жидкость (раствор)'),
(PENCIL, u'Карандаш'),
(GLUE, u'Клей'),
(SNARE, u'Клейкие ловушки'),
(C_EMULSION, u'Концентрат эмульсии'),
(PAINT, u'Краска'),
(LACQUER, u'Лак'),
(TRAP, u'Ловушка'),
(PASTE, u'Паста'),
(PLATES, u'Пластины'),
(POWDER, u'Порошок'),
(BAIT, u'Приманка'),
(CANDLES, u'Свечи'),
(SPIRALS, u'Спирали'),
(SPRAY, u'Спрей'),
(SUSPENSION, u'Суспензии'),
(PILLS, u'Таблетки'),
(CHUNK, u'Шашки'),
(EMULSION, u'Эмульсии'),
)
PASSPORT_TARGET_OBJECTS_CHOICES = (
('PARASITE1', u'Блохи'),
('PARASITE2', u'Вши головные'),
('PARASITE3', u'Вши платяные'),
('PARASITE4', u'Другие:-Бабочницы;-Мокрицы;-Ногохвостки;-Сверчки;-Пауки;-Уховертки;-Чешуйницы;'),
('PARASITE5', u'Клещи домашней пыли'),
('PARASITE6', u'Клещи иксодовые'),
('PARASITE7', u'Клещи крысиные'),
('PARASITE8', u'Клоп постельный'),
('PARASITE9', u'Кожееды'),
('PARASITE10', u'Комары'),
('PARASITE11', u'Летающие и нелетающие насекомые'),
('PARASITE12', u'Летающие насекомые'),
('PARASITE13', u'Личинки комаров'),
('PARASITE14', u'Мокрицы'),
('PARASITE15', u'Моль платяная'),
('PARASITE16', u'Мошки'),
('PARASITE17', u'Муравьи'),
('PARASITE18', u'Мухи'),
('PARASITE19', u'Нелетающие насекомые'),
('PARASITE20', u'Огневки (вредитель запасов)'),
('PARASITE21', u'Тараканы')
)
# Choices for Enclosure model
DISINSECTION_1 = "DIS_1"
DISINSECTION_2 = "DIS_2"
DISINSECTION_3 = "DIS_3"
IMPREGNATION = "IMP_1"
INSECTICIDE = "INS"
TERRITORY_HANDLING = "TER_HA"
PEDICULOSIS_1 = "PED_1"
PEDICULOSIS_2 = "PED_2"
OTHER = "OTHER"
ENCLOSURE_APP_SCOPE_CHOICES = (
(DISINSECTION_1, u'Дезинсекция белья и вещей'),
(DISINSECTION_2, u'Дезинсекция на объектах железнодорожного транспорта и метрополитена'),
(DISINSECTION_3, u'Дезинсекция помещений от вшей и чесоточных клещей'),
(IMPREGNATION, u'Импрегнация тканей'),
(INSECTICIDE, u'Инсектицид'),
(TERRITORY_HANDLING, u'Обработка территории от иксодовых клещей'),
(PEDICULOSIS_1, u'Педикулицид; взрослые и дети'),
(PEDICULOSIS_2, u'Педикулицид; взрослые с 16 лет'),
(OTHER, u'Уничтожение членистоногих в МО, для обработки отходов классов А, Б и В и др.'),
)
# Choices for ToxChar model
POSITIVE_MOUSE = "POS_MO"
NEGATIVE_MOUSE = "NEG_MO"
POSITIVE_RATS = "POS_RA"
NEGATIVE_RATS = "NEG_RA"
TOXCHAR_RES_ACT_CHOICES = (
(POSITIVE_MOUSE, u'Наличие эффекта, мыши'),
(NEGATIVE_MOUSE, u'Отсутствие эффекта, мыши'),
(POSITIVE_RATS, u'Наличие эффекта, крысы'),
(NEGATIVE_RATS, u'Отсутствие эффекта, крысы'),
)
DANGER_RATE_1 = "DR_1"
DANGER_RATE_2 = "DR_2"
DANGER_RATE_3 = "DR_3"
DANGER_RATE_4 = "DR_4"
TOXCHAR_ZBIOC_AC_CHOICES = (
(DANGER_RATE_1, u'I класс. Чрезвычайно-опасные: менее 10'),
(DANGER_RATE_2, u'II класс. Высокоопасные: 10-30'),
(DANGER_RATE_3, u'III класс. Умеренно-опасные: 31-100'),
(DANGER_RATE_4, u'IV класс. Малоопасные: более 100'),
)
TOXCHAR_ZBIOC_SUBAC_CHOICES = (
(DANGER_RATE_1, u'I класс. Чрезвычайно-опасные: менее 1'),
(DANGER_RATE_2, u'II класс. Высокоопасные: 1-5'),
(DANGER_RATE_3, u'III класс. Умеренно-опасные: 5,1-10'),
(DANGER_RATE_4, u'IV класс. Малоопасные: более 10'),
)
TOXCHAR_C20_CHOICES = (
(DANGER_RATE_1, u'I класс. Чрезвычайно-опасные: гибель'),
(DANGER_RATE_2, u'II класс. Высоко-опасные: клиника отравления, гибель отсутствует'),
(DANGER_RATE_3,
u'III класс. Умеренно-опасные: С20 > Limzc, min изменения интегральных показателей (пороговый уровень)'),
(DANGER_RATE_4, u'IV класс. Мало-опасные: не оказывают токсического действия'),
)
HIGH_RABBITS = "HG_RAB"
NORMAL_RABBITS = "NORMAL_RAB"
WEAK_RABBITS = "WE_RAB"
ABSENCE_RABBITS = "AB_RAB"
NOT_RABBITS = "NO_RAB" # not determine rabbits
HIGH_PIGS = "HG_PIG"
NORMAL_PIGS = "NORMAL_PIG"
WEAK_PIGS = "WE_PIG"
ABSENCE_PIGS = "AB_PIG"
NOT_PIGS = "NO_PIG" # not determine pigs
TOXCHAR_IRRITATION_CHOICES = (
(HIGH_RABBITS, u"Выраженное, кролики"),
(NORMAL_RABBITS, u"Умеренное, кролики"),
(WEAK_RABBITS, u"Слабое, кролики"),
(ABSENCE_RABBITS, u"Отсутствует, кролики"),
(NOT_RABBITS, u"Не определяли, кролики"),
(HIGH_PIGS, u"Выраженное, морские свинки"),
(NORMAL_PIGS, u"Умеренное, морские свинки"),
(WEAK_PIGS, u"Слабое, морские свинки"),
(ABSENCE_PIGS, u"Отсутствует, морские свинки"),
(NOT_PIGS, u"Не определяли, морские свинки"),
)
HIGH_MOUSE = "HG_MOU"
NORMAL_MOUSE = "NORMAL_MOU"
WEAK_MOUSE = "WE_MOU"
ABSENCE_MOUSE = "AB_MOU"
HIGH_PIGS_VKNK = "HG_PIG_VKNK"
NORMAL_PIGS_VKNK = "NORMAL_PIG_VKNK"
WEAK_PIGS_VKNK = "WE_PIG_VKNK"
ABSENCE_PIGS_VKNK = "AB_PIG_VKNK"
NOT_PIGS_VKNK = "NO_PIG_VKNK" # not determine pigs
NORMAL_PIGS_NK = "NORMAL_PIG_NK"
WEAK_PIGS_NK = "WE_PIG_NK"
ABSENCE_PIGS_NK = "AB_PIG_NK"
NOT_PIGS_NK = "NO_PIG_NK" # not determine pigs
TOXCHAR_SENSITIZATION_CHOICES = (
(HIGH_MOUSE, u"ГЗТ мыши – выраженный эффект"),
(NORMAL_MOUSE, u"ГЗТ мыши – умеренный эффект"),
(WEAK_MOUSE, u"ГЗТ мыши – слабый эффект"),
(ABSENCE_MOUSE, u"ГЗТ мыши – отсутствие эффекта"),
(HIGH_PIGS_VKNK, u"Морские свинки в/к и н/к - выраженный эффект"),
(NORMAL_PIGS_VKNK, u"Морские свинки в/к и н/к - умеренный эффект"),
(WEAK_PIGS_VKNK, u"Морские свинки в/к и н/к - слабый эффект"),
(ABSENCE_PIGS_VKNK, u"Морские свинки н/к - выраженный эффект"),
(NOT_PIGS_VKNK, u"Морские свинки в/к и н/к - отсутствие эффекта"),
(NORMAL_PIGS_NK, u"Морские свинки н/к - умеренный эффект"),
(WEAK_PIGS_NK, u"Морские свинки н/к - слабый эффект"),
(NOT_PIGS_NK, u"Морские свинки н/к - отсутствие эффекта"),
)
DANGER_RATE_1_MOUSE = "DR_1_l15_mouse"
DANGER_RATE_2_MOUSE = "DR_2_15_150_mouse"
DANGER_RATE_3_MOUSE = "DR_3_151_5000_mouse"
DANGER_RATE_4_MOUSE = "DR_4__m5000_mouse"
DANGER_RATE_1_RATS = "DR_1_l15_rats"
DANGER_RATE_2_RATS = "DR_2_15_150_rats"
DANGER_RATE_3_RATS = "DR_3_151_5000_rats"
DANGER_RATE_4_RATS = "DR_4__m5000_rats"
TOXCHAR_TOX_STOMACH_CHOICES = (
(DANGER_RATE_1_MOUSE, u"I класс. Чрезвычайно-опасные: менее 15, мыши"),
(DANGER_RATE_2_MOUSE, u"II класс. Высокоопасные: 15-150, мыши"),
(DANGER_RATE_3_MOUSE, u"III класс. Умеренно-опасные: 151-5000, мыши"),
(DANGER_RATE_4_MOUSE, u"IV класс. Малоопасные: более 5000, мыши"),
(DANGER_RATE_1_RATS, u"I класс. Чрезвычайно-опасные: менее 15, крысы"),
(DANGER_RATE_2_RATS, u"II класс. Высокоопасные: 15-150, крысы"),
(DANGER_RATE_3_RATS, u"III класс. Умеренно-опасные: 151-5000, крысы"),
(DANGER_RATE_4_RATS, u"IV класс. Малоопасные: более 5000, крысы"),
)
DANGER_RATE_1_MOUSE = "DR_1_l100_mouse"
DANGER_RATE_2_MOUSE = "DR_2_100_500_mouse"
DANGER_RATE_3_MOUSE = "DR_3_501_2500_mouse"
DANGER_RATE_4_MOUSE = "DR_4__m2500_mouse"
DANGER_RATE_1_RATS = "DR_1_l100_rats"
DANGER_RATE_2_RATS = "DR_2_100_500_rats"
DANGER_RATE_3_RATS = "DR_3_501_2500_rats"
DANGER_RATE_4_RATS = "DR_4__m2500_rats"
TOXCHAR_TOX_SKIN_CHOICES = (
(DANGER_RATE_1_MOUSE, u"I класс. Чрезвычайно-опасные: менее 100, мыши"),
(DANGER_RATE_2_MOUSE, u"II класс. Высокоопасные: 100-500, мыши"),
(DANGER_RATE_3_MOUSE, u"III класс. Умеренно-опасные: 501-2500, мыши"),
(DANGER_RATE_4_MOUSE, u"IV класс. Малоопасные: более 2500, мыши"),
(DANGER_RATE_1_RATS, u"I класс. Чрезвычайно-опасные: менее 100, крысы"),
(DANGER_RATE_2_RATS, u"II класс. Высокоопасные: 100-500, крысы"),
(DANGER_RATE_3_RATS, u"III класс. Умеренно-опасные: 501-2500, крысы"),
(DANGER_RATE_4_RATS, u"IV класс. Малоопасные: более 2500, крысы"),
)
# ##
# Common choices
PEOPLE = "PEOPLE"
PEOPLE_CHILDREN = "PEOPLE_CHILDREN"
SPECIALIST = "SPECIALIST"
SPECIALIST_PEOPLE = "SPECIALIST_PEOPLE"
SPECIALIST_PEOPLE_CHILDREN = "SPECIALIST_PEOPLE_CHILDREN"
PURPOSE_CHOICES = (
(PEOPLE, u'Населением в быту'),
(PEOPLE_CHILDREN, u'Населением в быту, включая детей'),
(SPECIALIST, u'Специалистами'),
(SPECIALIST_PEOPLE, u'Специалистами и населением в быту'),
(SPECIALIST_PEOPLE_CHILDREN, u'Специалистами и населением в быту, включая детей'),
)
# ##
|
17,751 | cd30e6a7f6eb383262be70dc2107cb5fea37c5c0 | #command to perform the same command more than once
for i in range(0,4):
print("here we go again")
#nested loop
for a in range(4):
for b in range(2):
print("na me me dey inside")
print("I am outside") |
17,752 | f062104e998e1a2689f9a8ef1f2c592c83a77e12 | from django.shortcuts import render
# Create your views here.
def page(req):
    """Django view: render the aste_core landing page template."""
    return render(req,'aste_core/index.html')
|
17,753 | a054e75cb5045a58399836aa62089b4ab136bf22 | from datapaths import projectPath, imagePath, docPath
import shutil
# tutorial images
for image in imagePath.glob("*.png"):
shutil.copy2(image, docPath)
# spectra comments
shutil.copy2(
projectPath
/ "specData"
/ "site1"
/ "meas_2012-02-10_11-30-00"
/ "spectra"
/ "comments.txt",
docPath / "viewSpec_comments.txt",
)
shutil.copy2(
projectPath
/ "specData"
/ "site1"
/ "meas_2012-02-10_11-30-00"
/ "notch"
/ "comments.txt",
docPath / "multspec_comments.txt",
)
# statistics
shutil.copy2(
projectPath
/ "statData"
/ "site1"
/ "meas_2012-02-10_11-05-00"
/ "spectra"
/ "coherence"
/ "comments.txt",
docPath / "usingStats_comments.txt",
)
# transfer functions
shutil.copy2(
projectPath / "transFuncData" / "site1" / "128_000" / "site1_fs128_000_spectra",
docPath,
)
shutil.copy2(
projectPath
/ "transFuncData"
/ "site1"
/ "128_000"
/ "site1_fs128_000_spectra_with_Hz",
docPath,
)
# copy config
shutil.copy2("tutorialconfig.ini", docPath)
shutil.copy2("multiconfig.ini", docPath)
shutil.copy2("multiconfigSeparate.ini", docPath)
shutil.copy2("usingWindowSelector.txt", docPath)
# copy the project file
shutil.copy2(projectPath / "mtProj.prj", docPath)
|
17,754 | 95c82902d94c21de6cac7862744bacca17e39b95 | """
select_server.py IO多路复用 -- select方法
"""
from socket import *
from select import select
# 创建监听套接字,作为关注的IO
s = socket()
s.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
s.bind(('0.0.0.0',10311))
s.listen(3)
# 设置关注列表
rlist = [s] # s用于等待处理来自客户端的连接
wlist = xlist = []
# 循环监控IO
while True:
rs,ws,xs = select(rlist,wlist,xlist)
# 遍历返回值列表,处理就绪的IO
for r in rs:
if r is s:
c,addr = r.accept()
print("Connected from",addr)
rlist.append(c) # 增加新的IO监听
else:
# 有客户端发消息
data = r.recv(1024).decode()
# 客户端退出
if not data:
rlist.remove(r)
r.close()
continue
print(data)
r.send(b'OK')
for w in ws:
pass
for x in xs:
pass
|
17,755 | 19e58c1d80fb30c61cbf9e781f6fd252b422c4df | # Generated by Django 2.1.4 on 2018-12-30 01:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('device', '0004_auto_20181230_0135'),
]
operations = [
migrations.AlterField(
model_name='devicemodel',
name='brand',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='device.Brand'),
),
migrations.AlterField(
model_name='devicemodel',
name='mark',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='device.Mark'),
),
migrations.AlterField(
model_name='devicemodel',
name='type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='device.DeviceType'),
),
]
|
17,756 | 0c6ae59c8f91debe6e8797f1e153c6f72080658f | # issues: servo spazzes and moves to other angles when DC going
# DC stops when press to go L/R
# DC one direction spins faster than other: set faster to forward
import RPi.GPIO as GPIO
import sys, tty, termios, time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
dcMotorIn1 = 3
dcMotorIn2 = 5
dcMotorSig = 7
GPIO.setup(dcMotorIn1, GPIO.OUT)
GPIO.setup(dcMotorIn2, GPIO.OUT)
GPIO.setup(dcMotorSig, GPIO.OUT)
dcMotor = GPIO.PWM(7, 100)
dcMotor.start(0)
#dcMotor.ChangeDutyCycle(0)
servoMotorIn = 10
servoMotorSig = 12
GPIO.setup(servoMotorIn, GPIO.OUT)
GPIO.setup(servoMotorSig, GPIO.OUT)
servoMotor = GPIO.PWM(12, 50)
servoMotor.start(7.5)
#servoMotor.ChangeDutyCycle(7.5)
# getch is to determine which key pressed, then return key as variable
def getch():
    """Read a single keypress from stdin without waiting for Enter.

    Temporarily switches the controlling terminal into raw mode, reads one
    character, and always restores the previous terminal settings (the
    finally block guarantees the terminal is never left in raw mode).
    """
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(sys.stdin.fileno())
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
# declares which way motors spin accordingly
def dcMotor_forward():
GPIO.output(dcMotorIn1, True)
GPIO.output(dcMotorIn2, False)
dcMotor.ChangeDutyCycle(35)
GPIO.output(dcMotorSig, True)
time.sleep(1)
GPIO.output(dcMotorSig, False)
def dcMotor_reverse():
GPIO.output(dcMotorIn1, False)
GPIO.output(dcMotorIn2, True)
dcMotor.ChangeDutyCycle(35)
GPIO.output(dcMotorSig, True)
time.sleep(1)
GPIO.output(dcMotorSig, False)
def servoMotor_right():
GPIO.output(servoMotorIn, True)
servoMotor.ChangeDutyCycle(2.5)
GPIO.output(servoMotorSig, True)
# sleep(1)
# GPIO.output(servoMotorSig, False)
# servoMotor.ChangeDutyCycle(0)
def servoMotor_left():
GPIO.output(servoMotorIn, True)
servoMotor.ChangeDutyCycle(12.5)
GPIO.output(servoMotorSig, True)
# sleep(1)
# GPIO.output(servoMotorSig, False)
# servoMotor.ChangeDutyCycle(0)
# toggle steering
def toggleSteering(direction):
    """Step the steering one notch toward ``direction`` ('left'/'right').

    State machine over the module-level ``wheelStatus``: pressing toward
    the side the wheel is already on does nothing; pressing away from a
    turned position first recenters the servo (duty cycle 7.5) -- a second
    press is then needed to turn the other way.
    """
    global wheelStatus
    if(direction == "right"):
        if(wheelStatus == "centre"):
            servoMotor_right()
            wheelStatus = "right"
        elif(wheelStatus == "left"):
            # Coming back from a left turn: recenter first.
            servoMotor.ChangeDutyCycle(7.5)
            wheelStatus = "centre"
    if(direction == "left"):
        if(wheelStatus == "centre"):
            servoMotor_left()
            wheelStatus = "left"
        elif(wheelStatus == "right"):
            # Coming back from a right turn: recenter first.
            servoMotor.ChangeDutyCycle(7.5)
            wheelStatus = "centre"
# all motors don't move when starting
GPIO.output(dcMotorIn1, False)
GPIO.output(dcMotorIn2, False)
GPIO.output(servoMotorIn, False)
# assign global variable
wheelStatus = "centre"
# loop to get keyboard data and run motors
while True:
# Keyboard character retrieval method is called and saved
# into variable
keyPressed = getch()
if(keyPressed == "w"):
dcMotor_forward()
if(keyPressed == "s"):
dcMotor_reverse()
# The "a" key will toggle the steering left
if(keyPressed == "a"):
toggleSteering("left")
# The "d" key will toggle the steering right
if(keyPressed == "d"):
toggleSteering("right")
# The "x" key will break the loop and exit the program
if(keyPressed == "x"):
print("Program Ended")
break
# At the end of each loop the acceleration motor will stop
# and wait for its next command
dcMotor.ChangeDutyCycle(0)
# The keyboard character variable will be set to blank, ready
# to save the next key that is pressed
keyPressed = ""
GPIO.cleanup()
|
17,757 | dd70a857be25f2d26f8a1a53c71a820e5b8438dd | #! /usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
def callback(msg):
print("Ranges:")
print(msg.ranges)
print("Intensities:")
print(msg.intensities)
rospy.init_node('topic_subscriber')
sub = rospy.Subscriber('scan', LaserScan, callback)
rospy.spin()
|
17,758 | 76fc0484fba944b5b63cc6a1bbb2544140af161f | # Generated by Django 3.0 on 2019-12-15 23:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genre', '0001_initial'),
('author', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='author',
name='genre',
),
migrations.AddField(
model_name='author',
name='genre',
field=models.ManyToManyField(to='genre.Genre', verbose_name='Gênero'),
),
]
|
17,759 | 0eaec6cd116f4d105b5865e75c7dd9988f8e71b6 | #Python Web Development Techdegree
#Project 2 - Basketball Team Stat Tool
#Going for Exceeds Expectations rejected if project 2 does not meet all Exceeds Expectations Requirements Thank You.
import constants
import copy
import os
player_data = copy.deepcopy(constants.PLAYERS)
team_names = copy.deepcopy(constants.TEAMS)
team_data = copy.deepcopy(constants.TEAMS)
guardian_list = []
experienced_player = []
non_experienced_player = []
def clean_data():
    """Normalize the raw PLAYERS data in place (module-level player_data).

    - height: leading integer of the height string -> int inches
    - guardians: split on " and ", accumulated into guardian_list
    - experience: "YES"/other -> bool, bucketing each player into
      experienced_player / non_experienced_player as a side effect
    """
    for player in player_data:
        height = player["height"].split()
        player["height"] = int(height[0])
        guardian = player["guardians"].split(" and ")
        for parent in guardian:
            guardian_list.append(parent)
        if player["experience"] == "YES":
            player["experience"] = True
            experienced_player.append(player)
        else:
            player["experience"] = False
            non_experienced_player.append(player)
    return player_data
def draft_teams():
    """Split players evenly across team_data, balancing experience.

    NOTE(review): the slice arithmetic assumes the experienced and
    inexperienced buckets are the same size and divide evenly by the team
    count (and that 3 players per bucket go to the first team) -- true for
    the stock constants, but not in general; confirm before reusing with a
    different roster.
    """
    if int(len(player_data)) % int(len(team_data)) == 0:
        team = 0
        team_length = int(len(team_data))
        draft_increment = int(len(experienced_player)/len(team_data))
        draft_number = 0
        player_per_round = 3
        while team != team_length:
            # Each team takes the same slice window from both buckets so the
            # experienced/inexperienced counts stay equal per team.
            team_data[team] = experienced_player[draft_number:player_per_round] + non_experienced_player[draft_number:player_per_round]
            team += 1
            draft_number += draft_increment
            player_per_round += draft_increment
    return team_data
def clear_screen():
    """Wipe the terminal: ``cls`` on Windows, ``clear`` everywhere else."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
def show_stats(stats):
    """Print the full stat sheet for team number ``stats`` (1-based).

    Aggregates roster names, guardians, experience split and average
    height from team_data, then blocks on input() before clearing.
    """
    clear_screen()
    stats = stats - 1  # convert the 1-based menu choice to a list index
    team_roster = []
    experienced_team_players = []
    inexperienced_team_players = []
    player_height = []
    team_guardians = []
    for player in team_data[stats]:
        team_roster.append(player["name"])
        guardians = player["guardians"].split(" and ")
        for parent in guardians:
            team_guardians.append(parent)
        if player["experience"]:
            experienced_team_players.append(player["name"])
        else:
            inexperienced_team_players.append(player["name"])
        player_height.append(player["height"])
    print("**********{}**********\n".format(team_names[stats]))
    print("Total number of players: {}\n".format(len(team_roster)))
    print("-----TEAM ROSTER-----\n{}\n".format(", ".join(team_roster)))
    print("-----TEAM EXPERIENCE-----\nTotal number of experienced players: {}\nTotal number of inexperienced players: {}\n".format(len(experienced_team_players), len(inexperienced_team_players)))
    # (sic: "Avrage" typo is in the user-facing string; left untouched here)
    print("Avrage height of players:{} inches\n".format(round(sum(player_height) / int(len(team_roster)), 2)))
    print("*****TEAM GUARDIANS*****\n{}".format(", ".join(team_guardians)))
    input("\n\nEnter any key to continue")
    clear_screen()
def team_menu():
    """Interactive loop: list the teams, validate the numeric choice,
    then delegate to show_stats for the selected team.
    """
    while True:
        clear_screen()
        team_number = 1
        print("-----Teams-----")
        for team in team_names:
            print("{}) ".format(team_number), team)
            team_number += 1
        team_input = input("Enter the number corosponding to the team you wish to view\n>")
        try:
            team_input = int(team_input)
            # NOTE(review): an input of 0 passes this check (0 < 0 is False)
            # and show_stats(0) then indexes team -1 -- presumably unintended.
            if team_input < 0 or team_input > len(team_names):
                raise ValueError
        except ValueError:
            clear_screen()
            input("OH NO!!! please Enter the number corosponding to the team you wish to view\nFor example ENTER 1 for {} stats\nEnter any key to continue\n>".format(team_names[0]))
            continue
        else:
            show_stats(team_input)
            break
def main_menu():
    """Top-level menu loop: '1' shows team stats, '2' quits."""
    print("BASKETBALL TEAM STATS TOOL\n")
    while True:
        print("-----------MENU-----------\n1) Display Team Stats\n2)QUIT\n")
        menu_input = input("Enter a corosponding menu number to continue ")
        # Re-prompt until the input is exactly "1" or "2".
        while menu_input != "1" and menu_input != "2":
            clear_screen()
            menu_input = input("OH NO invlad selection!!!\nPlease enter either 1 for TEAM STATS or 2 to QUIT the program\n\n-----------MENU-----------\n\n1) Display Team Stats\n2)QUIT\n> ")
        if menu_input == "1":
            team_menu()
        else:
            clear_screen()
            print("Good Bye")
            break
if __name__ == "__main__":
clean_data()
draft_teams()
main_menu()
|
17,760 | 24980bf7f81c3bf250dd14fddcf3e85cb847055a | from django_hstore.fields import DictionaryField, ReferencesField, SerializedDictionaryField # noqa
from django_hstore.managers import HStoreManager # noqa
from django_hstore.apps import GEODJANGO_INSTALLED
if GEODJANGO_INSTALLED:
from django_hstore.managers import HStoreGeoManager # noqa
import django
if django.get_version() < '1.7':
from . import apps # noqa
|
17,761 | b5d9fd105772e0c23c6c2aa2eb960c7956dc8554 | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import DataRequired, InputRequired, Optional, Email, Length, Required
from wtforms.widgets import SubmitInput, HTMLString
class ContactForm(FlaskForm):
    """Contact-page form: name, email and message are required (email must
    be well-formed, message capped at 300 chars); subject is optional.
    The HTMLString labels embed a required-field marker span.
    """
    name = StringField(HTMLString('Name <span class="required">*</span>'), validators=[Required("Please enter your name."), InputRequired()])
    email = StringField(HTMLString('Email <span class="required">*</span>'), validators=[Required("Please enter your email address."), InputRequired(),Email()])
    subject = StringField('Subject', validators=[Optional()])
    comments = TextAreaField(HTMLString('Message <span class="required">*</span>'), validators=[Required("Don't be shy. Say something."), InputRequired(), Length(max=300)])
    submit = SubmitField('Send')
17,762 | 9381ff6955bf7f35d752673d34f61865ae4f0bfa | """
Calculator library contianing basic math operations
"""
def add(first_term, second_term):
    """Return the sum of ``first_term`` and ``second_term``."""
    result = first_term + second_term
    return result
def substract(first_term, second_term):
    """Return ``first_term - second_term``.

    BUGFIX: the operands were previously reversed (it computed
    ``second_term - first_term``), contradicting the subtraction order
    implied by the parameter names in this calculator library.
    """
    return first_term - second_term
|
17,763 | 9c5672508ee2e2d5dae688b277938af66539d66c | def reverse(number):
rev = 0
while number > 0:
reminder = number % 10
rev = (rev*10) + reminder
number = number//10
return rev
print(reverse(123))
|
17,764 | 7c51e1d44b4c5c9e5cda34ef5878aa4c4943a426 | import datetime as dt
from typing import List, Tuple
from src.repos.adjustmentRepo import Adjustment
def doAdjustmentBeforeForecast(startDate: dt.datetime, endDate: dt.datetime, configDict: dict) -> bool:
    """Run the D-2 adjustment for every day in [startDate, endDate].

    Args:
        startDate: first day (inclusive) to adjust.
        endDate: last day (inclusive) to adjust.
        configDict: application configuration; must contain the
            'con_string_mis_warehouse' connection string.

    Returns:
        True only if the adjustment succeeded for every day in the range.
    """
    adjuster = Adjustment(configDict['con_string_mis_warehouse'])
    successfulDays = 0
    currentDay = startDate
    while currentDay <= endDate:
        if adjuster.doAdjustment(currentDay):
            successfulDays += 1
        currentDay += dt.timedelta(days=1)
    # The range is inclusive on both ends, hence the +1.
    totalDays = (endDate - startDate).days + 1
    return successfulDays == totalDays
|
17,765 | 7418e68f64540d02a9882ad3a811287b310b5389 | # -*- coding: utf-8 -*-
# PhantomJS path
PhantomJS_path = "/Users/lizhaolin/Documents/Software/phantomjs-2.1.1-macosx/bin/phantomjs"
Chrome_path = ""
# LOL首页
LOL_HomePage = "http://www.loldytt.com"
LOL_dongzuo = "http://www.loldytt.com/Dongzuodianying/"
LOL_kehuan = "http://www.loldytt.com/Kehuandianying/" |
17,766 | c837d4ca7bf1bb1e01af465e3f7d39dc5d865ce0 | from glob import glob
import os
from setuptools import setup
# PyPI long description comes straight from the README.
with open('README.md', 'r') as f:
    long_description = f.read()
# Read the version from the main package.
# NOTE(review): if react/__init__.py has no line containing '__version__',
# `version` is never bound and setup() below raises NameError — confirm the
# file always defines it.
with open('react/__init__.py') as f:
    for line in f:
        if '__version__' in line:
            _, version, _ = line.split("'")
            break
# Read the requirements from the file
requirements = [
    "numpy>=1.15",
    "scipy>=0.19.1",
    "nibabel>=3.0.0",
    "scikit-learn>=0.22"
]
setup(
    author='Ottavia Dipasquale, Matteo Frigo',
    author_email='ottavia.dipasquale@kcl.ac.uk',
    license='MIT',
    classifiers=[
        'Intended Audience :: Healthcare Industry',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Development Status :: 4 - Beta',
        'Topic :: Scientific/Engineering :: Image Processing',
        'Topic :: Scientific/Engineering :: Medical Science Apps.',
    ],
    description='A Python package that implements REACT: Receptor-Enriched '
                'Analysis of Functional Connectivity by Targets',
    long_description=long_description,
    long_description_content_type='text/markdown',
    include_package_data=True,
    install_requires=requirements,
    name='react-fmri',
    packages=['react'],
    python_requires='>=3',
    scripts=glob('script/*'),
    url='https://github.com/ottaviadipasquale/react-fmri/',
    version=version,
    project_urls={
        'Source': 'https://github.com/ottaviadipasquale/react-fmri',
        'Bug Reports': 'https://github.com/ottaviadipasquale/react-fmri/issues',
        'Documentation': 'https://github.com/ottaviadipasquale/react-fmri/'
                         'blob/main/README.md',
    },
)
|
17,767 | b17c7ed259a8876e26318999dfc651e716fb6ca3 | import random
def jogar():
    """Run one round of the number-guessing game on the console.

    The player picks a difficulty (number of attempts), then guesses a secret
    number in [1, 100]; each miss subtracts the absolute error from the score.
    All user-facing text stays in Portuguese, as in the original.
    """
    print("*************************************")
    print("* Bem vindo ao jogo de Adivinhação! *")
    print("*************************************")
    numero_secreto = random.randrange(1, 101)  # secret number in [1, 100]
    pontos = 1000                              # starting score
    print("Qual o nível de dificuldade?")
    print("(1) Fácil (2) Médio (3) Difícil")
    nivel = int(input("Defina o nível: "))
    # Difficulty controls how many guesses the player gets.
    if nivel == 1:
        total_de_tentativas = 20
    elif nivel == 2:
        total_de_tentativas = 10
    else:
        total_de_tentativas = 5
    for rodada_atual in range(1, total_de_tentativas + 1):
        print("Tentativa {} de {}".format(rodada_atual, total_de_tentativas))
        chute_str = input("Digite seu palpite entre 1 e 100: ")
        print("Você digitou {}".format(chute_str))
        chute = int(chute_str)
        # Out-of-range guesses don't cost points but do consume the attempt.
        if chute < 1 or chute > 100:
            print("Os palpites devem ser entre 1 e 100")
            continue
        if chute == numero_secreto:
            print("Asertô, mizerávi! Toma {} pontos".format(pontos))
            break
        if chute > numero_secreto:
            print("Errôu! O palpite é maior!")
        else:
            print("Errôu! O palpite é menor!")
        # BUG FIX: removed the dead "rodada_atual = rodada_atual + 1" left over
        # from the old while-loop version — the for-loop already advances it.
        pontos_perdidos = abs(numero_secreto - chute)  # penalty = distance from secret
        pontos = pontos - pontos_perdidos
    print("Fim de jogo! O número era {}.".format(numero_secreto))
if(__name__ == "__main__"):
jogar() |
17,768 | aa7fbb4f717869da78cfaea2fdff3c993bd3faef | import matplotlib.pyplot as plt
plt.rc('font', family='Times New Roman')
fig, axe = plt.subplots(1, 1, figsize=(8, 5))
font1 = {'family': 'Times New Roman', 'size': '15'}
# Labels, values and colours for each wedge of the pie chart
labels = ['food', 'clothing', 'housing', 'transport']
values = [0.35, 0.15, 0.2, 0.3]
colors = ['#D2ACA3', '#EBDFDF', '#DE6B58', '#E1A084']
# Offset (explode) the second wedge for emphasis
explode = [0, 0.1, 0, 0]
# Title
axe.set_title("daily cost", fontdict=font1)
# Draw the pie chart (percentages shown with one decimal place)
wedge, texts, pcts = axe.pie(values, labels=labels, colors=colors, startangle=45, autopct='%3.1f%%'
                             , explode=explode)
axe.axis('equal')
# Legend
axe.legend(wedge, labels, fontsize=10, title='event', loc=2)
# Set label/percentage text properties
plt.setp(texts, size=12)
plt.setp(pcts, size=12)
plt.show()
fig.savefig("bingtu.png", dpi=800)
|
17,769 | 107dc2503a8763175a984ec84d3b40a7387b9cc3 | import urllib.request
#import requests
#!conda install -c anaconda beautifulsoup4
from bs4 import BeautifulSoup
# import pandas as pd
# Scrape the Toronto postal-code table from Wikipedia and print its cells,
# skipping rows whose following cell reads "Not assigned".
url = 'https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M'
html = urllib.request.urlopen(url).read()
soup = BeautifulSoup(html,'html.parser')
#tags = soup('table')
result = soup.find("table",{"class":'wikitable sortable'})
#test = result.findAll('td')
#print(type(result),type(soup))
rows = result.findAll('tr')
for row in rows:
    #print('in Row')
    print(row.text,'ROW TEXT')
    cols = row.findAll('td')
    for col in cols:
        # NOTE(review): findNext('td') inspects the *following* cell, so this
        # abandons the rest of the row when the next cell is "Not assigned" —
        # confirm that is the intended filter.
        if(col.findNext('td').text.strip() == 'Not assigned'):
            # print('This actually workssssssss')
            break
        print(col.text,'COL TEXT')
# for trow in result:
#     print('Working')
# df = pd.read_html(soup)
# df.head()
|
17,770 | e2df236154f082fd11be4c7d245c0505de1de73c | n=int(input("enter the no. here: "))
def frst_d(n):
    """Return the first (most significant) decimal digit of n.

    BUG FIX: the original `return n` sat inside the while body, so the loop
    exited after a single division (123 -> 12) and a one-digit input fell
    through returning None; the return now happens only after every trailing
    digit has been stripped.
    """
    while n >= 10:
        n = n // 10
    return n
def lst_d(n):
    """Return the last (least significant) decimal digit of n."""
    return n % 10
print("the first digit is: ",frst_d(n))
print("the last digit is: ",lst_d(n))
# NOTE(review): `sum` shadows the built-in of the same name from here on.
sum=frst_d(n)+lst_d(n)
print("the sum of first and last digit is: ",sum)
|
17,771 | ff00d24ceb3c769e6db55e714c63d24cf4110b88 | import unittest
import mycode as m
class mytest(unittest.TestCase):
    """Regression tests for mycode.hello / mycode.calculer_score."""

    def test_hello_world(self):
        # BUG FIX: 'self' was missing from the signature, so unittest raised a
        # TypeError when invoking this test (and the body's `self` a NameError).
        self.assertEqual(m.hello(), "hello world")

    def test_score_joseph(self):
        self.assertEqual(m.calculer_score("Joseph", "16"), "66")  # MODIFIE

    def test_score_marie(self):
        self.assertEqual(m.calculer_score("Marie", "33"), "50")

    def test_score_marc(self):
        self.assertEqual(m.calculer_score("Marc", "60"), "43")

    def test_score_ely(self):
        self.assertEqual(m.calculer_score("Broly", "28"), "75")
if __name__ == '__name__':
unittest.main() |
17,772 | 5afd7b061e27610cea8d351f4de5fcb76798d92c | # This python file need to be run on the zerorpc server. Call the member method through zerorpc
from __future__ import print_function
from enhance import supic_process as real_predict_sr
import sys
import zerorpc
import argument_sr
from monodepth_inference import args as args_monodepth
from monodepth_inference import predict_depth as real_predict_depth
from tensorflow import reset_default_graph
from PIL import Image
import imageFilter
class PredictApi(object):
    """Prediction API exposed over zerorpc to clients in other languages.

    Each method either returns its normal result or a string starting with
    '!ERROR' so callers can detect failures without exception marshalling.
    """

    def predict_sr(self, input_path, output_dir, out_width, out_height, pic_type):
        """ render and store temp sr images in output_dir """
        try:
            rendered = real_predict_sr(input_path, output_dir, int(out_width), int(out_height))
            reset_default_graph()  # free the TF graph between requests
            return rendered
        except Exception as exc:
            return '!ERROR' + str(exc)

    def predict_depth(self, input_path, output_dir):
        """ render and store temp depth image in output_dir """
        try:
            depth = real_predict_depth(input_path, output_dir)
            reset_default_graph()
            return depth
        except Exception as exc:
            return '!ERROR' + str(exc)

    def save_file(self, input_path, output_path):
        """ rename and save file to output_path """
        try:
            Image.open(input_path).save(output_path)
            return output_path
        except Exception as exc:
            return '!ERROR' + str(exc)

    def lens_blur(self, input_path, depthmap_path, min_focal, max_focal, transition, radius, brightness, output_dir, speed):
        """ lens blur: pure pass-through to the image-filter implementation """
        return imageFilter.lens_blur(input_path, depthmap_path, min_focal, max_focal, transition, radius, brightness, output_dir, speed)
def parse_port():
    """Return the zerorpc port (from args_monodepth) as a string; default 4242."""
    try:
        port = args_monodepth.port
    except Exception:
        port = 4242
    return '{}'.format(port)
def parse_path():
    """Resolve the model checkpoint root and, as a side effect, push the
    derived sub-paths into the global config modules (argument_sr,
    args_monodepth)."""
    path = './'
    try:
        path = args_monodepth.path
    except Exception as e:
        pass  # fall back to the current directory when no path was provided
    argument_sr.options.save_path = path + '/sr/'
    args_monodepth.checkpoint_path = path + '/depth/monodepth'
def main():
    """Start the zerorpc server exposing PredictApi on localhost."""
    endpoint = 'tcp://127.0.0.1:' + parse_port()
    parse_path()
    server = zerorpc.Server(PredictApi())
    server.bind(endpoint)
    print('start running on {}'.format(endpoint))
    server.run()
if __name__ == '__main__':
main()
|
17,773 | 0fa8ed92442d492623e76db1d515d38d5b311d26 | from flask import Flask
from config import app_config
def create_app(config_name):
    """Application factory: build and configure the Flask app for config_name."""
    app = Flask(__name__)
    app.config.from_object(app_config[config_name])
    if not app.config.get("SQLALCHEMY_DATABASE_URI"):
        raise Exception("Environment variable SQLALCHEMY_DATABASE_URI must be defined")
    from flask_restful import Api
    rest_api = Api(app)
    app.api = rest_api  # expose the Api instance as an app attribute
    with app.app_context():
        rest_api.init_app(app)
        # Imported for their registration side effects only.
        from . import sqlalquemy_store
        from . import routes
        from . import healthcheck_routes
    return app
|
17,774 | de50b900dcc3474f3acf67f9d488df94ac10e710 | def plus(a, b):
    """Return the sum of a and b."""
    return a + b
# Demonstrate positional and keyword invocation.
print(plus(2, 3))
print(plus(a=1, b=2))
|
17,775 | d48c46d7d45cfb039a3e4e4a6c5b51101f2f47c5 | import unittest
from datetime import datetime, timedelta
from pathlib import Path
from unittest.mock import patch, MagicMock, mock_open, call
from topper.file_io import FileReader, FolderReader, FileWriter
class TestFileReader(unittest.TestCase):
    """
    Unit tests of file utils
    """

    def setUp(self):
        # Fresh reader per test; nothing touches the real filesystem.
        self.file_reader = FileReader(path='my/path/file.log', checkpoint_dir='checkpoint_directory/')

    def test_reader_constructor(self):
        self.assertIsNotNone(self.file_reader)
        self.assertEqual(Path('my/path/file.log'), self.file_reader.path)
        self.assertEqual(Path('checkpoint_directory/'), self.file_reader._checkpoint_dir)

    def test__countries_iso2(self):
        list_countries = self.file_reader._countries_iso2()
        # At least 240 countries
        self.assertLess(240, len(list_countries))
        # France and Spain in list
        self.assertTrue('FR' in list_countries)
        self.assertTrue('ES' in list_countries)

    def test__check_file_name(self):
        # Mock reject_file method
        self.file_reader.reject_file = MagicMock(return_value=None)
        # doesn't match regex
        result_ko = self.file_reader.check_file_name('bad_name.log')
        self.assertFalse(result_ko)
        # Date out of range: 41th of february
        result_bad_date = self.file_reader.check_file_name('listen-20200241.log')
        self.assertFalse(result_bad_date)
        # test OK
        result_ok = self.file_reader.check_file_name('listen-20200211.log')
        self.assertTrue(result_ok)

    def test_read_file(self):
        # Read file OK: a valid name must reach the parser.
        with patch('topper.file_io.FileReader._parse_log_file', MagicMock()) as mock_parse_file:
            self.file_reader.check_file_name = MagicMock(return_value=True)
            self.file_reader.path = PathMock('valid.log', is_a_file=True)
            self.file_reader.read_file()
            mock_parse_file.assert_called()
        # Read file KO: an invalid name is rejected and logged as an error.
        with patch('topper.file_io.FileReader.reject_file', MagicMock()) as mock_reject_file, \
                self.assertLogs(self.file_reader.logger, level='WARNING') as cm:
            self.file_reader.check_file_name = MagicMock(return_value=False)
            self.file_reader.path = PathMock('invalid.log', is_a_file=True)
            self.file_reader.read_file()
            mock_reject_file.assert_called()
            self.assertEqual(['ERROR:topper.file_io:Can\'t process file=invalid.log'], cm.output)

    def test_parse_file(self):
        self.file_reader._countries_iso2 = MagicMock(return_value=['FR', 'GB', 'ES'])
        # 2 valid lines, 2 invalid lines (invalid pattern + country not in list)
        open_mock_return_value = ['12|15|FR', '2929292902|120200120|ES', '12|FR', '12|15|AA']
        opener = mock_open(read_data='\n'.join(open_mock_return_value))
        with patch("pathlib.Path.open", opener) as mock_open_file, \
                self.assertLogs(self.file_reader.logger, level='WARNING') as cm:
            result = list(self.file_reader._parse_log_file())
            mock_open_file.assert_called()
            self.assertEqual([('FR', '15', '12'), ('ES', '120200120', '2929292902')], result)
            self.assertEqual(['WARNING:topper.file_io:Line is incorrect. Pattern invalid: 12|FR\n',
                              "WARNING:topper.file_io:Line is incorrect. The country 'AA' doesn't exists"],
                             cm.output)

    def test_reject_file(self):
        # Rejection must create the reject folder and move the file into it.
        with patch("pathlib.Path.rename") as mock_path_rename, \
                patch("pathlib.Path.mkdir") as mock_path_mkdir:
            self.file_reader.reject_file()
            mock_path_mkdir.assert_called()
            mock_path_rename.assert_called_with(self.file_reader._checkpoint_dir /
                                                self.file_reader.REJECT_FOLDER / 'file.log')

    def test_move_file_archive(self):
        with patch("pathlib.Path.rename") as mock_path_rename, \
                patch("pathlib.Path.mkdir") as mock_path_mkdir:
            self.file_reader.move_file_archive()
            mock_path_mkdir.assert_called()
            mock_path_rename.assert_called_with(self.file_reader._checkpoint_dir /
                                                self.file_reader.ARCHIVE_FOLDER / 'file.log')

    def test_move_file_top_days(self):
        with patch("pathlib.Path.rename") as mock_path_rename, \
                patch("pathlib.Path.mkdir") as mock_path_mkdir, \
                patch("pathlib.Path.is_file", return_value=True):
            self.file_reader.path = Path('my/path/listen-20200213.log')
            self.file_reader.move_file_top_days()
            mock_path_mkdir.assert_called()
            mock_path_rename.assert_called_with(self.file_reader._checkpoint_dir /
                                                self.file_reader.CURRENT_FOLDER / 'listen-20200213.log')
class TestFolderReader(unittest.TestCase):
    """
    Unit tests of folder utils
    """

    def setUp(self):
        self.folder_reader = FolderReader(_path_dir='my/path/', _checkpoint_directory='checkpoint/')

    def test_list_files(self):
        # Mock reject_file method
        self.folder_reader.reject_file = MagicMock(return_value=None)
        # Empty folder
        with patch('pathlib.Path.iterdir', return_value=[]):
            self.assertEqual([], self.folder_reader._list_files())
        # 1 match, 1 not match
        mock_files = [PathMock('bad_format.log', True), PathMock('listen-20200211.log', True)]
        with patch('pathlib.Path.iterdir', return_value=mock_files), \
                patch('topper.file_io.FileReader.reject_file', return_value=None):
            result = self.folder_reader._list_files()
            get_name = list(map(lambda x: x.path.name, result))
            self.assertEqual(['listen-20200211.log'], get_name)

    def test_read_folder_days(self):
        # Test KO: path is a file
        self.folder_reader._path_dir = PathMock(name='my_file.log', is_a_file=True, is_a_dir=False)
        with self.assertLogs(self.folder_reader.logger, level='WARNING') as cm:
            self.folder_reader.read_folder_current()
            self.assertEqual(['ERROR:topper.file_io:Path provided is not a directory: my_file.log'], cm.output)
        # Test: 2 valid files
        self.folder_reader._path_dir = PathMock(name='my_path/', is_a_file=False, is_a_dir=True)
        self.folder_reader._list_files = MagicMock(return_value=[FileReader('my_path/1.log', '/'),
                                                                 FileReader('my_path/2.log', '/')])
        with patch('topper.file_io.FileReader.read_file', MagicMock(return_value=True)) as mock_read_file:
            self.folder_reader.read_folder_current()
            mock_read_file.assert_has_calls([call(), call()])  # 2 calls because 2 valid files

    def test_archive_old_files(self):
        today = datetime.today()
        three_days_ago_str = (today - timedelta(days=3)).strftime('%Y%m%d')
        two_days_ago_str = (today - timedelta(days=2)).strftime('%Y%m%d')
        # 1 match, 1 not match
        # BUG FIX: these were built with pathlib.Path, which takes no
        # 'is_a_file' keyword (TypeError at collection time); PathMock is what
        # every other test feeds the patched iterdir.
        mock_files = [PathMock('listen-{}.log'.format(three_days_ago_str), is_a_file=True),
                      PathMock('listen-{}.log'.format(two_days_ago_str), is_a_file=True)]
        with patch('pathlib.Path.iterdir', return_value=mock_files), \
                patch('topper.file_io.FileReader.move_file_archive', return_value=None) as mock_archive, \
                patch("pathlib.Path.is_dir", return_value=True):
            self.folder_reader.archive_old_files(3)
            # Only 1 call on move archive: file 3 days ago
            mock_archive.assert_called_once()
class TestFileWriter(unittest.TestCase):
    """
    Unit tests of Writer utils
    """

    def setUp(self):
        self.file_writer = FileWriter(path='my/path/file.txt')

    def test_write_result(self):
        # Expected serialisation: one line per country, "id,count" pairs
        # joined by ':' after a '|' separator.
        expected_line1 = call('FR|13,3:16,2:20192323,1\n')
        expected_line2 = call('GB|1313,42:16,28:23,14\n')
        input_data = {'FR': [(13, 3), (16, 2), (20192323, 1)], 'GB': [(1313, 42), (16, 28), (23, 14)]}
        open_mock = mock_open()
        with patch("builtins.open", open_mock, create=True), \
                patch("pathlib.Path.mkdir") as mock_path_mkdir:
            self.file_writer.write_result(input_data)
            mock_path_mkdir.assert_called()
            open_mock.assert_called_with(Path("my/path/file.txt"), mode="w")
            open_mock.return_value.write.assert_has_calls([expected_line1, expected_line2])
class PathMock:
    """Lightweight pathlib.Path stand-in exposing just name/is_file/is_dir."""

    def __init__(self, name, is_a_file=True, is_a_dir=False):
        # Attribute names intentionally match the original so tests that set
        # or read them directly keep working.
        self.name = name
        self.is_a_file = is_a_file
        self.is_a_dir = is_a_dir

    def is_file(self):
        """Mimic Path.is_file()."""
        return self.is_a_file

    def is_dir(self):
        """Mimic Path.is_dir()."""
        return self.is_a_dir

    def __repr__(self):
        # Render as the bare name, as assertion messages expect.
        return self.name
if __name__ == '__main__':
unittest.main()
|
17,776 | 8d0bb0e29b8359552f0a18c8f87522176f9cb3b6 | # import
# BUG FIX: `httplib` only exists on Python 2, while this snippet already uses
# the Python 3 print() function; the module is named http.client on Python 3.
import http.client

# Create a connection to the host.
connection = http.client.HTTPConnection('www.google.com')
# Send HTTP request.
connection.request('GET','/')
# Get the response and read its body (bytes).
get_response = connection.getresponse()
response_data = get_response.read()
print(response_data)
|
17,777 | 9e8b980d2342556738f866d882bbfa5e00a0a749 | from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
import bcrypt
def index(request):
    """Render the index page, tracking a per-session 'gold' visit counter.

    First visit initialises the counter to 0; every later visit increments it.
    """
    if 'gold' in request.session:
        request.session['gold'] += 1
    else:
        request.session['gold'] = 0
    return render(request, "index.html")
def register_owner(request):
    """Validate the registration POST and create a new Owner.

    On validation failure every error message is flashed and the user is sent
    back to the index; on success the new owner's id is stored in the session
    and the user is redirected to /success.
    """
    print("1. inside views.py register_owner method")  # debug trace, left in place
    errors = Owner.objects.registerValidator(request)
    print("4. back inside views.py register_owner method")
    print("5.", errors)
    if errors:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect("/")
    else:
        # create owner in database
        # bcrypt salts and hashes the raw password; only the hash is persisted.
        hash = bcrypt.hashpw(request.POST["password"].encode(), bcrypt.gensalt())
        # print(hash)
        # print(hash.decode())
        owner = Owner.objects.create(fname=request.POST["fname"], lname=request.POST["lname"], email=request.POST["email"],password=hash.decode())
        # store id in session
        request.session['id'] = owner.id
        return redirect("/success")
def success(request):
    """Greet the owner whose id was stored in the session at registration."""
    owner = Owner.objects.get(id=request.session['id'])
    return render(request, "success.html", {'fname': owner.fname})
|
17,778 | 6d129192c67481c8e0f1c5e61b4cd468d20a440a | /Users/freddydrennan/anaconda/lib/python3.6/io.py |
17,779 | 117e8120edff47393596f4a5108245ae34d64308 | ## CommonRegex
# Demo of the `commonregex` package: `ip` behaves like a compiled IPv4 regex
# object, so re.Pattern-style methods (findall, fullmatch) apply — TODO confirm
# against the installed commonregex version.
from commonregex import ip
data = 'hello 255.21.255.22 okay'
ip.findall(data)
data = '23.14.2.4.2 255.21.255.22 567.12.2.1'
ip.findall(data)
# Keep only whitespace-separated tokens that are entirely an IP address.
[e for e in data.split() if ip.fullmatch(e)]
|
17,780 | 12f5c1ef0300903fbf6399300c90308f7e2ad10a | #!/usr/bin/env python3
import torch
from constants import *
# import sys
# from IPython.core import ultratb
# sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux', call_pdb=1)
def get_vars(batch_sz, X_test_t, Y_test_t):
    """Allocate uninitialised batch buffers on DEVICE for inputs and
    vector-valued targets, each sized for batch_sz rows."""
    feat_dim = X_test_t.size(1)
    targ_dim = Y_test_t.size(1)
    data_buf = torch.empty(batch_sz, feat_dim, device=DEVICE)
    target_buf = torch.empty(batch_sz, targ_dim, device=DEVICE)
    return data_buf, target_buf
def get_vars_scalar_out(batch_sz, X_test_t, Y_test_t):
    """Allocate batch buffers on DEVICE for inputs and scalar (class-index)
    targets — the long-dtype target tensor is 1-D."""
    data_buf = torch.empty(batch_sz, X_test_t.size(1), device=DEVICE)
    target_buf = torch.empty(batch_sz, dtype=torch.long, device=DEVICE)
    return data_buf, target_buf
# General batch evaluation
def get_cost_helper(batch_sz, epoch, model, X_test_t, Y_test_t,
                    loss_fn, var_getter_fn):
    """Compute the average test-set loss of `model`, batch by batch.

    var_getter_fn allocates the reusable (data, target) buffers; the
    scalar-output variant is used for class-index targets.
    NOTE(review): `epoch` is accepted but unused here — presumably kept for a
    signature shared with the training loop; confirm.
    """
    test_cost = 0
    batch_data_, batch_targets_ = var_getter_fn(
        batch_sz, X_test_t, Y_test_t)
    size = batch_sz
    for i in range(0, X_test_t.size(0), batch_sz):
        # Deal with potentially incomplete (last) batch
        if i + batch_sz > X_test_t.size(0):
            size = X_test_t.size(0) - i
            batch_data_, batch_targets_ = var_getter_fn(
                size, X_test_t, Y_test_t)
        batch_data_.data[:] = X_test_t[i:i+size]
        batch_targets_.data[:] = Y_test_t[i:i+size]
        preds = model(batch_data_)
        batch_cost = loss_fn(preds, batch_targets_)
        # Keep running average of loss (weighted by actual batch size)
        test_cost += (batch_cost - test_cost) * size / (i + size)
    print('TEST SET RESULTS:' + ' ' * 20)
    print('Average loss: {:.4f}'.format(test_cost.item()))
    return test_cost
def get_cost(batch_sz, epoch, model, X_test_t, Y_test_t, loss_fn):
    """Batched average test loss for vector-valued targets."""
    return get_cost_helper(batch_sz, epoch, model, X_test_t, Y_test_t, loss_fn, get_vars)
def get_cost_nll(batch_sz, epoch, model, X_test_t, Y_test_t, loss_fn):
    """Batched average test loss for scalar (class-index) targets, e.g. NLL."""
    return get_cost_helper(batch_sz, epoch, model, X_test_t, Y_test_t, loss_fn, get_vars_scalar_out)
|
17,781 | 1ec71f22c614ca1da48305b8e78c0bd9216f5741 | n,m = map(int,input().split())
# NOTE(review): appears to count the integers strictly between n and m (or a
# contest-specific variant): when the interval spans zero one value is
# dropped — confirm against the original problem statement.
if n < 0 and m < 0:
    ans = m - n
elif n < 0 and m > 0:
    ans = m - n - 1
else:
    ans = m - n
print(ans) |
17,782 | bdb0d65068cb450c2cdb2ff366859f3ea6877628 | from django.urls import reverse_lazy
from .base import *
from .utils import get_env_var
DEBUG = True
# Development-only tooling stacked on top of the base app/middleware lists.
INSTALLED_APPS += [
    'debug_toolbar',
    'django_extensions',
]
MIDDLEWARE += [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = ('127.0.0.1',)  # hosts allowed to see the debug toolbar
SECRET_KEY = get_env_var("SECRET_KEY")
# Postgres connection; every credential comes from the environment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': get_env_var("NAME"),
        'USER': get_env_var("USER"),
        'PASSWORD': get_env_var("PASSWORD"),
        'HOST': get_env_var("HOST"),
        'PORT': get_env_var("PORT"),
    }
}
ALLOWED_HOSTS = [
    '127.0.0.1',
    'localhost',
]
# Echo SQL queries to the console while developing.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        }
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    }
}
|
17,783 | f60065e860f5f1a14ce7263c32f63cf80840a69a | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from cta.fornecedor.forms import FornecedorForm
from cta.fornecedor.models import Fornecedor
# Create your views here.
from django.contrib.auth.decorators import login_required
@login_required(login_url = 'core:userlogin')
def cadastro_fornecedor(request):
    """Create a new supplier (Fornecedor).

    GET renders an empty form; a valid POST saves and re-renders a fresh form.
    BUG FIX: an *invalid* POST previously fell through every branch without
    returning, so Django raised "view didn't return an HttpResponse" — it now
    re-renders the bound form so field errors are shown.
    """
    newcadastro = 'S'
    if request.method == 'POST':
        fornecedor_form = FornecedorForm(request.POST, request.FILES)
        if fornecedor_form.is_valid():
            fornecedor_form.save()
            fornecedor_form = FornecedorForm()  # fresh form after a successful save
    else:
        fornecedor_form = FornecedorForm()
    return render(request, 'cadastro_fornecedor.html', {'fornecedor_form': fornecedor_form, 'newcadastro': newcadastro})
def apagar_fornecedor(request, id):
    """Delete the supplier with the given primary key, then return to the list."""
    Fornecedor.objects.get(id=id).delete()
    return redirect(reverse('core:consultafornecedor'))
def editar_fornecedor(request,id):
    """Edit an existing supplier; invalid POSTs re-render the bound form.

    BUG FIX: `is_valid` is a method — without the call parentheses the bound
    method object is always truthy, so invalid submissions were saved (or
    crashed on save).
    """
    fornecedores = Fornecedor.objects.get(id=id)
    newcadastro = 'N'
    if request.method == 'POST':
        fornecedor_form = FornecedorForm(request.POST, instance=fornecedores)
        if fornecedor_form.is_valid():
            fornecedor_form.save()
            return redirect(reverse('core:consultafornecedor'))
        else:
            print(fornecedor_form.errors)  # debug trace, left in place
    else:
        fornecedor_form = FornecedorForm(instance=fornecedores)
return render(request, 'cadastro_fornecedor.html',{'fornecedor_form':fornecedor_form, 'newcadastro': newcadastro}) |
17,784 | c938a230318e9f5915ffb9fc903d15c5244817f1 | __author__ = 'Lunzhy'
import os, sys
path = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, os.pardir))
if not path in sys.path:
sys.path.append(path)
import matplotlib.pyplot as plt
from Submissions.SISPAD2014 import *
from QuickView.TwoDim import TrapOccupy as occ
import lib.common as comm
import lib.format as fmt
from matplotlib import font_manager
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
Main_path = Directory_Sispad2014
Main_prj = 'retention'
Prj_300K = r'4nm_300K_1.6eV_PF2e11_T2B5e5' # thick_1.6eV_PF1e10, 4nm_300K_1.6eV_PF2e11_T2B2e6
Prj_350K = r'4nm_350K_1.6eV_PF2e11_T2B5e5'
Time_to_plot = 1e6
def plotTrappedDensity(prj_path, ax):
    """Draw one project's trapped-charge density at Time_to_plot onto ax and
    return the resulting image handle (for the shared colorbar)."""
    return occ.plotDensitySingleTime(ax, prj_path, Time_to_plot)
def formatAxes(ax):
    """Apply the paper's axis styling: labels, Times New Roman fonts, and a
    frameless look (zero-width spines and tick marks)."""
    ax.set_xlabel('X (nm)', labelpad=0)
    ax.set_ylabel('Y (nm)', labelpad=0)
    ax.set_yticks([4, 6, 8, 10, 12])
    # Zero-width spines effectively hide the axes frame.
    for axis in ['top', 'bottom', 'left', 'right']:
        ax.spines[axis].set_linewidth(0)
    ticks_font = font_manager.FontProperties(family='times new roman', style='normal',
                                             size=22, weight='normal', stretch='normal')
    labels_font = font_manager.FontProperties(family='times new roman', style='normal',
                                              size=24, weight='normal', stretch='normal')
    for label_item in ([ax.xaxis.label, ax.yaxis.label]):
        label_item.set_fontproperties(labels_font)
    for label_item in (ax.get_xticklabels() + ax.get_yticklabels()):
        label_item.set_fontproperties(ticks_font)
    # Zero-width tick marks keep only the labels visible.
    ax.xaxis.set_tick_params(which='major', width=0, size=5)
    ax.xaxis.set_tick_params(which='minor', width=0, size=3)
    ax.yaxis.set_tick_params(which='major', width=0, size=5)
    ax.yaxis.set_tick_params(which='minor', width=0, size=3)
    for tick in ax.get_xaxis().get_major_ticks():
        tick.set_pad(8.)
    return
def plotTempCmp():
    """Stack the 300K/350K trapped-density maps with one shared colorbar, then
    hand the figure to drawFig for saving."""
    fig = plt.figure()
    ax_low = fig.add_subplot(211)
    ax_high = fig.add_subplot(212)
    plotTrappedDensity(os.path.join(Main_path, Main_prj, Prj_300K), ax_low)
    im = plotTrappedDensity(os.path.join(Main_path, Main_prj, Prj_350K), ax_high)
    formatAxes(ax_low)
    formatAxes(ax_high)
    plt.tight_layout()
    fig.subplots_adjust(hspace=0.4, right=0.82)  # leave room for the colorbar
    ax_cb = fig.add_axes([0.88, 0.2, 0.03, 0.7])
    cb = fig.colorbar(im, cax=ax_cb, extend='both')
    cb.set_label('Trapped Electron Density ($\mathbf{cm^{-3}}$)', rotation=90, labelpad=5)
    fmt.setColorbar(cb, font_size=22)
    # NOTE(review): drawFig is not defined in this file — presumably pulled in
    # by one of the star imports above; confirm.
    drawFig(fig, 'Temperature_4nm_5e5')
    return
def main():
    """Render the temperature-comparison figure and display it."""
    plotTempCmp()
    plt.show()
if __name__ == '__main__':
main() |
17,785 | 6397a44f5606b5388185f93cf3863e32a22ce571 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provides a simple script for running module doctests.
This should work even if the target module is unaware of xdoctest.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
def main():
    """
    python -m xdoctest xdoctest all
    python -m xdoctest networkx all --options=+IGNORE_WHITESPACE
    """
    import argparse
    from xdoctest import utils
    description = utils.codeblock(
        '''
        discover and run doctests within a python package
        ''')
    parser = argparse.ArgumentParser(prog='python -m xdoctest', description=description)
    parser.add_argument('modname', help='what files to run')
    parser.add_argument('command', help='a doctest name or a command (list|all)', default='list')
    # Cleaned up: the options below were spelled add_argument(*('--style',), ...);
    # unpacking a single-element tuple was a no-op.
    parser.add_argument('--style', type=str, help='choose your style',
                        choices=['auto', 'google', 'freeform'], default='auto')
    parser.add_argument('--options', type=str,
                        help='specify the default directive state',
                        default=None)
    parser.add_argument('--offset', dest='offset_linenos', action='store_true',
                        help=('Doctest outputs will display line numbers '
                              'wrt to the source file.'))
    args, unknown = parser.parse_known_args()
    ns = args.__dict__.copy()
    # ... postprocess args
    modname = ns['modname']
    command = ns['command']
    style = ns['style']
    offset_linenos = ns['offset_linenos']
    if ns['options'] is None:
        # Fall back to the xdoctest_options declared in pytest.ini, if any.
        from os.path import exists
        ns['options'] = ''
        if exists('pytest.ini'):
            from six.moves import configparser
            # BUG FIX (hygiene): renamed from `parser`, which shadowed the
            # argparse parser above.
            ini_parser = configparser.ConfigParser()
            ini_parser.read('pytest.ini')
            try:
                ns['options'] = ini_parser.get('pytest', 'xdoctest_options')
            except configparser.NoOptionError:
                pass
    from xdoctest.directive import parse_directive_optstr
    default_runtime_state = {}
    for optpart in ns['options'].split(','):
        if optpart:
            directive = parse_directive_optstr(optpart)
            if directive is not None:
                default_runtime_state[directive.name] = directive.positive
    # Specify a default doctest_example.Config state
    config = {
        'default_runtime_state': default_runtime_state,
        'offset_linenos': offset_linenos,
    }
    import xdoctest
    xdoctest.doctest_module(modname, argv=[command], style=style,
                            config=config)
if __name__ == '__main__':
main()
|
17,786 | 7d5a781d7dc0f2c5525c056f53403ea85a586a01 | """
This is a nonuniform sampler with an additive random sampling (ARS) scheme. |br|
The module samples the given signals nonuniformly. |br|
The sampling patterns are generated using the ARS scheme.
The used ARS patterns generator is described further in
"Generation and Analysis of Constrained Random Sampling Patterns",
available in arXiv: http://arxiv.org/abs/1409.1002
*Examples*:
Please go to the *examples/acquisitions* directory for examples on how to
use the sampler. |br|
*Settings*:
Parameters of the sampler are described below.
Take a look on '__parametersDefine' function for more info on the
parameters.
Parameters of the sampler are attributes of the class which must/can
be set before the sampler is run.
Required parameters:
- a. **mSig** (*Numpy array 2D*): Input signals
- b. **tS** (*float*): time of input signals
- d. **fR** (*float*): input signals' representation sampling frequency
- d. **Tg** (*float*): patterns sampling grid
- e. **fSamp** (*float*): the requested average sampling frequency of the sampling patterns
Optional parameters:
- f. **iSigma** (*float*): variance of Gaussian random process [default = 1]
- g. **bMute** (*int*): mute the console output from the sampler [default = 0]
*Output*:
Description of the sampler output is below.
This is the list of attributes of the sampler class which are available
after calling the 'run' method:
Observed signals:
- a. **mObSig** (*Numpy array 2D*): Observed sampled signals
Sampling patterns:
- b. **mPatts** (*Numpy array 2D*): Sampling patterns (as grid indices)
- c. **mPattsRep** (*Numpy array 2D*): Sampling patterns
(as signal representation points)
- d. **mPattsT** (*Numpy array 2D*): Sampling patterns
(as time moments)
Observation matrices:
- e. **lPhi** (list) List with observation matrices.
One matrix p. signal.
Additional parameters of sampling patterns:
- f. **nK_g** (*int*): the number of grid points in the sampling pattern
- g. **tTau_real** (*float*): the real time of sampling patterns
- h. **nK_s** (*int*): the expected number of sampling points in a pattern
- i. **f_s** (*float*): the expected average sampling frequency
- j. **nT** (*int*): the expected average sampling period (as grid pts)
- k **tT_s**. (*float*): the expected average sampling period
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <jap@es.aau.dk>
*Version*:
1.0 | 29-JAN-2015 : * Initial version. |br|
1.1 | 9-MAR-2015 : * Observation matrices are grouped in a list, not in a 3D Numpy array |br|
2.0 | 14-AUG-2015 : * Objectified version (2.0) |br|
2.0r1 | 18-AUG-2015 : * Adjusted to RxCSObject v1.0 |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import math
import rxcs
import numpy as np
class nonuniARS(rxcs._RxCSobject):
def __init__(self, *args):
rxcs._RxCSobject.__init__(self) # Make it a RxCS object
self.strRxCSgroup = 'Acquisition' # Name of group of RxCS modules
self.strModuleName = 'ARS sampler' # Module name
self.__inputSignals() # Define input signals
self.__parametersDefine() # Define the parameters
# Input signals
def __inputSignals(self):
# 1d/2d array with input signals, one signal p. row
self.paramAddMan('mSig', 'Input signals')
self.paramType('mSig', np.ndarray)
self.paramTypeEl('mSig', (int, float))
self.paramNDimLE('mSig', 2)
# Time of input signals
self.paramAddMan('tS', 'Time of input signals', unit='s')
self.paramType('tS', (int, float))
self.paramH('tS', 0)
self.paramL('tS', np.inf)
# Input signals representation sampling frequency
self.paramAddMan('fR', 'Input signals representation sampling frequency', unit='Hz')
self.paramType('fR', (int, float))
self.paramH('fR', 0)
self.paramL('fR', np.inf)
# Define parameters
def __parametersDefine(self):
# Patterns sampling grid
self.paramAddMan('Tg', 'Patterns sampling grid', unit='s')
self.paramType('Tg', (int, float)) # Must be of int or float type
self.paramH('Tg', 0) # Patterns sampling grid must be higher than zero
self.paramL('Tg', np.inf) # ...and lower than infinity
# Requested sampling frequency
self.paramAddMan('fSamp', 'Requested sampling frequency', unit='Hz')
self.paramType('fSamp', (int, float)) # Must be of int or float type
self.paramH('fSamp', 0) # Requested sampling frequency must be higher than zero
self.paramL('fSamp', np.inf) # ...and lower than infinity
# Variance of Gaussian random process used by the pattern generator
self.paramAddOpt('iSigma', 'Variance of Gaussian random process', default=1)
self.paramType('iSigma', (int, float)) # Must be of int or float type
self.paramH('iSigma', 0) # Variance must be higher than zero
self.paramL('iSigma', np.inf) # ...and lower than infinity
# 'Mute the output' flag
self.paramAddOpt('bMute', 'Mute the output', noprint=1, default=0)
self.paramType('bMute', int) # Must be of int type
self.paramAllowed('bMute',[0, 1]) # It can be either 1 or 0
# Run
def run(self):
self.parametersCheck() # Check if all the needed partameters are in place and are correct
self.parametersPrint() # Print the values of parameters
self.engineStartsInfo() # Info that the engine starts
self.__engine() # Run the engine
self.engineStopsInfo() # Info that the engine ends
return self.__dict__ # Return dictionary with the parameters
# Engine of the function
def __engine(self):
    """Run the full sampling pipeline, in dependency order."""
    self._computeParam()  # Compute parameters of sampling
    self._checkConf()  # Check configuration of sampling
    self._generatePatterns()  # Generate the sampling patterns
    self._sampleSignals()  # Sample the signals
    self._generObser()  # Generate the observation matrices
    return
# Compute parameters
def _computeParam(self):
"""
This function computes parameters of sampling.
Args:
none
Returns:
none
List of variables added by function to the object:
nK_g (float): the number of grid points in the sampling pattern
tTau_real (float): the real time of sampling patterns
nK_s (float): the expected number of sampling points in a pattern
f_s (float): the expected average sampling frequency
nT (float): the expected average sampling period (as grid pts)
tT_s (float): the expected average sampling period
"""
# Calculate the number of grid points in the sampling period
nK_g = math.floor(self.tS / self.Tg)
# Calculate the real time of sampling patterns
tTau_real = nK_g * self.Tg
# Calculate the expected number of sampling points in a pattern
nK_s = int(round(tTau_real * self.fSamp))
# Calculate the expected average sampling frequency
f_s = nK_s / tTau_real
# Calculate the expected average sampling period
tT_s = 1 / f_s
# Calculate the expected average sampling period and recalculate it to
# the grid
nT = int(math.ceil(1 / (f_s * self.Tg)))
self.nK_g = nK_g # the number of grid points in the sampling pattern
self.tTau_real = tTau_real # the real time of sampling patterns
self.nK_s = nK_s # the expected number of sampling points in a pattern
self.f_s = f_s # the expected average sampling frequency
self.nT = nT # the expected average sampling period (as grid pts)
self.tT_s = tT_s # the expected average sampling period
return
def _checkConf(self):
"""
This function checks configuration of sampling
Args:
none
Returns:
none
"""
# -----------------------------------------------------------------
# Check if the number of grid points in patterns is higher than 0
if not self.nK_g > 0:
strError = ('Real number of grid points in patterns must be higher ')
strError = strError + ('than zero')
raise ValueError(strError)
# -----------------------------------------------------------------
# Check if the real time of patterns is higher than 0
if not self.tTau_real > 0:
strError = ('Real time of patterns must be higher than zero')
raise ValueError(strError)
# -----------------------------------------------------------------
# Check if the expected number of sampling points is higher than 0
if not self.nK_s > 0:
strError = ('The expected number of sampling points in patterns ')
strError = strError + ('must be higher than zero')
raise ValueError(strError)
# -----------------------------------------------------------------
# Check if the time of patterns is equal to the time of signals to be
# sampled
if (self.tTau_real - self.tS) > self.tS/1e12:
strError = ('The real time of patterns is different than the time ')
strError = strError + ('of signals to be sampled')
raise ValueError(strError)
# -----------------------------------------------------------------
# Check if the expected number of sampling points is lower or equal
# to the number of grid points
if not self.nK_g >= self.nK_s:
strError = ('The real number of grid points in patterns must be ')
strError = strError + ('higher or equal to the number of expected ')
strError = strError + ('sampling points')
raise ValueError(strError)
# -----------------------------------------------------------------
# Check if the signal representation sampling frequency is compatible
# with the sampling period
if np.round(self.Tg * self.fR) != (self.Tg * self.fR):
strError = ('The chosen sampling grid period is incompatible with ')
strError = strError + ('the signals representation sampling ')
strError = strError + ('frequency')
raise ValueError(strError)
# -----------------------------------------------------------------
return
# Generate the sampling patterns
def _generatePatterns(self):
    """
    This function generates the required number of sampling patterns.

    Args:
        none
    Returns:
        none
    List of variables added by function to the object:
        nSigs (number):     the number of input signals
        mPatts (matrix):    the sampling patterns (grid indices)
        mPattsRep (matrix): the sampling patterns (signal rep. sampling points)
        mPattsT (matrix):   the sampling patterns (time moments)
    """
    # Make the matrix with signals 2 dim, if it is 1 dim
    if self.mSig.ndim == 1:
        self.mSig = self.mSig.copy()
        self.mSig.shape = (1, self.mSig.size)
    (nSigs, _) = self.mSig.shape  # The number of input signals

    # Allocate the matrix for all the sampling patterns.
    # BUG FIX: the matrix must be float (not int64) because shorter patterns
    # are padded with NaN, which cannot be stored in an integer array.
    mPatts = np.ones((nSigs, self.nK_s))
    iSMaxPatt = 0  # Max size of a pattern seen so far
    for inxP in np.arange(nSigs):
        # Generate a vector with a sampling pattern
        vPattern = self._ars_engine(self.nK_s, self.nT, self.nK_g, self.iSigma)
        (iS_v,) = vPattern.shape
        iSMaxPatt = max(iS_v, iSMaxPatt)
        (iR_m, iC_m) = mPatts.shape
        if iS_v < iC_m:
            # Pad the short pattern with NaN up to the matrix width
            vPattern = np.hstack((vPattern, np.nan * np.ones(iC_m - iS_v)))
        elif iS_v > iC_m:
            # Widen the storage matrix with NaN columns.
            # BUG FIX: np.ones takes the shape as a single tuple; the old
            # call np.ones(iR_m, iEmpty) passed iEmpty as the dtype argument
            # and raised a TypeError.
            mPatts = np.hstack((mPatts, np.nan * np.ones((iR_m, iS_v - iC_m))))
        mPatts[inxP, :] = vPattern  # Store the generated pattern
    mPatts = mPatts[:, 0:iSMaxPatt]  # Clip unused columns

    # The patterns engine generates patterns in range <1 ; N>; Numpy indexes
    # from 0, so shift to <0 ; N-1>
    mPatts = mPatts - 1

    # Number of signal representation points per grid point
    iGridvsRep = int(np.round((self.Tg * self.fR)))
    # Recalculate the patterns to the signal representation sampling points
    mPattsRep = iGridvsRep * mPatts

    # Recalculate the patterns to the time moments
    vTSig = (1 / self.fR) * np.arange(int(np.round(self.tTau_real * self.fR)))
    (iTPoints, ) = vTSig.shape
    mPattsRep_ = mPattsRep.copy()
    mPattsRep_[np.isnan(mPattsRep)] = iTPoints - 1  # NaN -> valid dummy index
    # BUG FIX: index with the sanitized integer copy (the old code indexed
    # with the NaN-bearing float matrix, which numpy rejects), then restore
    # the NaN padding markers in the time-domain patterns.
    mPattsT = vTSig[mPattsRep_.astype(int)]
    mPattsT[np.isnan(mPattsRep)] = np.nan

    self.nSigs = nSigs
    self.mPatts = mPatts
    self.mPattsRep = mPattsRep
    self.mPattsT = mPattsT
    return
# Sample the signals
def _sampleSignals(self):
    """
    This function samples signals using the previously generated
    sampling patterns.

    NaN entries in the patterns (padding for shorter patterns) are redirected
    to a NaN column appended to the signals, so unused slots of the observed
    signal matrix come out as NaN as well.

    Args:
        none
    Returns:
        none
    List of variables added by function to the object:
        mObSig (matrix): the observed signals
    """
    mSig_ = np.hstack((self.mSig, np.nan*np.zeros((self.nSigs,1)))) # Add nan to the end of signals
    # Generate matrix with patterns where NaN is changed to index of NaN in mSig_
    mPattsRep_ = self.mPattsRep.copy()
    (_, nSamps) = self.mSig.shape # Get the number of samples in the signals
    mPattsRep_[np.isnan(self.mPattsRep)] = nSamps # Change nan into pointer to nan
    # Fancy indexing: row i of the result takes mSig_[i, pattern_i]; the two
    # transposes keep the (signal, sample) orientation of the output.
    self.mObSig = (mSig_[np.arange(self.nSigs), mPattsRep_.T.astype(int)]).T # Generate correct observation signals
    return
# Generate the observation matrices
def _generObser(self):
"""
This function generates the observation matrices.
Args:
none
Returns:
none
List of variables added by function to the object:
lPhi (list): list with observation matrices
"""
nSmp = int(round(self.tS * self.fR)) # The number of representation samples in the input signals
lPhi = [] # Start the list with the observation matrices
# Generate the observation matrices
for inxPat in np.arange(self.nSigs): # <- loop over all observation matrices
# Get the current pattern
vPatts = self.mPattsRep[inxPat, :]
vPatts = vPatts[np.invert(np.isnan(vPatts))] # Clean the pattern
nPatSiz = vPatts.size # Size of the pattern
# Generate the observation matrix for the current sampling pattern
mPhi = np.zeros((nPatSiz, nSmp)) # Allocat the observation matrix
inxRow = 0 # Reset index of a row
for inxCol in vPatts: # <- loop over all sampling points in a pattern
mPhi[inxRow, int(inxCol)] = 1
inxRow = inxRow + 1
lPhi.append(mPhi.copy()) # Add the matrix to the list
# -----------------------------------------------------------------
self.lPhi = lPhi
return
# =================================================================
# ARS engine
# =================================================================
def _ars_engine(self, nK_s, nT, K_g, sigma):
# -----------------------------------------------------------------
# Allocate the vector for the sampling points
vPattern = np.nan*np.zeros(2*nK_s)
# Reset the index of a correct sampling time point
hatk = 0
# Reset the current index of the grid
n_hatk = 0
# -----------------------------------------------------------------
# Draw all time points
for unused in range(1, nK_s+1):
# Draw the current time point
x_k = np.random.randn()
nstar_hatk = \
round(n_hatk + nT + np.sqrt(sigma) * x_k * nT)
# Store the time point in the vector,
# if it is somewhere in the sampling time due
# and was not drawn yet
if nstar_hatk > 0 and nstar_hatk <= K_g:
# Store the time point
n_hatk = nstar_hatk
vPattern[hatk] = n_hatk
hatk = hatk + 1
# Check if the whole vector vPattern was
# already filled up. If so, make it longer
nSiz = vPattern.size
if hatk == nSiz:
vExtraSpace = np.nan*np.zeros(nK_s)
vPattern = np.hstack((vPattern, vExtraSpace))
# -----------------------------------------------------------------
# Clear the pattern
vPattern = vPattern[np.equal(np.isnan(vPattern), False)]
# -----------------------------------------------------------------
# Remove repeated moments
vPattern = np.unique(vPattern)
# -----------------------------------------------------------------
return vPattern
|
17,787 | b025364e45cc1aa23bce37c29b41c83dca263eb9 | #!/usr/bin/env python3
import sys
import yaml
STICK_LENGTH = 0.6
NODE_DIAMETER = 0.8
class Node():
    """One tensor-network node parsed from the YAML tree description."""

    def __init__(self, name):
        self.name = name
        self.parent_idx = None      # mode index that points back to the parent
        self.children_nodes = {}    # mode index -> child node name
        self.num_modes = 0
        self.angles = None          # tikz angle (degrees) for every mode

    def set_num_modes(self, n):
        self.num_modes = n

    def add_child_node(self, idx, node):
        self.children_nodes[idx] = node

    def set_parent_idx(self, idx):
        self.parent_idx = idx

    def compute_angles(self, parent_angle):
        """Distribute the modes evenly; the parent mode faces the parent."""
        step = 360.0 / self.num_modes
        if self.parent_idx is None:
            start = -180.0
        else:
            start = (parent_angle - 180.0) + step * self.parent_idx
        self.angles = [(start - step * i) % 360.0 for i in range(self.num_modes)]

    def tikz_nodes(self, location):
        """Emit the tikz node plus one coordinate per non-parent mode."""
        tikz_id = self.name
        parts = [f"\\node[tensor node] ({tikz_id:s}) at ({location:s}) {{{self.name:s}}};\n"]
        for i, a in enumerate(self.angles):
            # Children get a longer stick so the child node can sit on its end.
            if i in self.children_nodes:
                length = STICK_LENGTH + NODE_DIAMETER
            else:
                length = STICK_LENGTH + NODE_DIAMETER / 2.0
            if i != self.parent_idx:
                parts.append(f"\\coordinate ({tikz_id:s}-m{i + 1:d}) ")
                parts.append(f"at ([shift=({a:g}:{length:g} cm)] {tikz_id:s});\n")
        return "".join(parts)

    def tikz_connections(self):
        """Emit the edges: virtual (to children) and physical (free modes)."""
        tikz_id = self.name
        parts = []
        for i in range(self.num_modes):
            if i in self.children_nodes:
                child = self.children_nodes[i]
                parts.append(f"\\draw[virtual idx] ({tikz_id:s}) -- ({child:s})\n")
                parts.append(f"    node[mode num, pos = 0.1] {{{i + 1:d}}}\n")
                parts.append("    node[mode num, pos = 0.9] {};\n")
            elif i != self.parent_idx:
                parts.append(f"\\draw[physical idx] ")
                parts.append(f"({tikz_id:s}) -- ({tikz_id:s}-m{i + 1:d})\n")
                parts.append(f"    node[mode num, pos = 0.1] {{{i + 1:d}}};\n")
        return "".join(parts)
def collect_nodes(d, nodes):
    """Recursively walk the parsed YAML tree, appending a Node per mapping.

    A dict with a single key names a node; its value is the list of modes,
    where a nested dict marks a child node and "^" marks the parent mode.
    """
    if isinstance(d, dict):
        # We're dealing with a node here, so the single key is its name
        assert(len(d) == 1)
        node_name = next(iter(d))
        node = Node(node_name)
        nodes.append(node)
        for i, entry in enumerate(d[node_name]):
            if isinstance(entry, dict):
                node.add_child_node(i, next(iter(entry)))
            elif entry == "^":
                node.set_parent_idx(i)
        node.set_num_modes(len(d[node_name]))
        for value in d.values():
            collect_nodes(value, nodes)
    elif isinstance(d, list):
        # A list holds the node's modes; recurse into each entry
        for entry in d:
            collect_nodes(entry, nodes)
def find_parent(nodes, node_name):
    """Return (parent name, mode index) for *node_name*, or (None, None)."""
    for candidate in nodes:
        for idx, child in candidate.children_nodes.items():
            if child == node_name:
                return candidate.name, idx
    return None, None
if __name__ == "__main__":
    # Usage: script.py <tree.yaml>; prints a standalone tikzpicture to stdout.
    filename = sys.argv[1]
    with open(filename, "r", encoding="utf8") as f:
        data = yaml.safe_load(f)

    # Flatten the YAML tree into Node objects.
    nodes = []
    collect_nodes(data, nodes)
    nodes_dict = {n.name: n for n in nodes}

    # Propagate angles top-down: a node can only be placed once its parent
    # has angles, so keep sweeping until every node is resolved.
    while any(n.angles is None for n in nodes):
        for n in nodes:
            parent_name, parent_side_idx = find_parent(nodes, n.name)
            if parent_name is None:
                # This is a root node
                n.compute_angles(0.0)
            else:
                if not nodes_dict[parent_name].angles is None:
                    parent_side_angle = \
                        nodes_dict[parent_name].angles[parent_side_idx]
                    n.compute_angles(parent_side_angle)

    # Emit all nodes first (each child is anchored at its parent's mode
    # coordinate), then the connecting edges.
    print("\\begin{tikzpicture}")
    for v in nodes_dict.values():
        parent_name, parent_side_idx = find_parent(nodes, v.name)
        if parent_name is None:
            print(v.tikz_nodes("0, 0"), "\n")
        else:
            parent_coord = f"{parent_name:s}-m{parent_side_idx + 1:d}"
            print(v.tikz_nodes(parent_coord), "\n")
    for v in nodes_dict.values():
        print(v.tikz_connections(), "\n")
    print("\\end{tikzpicture}")
|
17,788 | 39999650797d67a62e8a4042491bf77802a5c085 | import json
import os
from gcp_census.model.table import Table
from gcp_census.model.view import View
class FilesystemModelProvider(object):
    """Loads table/view model definitions from a directory tree on disk.

    Expected layout: <model_directory>/<group>/<name>.json|.sql
    """

    def __init__(self, model_directory):
        self.model_directory = model_directory

    def list_tables(self):
        """Yield a Table for every *.json model file."""
        for group, name, path in self.__list_files('.json'):
            with open(path) as json_file:
                yield Table(group, name, json.load(json_file))

    def list_views(self):
        """Yield a View for every *.sql model file."""
        for group, name, path in self.__list_files('.sql'):
            with open(path) as view_file:
                yield View(group, name, view_file.readlines())

    def list_groups(self):
        """Yield the name of every group subdirectory."""
        for entry in os.listdir(self.model_directory):
            if os.path.isdir(os.path.join(self.model_directory, entry)):
                yield entry

    def __list_files(self, extension):
        """Yield (group, model name, file path) for files matching *extension*."""
        for group in os.listdir(self.model_directory):
            subdirectory = os.path.join(self.model_directory, group)
            if not os.path.isdir(subdirectory):
                continue
            for model_file in os.listdir(subdirectory):
                if model_file.endswith(extension):
                    yield (group,
                           os.path.splitext(model_file)[0],
                           os.path.join(subdirectory, model_file))
17,789 | 9216705625792d7f103bfd186bd16b93826ce567 | import falcon
import requests
import json
import pickle
from dbwrapper import DB
import util
def validate_batch_get_params (req, resp, resource, params):
    """Falcon before-hook: validate paging/sorting query parameters.

    Raises HTTPBadRequest (400) when a required param is missing or invalid;
    fills in default sort_key/order when absent.
    """
    for i in ('org_id','page','page_size'):
        if i not in req.params:
            raise falcon.HTTPBadRequest('Bad request', 'Missing param <%s>' % i)
    for i in ('page','page_size'):
        if not util.is_positive_int (req.params[i]):
            raise falcon.HTTPBadRequest('Bad request', '<%s> must be positive int' % i)
    if int(req.params['page_size']) > 1024:
        # BUG FIX: the message claimed "max 200" while the enforced limit
        # is 1024 -- keep the message consistent with the check.
        raise falcon.HTTPBadRequest('Bad request', 'page_size %s too big(max 1024)' % req.params['page_size'])
    if 'sort_key' not in req.params:
        req.params['sort_key'] = 'id'  # default: sort by id
    if 'order' not in req.params:
        req.params['order'] = 'ASC'  # default: ascending
def validate_api_key (req, resp, resource, params):
    """Falcon before-hook: reject requests lacking the shared API key.

    NOTE(review): the key is a hard-coded shared secret; consider moving it
    to configuration.
    """
    if req.params.get('key') != 'Oakridge':
        raise falcon.HTTPBadRequest('Bad request', 'Authentication fail')
# handle batch admin ops
@falcon.before (validate_api_key)
class Admins (object):
    """GET: page through all admin users below an organization subtree."""

    @falcon.before (validate_batch_get_params)
    def on_get (self, req, resp):
        # SECURITY FIX: every parameter below is interpolated directly into
        # the SQL statement, so validate each one first.  These raise before
        # the try-block so they surface as 400, not 500.
        if not util.is_positive_int(req.params['org_id']):
            raise falcon.HTTPBadRequest('Bad request', '<org_id> must be positive int')
        if not re.match(r'^[A-Za-z_][A-Za-z0-9_]*$', req.params['sort_key']):
            raise falcon.HTTPBadRequest('Bad request', 'invalid sort_key')
        if req.params['order'].upper() not in ('ASC', 'DESC'):
            raise falcon.HTTPBadRequest('Bad request', 'invalid order')
        try:
            with DB('127.0.0.1','root','oakridge','authc') as db:
                # recursively find all user_id given a site's org_id
                # https://stackoverflow.com/questions/20215744/how-to-create-a-mysql-hierarchical-recursive-query
                sql = """
SELECT * FROM user_info WHERE id in (
SELECT user_id FROM user_organization_permission WHERE org_id in (
SELECT id FROM (
SELECT id, parent_id FROM organization_info ORDER BY id DESC) dummy1, (SELECT @cid := %s) dummy2
WHERE find_in_set(id, @cid) and length(@cid := concat(@cid, ',', parent_id)
)
)
) ORDER BY %s %s LIMIT %s,%s
""" % (req.params['org_id'], req.params['sort_key'], req.params['order'], int(req.params['page']) * int(req.params['page_size']), req.params['page_size'])
                dict_array = db.query_and_fetchall_json (sql, exclude_column=('password', 'wechat_openid', 'user_label'))
                if not dict_array:
                    error = (2,'no record found')
                else:
                    error = (0,'')
                result= {
                    'error': error,
                    'data': dict_array
                }
                resp.status = falcon.HTTP_200
        except Exception as e:
            result= { 'error': (100, str(e)) }
            resp.status = falcon.HTTP_500
        resp.body=json.dumps(result)
# handle ops on one admin
@falcon.before (validate_api_key)
class OneAdmin (object):
    """GET: fetch a single admin user by numeric id."""

    def on_get (self, req, resp, uid):
        # SECURITY FIX: uid comes straight from the URL and is interpolated
        # into the SQL statement below, so insist it is a plain positive
        # integer first (raised before the try-block -> 400, not 500).
        if not util.is_positive_int(uid):
            raise falcon.HTTPBadRequest('Bad request', '<uid> must be positive int')
        try:
            with DB('127.0.0.1','root','oakridge','authc') as db:
                sql = """SELECT * FROM user_info WHERE id = %s""" % uid
                dict_array = db.query_and_fetchall_json(sql, exclude_column=('password', 'wechat_openid', 'user_label'))
                if not dict_array:
                    error = (2,'no uid %s found' % uid)
                else:
                    error = (0,'')
                result= {
                    'error': error,
                    'data': dict_array
                }
                resp.status = falcon.HTTP_200
        except Exception as e:
            result= { 'error': (101, str(e)) }
            resp.status = falcon.HTTP_500
        resp.body=json.dumps(result)
17,790 | ad146ce47e102cbc91ba90045a4f52fae855c6de | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import viewsets
from cars.serializers import CarSerializer
from cars.models import Car
class CarViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows cars to be viewed or edited.
    """
    queryset = Car.objects.all()
    serializer_class = CarSerializer
17,791 | 27eb75dc262396c71384ef24423914b1053d0621 | import os
from os.path import isfile,join
from bs4 import BeautifulSoup
import re
list=os.listdir("htmls")
symbols=['"',"'",'@','#','$','~','`',':',';','^','*','_','?','%','(',')','{','}','[',']','+','=','<','>','|','\\','/']
def corpusGen():
    """Convert every saved HTML page under htmls/ into a cleaned corpus file.

    Strips scripts, URLs and most punctuation, lower-cases A-Z, and writes
    the result to htmls/corpus/<name>.txt.
    """
    for file in list:
        if isfile (join("htmls",file)):
            with open('htmls/'+file, 'r') as content_file:
                content = content_file.read()
            # BUG FIX: under Python 3 open('r').read() already returns str,
            # and str has no .decode(), so the old content.decode('utf-8')
            # raised AttributeError.
            soup= BeautifulSoup(content,"lxml")
            # Drop inline <script> blocks before extracting text
            for script in soup.findAll('script',src=False):
                script.decompose()
            text=soup.get_text()
            text=retainCommaDot(text)
            text=removeUrls(text)
            newText=''
            for char in text:
                code = ord(char)
                if code > 128 or char in symbols or code == 11 or code == 9 or code == 13:
                    continue  # drop non-ASCII, listed symbols and control chars
                # BUG FIX: A-Z is 65-90; the old upper bound (<= 91) also
                # matched '[' (only masked because '[' is in `symbols`).
                if 65 <= code <= 90:
                    char = chr(code + 32)  # lower-case the letter
                newText += char
            newFileName=file.replace("html","txt")
            newFileName=newFileName.replace("_",'')
            # BUG FIX: close the output file deterministically (the old
            # handle was flushed but never closed).
            with open('htmls/corpus/'+newFileName, "w") as writeFile:
                writeFile.write(newText)
def removeUrls(content):
    """Strip every http(s):// URL found in *content* and return the result."""
    url_re = re.compile(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    )
    # Delete each matched URL everywhere it occurs in the text
    for url in url_re.findall(content):
        content = content.replace(url, '')
    return content
def retainCommaDot(content):
    """Remove ',' and '.' unless they sit between two digits (e.g. "1,234").

    A trailing ',' or '.' at the very end of the text is kept, matching the
    original behaviour.
    """
    punctList = [',', '.']
    # BUG FIX: '2' was missing from the digit set, so separators adjacent to
    # a 2 (e.g. "1,234") were wrongly stripped.
    digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    last = len(content) - 1
    newStr = ''
    for counter, char in enumerate(content):
        # BUG FIX: check the position bound BEFORE indexing counter + 1; the
        # old evaluation order raised IndexError when the text ended in
        # ',' or '.'.
        if (char in punctList and counter != last
                and (content[counter - 1] not in digits
                     or content[counter + 1] not in digits)):
            char = ''
        newStr = newStr + char
    return newStr
# Build the corpus for every HTML file when the module is executed.
corpusGen()
|
17,792 | dad995f5d029d61136f0a7570fd361e4af34899f | from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
class Category(models.Model):
    """A blog category that groups posts."""

    name = models.CharField(max_length=50, help_text=_('Category name'))
    slug = models.SlugField(verbose_name=_('URL'), help_text=_('URL'))
    is_active = models.BooleanField(default=True)  # inactive categories are hidden

    class Meta:
        verbose_name_plural = _('Categories')

    def get_absolute_url(self):
        # BUG FIX: reverse()'s second positional argument is `urlconf`, not
        # the URL parameters -- they must be passed through kwargs=.
        return reverse('blog_category_detail', kwargs={'slug': self.slug})

    def __str__(self):
        return self.name
class Post(models.Model):
    """A single blog entry."""

    title = models.CharField(max_length=300, help_text=_('Title of the Entry'), db_index=True)
    body = models.TextField(blank=False, verbose_name=_('Content'))
    slug = models.SlugField(unique=True, verbose_name=_('URL'), help_text=_('URL'))
    # CONSISTENCY: spell out on_delete (CASCADE is the implicit pre-Django-2.0
    # default) to match the category FK below and stay valid on Django >= 2.0.
    user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    category = models.ForeignKey('Category', on_delete=models.DO_NOTHING)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    is_published = models.BooleanField(default=True)  # unpublished = draft
    views = models.IntegerField(null=True, blank=True)  # page-view counter

    class Meta:
        verbose_name_plural = _('Posts')

    def __str__(self):
        return self.title
17,793 | f46ac199856ea7508be83125a522f1f6a5e501b0 | ### Jay Authement ###
### Version: [1.0.0] ###
### Original Pi Weather Station ###
import time
import math
import grovepi
import json
# LED digital port declarations #
GrLED = 2
BlLED = 3
RdLED = 4
# Temp/Humidity sensor port and output declaration #
tempSensor = 5
blue = 0  # sensor variant selector passed to grovepi.dht (0 = blue module)
# Light sensor analog port declaration #
lightSensor = 0
# Light sensor threshold (readings above this count as daylight) #
threshold = 100
# Sets the ports declared above to outputs #
grovepi.pinMode(GrLED, "OUTPUT")
grovepi.pinMode(BlLED, "OUTPUT")
grovepi.pinMode(RdLED, "OUTPUT")
# Sets the lightSensor port to an input #
grovepi.pinMode(lightSensor, "INPUT")
# Accumulated [fahrenheit, humidity] readings, dumped to JSON every cycle #
data = []
while True:
    try:
        # Gather information every 30min / 1800s #
        time.sleep(1800)
        sensor_value = grovepi.analogRead(lightSensor)
        # Resistance of the light sensor in K (kept for reference) #
        resistance = (float)(1023 - sensor_value)*10 / sensor_value
        # Runs the code if it's daytime #
        if sensor_value > threshold:
            # Reads temp and humidity from sensor in port 5 #
            [temp, humidity] = grovepi.dht(tempSensor, blue)
            farenheit = (temp * 9/5) + 32
            # Only act on valid (non-NaN) readings #
            if math.isnan(temp) == False and math.isnan(humidity) == False:
                print("Current temperature is: %.02fF and the current humidity is %.02f%%" % (farenheit, humidity) + ".")
                # Append and persist the collected data #
                data.append([farenheit, humidity])
                with open('sensorData.json', 'w') as outfile:
                    json.dump(data, outfile)
                # BUG FIX: the original independent if-statements left gaps at
                # exactly 60F, 85F and 95F (no LED update at all) and let the
                # humidity branch overwrite the red "hot" branch.  Use one
                # elif ladder with explicit priority: humid > hot > warm > ok.
                if humidity > 80:
                    grovepi.digitalWrite(GrLED, 1)
                    grovepi.digitalWrite(BlLED, 1)
                    grovepi.digitalWrite(RdLED, 0)
                elif farenheit > 95:
                    grovepi.digitalWrite(GrLED, 0)
                    grovepi.digitalWrite(BlLED, 0)
                    grovepi.digitalWrite(RdLED, 1)
                elif farenheit > 85:
                    grovepi.digitalWrite(GrLED, 0)
                    grovepi.digitalWrite(BlLED, 1)
                    grovepi.digitalWrite(RdLED, 0)
                elif farenheit > 60:
                    grovepi.digitalWrite(GrLED, 1)
                    grovepi.digitalWrite(BlLED, 0)
                    grovepi.digitalWrite(RdLED, 0)
        # This runs if the light sensor does not detect daylight #
        else:
            print("Night time. Checking again in 30 minutes.")
            grovepi.digitalWrite(GrLED, 0)
            grovepi.digitalWrite(BlLED, 0)
            grovepi.digitalWrite(RdLED, 0)
    except IOError:
        print("Critical ERROR! Critical ERROR! ABORT! ABORT!")
|
17,794 | 0a01a95984005150817600abf3e2270e5c4a0df3 | # Monary - Copyright 2011-2014 David J. C. Beach
# Please see the included LICENSE.TXT and NOTICE.TXT for licensing information.
# Public API of the monary package.
from .monary import Monary
from .datehelper import *

# Package version, also exposed as __version__ (PEP 396 convention).
version = "0.3.0"
__version__ = version
|
17,795 | 753691228da1fd079171f097e661ad4c36347f25 | '''
This is a sorting technique that involves building a binary heap from a given
array and then using the heap to sort the array.
A binary heap is a complete binary tree where the parent node is either
greater or smaller that its two child nodes.
'''
def make_heap(list_of_values, n, index):
    """Sift list_of_values[index] down so the subtree rooted there is a
    max-heap.  Only the first n elements are considered part of the heap."""
    left = 2 * index + 1
    right = 2 * index + 2
    largest = index
    if left < n and list_of_values[left] > list_of_values[index]:
        largest = left
    if right < n and list_of_values[right] > list_of_values[largest]:
        largest = right
    if largest == index:
        return  # heap property already holds here
    list_of_values[index], list_of_values[largest] = (
        list_of_values[largest], list_of_values[index])
    make_heap(list_of_values, n, largest)
def heap_sort(list_of_values):
    """Sort *list_of_values* in ascending order, in place, via heap sort."""
    n = len(list_of_values)
    # Build a max-heap.  Nodes at index >= n // 2 are leaves, so sifting can
    # start at the last internal node (the original started at n, wasting
    # n // 2 + 2 calls that are guaranteed no-ops).
    for index in range(n // 2 - 1, -1, -1):
        make_heap(list_of_values, n, index)
    # Repeatedly move the current maximum to the end and re-heapify the rest.
    for index in range(n-1, 0, -1):
        list_of_values[index], list_of_values[0] = (
            list_of_values[0], list_of_values[index])
        make_heap(list_of_values, index, 0)
# Demo: sort a sample list in place and show the result.
list_of_values = [400, 1, 29, 90, 12, 4, 78, 100, 90]
heap_sort(list_of_values)
print('Sorted array is %s' % list_of_values)
|
17,796 | 212cba5c6150c67258386bc53d4828b0aca77805 | # encoding: utf-8
"""
This is the script that can be run on a hourly basis in order to catch all the news items from various sources.
"""
import pytz
import os
from datetime import datetime
from dateutil.parser import parse as duparse
from elasticsearch import Elasticsearch
import sys
# When run as a file (not interactively), make the project root importable.
if '__file__' in vars():
    project_path = os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir, os.path.pardir))
    print('\n Adding path: ', project_path)
    sys.path.append(project_path)

# Own code
from RSS import news_rss as rss

# Local Elasticsearch instance that receives the news items.
es = Elasticsearch()

rss_obj = rss.RssUrl()
res_text = rss_obj.get_all_content()  # news source -> list of item dicts
news_source_list = rss_obj.news_source_list

# Timestamp of the previous run, persisted between invocations.
update_file = 'last_update.txt'
update_path = '/var/log/domotica'
update_file_path = os.path.join(update_path, update_file)
if os.path.isfile(update_file_path):
    with open(update_file_path, 'r') as f:
        last_update = f.read()
else:
    # Just make some fake earlier date..
    last_update = '1999-12-31 01:01:01.01+02:00'
    # And create the file..
    with open(update_file_path, 'w') as f:
        f.write(last_update)

# With this we parse the date that was found in the file
last_update = duparse(last_update)
# Here we loop over all the news sources and add all the items to ElasticSearch
# But only if their publication date is after our latest update
for news_source in news_source_list:
    # Keep track of items per news source
    new_items = 0
    old_items = 0
    news_content = res_text[news_source]
    # BUG FIX: removed the stray "i_content = news_content[0]" line -- it was
    # immediately overwritten by the loop below and raised IndexError
    # whenever a source returned an empty item list.
    for i_content in news_content:
        content_date = duparse(i_content['date'])
        if content_date > last_update:
            # A check to see if our date comparison is correct
            n_space = ' '*(len(str(content_date))-len('Content') + 1)
            print('Content' + n_space + 'Last update')
            print(content_date, last_update)
            new_items += 1
            es.index(index='dutch_news', body=i_content, doc_type='_doc')
        else:
            old_items += 1
    # Overview of the implemented and seen items
    print('News soure: ', news_source)
    print('Old {0} items'.format(old_items))
    print('New {0} items'.format(new_items))
    print('---------------\n')
# Persist this run's timestamp so the next run only ingests newer items.
timezone_cor = pytz.timezone('Europe/Amsterdam')
current_time = str(datetime.now(timezone_cor))
with open(update_file_path, 'w') as f:
    f.write(current_time)
|
17,797 | e43ff1873c270d9bb0e013fbbf2bf12da5977aa5 | #! /usr/bin/env python2.7
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
import unittest
import os
import logging
import tempfile
from yotta.lib import component
from yotta.lib.fsutils import rmRf
test_json = '''{
"name": "something",
"version": "0.0.7",
"description": "some description.",
"private": false,
"homepage": "https://github.com/somewhere/something",
"bugs": {
"url": "about:blank",
"email": "project@hostname.com"
},
"author": "James Crosby <James.Crosby@arm.com>",
"licenses": [
{
"type": "Copyright (C) 2013 ARM Limited, all rights reserved.",
"url": "about:blank"
}
],
"dependencies": {
"toolchain": "ARM-RD/toolchain",
"libc": "ARM-RD/libc",
"libobjc2": "ARM-RD/libobjc2 @>0.0.7",
"yottos-platform": "ARM-RD/yottos-platform @0.0.3",
"emlib": "ARM-RD/emlib",
"nsobject": "ARM-RD/nsobject",
"nslog": "ARM-RD/nslog",
"nsassert": "ARM-RD/nsassert",
"thisdoesnotexist": "ARM-RD/thisdoesnotexist"
},
"testDependencies": {
"atestdep": "~0.2.3"
},
"targetDependencies": {
"sometarget": {
"atargetdep": "~1.3.4"
}
},
"testTargetDependencies": {
"sometarget": {
"anothertargetdep": "~1.3.4"
},
"someothertarget": {
"adifferenttargetdep": "~1.3.4"
}
}
}
'''
# Expected dependency order, as declared in test_json above.
deps_in_order = [
    'toolchain', 'libc', 'libobjc2', 'yottos-platform', 'emlib',
    'nsobject', 'nslog', 'nsassert', 'thisdoesnotexist'
]
# Test dependencies are appended after the regular ones.
test_deps_in_order = deps_in_order + ['atestdep']

# Keep test output quiet; only errors are reported.
logging.basicConfig(
    level=logging.ERROR
)
class ComponentTestCase(unittest.TestCase):
    """Offline checks of component.Component parsing (no network access)."""

    def setUp(self):
        # Fresh scratch directory per test
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        rmRf(self.test_dir)

    def test_creation(self):
        # Write the fixture module.json, then parse it back via Component.
        manifest_path = os.path.join(self.test_dir, 'module.json')
        with open(manifest_path, 'w') as manifest:
            manifest.write(test_json)

        c = component.Component(self.test_dir)
        self.assertTrue(c)
        self.assertEqual(c.getName(), 'something')
        self.assertEqual(str(c.getVersion()), '0.0.7')
        # Dependency order must match the declaration order in the manifest.
        self.assertEqual(list(c.getDependencies().keys()), deps_in_order)
        self.assertEqual(
            list(c.getDependencies(test=True).keys()), test_deps_in_order)
# Allow running this test file directly with the Python interpreter.
if __name__ == '__main__':
    unittest.main()
|
17,798 | 4c8a1c738480c928ee0127aada88ff28cc2cc8f9 | import os
# Directory where asset files are stored/served by default.
DEFAULT_ASSETS_FOLDER = os.path.join(os.getcwd(), "asset_files")
# Default bind address/port for the provider service.
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 5000
class ConfigSections:
    """Names of the sections expected in the configuration file."""
    KEEPER_CONTRACTS = 'keeper-contracts'
    RESOURCES = 'resources'
class BaseURLs:
    """URL prefixes used by the provider's HTTP API."""
    BASE_PROVIDER_URL = '/api/v1/provider'
    SWAGGER_URL = '/api/v1/docs'  # URL for exposing Swagger UI (without trailing '/')
    # NOTE(review): hard-codes DEFAULT_HOST/DEFAULT_PORT values -- keep in sync.
    API_URL = 'http://localhost:5000/spec'
    ASSETS_URL = BASE_PROVIDER_URL + '/assets'
17,799 | 01c1ac4ea09abd66bbdef0f80fba3b9823829a1f | import logging
from django.conf import settings
from slack.services import send_slack_message
from questions.models import Submission
logger = logging.getLogger(__name__)
def send_submission_slack(submission_pk: str):
    """Notify the client-intake Slack channel that a Submission awaits review,
    then mark the submission's alert as sent."""
    submission = Submission.objects.get(pk=submission_pk)
    text = get_text(submission)
    # BUG FIX: use the module-level logger defined above (the old code called
    # the root `logging` module directly, bypassing this module's logger
    # configuration and name).
    logger.info("Notifying Slack of Submission<%s>", submission_pk)
    send_slack_message(settings.SLACK_MESSAGE.CLIENT_INTAKE, text)
    # Mark request as sent via queryset update (avoids model save() side effects)
    Submission.objects.filter(pk=submission.pk).update(is_alert_sent=True)
def get_text(submission: Submission):
return (
f"A client has just submitted their {submission.topic} questionnaire answers for review.\n"
f"Their submission id is {submission.pk}.\n"
"Check your email at webmaster@anikalegal.com"
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.