index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
985,400 | e4684923aef4e6953771a7fd1eb8b1759b40f82a | import os #To find out what files we are using
import numpy as np
import cv2
# Output container and capture settings (removed a stray semicolon).
filename = 'video.mp4'
frames_per_seconds = 30  # FPS passed to cv2.VideoWriter
my_res = '720p'          # key into STD_DIMENSIONS
def change_res(cap, width, height):
    """Request the given frame size from the capture device.

    Property ids 3 and 4 are cv2's CAP_PROP_FRAME_WIDTH / _HEIGHT.
    """
    for prop_id, value in ((3, width), (4, height)):
        cap.set(prop_id, value)
# Standard video resolutions, keyed by common name -> (width, height).
STD_DIMENSIONS = {
    "480p": (640, 480),     # was (680, 480): typo, SD is 640x480
    "720p": (1280, 720),
    "1080p": (1920, 1080),
    "4k": (3840, 2160),     # was (4120, 2160): 4K UHD is 3840x2160
}
def get_dims(cap, res='1080p'):
    """Resolve `res` to (width, height), apply it to `cap`, and return it.

    Unknown resolution names fall back to 480p.
    """
    width, height = STD_DIMENSIONS.get(res, STD_DIMENSIONS['480p'])
    change_res(cap, width, height)
    return width, height
# Map container extension -> fourcc code for cv2.VideoWriter.
# NOTE(review): 'MPEG' is used for both containers; '.mp4' output is more
# commonly written with 'mp4v' -- confirm playback before changing.
VIDEO_TYPE = {
    'avi': cv2.VideoWriter_fourcc(*'MPEG'),
    'mp4': cv2.VideoWriter_fourcc(*'MPEG'),
}
def get_video_type(filename):
    """Return the fourcc code for `filename`'s extension.

    Falls back to the 'avi' codec for unknown extensions.
    """
    _, ext = os.path.splitext(filename)
    return VIDEO_TYPE.get(ext, VIDEO_TYPE['avi'])
# Open the default camera, record to `filename`, and preview until 'q'.
cap = cv2.VideoCapture(0)
dims = get_dims(cap, res=my_res)
video_type_cv2 = get_video_type(filename)
out = cv2.VideoWriter(filename, video_type_cv2, frames_per_seconds, dims)  # dims = (width, height)
while True:
    # Provides frames
    ret, frame = cap.read()
    if not ret:
        # Camera unplugged or read failure: previously `frame` would be
        # None and out.write()/imshow() below would raise.
        break
    out.write(frame)
    cv2.imshow('Frame1', frame)  # Displays the feed
    if cv2.waitKey(2) & 0xFF == ord('q'):
        break
# Release the feed
cap.release()
out.release()
cv2.destroyAllWindows()
|
985,401 | cd32651c6cecdc419533e377e44ad4a2aa4ec8cd | from .response_related_utils import *
from .response_related_utils import __all__ as response_all
from .vector_utils import *
from .vector_utils import __all__ as vector_all
__all__ = response_all + vector_all
|
985,402 | 0311608764e16d1d17c41d17f669eb2e45f7ddab | nested_list = [list(range(1,4)), list(range(4,7)), list(range(8,10))]
# The f-string "=" specifier prints both the expression text and its value.
print(f'{nested_list =}')
# NOTE(review): the inner comprehension ignores its loop variable (both
# loops bind `single_list`), so this just repeats nested_list once per
# sublist -- presumably a shadowing demo, not a transformation.
print(f'{[[single_list for single_list in nested_list] for single_list in nested_list] =}')
print(f'{[["X" if num % 2 == 0 else "O" for num in range(1,4)] for num in range(1,4)] =}') |
985,403 | c7cdb24f635479bf0c24490965866a21520bb061 | # Program to determine square root of any given number
# Programmer: Mukul Dharwadkar
# Date: Apr 15 2020
# Python version 3
def get_number():
    """Prompt until the user enters a valid number; return it as a float.

    The original loop crashed on non-numeric input (ValueError from
    float()) even though the `while response is None` loop was clearly
    meant to re-prompt; invalid input now re-prompts instead.
    """
    response = None
    while response is None:
        try:
            response = float(input("Please choose a number to find a square root of: "))
        except ValueError:
            print("That was not a valid number, please try again.")
    return response
def create_guess(inp):
    """Return the initial square-root guess: half of the input."""
    return inp / 2
def check_square(num, orig_num):
    """Refine `num` via Newton steps until num*num is within 1e-3 of
    `orig_num`, then print the result to two decimal places.

    The original condition (`> 0.001 or < 0`) demanded the square approach
    strictly from above; it is replaced with a symmetric abs() tolerance,
    which is the intended convergence test.
    """
    square = num * num
    while abs(square - orig_num) > 0.001:
        num = find_new_guess(num, orig_num)
        square = num * num
    print(f"The square root of {orig_num} is {num:.2f}")
def find_new_guess(old_guess, orig_num):
    """Return one Newton-Raphson step toward sqrt(orig_num).

    Fixed: the original read the module-level global `number` instead of
    the `orig_num` parameter, which only worked because the sole caller
    happened to pass the same global.
    """
    new_guess = (old_guess + (orig_num / old_guess)) / 2
    return new_guess
number = get_number()
check_square(create_guess(number), number) |
985,404 | 8f92411cc26a028037a833757c486e5a78c17108 | from flask import Flask, render_template, request, url_for
from utils import get_prediction, preprocess
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Classify the submitted news text and render the result page.

    Renders index.html with `prediction` ('True'/'Fake') and a matching
    Bootstrap `alert_class` ('success'/'danger').
    """
    try:
        if request.method == 'POST':
            news = request.form['news']
            class_names = ['True', 'Fake']
            alert_class_names = ['success', 'danger']
            if news == '':
                return render_template('index.html')
            elif len(str(news).split()) <= 4:
                # Very short submissions are labelled fake without
                # running the model.
                prediction_class = class_names[1]
                alert_class = alert_class_names[1]
            else:
                embedded_docs = preprocess(news)
                prediction = get_prediction(embedded_docs)
                prediction_class = class_names[prediction]
                alert_class = alert_class_names[prediction]
            # Both branches above render identically; deduplicated.
            return render_template('index.html', news=news, prediction=prediction_class, alert_class=alert_class)
        else:
            return render_template('index.html')
    except Exception as e:
        # Previously this printed and fell through, returning None and
        # producing a bare 500 with no page; now fall back to the form.
        print(e)
        return render_template('index.html')
if __name__ == '__main__':
    # debug=True enables the reloader and tracebacks; disable in production.
    app.run(debug=True)
|
985,405 | ae9752bade86a65f347b33ffbb5c871c68159652 | import math
# Google Code Jam helper: reads "r p" per case and writes one
# "Case #i: answer" line per case to the output file.
#inputFile = open("test.txt", "r")
inputFile = open("A-small-attempt1.in", "r")
output = open("Round1AAsmall.txt", "w")
cases = int(inputFile.readline())
for case in range(1, cases + 1):
    r, p = map(int, inputFile.readline().strip().split(" "))
    m = (r + 1) / 2.0
    # NOTE(review): closed-form bound from the contest analysis -- not
    # re-derived here; the outer *2 / 2.0 rounds n down to a half-integer.
    n = math.floor((0.25 * (math.sqrt(16 * (m ** 2) - 24 * m + 8 * p + 9) - 1)) * 2) / 2.0
    #print n - m + 1
    output.write("Case #%d: %d" % (case, n - m + 1))
    # No trailing newline after the final case.
    if case != cases:
        output.write("\n")
inputFile.close()
output.close() |
985,406 | 5a100b392f50b9904204d08bbfe5af4c23b2af58 | '''
通过sqlite引擎,读取数据库数据
'''
import jieba
import pandas
import wordcloud
import json
from sqlalchemy import create_engine

path = '/Users/lawyzheng/Desktop/Code/'

# Connect to the SQLite database file.
engine = create_engine('sqlite:///' + path + 'spider.db')
df = pandas.read_sql('tb_toutiao_hot', con=engine, index_col='index')

# Collect the article "abstract" column and segment it with jieba.
abstract_list = df.abstract.to_list()
abstract_list = list(map(jieba.cut, abstract_list))
abstract_list = list(map(list, abstract_list))
abstract = " ".join([' '.join(l) for l in abstract_list])
# abstract = "\n".join(abstract_list)

# Collect the article tags.
tags_list = df.article_tags.to_list()
tags = ' '.join(tags_list)

# Load stopwords (one per line) used to filter the word cloud.
with open('/Users/lawyzheng/Desktop/Code/Public_Code/stopwords.txt', 'r') as f:
    stopwords = f.read()
stopwords = set(stopwords.split('\n'))

# Build and display the word cloud from abstracts plus tags.
wc = wordcloud.WordCloud(width=800, height=600, font_path='msyh.ttf', stopwords=stopwords)
wc.generate(abstract + tags)
image = wc.to_image()
image.show()
|
985,407 | 5ff249f8520a70dc07130e36a8020193404072eb | biggest=0
# Project Euler 4 style: largest palindromic product (Python 2 script).
# NOTE(review): range(999) covers 0..998, so factor 999 is never tried and
# 1-/2-digit factors are included; the canonical 3-digit answer
# (906609 = 913 * 993) is still reachable, but range(100, 1000) would match
# the usual problem statement -- confirm intent before changing.
for i in range(999):
    for j in range(999):
        product = i * j
        mystring = str(product)
        # A palindrome reads the same reversed.
        if mystring == mystring[::-1]:
            if product > biggest:
                biggest = product
print biggest
|
985,408 | 2d82ddb4a635293e80d349c2b38b27951e04e590 | # https://www.acmicpc.net/problem/2851 문제 제목 : 슈퍼 마리오 , 언어 : Python, 날짜 : 2019-08-14, 결과 : 성공
import sys
# BOJ 2851 (per header comment): read ten numbers; report the running
# total of the prefix whose total is closest to 100.
total_num = 0
last_num = 0   # best (frozen) prefix total so far
mode = 0       # becomes 1 once extending the prefix moves the total away from 100
for _ in range(10):
    total_num += int(sys.stdin.readline())
    # Strict ">" means a tie in distance keeps accumulating, i.e. the
    # larger total wins ties.
    if mode == 0 and abs(total_num - 100) > abs(last_num - 100):
        mode = 1
    if mode == 0:
        last_num = total_num
print(last_num)
|
985,409 | 426f221847b24fa4b63984ee95b39e0f9efaf4fb | # Code adapted from Tensorflow Object Detection Framework
# https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
# Tensorflow Object Detection Detector
import numpy as np
import tensorflow as tf
import cv2
import time
class DetectorAPI:
    """Thin wrapper around a frozen TF1 object-detection graph.

    Loads the serialized GraphDef at `path_to_ckpt`, keeps a persistent
    tf.Session open, and exposes per-frame inference via processFrame().
    """

    def __init__(self, path_to_ckpt):
        self.path_to_ckpt = path_to_ckpt

        # Deserialize the frozen inference graph into a private tf.Graph.
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        self.default_graph = self.detection_graph.as_default()
        self.sess = tf.Session(graph=self.detection_graph)

        # Definite input and output Tensors for detection_graph
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

    def processFrame(self, image):
        """Run detection on a single frame (H x W x 3 array).

        Returns (boxes, scores, classes, num) where each box is a
        pixel-space (top, left, bottom, right) tuple scaled from the
        model's fractional coordinates.
        """
        # Expand dimensions since the trained_model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image, axis=0)
        # Actual detection.
        start_time = time.time()
        (boxes, scores, classes, num) = self.sess.run(
            [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: image_np_expanded})
        end_time = time.time()
        # print("Elapsed Time:", end_time-start_time)

        im_height, im_width, _ = image.shape
        boxes_list = [None for i in range(boxes.shape[1])]
        for i in range(boxes.shape[1]):
            # Scale fractional [ymin, xmin, ymax, xmax] to pixel coords.
            boxes_list[i] = (int(boxes[0, i, 0] * im_height),
                             int(boxes[0, i, 1] * im_width),
                             int(boxes[0, i, 2] * im_height),
                             int(boxes[0, i, 3] * im_width))

        return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])

    def close(self):
        """Release the TF session and graph context."""
        self.sess.close()
        self.default_graph.close()
if __name__ == "__main__":
    model_path = 'packages/faster_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb'
    odapi = DetectorAPI(path_to_ckpt=model_path)
    # Set your standard for human detection here (lower means less strict, higher is more strict)
    threshold = 0.7
    cap = cv2.VideoCapture('data/4-25-19_crop2.mp4')
    peds = 0   # people in the zone while the signal reads "go"
    jays = 0   # people in the zone while the signal is against them
    frame = 0
    start_time = time.time()
    print("Start time: " + str(start_time))
    while True:
        r, img = cap.read()
        # NOTE(review): r is never checked -- when the video ends,
        # cap.read() returns (False, None) and cv2.resize below raises.
        # If you want to shrink the video
        shrinker = 1
        # Width and height
        frame_w = int(cap.get(3) * shrinker)
        frame_h = int(cap.get(4) * shrinker)
        # Resize the video
        img = cv2.resize(img, (frame_w, frame_h))
        # Rectangle 1 multipliers (fractions of the frame size)
        x1 = .01
        x2 = .9
        y1 = .42
        y2 = .55
        # Rectangle 1 (drawn from x1, x2, ...)
        pts1 = np.array([[int(frame_w * x1), int(frame_h * y1)], \
                         [int(frame_w * x1), int(frame_h * y2)], \
                         [int(frame_w * x2), int(frame_h * y2)], \
                         [int(frame_w * x2), int(frame_h * y1)]], np.int32)
        # Reshape and draw
        pts1 = pts1.reshape((-1, 1, 2))
        cv2.polylines(img, [pts1], True, (0, 255, 255), 4)
        # Multipliers to find crosswalk signal
        csx1 = .688
        csx2 = .72
        csy1 = .03
        csy2 = .065
        # Crosswalk signal corners: upper-left, bottom-left, bottom-right, upper-right
        ul = [int(frame_w * csx1), int(frame_h * csy1)]
        bl = [int(frame_w * csx1), int(frame_h * csy2)]
        br = [int(frame_w * csx2), int(frame_h * csy2)]
        ur = [int(frame_w * csx2), int(frame_h * csy1)]
        lt_pts = np.array([ul, bl, br, ur])
        # Reshape and draw
        lt_pts = lt_pts.reshape((-1, 1, 2))
        cv2.polylines(img, [lt_pts], True, (255, 0, 255), 2)
        # Read crosswalk signal (crop of the frame)
        signal = img[ul[1]:bl[1], ul[0]:br[0]]
        # Color detection: bright pixels in the crop mean the signal is lit
        boundaries = [
            ([100, 100, 100], [255, 255, 255])
        ]
        # Loop over the boundaries
        for (lower, upper) in boundaries:
            # Create NumPy arrays from the boundaries
            lower = np.array(lower, dtype="uint8")
            upper = np.array(upper, dtype="uint8")
            # Mask on the signal and get a point count
            signal_mask = cv2.inRange(signal, lower, upper)
            point_count = np.count_nonzero(signal_mask)
        boxes, scores, classes, num = odapi.processFrame(img)
        # Visualization of the results of a detection.
        for i in range(len(boxes)):
            # Class 1 represents human
            if classes[i] == 1 and scores[i] > threshold:
                box = boxes[i]
                cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 2)
                # This gets the centroid of that rectangle
                cx = int((box[1] + box[3]) / 2)
                cy = int((box[0] + box[2]) / 2)
                cv2.circle(img, (cx, cy), 5, (0, 0, 255), -1)
                # Check if person's bounding box falls in jaywalk check zone 1
                if ((cx >= int(frame_w * x1)) & (cx <= int(frame_w * x2)) & (cy >= \
                        int(frame_h * y1)) & (cy <= int(frame_h * y2))):  # | \
                    # If the crosswalk signal says go:
                    if point_count > 25:
                        peds += 1
                    else:
                        jays += 1
        cv2.imshow("cnn_preview", img)
        # Print counts
        print("Peds: " + str(int(peds)))
        print("Jays: " + str(int(jays)))
        frame += 1
        print("Time elapsed in video: {} minutes".format(frame / 120))
        process_time = time.time()
        print("Processing time thus far: {} minutes".format(str(round(\
            (process_time - start_time) / 60, 3))))
        print("")
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
# Measure elapsed time for script
end_time = time.time()
print("Elapsed Time: " + str(end_time-start_time))
results = {'peds': peds, 'jays': jays}
with open('tensor_results.csv', 'w') as f:
w = csv.DictWriter(f, results.keys())
w.writeheader()
w.writerow(results)
|
985,410 | ca186385bd8da0661118def7d68ad478860b28eb | '''
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Contiguous subarray (containing at least one number)
For ex: [-2, 1], [-3,4,-1]
'''
# https://dev.to/13point5/maximum-subarray-3c5l
'''
Brute Force Method
def maxSubArray(nums):
# case SubArray Length = 1
ans = max(nums)
# case SubArray Length > 1
for subLength in range(2, len(nums)+1):
# print(subLength)
ans = max(ans, splitSubArray(ans, nums, subLength))
return ans
def splitSubArray(ans, nums, subLength):
# [1,2,3,4]
# [1,2], [2,3], [3,4]
for i in range(0, len(nums)-1):
# print(sum(nums[i: i + subLength]))
ans = max(ans, sum(nums[i: i + subLength]))
# print(ans)
return ans
'''
# https://www.youtube.com/watch?v=jnoVtCKECmQ&t=352s
# O(n) Solution:
def maxSubArray(nums):
    """Kadane's algorithm: largest sum over all non-empty contiguous
    subarrays of `nums`.

    O(n) time, O(1) extra space.
    """
    best = -float('inf')  # -inf so any first element becomes the maximum
    running = 0
    for value in nums:
        running += value
        best = max(best, running)
        # A negative running sum can never help a later subarray; restart
        # the window at the next index.
        if running < 0:
            running = 0
    return best
def main():
    # Expected output: 6 (subarray [4, -1, 2, 1]).
    print(maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))
    #print(maxSubArray([-2, -1]))
    # print(maxSubArray([1]))
if __name__ == "__main__":
main()
|
985,411 | 10a03547d26d9e1161d1728347a8a13b4aa2eb6d | class Solution:
def maximum69Number(self, num: int) -> int:
s = str(num)
if "6" not in s:
return num
else:
i = s.index("6")
return num + 3 * (10 ** (len(s) - i - 1))
if __name__ == "__main__":
    # Quick sanity checks; expected values in the trailing comments.
    print(Solution().maximum69Number(9669))  # 9969
    print(Solution().maximum69Number(9996))  # 9999
    print(Solution().maximum69Number(9999))  # 9999
|
985,412 | e23082acd24f78253cb82995ec563c764655c137 | # -*- coding: utf-8 -*-
"""
The :class:`Control` object is the central part of pyMolDyn.
It is responsible for initiating the calculation process and visualizing
the results.
It contains instances of :class:`core.calculation.calculation.Calculation` and
:class:`visualization.visualization.Visualization` and manages the
interaction between them.
The calculation can be started in a different thread. To do that,
the ``calculationcallback`` attribute can be set
to a custom function. This custom function has to take two parameters:
``func`` and ``settings``, and has to call ``func`` with the parameter
``settings``. Here is an easy example::
def callback(func, settings):
func(settings)
control.calculationcallback = callback
And with threads::
class CalculationThread(QtCore.QThread):
def __init__(self, parent, func, settings):
QtCore.QThread.__init__(self, parent)
self.func = func
self.settings = settings
def run(self):
self.func(self.settings)
class GUI:
def __init__(self, ...):
...
self.control.calculationcallback = self.calculationcallback
def calculationcallback(self, func, settings):
thread = CalculationThread(self, func, settings)
thread.finished.connect(self.control.update)
thread.start()
"""
import os.path
import threading
from config.configuration import config
import core.calculation as calculation
import visualization
# TODO: make an instance of this available everywhere
class Control(object):
    """
    The central controller class that contains the application logic.
    It contains the following attributes:

    `calculation` : :class:`core.calculation.calculation.Calculation`

    `visualization` : :class:`visualization.visualization.Visualization`
    """

    def __init__(self):
        self.config = config
        # Make sure the cache directory exists before any calculation runs.
        if not os.path.isdir(os.path.expanduser(config.Path.cache_dir)):
            os.makedirs(os.path.expanduser(config.Path.cache_dir))
        self._calculation = calculation.Calculation()
        self._visualization = None  # will be initialized when needed
        self.results = None
        # Invoked as callback(func, settings); replace to run the
        # calculation on a worker thread (see the module docstring).
        self.calculationcallback = Control.defaultcallback
        # Serializes access to `results` between the calculation thread
        # and the (GUI) thread calling update().
        self.lock = threading.Lock()

    def _calculate(self, settings):
        # Worker entry point: runs the calculation and stores the results.
        with self.lock:
            self.results = None
            self.results = self.calculation.calculate(settings)

    def calculate(self, settings):
        """
        Start a calculation and prepare the results to be visualized.
        To make it possible to start the calculation in a thread, the
        function ``self.calculationcallback`` is called, which then starts
        the calculation itself.

        **Parameters:**
            `settings` :
                :class:`core.calculation.calculation.CalculationSettings` object
        """
        # TODO: only call when something is calculated
        self.calculationcallback(self._calculate, settings.copy())

    def update(self, was_successful=lambda : True):
        """
        Visualize previously calculated results. It has to be called from
        the same thread which uses the OpenGL context, usually the
        event handler of the GUI.

        NOTE(review): `was_successful` is only tested for truthiness and
        never called, so the default lambda behaves like `True`; callers
        appear to pass a boolean-like value -- confirm intended contract.
        """
        with self.lock:
            if was_successful and self.results is not None:
                # `results` is nested; show the most recent entry.
                self.visualization.setresults(self.results[-1][-1])

    def visualize(self, filename, frame, resolution=None):
        """
        Visualize the given frame. If the ``resolution`` parameter is not
        set, this function uses the highest resolution available to show
        calculation results.
        """
        results = self.calculation.getresults(filename, frame, resolution)
        self.results = [[results]]
        self.update()

    @staticmethod
    def defaultcallback(func, settings):
        # Synchronous fallback: run the calculation on the calling thread.
        func(settings)

    @property
    def calculation(self):
        return self._calculation

    @property
    def visualization(self):
        # Lazily constructed (see "will be initialized when needed"
        # in __init__).
        if self._visualization is None:
            self._visualization = visualization.Visualization()
        return self._visualization
|
985,413 | e938e8642dec45bbfb3fbbde2a6951a368b66c29 | import pygame
from math import *
from random import *
import numpy as np
import sys
from time import time
from ctypes import *
gravity = cdll.LoadLibrary("fGravity.so")
calc = gravity.calcAndUpdate
calc.argtypes=[POINTER(c_float),c_int]
calc.restype=POINTER(c_float)
SCREEN_WIDTH, SCREEN_HEIGHT = 600,600
BG_COLOR = 150, 150, 80
pygame.init()
fg = 0.1 #fg = 0.1 for nice clumping. At 1, clumps into large mass
damp = 0.001
fe = 100
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
clock = pygame.time.Clock()
ms = []
debug = False
critical_factor = 0.008
num = 500
class mass:
    # A charged unit-mass particle; positions/velocities in screen pixels.
    # Python 2 class (print statements below).

    def __init__(self, x, y, vx, vy, radius, charge):
        self.x = x
        # `charge` is the string "+" or "-"; both carry unit mass.
        if charge == "+":
            self.charge = 1
            self.m = 1
        elif charge == "-":
            self.charge = -1
            self.m = 1
        else:
            print "Error in charge"
        self.y = y
        self.vx = vx
        self.vy = vy
        self.rad = radius

    def draw(self):
        # Red = positive, blue = negative, black = anything else.
        if self.charge == 1:
            pygame.draw.circle(screen, (255, 0, 0), (int(self.x), int(self.y)), self.rad, 0)
        elif self.charge == -1:
            pygame.draw.circle(screen, (0, 0, 255), (int(self.x), int(self.y)), self.rad, 0)
        else :
            pygame.draw.circle(screen, (0, 0, 0), (int(self.x), int(self.y)), self.rad, 0)

    def app_force(self, other):
        # Pairwise interaction: gravity (fg) minus Coulomb term (fe),
        # applied symmetrically to both particles' velocities in place
        # (time step folded into the force constants).
        dx = self.x - other.x
        dy = self.y - other.y
        r = pow(dx ** 2 + dy ** 2, 0.5)
        fxs = -(fg * self.m * other.m - self.charge * other.charge * fe) * dx / (r ** 3 * self.m)
        fys = -(fg * self.m * other.m - self.charge * other.charge * fe) * dy / (r ** 3 * self.m)
        fxo = (fg * self.m * other.m - self.charge * other.charge * fe) * dx / (r ** 3 * other.m)
        fyo = (fg * self.m * other.m - self.charge * other.charge * fe) * dy / (r ** 3 * other.m)
        if r > (10):
            self.vx += fxs
            self.vy += fys
            other.vx += fxo
            other.vy += fyo
        elif r > 3 :
            # Soften the force at close range (scaled by critical_factor*r^2)
            # to avoid the 1/r^3 blow-up; below r=3 no force is applied.
            self.vx += fxs * critical_factor * r ** 2
            self.vy += fys * critical_factor * r ** 2
            other.vx += fxo * critical_factor * r ** 2
            other.vy += fyo * critical_factor * r ** 2
        # Velocity-difference damping, only between opposite charges
        # (the |charge difference| factor is 0 for like charges).
        self.vx -= damp * (self.vx - other.vx) * abs(self.vx) * abs(self.charge - other.charge) * abs(self.x - other.x)
        self.vy -= damp * (self.vy - other.vy) * abs(self.vy) * abs(self.charge - other.charge) * abs(self.y - other.y)
        other.vx -= damp * (other.vx - self.vx) * abs(other.vx) * abs(self.charge - other.charge) * abs(self.x - other.x)
        other.vy -= damp * (other.vy - self.vy) * abs(other.vy) * abs(self.charge - other.charge) * abs(self.y - other.y)

    def update(self):
        # Position integration is commented out: the C library advances
        # positions; here we only bounce velocities off the window edges.
        # self.x +=self.vx
        # print self.vx
        # self.y +=self.vy
        if not 1 < self.x < SCREEN_WIDTH:
            self.vx = (-self.vx)
        if not 1 < self.y < SCREEN_HEIGHT:
            self.vy = (-self.vy)
        self.draw()

    def nparray(self):
        # Pack state as float32 [x, y, vx, vy, m, charge] for the C kernel.
        return np.array([self.x, self.y, self.vx, self.vy, self.m, self.charge], dtype=np.float32)
# Scene setup: 2-particle debug scenario, or `num` random particles --
# first half negative, second half positive.
if debug == True:
    ms.append(mass(400, 300, -1.2, 0.9, 3, "-"))
    ms.append(mass(200, 300, 0, 0, 3, "+"))
    num = 2
    frame_rate = 1000
else :
    for i in range(0, num / 2):
        ms.append(mass(randint(0, SCREEN_WIDTH - 10), randint(0, SCREEN_HEIGHT - 10), 0, 0, 3, "-"))
    for i in range(num / 2, num):
        ms.append(mass(randint(0, SCREEN_WIDTH - 10), randint(0, SCREEN_HEIGHT - 10), 0, 0, 3, "+"))

# Flatten all particle state into one float32 array (6 values/particle)
# and hand a C float pointer to the gravity library.
fullarray = ms[0].nparray()
# time_passed = clock.tick(frame_rate)
for x in ms[1:]:
    fullarray = np.append(fullarray, x.nparray())
aptr = fullarray.ctypes.data_as(POINTER(c_float))
start = time()
count = 0
running = True
while running:
    print "FPS = ", 1 / (time() - start)
    start = time()
    # time_passed = clock.tick(frame_rate)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            exit_game()
    # Redraw the background
    #if not debug:
    screen.fill(BG_COLOR)
    # The C library advances the simulation and returns the updated state.
    cptr = calc(aptr, num)
    for x in range(num):
        try:
            # Index 5 is the charge; indices 0/1 are the position.
            if cptr[6 * x + 5] == 1:
                pygame.draw.circle(screen, (255, 0, 0), (int(cptr[6 * x + 0]), int(cptr[6 * x + 1])), 3, 0)
            elif cptr[6 * x + 5] == -1:
                pygame.draw.circle(screen, (0, 0, 255), (int(cptr[6 * x + 0]), int(cptr[6 * x + 1])), 3, 0)
        except OverflowError:
            print "OverFlow"
            pass
        except Exception, e:
            print e
    aptr = cptr
    pygame.display.update()
    # Arrow keys (K_UP/K_DOWN/K_RIGHT/K_LEFT scancodes 273-276) pan the
    # Python-side particles. NOTE(review): rendering uses the C-side
    # state in cptr, so this panning has no visible effect -- confirm.
    if pygame.key.get_focused():
        press = pygame.key.get_pressed()
        if press[273] == 1:
            for x in xrange(0, num):
                ms[x].y -= 1 * num / 2
        if press[274] == 1:
            for x in xrange(0, num):
                ms[x].y += 1 * num / 2
        if press[275] == 1:
            for x in xrange(0, num):
                ms[x].x += 1 * num / 2
        if press[276] == 1:
            for x in xrange(0, num):
                ms[x].x -= 1 * num / 2
|
985,414 | e203856afc0123a7b3c410c72aaa9f4520bcc2a0 | #!/usr/bin/env python
# coding=utf-8
"""
Description
=====================
Main application file. Runs a program for deciding who has chores today and then emails them with their list.
Requirements
====================
Raspberry Pi with SENSE HAT
Joystick Functionality
=====================
DOWN : List all chores on rota and current assignee
UP : Weather from SENSE HAT
UP*2 : Stop program and exit
RIGHT : Random fact from uselessfacts API
RIGHT*2: Toggle emails
LEFT : Current date and time
LEFT*2: Show controls
MIDDLE : Random event chooser
Operation Times:
======================
Emailing:
- will send emails at a given hour and minute (see EMAIL_TIME_HOUR & EMAIL_TIME_MINUTE)
Powering off (to save energy):
- will power off at a given time (see POWER_OFF_HOUR & POWER_OFF_MINUTE)
Notes:
====================
Email addresses will need to be stored in a separate file within the Raspberry Pi's filesystem.
Can be run using a crontab job on the Raspberry Pi e.g. @reboot python <PATH TO FILE>/home_chores.py &
"""
from sense_hat import SenseHat
import time
import datetime
import smtplib
from common import emailer, chores, collect_facts
import threading
import subprocess
import random
import json
import os
# set a random seed
random.seed(datetime.datetime.now())
# instatiate and clear the Sense Hat
sense = SenseHat()
sense.clear()
sense.low_light = True
sense.set_rotation(180)
EMAIL_ADRESSES_TXT = '/home/pi/Documents/home_chores_project/email_addresses.txt'
sense.show_message('Hello :)')
with open(EMAIL_ADRESSES_TXT, 'rb') as json_file:
EMAIL_ADDRESSES = json.load(json_file)
EMAIL_TIME_HOUR = 7
EMAIL_TIME_MINUTE = 30
POWER_OFF_HOUR = 10
POWER_OFF_MINUTE = 1
SEND_EMAILS = True
EMAIL_SENT_TODAY = False
SCROLL_SPEED = (0.05)
RANDOM_EVENTS = ['Board Game', 'Book Club', 'Garden Time',
'Movie Time', 'You Decide', 'Craft Club', 'Party Game']
r = (255, 0, 0)
b = (0, 100, 255)
y = (255, 255, 0)
g = (0, 255, 0)
n = (0, 0, 0)
t = (255, 50, 0)
BACK_COLOUR = n
TEXT_COLOUR = t
CURRENT = ''
LAST = 's'
def watch_pi():
    """Poll the Sense HAT joystick and dispatch actions.

    A press in a direction different from the previous press triggers the
    primary action; pressing the same direction twice in a row triggers
    the secondary ("double click") action, after which LAST is reset.
    """
    while True:
        global CURRENT, LAST, SEND_EMAILS
        for event in sense.stick.get_events():
            CURRENT = event.direction
            if event.action == 'pressed':
                if CURRENT != LAST:
                    # Single-press actions.
                    if event.direction == 'up':
                        print('getting weather...')
                        get_weather(sense)
                    elif event.direction == 'down':
                        print('getting chores...')
                        chore_message, _ = chores.get_chores()
                        sense.show_message(chore_message, back_colour=BACK_COLOUR,
                                           text_colour=TEXT_COLOUR, scroll_speed=SCROLL_SPEED)
                    elif event.direction == 'left':
                        print('getting date...')
                        get_date(sense)
                    elif event.direction == 'right':
                        print('getting facts...')
                        fact = collect_facts.collect_facts()
                        sense.show_message(
                            fact, back_colour=BACK_COLOUR, text_colour=TEXT_COLOUR, scroll_speed=SCROLL_SPEED)
                    elif event.direction == 'middle':
                        print('getting random event...')
                        rand_event = RANDOM_EVENTS[random.randint(
                            0, len(RANDOM_EVENTS) - 1)]
                        sense.show_message('Random event is: %s' % (rand_event), back_colour=BACK_COLOUR,
                                           text_colour=TEXT_COLOUR, scroll_speed=SCROLL_SPEED)
                    else:
                        pass
                    LAST = event.direction
                else:
                    ## FOR DOUBLE CLICKS ##
                    if event.direction == 'up':
                        # power off
                        sense.show_message(
                            'powering off...', text_colour=r, scroll_speed=SCROLL_SPEED)
                        time.sleep(10)
                        os._exit(1)
                    elif event.direction == 'down':
                        pass
                    elif event.direction == 'left':
                        # Show controls. Fixed message: the original swapped
                        # U/D and labelled R as "Controls"; the actual
                        # mapping above is U=weather, D=chores, R=facts.
                        sense.show_message('Controls: U: Weather D: Chores L: Date R: Facts M: Random Event', back_colour=BACK_COLOUR,
                                           text_colour=TEXT_COLOUR, scroll_speed=(0.03))
                    elif event.direction == 'right':
                        # toggle emails
                        sense.show_message('toggling emails. set to: %s ' %
                                           (not SEND_EMAILS), back_colour=BACK_COLOUR, text_colour=TEXT_COLOUR, scroll_speed=SCROLL_SPEED)
                        SEND_EMAILS = not SEND_EMAILS
                    elif event.direction == 'middle':
                        print('dance baby...')
                        pass
                    else:
                        pass
                    LAST = 'a'
        time.sleep(5)
def make_email(sender, name, email_add, current_date):
    """Build and send one person's HTML chores email.

    NOTE(review): the chore tables are indexed with name[0] (the first
    character of the name) -- presumably assignees are keyed by initial;
    confirm against chores.get_chores().
    """
    _, all_chores = chores.get_chores()
    fact = collect_facts.collect_facts()
    # Render each chore list as a run of HTML <li> items.
    daily_chores = ("".join(['<li>%s</li>'] * len(all_chores['daily']
                                                  [name[0]]))) % tuple(all_chores['daily'][name[0]])
    weekly_chores = ("".join(['<li>%s</li>'] * len(all_chores['weekly']
                                                   [name[0]]))) % tuple(all_chores['weekly'][name[0]])
    emailSubject = "Chores for %s" % (current_date)
    emailContent = "Hello %s! <br><br> Your Daily Chores for today are: <br> <ul> %s </ul> <br><br> Chores which will \
need to be completed by this Sunday are: <br> <ul> %s </ul> <br><br>. Have a good day, <br>Raspberry Pi out. <br><br> Daily Fact: %s " % (name, daily_chores, weekly_chores, fact)
    sender.sendmail(email_add, emailSubject, emailContent)
    return
def distribute_emails():
    """Background loop: send the daily chore emails and handle power-off.

    Polls every 30 seconds. Sends once per day at
    EMAIL_TIME_HOUR:EMAIL_TIME_MINUTE (guarded by EMAIL_SENT_TODAY, which
    is reset at 01:00) and exits the process at
    POWER_OFF_HOUR:POWER_OFF_MINUTE.

    NOTE(review): the power-off check lives inside `if SEND_EMAILS`, so
    toggling emails off also disables the scheduled power-off -- confirm
    that is intended.
    """
    global SEND_EMAILS, EMAIL_SENT_TODAY
    while True:
        if SEND_EMAILS:
            current_time = time.localtime()
            if current_time.tm_hour == EMAIL_TIME_HOUR and current_time.tm_min == EMAIL_TIME_MINUTE and not EMAIL_SENT_TODAY:
                sense.show_message('sending emails', back_colour=BACK_COLOUR,
                                   text_colour=TEXT_COLOUR, scroll_speed=SCROLL_SPEED)
                chores.read_and_update_day_counter()
                sender = emailer.Emailer()
                for name in EMAIL_ADDRESSES.keys():
                    current_date = datetime.datetime.now().strftime('%d %b')
                    make_email(sender, name, EMAIL_ADDRESSES[name], current_date)
                sense.show_message('all emails sent', back_colour=BACK_COLOUR,
                                   text_colour=TEXT_COLOUR, scroll_speed=SCROLL_SPEED)
                EMAIL_SENT_TODAY = True
            elif current_time.tm_hour == 1 and current_time.tm_min == 0:
                # New day: allow the next send.
                EMAIL_SENT_TODAY = False
            elif current_time.tm_hour == POWER_OFF_HOUR and current_time.tm_min == POWER_OFF_MINUTE:
                sense.show_message(
                    'powering off...', text_colour=r, scroll_speed=SCROLL_SPEED)
                time.sleep(10)
                os._exit(1)
        time.sleep(30)
        pass
def get_weather(sense):
    """Read the Sense HAT sensors and scroll a one-line weather summary."""
    temp = round(sense.get_temperature(), 1)
    humidity = round(sense.get_humidity(), 1)
    pressure = round(sense.get_pressure(), 1)
    summary = "Temperature: %s.C Humidity: %s%% Pressure: %smb" % (temp, humidity, pressure)
    sense.show_message(summary, back_colour=BACK_COLOUR,
                       text_colour=TEXT_COLOUR, scroll_speed=SCROLL_SPEED)
    return
def get_date(sense):
    """Scroll the current local date and time (e.g. "01 Jan 12:34")."""
    stamp = datetime.datetime.now().strftime('%d %b %H:%M')
    sense.show_message("Date: %s " % (stamp), back_colour=BACK_COLOUR,
                       text_colour=TEXT_COLOUR, scroll_speed=SCROLL_SPEED)
    return
if __name__ == "__main__":
    # Run the joystick watcher and the email scheduler as daemon threads
    # (daemon= replaces the deprecated setDaemon()).
    t1 = threading.Thread(target=watch_pi, daemon=True)
    t2 = threading.Thread(target=distribute_emails, daemon=True)
    t1.start()
    t2.start()
    # Keep the main thread alive without busy-spinning: the original
    # `while True: pass` pinned a CPU core on the Pi.
    while True:
        time.sleep(60)
|
985,415 | d7a080d522be62d0a45f6ee4a50dbeb2d69123f2 | from django.contrib import admin
from .models import UserCreateNews, CategoriesNews
# Register your models here.
# Expose the news models in the Django admin with default ModelAdmin options.
admin.site.register(UserCreateNews)
admin.site.register(CategoriesNews)
|
985,416 | e1fa99065e6ee7ae15f2bfe51a9c3f019632fe84 | #! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
from ludwig.constants import TIED
logger = logging.getLogger(__name__)
def topological_sort(graph_unsorted):
    """Topologically sort a dependency graph.

    `graph_unsorted` is an iterable of (node, edges) pairs, where `edges`
    lists the nodes this node depends on (it may be None, meaning no
    dependencies). Returns a list of (node, edges) pairs in which every
    node appears after all of its dependencies; `edges` is normalized to
    a list in the result.

    Raises RuntimeError if the graph contains a cyclic dependency.
    """
    ordered = []
    # Work on a dict copy: constant-time membership tests for unresolved
    # edges and cheap removal of resolved nodes.
    remaining = dict(graph_unsorted)

    while remaining:
        # One pass over the remaining nodes, moving every node whose
        # dependencies are all resolved (i.e. no longer in `remaining`).
        # list(...) snapshots the items so we can delete while iterating.
        progressed = False
        for node, edges in list(remaining.items()):
            edges = [] if edges is None else edges
            if not any(edge in remaining for edge in edges):
                progressed = True
                del remaining[node]
                ordered.append((node, edges))

        if not progressed:
            # A full pass resolved nothing: every remaining node waits on
            # another remaining node, so there must be a cycle.
            raise RuntimeError("A cyclic dependency occurred")

    return ordered
def topological_sort_feature_dependencies(features):
    """Order output-feature configs so each appears after its dependencies.

    A feature depends on the entries of its "dependencies" list and, when
    weight tying is configured, on the feature named by its TIED key.
    """
    dependency_graph = {}
    features_by_name = {}
    for feature in features:
        feature_deps = list(feature.get("dependencies", []))
        if TIED in feature:
            feature_deps.append(feature[TIED])
        dependency_graph[feature["name"]] = feature_deps
        features_by_name[feature["name"]] = feature

    return [
        features_by_name[node] for node, _ in topological_sort(dependency_graph)
    ]
if __name__ == "__main__":
    # Smoke tests: print valid orderings for a small integer DAG and a
    # feature-style string graph (None edges = no dependencies).
    graph_unsorted = [(2, []), (5, [11]), (11, [2, 9, 10]), (7, [11, 8]), (9, []), (10, []), (8, [9]), (3, [10, 8])]
    logger.info(topological_sort(graph_unsorted))
    graph_unsorted = [("macro", ["action", "contact_type"]), ("contact_type", None), ("action", ["contact_type"])]
    logger.info(topological_sort(graph_unsorted))
|
985,417 | 7f4bdd42b2ecbc386b3078642b5004781c1e883a | #!/usr/bin/env python3
import time
import timeit
import gym
import ptan
import os
import sys
sys.path.append(os.getcwd())
from lib import common
import torch
import torch.nn as nn
# Results:
# Original sync, number=100, cuda=True, speed=7634.508 runs/s
# Original sync, number=1000, cuda=True, speed=8606.037 runs/s
# Original sync, number=10000, cuda=True, speed=8822.823 runs/s
# Original sync, number=100000, cuda=True, speed=8842.458 runs/s
#
# Original sync, number=100, cuda=False, speed=779.575 runs/s
# Original sync, number=1000, cuda=False, speed=767.816 runs/s
# Original sync, number=10000, cuda=False, speed=770.027 runs/s
# Original sync, number=100000, cuda=False, speed=755.772 runs/s
# New sync, async=False
# New sync, number=100, cuda=True, speed=6001.022 runs/s
# New sync, number=1000, cuda=True, speed=6087.863 runs/s
# New sync, number=10000, cuda=True, speed=6083.333 runs/s
# New sync, number=100000, cuda=True, speed=6096.957 runs/s
# async=True
# New sync, number=100, cuda=True, speed=5574.816 runs/s
# New sync, number=1000, cuda=True, speed=6006.258 runs/s
# New sync, number=10000, cuda=True, speed=6053.777 runs/s
# New sync, number=100000, cuda=True, speed=6074.822 runs/s
CUDA = True
REPEAT_NUMBER = 100
def make_env():
    """Build the Pong environment with the standard DQN wrappers applied."""
    raw_env = gym.make("PongNoFrameskip-v4")
    return ptan.common.wrappers.wrap_dqn(raw_env)
def new_sync(tgt_net, src_net):
    """Copy every parameter of ``src_net`` into ``tgt_net`` in place.

    BUG FIX: ``async`` became a reserved keyword in Python 3.7, so the
    original ``tgt.data.copy_(src.data, broadcast=False, async=True)`` is a
    SyntaxError there; ``Tensor.copy_`` also takes ``non_blocking``, not
    ``broadcast``/``async``.

    :param tgt_net: destination ``nn.Module`` (mutated in place).
    :param src_net: source ``nn.Module``; must have the same architecture,
        since parameters are paired positionally by ``zip``.
    """
    assert isinstance(tgt_net, nn.Module)
    assert isinstance(src_net, nn.Module)
    for tgt, src in zip(tgt_net.parameters(), src_net.parameters()):
        # non_blocking=True preserves the original's intent of an async
        # device-side copy when the tensors live on CUDA.
        tgt.data.copy_(src.data, non_blocking=True)
if __name__ == "__main__":
    # Benchmark: ptan's TargetNet.sync() vs the new_sync() copy above.
    env = make_env()
    net = common.AtariA2C(env.observation_space.shape, env.action_space.n)
    if CUDA:
        net.cuda()
    # Sleep lets the machine settle (GPU clocks, background load) before timing.
    print("Initial sleep 20 seconds")
    time.sleep(20)
    tgt_net = ptan.agent.TargetNet(net)
    # timeit evaluates its statement strings in this namespace, so expose
    # the locals (tgt_net, net, new_sync) to it.
    ns = globals()
    ns.update(locals())
    for number in [100, 1000, 10000, 100000]:
        t = timeit.timeit('tgt_net.sync()', number=number, globals=ns)
        print("Original sync, number=%d, cuda=%s, speed=%.3f runs/s" % (number, CUDA, number / t))
    for number in [100, 1000, 10000, 100000]:
        t = timeit.timeit('new_sync(tgt_net.target_model, net)', number=number, globals=ns)
        print("New sync, number=%d, cuda=%s, speed=%.3f runs/s" % (number, CUDA, number / t))
|
985,418 | cb5d0bfcc390f49c4a5d09e1ad7bc71ac0542411 | # Generated by Django 2.2.6 on 2019-12-02 18:20
from django.db import migrations
class Migration(migrations.Migration):
    # Renames PersonPosition.data_policy -> privacy_policy.
    # Pure column rename: no data transformation is performed.

    dependencies = [
        ('project_core', '0088_organisation_mandatory_fields'),
    ]

    operations = [
        migrations.RenameField(
            model_name='personposition',
            old_name='data_policy',
            new_name='privacy_policy',
        ),
    ]
|
985,419 | 4d970a14bd2f7763b30dc87bb1cecc84607932e6 | import re
from pygments.lexer import RegexLexer, words
from pygments.token import *
class PCFLexer(RegexLexer):
    """Pygments lexer for the PCF toy language.

    Rules are tried in order: numbers, keywords, operators, then a
    catch-all that emits everything else as plain text.
    """
    name = 'PCF'
    aliases = ['pcf']
    filenames = ['*.pcf']

    tokens = {
        'root': [
            (r'\d+', Number),
            (words(('else', 'in', 'ifz', 'fun', 'fix', 'let', 'then'), suffix=r'\b'), Keyword),
            # BUG FIX: regex alternation is ordered, so '->' must come before
            # '-'. The original pattern listed '-' first, which made an arrow
            # lex as Operator('-') followed by Text('>').
            (r'(->|=|\+|-|\*|/)', Operator),
            (r'.', Text),
        ]
    }
|
985,420 | d661fb1049e11cbf456f1cd6878daf742a5103e5 | #%%
'Curve fitting input code'
'Uses the Lmfit module to fit the data'
import numpy as np  # BUG FIX: np.sqrt / np.pi are used throughout this script but numpy was never imported

from NewFitAll import NewFit  # 1 Gaussian
from NewFitAll import NewFit2  # 2 Gaussians
from NewFitAll import NewFit3  # 3 Gaussians
from NewFitAll import NewFit4  # 4 Gaussians
from NewFitAll import NewFit5  # 5 Gaussians
from NewFitAll import NewFit7  # 7 Gaussians
#%%
# Each cell below holds hand-tuned initial guesses (mean mu*, width sig*,
# area amp*) for one dataset; run exactly ONE cell, then the fit cell at the
# bottom. amp* converts a peak height to an lmfit 'amplitude' (area) via
# height * sqrt(2*pi) * sigma.
# NOTE(review): amp3..amp7 in several cells multiply by sig2 rather than
# their own sigma — looks like a copy-paste slip; confirm before trusting
# the initial areas.
'W4 Data'
mu1= 0.3
sig1 = 1.0
amp1 = 450 * np.sqrt(2*np.pi)*sig1
mu2 = 10
sig2 = 2.0
amp2 = 650* np.sqrt(2*np.pi)*sig2
mu3 = 20
sig3 = 2.0
amp3 = 550* np.sqrt(2*np.pi)*sig2
mu4 = 30.0
sig4 = 2.0
amp4 = 400* np.sqrt(2*np.pi)*sig2
mu5 = 40.0
sig5 = 2.0
amp5 = 190* np.sqrt(2*np.pi)*sig2
mu6 = 50.0
sig6 = 2.0
amp6 = 100* np.sqrt(2*np.pi)*sig2
mu7 = 60.0
sig7 = 2.0
amp7 = 50* np.sqrt(2*np.pi)*sig2
#%%
'W3 Data'
mu1= 7
sig1 = 1.0
amp1 = 450 * np.sqrt(2*np.pi)*sig1
mu2 = 14
sig2 = 2.0
amp2 = 650* np.sqrt(2*np.pi)*sig2
mu3 = 21
sig3 = 2.0
amp3 = 550* np.sqrt(2*np.pi)*sig2
mu4 = 28
sig4 = 2.0
amp4 = 400* np.sqrt(2*np.pi)*sig2
mu5 = 35
sig5 = 2.0
amp5 = 190* np.sqrt(2*np.pi)*sig2
mu6 = 41
sig6 = 2.0
amp6 = 100* np.sqrt(2*np.pi)*sig2
mu7 = 49
sig7 = 2.0
amp7 = 50* np.sqrt(2*np.pi)*sig2
#%%
'W2 Data'
mu1= 0
sig1 = 1.0
amp1 = 450 * np.sqrt(2*np.pi)*sig1
mu2 = 5
sig2 = 2.0
amp2 = 650* np.sqrt(2*np.pi)*sig2
mu3 = 10
sig3 = 2.0
amp3 = 550* np.sqrt(2*np.pi)*sig2
mu4 = 15
sig4 = 2.0
amp4 = 400* np.sqrt(2*np.pi)*sig2
mu5 = 20
sig5 = 2.0
amp5 = 190* np.sqrt(2*np.pi)*sig2
mu6 = 25
sig6 = 2.0
amp6 = 100* np.sqrt(2*np.pi)*sig2
mu7 = 30
sig7 = 2.0
amp7 = 50* np.sqrt(2*np.pi)*sig2
#%%
'W1 Data'
# W1 uses only 4 Gaussians.
mu1= 0
sig1 = 1.0
amp1 = 250 * np.sqrt(2*np.pi)*sig1
mu2 = 3
sig2 = 2.0
amp2 = 200* np.sqrt(2*np.pi)*sig2
mu3 = 5
sig3 = 2.0
amp3 = 55* np.sqrt(2*np.pi)*sig2
mu4 = 8
sig4 = 2.0
amp4 = 25* np.sqrt(2*np.pi)*sig2
#%%
'W6 W7 Data'
# Single/double Gaussian starting point.
mu1= 0
sig1 = 1.0
amp1 = 250 * np.sqrt(2*np.pi)*sig1
mu2= 0
sig2 = 1.0
amp2 = 250 * np.sqrt(2*np.pi)*sig1
#%%
'W6 dark count data'
mu1= 7
sig1 = 1.0
amp1 = 3500 * np.sqrt(2*np.pi)*sig1
mu2= 7
sig2 = 2
amp2 = 500 * np.sqrt(2*np.pi)*sig1
mu3= 15
sig3 = 2.0
amp3 = 500 * np.sqrt(2*np.pi)*sig1
#%%
'Cut the Data'
# NOTE(review): bincentroid and CountsData are not defined in this file —
# they must already exist in the interactive session (earlier notebook
# cell); confirm before running stand-alone.
x = [];
y = [];
#for i in range(0,120):
#    x.append(bincentroid[i]);
#    y.append(CountsData[i]);
x = bincentroid;
y = CountsData;
#plt.plot(x,y);
'Choose the fit type'
# Uncomment exactly one NewFit* call to match the cell of guesses you ran.
temporary = [];
#temporary = NewFit(amp1, mu1, sig1,x,y);
#temporary = NewFit2(amp1,amp2, mu1,mu2, sig1,sig2,x,y);
temporary = NewFit3(amp1,amp2,amp3, mu1,mu2,mu3, sig1,sig2,sig3,x,y);
#temporary = NewFit4(amp1,amp2,amp3,amp4, mu1,mu2,mu3,mu4, sig1,sig2,sig3,sig4,x,y);
#temporary = NewFit5(amp1,amp2,amp3,amp4,amp5,mu1,mu2,mu3,mu4,mu5,sig1,sig2,sig3,sig4,sig5,x,y);
#temorary = NewFit7(amp1,amp2,amp3,amp4,amp5, amp6,amp7, mu1,mu2,mu3,mu4,mu5,mu6,mu7, sig1,sig2,sig3,sig4,sig5,sig6,sig7, x,y)
|
985,421 | 29987e450cb0cd01f2a6ade2a7e214bfeb68baaf | from django.urls import path
from school.views import (
ClassroomListView, StudentListView,
TeacherListView, SubjectListView
)
# Read-only list endpoints for the school app. The route names are the
# targets for reverse()/{% url %} lookups elsewhere in the project.
urlpatterns = [
    path('classrooms/', ClassroomListView.as_view(), name='classroom-list'),
    path('students/', StudentListView.as_view(), name='student-list'),
    path('teachers/', TeacherListView.as_view(), name='teacher-list'),
    path('subjects/', SubjectListView.as_view(), name='subject-list'),
]
|
985,422 | 5b2168cfac5df9878c3d6b423d7a79f08d409de4 | def solution(n, computers):
answer = 0
network = [[] for _ in range(n)]
visited = [False] * n
for i in range(len(computers)):
for j in range(len(computers[0])):
if computers[i][j] == 1 and i != j:
network[i].append(j)
for i in range(n):
if dfs(i, network, visited):
answer += 1
return answer
def dfs(com, network, visited):
if visited[com]:
return False
visited[com] = True
for i in network[com]:
dfs(i, network, visited)
return True
|
985,423 | 06577e1e64aa7dbcb91b12d16f7bb46b2d50bab3 | import os
from collections import deque
import PIL.Image as Image
import erdos
from erdos import Message, ReadStream, Timestamp, WriteStream
import numpy as np
from pylot.perception.messages import LanesMessage
class PerfectLaneDetectionOperator(erdos.Operator):
    """Operator that uses the simulator to perfectly detect lanes.

    Args:
        pose_stream (:py:class:`erdos.ReadStream`): Stream on which pose
            info is received.
        open_drive_stream (:py:class:`erdos.ReadStream`): Stream on which open
            drive string representations are received. The operator can
            construct HDMaps out of the open drive strings.
        detected_lane_stream (:py:class:`erdos.WriteStream`): Stream on which
            the operator writes
            :py:class:`~pylot.perception.messages.LanesMessage` messages.
        flags (absl.flags): Object to be used to access absl flags.
    """
    def __init__(self, pose_stream: ReadStream, open_drive_stream: ReadStream,
                 center_camera_stream: ReadStream,
                 detected_lane_stream: WriteStream, flags):
        # Per-message callbacks buffer inputs; the watermark callback pairs
        # them up once both streams have reached the same timestamp.
        pose_stream.add_callback(self.on_pose_update)
        center_camera_stream.add_callback(self.on_bgr_camera_update)
        erdos.add_watermark_callback([pose_stream, center_camera_stream],
                                     [detected_lane_stream],
                                     self.on_position_update)
        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        # FIFO buffers; on_position_update pops one message from each.
        self._bgr_msgs = deque()
        self._pose_msgs = deque()
        self._frame_cnt = 0

    @staticmethod
    def connect(pose_stream: ReadStream, open_drive_stream: ReadStream,
                center_camera_stream: ReadStream):
        # Declares the operator's single output stream to the dataflow graph.
        detected_lane_stream = erdos.WriteStream()
        return [detected_lane_stream]

    def destroy(self):
        self._logger.warn('destroying {}'.format(self.config.name))

    def run(self):
        # Run method is invoked after all operators finished initializing.
        # Thus, we're sure the world is up-to-date here.
        if self._flags.execution_mode == 'simulation':
            from pylot.map.hd_map import HDMap
            from pylot.simulation.utils import get_map
            self._map = HDMap(
                get_map(self._flags.simulator_host, self._flags.simulator_port,
                        self._flags.simulator_timeout),
                self.config.log_file_name)
            from pylot.simulation.utils import get_world
            _, self._world = get_world(self._flags.simulator_host,
                                       self._flags.simulator_port,
                                       self._flags.simulator_timeout)
        # NOTE(review): outside simulation mode self._map is never set, and
        # on_position_update reads it unconditionally — confirm on_opendrive_map
        # is guaranteed to fire first in that configuration.

    def on_opendrive_map(self, msg: Message):
        """Invoked whenever a message is received on the open drive stream.

        Args:
            msg (:py:class:`~erdos.message.Message`): Message that contains
                the open drive string.
        """
        self._logger.debug('@{}: received open drive message'.format(
            msg.timestamp))
        from pylot.simulation.utils import map_from_opendrive
        self._map = map_from_opendrive(msg.data)

    def on_bgr_camera_update(self, msg: Message):
        # Buffer the camera frame until the matching pose arrives.
        self._logger.debug('@{}: received BGR frame'.format(msg.timestamp))
        self._bgr_msgs.append(msg)

    def on_pose_update(self, msg: Message):
        self._logger.debug('@{}: received pose message'.format(msg.timestamp))
        self._pose_msgs.append(msg)

    @erdos.profile_method()
    def on_position_update(self, timestamp: Timestamp,
                           detected_lane_stream: WriteStream):
        """Invoked on the receipt of an update to the position of the vehicle.

        Uses the position of the vehicle to get future waypoints and draw
        lane markings using those waypoints.

        Args:
            pose_msg: Contains the current location of the ego vehicle.
        """
        self._logger.debug('@{}: received watermark'.format(timestamp))
        if timestamp.is_top:
            return
        # Assumes exactly one buffered message per stream per watermark, so
        # the two popleft() calls stay in lockstep.
        bgr_msg = self._bgr_msgs.popleft()
        pose_msg = self._pose_msgs.popleft()
        vehicle_location = pose_msg.data.transform.location
        self._frame_cnt += 1
        if self._map:
            lanes = self._map.get_all_lanes(vehicle_location)
            if self._flags.log_lane_detection_camera:
                # Render lane data into instance and binary masks sized to
                # the camera, then periodically dump them as PNGs.
                camera_setup = bgr_msg.frame.camera_setup
                frame = np.zeros((camera_setup.height, camera_setup.width),
                                 dtype=np.dtype("uint8"))
                binary_frame = frame.copy()
                for lane in lanes:
                    lane.collect_frame_data(frame,
                                            binary_frame,
                                            camera_setup,
                                            inverse_transform=pose_msg.data.
                                            transform.inverse_transform())
                self._logger.debug('@{}: detected {} lanes'.format(
                    bgr_msg.timestamp, len(lanes)))
                if self._frame_cnt % self._flags.log_every_nth_message == 0:
                    instance_file_name = os.path.join(
                        self._flags.data_path,
                        '{}-{}.png'.format("lane",
                                           bgr_msg.timestamp.coordinates[0]))
                    binary_file_name = os.path.join(
                        self._flags.data_path,
                        '{}-{}.png'.format("binary_lane",
                                           bgr_msg.timestamp.coordinates[0]))
                    instance_img = Image.fromarray(frame)
                    binary_img = Image.fromarray(binary_frame)
                    instance_img.save(instance_file_name)
                    binary_img.save(binary_file_name)
                    self._logger.debug(
                        '@{}: Created binary lane and lane images in {}'.
                        format(pose_msg.timestamp, self._flags.data_path))
            else:
                # Debug visualization: draw the lanes into the simulator world.
                for lane in lanes:
                    lane.draw_on_world(self._world)
        else:
            self._logger.debug('@{}: map is not ready yet'.format(
                pose_msg.timestamp))
            lanes = []
        # Always emit a LanesMessage (possibly empty) so downstream
        # operators' watermarks keep advancing.
        output_msg = LanesMessage(pose_msg.timestamp, lanes)
        detected_lane_stream.send(output_msg)
|
985,424 | 6aff1fd9d68f9153ae403200d7fc8166ef036139 | ##==============================================================================
## Yining Song (20675284)
## CS 116 Spring 2017
## Assignment 01, Problem 2
##==============================================================================
import math
import check
## normal_distribution(x, mean, std_dev) produces the corresponding value of
## normal distribution with x, mean, and standard deviation std_dev.
## normal_distribution: Float Float Float -> Float
## requires: x, mean, std_dev > 0
## Examples: normal_distribution(3, 5, 2) => 0.120
def normal_distribution(x, mean, std_dev):
    """Return the normal (Gaussian) probability density at x.

    normal_distribution: Float Float Float -> Float
    requires: std_dev > 0
    Example: normal_distribution(3, 5, 2) => 0.120

    Improvement: the exponential factor uses math.exp(-k) rather than the
    original 1 / (math.e ** k) — equivalent mathematically, but clearer and
    evaluated directly by the C library instead of via power-then-divide.
    """
    coefficient = 1 / (std_dev * (math.sqrt(2 * math.pi)))
    return coefficient * math.exp(-((x - mean) ** 2) / (2 * std_dev ** 2))
## Testing normal_distribution:
## check.within asserts the computed value lies within tolerance of the target.
check.within("Test 1", normal_distribution(3, 5, 2), 0.120, 0.001)
check.within("Test 2", normal_distribution(1, 1, 1), 0.398, 0.001)
|
985,425 | 3ccb9b83b1ca5536a4d440ce85184b10881b3991 | #!/usr/bin/python
#
#
# I saw these in some other protocol thing.. im assuming they're required
from lib.transports import *
from lib.bruters import *
global MASTER_DATAFLOW
SPIPE_VERSION1 = int(0x10000001)
SPIPE_VERSION2 = int(0x20000001)
SPIPE_VERSION3 = int(0x30000001)
SPIPE_VERSION4 = int(0x40000001)
KEY_PACKAGE_TYPE = int(1)
DATA_PACKAGE_TYPE = int(2)
class spipeHeader():
    """SPIPE Header class: accumulates the wire fields and concatenates them
    into ``self.header`` via buildHeader()."""
    def __init__(self):
        self.header = ""
        # Every SPIPE header begins with 'PO'
        self.headerID = StringPDT("PO")
        # version
        self.version = int(0)
        # offset to datablock
        self.offsetDataBlock = int(0)
        # data count
        self.dataCount = int(0)
        # data block length
        self.dataBlockLength = int(0)
        # sender GUID (guranteed uniq ID)
        self.guidLength = int(0)
        # package type
        self.packageType = int(0)
        # computer name
        self.computerName = ""

    def buildHeader(self):
        """Concatenate all header fields, in wire order, onto self.header.

        BUG FIX: the original definition omitted ``self`` (so any call raised
        TypeError) and concatenated raw ints onto a str (another TypeError);
        every field is now passed through str() first.
        """
        self.header += str(self.headerID)
        self.header += str(self.version)
        self.header += str(self.offsetDataBlock)
        self.header += str(self.dataCount)
        self.header += str(0)  # reserved field, always zero in the original
        self.header += str(self.dataBlockLength)
        # NOTE(review): __init__ defines guidLength, never guid — callers must
        # set self.guid before building; confirm the intended field.
        self.header += str(self.guid)
        self.header += str(self.packageType)
        self.header += str(self.computerName)
#
|
985,426 | 9e936aab0cd7b4fa7ba2c635286a61e2595b0947 | #!/usr/bin/env python
#
# This script just takes the scan settings as command line args. You will need to hardcode your API keys and scanner name below.
# The way the script works is by kicking off a scan and waiting for the results to download them in '.nessus' format.
# If you launch with '&', it will put it into the background so that you can continue to use the CLI while the scan runs.
#
# Requirements: Python 2.7+, Tenable.io API access/secret keys
#
# Author: ThisTooShallXSS (https://github.com/thistooshallxss)
#
# Check if you have 'pip' installed. If not:
# curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"; python get-pip.py
# Then run:
# pip install tenable_io
#
# python ezscan.py 'Example Scan - Basic' 'basic' '172.26.68.0/24' &
# python ezscan.py 'Example Scan - Shellshock' 'shellshock' '172.26.68.12' &
# python ezscan.py 'Example Scan - Discovery' 'discovery' '172.26.0.0/16' &
#
# Templates: (As of 1/17/17) - Note: Some of these require credentials.
# asv, wannacry, intelamt, discovery, basic, patch_audit, webapp, malware, mobile,
# mdm, compliance, pci, offline, cloud_audit, scap, shellshock, ghost, drown, badlock,
# shadow_brokers, spectre_meltdown, advanced, agent_advanced, agent_basic, agent_compliance, agent_scap, agent_malware
#
import sys
from tenable_io.client import TenableIOClient
from tenable_io.api.scans import ScanExportRequest, ScanCreateRequest
from tenable_io.api.models import ScanSettings
# CLI arguments: <scan name> <template name> <target spec>.
# NOTE(review): no argc check — running without 3 args raises IndexError.
scanName = sys.argv[1]
scanTemplate = sys.argv[2]
scanTarget = sys.argv[3]
# Replace with your own user's API access and secret keys
accessKey = ''
secretKey = ''
# Choose a scanner to use for the purposes of automated scanning
scannerName = 'tnsappliance-123456'
# Create a folder on Tenable.io where the API generated scans will go.
# Otherwise, we will simply put them into the default 'My Scans' folder.
folderName = 'My Scans'
# templates_list = [t.name for t in client.editor_api.list('scan').templates]
# print(templates_list)
# Establish the login session using our client API helper.
client = TenableIOClient(access_key=accessKey, secret_key=secretKey)
# Fetch a list of all scanners on the account and group them into a dictionary {scannerName: scannerId}
scanners = {scanner.name: scanner.id for scanner in client.scanners_api.list().scanners}
# Fetch a list of all folders on the account and group them into a dictionary {folderName: folderId}
folders = {folder.name: folder.id for folder in client.folders_api.list().folders}
# Fetch the template uuid to be used in our call to launch the scan.
template = client.scan_helper.template(name=scanTemplate)
# Create the scan and use the corresponding scanner id for the scanner name supplied
scan_id = client.scans_api.create(
    ScanCreateRequest(
        template.uuid,
        ScanSettings(
            scanName,
            scanTarget,
            folder_id=folders[folderName],
            scanner_id=scanners[scannerName]
        )
    )
)
# Get the scanRef object using the previously returned scan id
scan = client.scan_helper.id(scan_id)
# launch & download the scan result
# NOTE(review): histories()[0] assumes the freshly launched scan has at
# least one history entry by the time launch() returns — confirm.
scan.launch().download('{}.nessus'.format(scanName), scan.histories()[0].history_id, format=ScanExportRequest.FORMAT_NESSUS)
|
985,427 | eb1f34d86d836b08dffa439fc25754ebee2d7833 | # class Solution:
# def check(self, height, k):
# if k == 0 or max(height[0:k]) <= height[k]: return False
# if k == len(height) - 1 or max(height[k:]) <= height[k]: return False
# return True
# def trap(self, height):
# tot = 0
# n = len(height)
# if n == 0 or n == 1: return 0
# h = max(height)
# for i in range(0,h):
# for k in range(0,n):
# if height[k] == i and self.check(height, k):
# height[k] += 1
# tot += 1
# return tot
class Solution:
    def trap(self, height):
        """Return the total units of rain water trapped between the bars.

        For every position the water level is bounded by the tallest bar to
        its left and to its right; precompute the index of that tallest bar
        in each direction, then sum the per-position difference.
        """
        n = len(height)
        if n <= 2:
            return 0
        # peak_left[i]: index of the tallest bar in height[0..i] (ties -> rightmost).
        peak_left = [0] * n
        best = 0
        for i, h in enumerate(height):
            if h >= height[best]:
                best = i
            peak_left[i] = best
        # peak_right[i]: index of the tallest bar in height[i..n-1] (ties -> leftmost).
        peak_right = [0] * n
        best = n - 1
        for i in range(n - 1, -1, -1):
            if height[i] >= height[best]:
                best = i
            peak_right[i] = best
        return sum(min(height[peak_left[i]], height[peak_right[i]]) - height[i]
                   for i in range(n))


s = Solution()
print(s.trap([0,1,0,2,1,0,1,3,2,1,2,1]))
985,428 | 8b42cfcc91499a3f99c29245ae339f292b0e362e | def main():
import sys
import time
start=time.time()
K=int(sys.stdin.readline())
if K%2==0:
print(-1)
return
L=[7%K]
L2=[7%K]
if L[0]==0:
print(1)
return
idx=0
while True:
L2.append((L2[idx]*10)%K)
L.append((L[idx]+L2[idx+1])%K)
if L[-1]==0:
print(len(L))
return
idx+=1
if time.time()-start>=1.5:
print(-1)
return
main() |
985,429 | 084b3394533007fb147c2494dce47bdd3f81069b | """
all operations of api are in app.py file
"""
import json
import uuid
from aiohttp import web
from models import (
save_new_agent,
get_agent,
update_agent,
delete_agent,
add_system,
get_system,
delete_system,
)
class AgentMethods(web.View):
    """
    ALL METHODS OF AGENT ARE IMPLEMENTED HERE

    Each handler reads a JSON body, delegates to the models helpers, and
    returns a JSON-encoded status object (200 on success, 500/404 on error).
    """
    async def post(self):
        """
        description: creating agent
        params: agentip
        """
        try:
            # uuid1 is time-based; dashes stripped to keep the id compact.
            uu_id = str(uuid.uuid1()).replace("-", "")
            agent = await self.request.json()
            agent_ip = agent["agentip"]
            # agent_ip = self.request.json('agentip')
            save_new_agent(uu_id, agent_ip)
            print(f"Creating agent with uuid {uu_id} and ip {agent_ip} ")
            response_obj = {
                "status": "success",
                "message": f"agent created successfully with ip {agent_ip}",
            }
            return web.Response(text=json.dumps(response_obj), status=200)
        except Exception as error_delete:
            # "UNIQUE constraint failed" is SQLite's duplicate-key message —
            # presumably raised out of save_new_agent; verify against models.
            if str(error_delete) == "UNIQUE constraint failed: agent.uuid":
                response_obj = {
                    "status": "failed",
                    "message": "agent uuid present already",
                }
            else:
                response_obj = {"status": "failed", "message": str(error_delete)}
                print(str(error_delete))
            return web.Response(text=json.dumps(response_obj), status=500)

    async def get(self):
        """
        description: Get agent info
        params: uuid
        """
        try:
            uid = await self.request.json()
            uu_id = uid["uuid"]
            data = get_agent(uu_id)
            print(
                f"uuid : - {data.uuid} agentip:- {data.agentip} created_date:-{data.agentdate}"
            )
            response_obj = {
                "status": "success",
                "message": f"uuid {data.uuid} agentip {data.agentip} created_date {data.agentdate}",
            }
            return web.Response(text=json.dumps(response_obj), status=200)
        except Exception as error_delete:
            # Any failure (bad JSON, missing row) is reported as not-found.
            return web.Response(text=f"No Record Found {error_delete}", status=404)

    async def put(self):
        """
        description: updating agent
        params: old ip , new ip
        """
        try:
            data = await self.request.json()
            old_ip = data["oldip"]
            new_ip = data["newip"]
            update_agent(old_ip, new_ip)
            response_obj = {
                "status": "success",
                "message": f"agent updated successfully from old-ip {old_ip} to new-ip:{new_ip}",
            }
            return web.Response(text=json.dumps(response_obj), status=200)
        except Exception as error_delete:
            return web.Response(text=f"{str(error_delete)}", status=500)

    async def delete(self):
        """
        description: deleting agent
        params: agentip
        """
        try:
            data = await self.request.json()
            agent_ip = data["agentip"]
            # delete_agent returns the human-readable outcome message.
            response_msg = delete_agent(agent_ip)
            response_obj = {
                "status": "success",
                "message": response_msg,
            }
            return web.Response(text=json.dumps(response_obj), status=200)
        except Exception as error_delete:
            return web.Response(text=f"{str(error_delete)}", status=500)
class SystemMethods(web.View):
    """
    ALL METHODS OF SYSTEM API ARE IMPLEMENTED HERE

    Same JSON-in / JSON-out shape as AgentMethods; systems belong to agents.
    """
    async def post(self):
        """
        description: creating system
        params: agentid , sysname(system name)
        """
        try:
            data = await self.request.json()
            agent_id = data["agentid"]
            sys_name = data["sysname"]
            # Fresh compact id for the new system.
            sys_id = str(uuid.uuid1()).replace("-", "")
            add_system(agent_id, sys_name, sys_id)
            print(f"Creating system on agent :- {agent_id} with name :- {sys_name}")
            response_obj = {
                "status": "success",
                "message": f"Creating system on agent :- {agent_id} with name :- {sys_name}",
            }
            return web.Response(text=json.dumps(response_obj), status=200)
        except Exception as error_delete:
            return web.Response(text=f"{str(error_delete)}", status=500)

    async def get(self):
        """
        description: getting system
        params: agentid
        """
        try:
            data = await self.request.json()
            agent_id = data["agentid"]
            info = get_system(agent_id)
            # NOTE(review): if get_system returns None the attribute access
            # below raises, yielding a 500 rather than a 404 — confirm intent.
            response_obj = {
                "status": "success",
                "message": f"systemname:-{info.sysname} id:-{info.sid} ",
            }
            return web.Response(text=json.dumps(response_obj), status=200)
        except Exception as error_delete:
            return web.Response(text=f"{str(error_delete)}", status=500)

    async def delete(self):
        """
        description: deleting system
        params: sysid
        """
        try:
            data = await self.request.json()
            sys_id = data["sysid"]
            delete_system(sys_id)
            response_obj = {
                "status": "success",
                "message": f"system deleted successfully systemid {sys_id}",
            }
            return web.Response(text=json.dumps(response_obj), status=200)
        except Exception as error_delete:
            return web.Response(text=f"{str(error_delete)}", status=500)
|
985,430 | 327736f07000a0aef9533a5d24ccfebbf705835d | import os
from paste import deploy
from oslo.config import cfg
from openstack.common import excutils
from openstack.common.gettextutils import _
from openstack.common import log as logging
import webob
import eventlet
import eventlet.wsgi
import greenlet
import socket
# Raise the default from 8192 to accommodate large tokens
eventlet.wsgi.MAX_HEADER_LINE = 16384
# Configuration options for this WSGI layer; registered on the global CONF
# object below so services sharing the config can read them.
wsgi_opts = [
    cfg.StrOpt('api_paste_config',
               default="api-paste.ini",
               help='File name for the paste.deploy config for nova-api'),
    cfg.StrOpt('wsgi_log_format',
               default='%(client_ip)s "%(request_line)s" status: %(status_code)s'
                       ' len: %(body_length)s time: %(wall_seconds).7f',
               help='A python format string that is used as the template to '
                    'generate log lines. The following values can be formatted '
                    'into it: client_ip, date_time, request_line, status_code, '
                    'body_length, wall_seconds.'),
    cfg.StrOpt('ssl_ca_file',
               help="CA certificate file to use to verify "
                    "connecting clients"),
    cfg.StrOpt('ssl_cert_file',
               help="SSL certificate of API server"),
    cfg.StrOpt('ssl_key_file',
               help="SSL private key of API server"),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help="Sets the value of TCP_KEEPIDLE in seconds for each "
                    "server socket. Not supported on OS X.")
]
CONF = cfg.CONF
CONF.register_opts(wsgi_opts)
# Module-level logger.
LOG = logging.getLogger(__name__)
class Loader(object):
    """Loads WSGI applications out of paste.deploy configuration files."""

    def __init__(self, config_path=None):
        """Locate the paste config file this loader will read from.

        :param config_path: Full or relative path to the paste config;
            defaults to CONF.api_paste_config.
        :returns: None
        """
        candidate = config_path or CONF.api_paste_config
        self.config_path = (candidate if os.path.exists(candidate)
                            else CONF.find_file(candidate))

    def load_app(self, name):
        """Build the paste URLMap wrapped WSGI application called *name*.

        :param name: Name of the application to load.
        :returns: Paste URLMap object wrapping the requested application.
        """
        LOG.debug(_("Loading app %(name)s from %(path)s") %
                  {'name': name, 'path': self.config_path})
        return deploy.loadapp("config:%s" % self.config_path, name=name)
class Request(webob.Request):
    """webob.Request with JSON/XML content-negotiation helpers."""

    def best_match_content_type(self):
        """Determine the most acceptable content-type.

        Resolution order:
        1) URI extension (.json/.xml)
        2) Content-type header
        3) Accept* headers (falling back to application/json)
        """
        # 1) A trailing .json / .xml extension wins outright.
        pieces = self.path.rsplit('.', 1)
        if len(pieces) > 1 and pieces[1] in ('json', 'xml'):
            return 'application/{0}'.format(pieces[1])
        # 2) An explicit, supported Content-Type header comes next.
        header_type = self.get_content_type()
        if header_type:
            return header_type
        # 3) Finally negotiate via the Accept headers.
        negotiated = self.accept.best_match(['application/json',
                                             'application/xml'])
        return negotiated or 'application/json'

    def get_content_type(self):
        """Return the request's Content-Type if present and supported, else None."""
        if "Content-Type" not in self.headers:
            LOG.debug(_("Missing Content-Type"))
            return None
        declared = self.content_type
        return declared if declared in ("application/xml",
                                        "application/json") else None
class Server(object):
    """Server class to manage a WSGI server, serving a WSGI application."""

    default_pool_size = 1000

    def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol, backlog=128,
                 use_ssl=False, max_url_len=None):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to server the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :param backlog: Maximum number of queued connections.
        :param max_url_len: Maximum length of permitted URLs.
        :returns: None
        :raises: nova.exception.InvalidInput
        """
        self.name = name
        self.app = app
        self._server = None
        self._protocol = protocol
        self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
        self._logger = logging.getLogger("%s.wsgi.server" % self.name)
        self._wsgi_logger = logging.WritableLogger(self._logger)
        self._use_ssl = use_ssl
        self._max_url_len = max_url_len
        if backlog < 1:
            # NOTE(review): ``exception`` is never imported in this module, so
            # hitting this branch raises NameError instead of InvalidInput —
            # confirm which exception module was intended and import it.
            raise exception.InvalidInput(
                reason='The backlog must be more than 1')
        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0],
                                      bind_addr[1],
                                      socket.AF_UNSPEC,
                                      socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            # Resolution failed; fall back to a plain IPv4 bind.
            family = socket.AF_INET
        self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
        # The kernel chose the port when port=0; record what we actually got.
        (self.host, self.port) = self._socket.getsockname()[0:2]
        LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)

    def start(self):
        """Start serving a WSGI application.

        :returns: None
        """
        if self._use_ssl:
            # BUG FIX: the original referenced the ``ssl`` module (CERT_NONE /
            # CERT_REQUIRED below) without importing it, so every SSL start
            # raised NameError. Imported locally to keep the fix self-contained.
            import ssl
            try:
                ca_file = CONF.ssl_ca_file
                cert_file = CONF.ssl_cert_file
                key_file = CONF.ssl_key_file
                if cert_file and not os.path.exists(cert_file):
                    raise RuntimeError(
                        _("Unable to find cert_file : %s") % cert_file)
                if ca_file and not os.path.exists(ca_file):
                    raise RuntimeError(
                        _("Unable to find ca_file : %s") % ca_file)
                if key_file and not os.path.exists(key_file):
                    raise RuntimeError(
                        _("Unable to find key_file : %s") % key_file)
                if self._use_ssl and (not cert_file or not key_file):
                    raise RuntimeError(
                        _("When running server in SSL mode, you must "
                          "specify both a cert_file and key_file "
                          "option value in your configuration file"))
                ssl_kwargs = {
                    'server_side': True,
                    'certfile': cert_file,
                    'keyfile': key_file,
                    'cert_reqs': ssl.CERT_NONE,
                }
                # Providing a CA file implies client-certificate verification.
                if CONF.ssl_ca_file:
                    ssl_kwargs['ca_certs'] = ca_file
                    ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
                self._socket = eventlet.wrap_ssl(self._socket,
                                                 **ssl_kwargs)
                self._socket.setsockopt(socket.SOL_SOCKET,
                                        socket.SO_REUSEADDR, 1)
                # sockets can hang around forever without keepalive
                self._socket.setsockopt(socket.SOL_SOCKET,
                                        socket.SO_KEEPALIVE, 1)
                # This option isn't available in the OS X version of eventlet
                if hasattr(socket, 'TCP_KEEPIDLE'):
                    self._socket.setsockopt(socket.IPPROTO_TCP,
                                            socket.TCP_KEEPIDLE,
                                            CONF.tcp_keepidle)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Failed to start %(name)s on %(host)s"
                                ":%(port)s with SSL support") % self.__dict__)
        wsgi_kwargs = {
            'func': eventlet.wsgi.server,
            'sock': self._socket,
            'site': self.app,
            'protocol': self._protocol,
            'custom_pool': self._pool,
            'log': self._wsgi_logger,
            'log_format': CONF.wsgi_log_format
        }
        if self._max_url_len:
            wsgi_kwargs['url_length_limit'] = self._max_url_len
        self._server = eventlet.spawn(**wsgi_kwargs)

    def stop(self):
        """Stop this server.

        This is not a very nice action, as currently the method by which a
        server is stopped is by killing its eventlet.

        :returns: None
        """
        LOG.info(_("Stopping WSGI server."))
        if self._server is not None:
            # Resize pool to stop new requests from being processed
            self._pool.resize(0)
            self._server.kill()

    def wait(self):
        """Block, until the server has stopped.

        Waits on the server's eventlet to finish, then returns.

        :returns: None
        """
        try:
            self._server.wait()
        except greenlet.GreenletExit:
            LOG.info(_("WSGI server has stopped."))
|
985,431 | 685090be2915d6307c0d5225daebec3391f5e626 | import pickle
import pandas as pd
from resources.functions import print_with_time
import os
def extract_metrics(analysis_directory: str, level0_results_path: str, structured_results_path: str, ensemble_results_path: str, balanced_test_set_results: str = None):
    """Summarise raw k-fold evaluation CSVs into filtered report CSVs.

    Reads the metric CSVs produced by the experiment runs, renames the raw
    metric columns to presentation labels, rounds to 3 decimals and writes one
    filtered CSV per result family into ``analysis_directory``. Each output is
    only built when it does not already exist (cheap re-run behaviour).

    Improvements over the original: the deprecated (removed in pandas 2.0)
    ``DataFrame.append`` is replaced with ``pd.concat``; slices are ``.copy()``d
    before in-place column assignment to avoid SettingWithCopy hazards; the
    repeated rename boilerplate is factored into a helper; an unused local
    list of structured model names was dropped.

    Args:
        analysis_directory: directory that receives the filtered CSVs.
        level0_results_path: CSV of per-fold level-0 model metrics; model
            names encode the data type (``*_textual`` -> TTS, else STS).
        structured_results_path: CSV of structured-data-only model metrics.
        ensemble_results_path: CSV of ensemble metrics with an ``origin``
            column (``struct_pred`` / ``both_pred``).
        balanced_test_set_results: optional format string with one ``{}``
            slot for the fold number, pointing at balanced-test-set CSVs.
    """
    metrics = ["w_fscore", "w_precision", "w_recall", "ma_fscore", "recall_b", "auc"]
    columns_order = ["Model", "Data Type", "Precision", "Recall", "W F-Score", "U F-Score", "TP Rate", "AUC"]

    def _presentify(df):
        # Copy each raw metric column into its presentation-named twin.
        df.loc[:, "Precision"] = df["w_precision"]
        df.loc[:, "Recall"] = df["w_recall"]
        df.loc[:, "W F-Score"] = df["w_fscore"]
        df.loc[:, "U F-Score"] = df["ma_fscore"]
        df.loc[:, "TP Rate"] = df["recall_b"]
        df.loc[:, "AUC"] = df["auc"]
        return df

    if not os.path.exists(os.path.join(analysis_directory, "filtered_media_kfold.csv")):
        all_metrics = pd.read_csv(level0_results_path)
        # Average the per-fold rows of each model.
        all_metrics_mean = all_metrics.groupby(by="model").mean().reset_index()
        print(all_metrics_mean)
        results_df = all_metrics_mean[metrics + ['model']].copy()
        print(results_df)
        results_df = _presentify(results_df)
        results_df = results_df.drop(columns=metrics)
        results_df.loc[:, "Data Type"] = results_df['model'].apply(lambda x: 'TTS' if 'textual' in x else 'STS')
        results_df.loc[:, "Model"] = results_df['model'].apply(lambda x: x.split('_')[0])
        results_df = results_df.drop(columns=['model'])
        results_df = results_df.round(decimals=3)
        results_df = results_df.sort_values(by='Data Type')
        results_df[columns_order].to_csv(os.path.join(analysis_directory, "filtered_media_kfold.csv"), index=False)

    if not os.path.exists(os.path.join(analysis_directory, "filtered_structured_only.csv")):
        structured_results = pd.read_csv(structured_results_path).copy()
        structured_results = _presentify(structured_results)
        structured_results = structured_results.drop(columns=metrics)
        structured_results.loc[:, "Data Type"] = "Structured"
        structured_results.loc[:, "Model"] = structured_results['model']
        structured_results = structured_results.drop(columns=['model'])
        structured_results = structured_results.round(decimals=3)
        structured_results = structured_results.sort_values(by='Model')
        structured_results[columns_order].to_csv(os.path.join(analysis_directory, "filtered_structured_only.csv"),
                                                 index=False)

    if not os.path.exists(os.path.join(analysis_directory, 'filtered_ensemble_results.csv')):
        ensemble_models = ["LinearSVC", "LogisticRegression", "GaussianNB", "DecisionTreeClassifier", "MLPClassifier"]
        ensemble_columns = ["Model", "Origin", "Precision", "Recall", "W F-Score", "U F-Score", "TP Rate", "AUC"]
        ensemble_results = pd.read_csv(ensemble_results_path)
        # Keep only the two ensemble configurations of interest.
        ensemble_results = ensemble_results[(ensemble_results['origin'] == 'struct_pred') |
                                            (ensemble_results['origin'] == 'both_pred')]
        ensemble_results = ensemble_results[ensemble_results['model'].isin(ensemble_models)].copy()
        ensemble_results.loc[:, 'Origin'] = ensemble_results['origin'].apply(
            lambda x: "Structured + STS" if x == 'struct_pred' else "Structured + STS + TTS")
        ensemble_results.loc[:, 'Model'] = ensemble_results['model']
        ensemble_results = _presentify(ensemble_results)
        ensemble_results = ensemble_results.round(decimals=3)
        ensemble_results = ensemble_results.sort_values(by='Origin')
        ensemble_results[ensemble_columns].to_csv(os.path.join(analysis_directory, 'filtered_ensemble_results.csv'),
                                                  index=False)

    if not os.path.exists(os.path.join(analysis_directory, "filtered_test_data.csv")):
        if balanced_test_set_results is not None:
            # BUG FIX: DataFrame.append was deprecated and removed in pandas
            # 2.0; collect the per-fold frames and concatenate once instead.
            fold_frames = [pd.read_csv(balanced_test_set_results.format(fold)) for fold in range(5)]
            evaluation_metrics = pd.concat(fold_frames)
            results_df = evaluation_metrics[metrics + ['model']].copy()
            print(results_df)
            results_df = _presentify(results_df)
            results_df = results_df.drop(columns=metrics)
            results_df.loc[:, "Data Type"] = results_df['model'].apply(lambda x: 'TTS' if 'textual' in x else 'STS')
            results_df.loc[:, "Model"] = results_df['model'].apply(lambda x: x.split('_')[0])
            results_df = results_df.drop(columns=['model'])
            results_df = results_df.round(decimals=3)
            results_df = results_df.sort_values(by='Data Type')
            results_df[columns_order].to_csv(os.path.join(analysis_directory, "filtered_test_data.csv"), index=False)
985,432 | 174a7724281d340aff8ec5afc0ddf7e92c99798d | """
Polynomial Curve Fitting
Dataset - Univariate_Dataset.csv
Single variable input
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn.model_selection import train_test_split,KFold
class polynomial_regression:
    """
    Polynomial curve fitting for a single-variable dataset.

    Fits weights for the monomial basis 1, x, ..., x^(D-1) via the normal
    equations, optionally with L2 (ridge) regularisation, and evaluates the
    fit with k-fold cross-validation.

    Methods
    -------
    read_data():
        Reads data and converts it into a numpy array
    design_matrix(X):
        Computes the design matrix
    calculate_weights(design_matrix, Y):
        Calculates basis-function weights, applying the chosen regularisation
    train(X, Y):
        Builds the design matrix and solves for the weights
    make_prediction(X, weights):
        Evaluates the fitted polynomial at inputs X
    plot(y_true, y_pred, x):
        Diagnostic scatter/line plots of the fit
    cross_validation():
        Performs k-fold cross validation of the dataset
    """
    def __init__(self, lambda_, D, sample_size, regularization=None):
        """
        Parameters
        ----------
        lambda_ : float
            Regularisation strength (only used when regularization == 'L2')
        D : int
            Number of monomial basis functions (polynomial degree + 1)
        sample_size : int
            Number of rows randomly sampled from the CSV
        regularization : str, optional
            'L2' for ridge regression; any other value means no regularisation
        """
        self.regularization = regularization
        self.D = D
        self.lambda_ = lambda_
        self.random_sample_size = sample_size
    def read_data(self):
        """Read 'function0.csv' and return a random sample as a numpy array."""
        data = pd.read_csv('function0.csv')
        data = data.sample(self.random_sample_size).to_numpy()
        return data
    def design_matrix(self, X):
        """Return the (n, D) design matrix [1, x, x**2, ..., x**(D-1)].

        Parameters
        ----------
        X : float array
            Input data (1-D)
        """
        design_matrix = [X ** i for i in range(self.D)]
        return np.transpose(design_matrix)
    def calculate_weights(self, design_matrix, Y):
        """Solve the (optionally ridge-regularised) normal equations.

        Parameters
        ----------
        design_matrix : float array
            Design matrix from ``design_matrix``
        Y : float array
            Output training data
        """
        if self.regularization == 'L2':
            # w = (A^T A + lambda * I)^-1 A^T y
            pseudo_inv = np.dot(
                np.linalg.inv((np.dot(np.transpose(design_matrix), design_matrix)) + self.lambda_ * np.identity(n=self.D)),
                np.transpose(design_matrix))
            return np.dot(pseudo_inv, Y)
        # No regularisation: w = (A^T A)^-1 A^T y
        pseudo_inv = np.dot(
            np.linalg.inv((np.dot(np.transpose(design_matrix), design_matrix))),
            np.transpose(design_matrix))
        return np.dot(pseudo_inv, Y)
    def train(self, X, Y):
        """Fit on (X, Y); return (weights, design_matrix)."""
        design_matrix = self.design_matrix(X)
        weights = self.calculate_weights(design_matrix, Y)
        return weights, design_matrix
    def make_prediction(self, X, weights):
        """Evaluate the fitted polynomial at inputs X using *weights*."""
        design_matrix = self.design_matrix(X)
        return np.dot(design_matrix, weights)
    def plot(self, y_true, y_pred, x):
        """Plot predicted-vs-true values, then the fitted curve over the data.

        Parameters
        ----------
        y_true : float array
            Output test data
        y_pred : float array
            Predicted output
        x : float array
            Input test data
        """
        # Scatter plot of y_true vs y_pred, with the ideal y=x reference line.
        plt.scatter(y_true, y_pred)
        plt.plot(np.linspace(np.min(y_true), np.max(y_true), 10), np.linspace(np.min(y_true), np.max(y_true), 10), 'r')
        plt.xlabel('Y TRUE')
        plt.ylabel('Y PREDICTED')
        plt.title('Lambda = ' + str(self.lambda_))
        plt.show()
        # Sort by x so the prediction draws as a smooth curve.
        x = np.reshape(x, newshape=[-1])
        order = np.argsort(x)
        plt.scatter(x=x, y=y_true)
        plt.plot(x[order], y_pred[order], 'red')
        plt.xlabel('X')
        plt.ylabel('Y')
        plt.title('PREDICTION \n Lambda = ' + str(self.lambda_))
        plt.legend(['prediction', 'data-points'])
        plt.show()
    def cross_validation(self):
        """10-fold cross-validation; prints mean and std of the fold MSEs."""
        kfold = KFold(10, shuffle=True, random_state=1)
        data = self.read_data()
        # MSE from each fold
        errors = []
        for train, test in kfold.split(data):
            # Assumes column 1 holds x and column 2 holds y (per the
            # function0.csv layout) -- TODO confirm against the data file.
            X_test, Y_test = data[test][:, 1], data[test][:, 2]
            X_train, Y_train = data[train][:, 1], data[train][:, 2]
            weights, design_matrix = self.train(X_train, Y_train)
            y_pred = self.make_prediction(X_test, weights)
            self.plot(y_true=Y_test, y_pred=y_pred, x=X_test)
            # BUG FIX: the original computed np.mean(y_pred - Y_test) ** 2,
            # i.e. the square of the mean residual, which lets positive and
            # negative errors cancel.  The MSE squares each residual first.
            errors.append(np.mean((y_pred - Y_test) ** 2))
        print("%0.10f mean with a standard deviation of %0.10f across the k-folds" % (np.mean(errors), np.std(errors)))
"""
Creates a class variable regressor
Calls the function cross validation in the class polynomial_regression
"""
def main():
    """Entry point: build a degree-3, L2-regularised model and cross-validate it."""
    model = polynomial_regression(D=3, regularization='L2', lambda_=0.1, sample_size=900)
    model.cross_validation()
if __name__ == '__main__':
    main()
|
985,433 | a12fcb1b2dd8521ff3d0f547cc2997caa53bc783 | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^(?P<person>\w+)/info/$', views.person, name='person'),
    # NOTE(review): '^$' duplicates the index pattern above, so this route
    # is shadowed and unreachable -- confirm the intended URL for farriers.
    url(r'^$', views.farriers, name='farriers'),
    # Fixed invalid regex '(?P<horse_name>+)' ('+' had nothing to repeat);
    # '\w+' follows the convention of the 'person' route above.
    url(r'^(?P<horse_name>\w+)/info/', views.horse_info, name='horse_info'),
    url(r'^every_person/$', views.every_person, name='every_person'),
]
# Fixes: missing comma after the 'person' entry (SyntaxError in a list
# literal), invalid horse_name regex, and an exact-duplicate 'person' route.
|
985,434 | 8d7566c7831cb056da4b429ffd980f014ffe4bae | from setuptools import find_packages
from setuptools import setup
# Package the ``pms`` distribution; ships the bundled model config with it.
setup(
    name='pms',
    version='1.0',
    packages=find_packages(),
    include_package_data=True,
    # Non-Python file installed alongside the package code.
    package_data={'pms': ['config/deep_model.yaml']},
    install_requires=["google-cloud-storage"],
)
|
985,435 | 1fce097dfdfe45e12bbff15cf6dd2f8cc85e3bba | ## TOPICS ##
'''
Hey, it's a CMS!
Title/slug/description will be used to create the list page at /topics/
Title/description will also be used to create the header on the detail page
at /topics/{{ slug }}/. The contents of an individual page should go inside
{{ template_name }}, which belongs in /templates/topics.
Screenshots of survey questions should be placed in /static/img/questions,
and the filenames listed in {{ question_images }} for each entry.
Possible topics for matching table metadata:
'topics': ['poverty', 'health insurance', 'marital status', 'citizenship', 'mortgage', 'occupancy', 'education', 'sex', 'public assistance', 'income', 'disability', 'migration', 'housing', 'family type', 'group quarters', 'physical characteristics', 'employment', 'commute', 'tenure', 'place of birth', 'fertility', 'veterans', 'families', 'costs and value', 'language', 'technical', 'roommates', 'children', 'grandparents', 'age', 'race', 'seniors', 'ancestry']
'''
TOPICS_LIST = [
{
'title': 'Age and Sex',
'slug': 'age-sex',
'topics': ['sex', 'children', 'age', 'seniors'],
'description': 'How the Census approaches the topics of age and sex.',
'template_name': 'age_sex.html',
'question_images': ['age-sex.png',],
'question_pdfs': [
('Age','http://www.census.gov/acs/www/Downloads/QbyQfact/age.pdf'),
('Sex','http://www.census.gov/acs/www/Downloads/QbyQfact/sex.pdf')
]
},
{
'title': 'Births',
'slug': 'births',
'topics': ['births', 'children', 'families'],
        'description': 'The Michigan Department of Health and Human Services collects a number of vital statistics, including data from the birth certificates of each birth recorded in the state. Through this data we report a variety of health indicators and demographic information related to births. If there are fewer than 6 observations, the information is suppressed for privacy reasons.',
'template_name': 'births.html',
'question_images': [],
'question_pdfs': [
]
},
{
'title': 'Children',
'slug': 'children',
'topics': ['family type', 'families', 'children'],
'description': 'Tables concerning Children. Helpful to consider in relation to Families.',
'template_name': 'children.html',
'question_images': ['relationship.png',],
'question_pdfs': [
('Questions on Family Relationships','http://www.census.gov/acs/www/Downloads/QbyQfact/relationship.pdf'),
]
},
{
'title': 'Commute',
'slug': 'commute',
'topics': ['employment', 'commute'],
'description': 'Commute data from the American Community Survey.',
'template_name': 'commute.html',
'question_images': ['commuting.png',],
'question_pdfs': [
('Vehicles Available','http://www.census.gov/acs/www/Downloads/QbyQfact/vehicle.pdf'),
('Place of Work and Journey to Work','http://www.census.gov/acs/www/Downloads/QbyQfact/PJ_work.pdf')
]
},
{
'title': 'Education',
'slug': 'education',
'topics': ['education', 'children', 'families'],
'description': 'The Michigan Center for Educational Performance and Information (CEPI) coordinated the collection of education data for the State of Michigan. This data includes M-STEP test data, graduation, and drop out statistics. Through this data we report information related to both English Language Arts (ELA) and Math M-STEP testing that takes place during the 3rd grade, and graduation rate information. Data from CEPI is published for both traditional public schools and charter schools. The numbers that we report on the State of the Detroit Child from the tables below include both public and charter schools combined. If there are fewer than 10 observations, the information is suppressed for privacy reasons. ',
'template_name': 'education.html',
'question_images': [],
'question_pdfs': [
]
},
{
'title': 'Families',
'slug': 'families',
'topics': ['family type', 'families', 'marital status'],
'description': 'Families are an important topic in the ACS and a key framework for considering many kinds of data.',
'template_name': 'families.html',
'question_images': ['relationship.png',],
'question_pdfs': [
('ACS Question on Householder Relationships','http://www.census.gov/acs/www/Downloads/QbyQfact/relationship.pdf'),
]
},
{
'title': 'Geography',
'slug': 'geography',
'description': "Geography is fundamental to the Census Bureau's process of tabulating data. Here are the key concepts you need to understand.",
'template_name': 'geography.html',
},
{
'title': 'Health Insurance',
'slug': 'health-insurance',
'topics': ['health insurance',],
'description': 'The ACS has a number of questions that deal with health insurance and many corresponding tables.',
'template_name': 'health-insurance.html',
'question_images': ['health-insurance.png',],
'question_pdfs': [
('Questions on Health Insurance Coverage','http://www.census.gov/acs/www/Downloads/QbyQfact/health_insurance.pdf'),
]
},
{
'title': 'Hospital visits through Medicaid',
'slug': 'hospital-visits-through-medicaid',
'topics': ['public health', 'children', 'families', 'health care'],
'description': 'The Michigan Department of Health and Human Services collects data on patients under 18 years of age who visited the hospital through Medicaid. This dataset includes information about the number of these visits that were to the Emergency Room, as well as the number of visits that were related to Asthma or Diabetes. If there were fewer than 6 observations, the information is suppressed for privacy reasons.',
'template_name': 'medicaid.html',
'question_images': [],
'question_pdfs': [
]
},
{
'title': 'Free and Reduced School Lunch',
'slug': 'free-and-reduced-school-lunch',
'topics': ['education', 'children', 'families', 'nutrition'],
'description': 'The Center for Educational Performance and Information (CEPI) collects the count of how many students (K-12) are eligible for free and/or reduced school lunch. Free and reduced lunch eligibility is based on income thresholds based on household size. Non-public school and home-school students are excluded from CEPI\'s data.',
'template_name': 'school-lunch.html',
'question_images': [],
'question_pdfs': [
]
},
{
'title': 'College Readiness',
'slug': 'college-readiness',
'topics': ['education', 'children', 'families', 'higher education'],
'description': 'This dataset contains college readiness information, by Metropolitan Statistical Area (MSA), for the state of Michigan. This dataset is reporting score information from the 2017-2018 school year. Students were considered ready for college if they scored at or above the benchmark scores. The SAT Benchmarks represent the likelihood of success in entry-level college courses. The benchmark for Evidenced-Based Reading and Writing (EBRW) is 480 and 530 for Math. The SAT total score reported for Michigan is the combined Evidenced-Based Reading and Writing, and Math Student Score. The Total Score range is 400 - 1600. Data Driven Detroit obtained this data from MiSchoolData.org in October 2018 at a building level and aggregated the data to a MSA level. See "Additional Notes" tab for more information.',
'template_name': 'college-readiness.html',
'question_images': [],
'question_pdfs': [
]
},
{
'title': 'College Enrollment',
'slug': 'college-enrollment',
'topics': ['education', 'children', 'families', 'higher education'],
'description': 'This dataset contains college enrollment information, by U.S. Census Block Group, for the state of Michigan. College enrollment was defined as the number of public high school students who graduated in 2017, who enrolled in a college or university. This dataset includes enrollment in two-year and four-year institutions of higher education.',
'template_name': 'college-enrollment.html',
'question_images': [],
'question_pdfs': [
]
},
{
'title': 'Blood Lead Level',
'slug': 'blood-lead-level',
'topics': ['health', 'children', 'families', 'public health'],
'description': 'Data on the number of tests for elevated blood lead level is collected by the Michigan Department of Health and Human Services at the individual level. For this analysis, D3 anonymized the data and aggregated it up to various geographies. Geographies with less than 6 individuals with elevated blood lead levels (EBLLs) are suppressed to preserve anonymity. EBLLs are defined as a blood lead level greater than 4.5 micrograms per deciliter (the unit of measure used to determine lead levels which is micrograms of lead per deciliter of blood). There are two different types of samples, capillary and venous. Capillary testing is done by a pricking the skin of a fingertip or heel capillary. Venous testing is done by drawing blood directly from an arm vein. Some children receive both types of testing, but not all children receive venous testing. Venous is preferred for accuracy and used in the SDC tool as the method of identifying EBLL.',
'template_name': 'blood-lead-level.html',
'question_images': [],
'question_pdfs': [
]
},
{
'title': 'Child Care',
'slug': 'child-care',
'topics': ['child care', 'children', 'families'],
'description': 'Great Start to Quality measures the quality of early childhood programs and providers in Michigan by using more than 40 program quality indicators aligned with Michigan\'s Early Childhood Standards of Quality for Infant and Toddler Programs and Early Childhood Standards of Quality for Prekindergarten. The program quality indicators are standards used to measure the quality of a program in a specific area. Each program quality indicator falls into one of five categories.',
'template_name': 'child-care.html',
'question_images': [],
'question_pdfs': [
]
},
{
'title': 'Infant Mortality',
'slug': 'infant-mortality',
'topics': ['births', 'children', 'families', 'health care'],
'description': 'Data on the number of infant deaths, unsafe sleep related infant deaths, and infant deaths due to assault or maltreatment collected by the State of Michigan Office of Vital Statistics at the state, county, county subdivision, and congressional district levels in Michigan.',
'template_name': 'infant-mortality.html',
'question_images': [],
'question_pdfs': [
]
},
{
'title': 'Immunization',
'slug': 'immunization',
'topics': ['public health', 'children', 'families', 'health care'],
'description': 'Data on the number of fully or partially immunized people collected by the Michigan Care Improvement Agency at the county, county subdivision (in Wayne County), congressional district, State House district, State Senate district, and Census tract levels in Michigan. For this analysis, MDHHS determined immunization status using a National Immunization Survey (NIS) - like methodology, which is a count of both valid and invalid doses. This allows for comparisons with other states. Full immunization, denoted as "fully immunized," includes 4 DTaP, 3 Polio, 1 MMR, 3 Hib, 3 HepB, 1 Varicella, 4 PCV, and 2 HepA vaccinations. Partial immunization, minus HepA and minus PCV, lack HepA and/or PCV vaccinations, respectively. ',
'template_name': 'immunization.html',
'question_images': [],
'question_pdfs': [
]
},
{
'title': 'Race and Hispanic Origin',
'slug': 'race-hispanic',
'topics': ['race',],
'description': 'Race is a complex issue, and no less so with Census data. A large proportion of Census tables are broken down by race.',
'template_name': 'race_hispanic.html',
'question_images': ['race.png',],
'question_pdfs': [
('Race','http://www.census.gov/acs/www/Downloads/QbyQfact/race.pdf'),
('Hispanic or Latino Origin','http://www.census.gov/acs/www/Downloads/QbyQfact/hispanic.pdf')
]
},
{
'title': 'Migration',
'slug': 'migration',
'topics': ['migration', 'tenure'],
'description': 'How the Census deals with migration data.',
'template_name': 'migration.html',
'question_images': ['migration.png',],
'question_pdfs': [
('Questions related to Residence One Year Ago from ACS','http://www.census.gov/acs/www/Downloads/QbyQfact/residence.pdf')
]
},
{
'title': 'Poverty',
'slug': 'poverty',
'topics': ['poverty', 'public assistance', 'income'],
'description': 'Poverty data and how it is used within the ACS.',
'template_name': 'poverty.html',
'question_images': ['income.png',],
'question_pdfs': [
('Questions related to Income and Poverty from ACS','http://www.census.gov/acs/www/Downloads/QbyQfact/income.pdf')
]
},
{
'title': 'Public Assistance',
'slug': 'public-assistance',
'topics': ['poverty', 'public assistance'],
'description': 'Public assistance data from the ACS.',
'template_name': 'public-assistance.html',
'question_images': ['public-assistance.png',],
'question_pdfs': [
('Questions on Income Sources from ACS','http://www.census.gov/acs/www/Downloads/QbyQfact/income.pdf'),
('Question about Food Stamps from ACS','http://www.census.gov/acs/www/Downloads/QbyQfact/food_stamp.pdf')
]
},
{
'title': 'Same-Sex Couples',
'slug': 'same-sex',
'topics': ['marital status',],
'description': 'Although Census does not ask about them directly, there are a number of ways to get at data about same-sex couples using ACS data.',
'template_name': 'same-sex.html',
'question_images': ['same-sex.png',],
'question_pdfs': [
('Questions on Relationships from ACS','http://www.census.gov/acs/www/Downloads/QbyQfact/relationship.pdf'),
('Question on Gender from ACS','http://www.census.gov/acs/www/Downloads/QbyQfact/sex.pdf')
]
},
{
'title': 'Income',
'slug': 'income',
'topics': ['poverty', 'income'],
'description': 'How the Census approaches the topic of income.',
'template_name': 'income.html',
'question_images': ['income.png',],
'question_pdfs': [
('All Income Questions from the Census','http://www.census.gov/acs/www/Downloads/QbyQfact/income.pdf')
]
},
{
'title': 'Table Codes',
'slug': 'table-codes',
'description': 'While Census Reporter hopes to save you from the details, you may be interested to understand some of the rationale behind American Community Survey table identifiers.',
'template_name': 'table-codes.html',
},
{
'title': 'Employment',
'slug': 'employment',
'topics': ['income', 'employment'],
'description': 'While the ACS is not always the best source for employment data, it provides interesting information for small geographies that other sources do not cover.',
'short_description': 'Interesting information for small geographies that other sources do not cover.',
'template_name': 'employment.html',
'question_images': ['employment.png',],
'question_pdfs': [
('Labor Force Status','http://www.census.gov/acs/www/Downloads/QbyQfact/labor.pdf'),
('Work Status','http://www.census.gov/acs/www/Downloads/QbyQfact/work_status.pdf'),
('Class of Worker; Industry; Occupation','http://www.census.gov/acs/www/Downloads/QbyQfact/worker.pdf'),
]
},
{
'title': 'Seniors',
'slug': 'seniors',
'topics': ['grandparents', 'seniors'],
'description': 'In addition to basic Census data about age, there are a small number of Census tables which focus directly on data about older Americans, and on grandparents as caregivers.',
'short_description': 'Data about older Americans, and on grandparents as caregivers.',
'template_name': 'seniors.html',
'question_images': ['seniors.png',],
'question_pdfs': [
('Age','http://www.census.gov/acs/www/Downloads/QbyQfact/age.pdf'),
('Grandparents as Caregivers','http://www.census.gov/acs/www/Downloads/QbyQfact/grandparents.pdf'),
]
},
{
'title': 'Getting Started',
'slug': 'getting-started',
'description': "The Census is a big subject and there's a lot to learn, but you don't have to learn it all at once. Here's some help knowing the lay of the land.",
'short_description': "The Census is a big subject and there's a lot to learn, but you don't have to learn it all at once.",
'template_name': 'getting_started.html',
},
{
'title': 'Veterans and Military',
'slug': 'veterans',
'topics': ['veterans'],
'description': 'Data collected about past and present members of the U.S. Armed Forces.',
'template_name': 'veterans.html',
'question_images': ['veteran.png', 'veteran_period.png', 'va_rating.png'],
'question_pdfs': [
('Veteran Status and Period of Military Service','http://www.census.gov/acs/www/Downloads/QbyQfact/veteran.pdf'),
('VA Service-Connected Disability Rating Status','http://www.census.gov/acs/www/Downloads/QbyQfact/VA_rating.pdf'),
# ('The Migration of Military Spouses using the 2007-2011 5-Year American Community Survey', 'http://www.census.gov/hhes/migration/files/Military_Migration_paper_final_13-04-29.pdf')
]
},
]
# Index the topic definitions by slug for direct lookup (e.g. in views).
TOPICS_MAP = {entry['slug']: entry for entry in TOPICS_LIST}
|
# Read three lines from stdin and echo them back-to-back on one line.
first = input()
second = input()
third = input()
print(f"{first}{second}{third}")
985,437 | c6233177ea0d1260db06a1f5d6719c8898f4c17c | default_app_config = 'datacenter.apps.DatacenterConfig' |
985,438 | 1eb195efe4cfcfc574f6215894e9ae4179e25850 | import socket as sk
def communicate(host, port, request):
    """Open a TCP connection to (host, port), send *request* (bytes), and
    return the first chunk (up to 1024 bytes) of the response.

    Fixes: ``send`` may transmit only part of the buffer, so ``sendall``
    is used instead; the ``with`` block guarantees the socket is closed
    even if connect/send/recv raises (the original leaked it on error).
    """
    with sk.socket(sk.AF_INET, sk.SOCK_STREAM) as s:
        s.connect((host, port))
        s.sendall(request)
        return s.recv(1024)
|
985,439 | 59bf5f3d545882c90bccadaabc4b45fa4b496867 | import unittest
from base import Base
from selenium.webdriver.common.by import By
class Homework3(Base):
    """Counts blend products on the doTERRA product-education page by brand keyword."""
    def test_challenge2(self):
        self.driver.get("https://www.doterra.com/US/en/product-education-blends")
        oils = self.driver.find_elements(By.XPATH, "//*[@id=\"content\"]//a/span[@class=\"title\"]")
        doTerra = 0
        digestZen = 0
        misc = 0
        # Bucket each product title by the brand keyword it contains.
        for oil in oils:
            title = oil.text.lower()
            if "doterra" in title:
                doTerra += 1
            elif "digestzen" in title:
                digestZen += 1
            else:
                misc += 1
        print("doTERRA: ", doTerra)
        print("DigestZen: ", digestZen)
        print("Misc: ", misc)
if __name__ == '__main__':
unittest.main() |
985,440 | e4b53266fcd85758346484d464b5bd3365d3c162 | from .manage import app
|
# Compute and report the area and perimeter of a rectangle from user input.
length = int(input("enter the length of rectangle:"))
breadth = int(input("enter the breadth of rectangle:"))
area = length * breadth
perimeter = 2 * (length + breadth)
# Fixes: the original ',%area' was a SyntaxError, and the literal "/n"
# was meant to be the newline escape "\n".
print("\n area of rectangle is:%2d" % area)
print("\n perimeter of rectangle is:%2d" % perimeter)
|
985,442 | 9fa01a0fb87f28e09baf72824570ab1a0dd2853b | # -*- coding: utf-8 -*-
# @Time : 2020/4/21 16:18
# @python : python 3.7
# @Author : 水萍
# @File : 调整奇数位于偶数前面.py
# @Software: PyCharm
def exchange(nums):
    """Reorder *nums* in place so every odd value precedes every even value.

    Two pointers close in from both ends, swapping an even value on the
    left with an odd value on the right.  Relative order within each
    parity group is not guaranteed.  Returns the same list object.
    """
    if len(nums) <= 1:
        return nums
    left, right = 0, len(nums) - 1
    while left != right:
        if nums[left] % 2 != 0:
            left += 1
        elif nums[right] % 2 != 0:
            nums[left], nums[right] = nums[right], nums[left]
        else:
            right -= 1
    return nums
if __name__ == "__main__":
    sample = [1, 2, 3, 4]
    print(exchange(sample))
985,443 | 0f9fa44976d006eff7cf4a3ae2f6854cfb47ce60 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 16:44:47 2018
@author: nguyentran
IoTSE defines 3 types of entity:
IoTContent: represent a named IoT content, encapsulating ID, metadata, and content
Query: represent a named query, encapsulating query_ID, query_content (a dictionary)
ResultSet: represent a named collection of IoT content - score tuples
"""
from ..cs_kernel.entity_base import Entity as Entity
import copy
class IoTContent(Entity):
    """A named IoT content item: an ID plus metadata and content payloads.

    Built either from keyword arguments or from a plain dictionary
    (``iot_content_dict``), which takes precedence when given.
    """
    def __init__(self, ID="", metadata=None, content=None, iot_content_dict=None):
        super().__init__()
        if iot_content_dict is not None:
            self.from_dict(iot_content_dict)
        else:
            self.ID = ID
            # BUG FIX: the original used mutable {} defaults, which are
            # shared across every instance; create a fresh dict per instance.
            self.metadata = {} if metadata is None else metadata
            self.content = {} if content is None else content
    def from_dict(self, iot_content_dict):
        """Populate ID/metadata/content from a plain dictionary."""
        self.ID = iot_content_dict["ID"]
        self.metadata = iot_content_dict["metadata"]
        self.content = iot_content_dict["content"]
class Query(Entity):
    """A named query: a query ID plus a dictionary of query content."""
    def __init__(self, query_ID="", query_content=None, query_dict=None):
        super().__init__()
        if query_dict is not None:
            self.from_dict(query_dict)
        else:
            self.query_ID = query_ID
            # BUG FIX: fresh dict per instance; the original's {} default
            # was shared by every Query built without query_content.
            self.query_content = {} if query_content is None else query_content
    def from_dict(self, query_dict):
        """Populate query_ID/query_content from a plain dictionary."""
        self.query_ID = query_dict["query_ID"]
        self.query_content = query_dict["query_content"]
class ResultSet(Entity):
    """A named collection of (IoT content, score) tuples answering a query."""
    def __init__(self, query_ID="", query_instance=None, results=None, result_set_dict=None):
        super().__init__()
        if result_set_dict is not None:
            self.from_dict(result_set_dict)
        else:
            self.query_ID = query_ID
            # BUG FIX: the original's {} default was shared across instances;
            # substitute a fresh dict before the type check below.
            if query_instance is None:
                query_instance = {}
            if type(query_instance) is not dict and type(query_instance) is not Query:
                raise TypeError("query_instance must be either a dictionary or an instance of Query entity")
            if type(query_instance) is Query:
                query_instance = query_instance.to_dict()
            self.query_instance = query_instance
            self.results = [] if results is None else results
    def add_IoTContent_score(self, IoTContent=None, score=None):
        """Append one (content, score) tuple; fresh {} defaults per call
        (the original's {} defaults were shared across every call)."""
        self.results.append((IoTContent if IoTContent is not None else {},
                             score if score is not None else {}))
    def to_dict(self):
        """Return a dict form with nested content entities also dict-ified.

        If any result's first element has no ``to_dict`` (e.g. it is
        already a plain dict), fall back to the raw ``__dict__`` as-is.
        """
        temp_list = []
        for result in self.results:
            try:
                temp_list.append((result[0].to_dict(), result[1]))
            except AttributeError:
                return self.__dict__
        # Shallow-copy so the converted results don't mutate self.results.
        temp_obj = copy.copy(self)
        temp_obj.results = temp_list
        return temp_obj.__dict__
    def from_dict(self, result_set_dict):
        """Populate all fields from a plain dictionary."""
        self.query_ID = result_set_dict["query_ID"]
        self.query_instance = result_set_dict["query_instance"]
        self.results = result_set_dict["results"]
985,444 | 746c24da79f6137c4a1ffaff3a015be7c2af5600 | from rest_framework import serializers
from evaluador_peso import models as model_pesos
class PersonaSerializer(serializers.ModelSerializer):
    """Serialize every model field of Persona for the REST API."""
    class Meta:
        model = model_pesos.Persona
        fields = "__all__"
985,445 | 0bc5f0e0545b298535898003d0f58088d781b143 | import tkinter as tk
from ui.captcha_ import Captcha
from ui.private_entry import PrivateEntry
from tkinter import messagebox
from tkinter import ttk
from ui.window import Window
from security.password_strength import PasswordStrength
import itertools
class MasterDialogBase(Window):
    """Base modal dialog for entering or changing the master password.

    Subclasses build their entry widgets in ``_body`` and implement
    ``validate``/``cancel``; on successful validation ``apply`` hands the
    entered password to the encryption layer and wipes the Tk variable.
    """
    def __init__(self, parent, db, encryption):
        self.db = db
        self.encryption = encryption
        # Tk variable bound by subclasses to the (new-)password entry.
        self.password_var = tk.StringVar()
        # noinspection PyProtectedMember
        assert self.encryption._db == self.db # TODO: remove this when encryption/db is cleaned up
        super().__init__(parent, title="Master password")
    def _body(self, frame):
        """Create widgets shared by all variants; subclasses add theirs above row 99."""
        self.resizable(False, False)
        self._error_label = ttk.Label(frame)
        # Row 99 keeps the error label below any subclass widgets.
        self._error_label.grid(row=99)
    def _buttonbox(self):
        """Build the OK/Cancel button row and bind Return/Escape keys."""
        box = ttk.Frame(self)
        # Stretchable spacer frames on both sides centre the buttons.
        ttk.Frame(box).pack(side=tk.LEFT, fill=tk.X, expand=True)
        w = ttk.Button(box, text="OK", width=10, command=self.ok, default=tk.ACTIVE)
        w.pack(side=tk.LEFT, padx=2, pady=2)
        w = ttk.Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side=tk.LEFT, padx=2, pady=2)
        ttk.Frame(box).pack(side=tk.LEFT, fill=tk.X, expand=True)
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        box.pack(fill=tk.BOTH, expand=True)
    def close(self):
        """Treat a window-close request like Cancel."""
        self.cancel()
    def ok(self, event=None):
        """Validate input; on success apply it and tear the dialog down."""
        self.set_error_message('')
        if self.validate():
            self.withdraw()
            self.update_idletasks()
            self.apply()
            self.parent.focus_set()
            self.destroy()
        else:
            self.on_fail()
            self.initial_focus.focus()
    def validate(self):
        """Subclass hook; base returns None, which ``ok`` treats as failure."""
        pass
    def apply(self):
        """Hand the entered password to the encryption layer, then wipe it."""
        try:
            self.encryption.password = self.password_var.get()
            # Overwrite the Tk variable so the password does not linger.
            self.password_var.set('\0' * len(self.password_var.get()))
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt /
            # SystemExit, but the error is shown and then re-raised.
            messagebox.showerror(title="Failure", message=f"Failed to set master password. Operation aborted.")
            raise
    def set_error_message(self, message):
        """Show *message* in the dialog's error label ('' clears it)."""
        self._error_label.configure(text=message)
    def on_fail(self):
        """Subclass hook invoked after a failed validation."""
        pass
    def cancel(self, event=None):
        """Subclass hook: dismiss the dialog (base is a no-op)."""
        pass
class MasterDialogCheck(MasterDialogBase):
    """Dialog that verifies the existing master password (with a captcha)."""
    def __init__(self, parent, db, encryption):
        super().__init__(parent, db, encryption)
    def _body(self, frame):
        """Build the password entry and captcha; return the initial-focus widget."""
        super()._body(frame)
        # itertools.count hands out consecutive grid rows.
        rowcount = itertools.count()
        ttk.Label(frame, text="Enter master password:").grid(row=next(rowcount))
        self.entry = PrivateEntry(frame, textvariable=self.password_var, width=38)
        self.entry.grid(row=next(rowcount))
        self.captcha = Captcha(frame)
        self.captcha.grid(row=next(rowcount))
        return self.entry
    def on_fail(self):
        """Refresh the captcha and clear the entry after a failed attempt."""
        super().on_fail()
        self.captcha.refresh()
        self.entry.delete(0, tk.END)
    def validate(self):
        """Require a correct captcha and a password matching the stored one."""
        if not self.captcha.validate():
            self.set_error_message("Invalid captcha!")
            return False
        elif not self.db.check_master_pwd(self.entry.get()):
            self.set_error_message("Invalid password!")
            return False
        return True
    def cancel(self, event=None):
        # Cancelling the startup check closes the whole application window.
        self.parent.destroy()
class MasterDialogInit(MasterDialogBase):
    """Dialog for setting the master password for the first time."""
    def __init__(self, parent, db, encryption):
        super().__init__(parent, db, encryption)
    def _body(self, frame):
        """Build new/confirm entries plus a strength meter; return focus widget."""
        super()._body(frame)
        rowcount = itertools.count()
        ttk.Label(frame, text="Set master password:").grid(row=next(rowcount))
        self.new_entry = PrivateEntry(frame, textvariable=self.password_var, width=38)
        self.new_entry.grid(row=next(rowcount))
        # NOTE(review): created but never gridded -- possibly dead; confirm.
        self.label = ttk.Label(frame)
        ttk.Label(frame, text="Confirm master password:").grid(row=next(rowcount))
        self.confirm_entry = PrivateEntry(frame, width=38)
        self.confirm_entry.grid(row=next(rowcount))
        PasswordStrength(frame, self.password_var).grid(row=next(rowcount))
        return self.new_entry
    def cancel(self, event=None):
        # Without a master password the app cannot continue: close it.
        self.parent.destroy()
    def validate(self):
        """Require the two password entries to match."""
        if self.new_entry.get() != self.confirm_entry.get():
            self.set_error_message("Passwords don't match!")
            return False
        return True
class MasterDialogChange(MasterDialogBase):
    """Dialog for changing an existing master password."""
    def __init__(self, parent, db, encryption):
        super().__init__(parent, db, encryption)
    def _body(self, frame):
        """Build current/new/confirm entries plus a strength meter."""
        super()._body(frame)
        rowcount = itertools.count()
        ttk.Label(frame, text="Enter current master password:").grid(row=next(rowcount))
        self.old_entry = PrivateEntry(frame, width=38)
        self.old_entry.grid(row=next(rowcount))
        ttk.Label(frame, text="Set new master password:").grid(row=next(rowcount))
        self.new_entry = PrivateEntry(frame, textvariable=self.password_var, width=38)
        self.new_entry.grid(row=next(rowcount))
        # NOTE(review): created but never gridded -- possibly dead; confirm.
        self.label = ttk.Label(frame)
        ttk.Label(frame, text="Confirm master password:").grid(row=next(rowcount))
        self.confirm_entry = PrivateEntry(frame, width=38)
        self.confirm_entry.grid(row=next(rowcount))
        PasswordStrength(frame, self.password_var).grid(row=next(rowcount))
        return self.old_entry
    def cancel(self, event=None):
        # Changing the password is optional, so cancel only closes the dialog.
        self.parent.focus_set()
        self.destroy()
    def on_fail(self):
        """Clear the current-password entry after a failed attempt."""
        super().on_fail()
        self.old_entry.delete(0, tk.END)
    def validate(self):
        """Require matching new passwords and a correct current password."""
        if self.new_entry.get() != self.confirm_entry.get():
            self.set_error_message("Passwords don't match!")
            return False
        elif not self.db.check_master_pwd(self.old_entry.get()):
            self.set_error_message("Invalid password!")
            return False
        return True
985,446 | afcc92a4eb91aabd934051d5b765425c77f02ab8 | # Zip
# This is our known way of combining a list
L1 = [1, 2, 3]
L2 = [3, 4, 5]
print('{}\n{}'.format(L1, L2))
# Element-wise sum written as a comprehension over paired items instead of
# an index loop.
L3 = [left + right for left, right in zip(L1, L2)]
print(L3)
# Here is the zip function, which combines the lists items, but in tuples
L4 = list(zip(L1, L2))
print(L4)
# Summing each zipped pair by tuple unpacking.
L4_sums = [first + second for first, second in L4]
print(L4_sums)
# Here is combining the zip function and combination into one function
L5 = [x1 + x2 + x3 for (x1, x2, x3) in zip(L1, L2, L3)]
print(L5)
|
985,447 | 1040ab555209f0257aeb5e945a256b17f485376c | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Location of the setuptools hooks for manipulating setup.py metadata.
"""
import os
from pbr import requires
def __inject_parsed_file(value, func):
    """Expand '#:<filename>' placeholder entries in *value* in place.

    Each entry starting with the '#:' token names a file; *func* parses that
    file into a list of entries which are appended to *value* after every
    placeholder has been removed.
    """
    TOKEN = '#:'
    expanded = []
    placeholders = []
    for entry in value:
        if entry.startswith(TOKEN):
            placeholders.append(entry)
            expanded.extend(func(entry[len(TOKEN):]))
    for placeholder in placeholders:
        value.remove(placeholder)
    value.extend(expanded)
def inject_requires(dist, attr, value):
    # setup() keyword hook: expand '#:<file>' entries in install_requires
    # by parsing the named requirements file.
    __inject_parsed_file(value, requires.parse_requirements)


def inject_dependency_links(dist, attr, value):
    # setup() keyword hook: expand '#:<file>' entries in dependency_links
    # by parsing the named file for dependency link URLs.
    __inject_parsed_file(value, requires.parse_dependency_links)
def inject_version(dist, attr, value):
    """Manipulate the version provided to setuptools to be one calculated
    from git.

    If the setuptools version starts with the token #:, we'll take over
    and replace it with something more friendly.

    Also injects a long_description from the first README found, enables
    package data, and fills sensible defaults for test_suite and packages.
    """
    import setuptools
    version = dist.metadata.version
    if version and version.startswith("#:"):
        # Modify version number: '#:module:object' names the version object,
        # otherwise fall back to <package>.version_info.
        if len(version[2:]) > 0:
            (version_module, version_object) = version[2:].split(":")
        else:
            version_module = "%s" % dist.metadata.name
            version_object = "version_info"
        vinfo = __import__(version_module).__dict__[version_object]
        versioninfo_path = os.path.join(vinfo.package, 'versioninfo')
        versioninfo_dir = os.path.join(os.path.dirname(versioninfo_path), "..")
        # We are inside our own git checkout when the parent dir exists.
        own_repo = os.path.isdir(versioninfo_dir)
        dist.metadata.version = vinfo.canonical_version_string(always=own_repo)
        # Inject cmdclass values here
        import cmdclass
        dist.cmdclass.update(cmdclass.get_cmdclass(versioninfo_path))
    # Inject long_description from the first README variant that exists.
    for readme in ("README.rst", "README.txt", "README"):
        if dist.long_description is None and os.path.exists(readme):
            # FIX: close the file handle instead of leaking it.
            with open(readme) as readme_file:
                dist.long_description = readme_file.read()
    dist.include_package_data = True
    # Set sensible default for test_suite
    if dist.test_suite is None:
        dist.test_suite = 'nose.collector'
    if dist.packages is None:
        dist.packages = setuptools.find_packages(exclude=['tests',
                                                          'tests.*'])
|
985,448 | f1788b07f39b8a58e1a5f69566aa08aa07953ead | # import asyncio
import logging
import traceback
import argparse
import subprocess
import sys
import schedule
from rpi2mqtt.config import Config
from rpi2mqtt.binary import *
from rpi2mqtt.temperature import *
from rpi2mqtt.ibeacon import Scanner
from rpi2mqtt.switch import Switch
from rpi2mqtt.thermostat import HestiaPi
import time
# beacontools is an optional dependency (BLE scanning); keep startup working
# without it.
try:
    from beacontools import BeaconScanner, IBeaconFilter
except Exception:
    # FIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed; import failure stays a best-effort warning.
    print("Unable to load beacontools")

# setup CLI parser
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config",
                    help="Path to config.yaml")
parser.add_argument('-d', '--dry-run',
                    help='Test drive config without triggering callbacks.')
parser.add_argument('--generate-config',
                    help="Generate config.yaml template.",
                    action='store_true')
parser.add_argument('--install-service',
                    help='Install rpi2mqtt as systemd service.',
                    action='store_true')
parser.add_argument('--install-user-service',
                    help='Install rpi2mqtt as user systemd service.',
                    action='store_true')
def main():
    """CLI entry point: handle one-shot commands (config generation, service
    installation), then build sensors from the config and run the polling loop."""
    config = None
    args = parser.parse_args()

    if args.generate_config:
        Config.generate_config('config.yaml')
        sys.exit(0)

    if args.install_service:
        username = input("User to run service as [pi]: ") or 'pi'
        config_path = input("Enter full path to config.yaml: ")
        # _path = input("Path rpi2mqtt executable (run `which rpi2mqtt`): ")
        _path = subprocess.check_output(['which', 'rpi2mqtt']).decode().strip()
        install_service(username, _path, config_path, 'system')
        sys.exit(0)

    if args.install_user_service:
        username = input("User to run service as [pi]: ") or 'pi'
        config_path = input("Enter full path to config.yaml: ")
        # _path = input("Path rpi2mqtt executable (run `which rpi2mqtt`): ")
        _path = subprocess.check_output(['which', 'rpi2mqtt']).decode().strip()
        install_service(username, _path, config_path, 'user')
        sys.exit(0)

    scanner = None
    if args.config:
        config = Config.get_instance(filename=args.config)
    if not config:
        logging.error("No configuration file present.")
        sys.exit(1)

    # start MQTT client (imported late so Config is initialized first)
    from rpi2mqtt.mqtt import MQTT
    MQTT.setup()

    sensor_list = []
    if len(config.sensors) > 0:
        for _, sensor in config.sensors.items():
            s = None
            if sensor.type == 'dht22':
                s = DHT(sensor.pin, sensor.topic, sensor.name, 'sensor', sensor.type)
            elif sensor.type == 'ibeacon':
                s = Scanner(sensor.name, sensor.topic, sensor.beacon_uuid, sensor.beacon_away_timeout)
            elif sensor.type == 'switch':
                s = Switch(sensor.name, sensor.pin, sensor.topic)
            elif sensor.type == 'reed':
                s = ReedSwitch(sensor.name, sensor.pin, sensor.topic, sensor.normally_open, sensor.get('device_type'))
            elif sensor.type == 'bme280':
                s = BME280(sensor.name, sensor.topic)
            elif sensor.type == 'hestiapi':
                s = HestiaPi(**sensor)
            elif sensor.type == 'onewire':
                s = OneWire(sensor.name, sensor.topic)
            else:
                # FIX: logging.warn is a deprecated alias of logging.warning.
                logging.warning('Sensor {} found in config, but was not setup.'.format(sensor.name))
            if s:
                sensor_list.append(s)
        try:
            scanner = BeaconScanner(sensor_list[1].process_ble_update) # TODO update to search sensor list and setup scanner accordingly.
            scanner.start()
        except Exception:
            # FIX: narrowed from a bare `except:`; scanner stays best-effort.
            logging.error("Beacon scanner did not start")
    else:
        logging.warning("No sensors defined in {}".format(args.config))

    schedule.every().day.at("01:00").do(MQTT.refresh_subscriptions)
    try:
        while True:
            for sensor in sensor_list:
                sensor.callback()
            time.sleep(config.polling_interval)
            MQTT.ping_subscriptions()
            schedule.run_pending()
    except (Exception, KeyboardInterrupt):
        # Includes KeyboardInterrupt so Ctrl-C still runs the cleanup below,
        # as the original bare `except:` did.
        traceback.print_exc()
        MQTT.client.loop_stop()
        if scanner:
            scanner.stop()
def install_service(username, _path, config_path, _type):
    """Write a systemd unit file that runs rpi2mqtt.

    username -- system user the service runs as
    _path -- absolute path to the rpi2mqtt executable
    config_path -- full path to config.yaml
    _type -- 'user' for a per-user unit, anything else for a system unit
    """
    template = """[Unit]
Description=rpi2mqtt Service
After=network-online.target
[Service]
# replace user with an existing system user
Restart=on-failure
User={username}
ExecStart={_path} -c {config_path}
[Install]
WantedBy=multi-user.target
""".format(username=username, _path=_path, config_path=config_path)
    # return template
    import os  # local import: this module does not import os at top level
    if _type == 'user':
        # FIX: the literal string '~/.config/...' was passed to open(), which
        # does NOT expand '~'; expand it and make sure the directory exists.
        filename = os.path.expanduser('~/.config/systemd/user/rpi2mqtt.service')
        os.makedirs(os.path.dirname(filename), exist_ok=True)
    else:
        filename = '/etc/systemd/system/rpi2mqtt.service'
    with open(filename, 'w') as f:
        f.write(template)
if __name__ == '__main__':
main() |
985,449 | 16b68558d9f97e236a3f48478353ba1ae0a5c886 | print()
# The following block of code shows a nested dictionary containing data on
# three pet owners and their pet's information.
pets = {
    'rover': {
        'ownername': 'doug',
        'pettype': 'dog',
        'color': 'black',
    },
    'jessica': {
        'ownername': 'danielle',
        'pettype': 'rabbit',
        'color': 'black',
    },
    'felix': {
        'ownername': 'stine',
        'pettype': 'cat',
        'color': 'tan',
    },
}

# Walk the outer dictionary (pet name -> details) and print one indented
# block per pet, title-casing every value for display.
for pet_name, details in pets.items():
    print("\nPet's name: " + pet_name.title())
    print("\tOwner: " + details['ownername'].title())
    print("\tPet Type: " + details['pettype'].title())
    print("\tPet Color: " + details['color'].title())
print() |
985,450 | 9a743839166573159779706ce2c82f28aed6eea2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import wot.io
import wot.ot
def main(argv):
    """Compute optimal-transport maps between pairs of time points and write
    one dataset file per (t0, t1) pair in the requested format."""
    parser = wot.ot.OptimalTransportHelper.create_base_parser('Compute transport maps between pairs of time points')
    # NOTE(review): wot.commands is referenced here but only wot.io / wot.ot
    # are imported above -- presumably re-exported via the wot package; verify.
    parser.add_argument('--format', help=wot.commands.FORMAT_HELP, default='loom', choices=wot.commands.FORMAT_CHOICES)
    args = parser.parse_args(argv)
    ot_helper = wot.ot.OptimalTransportHelper(args)
    params_writer = None
    # if args.solver is 'floating_epsilon':
    #     params_writer = open(args.out + '_params.txt', 'w')
    #     params_writer.write('t1' + '\t' + 't2' + '\t' + 'epsilon' + '\t' + 'lambda1' + '\t' + 'lambda2' +
    #                         '\n')

    def callback(cb_args):
        # Invoked once per computed (t0, t1) transport map.
        result = cb_args['result']
        # if args.solver is 'floating_epsilon':
        #     params_writer.write(
        #         str(cb_args['t0']) + '\t' + str(cb_args['t1']) + '\t' + str(result['epsilon']) + '\t' + str(
        #             result['lambda1']) + '\t' + str(
        #             result['lambda2']) + '\n')
        # save the transport map
        if args.verbose:
            print('Saving transport map')
        filename = args.out + '_' + str(cb_args['t0']) + '_' + str(cb_args['t1'])
        # Row/column metadata: copies of the per-timepoint dataframes with
        # the growth/day bookkeeping columns stripped.
        row_meta = cb_args['df0'].copy()
        row_meta.drop(['cell_growth_rate', 'day'], axis=1, inplace=True)
        row_meta['g'] = cb_args['g']
        col_meta = cb_args['df1'].copy()
        col_meta.drop(['cell_growth_rate', 'day'], axis=1, inplace=True)
        wot.io.write_dataset(wot.Dataset(result['transport'], row_meta, col_meta), filename,
                             output_format=args.format)

    ot_helper.compute_transport_maps(callback)
    if params_writer is not None:
        params_writer.close()
|
985,451 | ff2c5c5883828a83d1fd3e35461cb5e799b828eb | import logging
__author__ = 'Eric'
if __name__ == "__main__":
logging.basicConfig(filename="Networking.log", level=logging.INFO) |
985,452 | 0341446572c31d3c120cbe47ca917d494a6e6f5a | # <<BEGIN-copyright>>
# Copyright (c) 2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
# Written by the LLNL Nuclear Data and Theory group
# (email: mattoon1@llnl.gov)
# LLNL-CODE-683960.
# All rights reserved.
#
# This file is part of the FUDGE package (For Updating Data and
# Generating Evaluations)
#
# When citing FUDGE, please use the following reference:
# C.M. Mattoon, B.R. Beck, N.R. Patel, N.C. Summers, G.W. Hedstrom, D.A. Brown, "Generalized Nuclear Data: A New Structure (with Supporting Infrastructure) for Handling Nuclear Data", Nuclear Data Sheets, Volume 113, Issue 12, December 2012, Pages 3145-3171, ISSN 0090-3752, http://dx.doi.org/10. 1016/j.nds.2012.11.008
#
#
# Please also read this link - Our Notice and Modified BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the disclaimer below.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the disclaimer (as noted below) in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of LLNS/LLNL nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
# THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Additional BSD Notice
#
# 1. This notice is required to be provided under our contract with the U.S.
# Department of Energy (DOE). This work was produced at Lawrence Livermore
# National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
#
# 2. Neither the United States Government nor Lawrence Livermore National Security,
# LLC nor any of their employees, makes any warranty, express or implied, or assumes
# any liability or responsibility for the accuracy, completeness, or usefulness of any
# information, apparatus, product, or process disclosed, or represents that its use
# would not infringe privately-owned rights.
#
# 3. Also, reference herein to any specific commercial products, process, or services
# by trade name, trademark, manufacturer or otherwise does not necessarily constitute
# or imply its endorsement, recommendation, or favoring by the United States Government
# or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the United States Government or
# Lawrence Livermore National Security, LLC, and shall not be used for advertising or
# product endorsement purposes.
#
# <<END-copyright>>
import sys
sys.path.insert( 0, '../../../../../lib' )
import os
import pointwiseXY_C
# Optionally echo this test file's name when '-e' is among CHECKOPTIONS.
# (Python 2 print statement -- this test suite targets Python 2.)
if( 'CHECKOPTIONS' in os.environ ) :
    options = os.environ['CHECKOPTIONS'].split( )
    if( '-e' in options ) : print __file__
# Build and run the C reference implementation; its verbose output is
# captured in file 'v' and parsed by the checks below.
CPATH = '../../../../Test/UnitTesting/integrate'
os.system( 'cd %s; make -s clean; ./integrationWithWeight_sqrt_x -v > v' % CPATH )
def skipBlankLines(ls):
    """Drop leading blank lines from *ls* and return the remainder.

    A result consisting of a single blank line collapses to an empty list.
    """
    idx = 0
    for idx, line in enumerate(ls):
        if line.strip():
            break
    trimmed = ls[idx:]
    if len(trimmed) == 1 and not trimmed[0].strip():
        trimmed = []
    return trimmed
def getIntegerValue(name, ls):
    """Parse a '# <name> = <int>' header from the first line of *ls*.

    Returns (remaining lines, parsed integer); raises if the header is absent.
    """
    prefix = "# %s = " % name
    if ls[0][:len(prefix)] != prefix:
        raise Exception('%s: missing %s info: "%s"' % (__file__, name, ls[0][:-1]))
    return ls[1:], int(ls[0].split('=')[1])
def getDoubleValue(name, ls):
    """Parse a '# <name> = <float>' header from the first line of *ls*.

    Returns (remaining lines, parsed float); raises if the header is absent.
    """
    prefix = "# %s = " % name
    if ls[0][:len(prefix)] != prefix:
        raise Exception('%s: missing %s info: "%s"' % (__file__, name, ls[0][:-1]))
    return ls[1:], float(ls[0].split('=')[1])
def compareValues( label, i, v1, v2 ) :
    # Compare two floats after normalizing both through a 12-significant-digit
    # format and then rounding to 8 significant digits ('%.7e').
    sv1, sv2 = '%.12e' % v1, '%.12e' % v2
    sv1, sv2 = '%.7e' % float( sv1 ), '%.7e' % float( sv2 )
    # Python 2 print statement: echo the differing strings before raising.
    if( sv1 != sv2 ) : print '<%s> <%s>' % ( sv1, sv2 )
    if( sv1 != sv2 ) : raise Exception( '%s: values %e and %e diff by %e at %d for label = %s' % ( __file__, v1, v2, v2 - v1, i, label ) )
def getXYData( ls ) :
    # Parse one XY dataset: '# length' and '# interpolation' headers followed
    # by `length` lines of x/y pairs.  Returns (remaining lines, data) where
    # data is wrapped in a pointwiseXY_C object with the decoded
    # interpolation rule.  (Python 2 idioms: map() returns a list, xrange.)
    ls, length = getIntegerValue( 'length', ls )
    ls, interpolation = getIntegerValue( 'interpolation', ls )
    interpolationStr = [ 'lin-lin', 'log-lin', 'lin-log', 'log-log', 'flat' ][interpolation]
    data = [ map( float, ls[i].split( ) ) for i in xrange( length ) ]
    data = pointwiseXY_C.pointwiseXY_C( data, initialSize = len( data ), overflowSize = 10, interpolation = interpolationStr )
    ls = ls[length:]
    ls = skipBlankLines( ls )
    return( ls, data )
def checkIntegration(count, xMin, xMax, data, sum):
    """Check data's sqrt(x)-weighted integral over [xMin, xMax] against the
    reference value *sum*, and verify antisymmetry under swapped limits."""
    forward = data.integrateWithWeight_sqrt_x(xMin, xMax)
    compareValues('sum', count, forward, sum)
    reverse = data.integrateWithWeight_sqrt_x(xMax, xMin)
    if forward != -reverse:
        raise Exception('%s: at %d V = %g != -invV = %g' % (__file__, count, forward, reverse))
# Parse the reference output 'v' produced by the C driver above and check
# each integral against pointwiseXY_C.integrateWithWeight_sqrt_x.
f = open( os.path.join( CPATH, 'v' ) )
ls = f.readlines( )
f.close( )
count = 0
while( len( ls ) ) :
    count += 1
    ls, xMin = getDoubleValue( 'xMin', ls )
    ls, xMax = getDoubleValue( 'xMax', ls )
    ls, data = getXYData( ls )
    ls = skipBlankLines( ls )
    # The next non-blank line carries the reference value: '... sum = <v> ...'
    l, ls = ls[0], ls[1:]
    sum = float( l.split( 'sum = ' )[1].split( )[0] )
    checkIntegration( count, xMin, xMax, data, sum )
|
985,453 | 84db940c7e24aef489410e745ec97e09c5daf78c | '''
-Medium-
*Bit*
A character in UTF8 can be from 1 to 4 bytes long, subject to the following
rules:
For 1-byte character, the first bit is a 0, followed by its unicode code.
For n-bytes character, the first n-bits are all one's, the n+1 bit is 0,
followed by n-1 bytes with most significant 2 bits being 10.
This is how the UTF-8 encoding would work:
Char. number range | UTF-8 octet sequence
(hexadecimal) | (binary)
--------------------+---------------------------------------------
0000 0000-0000 007F | 0xxxxxxx
0000 0080-0000 07FF | 110xxxxx 10xxxxxx
0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
Given an array of integers representing the data, return whether it is a
valid utf-8 encoding.
Note:
The input is an array of integers. Only the least significant 8 bits of each
integer is used to store the data. This means each integer represents only
1 byte of data.
Example 1:
data = [197, 130, 1], which represents the octet sequence: 11000101 10000010
00000001.
Return true.
It is a valid utf-8 encoding for a 2-bytes character followed by a 1-byte
character.
Example 2:
data = [235, 140, 4], which represents the octet sequence: 11101011
10001100 00000100.
Return false.
The first 3 bits are all one's and the 4th bit is 0 means it is a 3-bytes character.
The next byte is a continuation byte which starts with 10 and that's correct.
But the second continuation byte does not start with 10, so it is invalid.
'''
class Solution(object):
    """Validator for UTF-8 byte sequences given as a list of ints."""

    def validUtf8(self, data):
        """
        :type data: List[int]
        :rtype: bool
        """
        LEAD_4 = 0b00001111   # high nibble of a 4-byte leader (11110xxx)
        LEAD_3 = 0b00000111   # top three bits of a 3-byte leader (1110xxxx)
        LEAD_2 = 0b00000011   # top two bits of a 2-byte leader (110xxxxx)
        ASCII = 0b00000000    # single-byte characters have the top bit clear
        CONT = 0b00000010     # continuation bytes look like 10xxxxxx

        pos, total = 0, len(data)
        while pos < total:
            byte = data[pos]
            # Single-byte (ASCII) character.
            if ASCII | byte >> 7 == ASCII:
                pos += 1
                continue
            # Classify the leader byte; each pattern requires the bit just
            # below the run of ones to be zero.
            if LEAD_4 & byte >> 4 == LEAD_4 and (0b1 << 3) & byte == 0:
                followers = 3
            elif LEAD_3 & byte >> 5 == LEAD_3 and (0b1 << 4) & byte == 0:
                followers = 2
            elif LEAD_2 & byte >> 6 == LEAD_2 and (0b1 << 5) & byte == 0:
                followers = 1
            else:
                return False
            # Not enough bytes left for the promised continuation run.
            if pos + followers > total - 1:
                return False
            # Every follower must match 10xxxxxx.
            for nxt in range(pos + 1, pos + followers + 1):
                if CONT & data[nxt] >> 6 != CONT:
                    return False
            pos = pos + followers + 1
        return True
if __name__ == "__main__":
    # Examples from the problem statement; expected output: True, False, False.
    print(Solution().validUtf8([197, 130, 1]))
    print(Solution().validUtf8([235, 140, 4]))
    print(Solution().validUtf8([240,162,138,147,145]))
print(Solution().validUtf8([248,130,130,130])) |
985,454 | ccfe1d80e3735c313e9046eb76994709cb10c3e4 | import json
import os
from datetime import datetime
import sys
sys.path.append("../")
from causal_graphs.graph_visualization import visualize_graph
from causal_graphs.graph_export import load_graph
from causal_graphs.graph_real_world import load_graph_file
from causal_graphs.graph_definition import CausalDAG
from causal_discovery.utils import set_cluster
from experiments.utils import set_seed, get_basic_parser, test_graph
if __name__ == '__main__':
    parser = get_basic_parser()
    parser.add_argument('--graph_files', type=str, nargs='+',
                        help='Graph files to apply ENCO to. Files must be .pt, .npz, or .bif files.')
    args = parser.parse_args()

    # Basic checkpoint directory creation: default to a timestamped folder
    # unless --checkpoint_dir was supplied.
    current_date = datetime.now()
    if args.checkpoint_dir is None or len(args.checkpoint_dir) == 0:
        checkpoint_dir = "checkpoints/%02d_%02d_%02d__%02d_%02d_%02d/" % (
            current_date.year, current_date.month, current_date.day, current_date.hour, current_date.minute, current_date.second)
    else:
        checkpoint_dir = args.checkpoint_dir
    os.makedirs(checkpoint_dir, exist_ok=True)
    # Persist the CLI arguments next to the results for reproducibility.
    with open(os.path.join(checkpoint_dir, "args.json"), "w") as f:
        json.dump(vars(args), f, indent=4)

    set_cluster(args.cluster)
    for gindex, graph_path in enumerate(args.graph_files):
        # Seed setting for reproducibility
        set_seed(args.seed)
        # Load graph -- the loader is chosen by file extension.
        if graph_path.endswith(".bif"):
            graph = load_graph_file(graph_path)
        elif graph_path.endswith(".pt"):
            graph = CausalDAG.load_from_file(graph_path)
        elif graph_path.endswith(".npz"):
            graph = load_graph(graph_path)
        else:
            assert False, "Unknown file extension for " + graph_path
        # Derive a short identifier: strip directory, extension and an
        # optional "graph_" prefix, then prepend a zero-padded index.
        graph_name = graph_path.split("/")[-1].rsplit(".", 1)[0]
        if graph_name.startswith("graph_"):
            graph_name = graph_name.split("graph_")[-1]
        file_id = "%s_%s" % (str(gindex+1).zfill(3), graph_name)
        # Visualize graph (skipped for very large graphs; figure size grows
        # sublinearly with the number of variables).
        if graph.num_vars <= 100:
            figsize = max(3, graph.num_vars ** 0.7)
            visualize_graph(graph,
                            filename=os.path.join(checkpoint_dir, "graph_%s.pdf" % (file_id)),
                            figsize=(figsize, figsize),
                            layout="circular" if graph.num_vars < 40 else "graphviz")
        s = "== Testing graph \"%s\" ==" % graph_name
        print("="*len(s)+"\n"+s+"\n"+"="*len(s))
        # Start structure learning
        test_graph(graph, args, checkpoint_dir, file_id)
|
985,455 | 058cdf51492156857a66df71fbb9b3a4c06c20dd |
def is_encoding(char):
    """Return True when *char* can carry a hidden bit.

    A character is usable unless it is a letter, a digit, or one of the
    literal characters the scheme must leave untouched (space, '<', '>',
    carriage return, newline).
    """
    # FIX (idiom): boolean logic with `or` instead of bitwise `|`; a tuple
    # membership test replaces the chain of equality comparisons.
    return not (char.isalpha() or char.isdigit() or char in (' ', '<', '>', '\r', '\n'))
def decode(message):
    """Recover the hidden plaintext from a carrier *message*.

    Extracts the bit stream with get_binary(), splits it into 8-bit groups
    (trailing partial bits are dropped) and maps each group to a character.
    """
    bin_string = get_binary(message)
    octets = [bin_string[8 * i:8 * (i + 1)] for i in range(len(bin_string) // 8)]
    # FIX (idiom): renamed local `bytes` -> `codes`; it shadowed the builtin.
    codes = [int(bits, 2) for bits in octets]
    decoded = ''.join(chr(code) for code in codes)
    return decoded
def get_binary(message):
    """Translate the encoding characters of *message* into a bit string.

    Starting after the first two encoded characters, a backslash marks a '1'
    (and consumes the escaped character after it); anything else is a '0'.
    """
    secret = get_encoded(message)
    bits = []
    pos = 2
    while pos < len(secret):
        if secret[pos] == '\\':
            bits.append('1')
            pos += 2
        else:
            bits.append('0')
            pos += 1
    return ''.join(bits)
def get_encoded(message):
    """Return only the characters of *message* that can carry hidden bits."""
    return ''.join(ch for ch in message if is_encoding(ch))
def get_binary_from_plaintext(message):
    """Render *message* as a stream of 8-bit binary codepoint values."""
    # '08b' zero-pads to eight digits, equivalent to format(..., 'b').zfill(8).
    return ''.join(format(ord(ch), '08b') for ch in message)
def get_space(text):
    """Return how many whole bytes of secret data *text* can hide."""
    usable = sum(1 for ch in text if is_encoding(ch))
    return usable // 8
def hide_message(message, text):
    """Hide *message* inside carrier *text* and return the new text.

    Each encoding-capable character of *text* carries one bit of the
    message: a '1' bit is marked by prefixing the character with a
    backslash, a '0' bit leaves it unchanged.

    Raises SpaceError when *text* cannot hold the whole message.
    """
    space = get_space(text)
    if len(message) > space:
        raise SpaceError(space, len(message))
    binary_string = get_binary_from_plaintext(message)
    x = 0
    new_message = ''
    for char in text:
        # FIX (idiom): `and` instead of bitwise `&` on booleans; both
        # operands are side-effect free, so behavior is unchanged.
        if is_encoding(char) and x < len(binary_string):
            if binary_string[x] == '1':
                new_message = new_message + "\\" + char
            else:
                new_message += char
            x += 1
        else:
            new_message += char
    return new_message
class SpaceError(Exception):
    """Raised when a secret message exceeds the carrier text's capacity."""

    def __init__(self, space, size):
        # space: bytes the carrier can hold; size: bytes the secret needs.
        self.space = space
        self.size = size

    def __str__(self):
        return "Secret is %d bytes long. Message can only hide %d bytes." % (self.size, self.space)
|
985,456 | 9f453bfc6f960ab02713acdbd73c7f4e32be3a3e | numbers={"one","two","three","four","five","six"}
print("\nprint the original set")
print(numbers)
print("\nadding other numbers to the set")
numbers.add("seven")
print("\nprint the modified set")
print(numbers)
print(type(numbers))
print("looping through the set elements...")
for i in numbers:
print(i)
|
985,457 | 42508630be0a03dad234feafc7fbcba1bf9fb5d0 | import pyOcean_cpu as ocean
# Type casting applied to tensor objects
a = ocean.asTensor([1,2,3])
print(a)
# Cast to a new int8 tensor; `a` is left unchanged here.
b = ocean.int8(a)
print(b)
# Cast with a second boolean argument -- presumably requests an in-place
# conversion of `a` to float (the following print shows `a` again);
# TODO confirm against the pyOcean documentation.
ocean.float(a, True)
print(a)
|
985,458 | 20779e26bc30bef5afb8703f9c575407301892ed | import sys
from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QLabel, QApplication, qApp, QHBoxLayout, QVBoxLayout
from PyQt5.QtGui import QPalette, QColor
from MainWindow import MainWindow
if __name__ == "__main__":
app = QApplication(sys.argv)
main_window = MainWindow()
qApp.setStyle('Fusion')
palette = QPalette()
palette.setColor(QPalette.Window, QColor(25, 50, 50))
palette.setColor(QPalette.Button, QColor(12, 30, 58))
palette.setColor(QPalette.Foreground, QColor(255, 0, 0))
app.setPalette(palette)
sys.exit(app.exec_())
|
985,459 | 19c1cdbc7fc8078fc0bed27568fa0acfd02f7be0 | # Generated by Django 3.1 on 2020-08-31 16:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes MaritalStatus.person to allow NULL
    # (null=True) while keeping CASCADE deletion. Do not edit applied
    # migrations by hand.

    dependencies = [
        ('main', '0006_auto_20200831_0924'),
    ]

    operations = [
        migrations.AlterField(
            model_name='maritalstatus',
            name='person',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.person'),
        ),
    ]
|
985,460 | 56401e733e18c4e72b82c850c6c9244e14adeb3f | TOKEN = '1371826061:AAE2JwCB2R3f8wsGYI_ose5sCmo6GZNiuAU' #Токен бота
# NOTE(review): the bot TOKEN above is hard-coded in source control --
# rotate it and load secrets from the environment instead.
admin = 449261488 # chat_id of admin 1
db = 'db.db'  # SQLite database file used by the bot
|
985,461 | 99c368323fdd674bd7c10df9496954c399eb21cf | import sys
assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
from pyspark.sql import SparkSession, functions as sf, types, Row
spark = SparkSession.builder.appName('NYC TAXI').getOrCreate()
assert spark.version >= '2.3' # make sure we have Spark 2.3+
spark.sparkContext.setLogLevel('WARN')
sc = spark.sparkContext
spark.catalog.clearCache()
def main(input_data):
    """Aggregate NYC taxi trip CSV data into per-driver monthly summaries
    and write the result as JSON under analysis_result/driver_monthly."""
    # main logic starts here
    tripdata = spark.read.option("header","true").csv(input_data)
    # Group by taxi (medallion), driver (hack_license) and pickup month;
    # sum distance, total amount, trip time and passenger count per group.
    monthly_summary = tripdata.groupBy('medallion', 'hack_license', sf.month('pickup_datetime').alias('p_month')).agg(sf.sum('trip_distance').alias('sum_dist'),
                                                                                                                      sf.sum('total_amount').alias('sum_amount'),
                                                                                                                      sf.sum('trip_time_in_secs').alias('sum_trip'),
                                                                                                                      sf.sum('passenger_count').alias('passenger_count'))
    monthly_summary.write.format('json').mode('overwrite').save('analysis_result/driver_monthly')


if __name__ == '__main__':
    # First CLI argument: path to the input CSV data.
    input_data = sys.argv[1]
    main(input_data)
|
985,462 | 1e0fa7330c5ccf2351e436952a0bbfe465f9b1bc | for batch_idx, data in enumerate(data_loader): # Fetch graphs of one batch_size; e.g. 32 graphs
input_node_f_unsorted = data['input_node_f'].float() # Dim: BS * N_max * INF
raw_node_f_unsorted = data['raw_node_f'].float() # Dim: BS * N_max * NF
edge_f_unsorted = data['edge_f'].float() # Dim: BS * N_max * M * EF
y_len_unsorted = data['len'] # list of node numbers in each graph in this batch
y_len_max = max(y_len_unsorted) # denote as N
input_node_f_unsorted = input_node_f_unsorted[:, 0:y_len_max, :] # Dim: BS * (N+1) * INF
raw_node_f_unsorted = raw_node_f_unsorted[:, 0:y_len_max, :] # Dim: BS * N * NF
edge_f_unsorted = edge_f_unsorted[:, 0:y_len_max, :, :] # Dim: BS * N * M * EF
BS, N, M, EF = edge_f_unsorted.shape
# initialize GRU hidden state according to batch size
rnn.hidden = rnn.init_hidden(batch_size=input_node_f_unsorted.size(0))
# sort input # The graph with most node numbers come first
y_len,sort_index = torch.sort(y_len_unsorted,0,descending=True)
y_len = y_len.numpy().tolist()
input_node_f = torch.index_select(input_node_f_unsorted, 0, sort_index)
raw_node_f = torch.index_select(raw_node_f_unsorted, 0, sort_index)
edge_f = torch.index_select(edge_f_unsorted, 0, sort_index)
edge_f_reshape = pack_padded_sequence(edge_f,y_len,batch_first=True).data # SumN * M * EF
# reverse edge_f_reshape, so that their lengths are sorted, add dimension
idx = [i for i in range(edge_f_reshape.size(0) - 1, -1, -1)]
idx = torch.LongTensor(idx)
edge_f_reshape = edge_f_reshape.index_select(0, idx)
edge_rnn_input = torch.cat((torch.ones(edge_f_reshape.size(0), 1, edge_f_reshape.size(2)), edge_f_reshape[:, 0:-1, :]),
dim=1) # should have all-1 row
# Dim: SumN * (M+1) * EF
# output_y = y_reshape # Dim: SumN * M * 1
output_y = edge_f_reshape
# batch size for output module: sum(y_len)
output_y_len = []
output_y_len_bin = np.bincount(np.array(y_len))
for i in range(len(output_y_len_bin)-1,0,-1):
count_temp = np.sum(output_y_len_bin[i:]) # count how many y_len is above i
output_y_len.extend([min(i,M)]*count_temp) # put them in output_y_len; max value should not exceed y.size(2)
# TODO: understand what's going on
output_y = Variable(output_y).cuda() # Dim should be SumN * M * EF
edge_rnn_input = Variable(edge_rnn_input).cuda()
input_node_f = Variable(input_node_f).cuda()
if args.loss_type == "mse":
output_node_f = Variable(raw_node_f).cuda()
else:
output_node_f = Variable(np.argmax(raw_node_f,axis=-1)).cuda()
output_y = pack_padded_sequence(output_y,output_y_len,batch_first=True)
output_y = pad_packed_sequence(output_y,batch_first=True)[0]
# if using ground truth to test
h = rnn(input_node_f, pack=True, input_len=y_len) # Dim: BS * (N+1) * hidden_size_rnn_output
node_f_pred = node_f_gen(h) # Dim: BS * (N+1) * NF
h = pack_padded_sequence(h,y_len,batch_first=True).data # get packed hidden vector
# Dim should be SumN * hidden_size_rnn_output
# reverse h # TODO: why reverse?
idx = [i for i in range(h.size(0) - 1, -1, -1)]
idx = Variable(torch.LongTensor(idx)).cuda()
h = h.index_select(0, idx)
hidden_null = Variable(torch.zeros(args.num_layers-1, h.size(0), h.size(1))).cuda()
output.hidden = torch.cat((h.view(1,h.size(0),h.size(1)),hidden_null),dim=0) # num_layers, SumN, hidden_size
# y_pred = output(output_x, pack=True, input_len=output_y_len)
y_pred_origin = output(edge_rnn_input, pack=True, input_len=output_y_len) # Dim: SumN * (M+1) * EF
# edge_f_pred = edge_f_gen(y_pred) # TODO: check if dim correct
# edge_f_pred = torch.sigmoid(edge_f_pred)
# y_pred = torch.softmax(y_pred, dim=2) # Dim: SumN * M * EF
# clean
# If all elements in output_y_len are equal to M, this code segment has no effect
# print(y_pred)
# print(type(y_pred))
# print(y_pred.shape)
y_pred = pack_padded_sequence(y_pred_origin, output_y_len, batch_first=True)
# print(y_pred)
# print(type(y_pred))
# print(y_pred.data.shape)
y_pred = pad_packed_sequence(y_pred, batch_first=True)[0]
output_y = pack_padded_sequence(output_y,output_y_len,batch_first=True)
output_y = pad_packed_sequence(output_y,batch_first=True)[0]
if args.if_add_train_mask:
# Add mask according to adj
# pick node numbers of each graph according to values of each element in y_len
child_node_f_info = torch.matmul(node_f_pred, torch.FloatTensor(args.node_rules).cuda())
# Unpack y_pred according to y_len.
accumulator = 0
mask_list = []
y_pred_untrain = torch.tensor(y_pred.data, requires_grad=False).cuda()
for idx, each in enumerate(y_len):
y_pred_select = y_pred_untrain.index_select(dim=0, index=torch.LongTensor(list(range(accumulator, accumulator + each))).cuda())
y_pred_select = y_pred_select.index_select(dim=2, index=torch.LongTensor([2]).cuda())
# [2] means receiving edge # TODO: perhaps should add [3] which is bi-directional
adj_prob_from_y_pred = torch.sum(y_pred_select, dim=2)
child_info_batch = child_node_f_info.index_select(dim=0, index=torch.LongTensor([idx]).cuda()).squeeze()
node_f_pred_batch = my_decode_adj_cuda(adj_prob_from_y_pred, child_info_batch, node_f_pred.size(1))
accumulator += each
#if idx != 0:
mask_list.append(node_f_pred_batch)
mask_new = torch.cat(mask_list, dim=0)
node_f_pred_new = torch.mul(mask_new, node_f_pred)
else:
node_f_pred_new = node_f_pred
# use cross entropy loss
loss = 0
if args.loss_type == "mse":
direction_loss = my_cross_entropy(y_pred,output_y)
edge_f_loss = 0
node_f_loss = my_cross_entropy(node_f_pred, output_node_f)
else:
# direction_loss =
# print(node_f_pred.shape)
# print(output_node_f.shape)
# print(output_y.shape)
# direction_loss = binary_cross_entropy_weight(F.sigmoid(y_pred[:,:,-2:]),output_y[:,:,-2:])
# direction_loss = binary_cross_entropy_weight(torch.sigmoid(y_pred[:,:,-2:-1]),output_y[:,:,-2:-1])
# compute loss of last two dimension separately
# direction_loss = my_cross_entropy(torch.sigmoid(y_pred[:,:,-4:]),output_y[:,:,-4:],if_CE=True)
direction_loss = my_cross_entropy(y_pred[:,:,-4:], torch.argmax(output_y[:,:,-4:],dim=2),if_CE=True)
# edge_f_loss = my_cross_entropy(y_pred[:,:,:-2], torch.argmax(output_y[:,:,:-2],dim=2))
edge_f_loss = 0
node_f_loss = my_cross_entropy(node_f_pred_new, output_node_f,if_CE=True) #+ \
# binary_cross_entropy_weight(edge_f_pred, output_edge_f)
loss = args.edge_loss_w * (edge_f_loss + direction_loss) + args.node_loss_w * node_f_loss
loss.backward()
# update deterministic and lstm
optimizer_output.step()
optimizer_rnn.step()
scheduler_output.step()
scheduler_rnn.step()
if epoch % args.epochs_log==0 and batch_idx==0: # only output first batch's statistics
print('Epoch: {}/{}, train loss: {:.6f}, node_f_loss: {:.6f}, edge_f_loss: {:.6f}, direction_loss:{:.6f}, num_layer: {}, hidden: {}'.format(
# epoch, args.epochs,loss.data, node_f_loss.data, edge_f_loss.data, args.num_layers, args.hidden_size_rnn))
epoch, args.epochs,loss.data, node_f_loss.data, edge_f_loss, direction_loss.data, args.num_layers, args.hidden_size_rnn))
# logging
log_value('loss_'+args.fname, loss.data, epoch*args.batch_ratio+batch_idx)
feature_dim = N*M
loss_sum += loss.data*feature_dim
|
985,463 | a2b64528419b1111ff180157f318aaa5b90d9190 | """
Created on Sun Nov 19 16:35:26 2017
@author: Malte
"""
# Notes on a hierachical regression model (see http://twiecki.github.io/blog/2014/03/17/bayesian-glms-3/)
# for prediction of behavioral, cognitive and self-report data
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import pandas as pd
data = pd.read_csv('all_data.csv')
with pm.Model() as hierarchical_model:
mu_a = pm.Normal('mu_alpha', mu=0., sd=1)
sigma_a = pm.HalfCauchy('sigma_alpha', beta=1)
mu_b = pm.Normal('mu_beta', mu=0., sd=1)
sigma_b = pm.HalfCauchy('sigma_beta', beta=1)
# Intercept for each modality with parameterised EEG predictor, distributed around group mean mu_a
a = pm.Normal('alpha', mu=mu_a, sd=sigma_a, shape=len(data.eeg.unique()))
# Intercept for each modality with parameterised MRI predictor, distributed around group mean mu_a
b = pm.Normal('beta', mu=mu_b, sd=sigma_b, shape=len(data.mri.unique()))
# Model error
eps = pm.HalfCauchy('eps', beta=1)
# Expected value from EEG and MRI predictor
rt_est = a[eeg_idx] + b[mri_idx]
# Data likelihood
y_like = pm.Normal('y_like', mu=rt_est, sd=eps, observed=data.rt)
with hierarchical_model:
hierarchical_trace = pm.sample(njobs=4)
|
985,464 | 765a880eb989734ef2962e70091c1c3494128190 | """Tests for _sketches.py."""
import numpy as np
from numpy.testing import assert_, assert_equal
from scipy.linalg import clarkson_woodruff_transform
from scipy.linalg._sketches import cwt_matrix
from scipy.sparse import issparse, rand
from scipy.sparse.linalg import norm
class TestClarksonWoodruffTransform:
"""
Testing the Clarkson Woodruff Transform
"""
# set seed for generating test matrices
rng = np.random.RandomState(seed=1179103485)
# Test matrix parameters
n_rows = 2000
n_cols = 100
density = 0.1
# Sketch matrix dimensions
n_sketch_rows = 200
# Seeds to test with
seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431,
1302443994, 1521083269, 1501189312, 1126232505, 1533465685]
A_dense = rng.randn(n_rows, n_cols)
A_csc = rand(
n_rows, n_cols, density=density, format='csc', random_state=rng,
)
A_csr = rand(
n_rows, n_cols, density=density, format='csr', random_state=rng,
)
A_coo = rand(
n_rows, n_cols, density=density, format='coo', random_state=rng,
)
# Collect the test matrices
test_matrices = [
A_dense, A_csc, A_csr, A_coo,
]
# Test vector with norm ~1
x = rng.randn(n_rows, 1) / np.sqrt(n_rows)
def test_sketch_dimensions(self):
for A in self.test_matrices:
for seed in self.seeds:
sketch = clarkson_woodruff_transform(
A, self.n_sketch_rows, seed=seed
)
assert_(sketch.shape == (self.n_sketch_rows, self.n_cols))
def test_seed_returns_identical_transform_matrix(self):
for A in self.test_matrices:
for seed in self.seeds:
S1 = cwt_matrix(
self.n_sketch_rows, self.n_rows, seed=seed
).todense()
S2 = cwt_matrix(
self.n_sketch_rows, self.n_rows, seed=seed
).todense()
assert_equal(S1, S2)
def test_seed_returns_identically(self):
for A in self.test_matrices:
for seed in self.seeds:
sketch1 = clarkson_woodruff_transform(
A, self.n_sketch_rows, seed=seed
)
sketch2 = clarkson_woodruff_transform(
A, self.n_sketch_rows, seed=seed
)
if issparse(sketch1):
sketch1 = sketch1.todense()
if issparse(sketch2):
sketch2 = sketch2.todense()
assert_equal(sketch1, sketch2)
def test_sketch_preserves_frobenius_norm(self):
# Given the probabilistic nature of the sketches
# we run the test multiple times and check that
# we pass all/almost all the tries.
n_errors = 0
for A in self.test_matrices:
if issparse(A):
true_norm = norm(A)
else:
true_norm = np.linalg.norm(A)
for seed in self.seeds:
sketch = clarkson_woodruff_transform(
A, self.n_sketch_rows, seed=seed,
)
if issparse(sketch):
sketch_norm = norm(sketch)
else:
sketch_norm = np.linalg.norm(sketch)
if np.abs(true_norm - sketch_norm) > 0.1 * true_norm:
n_errors += 1
assert_(n_errors == 0)
def test_sketch_preserves_vector_norm(self):
n_errors = 0
n_sketch_rows = int(np.ceil(2. / (0.01 * 0.5**2)))
true_norm = np.linalg.norm(self.x)
for seed in self.seeds:
sketch = clarkson_woodruff_transform(
self.x, n_sketch_rows, seed=seed,
)
sketch_norm = np.linalg.norm(sketch)
if np.abs(true_norm - sketch_norm) > 0.5 * true_norm:
n_errors += 1
assert_(n_errors == 0)
|
985,465 | cdd0e8191c86cda0b84e69fca03ec699879fd5cf | from django.contrib import admin
from .models import *
class HorarioAdmin(admin.TabularInline):
model = Actividad
class ActividadAdmin(admin.ModelAdmin):
inlines = (HorarioAdmin,)
admin.site.register(Horario,ActividadAdmin) |
985,466 | e9e27d24dbefc052135fcec91feafb63ac13d2cb | nome = input()
salario = float(input())
vendas = float(input())
resultado = (vendas * 0.15) + salario
print("TOTAL = R$ %.2f" %resultado) |
985,467 | e7be459b73437dbab245bda45e8a57cb4ab86bf6 | import http.server
import socketserver
import webbrowser as web
from urllib.parse import urlparse
import serial
import time
alertMsg = bytearray([2,0,0,0])
path = "C:\\code\\GpioPlayground\\UsbReadWrite\\" # need trailing slash
class GetHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
parsed_path = urlparse(self.path)
print( "Headers are ",self.headers)
print( "Parsed path is",parsed_path.path)
self.send_response(200)
self.end_headers()
self.wfile.write("ok".encode())
return
def do_POST(self):
parsed_path = urlparse(self.path)
print( "Headers are ",self.headers)
print( "Parsed path is",parsed_path.path)
content_len = int(self.headers.get('content-length', 0))
print( "Content len is", content_len)
print( "body is: ", self.rfile.read(content_len))
self.send_response(200)
self.end_headers()
self.wfile.write('{"result":"ok"}'.encode())
return
if __name__ == '__main__':
server = socketserver.TCPServer(('', 8081), GetHandler)
print ('Starting server, use <Ctrl-C> to stop')
server.serve_forever()
|
985,468 | ae60d13758059235caa776f28278e8542fecbe53 | from import_export import resources
from libs.models import t_payment
from django.db.models import Count
from django.db import *
class DailyTransactions(resources.ModelResource):
class Meta:
model = t_payment
fields = ('purpose', 'currency', 'amount', 'commitment', 'timestamp') |
985,469 | 7cab02f81a48e8620dd5efb031e09405d624091e |
class Element:
"""
Класс - элемент односвязного списка.
Каждый экземпляр класса хранит:
- данные
- ссылку на следующий эл-т
"""
def __init__(self, value=None, next=None):
self._value = value
self._next = next
class LinkedList:
"""
Класс - односвязный список.
Определены операции:
- добавление в начало списка за О(1)
- добавление в конец списка за О(1)
- удаление из начала списка за О(1)
Все остальные операции будут стоить O(n).
"""
def __init__(self):
self._head = None
self._tail = None
self._length = 0
def push_front(self, x):
"""Добавление в начало списка"""
self._length += 1
self._head = Element(x, self._head)
if self._tail is None: # если список пуст, обновляем его конец
# получаем список из одного эл-та:
self._tail = self._head
def push_back(self, x):
"""Добавление в конец списка"""
self._length += 1
if self._head is None:
# получаем список из одного эл-та:
self._head = self._tail = Element(x, None)
else:
new = Element(x, None) # вставленный эл-т
prev_tail = self._tail # старый хвост
# меняю ссылку старого хвоста с None на new:
prev_tail._next = new
self._tail = new # обновляю значение хвоста
def pop_front(self):
"""Удаляет эл-т из начала списка и возвращает его"""
assert self._head is not None, "List is empty"
x = self._head._value
self._head = self._head._next
self._length -= 1
return x
def print_elems(self):
"""Выводит все эл-ты списка по порядку"""
elem = self._head
while elem is not None:
print(elem._value, end=' ')
elem = elem._next
print()
def list_length(self):
return self._length
def test():
a = LinkedList()
a.push_front(5)
a.push_front(6)
a.push_front(7)
a.push_back(1)
a.print_elems()
print(a.pop_front())
a.print_elems()
print(a.list_length())
if __name__ == '__main__':
test()
|
985,470 | b27300a5ab69a8c89976c8ed6b28b726939922af | import requests
import time
from bs4 import BeautifulSoup
url = 'https://bj.lianjia.com/ershoufang/'
page=('gp')
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept':'text/html;q=0.9,*/*;q=0.8',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding':'gzip',
'Connection':'close',
'Referer':'http://www.baidu.com/link?url=_andhfsjjjKRgEWkj7i9cFmYYGsisrnm2A-TN3XZDQXxvGsM9k9ZZSnikW2Yds4s&wd=&eqid=c3435a7d00006bd600000003582bfd1f'
}
def getprize(lj):
price = lj.find_all('div',attrs={'class':'priceInfo'})
tp = []
for a in price:
totalPrice =a.span.string
tp.append(totalPrice)
return tp
def getinfo(lj):
houseInfo=lj.find_all('div',attrs={'class':'houseInfo'})
hi=[]
for b in houseInfo:
house=b.get_text()
hi.append(house)
return hi
def getgz(lj):
followInfo=lj.find_all('div',attrs={'class':'followInfo'})
fi=[]
for c in followInfo:
follow=c.get_text()
fi.append(follow)
return fi
result=[]
for i in range(1,30):
i = str(i)
a = (url + page + i + '/')
r = requests.get(url=a,headers=headers)
lj = BeautifulSoup(r.text,'html.parser')
p = getprize(lj)
info = getinfo(lj)
gz = getgz(lj)
data = zip(p,info,gz)
for d in data:
ds = list(d)
result.append(ds)
print(result) |
985,471 | f6b0910bc8a5af6a00be65dc1c964b23f9d141f8 | #!/usr/bin/env python
import time
import sys
import roslib
roslib.load_manifest("gesture_rec")
import rospy
import tf
import math
import scipy.stats
from numpy import dot
import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from std_msgs.msg import Header, String, Float32MultiArray
from tf.transformations import quaternion_inverse, quaternion_matrix
from object_recognition_msgs.msg import RecognizedObjectArray
global left_arm_origin
global right_arm_origin
global head_origin
global left_arm_point
global right_arm_point
global head_point
global speech
global left_foot
global right_foot
global ground_truth
global storage
global write_speech
global tfl
global last_obj
global num_objs
num_objs = -1
last_obj = "unknown"
global scnd_to_lat_obj
scnd_to_lat_obj = "unknown"
storage = None
ground_truth = "None"
speech = []
write_speech = []
global state_dist
state_dist = dict()
global objects
objects = []
containers = {"pink_bowl": "salt", "green_bowl": "pepper", "light_green_bowl": "vanilla", "yellow_bowl": "chocolate"}
#TEMP HACK
#objects = [("pink_box", (1.4,-0.2,-0.5)), ("purple_cylinder", (1.4, 0.05, -0.5))]
#objects = [("light_green_bowl",(1.2, -0.37, -0.37)), ("green_bowl",(1.2, 0.07,-0.37))]
#objects = [("salt", (??, ??, ??)), ]
global t
t = 0.005
global variance
variance = 0.4
global word_probabilities
global vocabulary
global eps
eps = 0.0001
#global user
#user = 1
global pub
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
#David's Dependencies
import operator
import collections
import random
#David's global variables
ingredient_file = 'src/no_repeat_numbered.txt'
recipe_list = []
unigram_init = False
uni_counts = collections.Counter()
past_bigrams = {}
past_trigrams = {}
smoothing_coefficient = 0
#Recipe File Reader
def file_reader():
global smoothing_coefficient
vocabulary = collections.Counter()
f = open(ingredient_file, 'r')
for line in f:
clean_line = line.rstrip('\n').encode('utf-8')
recipe_list.append(clean_line)
vocabulary[clean_line] += 1
f.close()
#Witten-Bell Smoothing coefficient
smoothing_coefficient = float(len(recipe_list))/(len(recipe_list) + len(vocabulary))
#David's utility functions
def normalize(x): #for dictionary vectors
total = sum(x.values(), 0.0)
for key in x:
x[key] /= total
return x
def weight(x, weight):
for key in x:
x[key] *= weight
return x
#David's transition functions
def unigram_counter(mod):
global unigram_init
global uni_counts
if unigram_init == False:
for line in range(0, len(recipe_list)):
next_ing = recipe_list[line].split(' # ')
if int(next_ing[0]) != mod:
uni_counts[next_ing[1].split(',')[0]] += 1.0
uni_counts = normalize(uni_counts)
unigram_init = True
return uni_counts
def bigram_counter(previous_ingredient, mod):
if previous_ingredient in past_bigrams:
return past_bigrams[previous_ingredient]
else:
b_lam = smoothing_coefficient #need to test for best number
ni = collections.Counter()
for line in range(0, len(recipe_list)):
if previous_ingredient in recipe_list[line]:
next_ing = recipe_list[line + 1].split(' # ')
if int(next_ing[0]) != mod:
ni[next_ing[1].split(',')[0]] += 1.0
ni = normalize(ni)
if not list(ni.items()) or False:
past_bigrams[previous_ingredient] = weight(unigram_counter(mod), 1.0 - b_lam)
else:
past_bigrams[previous_ingredient] = weight(ni, b_lam) + weight(unigram_counter(mod), 1.0 - b_lam)
return past_bigrams[previous_ingredient]
def trigram_counter(prev_ing, prev_ing2, mod):
input_ings = prev_ing + ":" + prev_ing2
if input_ings in past_trigrams:
return past_trigrams[input_ings]
else:
t_lam = smoothing_coefficient
ni = collections.Counter()
for line in range(0, len(recipe_list)):
if prev_ing in recipe_list[line] and prev_ing2 in recipe_list[line+1]:
next_ing = recipe_list[line+2].split(' # ')
if int(next_ing[0]) != mod:
ni[next_ing[1].split(',')[0]] += 1.0
ni = normalize(ni)
if not list(ni.items()):
#return weight(bigram_counter(prev_ing2, mod), 1.0 - t_lam)
past_trigrams[input_ings] = weight(bigram_counter(prev_ing2, mod), 1.0 - t_lam)
else:
#return weight(ni, t_lam) + weight(bigram_counter(prev_ing2, mod), 1.0 - t_lam)
past_trigrams[input_ings] = weight(ni, t_lam) + weight(bigram_counter(prev_ing2, mod), 1.0 - t_lam)
return past_trigrams[input_ings]
#vector utilities
def norm(vec):
total = 0.0
for i in range(len(vec)):
total += vec[i] * vec[i]
return math.sqrt(total)
def sub_vec(v1,v2):
ret = []
for i in range(len(v1)):
ret += [v1[i]-v2[i]]
return tuple(ret)
def add_vec(v1,v2):
ret = []
for i in range(len(v1)):
ret += [v1[i]+v2[i]]
return tuple(ret)
def angle_between(origin, p1, p2):
v1 = sub_vec(p1, origin)
v2 = sub_vec(p2, origin)
return math.acos(dot(v1, v2)/(norm(v1)* norm(v2)))
#callbacks
def speech_callback(input):
global speech
speech = input.data.split()
def object_callback(input):
global objects
global variance
global num_objs
if(len(input.objects) == 1):
variance = 0.1
frame = "/base"
object_frame="/camera_rgb_optical_frame"
objects = []#[("None", (0,0,0))]
(translation,rotation) = tfl.lookupTransform(frame, object_frame, rospy.Time(0))
# (x,y,z) translation (q1, q2, q3, q4) quaternion
#process into
# (object_id, (x,y,z))
for i in range(len(input.objects)):
cur_obj = input.objects[i].type.key
cur_loc = input.objects[i].pose.pose.pose.position
cur_loc_tuple = (cur_loc.x, cur_loc.y, cur_loc.z, 1.0)
quaternified =dot(cur_loc_tuple, quaternion_matrix(rotation))
cur_loc_tuple = (translation[0] + quaternified[0],translation[1] + quaternified[1],translation[2] + quaternified[2])
objects.append((cur_obj, cur_loc_tuple))
if num_objs == -1:
num_objs = len(objects)
def truth_callback(input):
global ground_truth
ground_truth = input.data
def is_arm_null_gesture(arm_origin, arm_point):
if (arm_origin == None or arm_point == None):
return True
else:
min_angle = 10.0 #greater than 3.14, so should always be greatest angle
for obj in objects:
if angle_between(arm_origin, arm_point, obj[1]) < min_angle:
min_angle = angle_between(arm_origin, arm_point, obj[1])
return min_angle > 3.14159/6 or min_angle > angle_between(arm_origin, arm_point, right_foot) or min_angle > angle_between(arm_origin, arm_point, left_foot)
def is_head_null_gesture(origin, point):
return (origin == None or point == None)
def prob_of_sample(sample):
return scipy.stats.norm(0.0, math.sqrt(variance)).pdf(sample)
#fills body points from openni data
def fill_points(tfl):
try:
global user
frame = "/base" #"camera_link"
allFramesString = tfl.getFrameStrings()
onlyUsers = set([line for line in allFramesString if 'right_elbow_' in line])
n = len('right_elbow_')
userIDs = [el[n:] for el in onlyUsers]
user = ''
if len(userIDs) > 0:
mostRecentUID = userIDs[0]
mostRecentTime = tfl.getLatestCommonTime(frame, 'right_elbow_' + mostRecentUID).to_sec()
for uid in userIDs:
compTime = tfl.getLatestCommonTime(frame, 'right_elbow_' + uid).to_sec()
#rospy.loginfo("Diff time " + str(rospy.get_rostime().to_sec() - compTime))
if compTime >= mostRecentTime and rospy.get_rostime().to_sec() - compTime < 5:
user = uid
mostRecentTime = compTime
global left_arm_origin
global right_arm_origin
global head_origin
global head_point
global left_arm_point
global right_arm_point
global left_foot
global right_foot
(to_left_elbow,_) = tfl.lookupTransform(frame,"/left_elbow_" + user, rospy.Time(0))
(to_right_elbow,_) = tfl.lookupTransform(frame,"/right_elbow_" + user, rospy.Time(0))
(to_left_hand,_) = tfl.lookupTransform(frame,"/left_hand_" + user, rospy.Time(0))
(to_right_hand,_) = tfl.lookupTransform(frame,"/right_hand_" + user, rospy.Time(0))
(right_foot,_) = tfl.lookupTransform(frame, "/right_foot_" + user, rospy.Time(0))
(left_foot,_) = tfl.lookupTransform(frame, "/left_foot_" + user, rospy.Time(0))
(to_head,head_rot) = tfl.lookupTransform(frame,"/head_" + user, rospy.Time(0))
left_arm_origin = to_left_hand
left_arm_point = add_vec(to_left_hand, sub_vec(to_left_hand, to_left_elbow))
right_arm_origin = to_right_hand
right_arm_point = add_vec(to_right_hand, sub_vec(to_right_hand, to_right_elbow))
head_origin = to_head
head_temp = dot((0.0,0.0,-1.0,1.0), quaternion_matrix(quaternion_inverse(head_rot)))
head_point = (head_temp[0] + to_head[0], head_temp[1] + to_head[1], head_temp[2] + to_head[2])
#visualization for testing (verify head vector)
# marker = Marker()
# marker.header.frame_id = "camera_link"
# marker.header.stamp = rospy.Time(0)
# marker.type = marker.POINTS
# marker.action = marker.ADD
# marker.scale.x = 0.2
# marker.scale.y = 0.2
# marker.scale.z = 0.2
# marker.color.a = 1.0
# p1 = Point(right_arm_origin[0],right_arm_origin[1],right_arm_origin[2])
# p2 = Point(right_arm_point[0],right_arm_point[1],right_arm_point[2])
# p3 = Point(left_arm_origin[0],left_arm_origin[1],left_arm_origin[2])
# p4 = Point(left_arm_point[0],left_arm_point[1],left_arm_point[2])
# p5 = Point(head_origin[0],head_origin[1],head_origin[2])
# p6 = Point(head_point[0],head_point[1],head_point[2])
# marker.points += [p1, p2, p3, p4, p5, p6]
# pub.publish(marker)
return True
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
left_arm_point = None
right_arm_origin = None
left_arm_origin = None
right_arm_point = None
head_point = None
head_origin = None
left_foot = None
right_foot = None
return False
def baxter_init_response():
plt.ion()
plt.figure(figsize=(10,10))
plt.show()
def plot_respond():
plt.clf()
x = []
for word in state_dist.keys():
x.append(containers[word])
plt.bar(range(len(state_dist.keys())), state_dist.values(), align='center')
plt.xticks(range(len(state_dist.keys())), x, size='small')
font = {'family' : 'normal','weight' : 'bold','size' : 25}
matplotlib.rc('font', **font)
plt.ylim([0,1.0])
plt.draw()
def baxter_respond():
global speech
global last_obj
global scnd_to_lat_obj
most_likely = max(state_dist.iteritems(), key=operator.itemgetter(1))
if not (is_arm_null_gesture(right_arm_origin, right_arm_point) and \
is_arm_null_gesture(left_arm_origin, left_arm_point)) or containers[most_likely[0]] in speech:
if most_likely[1] > 0.9:
print "SEND PICKUP"
scnd_to_lat_obj = last_obj
last_obj = most_likely[0]
pub = rospy.Publisher('fetch_commands', String, queue_size=0)
#rospy.init_node('pointer', anonymous=True)
rate = rospy.Rate(10)
pub.publish(most_likely[0])
rate.sleep()
#print containers[objects[0][0]], speech
#elif len(objects) == 1 and not (is_arm_null_gesture(right_arm_origin, right_arm_point) and \
# is_arm_null_gesture(left_arm_origin, left_arm_point)) or containers[objects[0][0]] in speech:
# pub = rospy.Publisher('fetch_commands', String, queue_size=0)
# print "only one object left"
# scnd_to_lat_obj = last_obj
# last_obj = containers[most_likely[0]]
# #rospy.init_node('pointer', anonymous=True)
# rate = rospy.Rate(10)
# pub.publish(most_likely[0])
# rate.sleep()
speech = []
def update_model():
global state_dist
global speech
global num_objs
#if num_objs != len(objects):
# for obj in objects:
# state_dist[obj[0]] = 1.0/len(state_dist)
# num_objs = len(objects)
# return
prev_dist = state_dist
state_dist = dict()
#if we have no previous model, set to uniform
if len(prev_dist.keys()) == 0:
l = len(objects) *1.0
for obj in objects: #make sure this is a UID
prev_dist[obj[0]] = 1.0/l
for obj in objects:
print objects
obj_id = obj[0]
state_dist[obj_id] = 0.0
# transition update
for prev_id in prev_dist.keys():
if is_arm_null_gesture(right_arm_origin, right_arm_point) and \
is_arm_null_gesture(left_arm_origin, left_arm_point) \
and len(speech)==0:
t= 0.005
else:
t = bigram_counter(last_obj, -1)[containers[obj_id]]
#t = trigram_counter(containers[last_obj], containers[scnd_to_lat_obj], -1)[containers[obj_id]]
t *= 5
#t= 0.005
print "previous object: %s, estimation of object %s: %f" % (last_obj, containers[obj_id], t)
if prev_id == obj_id:
state_dist[obj_id] += (1-t)*prev_dist[prev_id]
else:
state_dist[obj_id] += t*prev_dist[prev_id]
# left arm
if not is_arm_null_gesture(left_arm_origin, left_arm_point):
l_arm_angle = angle_between(left_arm_origin, left_arm_point, obj[1])
#if l_arm_angle > 3.14/4:
# l_arm_angle = 3.14/2
state_dist[obj_id] *= prob_of_sample(l_arm_angle)
#right arm
if not is_arm_null_gesture(right_arm_origin, right_arm_point):
r_arm_angle = angle_between(right_arm_origin, right_arm_point, obj[1])
#if r_arm_angle > 3.14/4:
# r_arm_angle = 3.14/2
state_dist[obj_id] *= prob_of_sample(r_arm_angle)
#head
if False and not is_head_null_gesture(head_origin, head_point):
state_dist[obj_id] *= prob_of_sample(angle_between(head_origin, head_point, obj[1]))
#speech
for word in speech:
if word in vocabulary:
state_dist[obj_id] *= word_probabilities[obj_id].get(word, eps)
#normalize
total = sum(state_dist.values())
for obj in state_dist.keys():
state_dist[obj] = state_dist[obj] / total
global write_speech
write_speech = speech
def load_dict(filename):
global word_probabilities
global vocabulary
word_probabilities = dict()
vocabulary = set()
with open(filename) as f:
lines = f.read().split('\n')
for line in lines:
words = line.split()
print words
word_probabilities[words[0]] = dict()
for i in range(1, len(words)):
word_probabilities[words[0]][words[i]] = word_probabilities[words[0]].get(words[i], 0.0) + 1.0
vocabulary.add(words[i])
for word in word_probabilities.keys():
total = sum(word_probabilities[word].values())
for x in word_probabilities[word]:
word_probabilities[word][x] = word_probabilities[word][x]/ total
def write_output():
global write_speech
if storage:
output = [head_origin, head_point, left_arm_origin, left_arm_point, \
right_arm_origin, right_arm_point, left_foot, right_foot,\
ground_truth, write_speech, objects, max(state_dist.keys(), key=lambda x: state_dist[x]), time.clock()]
storage.write(str(output) + "\n")
write_speech = []
#objects
#arms
#head
#speech
#ground truth
def main():
global speech
global tfl
file_reader()
rospy.init_node('h2r_gesture')
load_dict(sys.argv[1])
tfl = tf.TransformListener()
rospy.Subscriber('speech_recognition', String, speech_callback, queue_size=1)
rospy.Subscriber('publish_detections_center/blue_labeled_objects', RecognizedObjectArray, object_callback, queue_size=1)
rospy.Subscriber('current_object', String, truth_callback, queue_size=1)
rate = rospy.Rate(30.0)
global storage
if len(sys.argv) > 2:
storage = storage = open(sys.argv[2], 'w')
global pub
pub = rospy.Publisher("test_marker", Marker)
marker = Marker()
marker.header.frame_id = "base" #"camera_link"
marker.header.stamp = rospy.Time(0)
marker.type = marker.POINTS
marker.action = marker.ADD
marker.scale.x = 0.2
marker.scale.y = 0.2
marker.scale.z = 0.2
marker.color.a = 1.0
# depth, right left, up down
#p1 = Point(1.2, 0.07,-0.37) # color bowl
#p2 = Point(1.2, -0.37, -0.37) #metal bowl
#p3 = Point(1.5, -0.37, -0.3) #plastic spoon
#p4 = Point(1.5, 0.07, -0.3) #silver spoon
#marker.points += [p1,p2,p3,p4]
baxter_init_response()
while not rospy.is_shutdown():
pub.publish(marker)
fill_points(tfl)
update_model()
if not len(state_dist.keys()) == 0:
for obj in objects:
marker.points.append(Point(obj[1][0], obj[1][1], obj[1][2]))
baxter_respond()
plot_respond()
write_output()
rate.sleep()
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: rosrun gesture_rec h2r_gesture.py <language model file> <storage file (optional)>"
sys.exit()
main()
|
985,472 | bff14e7bacbc4a372659c410d12cbaceed704436 | # Generated by Django 3.1 on 2021-03-16 04:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job', '0009_renderplan_admin_only'),
]
operations = [
migrations.AddField(
model_name='renderplan',
name='deadline_priority',
field=models.IntegerField(default=1, null=True),
),
]
|
985,473 | 6a76789a3df7ef37f25e5ef61e9aeb3e663f4dc3 | from flask import Flask, request, url_for, render_template, abort
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////home/pi/flask/sqlitest/test.db'
db = SQLAlchemy(app)
class Student(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True, nullable=False)
birth = db.Column(db.String)
gender = db.Column(db.String)
def __init__(self, id=1, name=None, birth=None, gender=None):
self.id = id
self.name = name
self.birth = birth
self.gender = gender
def __repr__(self):
return '<%r, %r, %r>' % (self.name,self.birth,self.gender)
@app.route('/show')
def show():
#row = db.session.execute("SELECT * from student").fetchall()
#row = Student.query.all()
row = db.session.query(Student).all()
if row:
return render_template('showorm.html', items=row)
return abort(404, "Page not found")
@app.route('/show/<item>')
def showitem(item):
#row = db.session.execute("SELECT * from student where name like "+"'%"+item+"%'").fetchall()
row = db.session.query(Student).filter(Student.name.like("%"+item+"%")).all()
print(row)
if row:
#return render_template('showitem.html', items=row)
return render_template('showorm.html', items=row)
return abort(404, "Page not found")
@app.route('/')
def hello_world():
return 'Hello World!'
|
985,474 | e37a5c5e7cbab364e7b653255d7ac6418718cb91 | def rev(x):
x = x[::-1]
print(x)
x = raw_input()
rev(x)
|
985,475 | d7db1bac945402f886c88eca115a09fbea4e2752 | from optparse import OptionParser
parser = OptionParser()
parser.add_option("--xAxis", dest="xAxis", default='', type="string", action="store", help="[moniker:filname.zpkl for x-axis")
parser.add_option("--yAxis", dest="yAxis", default='', type="string", action="store", help="[moniker:filname.zpkl for y-axis]")
parser.add_option("--vetoList", dest="vetoList", default='', type="string", action="store", help="vetoList pkl file")
parser.add_option("--run", dest="run", default=-1, type="int", action="store", help="which run")
parser.add_option("--percentile", dest="percentile", default=-1, type="float", action="store", help="percentile 0.99 -> plotrange adjusted to keep keep lower 99%")
parser.add_option("--plotDirectory", dest="plotDirectory", default='.', type="string", action="store", help="Where should the plots go?")
parser.add_option("--profile", dest="profile", action="store_true", help="Whether to do profile.")
(options, args) = parser.parse_args()
import ROOT
from DataFormats.FWLite import Events, Handle
from PhysicsTools.PythonAnalysis import *
from math import *
import sys, os, copy, random, subprocess, datetime
from helpers import load as zpklLoad
def load(f):
if f[-5:]=='.zpkl':
print "Loading zpkl file %s"%f
return zpklLoad(f)
else:
res={}
print "Loading root file %s"%f
c = ROOT.TChain("Events")
c.Add(f)
nEvents=c.GetEntries()
branches=c.GetListOfBranches()
brNames=[]
for ib in range(branches.GetSize()):
name = branches.At(ib).GetName()
if name not in ["run","lumi","event"]:
brNames.append(branches.At(ib).GetName())
for i in range(int(nEvents)):
if i%10000==0 and i>0: print "Loading at event %i / %i"%(i,nEvents)
c.GetEntry(i)
key=":".join([str(int(c.GetLeaf(x).GetValue())) for x in ["run","lumi","event"]])
res[key] = {name:c.GetLeaf(name).GetValue() for name in brNames}
# print key, res[key]
return res
ROOT.gStyle.SetOptStat(0)
if not hasattr(ROOT, "tdrStyle"):
ROOT.gROOT.ProcessLine(".L $CMSSW_BASE/src/MetTools/Commons/scripts/tdrstyle.C")
ROOT.setTDRStyle()
ROOT.tdrStyle.SetPadRightMargin(0.18)
ROOT.gROOT.ProcessLine(".L $CMSSW_BASE/src/MetTools/Commons/scripts/useNiceColorPalette.C")
ROOT.useNiceColorPalette(255)
maxEvts=-1
if ':' in options.xAxis:
moniker_x, ifile_x = options.xAxis.split(':')
else:
moniker_x, ifile_x = None, options.xAxis
if ':' in options.yAxis:
moniker_y, ifile_y = options.yAxis.split(':')
else:
moniker_y, ifile_y = None, options.yAxis
vetoList=[]
if options.vetoList!='':
import pickle
vetoList = pickle.load(file(options.vetoList))
evts1 = load(ifile_x)
evts2 = load(ifile_y)
print "File %s for x-axis has %i events"%(ifile_x,len(evts1))
print "File %s for y-axis has %i events"%(ifile_y,len(evts2))
if options.run>0:
for e in evts1.keys():
if not e.startswith(str(options.run)+':'):
del evts1[e]
for e in evts2.keys():
if not e.startswith(str(options.run)+':'):
del evts2[e]
print "After selecting run %i: File %s for x-axis has %i events"%(options.run, ifile_x,len(evts1))
print "After selecting run %i: File %s for y-axis has %i events"%(options.run, ifile_y,len(evts2))
if vetoList!=[]:
for v in vetoList:
if evts1.has_key(v):
del evts1[v]
if evts2.has_key(v):
del evts2[v]
print "After vetoList: File %s for x-axis has %i events"%(ifile_x,len(evts1))
print "After vetoList: File %s for y-axis has %i events"%(ifile_y,len(evts2))
commonKeys = [val for val in evts1.keys() if val in evts2.keys()]
assert len(commonKeys)>0, "0 events!"
print "Found",len(commonKeys),"common events"
ks1 = evts1[commonKeys[0]].keys()
ks1.sort()
ks2 = evts2[commonKeys[0]].keys()
ks2.sort()
if ks1!=ks2:
print "Warning! Not the same keys!"
print "1st:",ks1
print "2nd:",ks2
scatterPlots = [val for val in ks1 if val in ks2]
if not os.path.exists(options.plotDirectory):
os.makedirs(options.plotDirectory)
for s in scatterPlots:
if options.percentile>0:
import numpy as np
l = np.array([evts1[k][s] for k in commonKeys] + [evts2[k][s] for k in commonKeys])
maxVal = np.percentile(l, 100*options.percentile)
else:
maxVal = max([evts1[k][s] for k in commonKeys] + [evts2[k][s] for k in commonKeys])
if not maxVal>0:
print "maxVal non-positive %f"%maxVal
continue
if 'mult' in s:
binning=[min(100,int(maxVal)),0,int(maxVal)]
pbinning=[min(20,int(maxVal)),0,int(maxVal)]
else:
scale = 10**(int(log(maxVal,10))-1)
maximum= (floor(maxVal/scale)+1)*scale
binning=[100,0,maximum]
pbinning=[20,0,maximum]
histo = ROOT.TH2F('h'+s,'h'+s,*(binning+binning))
profile = ROOT.TProfile('p'+s,'p'+s,*(pbinning+[0,maximum]))
for k in commonKeys:
histo.Fill(evts1[k][s], evts2[k][s])
profile.Fill(evts1[k][s], evts2[k][s])
mstr_x = ' ('+moniker_x+')' if moniker_x else ''
mstr_y = ' ('+moniker_y+')' if moniker_y else ''
histo.GetXaxis().SetTitle(s+mstr_x)
histo.GetXaxis().SetLabelSize(0.03)
histo.GetXaxis().SetTitleSize(0.0385)
histo.GetYaxis().SetTitle(s+mstr_y)
histo.GetYaxis().SetLabelSize(0.03)
histo.GetYaxis().SetTitleSize(0.0385)
profile.GetXaxis().SetTitle(s+mstr_x)
profile.GetXaxis().SetLabelSize(0.03)
profile.GetXaxis().SetTitleSize(0.0385)
profile.GetYaxis().SetTitle(s+mstr_y)
profile.GetYaxis().SetLabelSize(0.03)
profile.GetYaxis().SetTitleSize(0.0385)
profile.SetLineColor(ROOT.kGray)
profile.SetMarkerStyle(0)
profile.SetMarkerSize(0)
c1 = ROOT.TCanvas()
histo.Draw('colz')
# fit=True
# tf1=ROOT.TF1("lin","pol1",0,histo.GetXaxis().GetXmax())
# try:
# frs=profile.Fit(tf1,'S')
# fit=(frs.Status()==0)
# except:
# fit=False
c1.SetLogz()
histo.Draw('colz')
if options.profile:
profile.Draw("eh1same")
l=ROOT.TLine(0,0,histo.GetXaxis().GetXmax(),histo.GetXaxis().GetXmax())
l.Draw()
# if fit:
# text=ROOT.TLatex()
# text.SetNDC()
# text.SetTextSize(0.04)
# text.SetTextAlign(11)
# text.SetTextColor(ROOT.kRed)
# text.DrawLatex(0.2,0.9,"Fit: "+str(round(tf1.GetParameter(0),4))+"+("+str(round(tf1.GetParameter(1),4))+")*x")
text=ROOT.TLatex()
text.SetNDC()
text.SetTextSize(0.04)
text.SetTextAlign(11)
text.DrawLatex(0.2,0.9,"CMS Preliminary")
# text.SetTextColor(ROOT.kRed)
c1.Print(options.plotDirectory+'/'+s+'.png')
c1.Print(options.plotDirectory+'/'+s+'.pdf')
c1.Print(options.plotDirectory+'/'+s+'.root')
c1.RedrawAxis()
del profile
del histo
del c1
|
985,476 | 8e56fa74320eca3f0d667822cdc3a6474003f721 | from __future__ import absolute_import
import shutil
import os.path
from tempfile import mkdtemp
from celery import shared_task
from django.conf import settings
from django.core.mail import EmailMessage
from maja_newsletter.mailer import Mailer
from maja_newsletter.models import Newsletter
from maja_newsletter.utils.excel import make_excel_content
from maja_newsletter.utils.vcard import make_vcard_content
from maja_newsletter.settings import EXPORT_FILE_NAME, EXPORT_EMAIL_SUBJECT, VERBOSE_MAILER
@shared_task
def celery_send_newsletter(newsletter_id, *args, **kwargs):
    """Send a newsletter asynchronously via Celery.

    Looks up the ``Newsletter`` by primary key and, when the mailer reports
    it can be sent, dispatches all pending messages in a single run.

    :param newsletter_id: primary key of the Newsletter to send
    :return: ``mailer.can_send`` (truthy when sending was possible), or
        ``False`` when no newsletter with that id exists
    """
    try:
        newsletter = Newsletter.objects.get(pk=newsletter_id)
        mailer = Mailer(newsletter, verbose=VERBOSE_MAILER)
        if mailer.can_send:
            mailer.run(send_all=True)
        return mailer.can_send
    except Newsletter.DoesNotExist:
        return False
@shared_task
def export_excel(data, recipient, export_name=None, headers=None, force_csv=False, encoding='utf8'):
    """Build an Excel/CSV export in a temp directory and e-mail it to *recipient*.

    The file is generated into a fresh temporary directory, renamed with the
    extension chosen by ``make_excel_content``, attached to an e-mail, sent,
    and the temporary directory is removed afterwards.

    :param data: rows to export
    :param recipient: e-mail address that receives the attachment
    :param export_name: base file name for the generated attachment
    :param headers: optional column headers
    :param force_csv: when true, force CSV output instead of a real Excel file
    :param encoding: character encoding used for the export content
    """
    filedir = mkdtemp()
    output_file = os.path.join(filedir, export_name)
    with open(output_file, 'wb') as output:
        output, mimetype, file_ext = make_excel_content(data, output, headers, force_csv, encoding)
    # make_excel_content decides the real extension (xls vs csv), so copy the
    # bare temp file to a correctly suffixed final path before attaching it.
    final_path = '{0}.{1}'.format(output_file, file_ext)
    shutil.copyfile(output_file, final_path)
    testo = EXPORT_FILE_NAME
    message = EmailMessage(EXPORT_EMAIL_SUBJECT, testo, settings.DEFAULT_FROM_EMAIL, [recipient])
    message.attach_file(final_path, mimetype=mimetype)
    message.send()
    shutil.rmtree(filedir)
@shared_task
def export_vcard(data, recipient, export_name=None):
    """Build a vCard export in a temp directory and e-mail it to *recipient*.

    Mirrors ``export_excel`` but serializes contacts through
    ``make_vcard_content``; the temporary directory is removed after sending.

    :param data: contacts to export
    :param recipient: e-mail address that receives the attachment
    :param export_name: base file name for the generated attachment
    """
    filedir = mkdtemp()
    output_file = os.path.join(filedir, export_name)
    with open(output_file, 'wb') as output:
        output, mimetype, file_ext = make_vcard_content(data, output)
    # Copy the temp file to its final, correctly suffixed name before attaching.
    final_path = '{0}.{1}'.format(output_file, file_ext)
    shutil.copyfile(output_file, final_path)
    testo = EXPORT_FILE_NAME
    message = EmailMessage(EXPORT_EMAIL_SUBJECT, testo, settings.DEFAULT_FROM_EMAIL, [recipient])
    message.attach_file(final_path, mimetype=mimetype)
    message.send()
    shutil.rmtree(filedir)
|
985,477 | bb6ca5428dbfac9711b2d1cb5418b92d3fc4c6fe | print('Welcome to the Caesar Cipher, enter a sentence to be ciphered')
string = str(input()).lower()
print('And how many spaces you want to cipher it')
cipherconstant = int(input())

# Map each lowercase letter to its 0-based position in the alphabet,
# and build the reverse map (position -> letter) for decoding the shift.
alphabet = {chr(ord('a') + i): i for i in range(26)}
alphabet1 = {v: k for k, v in alphabet.items()}

# Fixes over the previous version:
# - wrap-around used a single "- 26" so shifts > 25 produced wrong letters;
#   "% 26" handles any non-negative shift correctly.
# - literal digits in the input were mistaken for encoded letter codes and
#   shifted; now only alphabetic characters are ciphered, everything else
#   (spaces, punctuation, digits) passes through unchanged.
ciphered_chars = []
for c in string:
    if c in alphabet:
        ciphered_chars.append(alphabet1[(alphabet[c] + cipherconstant) % 26])
    else:
        ciphered_chars.append(c)

ciphered = ''.join(ciphered_chars)
print(ciphered)
|
985,478 | b9a82f8af07bad907cb623b70a96dff46f0cf83d | from django.db import models
class CrudUser(models.Model):
    """A person record managed through the app's CRUD views."""
    name=models.CharField(max_length=200)  # full name
    address=models.CharField(max_length=200)  # postal address, free text
    phone=models.IntegerField()  # phone number stored as an integer (no leading zeros/"+")
    def __str__(self):
        """Return the user's name as the human-readable representation."""
        return self.name
|
985,479 | 0617f7494a88575834f30f19f89bc9ef6c76a96f | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 29 21:38:12 2019
@author: crdea
"""
|
985,480 | 16121736eef6e49cf07d3bfbe45610fd8c260646 | from pszt_neural_network.neuron import Neuron
class Layer:
    """One layer of a feed-forward network: a list of identically shaped neurons."""

    def __init__(self, type, number_of_inputs, number_of_neurons):
        """Create a layer of ``number_of_neurons`` neurons.

        :param type: activation selector: 1 -> sigmoid, 0 -> linear
        :param number_of_inputs: number of inputs feeding each neuron
        :param number_of_neurons: how many neurons the layer holds
        """
        self.neuronNumber = number_of_neurons
        self.neurons = self.create_layer(self.neuronNumber, type, number_of_inputs)
        self.layerInput = []
        self.layerOutput = []

    @staticmethod
    def create_layer(neuron_number, type, number_of_inputs):
        """Build and return the list of neurons making up the layer.

        :param neuron_number: number of neurons to create
        :param type: activation selector: 1 -> sigmoid, 0 -> linear
        :param number_of_inputs: number of inputs feeding each neuron
        :return: list of freshly constructed neurons
        """
        return [Neuron(type, number_of_inputs) for _ in range(neuron_number)]

    def get_layer_output(self):
        """Collect and return the current output of every neuron, in order."""
        self.layerOutput = [unit.get_neuron_output() for unit in self.neurons]
        return self.layerOutput

    def get_neuron_number(self):
        """Return how many neurons the layer contains."""
        return self.neuronNumber

    def get_layer_neurons(self):
        """Return the list of neurons belonging to the layer."""
        return self.neurons

    def get_neuron(self, position):
        """Return the neuron stored at index ``position``."""
        return self.neurons[position]

    def setInput(self, layerInput):
        """Remember ``layerInput`` and forward it to every neuron."""
        self.layerInput = layerInput
        for unit in self.neurons:
            unit.set_neuron_input(layerInput)

    def get_layer_input(self):
        """Return the input most recently fed to the layer."""
        return self.layerInput
985,481 | 26a1c19ada359f907d0a5eb169222d707445cdc5 | import climate
import collections
import lmj.cubes
import lmj.plot
import matplotlib.colors
import numpy as np
import pandas as pd
COLORS = {
'marker00-r-head-back': '#9467bd',
'marker01-r-head-front': '#9467bd',
'marker02-l-head-front': '#9467bd',
'marker03-l-head-back': '#9467bd',
'marker07-r-shoulder': '#111111',
'marker13-r-fing-index': '#111111',
'marker14-r-mc-outer': '#111111',
'marker19-l-shoulder': '#111111',
'marker25-l-fing-index': '#111111',
'marker26-l-mc-outer': '#111111',
'marker31-sternum': '#111111',
'marker34-l-ilium': '#2ca02c',
'marker35-r-ilium': '#2ca02c',
'marker36-r-hip': '#2ca02c',
'marker40-r-heel': '#1f77b4',
'marker41-r-mt-outer': '#1f77b4',
'marker42-r-mt-inner': '#1f77b4',
'marker43-l-hip': '#2ca02c',
'marker47-l-heel': '#d62728',
'marker48-l-mt-outer': '#d62728',
'marker49-l-mt-inner': '#d62728',
}
RCM = matplotlib.colors.LinearSegmentedColormap('b', dict(
red= ((0, 0.8, 0.8), (1, 0.8, 0.8)),
green=((0, 0.1, 0.1), (1, 0.1, 0.1)),
blue= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
GCM = matplotlib.colors.LinearSegmentedColormap('b', dict(
red= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
green=((0, 0.6, 0.6), (1, 0.6, 0.6)),
blue= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
BCM = matplotlib.colors.LinearSegmentedColormap('r', dict(
red= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
green=((0, 0.5, 0.5), (1, 0.5, 0.5)),
blue= ((0, 0.7, 0.7), (1, 0.7, 0.7)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
OCM = matplotlib.colors.LinearSegmentedColormap('r', dict(
red= ((0, 1.0, 1.0), (1, 1.0, 1.0)),
green=((0, 0.5, 0.5), (1, 0.5, 0.5)),
blue= ((0, 0.0, 0.0), (1, 0.0, 0.0)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
PCM = matplotlib.colors.LinearSegmentedColormap('r', dict(
red= ((0, 0.6, 0.6), (1, 0.6, 0.6)),
green=((0, 0.4, 0.4), (1, 0.4, 0.4)),
blue= ((0, 0.7, 0.7), (1, 0.7, 0.7)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
# fewf, http://stackoverflow.com/questions/4494404
def contig(cond):
    """Return an (n, 2) int array of [start, stop) index pairs, one per
    contiguous run of True values in the boolean array *cond*."""
    # Each change point between consecutive elements starts or ends a run.
    boundaries = 1 + np.flatnonzero(np.diff(cond))
    if cond[0]:
        # Array opens inside a run: it starts at index 0.
        boundaries = np.concatenate(([0], boundaries))
    if cond[-1]:
        # Array closes inside a run: it ends at len(cond).
        boundaries = np.concatenate((boundaries, [cond.size]))
    return boundaries.reshape((-1, 2))
def main(root, pattern='*'):
    """3D scatter-plot of marker positions captured while the left heel is slow.

    For every trial under *root* matching *pattern*: compute marker speeds,
    find the frames where the left-heel speed drops below 1, collect each
    marker's trajectory over those frames, and plot a random ~1% subsample
    in 3D colored by marker speed.
    """
    points = collections.defaultdict(list)
    for trial in lmj.cubes.Experiment(root).trials_matching(pattern):
        trial.load()
        trial.add_velocities(7)
        # Left heel (marker 40); speed is the Euclidean norm of (vx, vy, vz).
        lheel = trial.trajectory(40, velocity=True)
        lheel['speed'] = np.sqrt((lheel[['vx', 'vy', 'vz']] ** 2).sum(axis=1))
        # Contiguous [start, stop) frame ranges where the left heel is slow.
        lslow = contig((lheel.speed < 1).values)
        # Right heel (marker 47) speed is computed but currently unused.
        rheel = trial.trajectory(47, velocity=True)
        rheel['speed'] = np.sqrt((rheel[['vx', 'vy', 'vz']] ** 2).sum(axis=1))
        #rslow = contig((rheel.speed < 2).values)
        for m in trial.marker_columns:
            t = trial.trajectory(m, velocity=True)
            t['speed'] = np.sqrt((t[['vx', 'vy', 'vz']] ** 2).sum(axis=1))
            for s, e in lslow:
                if e - s > 0:
                    points[m].append(t.iloc[s:e, :])
    for m in sorted(points):
        print(m, len(points[m]))
    kw = dict(s=30, vmin=0, vmax=1, lw=0, alpha=0.5, cmap=BCM)
    with lmj.plot.axes3d() as ax:
        # Dark square marking the origin for reference.
        ax.scatter([0], [0], [0], color='#111111', alpha=0.5, s=200, marker='s', lw=0)
        for m, dfs in points.items():
            for df in dfs:
                # subsample.
                sel = np.random.rand(len(df)) < 0.01
                # Note: y/z axes are swapped so height is plotted vertically.
                ax.scatter(df[sel].x, df[sel].z, df[sel].y, c=df[sel].speed, **kw)
if __name__ == '__main__':
climate.call(main)
|
985,482 | ce886db663dd4715f70342b8d34150565832dd10 | import functools
import math
import operator
import os
import traceback
from datetime import datetime
from xml.etree import ElementTree
from pipeline.hpc.cmd import ExecutionError
from pipeline.hpc.logger import Logger
from pipeline.hpc.resource import IntegralDemand, ResourceSupply, FractionalDemand
from pipeline.hpc.engine.gridengine import GridEngine, GridEngineJobState, GridEngineJob, AllocationRule, \
GridEngineType, _perform_command, GridEngineDemandSelector, GridEngineJobValidator
class SunGridEngine(GridEngine):
_DELETE_HOST = 'qconf -de %s'
_SHOW_PE_ALLOCATION_RULE = 'qconf -sp %s | grep "^allocation_rule" | awk \'{print $2}\''
_REMOVE_HOST_FROM_HOST_GROUP = 'qconf -dattr hostgroup hostlist %s %s'
_REMOVE_HOST_FROM_QUEUE_SETTINGS = 'qconf -purge queue slots %s@%s'
_SHUTDOWN_HOST_EXECUTION_DAEMON = 'qconf -ke %s'
_REMOVE_HOST_FROM_ADMINISTRATIVE_HOSTS = 'qconf -dh %s'
_QSTAT = 'qstat -u "*" -r -f -xml'
_QHOST = 'qhost -q -xml'
_QSTAT_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
_QMOD_DISABLE = 'qmod -d %s@%s'
_QMOD_ENABLE = 'qmod -e %s@%s'
_SHOW_EXECUTION_HOST = 'qconf -se %s'
_KILL_JOBS = 'qdel %s'
_FORCE_KILL_JOBS = 'qdel -f %s'
_BAD_HOST_STATES = ['u', 'E', 'd']
def __init__(self, cmd_executor, queue, hostlist, queue_default):
self.cmd_executor = cmd_executor
self.queue = queue
self.hostlist = hostlist
self.queue_default = queue_default
self.tmp_queue_name_attribute = 'tmp_queue_name'
# todo: Move to script init function
self.gpu_resource_name = os.getenv('CP_CAP_GE_CONSUMABLE_RESOURCE_NAME_GPU', 'gpus')
self.mem_resource_name = os.getenv('CP_CAP_GE_CONSUMABLE_RESOURCE_NAME_RAM', 'ram')
def get_jobs(self):
try:
output = self.cmd_executor.execute(SunGridEngine._QSTAT)
except ExecutionError:
Logger.warn('Grid engine jobs listing has failed.')
return []
jobs = {}
root = ElementTree.fromstring(output)
running_jobs = []
queue_info = root.find('queue_info')
for queue_list in queue_info.findall('Queue-List'):
queue_name = queue_list.findtext('name')
queue_running_jobs = queue_list.findall('job_list')
for job_list in queue_running_jobs:
job_queue_name = ElementTree.SubElement(job_list, self.tmp_queue_name_attribute)
job_queue_name.text = queue_name
running_jobs.extend(queue_running_jobs)
job_info = root.find('job_info')
pending_jobs = job_info.findall('job_list')
for job_list in running_jobs + pending_jobs:
job_requested_queue = job_list.findtext('hard_req_queue')
job_actual_queue, job_host = self._parse_queue_and_host(job_list.findtext(self.tmp_queue_name_attribute))
if job_requested_queue and job_requested_queue != self.queue \
or job_actual_queue and job_actual_queue != self.queue:
# filter out a job with actual/requested queue specified
# if a configured queue is different from the job's one
continue
if not job_requested_queue and not job_actual_queue and not self.queue_default:
# filter out a job without actual/requested queue specified
# if a configured queue is not a default queue
continue
root_job_id = job_list.findtext('JB_job_number')
job_tasks = self._parse_array(job_list.findtext('tasks'))
job_ids = ['{}.{}'.format(root_job_id, job_task) for job_task in job_tasks] or [root_job_id]
job_name = job_list.findtext('JB_name')
job_user = job_list.findtext('JB_owner')
job_state = GridEngineJobState.from_letter_code(job_list.findtext('state'))
job_datetime = self._parse_date(
job_list.findtext('JAT_start_time') or job_list.findtext('JB_submission_time'))
job_hosts = [job_host] if job_host else []
requested_pe = job_list.find('requested_pe')
job_pe = requested_pe.get('name') if requested_pe is not None else 'local'
job_cpu = int(requested_pe.text if requested_pe is not None else '1')
job_gpu = 0
job_mem = 0
hard_requests = job_list.findall('hard_request')
for hard_request in hard_requests:
hard_request_name = hard_request.get('name')
if hard_request_name == self.gpu_resource_name:
job_gpu_request = hard_request.text or '0'
try:
job_gpu = int(job_gpu_request)
except ValueError:
Logger.warn('Job #{job_id} by {job_user} has invalid gpu requirement '
'which cannot be parsed: {request}'
.format(job_id=root_job_id, job_user=job_user, request=job_gpu_request))
Logger.warn(traceback.format_exc())
if hard_request_name == self.mem_resource_name:
job_mem_request = hard_request.text or '0G'
try:
job_mem = self._parse_mem(job_mem_request)
except Exception:
Logger.warn('Job #{job_id} by {job_user} has invalid mem requirement '
'which cannot be parsed: {request}'
.format(job_id=root_job_id, job_user=job_user, request=job_mem_request))
Logger.warn(traceback.format_exc())
for job_id in job_ids:
if job_id in jobs:
job = jobs[job_id]
if job_host:
job.hosts.append(job_host)
else:
jobs[job_id] = GridEngineJob(
id=job_id,
root_id=root_job_id,
name=job_name,
user=job_user,
state=job_state,
datetime=job_datetime,
hosts=job_hosts,
cpu=job_cpu,
gpu=job_gpu,
mem=job_mem,
pe=job_pe
)
return jobs.values()
def _parse_date(self, date):
return datetime.strptime(date, SunGridEngine._QSTAT_DATETIME_FORMAT)
def _parse_queue_and_host(self, queue_and_host):
return queue_and_host.split('@')[:2] if queue_and_host else (None, None)
def _parse_array(self, array_jobs):
result = []
if not array_jobs:
return result
for interval in array_jobs.split(","):
if ':' in interval:
array_borders, _ = interval.split(':')
start, stop = array_borders.split('-')
result += list(range(int(start), int(stop) + 1))
else:
result += [int(interval)]
return result
def _parse_mem(self, mem_request):
"""
See https://linux.die.net/man/1/sge_types
"""
if not mem_request:
return 0
modifiers = {
'k': 1000, 'm': 1000 ** 2, 'g': 1000 ** 3,
'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3
}
if mem_request[-1] in modifiers:
number = int(mem_request[:-1])
modifier = modifiers[mem_request[-1]]
else:
number = int(mem_request)
modifier = 1
size_in_bytes = number * modifier
size_in_gibibytes = int(math.ceil(size_in_bytes / modifiers['G']))
return size_in_gibibytes
def disable_host(self, host):
self.cmd_executor.execute(SunGridEngine._QMOD_DISABLE % (self.queue, host))
def enable_host(self, host):
self.cmd_executor.execute(SunGridEngine._QMOD_ENABLE % (self.queue, host))
def get_pe_allocation_rule(self, pe):
exec_result = self.cmd_executor.execute(SunGridEngine._SHOW_PE_ALLOCATION_RULE % pe)
return AllocationRule(exec_result.strip()) if exec_result else AllocationRule.pe_slots()
def delete_host(self, host, skip_on_failure=False):
self._shutdown_execution_host(host, skip_on_failure=skip_on_failure)
self._remove_host_from_queue_settings(host, self.queue, skip_on_failure=skip_on_failure)
self._remove_host_from_host_group(host, self.hostlist, skip_on_failure=skip_on_failure)
self._remove_host_from_administrative_hosts(host, skip_on_failure=skip_on_failure)
self._remove_host_from_grid_engine(host, skip_on_failure=skip_on_failure)
def get_host_supplies(self):
output = self.cmd_executor.execute(SunGridEngine._QHOST)
root = ElementTree.fromstring(output)
for host in root.findall('host'):
for queue in host.findall('queue[@name=\'%s\']' % self.queue):
host_states = queue.find('queuevalue[@name=\'state_string\']').text or ''
if any(host_state in self._BAD_HOST_STATES for host_state in host_states):
continue
host_slots = int(queue.find('queuevalue[@name=\'slots\']').text or '0')
host_used = int(queue.find('queuevalue[@name=\'slots_used\']').text or '0')
host_resv = int(queue.find('queuevalue[@name=\'slots_resv\']').text or '0')
yield ResourceSupply(cpu=host_slots) - ResourceSupply(cpu=host_used + host_resv)
def get_host_supply(self, host):
for line in self.cmd_executor.execute_to_lines(SunGridEngine._SHOW_EXECUTION_HOST % host):
if "processors" in line:
return ResourceSupply(cpu=int(line.strip().split()[1]))
return ResourceSupply()
def get_engine_type(self):
return GridEngineType.SGE
def _shutdown_execution_host(self, host, skip_on_failure):
_perform_command(
action=lambda: self.cmd_executor.execute(SunGridEngine._SHUTDOWN_HOST_EXECUTION_DAEMON % host),
msg='Shutdown GE host execution daemon.',
error_msg='Shutdown GE host execution daemon has failed.',
skip_on_failure=skip_on_failure
)
def _remove_host_from_queue_settings(self, host, queue, skip_on_failure):
_perform_command(
action=lambda: self.cmd_executor.execute(SunGridEngine._REMOVE_HOST_FROM_QUEUE_SETTINGS % (queue, host)),
msg='Remove host from queue settings.',
error_msg='Removing host from queue settings has failed.',
skip_on_failure=skip_on_failure
)
def _remove_host_from_host_group(self, host, hostgroup, skip_on_failure):
_perform_command(
action=lambda: self.cmd_executor.execute(SunGridEngine._REMOVE_HOST_FROM_HOST_GROUP % (host, hostgroup)),
msg='Remove host from host group.',
error_msg='Removing host from host group has failed.',
skip_on_failure=skip_on_failure
)
def _remove_host_from_grid_engine(self, host, skip_on_failure):
_perform_command(
action=lambda: self.cmd_executor.execute(SunGridEngine._DELETE_HOST % host),
msg='Remove host from GE.',
error_msg='Removing host from GE has failed.',
skip_on_failure=skip_on_failure
)
def _remove_host_from_administrative_hosts(self, host, skip_on_failure):
_perform_command(
action=lambda: self.cmd_executor.execute(SunGridEngine._REMOVE_HOST_FROM_ADMINISTRATIVE_HOSTS % host),
msg='Remove host from list of administrative hosts.',
error_msg='Removing host from list of administrative hosts has failed.',
skip_on_failure=skip_on_failure
)
def is_valid(self, host):
try:
self.cmd_executor.execute_to_lines(SunGridEngine._SHOW_EXECUTION_HOST % host)
output = self.cmd_executor.execute(SunGridEngine._QHOST)
root = ElementTree.fromstring(output)
for host_object in root.findall('host[@name=\'%s\']' % host):
for queue in host_object.findall('queue[@name=\'%s\']' % self.queue):
host_states = queue.find('queuevalue[@name=\'state_string\']').text or ''
for host_state in host_states:
if host_state in self._BAD_HOST_STATES:
Logger.warn('Execution host %s GE state is %s which makes host invalid.'
% (host, host_state))
return False
if host_states:
Logger.warn('Execution host %s GE state is not empty but is considered valid: %s.'
% (host, host_states))
return True
except RuntimeError as e:
Logger.warn('Execution host %s validation has failed in GE: %s' % (host, e))
return False
def kill_jobs(self, jobs, force=False):
job_ids = [str(job.id) for job in jobs]
self.cmd_executor.execute((SunGridEngine._FORCE_KILL_JOBS if force else SunGridEngine._KILL_JOBS) % ' '.join(job_ids))
class SunGridEngineDemandSelector(GridEngineDemandSelector):
    """Translates pending SGE jobs into outstanding resource demands."""
    def __init__(self, grid_engine):
        self.grid_engine = grid_engine
    def select(self, jobs):
        """Yield a remaining resource demand per pending job.

        Jobs whose parallel environment has a fractional allocation rule
        first consume whatever supply the cluster still has; only the
        leftover is yielded as a FractionalDemand. Jobs with integral rules
        yield their full demand as an IntegralDemand. Jobs whose demand is
        already fully covered by existing supply are logged and skipped.
        """
        remaining_supply = functools.reduce(operator.add, self.grid_engine.get_host_supplies(), ResourceSupply())
        allocation_rules = {}
        for job in sorted(jobs, key=lambda job: job.root_id):
            # Cache allocation-rule lookups per parallel environment.
            allocation_rule = allocation_rules[job.pe] = allocation_rules.get(job.pe) \
                or self.grid_engine.get_pe_allocation_rule(job.pe)
            if allocation_rule in AllocationRule.fractional_rules():
                remaining_demand = FractionalDemand(cpu=job.cpu, gpu=job.gpu, mem=job.mem, owner=job.user)
                remaining_demand, remaining_supply = remaining_demand.subtract(remaining_supply)
            else:
                remaining_demand = IntegralDemand(cpu=job.cpu, gpu=job.gpu, mem=job.mem, owner=job.user)
            if not remaining_demand:
                Logger.warn('Problematic job #{job_id} {job_name} by {job_user} is pending for an unknown reason. '
                            'The job requires resources which are already satisfied by the cluster: '
                            '{job_cpu} cpu, {job_gpu} gpu, {job_mem} mem.'
                            .format(job_id=job.id, job_name=job.name, job_user=job.user,
                                    job_cpu=job.cpu, job_gpu=job.gpu, job_mem=job.mem),
                            crucial=True)
                continue
            yield remaining_demand
class SunGridEngineJobValidator(GridEngineJobValidator):
    """Splits pending jobs into satisfiable and unsatisfiable ones."""
    def __init__(self, grid_engine, instance_max_supply, cluster_max_supply):
        self.grid_engine = grid_engine
        self.instance_max_supply = instance_max_supply  # biggest single instance's resources
        self.cluster_max_supply = cluster_max_supply  # whole cluster's resources at max size
    def validate(self, jobs):
        """Return ``(valid_jobs, invalid_jobs)``.

        Jobs with a fractional allocation rule are checked against the whole
        cluster's maximum supply; jobs with an integral rule must fit on the
        biggest single instance. Unsatisfiable jobs are logged as crucial
        warnings and collected into ``invalid_jobs``.
        """
        valid_jobs, invalid_jobs = [], []
        allocation_rules = {}
        for job in jobs:
            # Cache allocation-rule lookups per parallel environment.
            allocation_rule = allocation_rules[job.pe] = allocation_rules.get(job.pe) \
                or self.grid_engine.get_pe_allocation_rule(job.pe)
            job_demand = IntegralDemand(cpu=job.cpu, gpu=job.gpu, mem=job.mem)
            if allocation_rule in AllocationRule.fractional_rules():
                if job_demand > self.cluster_max_supply:
                    Logger.warn('Invalid job #{job_id} {job_name} by {job_user} requires resources '
                                'which cannot be satisfied by the cluster: '
                                '{job_cpu}/{available_cpu} cpu, '
                                '{job_gpu}/{available_gpu} gpu, '
                                '{job_mem}/{available_mem} mem.'
                                .format(job_id=job.id, job_name=job.name, job_user=job.user,
                                        job_cpu=job.cpu, available_cpu=self.cluster_max_supply.cpu,
                                        job_gpu=job.gpu, available_gpu=self.cluster_max_supply.gpu,
                                        job_mem=job.mem, available_mem=self.cluster_max_supply.mem),
                                crucial=True)
                    invalid_jobs.append(job)
                    continue
            else:
                if job_demand > self.instance_max_supply:
                    Logger.warn('Invalid job #{job_id} {job_name} by {job_user} requires resources '
                                'which cannot be satisfied by the biggest instance in cluster: '
                                '{job_cpu}/{available_cpu} cpu, '
                                '{job_gpu}/{available_gpu} gpu, '
                                '{job_mem}/{available_mem} mem.'
                                .format(job_id=job.id, job_name=job.name, job_user=job.user,
                                        job_cpu=job.cpu, available_cpu=self.instance_max_supply.cpu,
                                        job_gpu=job.gpu, available_gpu=self.instance_max_supply.gpu,
                                        job_mem=job.mem, available_mem=self.instance_max_supply.mem),
                                crucial=True)
                    invalid_jobs.append(job)
                    continue
            valid_jobs.append(job)
        return valid_jobs, invalid_jobs
|
985,483 | b8629abf6139035bccde9ed0b2459d8ebcaceac3 | """Representation of VOI Nearest Scooter Sensors."""
from datetime import timedelta
import logging
from geopy.distance import distance
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_BATTERY_LEVEL,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_NAME,
LENGTH_METERS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.json import load_json, save_json
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
CONF_TOKEN_FILE = "token_file"
DEFAULT_NAME = "VOI Nearest Scooter"
DEFAULT_TOKEN_FILE = "voi-token.json"
ICON = "mdi:scooter"
ATTRIBUTION = "Data provided by VOI"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TOKEN_FILE, default=DEFAULT_TOKEN_FILE): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the VOI nearest-scooter sensor platform.

    Reads the sensor name and token-file path from the platform config,
    verifies that the token file exists and contains an authentication
    token, and registers one sensor anchored at the Home Assistant
    instance's home coordinates.

    :raises ValueError: if the token file is missing or has no
        ``authentication_token`` key.
    """
    name = config.get(CONF_NAME)
    token_path = hass.config.path(config.get(CONF_TOKEN_FILE))
    latitude = hass.config.latitude
    longitude = hass.config.longitude
    token_cache = load_json(token_path)
    if not token_cache or "authentication_token" not in token_cache:
        raise ValueError("Missing or bad token file.")
    add_entities([VoiNearestScooterSensor(name, token_path, latitude, longitude)])
class VoiNearestScooterApi:
"""Representation of the VOI API."""
def __init__(self, token_path):
"""Initialize the VOI API."""
self._accessToken = None
self._tokenPath = token_path
def __get_authentication_token(self):
"""Load the authentication token from the token file."""
cache = load_json(self._tokenPath)
return cache["authentication_token"]
def __set_authentication_token(self, token):
"""Save the authentication token to the token file."""
cache = {"authentication_token": token}
save_json(self._tokenPath, cache)
@staticmethod
def __call(method, resource, headers=None, json=None):
"""Call the VOI API and parse the response as JSON."""
result = requests.request(method, resource, headers=headers, json=json)
if result:
try:
return result.json()
except ValueError:
pass
_LOGGER.debug("Erroneous response (%s)", result)
return result
def __authenticate(self):
"""Authenticate to the VOI API."""
body = {"authenticationToken": self.__get_authentication_token()}
result = self.__call("POST", "https://api.voiapp.io/v1/auth/session", json=body)
if result and "accessToken" in result and "authenticationToken" in result:
self._accessToken = result["accessToken"]
self.__set_authentication_token(result["authenticationToken"])
else:
_LOGGER.warning("Authentication failed: Erroneous response (%s)", result)
def __request(self, method, resource, retry=True):
"""Issue an authenticated request to the VOI API."""
headers = {"x-access-token": self._accessToken}
result = self.__call(method, resource, headers=headers)
if result:
return result
elif result.status_code == 401 and retry:
self.__authenticate()
return self.__request(method, resource, retry=False)
else:
raise requests.HTTPError(result)
def get_zones(self, latitude, longitude):
"""Get the list of zones of geo coordinates from the VOI API."""
result = self.__request(
"GET",
"https://api.voiapp.io/v1/zones?lat={}&lng={}".format(latitude, longitude),
)
if result and "zones" in result:
return result["zones"]
def get_vehicles(self, latitude, longitude):
"""Get the list of vehicles of a zone from the VOI API."""
result = self.get_zones(latitude, longitude)
if result and "zone_id" in result[0]:
return self.__request(
"GET",
"https://api.voiapp.io/v1/vehicles/zone/{}/ready".format(
result[0]["zone_id"]
),
)
class VoiNearestScooterSensor(Entity):
"""Representation of a VOI Nearest Scooter Sensor."""
def __init__(self, name, token_path, latitude, longitude):
"""Initialize the VOI Nearest Scooter Sensor."""
self._api = VoiNearestScooterApi(token_path)
self._name = name
self._latitude = latitude
self._longitude = longitude
self._state = None
self._attributes = {}
@property
def name(self):
"""Return the name of the VOI Nearest Scooter Sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of the VOI Nearest Scooter Sensor."""
return LENGTH_METERS
@property
def icon(self):
"""Icon to use in the frontend of the VOI Nearest Scooter Sensor."""
return ICON
@property
def state(self):
"""Return the state of the VOI Nearest Scooter Sensor."""
return self._state
@property
def extra_state_attributes(self):
"""Return the state attributes of the VOI Nearest Scooter Sensor."""
return self._attributes
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Fetch new state data for the VOI Nearest Scooter Sensor."""
self._state = None
self._attributes = {}
vehicles = self._api.get_vehicles(self._latitude, self._longitude)
scooter = {}
if vehicles:
for vehicle in vehicles:
location_vehicle = (vehicle["location"][0], vehicle["location"][1])
location_hass = (self._latitude, self._longitude)
vehicle["distance"] = distance(location_vehicle, location_hass).m
scooter = sorted(vehicles, key=lambda item: item["distance"])[0]
if scooter:
self._state = round(scooter["distance"])
self._attributes[ATTR_LATITUDE] = round(scooter["location"][0], 5)
self._attributes[ATTR_LONGITUDE] = round(scooter["location"][1], 5)
self._attributes[ATTR_BATTERY_LEVEL] = round(scooter["battery"])
self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
|
985,484 | 1e2013984ed94e71f48fcd844d584585b5b3d59d | """
Parsing of C expressions.
"""
|
985,485 | 95de726eefdaa534d7f75fc46b7e543a73354612 | from sys import stdin
# Carlos Arboleda - ADA - Camilo Rocha
# Potentiometers.py hecho en clase
# Se utilizo la clase segmentation tree para poder hacer las operaciones y luego ir partiendo de a dos en dos y luego devolverme operando para arriba
# Se discutio con la clase y fue hecho en clase
class segtree(object):
    """Segment tree over a list of numbers, supporting range-sum queries
    and single-element updates, each in O(log n)."""

    def __init__(self, a):
        """Build the tree from the values in *a*."""
        self.__a = list(a)
        self.__s = [None] * (len(self.__a) << 2)
        self.__build_tree(0, 0, len(self.__a))

    def __len__(self):
        """Return the number of stored values."""
        return len(self.__a)

    def __str__(self):
        """Return the string form of the internal node array."""
        return str(self.__s)

    def __left(self, i):
        """Index of the left child of node *i*."""
        return (i << 1) + 1

    def __right(self, i):
        """Index of the right child of node *i*."""
        return (i << 1) + 2

    def __build_tree(self, i, low, hi):
        """Fill node *i* with the sum of __a[low..hi) and return that sum."""
        if hi - low == 1:
            self.__s[i] = self.__a[low]
        else:
            mid = (low + hi) >> 1
            self.__s[i] = (self.__build_tree(self.__left(i), low, mid)
                           + self.__build_tree(self.__right(i), mid, hi))
        return self.__s[i]

    def query_range(self, i, j):
        """Return the sum of the values in the half-open range [i..j)."""
        assert 0 <= i <= j <= len(self)
        return self.__query_aux(0, 0, len(self), i, j)

    def __query_aux(self, i, low, hi, start, end):
        """Sum over the overlap of __a[low..hi) and __a[start..end)."""
        if hi <= start or end <= low:
            return 0
        if start <= low and hi <= end:
            return self.__s[i]
        mid = (low + hi) >> 1
        return (self.__query_aux(self.__left(i), low, mid, start, end)
                + self.__query_aux(self.__right(i), mid, hi, start, end))

    def updateValue(self, i, x):
        """Set the i-th value to *x* and refresh all affected sums."""
        assert 0 <= i < len(self)
        self.__update_aux(0, 0, len(self), i, x)

    def __update_aux(self, i, low, hi, j, x):
        """Propagate the new value of __a[j] through node *i*'s subtree."""
        assert low <= j < hi
        if hi - low == 1:
            self.__a[j] = self.__s[i] = x
        else:
            mid = (low + hi) >> 1
            if j < mid:
                self.__s[i] = (self.__update_aux(self.__left(i), low, mid, j, x)
                               + self.__s[self.__right(i)])
            else:
                self.__s[i] = (self.__s[self.__left(i)]
                               + self.__update_aux(self.__right(i), mid, hi, j, x))
        return self.__s[i]
def main():
    """Solve the "Potentiometers" problem from stdin.

    Input format: repeated blocks starting with a count k of values,
    followed by k integers (one per line), then commands ``M x y`` (print
    the sum of elements x..y, 1-based inclusive) or ``S x y`` (set element
    x to y) until a line starting with 'E' (END). A k of 0 terminates the
    whole input. Cases are separated by a blank line.
    """
    caso=0  # case counter for the "Case N:" headers
    k=int(stdin.readline())
    #print(type(k))
    while k!=0:
        listap=[]
        for i in range(k):
            n=int(stdin.readline())
            listap.append(n)
        st=segtree(listap) ## build the segment tree over the k values
        accion=stdin.readline()
        caso+=1
        print("Case "+str(caso)+":")
        while accion[0]!='E': ## professor's trick: stop on anything starting with 'E' instead of matching the full END keyword
            letra=accion.split()[0]
            x,y=[int (x) for x in accion.split()[1:]] # command letter, then x and y
            #print(letra,x,y,"sirve")
            if letra=="M":
                # M: measure - print the sum over the 1-based inclusive range [x..y]
                print(st.query_range(x-1,y))
                #pass
            elif letra=="S":
                # S: set the x-th value (1-based) to y
                st.updateValue(x-1,y)
            accion=stdin.readline()
        k=int(stdin.readline())
        if k==0:
            print(end="")
        else:
            # blank line between consecutive cases
            print()
main()
|
985,486 | 9ec9e613d8070197d46b4568ed9bf341d2b91d46 | #coding:utf8
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Questions(models.Model):
    """A question submitted by a site visitor through the public form."""
    class Meta():
        # Admin display names (Russian: "Questions from visitors").
        verbose_name_plural = 'Вопросы от посетителей'
        verbose_name = 'Вопросы от посетителей'
    name=models.CharField(max_length=255,verbose_name=u'ФИО')  # visitor's full name
    email=models.EmailField(blank=True,null=True,verbose_name=u'email')  # optional reply address
    quest=models.TextField(blank=True,null=True,verbose_name=u'Вопрос')  # the question text
    date_create = models.DateTimeField(auto_now_add=True, verbose_name=u'Дата созданния')  # set once on creation
    def __unicode__(self):
        """Return "id|created|first 10 chars of the question" for admin lists."""
        return '%s|%s|%s' % (self.id, self.date_create, self.quest[0:10])
|
985,487 | 60a7a5bc0648aac5dddfb36e5d071689e5b66730 | from fastai.vision import Path, ImageDataBunch, cnn_learner, get_transforms, imagenet_stats, models, error_rate
import numpy as np
# Train a resnet34 image classifier with fastai and export it for inference.
path = Path('/usr/local/airflow/data/')
np.random.seed(42)  # fixed seed so the random train/valid split is reproducible
# Build a databunch from an image-folder layout: 20% validation split,
# standard augmentations, 224px inputs, normalized with ImageNet statistics.
data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2,
ds_tfms=get_transforms(), size=224, num_workers=0).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
learn.fit_one_cycle(4)  # 4 epochs of one-cycle training
learn.save('stage-1')  # checkpoint the weights
learn.export()  # serialize the whole Learner for later inference
|
985,488 | f5281884bcf267ca26c3d43d7e0aa9df645b0917 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import time
# LinkedIn "Easy Apply" bot: logs in, walks the first page of a pre-filtered
# job search, and auto-submits single-step applications.
# Fill in real credentials/phone before running.
ACCOUNT_EMAIL = "your-email"
ACCOUNT_PASSWORD = "your-password"
PHONE = "your-phone"

chrome_driver_path = "/Users/dorukhanuzun/chrome-driver/chromedriver"
driver = webdriver.Chrome(chrome_driver_path)
# Pre-filtered search: Easy Apply (f_LF=f_AL) Python developer roles in Ontario.
driver.get("https://www.linkedin.com/jobs/search/?f_LF=f_AL&geoId=105149290&keywords=python%20developer&location=Ontario%2C%20Canada&sortBy=R")

sign_in = driver.find_element_by_link_text("Sign in")
sign_in.click()
time.sleep(3)  # crude waits throughout; no explicit WebDriverWait is used

username = driver.find_element_by_id("username")
username.send_keys(ACCOUNT_EMAIL)
password = driver.find_element_by_id("password")
password.send_keys(ACCOUNT_PASSWORD)
password.send_keys(Keys.ENTER)
time.sleep(3)

# Each clickable job card on the results page.
all_listings = driver.find_elements_by_css_selector(".job-card-container--clickable")
for listing in all_listings:
    listing.click()
    time.sleep(3)
    try:
        apply_button = driver.find_element_by_css_selector(".jobs-s-apply button")
        apply_button.click()
        time.sleep(3)
        # Fill the phone number only if the form left it blank.
        phone = driver.find_element_by_class_name("fb-single-line-text__input")
        if phone.text == "":
            phone.send_keys(PHONE)
        submit_button = driver.find_element_by_css_selector("footer button")
        # Multi-step applications show a "Continue" button -- dismiss and
        # discard those instead of submitting.
        if submit_button.get_attribute("data-control-name") == "continue_unify":
            dismiss_button = driver.find_element_by_class_name("artdeco-modal__dismiss")
            dismiss_button.click()
            time.sleep(2)
            discard_button = driver.find_elements_by_class_name("artdeco-modal__confirm-dialog-btn")[1]
            discard_button.click()
            print("Complex Application, skipped")
            continue
        else:
            submit_button.click()
            time.sleep(3)
            close_button = driver.find_element_by_class_name("artdeco-modal__dismiss")
            close_button.click()
    except NoSuchElementException:
        # Listing has no Easy Apply flow (external application).
        print("No application button, skipped.")
        continue
# NOTE(review): indentation was lost in transit; this final sleep appears to
# sit after the loop, before quitting -- confirm against the original file.
time.sleep(5)
driver.quit()
|
985,489 | b7a74d755e6ce41510001e6a8ee9d812b4184b75 | import options
import glob
import json
import os
import random
import sqlite3
import string
from base64 import b64decode, b64encode
from hashlib import blake2b
from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes
from bismuthclient import bismuthutil
__version__ = '0.0.1'

# Load node configuration and open a read cursor on the Bismuth ledger DB.
config = options.Get()
config.read()
ledger_path = config.ledger_path
connection = sqlite3.connect(ledger_path)
cursor = connection.cursor()
# Helper for building BIS URL transaction deep links.
Bismuthutil = bismuthutil.BismuthUtil()
def test_db():
    """Sanity-check the ledger DB by reading the highest block height.

    Prints the last block height on success.

    Raises:
        ValueError: if the ledger cannot be queried or holds no blocks.
    """
    try:
        cursor.execute("SELECT MAX (block_height) FROM transactions")
        result = cursor.fetchone()[0]
    except (sqlite3.Error, TypeError) as e:
        # Narrowed from a bare `except:` (which also swallowed the
        # function's own ValueError and KeyboardInterrupt); keep the
        # ValueError contract for callers but preserve the cause.
        raise ValueError("DB test failed") from e
    if result:
        print(f"DB test passed, last block: {result}")
    else:
        raise ValueError("DB test failed")
def load_address(file="wallet.der"):
    """Return the "Address" field from a JSON wallet file.

    Args:
        file: path to the wallet file (JSON object with an "Address" key).

    Returns:
        The wallet address string.
    """
    # json.load reads straight from the file object (was loads(read())).
    with open(file) as infile:
        return json.load(infile)["Address"]
def blake2b_generate(nonce):
    """Return the 20-byte BLAKE2b hex digest of the string *nonce*."""
    digest = blake2b(digest_size=20)
    digest.update(nonce.encode())
    return digest.hexdigest()
def process(nonce):
    """Mark *nonce* as processed by touching an empty marker file.

    The marker lives in shielded_history/ and is named after the BLAKE2b
    digest of the nonce; its mere existence is the record (see
    is_processed()).
    """
    if not os.path.exists("shielded_history"):
        os.mkdir("shielded_history")
    hash_path = f'shielded_history/{blake2b_generate(nonce)}'
    if not os.path.exists(hash_path):
        with open(hash_path, "w") as nonce_file:
            nonce_file.write("")
def is_processed(nonce):
    """Return True if *nonce* already has a marker file (see process()).

    Also creates the shielded_history/ directory on first use, matching
    the original behaviour.
    """
    if not os.path.exists("shielded_history"):
        os.mkdir("shielded_history")
    hash_path = f'shielded_history/{blake2b_generate(nonce)}'
    # Collapsed the if/else returning True/False into a direct boolean.
    return os.path.exists(hash_path)
def find_txs(signals_dict, anchor):
    """Return ledger transactions whose operation matches a token signal.

    Args:
        signals_dict: iterable of operation/signal strings to match.
        anchor: lowest block height to include.

    Returns:
        List of dicts with keys "address", "block_height" and "openfield"
        (openfield parsed from JSON), ordered by ascending block height.
    """
    placeholders = ','.join('?' for _ in signals_dict)
    # `anchor` is now bound as a parameter instead of %-interpolated into
    # the SQL text (removes an injection vector / malformed-input crash).
    query = ('SELECT address, block_height, openfield FROM transactions '
             'WHERE operation IN (%s) AND block_height >= ? '
             'ORDER BY block_height ASC' % placeholders)
    result = cursor.execute(query, list(signals_dict) + [anchor]).fetchall()
    return [
        {"address": address, "block_height": height,
         "openfield": json.loads(openfield)}
        for address, height, openfield in result
    ]
def signals_generate(size):
    """Return *size* random 10-character alphanumeric signal strings."""
    return [
        ''.join(random.choice(string.ascii_letters + string.digits)
                for _ in range(10))
        for _ in range(size)
    ]
def token_key_generate(length=32):  # AES-256 default
    """Return *length* random bytes for use as an AES key.

    The default of 32 bytes gives AES-256. The parameter was renamed from
    `len`, which shadowed the builtin; all visible callers pass no args.
    """
    return get_random_bytes(length)
def encrypt_data(token_name, token_amount, operation, recipient, key_encoded):
    """AES-EAX-encrypt a token action for posting on-chain.

    Name and amount are zero-padded to 10 characters so the ciphertext
    length does not leak their size.

    Args:
        token_name: token identifier string.
        token_amount: amount, as a string.
        operation: action verb ("make" or "move" in this module).
        recipient: destination address.
        key_encoded: base64-encoded AES key shared by token holders.

    Returns:
        Dict with base64-encoded "nonce", "ciphertext" and "tag".
    """
    key = b64decode(key_encoded)
    cipher = AES.new(key, AES.MODE_EAX)
    nonce = cipher.nonce
    ciphertext, tag = cipher.encrypt_and_digest(json.dumps({"name": token_name.zfill(10),
                                                            "amount": token_amount.zfill(10),
                                                            "recipient": recipient,
                                                            "operation": operation}).encode())
    return {"nonce": b64encode(nonce).decode(),
            "ciphertext": b64encode(ciphertext).decode(),
            "tag": b64encode(tag).decode()}
def decrypt(enc_dict, key_encoded):
    """Decrypt and authenticate a payload produced by encrypt_data().

    Returns:
        The decoded action dict (zero-padding stripped from "name" and
        "amount"), or None -- after printing a warning -- if the MAC
        check fails.
    """
    key = b64decode(key_encoded)
    cipher = AES.new(key, AES.MODE_EAX, nonce=b64decode(enc_dict["nonce"]))
    try:
        # Verify the tag BEFORE trusting the plaintext. The original
        # parsed JSON and stripped padding first, so a tampered message
        # could crash json.loads before authentication ever ran.
        raw = cipher.decrypt_and_verify(b64decode(enc_dict["ciphertext"]),
                                        b64decode(enc_dict["tag"]))
    except ValueError:
        print("Key incorrect or message corrupted")
        return None
    plaintext = json.loads(raw.decode())
    plaintext["name"] = plaintext["name"].lstrip("0")  # remove leading 0s
    plaintext["amount"] = plaintext["amount"].lstrip("0")  # remove leading 0s
    return plaintext
def load_token_dict(token):
    """Load the saved key/signal dict for *token* (a filename under
    shielded_keys/), or return False if no such file exists."""
    print(f"Loading keys for {token}")
    token_path = f'shielded_keys/{token}'
    if not os.path.exists(token_path):
        return False
    with open(token_path) as token_keys:
        return json.loads(token_keys.read())
def save_token_key(token, signals, public_signal, key):
    """Persist a new token's secrets to shielded_keys/<token>.json.

    Writes nothing if the file already exists, so existing keys are never
    overwritten.

    Args:
        token: token name.
        signals: list of private operation signal strings.
        public_signal: single-element list; only its first item is stored.
        key: raw AES key bytes (stored base64-encoded).
    """
    print(public_signal)
    if not os.path.exists("shielded_keys"):
        os.mkdir("shielded_keys")
    token_path = f'shielded_keys/{token}.json'
    if not os.path.exists(token_path):
        keys = {}
        keys["name"] = token
        keys["key"] = b64encode(key).decode()
        keys["signals"] = signals
        keys["public_signal"] = public_signal[0]
        with open(token_path, "w") as token_keys:
            token_keys.write(json.dumps(keys))
def tokens_update(token_key_dict: dict):
    """Replay all on-chain transactions for one token into local state.

    Scans the ledger for transactions whose operation matches one of the
    token's private signals, decrypts each payload with the token master
    key, and applies "move"/"make" actions to the local shielded account
    files. Each transaction's nonce is recorded (process()) so a
    transaction is applied at most once across runs.
    """
    found_txs = find_txs(signals_dict=token_key_dict["signals"], anchor=0)
    print("Existing transactions for the given master key:")
    # First pass: diagnostic dump only.
    for transaction in found_txs:  # print
        try:
            print("transaction", transaction)
            action = decrypt(transaction["openfield"], token_key_dict["key"])
            print(action)
        except Exception as e:
            print(f"Corrupted message: {e}")
    # Second pass: actually apply unprocessed transactions.
    for transaction in found_txs:  # transactions
        try:
            action = decrypt(transaction["openfield"], token_key_dict["key"])
            if not is_processed(transaction["openfield"]["nonce"]):
                # Mark before applying so a crash cannot double-apply.
                process(transaction["openfield"]["nonce"])
                if action["operation"] == "move":
                    account_add_to(account=action["recipient"], token=action["name"], amount=action["amount"], debtor=transaction["address"])
                elif action["operation"] == "make":
                    token_genesis(account=action["recipient"], token=action["name"], amount=action["amount"])
            else:
                print("Skipping processed transaction")
        except Exception as e:
            # Unlike the first pass, failures here abort the sync.
            print(f"Corrupted message: {e}")
            raise
def load_signal(signals):
    """Pick and return one signal string at random from *signals*."""
    chosen = random.choice(signals)
    return chosen
def account_file_load(account):
    """Return the {token_name: balance} dict for *account*.

    Creates shielded_accounts/ and an empty account record on first use.
    """
    # makedirs(exist_ok=True) replaces the racy exists()+mkdir pair; the
    # original also had spurious f-prefixes on constant strings.
    os.makedirs("shielded_accounts", exist_ok=True)
    account_path = f"shielded_accounts/{account}.json"
    if not os.path.exists(account_path):
        # Initialise a fresh, empty account record.
        with open(account_path, "w") as token_keys:
            token_keys.write(json.dumps({"tokens": {}}))
    with open(account_path) as file:
        return json.loads(file.read())["tokens"]
def account_file_save(account, data):
    """Write *data* ({token: balance}) as the account's token record."""
    payload = json.dumps({"name": account, "tokens": data})
    with open(f"shielded_accounts/{account}.json", "w") as account_file:
        account_file.write(payload)
def token_genesis(account: str, token: str, amount: int):
    """Credit the initial supply of *token* to *account* (idempotent).

    Does nothing if the account already holds an entry for the token, so
    replaying an already-processed genesis cannot mint extra supply.
    """
    amount = int(amount)
    data = account_file_load(account)
    # Membership test replaces the original `try: data[token]` probe with
    # a bare except (which also hid unrelated errors).
    if token not in data:
        data[token] = amount
        account_file_save(account, data)
def get_accounts():
    """Load and return every account record under shielded_accounts/."""
    accounts = []
    for path in glob.glob("shielded_accounts/*"):
        with open(path) as infile:
            accounts.append(json.loads(infile.read()))
    return accounts
def account_add_to(account: str, token: str, amount: int, debtor: str):
    """Transfer *amount* of *token* from *debtor* to *account*.

    Credits the recipient only if the debit from *debtor* succeeds
    (i.e. the debtor has a sufficient balance).
    """
    amount = int(amount)
    if account_take_from(debtor, token, amount):
        data = account_file_load(account)
        # dict.get replaces the original try/bare-except increment.
        data[token] = data.get(token, 0) + amount
        account_file_save(account, data)
def account_take_from(account: str, token: str, amount: int):
    """Debit *amount* of *token* from *account* if the balance allows.

    Returns:
        True if the debit happened; False on insufficient balance, a
        missing token entry, or a corrupted (non-numeric) balance.
    """
    amount = int(amount)
    data = account_file_load(account)
    try:
        balance = data[token]
        sufficient = balance - amount >= 0
    except (KeyError, TypeError):
        # Narrowed from a bare except: missing entry or non-numeric value.
        print("Insufficient balance or corrupted file")
        return False
    if sufficient:
        data[token] -= amount
        account_file_save(account, data)
        return True
    return False
def load_tokens():
    """Return the basenames of every saved token key file."""
    return [os.path.basename(p) for p in glob.glob('shielded_keys/*.json')]
def move_token(token_name: str, recipient: str, amount: str):
    """Build an encrypted "move" transaction for *amount* of *token_name*.

    Loads the token's saved master key, encrypts the transfer payload,
    picks a random private signal as the operation field, and returns the
    pieces needed to post the transaction plus a ready-made BIS URL.

    Returns:
        Dict with "data" (JSON ciphertext envelope), "operation" (signal)
        and "bisurl".
    """
    token_key_dict = load_token_dict(token=f"{token_name}.json")
    print("token_key_dict", token_key_dict)
    encrypted_data_move = encrypt_data(token_name=token_name,
                                       token_amount=str(amount),
                                       recipient=recipient,
                                       operation="move",
                                       key_encoded=token_key_dict["key"])
    # Round-trip sanity check: decrypt what was just encrypted.
    print(decrypt(encrypted_data_move, token_key_dict["key"]))
    operation = load_signal(token_key_dict["signals"])
    data = json.dumps(encrypted_data_move)
    bisurl = Bismuthutil.create_bis_url(recipient, 0, operation, data)
    print("move (data)", data)
    print("move (operation)", operation)
    print("BISURL to move", bisurl)
    return {"data": data, "operation": operation, "bisurl": bisurl}
def generate_token(token_name: str, recipient: str, amount: str):
    """Create a new shielded token and build its encrypted "make" tx.

    Generates and saves a fresh master key plus 100 private signals and
    one public signal (save_token_key never overwrites an existing
    token), then encrypts the genesis payload.

    Returns:
        Dict with "data", "operation", "bisurl" and the serialized
        "keyfile" to share with token holders.
    """
    save_token_key(token=token_name,
                   signals=signals_generate(100),
                   public_signal=signals_generate(1),
                   key=token_key_generate())
    token_key_dict = load_token_dict(token=f"{token_name}.json")
    print("token_key_dict", token_key_dict)
    encrypted_data_make = encrypt_data(token_name=token_name,
                                       token_amount=str(amount),
                                       recipient=recipient,
                                       operation="make",
                                       key_encoded=token_key_dict["key"])
    # print(decrypt(encrypted_data_make, token_key_dict["key"]))
    operation = load_signal(token_key_dict["signals"])
    data = json.dumps(encrypted_data_make)
    bisurl = Bismuthutil.create_bis_url(recipient, 0, operation, data)
    print("make (data)", data)
    print("make (operation)", operation)
    print("BISURL to make", bisurl)
    return {"data": data, "operation": operation, "bisurl": bisurl, "keyfile": json.dumps(token_key_dict)}
if __name__ == "__main__":
    # Manual smoke tests kept for reference:
    # account_add_to(account="test", token="stoken2", amount=1, debtor="test0") # this is automated based on chain
    # account_add_to(account="test", token="stoken3", amount=1, debtor="test0") # this is automated based on chain
    # print(account_file_load("test"))
    """
    generate_token(token_name="stest4",
                   recipient="4edadac9093d9326ee4b17f869b14f1a2534f96f9c5d7b48dc9acaed",
                   amount="10000")
    move_token(token_name="stest3",
               recipient="4edadac9093d9326ee4b17f869b14f1a2534f96f9c5d7b48dc9acaed",
               amount="1")
    """
    # Default action: replay the chain for every locally known token.
    loaded_tokens = load_tokens()
    for token in loaded_tokens:
        token_key_dict = load_token_dict(token=token)
        tokens_update(token_key_dict)
|
985,490 | cc76f341e1d3ca275d07296f68be5a18fe8b8477 | # Copyright 2021 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepnull.main."""
import os
import tempfile
from unittest import mock
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
import ml_collections
import numpy as np
import pandas as pd
from deepnull import config
from deepnull import main
FLAGS = flags.FLAGS
# Small DeepNull configuration used by the end-to-end test: a tiny MLP
# trained for only 2 epochs so the test stays fast.
_DEFAULT_CONFIG = ml_collections.ConfigDict({
    'model_type':
        'deepnull',
    'model_config':
        ml_collections.ConfigDict({
            'mlp_units': (32, 16, 8),   # hidden layer widths
            'mlp_activation': 'relu',
        }),
    'optimizer_config':
        ml_collections.ConfigDict({
            'learning_rate_batch_1024': 1e-4,
            'beta_1': 0.9,
            'beta_2': 0.99,
            'optimization_metric': '',
        }),
    'training_config':
        ml_collections.ConfigDict({
            'num_epochs': 2,
            'batch_size': 512,
        }),
})
def _create_df(size: int):
ids = np.arange(size)
feature_1 = np.random.random(size)
feature_2 = np.random.random(size)
err = 0.1 * np.random.random(size)
val = feature_1 + feature_2 + feature_2**2 + err
return pd.DataFrame(
{
'FID': ids,
'IID': ids,
'cov1': feature_1,
'cov2': feature_2,
'label': val
},
columns=['FID', 'IID', 'cov1', 'cov2', 'label'])
class MainTest(absltest.TestCase):
    """End-to-end test of deepnull.main on a small synthetic dataset."""

    def test_end_to_end_default_config(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            # Write synthetic input and read it back so comparisons use the
            # exact round-tripped TSV values.
            input_filename = os.path.join(tmpdir, 'input.tsv')
            _create_df(size=1024).to_csv(input_filename, sep='\t', index=False)
            input_df = pd.read_csv(input_filename, sep='\t')
            output_filename = os.path.join(tmpdir, 'output.tsv')
            with flagsaver.flagsaver():
                FLAGS.input_tsv = input_filename
                FLAGS.output_tsv = output_filename
                FLAGS.target = 'label'
                FLAGS.covariates = ['cov1', 'cov2']
                FLAGS.num_folds = 3
                FLAGS.seed = 234
                FLAGS.logdir = tmpdir
                # Substitute the small/fast test config for the real one.
                with mock.patch.object(config, 'get_config', autospec=True) as conf:
                    conf.return_value = _DEFAULT_CONFIG
                    # Run the e2e test.
                    main.main(['main.py'])
            # Load the results.
            output_df = pd.read_csv(output_filename, sep='\t')
            # Compare the results of input and output df.
            input_columns = set(input_df.columns)
            output_columns = set(output_df.columns)
            # Output must keep every input column...
            self.assertEqual(input_columns & output_columns, input_columns)
            # ...and add exactly one new prediction column.
            self.assertEqual(output_columns - input_columns, {'label_deepnull'})
            # Shared columns must pass through unchanged.
            shared_columns = sorted(input_columns & output_columns)
            pd.testing.assert_frame_equal(input_df[shared_columns],
                                          output_df[shared_columns])
# TODO: Figure out how to specify a --model_config flag as a string that will
# be parsed appropriately within the test. That would enable testing an
# explicit config as well as the default config above.
# Standard absl test entry point.
if __name__ == '__main__':
    absltest.main()
|
985,491 | 554c3aed5e2105c0b33fdc79b568e88a5449ff99 | class Solution:
def wiggleSort(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if not nums:
return
less_equal = True
for i in range(len(nums) - 1):
if less_equal:
if nums[i] > nums[i + 1]:
nums[i], nums[i + 1] = nums[i + 1], nums[i]
else:
if nums[i] < nums[i + 1]:
nums[i], nums[i + 1] = nums[i + 1], nums[i]
less_equal = not less_equal |
985,492 | ea44cee1bb396a3650ae83257a40d712aa604fe4 | import sys
import os
import csv
import mgmnet.ranRxn_nets as rn
import mgmnet.topo_measure as tm
# Compute global topology measures for one random-reaction (ranRxn)
# sub-network and append them as a CSV row.
ranRxn = rn.ranRxn()
topo = tm.topoMeasure()

# Command line: <level-key> <group-key> <species-index>
# level
level = ranRxn.level[sys.argv[1]]
# group
group = ranRxn.group[sys.argv[2]]
# species
species = int(sys.argv[3])

system_name = '%s_%s' % (level, group)

# Build ../results_cluster/topo_ave/ranRxn/<system_name> piece by piece,
# creating each missing directory level along the way.
dr = ''
for ds in ('../results_cluster', '/topo_ave', '/ranRxn', '/%s' % (system_name)):
    dr = dr + ds
    if not os.path.exists(dr):
        os.makedirs(dr)
outputFileName = dr + '/%s-%d.csv' % (system_name, species)

header = topo.header
# NOTE(review): mode 'w' truncates the file, so st_size is always 0 here
# and any previous contents are lost -- if appending across runs was
# intended, this should be mode 'a'; confirm against usage.
with open(outputFileName, 'w') as f:
    if os.stat(outputFileName).st_size == 0:  # if file not yet written
        csvf = csv.writer(f)
        csvf.writerow(header)

# species_name
species_name = ranRxn.species_name(system_name, species)
# nbr_rxn
nbr_rxn = ranRxn.number_of_rxn(system_name, species)
data0 = [level, group, species, species_name, nbr_rxn]

# ----- Import sub-networks with rxn-degree as node attributes -----#
sEdges = ranRxn.sub_edges(system_name, species)
nodeAttr = ranRxn.rxn_degree(system_name, species)

# --- Compute the topology measures and append the data row ---#
data1 = topo.global_measure(sEdges, nodeAttr)
data = data0 + data1
with open(outputFileName, 'a') as f:
    csvf = csv.writer(f)
    csvf.writerow(data)
|
985,493 | e0f6c29b74ad6805e8ec8541b5b40805fe00a5b2 | # 1003474
# Alex W
import pytest
from present import sBoxLayer, pLayer, present_round, present_inv_round, genRoundKeys
class TestKey:
    """Round-key schedule tests for the PRESENT cipher (80-bit key)."""

    def test_one(self):
        """All-zero key: every generated round key must match the vector."""
        key1 = 0x00000000000000000000
        keys = genRoundKeys(key1)
        # Expected round keys (as decimal ints), indexed by round number.
        keysTest = {
            0: 32,
            1: 0,
            2: 13835058055282163712,
            3: 5764633911313301505,
            4: 6917540022807691265,
            5: 12682149744835821666,
            6: 10376317730742599722,
            7: 442003720503347,
            8: 11529390968771969115,
            9: 14988212656689645132,
            10: 3459180129660437124,
            11: 16147979721148203861,
            12: 17296668118696855021,
            13: 9227134571072480414,
            14: 4618353464114686070,
            15: 8183717834812044671,
            16: 1198465691292819143,
            17: 2366045755749583272,
            18: 13941741584329639728,
            19: 14494474964360714113,
            20: 7646225019617799193,
            21: 13645358504996018922,
            22: 554074333738726254,
            23: 4786096007684651070,
            24: 4741631033305121237,
            25: 17717416268623621775,
            26: 3100551030501750445,
            27: 9708113044954383277,
            28: 10149619148849421687,
            29: 2165863751534438555,
            30: 15021127369453955789,
            31: 10061738721142127305,
            32: 7902464346767349504
        }
        # Iterate items() instead of keys() + a second lookup per round.
        for round_idx, expected in keysTest.items():
            assert keys[round_idx] == expected
class TestSbox:
    """sBoxLayer must apply the 4-bit S-box to every nibble of the state."""

    def test_one(self):
        # Input nibbles 0..f in order; expected output is the S-box table.
        state0 = 0x0123456789abcdef
        expected = 0xc56b90ad3ef84712
        output = sBoxLayer(state0, 64)
        assert (hex(output) == hex(expected))
class TestPlayer:
    """Pin a few single-bit mappings of pLayer (the bit permutation)."""

    def test_one(self):
        # Bit 0 is a fixed point of the permutation.
        state0 = 0b1
        expected = 0b1
        output = pLayer(state0)
        assert (output == expected)

    def test_two(self):
        # Bit 1 maps to bit 16.
        state0 = 0b10
        expected = 0b10000000000000000
        output = pLayer(state0)
        assert (bin(output) == bin(expected))

    def test_three(self):
        # Bit 9 maps to bit 18.
        state0 = 0b1000000000
        expected = 0b1000000000000000000
        output = pLayer(state0)
        assert (bin(output) == bin(expected))
class TestNoKeySchedule:
    """Round function tests with a fixed all-zero round key (no schedule),
    including inversion checks against present_inv_round."""

    def test_one(self):
        # One round of the all-zero plaintext/key.
        plain1 = 0x0000000000000000
        key1 = 0x00000000000000000000
        round1 = present_round(plain1, key1)
        round11 = 0xffffffff00000000
        assert (round1 == round11)

    def test_two(self):
        # Two chained rounds.
        plain1 = 0x0000000000000000
        key1 = 0x00000000000000000000
        round1 = present_round(plain1, key1)
        round2 = present_round(round1, key1)
        round22 = 0xff00ffff000000
        assert round2 == round22

    def test_three(self):
        # Three chained rounds.
        plain1 = 0x0000000000000000
        key1 = 0x00000000000000000000
        round1 = present_round(plain1, key1)
        round2 = present_round(round1, key1)
        round3 = present_round(round2, key1)
        round33 = 0xcc3fcc3f33c00000
        assert round3 == round33

    def test_inv(self):
        # invert single rounds: each inverse must recover the prior state.
        plain1 = 0x0000000000000000
        key1 = 0x00000000000000000000
        round1 = present_round(plain1, key1)
        round2 = present_round(round1, key1)
        round3 = present_round(round2, key1)
        plain11 = present_inv_round(round1, key1)
        assert plain1 == plain11
        plain22 = present_inv_round(round2, key1)
        assert round1 == plain22
        plain33 = present_inv_round(round3, key1)
        assert round2 == plain33
985,494 | a552ecf50adf2fbd3004bacc20c7781b5cdb4acc | from naoqi import ALProxy
Class FaceRecognition():
def __init__(self, ip, port):
self.ip = ip
self.port = port
# start speech recognition within a given time in sec, return reconized words
def speech_recognize(self, time):
with sr.Microphone() as source:
self.r.adjust_for_ambient_noise(source)
print 'Mic activated. Start listening...'
audio = self.r.listen(source, phrase_time_limit=time)
try:
print 'Reconized words: ' + self.r.recognize_google(audio)
return self.r.recognize_google(audio)
except LookupError, e:
print e
return 'there was an error!'
except sr.UnknownValueError, e:
print e
print 'Reconized nothing within ' + str(time) + ' second!'
return ''
def learn(self):
def reset(self):
def getDatabase(self):
|
985,495 | 1e20d9545b8a796671c4c7a8d1523293746554a9 | import argparse
import os.path
import types
# fwrap: embed an arbitrary file into a C header as an unsigned char array.
parser = argparse.ArgumentParser(description='File wrapper.')
parser.add_argument("-f", "--file", dest="filename", required=True, help="REQUIRED. input file")
parser.add_argument("-n", "--name", dest="cvarname", help="name of the generated C string variable")
options = parser.parse_args()

# Output header name: foo.txt -> foo_txt.h (extension folded into the stem).
basename, extension = os.path.splitext(options.filename)
if extension:
    outputfilename = basename + "_" + extension.strip('.') + ".h"
else:
    outputfilename = basename + ".h"

# C variable name: explicit -n flag wins, else psz_<stem>[_<ext>].
if options.cvarname:
    cvarname = options.cvarname
else:
    (head, tail) = os.path.split(options.filename)
    basetail = os.path.splitext(tail)[0]
    postfix = extension.strip('.')
    if postfix:
        cvarname = "psz" + "_" + basetail + "_" + postfix
    else:
        cvarname = "psz" + "_" + basetail

byline = "//Auto-generated by fwrap.py. Do not edit manually\n"
# Read once; iterating a bytearray yields ints on both Python 2 and 3
# (the original's ord(char) loop breaks on Python 3 bytes), and a single
# join replaces the quadratic string += concatenation. Files are closed
# deterministically via `with`.
with open(options.filename, "rb") as inputfile:
    contents = inputfile.read()
body = "".join(str(byte) + ",\n" for byte in bytearray(contents))
headerfile = byline + "const unsigned char " + cvarname + " [] = {\n" + body + "};\n"
with open(outputfilename, "w") as outputfile:
    outputfile.write(headerfile)
985,496 | 01079f40a488f69f309499350c816ae54ce36bc0 | import pandas as pd
import numpy as np
import os
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from datetime import datetime, timedelta
from urllib.request import urlopen
import json
import plotly.graph_objects as go
from dash.dependencies import Input, Output
import flask
from app import app
def Table(dataframe, link_column_name=None, col1=None, col2=None, drop=[]):
    """Create a table with links in columns.

    Args:
        dataframe: pandas frame to render; column order is preserved.
        link_column_name: column holding link targets -- either one URL,
            or two separated by ';' when *col2* is given. The column
            itself is not rendered.
        col1: column whose cells link to the first URL.
        col2: optional column whose cells link to the second URL.
        drop: additional columns to omit from the rendered table.
            NOTE(review): mutable default [] -- never mutated here, but
            fragile if that ever changes.

    Returns:
        dash html.Table with a styled header row plus one row per record.
    """
    if link_column_name:
        # Pre-compute per-row link targets (split on ';' for dual links).
        if col2:
            links1 = dataframe[link_column_name] \
                .map(lambda x: x.replace(' ', '').split(';')[0]).values
            links2 = dataframe[link_column_name] \
                .map(lambda x: x.replace(' ', '').split(';')[1]).values
        else:
            links1 = dataframe[link_column_name] \
                .map(lambda x: x.replace(' ', '')).values
    rows = []
    for i in range(len(dataframe)):
        row = []
        for col in dataframe.columns:
            # Skip the link column itself and any explicitly dropped ones.
            if (col in [link_column_name] + drop) is False:
                value = dataframe.iloc[i][col]
                if col in [col1, col2]:
                    if col == col2:
                        cell = html.Td(dcc.Link(href=links2[i], children=value))
                    else:
                        cell = html.Td(dcc.Link(href=links1[i], children=value))
                else:
                    cell = html.Td(children=value)
                row.append(cell)
        rows.append(html.Tr(row,
                            style={
                                'color': '#7FDBFF',
                                'fontSize': '18px',
                            }))
    return html.Table(
        # Header
        [html.Tr([html.Th(col,
                          style={
                              'background-color': '#111111',
                              'color': '#7FDBFF',
                              'fontSize': '20px',
                          }) \
                  for col in dataframe.columns if (col in [link_column_name] + drop) is False])] + \
        rows,
        style={'width': '100%'}
    )
def total_cases_graph(day, pathname, df, location_colname, dates, dates2=None):
    """Stacked-bar figure of cumulative cases and deaths for a location.

    Args:
        day: index into *dates*; bars dated after dates[day] render faded.
        pathname: URL path; selects world/country/state/county scope.
        df: long-format frame with Date/Cases/Deaths plus location columns.
        location_colname: default location column (overridden per scope).
        dates: ordered date strings driving the day slider.
        dates2: optional alternate date list; when its length differs from
            *dates*, the faded "future" traces get legend entries.

    Returns:
        plotly go.Figure; an empty Figure on any error (broad except is
        intentional so one bad path never breaks the page).
    """
    if not dates2:
        dates2 = dates
    try:
        # Normalise the path into a bare lowercase location token.
        location = pathname \
            .replace('/countries', '') \
            .replace('/states', '') \
            .replace(' ', '') \
            .strip('/') \
            .lower()
        if location in ['']:  # world_page
            location_df = df.groupby(['Date']).sum()[['Cases', 'Deaths']].reset_index()
            l = 'globally'
        else:
            # any county (/states/<state>/<county> has 3 slashes)
            if len(pathname) - len(pathname.replace('/', '')) > 2:
                location = pathname.split('/')[3].lower()
                s = pathname.split('/')[2].lower()
                df[location_colname + '_'] = [x.replace(' ', '').lower() for x in df[location_colname]]
                df['State_'] = df['State'].map(lambda x: str(x).replace(' ', '').lower())
                location_df = df[(df[location_colname + '_'] == location) & (df['State_'] == s)] \
                    .reset_index(drop=True)
            # any state
            elif pathname[:7] == '/states':
                location_colname = 'State'
                location = pathname.split('/')[2].lower()
                df[location_colname + '_'] = [str(x).replace(' ', '').lower() for x in df[location_colname]]
                location_df = df[df[location_colname + '_'] == location] \
                    .reset_index(drop=True)
            # any country
            elif pathname[:7] == '/countr':
                location_colname = 'Country'
                location = pathname.split('/')[2].lower()
                df[location_colname + '_'] = [str(x).replace(' ', '').lower() for x in df[location_colname]]
                location_df = df[df[location_colname + '_'] == location] \
                    .reset_index(drop=True)
            if len(location_df) > 0:
                # "in the United ..." vs "in <place>" phrasing.
                l_ = location_df[location_colname].values[0]
                if l_[:6].lower() == 'united':
                    l = 'in the ' + l_
                else:
                    l = 'in ' + l_
        # Hover text per bar: date + formatted counts.
        location_df.loc[:, 'Text'] = [f'<b>{x}</b><br>{int(y):,} Cases<br>{int(z):,} Deaths' for x, y, z in \
                                      zip(location_df['Date'], location_df['Cases'], location_df['Deaths'])]
        # Split past (<= selected day, solid) from future (faded).
        df1 = location_df.loc[location_df['Date'] <= dates[day], :]
        df2 = location_df.loc[location_df['Date'] > dates[day], :]
        yrange = [0, max(50, df1['Cases'].max())]
        xrange = [dates[0], str(datetime.strptime(dates[-1], "%Y-%m-%d") + timedelta(days=2))]
        show_legend_2 = False
        if len(df2) > 0:
            if len(dates2) != len(dates):
                show_legend_2 = True
        # Cases bar shows Cases - Deaths so the stack totals to Cases.
        return go.Figure(data=[
            go.Bar(name='Deaths', x=df1['Date'], y=df1['Deaths'],
                   marker_color='red', text=df1['Text'], hoverinfo='text'),
            go.Bar(name='Cases', x=df1['Date'], y=df1['Cases'] - df1['Deaths'],
                   marker_color='blue', text=df1['Text'], hoverinfo='text'),
            go.Bar(name='Deaths', x=df2['Date'], y=df2['Deaths'],
                   marker_color='red', text=df2['Text'],
                   hoverinfo='text', opacity=.4, showlegend=show_legend_2),
            go.Bar(name='Cases', x=df2['Date'], y=df2['Cases'] - df2['Deaths'],
                   marker_color='blue', text=df2['Text'], hoverinfo='text',
                   opacity=.4, showlegend=show_legend_2)
        ]).update_layout(barmode='stack',
                         plot_bgcolor='white',
                         xaxis=dict(title='Date', range=xrange),
                         yaxis=dict(title='Total', range=yrange),
                         title=dict(text='Total cases and deaths ' + l, x=0.5),
                         legend=dict(x=0, y=1))
    except:
        return go.Figure()
def daily_cases_graph(day, pathname, df, location_colname, dates, dates2=None):
    """Stacked-bar figure of NEW (per-day) cases and deaths for a location.

    Same scope resolution as total_cases_graph; daily values are computed
    as first differences of the cumulative columns.

    Returns:
        plotly go.Figure; an empty Figure on any error (broad except is
        intentional so one bad path never breaks the page).
    """
    if not dates2:
        dates2 = dates
    try:
        # Normalise the path into a bare lowercase location token.
        location = pathname \
            .replace('/countries', '') \
            .replace('/states', '') \
            .replace(' ', '') \
            .strip('/') \
            .lower()
        if location == '':  # world_page
            location_df = df.groupby(['Date']).sum()[['Cases', 'Deaths']].reset_index()
            l = 'globally'
        else:
            # any county (/states/<state>/<county> has 3 slashes)
            if len(pathname) - len(pathname.replace('/', '')) > 2:
                location = pathname.split('/')[3].lower()
                s = pathname.split('/')[2].lower()
                df[location_colname + '_'] = [x.replace(' ', '').lower() for x in df[location_colname]]
                df['State_'] = df['State'].map(lambda x: str(x).replace(' ', '').lower())
                location_df = df[(df[location_colname + '_'] == location) & (df['State_'] == s)] \
                    .reset_index(drop=True)
            # any state
            elif pathname[:7] == '/states':
                location_colname = 'State'
                location = pathname.split('/')[2].lower()
                df[location_colname + '_'] = [str(x).replace(' ', '').lower() for x in df[location_colname]]
                location_df = df[df[location_colname + '_'] == location] \
                    .reset_index(drop=True)
            # any country
            elif pathname[:7] == '/countr':
                location_colname = 'Country'
                location = pathname.split('/')[2].lower()
                df[location_colname + '_'] = [str(x).replace(' ', '').lower() for x in df[location_colname]]
                location_df = df[df[location_colname + '_'] == location] \
                    .reset_index(drop=True)
            if len(location_df) > 0:
                l_ = location_df[location_colname].values[0]
                if l_[:6].lower() == 'united':
                    l = 'in the ' + l_
                else:
                    l = 'in ' + l_
        # after getting l and location_df, plot
        # First differences of the cumulative series give daily counts.
        c = location_df['Cases'].values
        d = location_df['Deaths'].values
        location_df['New Cases'] = [c[0]] + list(c[1:] - c[:-1])
        location_df['New Deaths'] = [d[0]] + list(d[1:] - d[:-1])
        location_df.loc[:, 'Text'] = [f'<b>{x}</b><br>{int(y):,} New Cases<br>{int(z):,} New Deaths' for x, y, z in \
                                      zip(location_df['Date'], location_df['New Cases'], location_df['New Deaths'])]
        # Split past (<= selected day, solid) from future (faded).
        df1 = location_df.loc[location_df['Date'] <= dates[day], :]
        df2 = location_df.loc[location_df['Date'] > dates[day], :]
        yrange = [0, max(5, df1['New Cases'].max())]
        xrange = [dates[0], str(datetime.strptime(dates[-1], "%Y-%m-%d") + timedelta(days=2))]
        show_legend_2 = False
        if len(df2) > 0:
            if len(dates2) != len(dates):
                show_legend_2 = True
        # NOTE(review): unlike total_cases_graph, the stack adds New Deaths
        # on top of the full New Cases value (no subtraction) -- confirm
        # whether deaths are meant to be a subset of cases here.
        return go.Figure(data=[
            go.Bar(name='New Deaths', x=df1['Date'], y=df1['New Deaths'],
                   marker_color='red', text=df1['Text'], hoverinfo='text'),
            go.Bar(name='New Cases', x=df1['Date'], y=df1['New Cases'],
                   marker_color='blue', text=df1['Text'], hoverinfo='text'),
            go.Bar(name='New Deaths', x=df2['Date'], y=df2['New Deaths'],
                   marker_color='red', text=df2['Text'],
                   hoverinfo='text', opacity=.4, showlegend=show_legend_2),
            go.Bar(name='New Cases', x=df2['Date'], y=df2['New Cases'],
                   marker_color='blue', text=df2['Text'], hoverinfo='text',
                   opacity=.4, showlegend=show_legend_2)
        ]).update_layout(barmode='stack', plot_bgcolor='white',
                         xaxis=dict(title='Date', range=xrange),
                         yaxis=dict(title='Total', range=yrange),
                         title=dict(text='Daily cases and deaths ' + l, x=0.5),
                         legend=dict(x=0, y=1))
    except:
        return go.Figure()
def data_table(day=None, pathname=None, cummulative_cases=None,
               dates=None, location_colname=None):
    """Build the per-location totals table for the selected day.

    Scope comes from *pathname*: a /states/<state> page lists that state's
    counties, the US country page lists states (or all counties on the
    -counties variant), and any other page lists countries. Returns None
    for the root path or on error.
    """
    try:
        if pathname != '/':
            if pathname.lower()[:7] == '/states':
                if len(pathname) - len(pathname.replace('/', '')) <= 2:
                    # State page: list counties of this state.
                    location_colname = 'County'
                    location = pathname \
                        .lower() \
                        .replace(' ', '') \
                        .replace('/countries', '') \
                        .replace('/states', '') \
                        .strip('/')
                    cummulative_cases['State_'] = cummulative_cases['State'].map(lambda x: str(x).replace(' ', '').lower())
                    cummulative_cases['County_'] = cummulative_cases['County'].map(lambda x: str(x).replace(' ', '').lower())
                    totals = cummulative_cases[
                        (cummulative_cases['Date'] == dates[day]) &
                        (cummulative_cases['State_'] == location)][['State_', 'County_', 'County', 'Cases', 'Deaths']] \
                        .sort_values('Cases', ascending=False)
                    totals['Cases'] = totals['Cases'].map(lambda x: f'{x:,}')
                    totals['Deaths'] = totals['Deaths'].map(lambda x: f'{x:,}')
                    totals['Links'] = ['/states/' + state + '/' + county
                                       for state, county in zip(totals['State_'], totals['County_'])]
                    totals = totals.drop(['State_', 'County_'], axis=1)
                    # BUG FIX: this branch built `totals` but never returned
                    # a table, so every state page rendered None.
                    return Table(totals, 'Links', 'County')
            elif pathname.lower() == '/countries/unitedstates-counties':
                # US counties page: every county, linked to state + county.
                location_colname = 'County'
                location = 'unitedstates'
                cummulative_cases['State_'] = cummulative_cases['State'].map(lambda x: str(x).replace(' ', '').lower())
                cummulative_cases['County_'] = cummulative_cases['County'].map(
                    lambda x: str(x).replace(' ', '').lower())
                totals = cummulative_cases[cummulative_cases['Date'] == dates[day]][['State_', 'County_', 'State', 'County', 'Cases', 'Deaths']] \
                    .sort_values('Cases', ascending=False)
                totals['Cases'] = totals['Cases'].map(lambda x: f'{x:,}')
                totals['Deaths'] = totals['Deaths'].map(lambda x: f'{x:,}')
                totals['Links'] = ['/states/' + state +
                                   ';/states/' + state + '/' + county
                                   for state, county in zip(totals['State_'], totals['County_'])]
                totals = totals.drop(['State_', 'County_'], axis=1)
                table = Table(totals, 'Links', 'State', 'County')
                return table
            elif pathname.lower() == '/countries/unitedstates':
                # US country page: one row per state.
                location_colname = 'State'
                cummulative_cases['State_'] = cummulative_cases['State'].map(lambda x: str(x).replace(' ', '').lower())
                totals = cummulative_cases[cummulative_cases['Date'] == dates[day]][['State_', 'State', 'Cases', 'Deaths']] \
                    .sort_values('Cases', ascending=False)
                totals['Cases'] = totals['Cases'].map(lambda x: f'{x:,}')
                totals['Deaths'] = totals['Deaths'].map(lambda x: f'{x:,}')
                totals['Links'] = ['/states/' + state
                                   for state in totals['State_']]
                totals = totals.drop(['State_'], axis=1)
                table_US = Table(totals, 'Links', location_colname)
                return table_US
            else:
                # Default: one row per country.
                totals = cummulative_cases[cummulative_cases['Date'] == dates[day]][['Country', 'Cases', 'Deaths']] \
                    .sort_values('Cases', ascending=False)
                totals['Cases'] = totals['Cases'].map(lambda x: f'{x:,}')
                totals['Deaths'] = totals['Deaths'].map(lambda x: f'{x:,}')
                totals['Link'] = totals['Country'].map(lambda x: '/countries/' + x.replace(' ', ''))
                table = Table(totals, link_column_name='Link', col1='Country')
                return table
    except Exception:
        # Narrowed from a bare except; any rendering error yields no table.
        return None
def update_header(pathname, cummulative_cases, location_colname):
    """Return the page-header text for the location encoded in the URL path.

    Parameters
    ----------
    pathname : str or None
        Current URL path, e.g. '/', '/countries/france',
        '/countries/unitedstates-counties', '/states/newyork', or
        '/states/newyork/kings' (state + county page).
    cummulative_cases : pandas.DataFrame
        Case data containing the column(s) being looked up
        ('Country', 'State' and/or 'County').
    location_colname : str
        Default column name; kept for signature compatibility — the actual
        column is re-derived from the path, matching the original behavior.

    Returns
    -------
    str or None
        "Tracking COVID-19 in <location>" (with "the ..."/"... County"
        variants), the plain "Tracking COVID-19" fallback when anything
        fails (unknown location, pathname=None, missing column), or None
        for paths that match no branch (e.g. '/') — as before.
    """
    def _display_name(colname, key):
        # Find the row whose normalized (spaces stripped, lower-cased) value
        # matches `key` and return its original display spelling.
        normalized = cummulative_cases[colname].map(
            lambda x: str(x).replace(' ', '').lower())
        return cummulative_cases.loc[normalized == key, colname].values[0]

    try:
        if len(pathname) - len(pathname.replace('/', '')) > 2:
            # /states/<state>/<county> — county-level page.
            location = _display_name('County', pathname.split('/')[3].lower())
            if 'parish' in location.lower():
                # Louisiana parishes already carry their own suffix.
                return "Tracking COVID-19 in " + str(location)
            return "Tracking COVID-19 in " + str(location) + ' County'
        elif pathname == '/countries/unitedstates-counties':
            # BUG FIX: this branch previously looked 'unitedstates' up in the
            # 'County' column (no such county exists, so the IndexError was
            # swallowed and the generic fallback header was shown). Use
            # 'Country', matching update_totals' handling of the same path.
            location = _display_name('Country', 'unitedstates')
            if location.lower()[:6] == 'united':
                return "Tracking COVID-19 in the " + str(location)
            return "Tracking COVID-19 in " + str(location)
        else:
            if pathname[:7] in ['/states', '/countr']:
                colname = 'State' if pathname[:7] == '/states' else 'Country'
                key = pathname \
                    .replace('/countries', '') \
                    .replace('/states', '') \
                    .strip('/') \
                    .lower()
                location = _display_name(colname, key)
                if location.lower()[:6] == 'united':
                    return "Tracking COVID-19 in the " + str(location)
                # BUG FIX: a trailing comma here used to turn this return
                # value into a 1-tuple instead of a string.
                return "Tracking COVID-19 in " + str(location)
    except:  # broad on purpose (matches the file's style): any failure -> default header
        return "Tracking COVID-19"
def update_totals(day, pathname, cummulative_cases, location_colname, dates):
    """Build a one-row Table of total cases and deaths for the page's location.

    Falls back to global totals for paths outside /states and /countries, and
    returns None (all errors swallowed) when anything goes wrong — e.g. while
    pathname is still None during app startup.
    """
    try:
        if pathname:
            if pathname.lower()[:6] in ['/state', '/count']:
                # Work out which column to filter on and the normalized key.
                stripped = pathname \
                    .replace('/countries', '') \
                    .replace('/states', '') \
                    .strip('/') \
                    .lower()
                if len(pathname) - len(pathname.replace('/', '')) > 2:
                    # /states/<state>/<county> — filter by the county segment.
                    location_colname, location = 'County', pathname.split('/')[3].lower()
                elif pathname.lower() == '/countries/unitedstates-counties':
                    location_colname, location = 'Country', 'unitedstates'
                elif pathname.lower()[:7] == '/countr':
                    location_colname, location = 'Country', stripped
                else:
                    location_colname, location = 'State', stripped
                in_location = cummulative_cases[location_colname].map(
                    lambda x: str(x).replace(' ', '').lower()) == location
                on_date = cummulative_cases['Date'] == dates[day]
                day_totals = cummulative_cases[on_date & in_location].sum()
                d = {'Total Cases': [f"{int(day_totals['Cases']):,}"],
                     'Total Deaths': [f"{int(day_totals['Deaths']):,}"]}
            else:
                # Any other non-empty path: global totals for the selected day.
                day_totals = cummulative_cases[cummulative_cases['Date'] == dates[day]].sum()
                d = {'Total Cases': [f"{day_totals['Cases']:,}"],
                     'Total Deaths': [f"{day_totals['Deaths']:,}"]}
        return Table(pd.DataFrame.from_dict(d))
    except:
        return
def state_county_choropleth(day, pathname, county_dfs, location_colname, location_lat_lon, dates, cummulative_cases):
    """Build a county-level Plotly choropleth map for the state in the URL.

    Parameters:
        day: index into `dates` / `county_dfs` selecting the daily snapshot.
        pathname: URL path such as '/states/newyork'.
        county_dfs: list of per-day county DataFrames (FIPS, Code, County,
            Cases, Deaths, ...), parallel to `dates`.
        location_colname: column matched against the path (e.g. 'State').
        location_lat_lon: DataFrame with per-state 'Latitude'/'Longitude';
            NOTE(review): its `location_colname` column appears to hold
            two-letter state codes (it is compared to 'Code') — confirm.
        dates: list of date strings parallel to `county_dfs`.
        cummulative_cases: full dataset, used only to pin the color-scale max.

    Returns an empty go.Figure() for county-level paths (3+ path segments)
    or on any error (bare except).
    """
    try:
        if len(pathname) - len(pathname.replace('/','')) > 2:
            # County pages do not get their own choropleth.
            return go.Figure()
        # Normalize the path into a lookup key (spaces stripped, lower-cased).
        location = pathname \
            .replace('/countries', '') \
            .replace('/states', '') \
            .strip('/')\
            .lower()
        dff = county_dfs[day]
        # Two-letter state code and display spelling for the matched state.
        code = dff.loc[dff[location_colname].map(lambda x: str(x).replace(' ', '').lower()) == location, 'Code'].values[0]
        location_ = dff.loc[dff[location_colname].map(lambda x: str(x).replace(' ', '').lower()) == location, location_colname].values[0]
        # Re-pad FIPS codes to 5 digits (leading zeros are lost on CSV read).
        dff['FIPS'] = dff['FIPS'].map(lambda x: '0' + str(x) if (len(str(x)) <= 4) else str(x))
        dff = dff.loc[dff['Code'] == code]
        # Per-county hover text; the third zip column (y) is unused in the format.
        dff.loc[:, 'Text'] = [f'<b>{w}</b><br>{int(x):,} Cases<br>{int(z):,} Deaths' for w, x, y, z in \
            zip(dff['County'], dff['Cases'], dff[location_colname], dff['Deaths'])]
        location_code = dff.loc[dff[location_colname].map(lambda x: str(x).replace(' ','').lower()) == location, 'Code'].values[0]
        # Center the map on the state's stored latitude/longitude.
        center_dict = {"lat": float(location_lat_lon.loc[location_lat_lon[location_colname] == location_code, 'Latitude'].values[0]),
                       "lon": float(location_lat_lon.loc[location_lat_lon[location_colname] == location_code, 'Longitude'].values[0])}
        return go.Figure(data=go.Choroplethmapbox(
            locations=dff['FIPS'],  # Spatial coordinates
            geojson=counties,
            z=dff['Cases'].astype(float),  # Data to be color-coded
            zmin=0,
            # Fixed upper bound across all days so colors stay comparable.
            zmax=cummulative_cases['Cases'].max() * 1.1,
            text=dff['Text'],
            hoverinfo='text',
            colorscale=[[0, "rgb(255, 250, 250)"],
                        [0.0001, "rgb(255, 200, 170)"],
                        [0.001, "rgb(255, 150, 120)"],
                        [0.01, "rgb(255, 100, 70)"],
                        [0.1, "rgb(255, 50, 20)"],
                        [1.0, "rgb(100, 0, 0)"]],
            colorbar_title="Total Cases",
            )).update_layout(
                mapbox_style='white-bg',
                mapbox_zoom=4,
                mapbox_center=center_dict,
                geo_scope='usa',  # limit map scope to USA
                geo={'fitbounds': 'locations'},
                title=dict(text='Total cases by county in ' + \
                    location_ +
                    ' on ' + \
                    str(dates[day]), x=0.5))
    except:
        return go.Figure()
def create_dropdown_options(world_data, county_data):
    """Create options for the location dropdown menu.

    Parameters
    ----------
    world_data : pandas.DataFrame
        Must contain a 'Country' column.
    county_data : pandas.DataFrame
        Must contain 'State' and 'County' columns.

    Returns
    -------
    list of dict
        ``{'label': ..., 'value': ...}`` entries: the fixed top-level pages,
        then countries, then US states, then 'State - County' pairs whose
        values look like '/states/<state>/<county>'.
    """
    world_dropdown_choices = sorted(set(world_data['Country']))
    has_state = county_data['State'].isna() == False
    state_dropdown_choices = sorted(set(county_data['State'][has_state]))
    combinations = sorted(set(zip(county_data['State'][has_state],
                                  county_data['County'][has_state])))
    # BUG FIX: labels and values were previously built by two separately
    # filtered comprehensions — the 'nan'/'' filter was applied to the labels
    # only, so a single bad state shifted every subsequent label/value pair.
    # Filter once and build both fields from the same pair. Also skip NaN
    # counties, which used to raise (str + float) inside the comprehension.
    combinations = [(s, c) for s, c in combinations
                    if str(s) not in ['nan', ''] and str(c) not in ['nan', '']]
    top_options = [{'label': 'Global Cases', 'value': '/' },
                   {'label': 'United States - by State', 'value': '/countries/unitedstates'},
                   {'label': 'United States - by County', 'value': '/countries/unitedstates-counties'}]
    country_options = [{'label': location, 'value': '/countries/' + location.replace(' ', '').lower()} \
                       for location in world_dropdown_choices]
    state_options = [{'label': location, 'value': '/states/' + location.replace(' ', '').lower()} \
                     for location in state_dropdown_choices]
    county_options = [{'label': s + ' - ' + c,
                       'value': '/states/' + s.replace(' ', '').lower() + '/' + c.replace(' ', '').lower()}
                      for s, c in combinations]
    return top_options + country_options + state_options + county_options
def create_search_bar(dropdown_options, dropdown_style, dropdown_id):
    """Create a dash dropdown menu with choices given."""
    # Assemble the component configuration first, then expand it into the
    # dcc.Dropdown constructor.
    dropdown_kwargs = {
        'id': dropdown_id,
        'options': dropdown_options,
        'placeholder': "Jump to a country/state/county (search or select)",
        'searchable': True,
        'clearable': True,
        'style': dropdown_style,
    }
    return dcc.Dropdown(**dropdown_kwargs)
## Styles
# Color palette shared by the dark-themed components.
colors = {
    'background': '#111111',
    'text': '#7FDBFF'
}
# Styling for the date label shown with the time slider.
date_style_dict = {
    'textAlign': 'center',
    'color': colors['text'],
    'fontSize': "18px"
}
table_style = {}
baseURL = 'http://coronavirusmapsonline.com'
header = []
## Data
# NOTE(review): all paths are relative to the working directory — the app is
# presumably launched from a sibling directory of ../data; confirm.
world_data = pd.read_csv('../data/world_data_with_codes.csv')
state_data = pd.read_csv('../data/daily_cases_USA_states.csv')
county_data = pd.read_csv('../data/daily_cases_USA_counties.csv')
# Per-day county snapshots are stored as date_<date>.csv; recover the date
# strings from the filenames and keep only real dates (they start with '2').
dates = [str(x).replace('date_', '').replace('.csv', '') \
         for x in os.listdir('../data/county_data/')]
dates = sorted(set([x for x in dates if x if x[0]=='2']))
county_data_by_date = [pd.read_csv(f'../data/county_data/date_{d}.csv') for d in dates]
location_lat_lon = pd.read_csv('../data/statelatlong.csv')
# County boundary GeoJSON (FIPS-keyed), fetched over the network at import time.
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
    counties = json.load(response)
## Search bar / Dropdown
dropdown_style = {
    'padding-left': '0%',
    'padding-right': '0%',
    'width': '60%'
}
dropdown_id = 'location-dropdown'
dropdown_options = create_dropdown_options(world_data, county_data)
search_bar = create_search_bar(dropdown_options=dropdown_options,
                               dropdown_style=dropdown_style,
                               dropdown_id=dropdown_id)
# Dash callback: whenever the dropdown changes, push its value into the URL.
@app.callback(Output(component_id='url', component_property='pathname'),
              [Input(component_id=dropdown_id, component_property='value')])
def link_to_choice(dropdown_value):
    """Update the URL with the value from the dropdown selection."""
    return dropdown_value
# world_page
# Aggregated (Date, Country) totals backing the global landing page.
world_page_entities = 'Country'
world_page_df = world_data.copy()
world_page_df_grouped = world_page_df\
    .groupby(['Date', world_page_entities])\
    .sum()[['Cases', 'Deaths']]\
    .reset_index()
world_page_dates = sorted(set(world_page_df['Date']))
# # usa counties page
# world_page_entities = 'Country'
# world_page_df = world_data.copy()
# world_page_df_grouped = world_page_df\
#     .groupby(['Date', world_page_entities])\
#     .sum()[['Cases', 'Deaths']]\
#     .reset_index()
# world_page_dates = sorted(set(world_page_df['Date']))
# Per-county data backing the "United States - by County" page.
usa_county_page_entities = 'County'
usa_county_page_df = pd.read_csv('../data/daily_cases_USA_counties.csv')
# Dates are recovered from the snapshot filenames, same as `dates` above.
usa_county_page_dates = [str(x).replace('date_', '').replace('.csv', '') for x in os.listdir('../data/county_data/')]
usa_county_page_dates = sorted(set([x for x in usa_county_page_dates if x if x[0]=='2']))
usa_county_page_county_dfs = [x.copy() for x in county_data_by_date]
usa_county_page_df_grouped = usa_county_page_df.groupby(['Date', usa_county_page_entities, 'State', 'Code']).sum()[['Cases', 'Deaths']].reset_index()
# NOTE(review): these CSVs were already read into county_data_by_date above —
# this re-reads them from disk; confirm whether that duplication is intended.
county_dfs = [pd.read_csv(f'../data/county_data/date_{d}.csv') for d in usa_county_page_dates]
# usa states page
# world_page_entities = 'Country'
# world_page_df = world_data.copy()
# world_page_df_grouped = world_page_df\
#     .groupby(['Date', world_page_entities])\
#     .sum()[['Cases', 'Deaths']]\
#     .reset_index()
# world_page_dates = sorted(set(world_page_df['Date']))
# Per-state data backing the "United States - by State" page.
state_page_df = state_data.copy()
state_page_df_grouped = state_page_df.groupby(['Date', 'State', 'Day', 'Code'])\
    .sum()[['Cases', 'Deaths']].reset_index()
state_page_dates = sorted(set(state_page_df['Date']))
states = [x for x in set(state_page_df['State']) if str(x) != 'nan']
usa_state_page_dates = [str(x).replace('date_', '').replace('.csv', '') for x in os.listdir('../data/county_data/')]
usa_state_page_dates = sorted(set([x for x in usa_state_page_dates if x if x[0]=='2']))
# country specific pages
# A normalized '<entity>_' column (spaces stripped, lower-cased) is kept
# alongside the display column for URL-path matching.
C_page_entities = 'Country'
C_page_df = world_data.copy()
C_page_df[C_page_entities + '_'] = [x.replace(' ','').lower() for x in C_page_df[C_page_entities]]
C_page_df_grouped = C_page_df.groupby(['Date', C_page_entities]).sum()[['Cases', 'Deaths']].reset_index()
C_page_dates = sorted(set(C_page_df['Date']))
# state specific pages
# location
SS_page_entities = 'State'
SS_page_df = state_data.copy()
SS_page_df[SS_page_entities + '_'] = [str(x).replace(' ','').lower() for x in SS_page_df[SS_page_entities]]
# county
SS_page_df_counties = county_data.copy()
SS_page_df_grouped = SS_page_df_counties.groupby(['Date', 'County', SS_page_entities, 'Code']).sum()[['Cases', 'Deaths']].reset_index()
SS_page_dates = [str(x).replace('date_', '').replace('.csv', '') for x in os.listdir('../data/county_data/')]
SS_page_dates = sorted(set([x for x in SS_page_dates if x if x[0]=='2']))
SS_page_county_dfs = [x.copy() for x in county_data_by_date]
SS_page_location_lat_lon = location_lat_lon.copy()
# county specific pages
# Normalized key combines state and county ('<state>/<county>') to match the
# '/states/<state>/<county>' URL scheme.
CS_page_entities = 'County'
CS_page_df = county_data.copy()
CS_page_df[CS_page_entities + '_'] = [str(y).replace(' ','').lower() + '/' + str(x).replace(' ','').lower() for x,y in zip(CS_page_df[CS_page_entities], CS_page_df['State'])]
CS_page_df_grouped = CS_page_df.groupby(['Date', CS_page_entities, 'State', 'Code']).sum()[['Cases', 'Deaths']].reset_index()
CS_page_dates = [str(x).replace('date_', '').replace('.csv', '') for x in os.listdir('../data/county_data/')]
CS_page_dates = sorted(set([x for x in CS_page_dates if x if x[0]=='2']))
CS_page_county_dfs = [x.copy() for x in county_data_by_date]
|
import codecs

# Number of leading embedding vectors to copy from the full fastText file.
N = 150000

# Copy the first N lines of the 1M-vector file into a smaller file.
# BUG FIX: `with` guarantees both handles are closed even when a write
# fails (the original left them open on the exception path and never
# flushed on interpreter abort).
error_count = 0
with codecs.open("data/wiki-news-300d-1M.vec", "r", encoding='utf-8', errors='ignore') as src, \
        codecs.open("data/wiki-short1.vec", "w", encoding='utf-8', errors='ignore') as dst:
    for i in range(N):
        try:
            # readline() returns '' past EOF, so short inputs are harmless.
            dst.write(src.readline())
        except Exception:
            # Keep going on a bad line, but record how many failed.
            print("Error at ", i)
            error_count += 1
print(error_count)
|
985,498 | 48d9d41de8eb1f1db64ae4046d1894c60e22ef8f | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# openpose_utils.py
import os
import json
import re
import numpy as np
from collections import Counter
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def read_openpose_json(openpose_output_dir, idx, is_debug=False):
    """Read OpenPose per-frame JSON output and return median-smoothed 2D joints.

    Parameters
    ----------
    openpose_output_dir : str
        Directory of OpenPose keypoint JSON files, one per frame, whose names
        contain a 12-digit frame index (e.g. ``xxx_000000000012_keypoints.json``).
    idx : int
        Index into each file's ``people`` array (which detected person to read).
    is_debug : bool
        When True, raise the module logger to DEBUG.

    Returns
    -------
    dict
        If the directory holds a single JSON file: ``{frame_idx: [x1,y1,...]}``
        with no smoothing applied.
    (int, dict)
        Otherwise ``(start_frame_index, smoothed)`` where ``smoothed`` maps
        0-based frame offsets to 36-element ``[x1,y1,...,x18,y18]`` lists.

    Raises
    ------
    Exception
        If a listed file disappears before reading, or if 2..8 JSON files are
        present (smoothing needs at least 9 frames).
    """
    if is_debug:
        logger.setLevel(logging.DEBUG)

    # OpenPose output format: [x1,y1,c1,x2,y2,c2,...].
    # We keep the (x, y) pairs and track confidence scores separately.
    logger.info("start reading data: %s", openpose_output_dir)

    # Load json files, ignoring any other file types in the directory.
    json_files = os.listdir(openpose_output_dir)
    json_files = sorted([filename for filename in json_files if filename.endswith(".json")])

    cache = {}             # frame_idx -> [x1,y1,x2,y2,...]
    cache_confidence = {}  # frame_idx -> [c1,c2,...]
    smoothed = {}          # (frame_idx - start_frame_index) -> smoothed [x,y,...]

    ### extract x,y and ignore confidence score
    is_started = False
    start_frame_index = 0
    end_frame_index = 0
    for file_name in json_files:
        logger.debug("reading {0}".format(file_name))
        _file = os.path.join(openpose_output_dir, file_name)
        if not os.path.isfile(_file):
            raise Exception("No file found!!, {0}".format(_file))
        # BUG FIX: the file handle was previously leaked (json.load(open(_file))).
        with open(_file) as f:
            data = json.load(f)

        # Extract the frame index from the 12-digit number in the file name.
        # (raw string fixes the invalid-escape warning the old "\d" produced)
        frame_idx = int(re.findall(r"(\d{12})", file_name)[0])

        if frame_idx <= 0 or not is_started:
            # Mark that processing has started and remember the first frame
            # index; smoothed keys are offsets from it.
            is_started = True
            start_frame_index = frame_idx

        _data = data["people"][idx]["pose_keypoints_2d"]
        xy = []
        confidence = []
        for o in range(0, len(_data), 3):
            xy.append(_data[o])
            xy.append(_data[o + 1])
            confidence.append(_data[o + 2])

        logger.debug("found {0} for frame {1}".format(xy, str(frame_idx)))
        # Register this frame's raw joints.
        cache[frame_idx] = xy
        cache_confidence[frame_idx] = confidence
        end_frame_index = frame_idx

    if len(json_files) == 1:
        logger.info("found single json file")
        # Nothing to smooth over: return the raw 18-joint (x, y) cache.
        return cache

    if len(json_files) <= 8:
        raise Exception("need more frames, min 9 frames/json files for smoothing!!!")

    logger.info("start smoothing")

    # Last frame in which each of the 18 joints was confidently observed.
    last_frame = [start_frame_index - 1 for i in range(18)]
    # Minimum confidence for a sample to participate in the median.
    confidence_th = 0.3

    ### smooth by median value over a sliding window of up to `smooth_n` frames
    for frame, xy in cache.items():
        confidence = cache_confidence[frame]
        _len = len(xy)  # 36

        # Window clipped at the start/end of the sequence.
        smooth_n = 7  # odd number
        one_side_n = int((smooth_n - 1) / 2)
        one_side_n = min([one_side_n, frame - start_frame_index, end_frame_index - frame])
        smooth_start_frame = frame - one_side_n
        smooth_end_frame = frame + one_side_n

        frames_joint_median = [0 for i in range(_len)]
        # For each of the 18 joints' (x, y) pairs — 36 values total
        # (more info about the joint mapping in src/data_utils.py).
        for x in range(0, _len, 2):
            y = x + 1
            joint_no = int(x / 2)
            # Collect confidently-detected samples inside the window.
            x_v = []
            y_v = []
            for neighbor in range(smooth_start_frame, smooth_end_frame + 1):
                if cache_confidence[neighbor][joint_no] >= confidence_th:
                    x_v.append(cache[neighbor][x])
                    y_v.append(cache[neighbor][y])

            if len(x_v) >= 1:
                # Make the sample count odd so the median is an actual sample
                # (the mean is appended as the tie-breaking extra value).
                if len(x_v) % 2 == 0:
                    x_v.append(np.mean(x_v))
                    y_v.append(np.mean(y_v))
                x_med = np.median(sorted(x_v))
                y_med = np.median(sorted(y_v))

                # If this joint was missing in preceding frame(s), back-fill the
                # gap with the last confidently-observed smoothed value (a linear
                # interpolation variant is commented out in project history;
                # last-value hold is what is actually used).
                if last_frame[joint_no] != frame - 1:
                    if last_frame[joint_no] < start_frame_index:
                        # The joint has never been observed: seed with this median.
                        last_value_x = x_med
                        last_value_y = y_med
                    else:
                        last_value_x = smoothed[last_frame[joint_no] - start_frame_index][x]
                        last_value_y = smoothed[last_frame[joint_no] - start_frame_index][y]
                    for frame_linear_in in range(last_frame[joint_no] + 1, frame):
                        smoothed[frame_linear_in - start_frame_index][x] = last_value_x
                        smoothed[frame_linear_in - start_frame_index][y] = last_value_y
                last_frame[joint_no] = frame
            else:
                # No confident samples in the whole window: hold the previous
                # frame's smoothed value, or 0 on the very first frame.
                if frame > start_frame_index:
                    x_med = smoothed[frame - start_frame_index - 1][x]
                    y_med = smoothed[frame - start_frame_index - 1][y]
                else:
                    x_med = 0
                    y_med = 0

            frames_joint_median[x] = x_med
            frames_joint_median[x + 1] = y_med

        smoothed[frame - start_frame_index] = frames_joint_median

    # Patch up face/ear keypoints (per the original comment: face and ears —
    # presumably COCO-18 nose=0, ears=16/17; confirm against the model used):
    # when a keypoint was below the confidence threshold, borrow X from joint 1
    # and Y from the average of the two listed partner joints.
    for frame in smoothed:
        joints = [(16, 1, 17, 17), (17, 1, 16, 16), (0, 1, 16, 17)]
        for (fromj, tojx, tojy1, tojy2) in joints:
            if cache_confidence[frame + start_frame_index][fromj] < confidence_th:
                smoothed[frame][fromj * 2] = smoothed[frame][tojx * 2]
                smoothed[frame][fromj * 2 + 1] = (smoothed[frame][tojy1 * 2 + 1] + smoothed[frame][tojy2 * 2 + 1]) / 2

    # Frames cache of the smoothed 18 joints (x, y), keyed by offset from start.
    return start_frame_index, smoothed
|
985,499 | 4027344d12c5864376ca621aae3638fc1fc52938 | import cv2
import os

# Directory whose entries double as label names: label index i returned by
# predict() maps to image_paths[i].
# NOTE(review): this presumes the recognizer was trained with labels assigned
# from the same (unsorted) os.listdir order — confirm against the trainer.
data_path = '../data'
#os.remove(data_path +'/.DS_Store')
image_paths = os.listdir(data_path)
print('image Paths=',image_paths)
# Load the pre-trained LBPH face recognizer from disk.
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read('modelo/modeloLBPHFace.xml')
# Video to run recognition on, plus the Haar-cascade face detector.
cap = cv2.VideoCapture('../test2.mov')
face_classif = cv2.CascadeClassifier(cv2.data.haarcascades+'haarcascade_frontalface_default.xml')
while True:
    ret,frame = cap.read()
    if ret == False: break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aux_frame = gray.copy()
    faces = face_classif.detectMultiScale(gray,1.3,5)
    for (x,y,w,h) in faces:
        # Crop the detected face and resize to the size used in training.
        rostro = aux_frame[y:y+h,x:x+w]
        rostro = cv2.resize(rostro,(150,150),interpolation= cv2.INTER_CUBIC)
        # .predict returns a (label, confidence) pair for the image
        # (for LBPH, a lower confidence value means a better match).
        result = face_recognizer.predict(rostro)
        cv2.putText(frame,'{}'.format(result),(x,y-5),1,1.3,(255,255,0),1,cv2.LINE_AA)
        if result[1] < 75:
            # Known person: green box with the matching label.
            cv2.putText(frame,'{}'.format(image_paths[result[0]]),(x,y-25),2,1.1,(0,255,0),1,cv2.LINE_AA)
            cv2.rectangle(frame, (x,y),(x+w,y+h),(0,255,0),2)
        else:
            # Unknown person: red box and warning text.
            cv2.putText(frame,'Not a jeancha!',(x,y-20),2,0.8,(0,0,255),1,cv2.LINE_AA)
            cv2.rectangle(frame, (x,y),(x+w,y+h),(0,0,255),2)
    cv2.imshow('frame',frame)
    # ESC quits.
    k = cv2.waitKey(1)
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.