index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
996,500 | 6e6a7281e2b4c3180c2659aaf0d31f3d74b20c4e | class stack:
    def __init__(self):
        # Backing list; the top of the stack is the end of the list.
        self.stack=[]
    def __len__(self):
        """Return the number of elements currently on the stack."""
        return len(self.stack)
    def stack_contents(self):
        """Return the underlying list (note: not a copy — callers can mutate it)."""
        return self.stack
def is_empty(self):
return len(self.stack) == 0
def pop(self, index=None):
if index and int(index) not in range(self.__len__()) :
raise Exception("Index does not exist")
elif not self.is_empty:
raise Exception("Stack is empty")
elif index:
return self.stack.pop(index)
else:
return self.stack.pop()
    def push(self, ele):
        """Push *ele* onto the top of the stack."""
        self.stack.append(ele)
# Small demo: push four items, pop by index, then pop the top.
s = stack()
for item in (2, 6, 3, "lewis"):
    s.push(item)
print(s.stack_contents())
print(s.pop(1))
print(s.stack_contents())
print(s.pop())
print(s.stack_contents())
|
996,501 | 62d7dcb73a42da62b2a2b9c9bda0570409b556a8 |
from django.urls import path
from . import views
# URL routes for the notes app. The `name=` values are referenced elsewhere
# (templates / reverse()); keep them stable.
urlpatterns = [
    path('', views.index, name="url_index"),
    # Note detail / tag detail by primary key.
    path('<int:note_id>', views.note_detail, name="url_detail"),
    path('tag/<int:tag_id>', views.tag_detail, name="url_tag"),
    # Class-based create/update/delete views for notes.
    path('note_create/', views.NoteCreate.as_view(), name="url_note_create"),
    path('<int:obj_id>/note_update/',
         views.NoteUpdate.as_view(), name="url_note_update"),
    path('<int:obj_id>/note_delete/',
         views.NoteDelete.as_view(), name="url_note_delete"),
    path('tag_create/', views.TagCreate.as_view(), name="url_tag_create"),
    path('mynotes/', views.MyNotesByUserListView.as_view(), name='mynotes'),
    path('comment/<int:note_id>', views.add_comment, name = 'add_comment'),
    # Class-based update/delete views for tags.
    path('tag/<int:obj_id>/tag_update/',
         views.TagUpdate.as_view(), name="url_tag_update"),
    path('tag/<int:obj_id>/tag_delete/',
         views.TagDelete.as_view(), name="url_tag_delete"),
]
|
996,502 | 1e845839c1461fb18721d96fee4dd66ece9fcc8f | # encoding: utf-8
# module PyQt5.QtGui
# from C:\Users\Doly\Anaconda3\lib\site-packages\PyQt5\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import sip as __sip
# Auto-generated IDE stub for PyQt5.QtGui.QPaintDevice (generator 1.147).
# Method bodies are placeholders; the real implementations live in the
# compiled QtGui.pyd extension module.
class QPaintDevice(__sip.simplewrapper):
    """ QPaintDevice() """
    def colorCount(self): # real signature unknown; restored from __doc__
        """ colorCount(self) -> int """
        return 0
    def depth(self): # real signature unknown; restored from __doc__
        """ depth(self) -> int """
        return 0
    def devicePixelRatio(self): # real signature unknown; restored from __doc__
        """ devicePixelRatio(self) -> int """
        return 0
    def devicePixelRatioF(self): # real signature unknown; restored from __doc__
        """ devicePixelRatioF(self) -> float """
        return 0.0
    def devicePixelRatioFScale(self): # real signature unknown; restored from __doc__
        """ devicePixelRatioFScale() -> float """
        return 0.0
    def height(self): # real signature unknown; restored from __doc__
        """ height(self) -> int """
        return 0
    def heightMM(self): # real signature unknown; restored from __doc__
        """ heightMM(self) -> int """
        return 0
    def logicalDpiX(self): # real signature unknown; restored from __doc__
        """ logicalDpiX(self) -> int """
        return 0
    def logicalDpiY(self): # real signature unknown; restored from __doc__
        """ logicalDpiY(self) -> int """
        return 0
    def metric(self, QPaintDevice_PaintDeviceMetric): # real signature unknown; restored from __doc__
        """ metric(self, QPaintDevice.PaintDeviceMetric) -> int """
        return 0
    def paintEngine(self): # real signature unknown; restored from __doc__
        """ paintEngine(self) -> QPaintEngine """
        # NOTE(review): generated placeholder returns the name QPaintEngine
        # itself (defined elsewhere in the generated module), not an instance.
        return QPaintEngine
    def paintingActive(self): # real signature unknown; restored from __doc__
        """ paintingActive(self) -> bool """
        return False
    def physicalDpiX(self): # real signature unknown; restored from __doc__
        """ physicalDpiX(self) -> int """
        return 0
    def physicalDpiY(self): # real signature unknown; restored from __doc__
        """ physicalDpiY(self) -> int """
        return 0
    def width(self): # real signature unknown; restored from __doc__
        """ width(self) -> int """
        return 0
    def widthMM(self): # real signature unknown; restored from __doc__
        """ widthMM(self) -> int """
        return 0
    def __init__(self): # real signature unknown; restored from __doc__
        pass
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""
    # QPaintDevice.PaintDeviceMetric enum values (as passed to metric()).
    PdmDepth = 6
    PdmDevicePixelRatio = 11
    PdmDevicePixelRatioScaled = 12
    PdmDpiX = 7
    PdmDpiY = 8
    PdmHeight = 2
    PdmHeightMM = 4
    PdmNumColors = 5
    PdmPhysicalDpiX = 9
    PdmPhysicalDpiY = 10
    PdmWidth = 1
    PdmWidthMM = 3
|
996,503 | 5234bab4e0d635c52c22f14ae4cb9b194bfdb1ae | import RPi.GPIO as GPIO
import dht11
import time
from datetime import datetime
import csv
import sqlite3
# initialize GPIO
GPIO.setwarnings(True)
GPIO.setmode(GPIO.BCM)
instance = dht11.DHT11(pin=17)
try:
    # One-shot read of the DHT11 sensor; log it to SQLite when valid.
    result = instance.read()
    if result.is_valid():
        now = datetime.now()
        # Column order matches the INSERT statement below.
        row = (
            now.strftime("%Y"),
            now.strftime("%m"),
            now.strftime("%d"),
            now.strftime("%H"),
            now.strftime("%M"),
            now.strftime("%S"),
            result.temperature,
            result.humidity,
        )
        conn = sqlite3.connect('data.db')
        try:
            # Parameterized query — the original interpolated values with
            # str.format, which is fragile and injection-prone.
            conn.execute(
                "INSERT INTO temp_and_humi "
                "(year,month,day,hour,minutes,seconds,temperature,humidity) "
                "VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
                row,
            )
            conn.commit()
        finally:
            # Close the connection even if the INSERT fails.
            conn.close()
except KeyboardInterrupt:
    print("Cleanup")
finally:
    # Release the GPIO pins on every exit path (the original only cleaned
    # up on Ctrl-C, leaving pins configured after a normal run).
    GPIO.cleanup()
|
996,504 | ff168f8f063b805f868d7daeb20353b2847b7e18 | '''
注释的作用:
1.对你的代码进行解释说明
2.排错
注意:注释的内容是不会执行的
注释的格式2种:
单行注释:# 快捷键是ctrl+/(?键)
多行注释(块注释、文档注释):前后3个引号"""注释的内容"""
''' |
996,505 | b1370ea4e0624fef4fd6501f9e2c0eb3e176bbc1 | from TextToSpeech import *
from GlobalHelpers import *
from AzureHelpers import *
def startBotGreeting():
    """Speak a randomly chosen greeting and return the spoken text."""
    message = getRandomBotAnswers(botAnswers["greeting"])
    BotSpeak(message)
    return message
def humanIntroduction():
    """Fetch the user's intent, map it to an Introduction reply, speak and return it."""
    intent, intent_json = AzureContinuousIntentFetching()
    reply = mapIntent(intent, intent_json, "Introduction")
    BotSpeak(reply)
    return reply
def askExercise():
    """Fetch the user's intent, map it to an ExerciseSentiment reply, speak and return it."""
    intent, intent_json = AzureContinuousIntentFetching()
    reply = mapIntent(intent, intent_json, "ExerciseSentiment")
    BotSpeak(reply)
    return reply
def startBot():
    """Run the three conversation stages in order: greet, name, exercise."""
    # Greet Stage
    BotSpeak(getRandomBotAnswers(botAnswers["greeting"]))
    # Name Stage
    stage_intent, stage_json = AzureContinuousIntentFetching()
    mapIntent(stage_intent, stage_json, "Introduction")
    # Exercise Stage
    stage_intent, stage_json = AzureContinuousIntentFetching()
    mapIntent(stage_intent, stage_json, "ExerciseSentiment")
# startBot()
|
996,506 | 73dcaa5145d8614cc149ef201e74c22b41b06793 | import math
from display import *
AMBIENT = 0
DIFFUSE = 1
SPECULAR = 2
LOCATION = 0
COLOR = 1
SPECULAR_EXP = 4
#lighting functions
def get_lighting(normal, view, ambient, light, areflect, dreflect, sreflect):
    """Return the clamped RGB color for a point: ambient + diffuse + specular.

    light is a (location, color) pair; *reflect are per-channel reflectance
    triples. The result is clamped to [0, 255] per channel.
    """
    A = calculate_ambient(ambient, areflect)
    D = calculate_diffuse(light, dreflect, normal)
    S = calculate_specular(light, sreflect, view, normal)
    # The original rebound the `light` parameter to the result list and
    # printed it (Python-2 debug leftover); both removed.
    color = [A[i] + D[i] + S[i] for i in range(3)]
    return limit_color(color)
def calculate_ambient(alight, areflect):
    """Ambient term: channel-wise product of ambient light and reflectance, clamped."""
    return limit_color([int(alight[channel] * areflect[channel]) for channel in range(3)])
def calculate_diffuse(light, dreflect, normal):
    """Diffuse term: light color * reflectance * (N . L), clamped to [0, 255]."""
    N = normalize(normal)
    L = normalize(light[LOCATION])
    dot_prod = dot_product(N, L)
    # Use the COLOR constant instead of a magic 1 (consistent with LOCATION
    # above); the Python-2 debug `print diffuse` leftover is removed.
    diffuse = [int(light[COLOR][i] * dreflect[i] * dot_prod) for i in range(3)]
    return limit_color(diffuse)
def calculate_specular(light, sreflect, view, normal):
    # Phong specular term: light color * reflectance * (R . V)^exp, clamped.
    color = []
    color.append( light[1][0] * sreflect[0] )
    color.append( light[1][1] * sreflect[1] )
    color.append( light[1][2] * sreflect[2] )
    N = normalize(normal)
    L = normalize(light[LOCATION])
    V = normalize(view)
    # Light behind the surface contributes no specular highlight.
    if (dot_product(N, L) <= 0):
        return [0,0,0]
    else:
        # R = 2(N.L)N - L : reflection of the light vector about the normal.
        a = [x*2*dot_product(N, L) for x in N]
        b = [x-y for x,y in zip(a,L)]
        # NOTE(review): exponent is hard-coded to 8 although SPECULAR_EXP = 4
        # is defined at module level — confirm which value is intended.
        c = [int(x*(dot_product(b,V)**8)) for x in color]
        return limit_color(c)
def limit_color(color):
    """Clamp each channel of *color* to [0, 255] in place and return the list."""
    for idx, channel in enumerate(color):
        color[idx] = min(255, max(0, channel))
    return color
#vector functions
def normalize(vector):
    """Return a new 3-vector scaled to unit length."""
    mag = (vector[0] ** 2 + vector[1] ** 2 + vector[2] ** 2) ** 0.5
    return [component / mag for component in vector]
def dot_product(a, b):
    """Return the dot product of the first three components of a and b."""
    return sum(a[i] * b[i] for i in range(3))
def calculate_normal(polygons, i):
    """Surface normal of the triangle polygons[i], [i+1], [i+2] (cross product)."""
    p0, p1, p2 = polygons[i], polygons[i + 1], polygons[i + 2]
    # Edge vectors from the first vertex.
    A = [p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]]
    B = [p2[0] - p0[0], p2[1] - p0[1], p2[2] - p0[2]]
    # N = A x B
    return [
        A[1] * B[2] - A[2] * B[1],
        A[2] * B[0] - A[0] * B[2],
        A[0] * B[1] - A[1] * B[0],
    ]
|
996,507 | efd90ac54fe31acfc0644edd6332a09712c9934d | import json
import re
from django.apps import AppConfig
from django.core.handlers.wsgi import WSGIHandler
from django.utils.encoding import force_text
class DjangoExtendedJsonConfig(AppConfig):
    """App config that attaches a lazily-parsed ``json`` property to requests."""
    name = 'djext.json'
    verbose_name = 'JSON tools'

    def ready(self):
        self.wrap_request()

    def wrap_request(self):
        """Install a lazy ``request.json`` property on the WSGI request class."""
        json_mime_type_regex = r'^application/(.*\+)?json$'

        def _load_json(_self):
            # Default to an empty dict so request.json is always usable.
            _self._json = {}
            if _self.method not in ('POST', 'PUT', 'PATCH'):
                return
            if _self._read_started and not hasattr(_self, '_body'):
                _self._post_parse_error = True
                return
            if re.match(json_mime_type_regex, _self.content_type):
                try:
                    _self._json = json.loads(force_text(_self.body))
                except ValueError:
                    # Was a bare ``except``, which hid unrelated bugs. Only
                    # decode/parse failures are expected here: JSONDecodeError
                    # and DjangoUnicodeDecodeError are ValueError subclasses.
                    _self._post_parse_error = True

        def _get_json(_self):
            if not hasattr(_self, '_json'):
                _load_json(_self)
            return _self._json

        def _set_json(_self, json):
            _self._json = json

        setattr(WSGIHandler.request_class, 'json', property(_get_json, _set_json))
|
996,508 | 4899070a424edb09a67f951e1fa9a28e20fd025a | import torch
from network import MobileNetv2
import importlib, pdb
if __name__ == "__main__":
    # Build the local MobileNetv2 and run one dummy forward pass as a sanity check.
    cfg = importlib.import_module('config')
    net = MobileNetv2(cfg)
    net.eval()
    # Fake 1x3x224x224 input batch.
    test = torch.Tensor(3, 224, 224).unsqueeze(0)
    out = net(test)
    # Pretrained checkpoint whose layer names differ from this model's.
    saved = torch.load('mobilenetv2_pretrained.pth')
    weights = net.state_dict()
    # Manual key mapping: our state_dict key -> pretrained checkpoint key.
    _dict = {
        'features.0.0.weight': 'features.0.0.weight',
        'features.0.1.weight': 'features.0.1.weight',
        'features.0.1.bias': 'features.0.1.bias',
        'features.0.1.running_mean': 'features.0.1.running_mean',
        'features.0.1.running_var': 'features.0.1.running_var',
        'features.0.1.num_batches_tracked': 'features.0.1.num_batches_tracked',
        'features.1.block.0.weight': 'features.1.conv.0.weight',
        'features.1.block.1.weight': 'features.1.conv.1.weight',
        'features.1.block.1.bias': 'features.1.conv.1.bias',
        'features.1.block.1.running_mean': 'features.1.conv.1.running_mean',
        'features.1.block.1.running_var': 'features.1.conv.1.running_var',
        'features.1.block.1.num_batches_tracked': 'features.1.conv.1.num_batches_tracked',
        'features.1.block.3.weight': 'features.2.block.0.weight',
        'features.1.block.4.weight': 'features.2.block.1.weight',
        'features.1.block.4.bias': 'features.2.block.1.bias',
        'features.1.block.4.running_mean': 'features.2.block.1.running_mean',
        'features.1.block.4.running_var': 'features.2.block.1.running_var',
        'features.1.block.4.num_batches_tracked': 'features.2.block.1.num_batches_tracked',
        'features.1.block.6.weight': 'features.2.block.3.weight',
        'features.1.block.7.weight': 'features.2.block.4.weight',
        'features.1.block.7.bias': 'features.2.block.4.bias',
        'features.1.block.7.running_mean': 'features.2.block.4.running_mean',
        'features.1.block.7.running_var': 'features.2.block.4.running_var',
        'features.1.block.7.num_batches_tracked': 'features.2.block.4.num_batches_tracked',
        'features.2.block.0.weight': 'features.2.conv.3.weight',
        'features.2.block.1.weight': 'features.2.conv.4.weight',
        'features.2.block.1.bias': 'features.2.conv.4.bias',
        'features.2.block.1.running_mean': 'features.2.conv.4.running_mean',
        'features.2.block.1.running_var': 'features.2.conv.4.running_var',
        'features.2.block.1.num_batches_tracked': 'features.2.conv.4.num_batches_tracked',
        'features.2.block.3.weight': 'features.2.conv.6.weight',
        'features.2.block.4.weight': 'features.2.conv.7.weight',
        'features.2.block.4.bias': 'features.2.conv.7.bias',
        'features.2.block.4.running_mean': 'features.2.conv.7.running_mean',
        'features.2.block.4.running_var': 'features.2.conv.7.running_var',
        'features.2.block.4.num_batches_tracked': 'features.2.conv.7.num_batches_tracked',
    }
    # Compare tensor shapes pairwise; drop into the debugger on any mismatch.
    for key, val in _dict.items():
        print(weights[key].size(), saved[val].size())
        if weights[key].size() != saved[val].size():
            pdb.set_trace()
    # Final breakpoint for interactive inspection.
    pdb.set_trace()
|
996,509 | 1b7630b62fba31e701c845e13aace53b1e324bac | from tkinter import *
# Minimal Tk demo: show a Listbox populated from a Python list.
window = Tk()
window.title("List_box")
# geometry: width x height + x-offset + y-offset
window.geometry('400x300+1000+150')
list1 = ['one', 'two', 'three', 'four']
var = StringVar()
# Tk coerces the Python list into a Tcl list; Listbox shows one entry per item.
var.set(list1)
lb = Listbox(window, listvariable=var)
lb.pack()
window.mainloop()
|
996,510 | bb938b5b49d6d339f2cc64d223ae52258a56d34d | list1 = [3, 6, 8, 9, 1]
#[0, 1, 2, 3, 4]
#[-5,-4,-3,-2,-1]
# get 0 position number
print(list1[0])
print(list1[4])
# Add data to the list with append method
# This method will add value at the end of list
list1.append(2)
print(list1)
#Remove specific value from list
list1.remove(6)
print(list1)
# Insert specific value in the list
list1.insert(3, 10)
print(list1)
#find out max value from the list
print("max value:", max(list1))
# Find out mini value from the list
print("min vallue:", min(list1))
# sort the list, sort method sort the list in place
print("sorted", list1.sort())
print(list1)
#reverse the list, reverse method reverse the list in place
list1.reverse()
print(list1)
# list slicing
print(list1[1:])
print(list1[1:4])
# Negative index
print(list1[-2])
# Reverse with index
list2 = list1[::-1]
print(list2)
print(list1)
#print list with index jump
print(list1[::2])
print(list1[::-2])
print("#"*25)
# Extend the list
list3 = [4, 6, 8, 9]
list4 = [5, 2, 4, 1]
print(list4.extend(list3))
print(list4)
list5 = list4 + list3
print(list5)
print(list4)
print("#"*50)
# Shadow copy and Deep Copy
# Shallow Copy : It pass the refrence of the list
list6 = [5, 7, 23, 45]
list7 = list6
list8 = list7
print(id(list6))
print(id(list7))
list7.append(56)
list8.append(100)
print("list6:", list6)
print("list7:", list7)
print("list8:", list8)
print("#"*50)
# Deep Copy : Copy the whole list , not just passing the refrence
list9 = [4, 78, 55, 45]
list10 = list9.copy()
print("list9 :", id(list9))
print("list10 :", id(list10))
list10.append(200)
print("list9 ",list9)
print("list10 ",list10)
# Delete the list
# print("#"*50)
# list_temp = [3, 4, 6, 7]
# print(list_temp)
# del list_temp
# print(list_temp)
# Pop the element : It remove the element from the list and return he value
print("#"*50)
list_pop = [3, 6, 7, 8, 9]
print("list_pop :", list_pop)
list_pop_new = []
print(list_pop)
#data = list_pop.pop()
#print(data)
#print(list_pop)
# len method to get the length of any data, list, tuple, dictionary, string
list_length = len(list_pop)
for i in range(list_length):
value = list_pop.pop()
list_pop_new.append(value)
print("list_pop :", list_pop)
print("list_pop_new:", list_pop_new)
# 1. Get the square of each number of the list
# list = [5, 6, 8, 9]
# output = [25, 3, 64, 81]
# 2. Get the sum of all the numbers of list
# list = [4, 7 ,9 ,10]
# output = 30
# 3. Check the type of ech element and append in an other list
#-> list = [3, 6, 8, 'a', 2.5, [2, 5, 6]]
# -> outlist = [int, int, int, str, float, list]
# 4. Separate out the even and odd number from the list.
# list = [2, 5, 6, 8, 9, 12, 11]
# list1 = [2, 6, 8, 12]
# list2 = [5, 9, 11]
# 5. Get complete string from the list
# list = ['hello', 'itpd', 'students', 'todays' , 'date', 26, 'june']
# output = "hello itod students todays datet is 26 june"
|
996,511 | 6cc44862b33a24edb2331217e6f67582da74df46 | import math
# Read an integer; for non-negative input print its square root and square,
# otherwise report that it is negative (messages in Portuguese).
num1 = int(input("Digite o um Numero :-> "))
if num1 >= 0:
    print(f"O numero {num1} é positvo e eis a sua Raiz Quadrada:-> " + str(math.sqrt(num1)) +
          " E eis ele elevado ao quadrado :-> " + str(num1 ** 2))
else:
    print(f"O numero {num1} tá negativo")
|
996,512 | b499312d32102e561a6fbe0567f009dc05b308ed | from logging import debug
from proto import contest_pb2
from proto import contest_pb2_grpc
from util.data import contest_cache
class Contest(contest_pb2_grpc.ContestDataServiceServicer):
    """gRPC servicer that serves cached contest data for a platform/handle."""
    def GetContestData(self, request, context):
        # NOTE(review): .get() returns None on a cache miss, which would make
        # the data[...] lookups below raise TypeError — confirm that misses
        # cannot happen or handle them explicitly.
        data = contest_cache[request.platform].get(request.handle)
        return contest_pb2.ContestData(
            handle = data['handle'],
            profile_url = data['profile_url'],
            rating = data['rating'],
            length = data['length'],
            data = data['data']
        )
996,513 | ac9c8e931be69eff88daa0c1b6770d6ff60b17b1 | from collections import Counter
class Solution:
    """LeetCode 316 — remove duplicate letters so every letter appears once
    and the result is the smallest in lexicographic order."""

    def removeDuplicateLetters_stack(self, s: str) -> str:
        """Iterative solution using a monotonic stack.

        Renamed from removeDuplicateLetters: the second definition below used
        to shadow this one, silently making it dead code.
        """
        stack = []
        counter = Counter(s)
        for letter in s:
            counter[letter] -= 1
            if letter in stack:
                continue
            # Pop larger letters that still occur later in s.
            while stack and letter < stack[-1] and counter[stack[-1]] > 0:
                stack.pop()
            stack.append(letter)
        return ''.join(stack)

    # Recursive solution (was commented "재귀" = "recursion"). This remains the
    # active implementation, exactly as in the original file where it shadowed
    # the first definition.
    def removeDuplicateLetters(self, s: str) -> str:
        for char in sorted(set(s)):
            suffix = s[s.index(char):]
            # char can lead the answer only if its suffix still contains
            # every distinct letter of s.
            if set(s) == set(suffix):
                return char + self.removeDuplicateLetters(suffix.replace(char, ''))
        return ''
if __name__ == '__main__':
    # Print each computed answer next to its expected value.
    solver = Solution()
    for word, expected in (("bcabc", "abc"), ("cbacdcbc", "acdb")):
        print(solver.removeDuplicateLetters(word), expected)
|
996,514 | dd13f037225e08ab0e1db8f47492358213dd5913 | import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.support import expected_conditions as EC
class automation(unittest.TestCase):
    """UI tests: post a comment on kumparan.com after Google/Facebook login.

    The three scenarios shared large copy-pasted step blocks; those steps are
    factored into _-prefixed helpers so each test reads as its scenario only.
    """

    # Centralized locators shared by every scenario.
    NEWS_ITEM = '//*[@id="content"]/div/div/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]'
    COMMENT_SUBMIT = '//*[@id="content"]/div/div/div/div/div[2]/div/div/div[1]/div/div[2]/div[3]/div/div[1]/div/div/div/div/div/div/div[2]/div[3]/button'
    GOOGLE_LOGIN = '//*[@id="content"]/div/div/div/div[2]/div[2]/div/div[1]/div[2]/div[2]/button'
    FACEBOOK_LOGIN = '//*[@id="content"]/div/div/div/div[2]/div[2]/div/div[1]/div[2]/div[1]/span/button'

    # SECURITY: hard-coded test-account credentials (carried over from the
    # original file) — these should be moved to environment variables or a
    # secrets store, not committed to source control.
    GOOGLE_EMAIL = 'mich.kotamori69@gmail.com'
    GOOGLE_PASSWORD = 'michkotamori'
    FACEBOOK_EMAIL = 'filza.94@gmail.com'
    FACEBOOK_PASSWORD = 'test123456'

    def setUp(self):
        self.driver = webdriver.Chrome()
        self.driver.set_page_load_timeout(30)

    def _open_news(self):
        """Visit https://kumparan.com/ and click into a news article."""
        driver = self.driver
        driver.get('https://kumparan.com/')
        self.assertIn('kumparan', driver.title)
        driver.find_element_by_xpath(self.NEWS_ITEM).click()
        time.sleep(5)

    def _insert_comment(self):
        """Type 'test' into the comment box and submit it."""
        driver = self.driver
        comment = driver.find_element_by_xpath('//*[@id="newCommentTextArea"]')
        comment.send_keys('test')
        wait(driver, 50)
        driver.find_element_by_xpath(self.COMMENT_SUBMIT).click()

    def _switch_to_signin_popup(self):
        """Move WebDriver focus to the OAuth popup; return the main handle."""
        driver = self.driver
        # (The original busy-waited for current_window_handle too, which can
        # never be falsy; only the popup needs polling.)
        main_window_handle = driver.current_window_handle
        signin_window_handle = None
        while not signin_window_handle:  # poll until the popup window appears
            for handle in driver.window_handles:
                if handle != main_window_handle:
                    signin_window_handle = handle
                    break
        driver.switch_to.window(signin_window_handle)
        wait(driver, 50)
        return main_window_handle

    def _google_enter_credentials(self):
        """Fill the Google sign-in form: email page, then password page."""
        driver = self.driver
        email = driver.find_element_by_id('identifierId')
        email.send_keys(self.GOOGLE_EMAIL)
        wait(driver, 50).until(EC.element_to_be_clickable((By.ID, 'identifierNext'))).click()
        time.sleep(2)
        password = driver.find_element_by_name('password')
        password.send_keys(self.GOOGLE_PASSWORD)
        wait(driver, 50).until(EC.element_to_be_clickable((By.ID, 'passwordNext'))).click()

    def test_see_news_comment_login_with_google(self):
        driver = self.driver
        self._open_news()
        self._insert_comment()
        # Login with Google Plus.
        wait(driver, 50).until(EC.element_to_be_clickable((By.XPATH, self.GOOGLE_LOGIN))).click()
        main_window_handle = self._switch_to_signin_popup()
        self._google_enter_credentials()
        driver.switch_to.window(main_window_handle)
        time.sleep(2)
        # Insert comment again, now authenticated.
        self._insert_comment()

    def test_see_news_comment_login_with_facebook(self):
        driver = self.driver
        self._open_news()
        self._insert_comment()
        # Login with Facebook.
        wait(driver, 50).until(EC.element_to_be_clickable((By.XPATH, self.FACEBOOK_LOGIN))).click()
        main_window_handle = self._switch_to_signin_popup()
        email = driver.find_element_by_xpath('//*[@id="email"]')
        email.send_keys(self.FACEBOOK_EMAIL)
        password = driver.find_element_by_xpath('//*[@id="pass"]')
        password.send_keys(self.FACEBOOK_PASSWORD)
        wait(driver, 50).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="u_0_0"]'))).click()
        driver.switch_to.window(main_window_handle)
        time.sleep(2)
        # Insert comment again, now authenticated.
        self._insert_comment()

    def test_negative_test(self):
        """Submitting an empty Google email must show a validation error."""
        driver = self.driver
        self._open_news()
        self._insert_comment()
        wait(driver, 50).until(EC.element_to_be_clickable((By.XPATH, self.GOOGLE_LOGIN))).click()
        main_window_handle = self._switch_to_signin_popup()
        email = driver.find_element_by_id('identifierId')
        email.send_keys('')
        wait(driver, 50).until(EC.element_to_be_clickable((By.ID, 'identifierNext'))).click()
        self.assertIn('email', driver.find_element_by_xpath('//*[@id="view_container"]/div/div/div[2]/div/div[1]/form/content/div[1]/div/div[2]/div[2]').text)
        time.sleep(2)
        # Retry with valid credentials.
        self._google_enter_credentials()
        driver.switch_to.window(main_window_handle)
        time.sleep(2)

    def tearDown(self):
        time.sleep(5)
        self.driver.close()
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
996,515 | 4497865380745f8679a350244522c264af02b06e | #!/usr/bin/env python3
import re
# Per-run footprint values for each benchmark, indexed by run size
# (units not stated in this file — presumably MB; confirm against results.txt).
g5_footprints = [4158,4413,4669,4924,5180,5436,5691,5947,6203,6459]
xb_footprints = [4156,4412,4668,4924,5180,5435,5691,5947,6203,6459]
bt_footprints = [4154,4409,4664,4919,5175,5430,5685,5940,6196,6451]
# Benchmark short codes, display names, and footprint lookup table.
benches = ['g5','xb','bt']
bench_names = {'g5':'Graph500','xb':'XSBench','bt':'BTree'}
footprints_lists = {'g5':g5_footprints,'xb':xb_footprints,'bt':bt_footprints}
# Table 4: per-benchmark swap totals for the "cg" and "ice" platforms, and
# the percentage improvement of ice over cg.
resultfile = "results.txt"
results = {}
with open(resultfile) as f:
    for line in f:
        # CSV row: bench, platform, size, ..., swpin, swpin_sd, swpout, swpout_sd
        fields = line.strip().split(',')
        bench, platform, size = fields[0], fields[1], fields[2]
        swpin = float(fields[-4])
        swpout = float(fields[-2])
        # setdefault replaces the original three-level "if key not in" chain;
        # the original also pre-filled {"swpin": [], ...} lists that were
        # immediately overwritten — dropped.
        entry = results.setdefault(bench, {}).setdefault(size, {})
        entry[platform] = {
            "swpin": swpin,
            "swpin_sd": float(fields[-3]),
            "swpout": swpout,
            "swpout_sd": float(fields[-1]),
            "total_swp": swpin + swpout,
        }
print("benchmark,footprints,cgswap,iceswap,diff")
for bench in benches:
    footprints = footprints_lists[bench]
    for size in results[bench]:
        curr = results[bench][size]
        cg_total = curr["cg"]["total_swp"]
        ice_total = curr["ice"]["total_swp"]
        print("{},{},{:.2f},{:.2f},{:.2f}".format(
            bench_names[bench], footprints[int(size)],
            cg_total, ice_total,
            (cg_total - ice_total) / cg_total * 100))
996,516 | e1d28b2f77b061657501a607654ba0f40ec1c059 | import re
import numpy as np
import pandas as pd
def string_has_only_letters(input_string):
    """Return True when *input_string* contains no digit characters.

    Note: despite the name, any non-digit characters (spaces, punctuation)
    are accepted. Missing values (NaN/None) yield False.
    """
    # pd.isna replaces the fragile `input_string is not np.NaN` identity
    # check (np.NaN is also removed in NumPy 2.0) and additionally covers
    # None, which previously crashed the any() iteration.
    if pd.isna(input_string):
        return False
    return not any(char.isdigit() for char in input_string)
# function for cleaning short date format
def clean_short_date(value):
    """Normalize slash-separated date strings to a 4-digit year.

    'dd/mm/yy' / 'mm/dd/yy' (or a 4-digit year) values become
    2000 + the last two year digits; NaN and non-date values are
    returned unchanged.
    """
    # Anchored pattern with an explicit year group. The original unanchored
    # r"\d*\/\d*\/\d*" also matched strings like '//' or embedded dates and
    # then crashed on int(value[-2:]) when the tail was not numeric.
    pattern = r"^\d{1,2}/\d{1,2}/(\d{2}|\d{4})$"
    if pd.isna(value):
        return value
    match = re.match(pattern, str(value))
    if match:
        # 2000 + last two digits of the year — the same rule the original
        # applied via value[-2:].
        return 2000 + int(match.group(1)[-2:])
    return value
def year_cleanup(df):
    """Clean the free-text 'year' column of *df*.

    Drops rows whose year is 'submitted' or purely alphabetic, splits
    multi-valued entries into one row per value, re-merges on 'id',
    normalizes short dates and several known typos, and returns the
    cleaned frame (original 'year' column replaced).
    """
    df_copy = df.copy()
    # drop rows that contain status 'submitted' in column 'year'
    df_copy = df_copy[~df_copy['year'].str.contains(r'submitted', na=False, flags=re.IGNORECASE)]
    # drop rows whose 'year' is purely non-numeric text (e.g. 'Marburg')
    df_copy = df_copy[~df_copy['year'].apply(string_has_only_letters)]
    # extract numbers of entries in 'year' except for 'submitted' entries:
    # split space-separated values into individual (id, year) rows
    df_years = df_copy
    year = []
    for _, row in df_years.iterrows():
        _id = row['id']
        names = str(row['year']).split(' ')
        for name in names:
            year.append([_id, name.strip()])
    year = pd.DataFrame(year, columns=['id', 'year'])
    # keep only values without letters
    df_years = year[
        ~year['year'].str.contains('[A-Z]', regex=True, flags=re.I)]  # .to_csv("year", sep='\t', encoding='utf-8')
    # get list of rows with NaN in column 'year'
    df_nan = df.copy().loc[df.copy()['year'].isnull()]
    df_nan = pd.DataFrame(df_nan, columns=['id', 'year'])
    # concatenate table of NaN entries in column 'year' and table with numerical entries
    df_concat = pd.concat([df_years, df_nan], ignore_index=True, sort=True)
    # join tables on id, add cleaned 'year' data to complete table
    result = pd.merge(df_copy,
                      df_concat[['id', 'year']],
                      on='id')
    # drop obsolete 'year' column (the merge produced year_x / year_y)
    result = result.drop(columns=['year_x'])
    # rename column
    result = result.rename(index=str, columns={"year_y": "year"})
    # replace values of '08/11/07' structure
    result['year'] = result['year'].apply(clean_short_date)
    # replace known wrong values (typos observed in the data)
    result.loc[result.year == '20188', 'year'] = 2018
    result.loc[result.year == '23012', 'year'] = 2012
    result.loc[result.year == '2918', 'year'] = 2018
    # replace wrong value of '16'
    result.loc[result.year == '16', 'year'] = 2016
    # nullifying '9.' as entry in column 'year'
    result.loc[result.year == '9.', 'year'] = np.NaN
    return result
|
996,517 | 22de63dfa5d29580946bfde77532351cb722fa8c | import pytest
# Test configuration options for regression tests.
# Keys are consumed by the shared regression-test harness; exact semantics
# are defined where the config is read (executable name, MPI launch options,
# whether to regenerate reference results, etc.).
config = {
    'event' : False,
    'exe': 'openmc',
    'mpi': False,
    'mpiexec': 'mpiexec',
    'mpi_np': '2',
    'update': False,
    'build_inputs': False
}
def assert_atoms_equal(res_ref, res_test, tol=1e-5):
    """Assert every material/nuclide atom count in res_test matches res_ref
    to within relative tolerance *tol*."""
    first = res_test[0]
    for mat in first.index_mat:
        for nuc in first.index_nuc:
            _, actual = res_test.get_atoms(mat, nuc)
            _, expected = res_ref.get_atoms(mat, nuc)
            assert actual == pytest.approx(expected, rel=tol), \
                f'Atoms not equal for material {mat}, nuclide {nuc}\n' \
                f'y_ref={expected}\ny_test={actual}'
def assert_reaction_rates_equal(res_ref, res_test, tol=1e-5):
    """Assert every (material, nuclide, reaction) rate in res_test matches
    res_ref to within relative tolerance *tol*."""
    for reactions in res_test[0].rates:
        for mat in reactions.index_mat:
            for nuc in reactions.index_nuc:
                for rx in reactions.index_rx:
                    actual = res_test.get_reaction_rate(mat, nuc, rx)[1]
                    expected = res_ref.get_reaction_rate(mat, nuc, rx)[1]
                    assert actual == pytest.approx(expected, rel=tol), \
                        f'Reaction rate not equal for material {mat}, nuclide '\
                        f'{nuc}, {rx}\ny_ref={expected}\ny_test={actual}'
|
996,518 | 1cac4fbd6d4bb18902e9d4413ae6308613771620 | import os
import sys
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image
import random
from torchvision import transforms
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.lmdb_utils import *
from utils.image_utils import *
from utils import YOLO_utils as YOLO
from utils import FPHA_utils as FPHA
class YOLOV2Dataset_VOC(data.Dataset):
def __init__(self, conf, train_mode, model, deterministic):
super(YOLOV2Dataset_VOC, self).__init__()
self.conf = conf
with open(self.conf["root"], 'r') as file:
self.lines = file.readlines()
self.shape = (conf["img_width"], conf["img_height"])
self.is_train = train_mode and conf["split"] == "train"
self.deterministic = deterministic
if self.deterministic:
random.seed(0)
self.output_imgpath = not train_mode
if self.conf["len"] == "max":
self.num_data = len(self.lines)
else:
self.num_data = self.conf["len"]
if self.is_train:
self.batch_size = conf["batch_size"]
self.num_workers = conf["num_workers"]
self.is_aug = self.conf["aug"]
if self.is_aug:
self.jitter = self.conf["jitter"]
self.hue = self.conf["hue"]
self.sat = self.conf["sat"]
self.exp = self.conf["exp"]
self.rot_deg = self.conf["rot_deg"]
self.scale_jitter = self.conf["scale_jitter"]
self.is_flip = self.conf["flip"]
self.shear = self.conf["shear"]
def aug(self, img, labels):
# marvis implementation
if self.deterministic:
random.seed(0)
img, ofs_info = jitter_img(img, self.jitter, self.shape)
if self.is_flip:
img, flip = flip_img(img)
else:
flip = 0
img = distort_image_HSV(img, self.hue, self.sat, self.exp)
max_boxes = 50
new_labels = np.zeros((max_boxes, 5))
fill_idx = 0
for i, box in enumerate(labels):
x1 = box[1] - box[3]/2
y1 = box[2] - box[4]/2
x2 = box[1] + box[3]/2
y2 = box[2] + box[4]/2
pts = np.asarray([(x1, y1), (x2, y2)])
jit = jitter_points(pts, ofs_info)
new_width = (jit[1, 0] - jit[0, 0])
new_height = (jit[1, 1] - jit[0, 1])
if new_width < 0.001 or new_height < 0.001:
continue
new_labels[fill_idx][0] = box[0]
new_labels[fill_idx][1] = (jit[0, 0] + jit[1, 0])/2
new_labels[fill_idx][2] = (jit[0, 1] + jit[1, 1])/2
new_labels[fill_idx][3] = new_width
new_labels[fill_idx][4] = new_height
if flip:
new_labels[fill_idx][1] = 0.999 - new_labels[fill_idx][1]
fill_idx += 1
return img, new_labels
def aug_plus(self, img, labels):
#ultralytics implementation
# add rotation, shearing
img = distort_image_HSV(img, self.hue, self.sat, self.exp)
img = np.asarray(img)
h, w, _ = img.shape
img, ratio, padw, padh = letterbox(img, height=self.shape[1])
aug_labels = labels.copy().astype("float32")
aug_labels[:, 1] = ratio * w * (labels[:, 1] - labels[:, 3] / 2) + padw
aug_labels[:, 2] = ratio * h * (labels[:, 2] - labels[:, 4] / 2) + padh
aug_labels[:, 3] = ratio * w * (labels[:, 1] + labels[:, 3] / 2) + padw
aug_labels[:, 4] = ratio * h * (labels[:, 2] + labels[:, 4] / 2) + padh
img, aug_labels = random_affine(img,
aug_labels,
degrees=(-self.rot_deg, self.rot_deg),
translate=(self.jitter, self.jitter),
scale=(1 - self.scale_jitter, 1 + self.scale_jitter),
shear=(-self.shear, self.shear))
aug_labels[:, 1:5] = YOLO.xyxy2xywh(aug_labels[:, 1:5]) / self.shape[1]
if self.is_flip and random.random() > 0.5:
img = np.fliplr(img)
aug_labels[:, 1] = 1 - aug_labels[:, 1]
max_boxes = 50
new_labels = np.zeros((max_boxes, 5))
new_labels[:len(aug_labels), :] = aug_labels
return img, new_labels
def __getitem__(self, index):
    """Load sample *index*.

    Returns (img, labels) — CHW float image in [0, 1] plus a (50, 5)
    zero-padded label array — or (img, imgpath) when
    self.output_imgpath is set (prediction mode).

    Fix: the original duplicated the resize-and-pad code in two
    byte-identical branches (train-without-aug and eval); collapsed
    into one.
    """
    imgpath = self.lines[index].rstrip()
    img = Image.open(imgpath).convert('RGB')
    # Label file sits next to the image with the standard YOLO layout.
    labpath = imgpath.replace('images', 'labels').replace('JPEGImages', 'labels').replace('.jpg', '.txt').replace('.png','.txt')
    labels = np.loadtxt(labpath)  # rows of (class, x_cen, y_cen, w, h)
    if len(labels.shape) == 1:
        # Single-box files load as a flat vector; keep a 2-D shape.
        labels = np.expand_dims(labels, axis=0)
    if self.is_train and self.is_aug:
        img, labels = self.aug(img, labels)
    else:
        # Eval path and non-augmented training: resize only, and pad the
        # labels to a fixed 50 rows so samples can be batched
        # (assumes at most 50 boxes per image).
        img = img.resize(self.shape)
        max_boxes = 50
        new_labels = np.zeros((max_boxes, 5))
        new_labels[:len(labels), :] = labels
        labels = new_labels
    img = np.asarray(img)
    img = (img / 255.0)
    img = imgshape2torch(img)
    if self.output_imgpath:
        # output imgpath only when doing prediction
        return (img, imgpath)
    return (img, labels)
def __len__(self):
    # Dataset size as configured (conf["len"] may cap it below the
    # number of lines in the list file).
    return self.num_data
class YOLOV2Dataset_FPHA(data.Dataset):
    """FPHA hand-detection dataset yielding (img, bbox) pairs.

    Images are read from conf["img_dir"]; the single hand bounding box
    per image comes from an LMDB store keyed by filename.  The box is
    treated as (x_cen, y_cen, w, h) — presumably normalized to [0, 1];
    confirm against the LMDB writer.
    """
    def __init__(self, conf, train_mode, model, deterministic):
        super(YOLOV2Dataset_FPHA, self).__init__()
        self.conf = conf
        self.keys = get_keys(os.path.join(self.conf["save_prefix"] + "_keys_cache.p"))
        # LMDB env is opened lazily in the worker process (see __init_db).
        self.bbox_env = None
        self.shape = (conf["img_width"], conf["img_height"])
        self.is_train = train_mode and conf["split"] == "train"
        if self.conf["len"] == "max":
            self.num_data = len(self.keys)
        else:
            self.num_data = self.conf["len"]
        if self.is_train:
            self.batch_size = conf["batch_size"]
            self.num_workers = conf["num_workers"]
            self.is_aug = self.conf["aug"]
            self.is_flip = self.conf["flip"]
            if self.is_aug:
                self.jitter = self.conf["jitter"]
                self.hue = self.conf["hue"]
                self.sat = self.conf["sat"]
                self.exp = self.conf["exp"]
    def __init_db(self):
        # necessary for loading env into dataloader
        # https://github.com/chainer/chainermn/issues/129
        self.bbox_env = get_env(os.path.join(self.conf["save_prefix"] + "_bbox.lmdb"))
    def aug(self, img, labels):
        """Random jitter/flip/HSV distortion with the bbox remapped to match.

        If jitter collapses the box (w or h < 0.001) the augmentation is
        discarded and the plainly-resized image with the original box is
        returned instead.
        """
        new_img, ofs_info = jitter_img(img, self.jitter, self.shape)
        if self.is_flip:
            new_img, flip = flip_img(new_img)
        else:
            flip = 0
        new_img = distort_image_HSV(new_img, self.hue, self.sat, self.exp)
        # Center/size -> two corner points, jitter them, convert back.
        x1 = labels[0] - labels[2]/2
        y1 = labels[1] - labels[3]/2
        x2 = labels[0] + labels[2]/2
        y2 = labels[1] + labels[3]/2
        pts = np.asarray([(x1, y1), (x2, y2)])
        jit = jitter_points(pts, ofs_info)
        new_x_cen = (jit[0, 0] + jit[1, 0])/2
        new_y_cen = (jit[0, 1] + jit[1, 1])/2
        new_width = (jit[1, 0] - jit[0, 0])
        new_height = (jit[1, 1] - jit[0, 1])
        new_labels = np.asarray([new_x_cen, new_y_cen, new_width, new_height])
        if flip:
            # Mirror the x center (0.999 keeps the value strictly below 1).
            new_labels[0] = 0.999 - new_labels[0]
        if new_width < 0.001 or new_height < 0.001:
            new_img = img.resize(self.shape)
            new_labels = labels
        return new_img, new_labels.astype("float32")
    def __getitem__(self, index):
        """Return (img, labels): CHW float image in [0, 1] and its bbox."""
        if self.bbox_env is None:
            self.__init_db()
        key = self.keys[index]
        labels = read_lmdb_env(key, self.bbox_env, "float32", 4)
        img = Image.open(os.path.join(self.conf["img_dir"], key))
        if self.is_train:
            if self.is_aug:
                img, labels = self.aug(img, labels)
            else:
                img = img.resize(self.shape)
        else:
            img = img.resize(self.shape)
        img = np.asarray(img)
        img = (img / 255.0)
        img = imgshape2torch(img)
        return (img, labels)
    def __len__(self):
        # Dataset length as configured (conf["len"] may cap it).
        return self.num_data
class YOLOV2Dataset_FPHA_reg(data.Dataset):
    """FPHA dataset yielding (img, hand bbox, normalized uvd hand joints).

    Like YOLOV2Dataset_FPHA, but additionally loads the (21, 3) xyz joint
    ground truth, converts it to color-camera uvd coordinates, scales u/v
    to the unit square and normalizes depth by FPHA.REF_DEPTH.
    """
    def __init__(self, conf, train_mode, model, deterministic):
        super(YOLOV2Dataset_FPHA_reg, self).__init__()
        self.conf = conf
        self.keys = get_keys(os.path.join(self.conf["save_prefix"] + "_keys_cache.p"))
        # LMDB envs are opened lazily per worker process (see __init_db).
        self.bbox_env = None
        self.xyz_gt_env = None
        self.shape = (conf["img_width"], conf["img_height"])
        self.is_train = train_mode and conf["split"] == "train"
        if self.conf["len"] == "max":
            self.num_data = len(self.keys)
        else:
            self.num_data = self.conf["len"]
        if self.is_train:
            self.batch_size = conf["batch_size"]
            self.num_workers = conf["num_workers"]
            self.is_aug = self.conf["aug"]
            self.is_flip = self.conf["flip"]
            if self.is_aug:
                self.jitter = self.conf["jitter"]
                self.hue = self.conf["hue"]
                self.sat = self.conf["sat"]
                self.exp = self.conf["exp"]
    def __init_db(self):
        # necessary for loading env into dataloader
        # https://github.com/chainer/chainermn/issues/129
        self.bbox_env = get_env(os.path.join(self.conf["save_prefix"] + "_bbox.lmdb"))
        self.xyz_gt_env = get_env(os.path.join(self.conf["save_prefix"] + "_xyz_gt.lmdb"))
    def aug(self, img, labels, uvd_gt):
        """Jitter/flip/HSV-distort img, remapping both the bbox and joints.

        Falls back to a plain resize with the original annotations when
        the jittered box collapses (w or h < 0.001).
        """
        new_img, ofs_info = jitter_img(img, self.jitter, self.shape)
        if self.is_flip:
            new_img, flip = flip_img(new_img)
        else:
            flip = 0
        new_img = distort_image_HSV(new_img, self.hue, self.sat, self.exp)
        # Box center/size -> corner points, jitter, convert back.
        x1 = labels[0] - labels[2]/2
        y1 = labels[1] - labels[3]/2
        x2 = labels[0] + labels[2]/2
        y2 = labels[1] + labels[3]/2
        pts = np.asarray([(x1, y1), (x2, y2)])
        jit = jitter_points(pts, ofs_info)
        new_uvd_gt = jitter_points(uvd_gt.copy(), ofs_info)
        new_x_cen = (jit[0, 0] + jit[1, 0])/2
        new_y_cen = (jit[0, 1] + jit[1, 1])/2
        new_width = (jit[1, 0] - jit[0, 0])
        new_height = (jit[1, 1] - jit[0, 1])
        new_labels = np.asarray([new_x_cen, new_y_cen, new_width, new_height])
        if flip:
            # Mirror x for both the box center and every joint.
            new_labels[0] = 0.999 - new_labels[0]
            new_uvd_gt[:, 0] = 0.999 - new_uvd_gt[:, 0]
        if new_width < 0.001 or new_height < 0.001:
            new_img = img.resize(self.shape)
            new_labels = labels
            new_uvd_gt = uvd_gt
        return new_img, new_labels.astype("float32"), new_uvd_gt.astype("float32")
    def __getitem__(self, index):
        """Return (img, bbox, uvd_gt) for sample *index*."""
        # BUG FIX: the original test `self.bbox_env or self.xyz_gt_env is
        # None` parsed as `self.bbox_env or (self.xyz_gt_env is None)`,
        # which stays truthy once bbox_env is open, so the LMDB
        # environments were needlessly re-opened for every sample.
        if self.bbox_env is None or self.xyz_gt_env is None:
            self.__init_db()
        key = self.keys[index]
        labels = read_lmdb_env(key, self.bbox_env, "float32", 4)
        xyz_gt = read_lmdb_env(key, self.xyz_gt_env, "float32", (21, 3))
        # Camera-space xyz -> color-image uvd, scaled to the unit square;
        # depth normalized by the reference depth.
        uvd_gt = FPHA.xyz2uvd_color(xyz_gt)
        uvd_gt = scale_points_WH(uvd_gt, (FPHA.ORI_WIDTH, FPHA.ORI_HEIGHT), (1,1))
        uvd_gt[..., 2] /= FPHA.REF_DEPTH
        img = Image.open(os.path.join(self.conf["img_dir"], key))
        if self.is_train:
            if self.is_aug:
                img, labels, uvd_gt = self.aug(img, labels, uvd_gt)
            else:
                img = img.resize(self.shape)
        else:
            img = img.resize(self.shape)
        img = np.asarray(img)
        img = (img / 255.0)
        img = imgshape2torch(img)
        return (img, labels, uvd_gt)
    def __len__(self):
        return self.num_data
996,519 | 14e54684f3fa446d9f8706f4dd0320d28e57bcfb | import re
from morepath.publish import resolve_model as _resolve_model
from ..interfaces import ISchema
import jsl
import jsonobject
import dataclasses
from copy import copy
import typing
from datetime import datetime, date
def resolve_model(request):
    """Re-run morepath's model resolution for *request*'s own path.

    A fresh request is built from a copy of the WSGI environ so the
    original request object stays untouched; the resolved context gets
    the live request attached before being returned.
    """
    clone = request.app.request_class(
        request.environ.copy(), request.app, path_info=request.path)
    context = _resolve_model(clone)
    context.request = request
    return context
def jsonobject_property_to_jsl_field(
    prop: jsonobject.JsonProperty, nullable=False) -> jsl.BaseField:
    """Map one jsonobject property onto the equivalent jsl schema field.

    `nullable` is forwarded only into nested document conversions;
    scalar fields ignore it (callers wrap them via _set_nullable).
    Raises KeyError for property types with no mapping.
    """
    # NOTE(review): DateProperty also maps to DateTimeField — presumably
    # jsl has no date-only field; confirm before relying on it.
    if isinstance(prop, jsonobject.DateProperty):
        return jsl.DateTimeField(name=prop.name, required=prop.required)
    if isinstance(prop, jsonobject.DateTimeProperty):
        return jsl.DateTimeField(name=prop.name, required=prop.required)
    if isinstance(prop, jsonobject.StringProperty):
        return jsl.StringField(name=prop.name, required=prop.required)
    if isinstance(prop, jsonobject.IntegerProperty):
        return jsl.IntField(name=prop.name, required=prop.required)
    if isinstance(prop, jsonobject.FloatProperty):
        return jsl.NumberField(name=prop.name, required=prop.required)
    if isinstance(prop, jsonobject.BooleanProperty):
        return jsl.BooleanField(name=prop.name, required=prop.required)
    if isinstance(prop, jsonobject.DictProperty):
        # A typed dict becomes a nested document; an untyped one a DictField.
        if prop.item_wrapper:
            subtype = jsonobject_to_jsl(
                prop.item_wrapper.item_type, nullable=nullable)
            return jsl.DocumentField(name=prop.name,
                                     document_cls=subtype,
                                     required=prop.required)
        return jsl.DictField(name=prop.name, required=prop.required)
    if isinstance(prop, jsonobject.ListProperty):
        # Typed lists dispatch on the wrapper's element type.
        if prop.item_wrapper:
            if isinstance(prop.item_wrapper, jsonobject.ObjectProperty):
                if issubclass(prop.item_wrapper.item_type, jsonobject.JsonObject):
                    subtype = jsl.DocumentField(
                        document_cls=jsonobject_to_jsl(prop.item_wrapper.item_type), nullable=nullable)
                elif isinstance(prop.item_wrapper.item_type, jsonobject.JsonProperty):
                    subtype = jsonobject_property_to_jsl_field(
                        prop.item_wrapper.item_type)
                else:
                    raise KeyError(prop.item_wrapper.item_type)
            elif isinstance(prop.item_wrapper, jsonobject.StringProperty):
                subtype = jsl.StringField(name=prop.name)
            elif isinstance(prop.item_wrapper, jsonobject.IntegerProperty):
                subtype = jsl.IntField(name=prop.name)
            elif isinstance(prop.item_wrapper, jsonobject.FloatProperty):
                subtype = jsl.NumberField(name=prop.name)
            elif isinstance(prop.item_wrapper, jsonobject.DictProperty):
                subtype = jsl.DictField(name=prop.name)
            else:
                raise KeyError(prop.item_wrapper)
            return jsl.ArrayField(items=subtype, required=prop.required)
        return jsl.ArrayField(name=prop.name, required=prop.required)
    raise KeyError(prop)
# Sentinel distinguishing "metadata did not set required" from any real value.
_marker = object()


def dataclass_get_type(field):
    """Describe a dataclass *field* for schema generation.

    Returns a dict with keys:
      type     -- unwrapped runtime type (Optional[X] unwraps to X;
                  typing.List[...] reports list)
      required -- False for Optional[X], unless the field's 'morpfw'
                  metadata overrides it
      metadata -- merged 'morpfw' metadata
      schema   -- (typed list fields only) the element type
      name     -- (list fields only) the field name
    """
    metadata = {
        'required': _marker,
        'exclude_if_empty': False,
        'validators': []
    }
    metadata.update(field.metadata.get('morpfw', {}))
    origin = getattr(field.type, '__origin__', None)
    required = True
    if origin == typing.Union:
        # Only Optional[X] (a two-arm union whose second arm is None) is
        # recognized as "not required".
        if len(field.type.__args__) == 2:
            if field.type.__args__[1] == type(None):
                required = False
        typ = field.type.__args__[0]
    else:
        typ = field.type
    # Explicit metadata wins over what the annotation implies.
    if metadata['required'] is _marker:
        metadata['required'] = required
    required = metadata['required']
    origin = getattr(typ, '__origin__', None)
    if origin == list:
        if getattr(typ, '__args__', None):
            return {
                'name': field.name,
                'type': list,
                # BUG FIX: read the element type from the unwrapped 'typ',
                # not 'field.type' — for Optional[List[X]] the original
                # returned List[X] (the whole generic) instead of X, and
                # downstream issubclass() on that value raises TypeError.
                'schema': typ.__args__[0],
                'required': required,
                'metadata': metadata
            }
        else:
            return {
                'name': field.name,
                'type': list,
                'required': required,
                'metadata': metadata
            }
    return {
        'type': typ,
        'required': required,
        'metadata': metadata
    }
def dataclass_check_type(field, basetype):
    """Return *field*'s type-info dict if its type matches *basetype*.

    Matching means equality or subclassing, except that bool fields are
    deliberately NOT treated as int (bool subclasses int in Python).
    Returns None on no match.
    """
    info = dataclass_get_type(field)
    if info['type'] == basetype:
        return info
    # bool subclasses int; reject that pairing explicitly.
    if info['type'] == bool and basetype == int:
        return None
    return info if issubclass(info['type'], basetype) else None
def dataclass_field_to_jsl_field(
    prop: dataclasses.Field, nullable=False) -> jsl.BaseField:
    """Map one dataclass field onto a jsl schema field.

    `nullable` is forwarded only into nested ISchema conversions.
    Raises KeyError for field types with no mapping.
    """
    # NOTE: the date check runs first and, because datetime subclasses
    # date, also catches datetime fields — both map to DateTimeField.
    t = dataclass_check_type(prop, date)
    if t:
        return jsl.DateTimeField(name=prop.name, required=t['required'])
    t = dataclass_check_type(prop, datetime)
    if t:
        return jsl.DateTimeField(name=prop.name, required=t['required'])
    t = dataclass_check_type(prop, str)
    if t:
        return jsl.StringField(name=prop.name, required=t['required'])
    t = dataclass_check_type(prop, int)
    if t:
        return jsl.IntField(name=prop.name, required=t['required'])
    t = dataclass_check_type(prop, float)
    if t:
        return jsl.NumberField(name=prop.name, required=t['required'])
    t = dataclass_check_type(prop, bool)
    if t:
        return jsl.BooleanField(name=prop.name, required=t['required'])
    t = dataclass_check_type(prop, dict)
    if t:
        return jsl.DictField(name=prop.name, required=t['required'])
    t = dataclass_check_type(prop, ISchema)
    if t:
        # Nested schema class -> embedded document.
        subtype = jsonobject_to_jsl(
            t['schema'], nullable=nullable)
        return jsl.DocumentField(name=prop.name,
                                 document_cls=subtype,
                                 required=t['required'])
    t = dataclass_check_type(prop, list)
    if t:
        return jsl.ArrayField(name=prop.name, required=t['required'])
    t = dataclass_check_type(prop, typing.List)
    if t:
        # Untyped lists stay generic arrays; typed lists get item schemas.
        if 'schema' not in t.keys():
            return jsl.ArrayField(name=prop.name, required=t['required'])
        if issubclass(t['schema'], ISchema):
            subtype = jsl.DocumentField(
                document_cls=jsonobject_to_jsl(
                    t['schema'], nullable=nullable))
        elif t['schema'] == str:
            subtype = jsl.StringField(name=prop.name)
        elif t['schema'] == int:
            subtype = jsl.IntField(name=prop.name)
        elif t['schema'] == float:
            subtype = jsl.NumberField(name=prop.name)
        elif t['schema'] == dict:
            subtype = jsl.DictField(name=prop.name)
        else:
            raise KeyError(t['schema'])
        return jsl.ArrayField(items=subtype, required=t['required'])
    raise KeyError(prop)
def _set_nullable(prop):
    """Wrap an optional jsl field so that an explicit null also validates."""
    if prop.required:
        return prop
    return jsl.OneOfField([prop, jsl.NullField(name=prop.name)])
def dataclass_to_jsl(schema, nullable=False, additional_properties=False):
    """Build a jsl Document class from a dataclass *schema*.

    When `nullable` is True every optional field is wrapped so null is
    accepted; otherwise required string fields get a '.+' pattern so
    empty strings fail validation.
    """
    attrs = {}
    _additional_properties = additional_properties
    class Options(object):
        additional_properties = _additional_properties
    for attr, prop in schema.__dataclass_fields__.items():
        # NOTE(review): nullable is NOT forwarded here (always False),
        # unlike jsonobject_to_jsl below — nested documents therefore
        # never become nullable; confirm whether that is intentional.
        prop = dataclass_field_to_jsl_field(prop, nullable=False)
        if nullable:
            attrs[attr] = _set_nullable(prop)
        else:
            if not prop.required:
                attrs[attr] = prop
            else:
                # Required strings must be non-empty.
                if isinstance(prop, jsl.StringField):
                    if not prop.pattern:
                        prop.pattern = '.+'
                attrs[attr] = prop
    attrs['Options'] = Options
    Schema = type("Schema", (jsl.Document, ), attrs)
    return Schema
def jsonobject_to_jsl(schema, nullable=False):
    # output jsl schema from jsonobject schema
    """Build a jsl Document class from a jsonobject *schema*.

    Unlike dataclass_to_jsl, additional properties are always allowed
    here, and `nullable` is forwarded into nested conversions.
    """
    attrs = {}
    class Options(object):
        additional_properties = True
    for attr, prop in schema._properties_by_attr.items():
        prop = jsonobject_property_to_jsl_field(prop, nullable=nullable)
        if nullable:
            attrs[attr] = _set_nullable(prop)
        else:
            if not prop.required:
                attrs[attr] = prop
            else:
                # Required strings must be non-empty.
                if isinstance(prop, jsl.StringField):
                    if not prop.pattern:
                        prop.pattern = '.+'
                attrs[attr] = prop
    attrs['Options'] = Options
    Schema = type("Schema", (jsl.Document, ), attrs)
    return Schema
def jsl_nullable(schema):
    """Round-trip *schema* through jsonobject to produce a variant whose
    optional fields also accept null."""
    return jsonobject_to_jsl(jsl_to_jsonobject(schema), nullable=True)
def jsl_field_to_jsonobject_property(
    prop: jsl.BaseField) -> jsonobject.JsonProperty:
    """Inverse mapping: one jsl field back to a jsonobject property.

    Raises KeyError for field types with no mapping.
    """
    if isinstance(prop, jsl.DateTimeField):
        return jsonobject.DateTimeProperty(name=prop.name,
                                           required=prop.required)
    if isinstance(prop, jsl.StringField):
        return jsonobject.StringProperty(name=prop.name,
                                         required=prop.required)
    if isinstance(prop, jsl.IntField):
        return jsonobject.IntegerProperty(name=prop.name,
                                          required=prop.required)
    if isinstance(prop, jsl.DictField):
        return jsonobject.DictProperty(name=prop.name, required=prop.required)
    if isinstance(prop, jsl.NumberField):
        return jsonobject.FloatProperty(name=prop.name, required=prop.required)
    if isinstance(prop, jsl.BooleanField):
        return jsonobject.BooleanProperty(name=prop.name,
                                          required=prop.required)
    if isinstance(prop, jsl.DocumentField):
        # Embedded documents become typed dicts.
        if prop.document_cls:
            subtype = jsl_to_jsonobject(prop.document_cls)
            return jsonobject.DictProperty(name=prop.name,
                                           item_type=subtype,
                                           required=prop.required)
        return jsonobject.DictProperty(name=prop.name, required=prop.required)
    if isinstance(prop, jsl.ArrayField):
        # Typed arrays convert their item schema recursively.
        if prop.items:
            if isinstance(prop.items, jsl.DocumentField):
                subtype = jsl_to_jsonobject(prop.items.document_cls)
            elif isinstance(prop.items, jsl.BaseField):
                subtype = jsl_field_to_jsonobject_property(prop.items)
            else:
                raise KeyError(prop.items)
            return jsonobject.ListProperty(item_type=subtype,
                                           required=prop.required)
        return jsonobject.ListProperty(name=prop.name, required=prop.required)
    raise KeyError(prop)
def jsl_to_jsonobject(schema):
    """Build a jsonobject class from a jsl Document *schema*.

    NOTE(review): this mutates each field's .name on the *input* schema
    as a side effect; confirm no caller relies on the original names.
    """
    attrs = {}
    for attr, prop in schema._fields.items():
        prop.name = attr
        attrs[attr] = jsl_field_to_jsonobject_property(prop)
    Schema = type("Schema", (jsonobject.JsonObject, ), attrs)
    return Schema
def generate_default(schema):
    """Build a default-value dict for a jsl schema.

    Nested documents recurse; fields with no default yield None, except
    dicts ({}) and arrays ([]).
    """
    if isinstance(schema, jsl.DocumentField):
        schema = schema.document_cls
    defaults = {}
    for field_name, field in schema._fields.items():
        if isinstance(field, jsl.DocumentField):
            defaults[field_name] = generate_default(field)
            continue
        value = field.get_default()
        if value is None:
            # Containers default to empty; scalars stay None.
            if isinstance(field, jsl.DictField):
                value = {}
            elif isinstance(field, jsl.ArrayField):
                value = []
        defaults[field_name] = value
    return defaults
|
996,520 | fccdeeeaa37448b5dae9ec12e26d1f6ae4727146 | #!/usr/bin/env python
from __future__ import print_function
from bluetooth import *
# Scan nearby Bluetooth devices and report which Wii controllers are usable.
print("Looking for compatible devices")
scan_results = discover_devices(lookup_names=1, duration=10, flush_cache=True)
for device_addr, device_name in scan_results:
    if device_name in ('Nintendo RVL-CNT-01', 'Nintendo RVL-WBC-01'):
        print('Compatible controller:\n %s - %s' % (device_addr, device_name))
    if device_name == 'Nintendo RVL-CNT-01-TR':
        print("Incompatible controller:\n %s - %s" % (device_addr, device_name))
|
996,522 | b2775461cedf9fede947a81025740d268f9765a2 | #!/usr/bin/python3
'''
Task
You are given a string S. Your task is to print all possible size k
replacement combinations of the string in lexicographic sorted order.
Input Format
A single line containing the string S and integer value k
separated by a space.
Output Format
Print the combinations with their replacements of string S on separate lines.
Sample Input
HACK 2
Sample Output
AA
AC
AH
AK
CC
CH
CK
HH
HK
KK
'''
from itertools import combinations_with_replacement

# Read the source string and combination size from one line, e.g. "HACK 2".
text, k = input().split()

# Sorting the letters first makes combinations_with_replacement emit the
# size-k multisets already in lexicographic order.
for combo in combinations_with_replacement(sorted(text), int(k)):
    print(''.join(combo))
|
996,522 | d337bc55bfeac4d3a9fe2f2d6f94dbce66f394d4 | import sys
import os
import math
import pygame
try:
import _path
except:
pass
import tiledtmxloader
# 'frm' denotes the portals in a map
# this function returns the coordinates of hero after entering the portal
def entry(map_name, frm):
    """Return the [x, y] pixel spawn position for portal *frm* of *map_name*.

    Coordinates are in pixels on a 32x32 tile grid (the +16/+20 offsets
    nudge the hero off the exact tile corner).  Returns None for an
    unknown map/portal combination, matching the original fall-through.
    """
    spawn_points = {
        ('./maps/village1.tmx', 0): (2 * 32, 70 * 32 + 16),
        ('./maps/village1.tmx', 1): (82 * 32, 10 * 32 + 16),
        ('./maps/village1.tmx', 2): (31 * 32 + 20, 60 * 32),
        ('./maps/village2_out1.tmx', 0): (3 * 32, 26 * 32),
        ('./maps/village2_out1.tmx', 1): (42 * 32, 5 * 32),
        ('./maps/village2_out1.tmx', 2): (23 * 32 + 20, 21 * 32),
        ('./maps/village2_inside.tmx', 0): (22 * 32, 51 * 32),
        ('./maps/village2_inside.tmx', 1): (20 * 32 + 20, 23 * 32),
        ('./maps/village2_inside.tmx', 2): (4 * 32 + 20, 20 * 32),
        ('./maps/tunnel3.tmx', 0): (21 * 32, 72 * 32),
        ('./maps/ship.tmx', 0): (4 * 32, 3 * 32),
        ('./maps/ship.tmx', 1): (53 * 32, 2 * 32 + 16),
    }
    pos = spawn_points.get((map_name, frm))
    return None if pos is None else [pos[0], pos[1]]
996,523 | 7fd4e9102e7ed888188337dea448bce04942d89c | import computational_graph as cg
|
def standaardtarief(afstandKM):
    """Return the standard fare in euros for a trip of *afstandKM* kilometres.

    Pricing: EUR 0.80 per km for trips under 50 km; trips of 50 km or
    more cost a flat EUR 15.00 plus EUR 0.60 per km beyond 50.  Zero or
    negative distances cost 0.

    Fix: the original accumulated the fare one kilometre at a time in a
    loop — O(n) and prone to float drift; replaced with the closed-form
    expression (result still rounded to cents, as before).
    """
    prijs = 0
    if afstandKM >= 50:
        prijs = 15.00 + 0.60 * (afstandKM - 50)
    elif afstandKM > 0:
        prijs = 0.80 * afstandKM
    return round(prijs, 2)
def ritprijs(leeftijd, weekendrit, afstandKM):
    """Fare after the age/weekend discount; prints the intermediate steps.

    Children (<12) and seniors (>=65) get 35% off on weekends and 30%
    off on weekdays; everyone else gets 40% off on weekends only.
    """
    prijs = standaardtarief(afstandKM)
    print("\nStandaard tarief: \n€ " + str(prijs))
    doelgroep = leeftijd < 12 or leeftijd >= 65
    if weekendrit and doelgroep:
        prijs -= prijs * 0.35
        print("Met 35% korting: ")
    elif weekendrit:
        prijs -= prijs * 0.40
        print("Met 40% korting: ")
    elif doelgroep:
        prijs -= prijs * 0.30
        print("Met 30% korting: ")
    else:
        print("Met 0% korting: ")
    return round(prijs, 2)
def Test(leeftijd, weekendrit, afstand):
    """Print one labelled fare scenario: age, weekend flag and distance."""
    print('\n--- Nieuwe test-situatie ---')
    print('Afstand = %s km' % afstand)
    print('Leeftijd = %s jaar oud' % leeftijd)
    print('Weekend = %s' % weekendrit)
    print("€ " + str(ritprijs(leeftijd, weekendrit, afstand)))
# Test scenarios: age, whether it is a weekend ride, and the distance in km.
Test(18, True, 15)
Test(20, True, 52)
Test(25, False, 15)
Test(40, False, 70)
Test(12, True, 100)
Test(8, False, 5)
Test(69, True, 80)
Test(80, False, 2)
# Interactive variant, kept commented out for reference:
## afstandKM = int(input("Afstand in kilometers: "))
## leeftijd = int(input("Leeftijd: "))
## weekendrit = str(input("Weekend (Ja / Nee): ")).lower()
## if weekendrit == "ja":
## weekendrit = True
## else:
## weekendrit = False
## print("€ " + str(ritprijs(leeftijd, weekendrit, afstandKM)))
996,525 | f40a00af68b3056703c875fca34b3bd8d9b94afb | '''
Created on 02/06/14
@author: kibanez
'''
#!/usr/bin/python
import sys, re, shlex , os, string, urllib, time, math, random, subprocess, shutil
from multiprocessing import Process, Manager
import inspect
from itertools import izip_longest,groupby
from operator import itemgetter
import ConfigParser
import optparse
from os import path as osp
localModulesBase = osp.dirname(osp.realpath(__file__))
modulesRelDirs = ["modules/"]
for moduleRelDir in modulesRelDirs:
sys.path.insert(0,osp.join(localModulesBase,moduleRelDir))
from mod_CoverageControl import c_CoverageControl
import mod_CNV
import logging
import numpy
######################################################################
class OptionParser(optparse.OptionParser):
    """optparse.OptionParser with a helper to test whether an option got a value."""

    def check_required(self, opt):
        """Return True if option *opt* holds a non-None value, else False."""
        option = self.get_option(opt)
        value = getattr(self.values, option.dest)
        # The original error() call was already commented out upstream;
        # missing options are simply reported as False.
        return value is not None
######################################################################
def read_cfg_file(cfg_filename):
    """Flatten the fixed cfg sections into a single {option: value} dict.

    Later sections overwrite identically-named options from earlier ones
    (same behaviour as the original sequential loops).
    """
    fi = open(cfg_filename,'r')
    config = ConfigParser.ConfigParser()
    config.readfp(fi)
    hash_cfg = {}
    for section in ('INPUT', 'REFERENCE', 'OUTPUT', 'BDs', 'SOFTWARE'):
        for field in config.options(section):
            hash_cfg[field] = config.get(section, field)
    fi.close()
    return hash_cfg
#######################################################################
def parse_coverage(f_cov):
    """Parse a per-exon coverage TSV into a dict keyed by exon.

    Returns {(row_index, gene, refseq, exon, chr, start, end):
             [[sample, avg_coverage, pct_coverage_below_20], ...]}.
    Decimal commas in the numeric columns are converted to dots.
    Python 2 only: relies on tuple-parameter lambdas and list-returning map().
    """
    #Sample_id	Gene	Refseq	Exon	Chr	Exon Start	Exon End	Exon length	Avg_Coverage	% Coverage < 5	% Coverage 5-10	% Coverage 10-15	% Coverage 15-20	% Coverage < 20
    hash_table = {}
    fi = open(f_cov,'r')
    # Skip the header row; split the rest on tabs.
    l_lines = map(lambda l: l.strip().split('\t') , fi.readlines())[1:]
    # Group each row under its exon key (executed for side effects only).
    map(lambda (i,(sample,gene,transc,exon,chr,start,end,len,avg_cov,cov_5,cov_5_10,cov_10_15,cov_15_20,cov_less_20)): hash_table.setdefault((i,gene,transc,exon,chr,start,end),[]).append([sample,float(avg_cov.replace(',','.')),float(cov_less_20.replace(',','.'))]), enumerate(l_lines))
    fi.close()
    return hash_table
#######################################################################
# cleans the bed file, removing lines where different things apart from "chr start end" appear
def clean_bed(bed_file,alignment_path):
    """Filter *bed_file* to plain chrN lines, drop chrM, version-sort.

    Writes <name>_reheadered<ext> into *alignment_path* and returns its
    path.

    NOTE(security/review): file paths are interpolated directly into
    os.system shell commands; whitespace or shell metacharacters in the
    paths will break (or abuse) these commands.
    """
    bed_name = bed_file.split('/')
    bed_name = bed_name[len(bed_name) - 1]
    bedName, bedExtension = os.path.splitext(bed_name)
    new_analysis_bed = bedName + "_reheadered" + bedExtension
    new_analysis_bed_path = alignment_path + "/" + new_analysis_bed
    aux = alignment_path + "/" + "aux.bed"
    aux2 = alignment_path + "/" + "aux2.bed"
    # NOTE(review): these handles only pre-create empty files; the actual
    # content is produced by the shell redirections below.
    iOutFile = open(new_analysis_bed_path,"w")
    iOutFile2 = open(aux,"w")
    # we conserve all those lines with the following info: chr \t start \t end
    os.system("grep -e '^chr[[:upper:]]' -e '^chr[[:digit:]][[:blank:]]' -e '^chr[[:digit:]][[:digit:]][[:blank:]]' %s > %s" %(bed_file,aux))
    # we remove "chrM" lines if they appear
    os.system("grep -v '^chrM' %s > %s" %(aux,aux2))
    os.system("sort -k1,1V %s > %s" %(aux2,new_analysis_bed_path))
    iOutFile.close()
    iOutFile2.close()
    os.remove(aux)
    os.remove(aux2)
    return new_analysis_bed_path
#######################################################################
def __create_reference_tables(refseq_filename,output_path):
    """Derive per-exon BED and annotation files from a RefSeq table dump.

    For each gene the coding transcript with the largest total coding
    length is selected; its coding exon segments are written to a BED
    file and a tab-separated annotation file (both paths get a random
    suffix to avoid clashes).  Returns (bed_path, annot_path).
    Python 2 only: uses `<>` and tuple-parameter lambdas.
    """
    ref_annotation_bed = os.path.join(output_path,os.path.basename(refseq_filename) + str(random.randrange(1,500)) + '.ref.bed')
    ref_annotation_file = os.path.join(output_path,os.path.basename(refseq_filename)+ str(random.randrange(1,500)) + '.ref.annot')
    """
    Columns in the table:
    hg19.refGene.name
    hg19.refGene.chrom
    hg19.refGene.strand
    hg19.refGene.cdsStart
    hg19.refGene.cdsEnd
    hg19.refGene.exonCount
    hg19.refGene.exonStarts
    hg19.refGene.exonEnds
    hg19.refGene.name2
    hg19.refSeqStatus.status
    Columns in ref_annotation_file:
    Chr
    txStart
    txEnd
    exonCount
    exonStart
    exonEnd
    Gene
    Refseq
    """
    f = open(refseq_filename,'r')
    # Skip the header line of the RefSeq dump.
    l_lines = map(lambda x: x.strip().split('\t'), f.readlines())[1:]
    f.close()
    #NM_032291	chr1	+	67000041	67208778	25	66999824,...	67000051,...	SGIP1	Validated
    #select the principal isoform as the largest transcript
    hash_gene_2_isoform = {}
    hash_table = {}
    hash_index_gene = {}
    for index,line in enumerate(l_lines):
        refseq_id = line[0]
        chrm = line[1]
        strand = line[2]
        cds_start = line[3]
        cds_end = line[4]
        n_exon = line[5]
        l_exon_s = map(int,line[6].split(','))
        l_exon_e = map(int,line[7].split(','))
        gene_name = line[8]
        # Sanity checks: start/end lists must pair up and match exonCount.
        if len(l_exon_s) <> len(l_exon_e):
            raise RuntimeError('coverage_statistics.__create_reference_tables: The number of initial exon coordinates does not agree with the number of end coordinates: %s\n' % (gene_name))
        if len(l_exon_s) <> int(n_exon):
            raise RuntimeError('coverage_statistics.__create_reference_tables: The number of exons does not agree with exon_count: %s: %s\n' % (gene_name,n_exon))
        cs = int(cds_start)
        ce = int(cds_end)
        # Keep only exons overlapping the CDS; exon numbering follows
        # transcription direction (reversed on the minus strand).
        if strand == '+':
            l_coding_exons = filter(lambda (i,s,e): float(min(e,ce)-max(s,cs))/(e-s)>0, zip(range(1,len(l_exon_s)+1),l_exon_s,l_exon_e))
        else:
            l_coding_exons = filter(lambda (i,s,e): float(min(e,ce)-max(s,cs))/(e-s)>0, zip(range(len(l_exon_s)+1,1,-1),l_exon_s,l_exon_e))
        # Clip each exon to the CDS boundaries.
        l_exon_coord = map(lambda (i,s,e): (i,max(s,cs),min(e,ce)), l_coding_exons)
        hash_gene_2_isoform.setdefault(gene_name,[]).append((sum(map(lambda (i,s,e): (e-s) , l_exon_coord)),refseq_id))
        hash_table[refseq_id] = (chrm,l_exon_coord,gene_name)
        hash_index_gene.setdefault(gene_name,[]).append(index)
    # Emit genes in their order of first appearance in the dump.
    l_genes = map(itemgetter(1),sorted(map(lambda gene: (sorted(hash_index_gene[gene])[0],gene), hash_index_gene.keys())))
    f_bed = open(ref_annotation_bed,'w')
    f_annot = open(ref_annotation_file,'w')
    f_annot.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" %("Chr","txStart","txEnd","exonCount","exonStart","exonEnd","Gene","Refseq"))
    for gene_name in l_genes:
        # Longest coding transcript wins; ties broken by refseq_id.
        length_trans,refseq_id = sorted(hash_gene_2_isoform[gene_name],reverse=True)[0]
        if length_trans == 0:
            continue
        (chrm,l_exon_coord,gene_name) = hash_table[refseq_id]
        f_bed.write('\n'.join(map(lambda (i,s,e): "%s\t%d\t%d\t%s\t%s" % (chrm,s,e,gene_name,refseq_id), l_exon_coord))+'\n')
        f_annot.write('\n'.join(map(lambda (i,s,e): "%s\t-\t-\t%d\t%d\t%d\t%s\t%s" % (chrm,i,s,e,gene_name,refseq_id), l_exon_coord))+'\n')
    f_bed.close()
    f_annot.close()
    return ref_annotation_bed,ref_annotation_file
#######################################################################
def run(argv=None):
    """Entry point: read the --cfg file, validate paths, then run the
    GATK coverage control followed by CNV estimation.

    Python 2 only (uses `<>` and `print >>`).  Any exception is printed
    to stderr and the process exits with status 2.
    """
    if argv is None: argv = sys.argv
    parser = OptionParser(add_help_option=True,description="The script performs CNV estimation within the regions of interest")
    parser.add_option("--cfg",default=None,help="Config file with the complete information of the target regions and paths of the files needed for the calling",dest="f_cfg")
    # Parse the options supplied by the user.
    (options, args) = parser.parse_args(argv[1:])
    if len(argv) == 1:
        sys.exit(0)
    if not parser.check_required("--cfg"):
        raise IOError('The cfg file does not exist')
    try:
        if options.f_cfg <> None:
            cfg_file = options.f_cfg
            if not os.path.exists(cfg_file):
                raise IOError('The file %s does not exist' % (cfg_file))
            # Pull every needed path/parameter from the flattened cfg.
            hash_cfg = read_cfg_file(cfg_file)
            cnv_output_path = hash_cfg.get('cnv_path','')
            controlCoverage_path = hash_cfg.get('coverage_path','')
            alignment_path = hash_cfg.get('alignment_path','')
            ref_fasta = hash_cfg.get('ref_fasta','')
            fasta_cnv_path = hash_cfg.get('ref_fasta_cnv','')
            gatk_path = hash_cfg.get('gatk_path','')
            analysis_bed = hash_cfg.get('analysis_bed','')
            l_samples = hash_cfg.get("sample_names",'').split(',')
            l_gender = hash_cfg.get("sample_gender",'').split(',')
            # NOTE(review): window_length is read but never used below.
            window_length = hash_cfg.get("window_length",'')
            annotation_file = hash_cfg.get("annotation_file",'')
            # Validate inputs; output dirs are created on demand.
            if not os.path.exists(alignment_path):
                raise IOError('The path does not exist. %s' % (alignment_path))
            if not os.path.isfile(ref_fasta):
                raise IOError('The file does not exist. %s' % (ref_fasta))
            if not os.path.isfile(fasta_cnv_path):
                raise IOError('The file does not exist. %s' % (fasta_cnv_path))
            if not os.path.exists(controlCoverage_path):
                os.mkdir(controlCoverage_path)
            if not os.path.exists(cnv_output_path):
                os.mkdir(cnv_output_path)
            if not os.path.isfile(gatk_path):
                raise IOError('The file does not exist. %s' % (gatk_path))
            if not os.path.isfile(analysis_bed):
                raise IOError('The file does not exist. %s' % (analysis_bed))
            if not os.path.isfile(annotation_file):
                raise IOError("annotation_file not exist. %s" % (annotation_file))
            #Configure logger
            formatter = logging.Formatter('%(asctime)s - %(module)s - %(levelname)s - %(message)s')
            console = logging.StreamHandler()
            console.setFormatter(formatter)
            console.setLevel(logging.INFO)
            logger = logging.getLogger("preprocess")
            logger.setLevel(logging.INFO)
            logger.addHandler(console)
            # Resolve sample names to absolute BAM paths.
            l_bams = []
            for bam_f in l_samples:
                abs_path = os.path.join(alignment_path,bam_f)
                if not os.path.exists(abs_path):
                    raise IOError("The bam file does not exist. Check if the introduced path is correct: %s" %(abs_path))
                else:
                    l_bams.append(abs_path)
            logger.info("CNV estimation will be done in the following files: %s \n" %(l_bams))
            logger.info("Human genome bed generation, for the annotation process")
            refseq_filename = "/ingemm/ref/DBs/genes_tables/RefSeq_table.txt"
            (ref_annotation_bed,ref_annotation_file) = __create_reference_tables(refseq_filename,controlCoverage_path)
            logger.info("Bed generation")
            new_analysis_bed_path = clean_bed(analysis_bed,alignment_path)
            # before calling for CNV, it is necessary to create GATK coverage files , normalize them and do the estimation CNV
            # 1 - GATK coverage average calling
            cov_control = c_CoverageControl(l_bams,logger)
            cov_control.set_bed_analysis(new_analysis_bed_path)
            # all the resulting files are generated in coverage/files directory
            path_files = os.path.join(controlCoverage_path,"files")
            if not os.path.exists(path_files):
                os.mkdir(path_files)
            logger.info("Starting Coverage Control...")
            cov_output = cov_control.perform_coverage_control_gatk(path_files,gatk_path,ref_fasta)
            # 2 - Normalization and 3 - CNV estimation calling (via mod_CNV)
            l_covFiles = []
            for i in cov_output:
                aux = i + ".sample_interval_summary"
                l_covFiles.append(aux)
            # Gender list sanity check: only H (xy), M (xx) or X (unknown).
            aux1 = filter(lambda x:'H' in x, l_gender)
            aux2 = filter(lambda x:'M' in x, l_gender)
            aux3 = filter(lambda x:'X' in x, l_gender)
            if (len(aux1) + len(aux2) + len(aux3) == len(l_gender)):
                logger.info("CNV estimation starts")
                cnv_estimation = mod_CNV.mod_CNV(l_covFiles,ref_fasta,fasta_cnv_path,gatk_path,cnv_output_path,l_gender,annotation_file,logger)
                cnv_estimation.set_bed_analysis(new_analysis_bed_path)
                cnv_estimation.perform_cnv()
            else:
                raise IOError('The gender list must have only M (xx) or H (xy) or X (unknown) characters. Please review the cfg file.')
            logger.info("CNV estimation done! ")
    except:
        # Best-effort top-level handler: report and exit non-zero.
        print >> sys.stderr , '\n%s\t%s' % (sys.exc_info()[0],sys.exc_info()[1])
        sys.exit(2)
############################################################################333
if __name__=='__main__':
    run()
|
996,526 | 6bc7eefc7de7b4ac5b4e56e69816b81b45e357fb |
class Field:
    """Declarative extraction field: pulls one value out of a parsed page.

    Configured from a dict with required 'name' and 'type' keys plus an
    optional 'xpath' or 'cssPath' selector.  Subclasses implement
    parseNode() to turn the first matching node into a value.
    """
    name = ""
    type = "text"
    xpath = None
    cssPath = None

    def __init__(self, conf):
        self.name = conf['name']
        self.type = conf['type']
        self.xpath = conf.get("xpath", "")
        self.cssPath = conf.get("cssPath", "")

    def parse(self, obj):
        """Select nodes via xpath (preferred) or cssPath, then delegate
        the first match to parseNode(); None when nothing matches or no
        selector is configured."""
        if self.xpath != "":
            matches = obj.xpath(self.xpath)
        elif self.cssPath != "":
            matches = obj.css(self.cssPath)
        else:
            return None
        return self.parseNode(matches[0]) if matches else None

    def parseNode(self, node):
        """Hook for subclasses; the base class extracts nothing."""
        pass

    def getNodeText(self, node):
        """Recursively concatenate the text content of *node*."""
        children = node.xpath("./node()")
        if not children:
            return node.extract()
        return "".join(self.getNodeText(child) for child in children)
996,527 | e6733431da5d0388e618cbff7299ef9162b6d598 | from newsapi.newsapi_client import NewsApiClient
import requests
import spotipy
import spotipy.oauth2 as oauth2
import random
import nltk
from nltk.corpus import words
## Retrieve top headlines and append to list.
def TopHeadlines():
    """Fetch the current GB top headlines from newsapi.org.

    Returns a list of headline title strings; an empty list when the API
    returns no articles (the old `for j in range(...): return` loop fell
    through and returned None for an empty result, crashing the caller's
    "".join()).
    """
    # NOTE(security): the API key is hardcoded — move it to an env var/config.
    news_URL = "https://newsapi.org/v2/top-headlines?country=gb&apiKey=108a2d7663e94e639f96a3a6c77d82f1"
    open_page = requests.get(news_URL).json()
    return [article["title"] for article in open_page["articles"]]
# Pick a random "interesting" word from today's headlines, then search Spotify for it.
Headlines = TopHeadlines()
## if word in list is longer than 5 characters
## then append to data
Headlines1 = "".join(Headlines)
processedData = ' '.join([w for w in Headlines1.split() if len(w)>5])
data = processedData.split()
## Download word dictionary
# NOTE(review): downloads the corpus on every run; consider caching/checking first.
nltk.download('words')
## Check if word is in english dictionary and append to new list
# NOTE(review): words.words() rebuilds the full word list on every iteration —
# hoisting `vocab = set(words.words())` out of the loop would be far faster.
newdata = []
for i in data:
    if i in words.words():
        newdata.append(i)
## Choose random word from list
# random.choice raises IndexError if no headline word was in the dictionary.
Random_News_Word = random.choice(newdata)
print (Random_News_Word)
## Spotify Authentication
# NOTE(security): client credentials are hardcoded; move to env vars/config.
credentials = oauth2.SpotifyClientCredentials(
    client_id="c367030cba0547c18102ec491320e635",
    client_secret="3286485fbcca41f7b324a6602221755e")
token = credentials.get_access_token()
sp = spotipy.Spotify(auth=token)
## Search spotify for keyword extracted from headlines
##returns 20 tracks that are based on the 'Random_News_Word'
results = sp.search(q=Random_News_Word, limit=20)
for i, t in enumerate(results['tracks']['items']):
    print (' ', i, t['name'])
|
996,528 | 55b99e100612857799c1f059b1d7ece5c34f4951 |
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
# URL routes for the panel app: static templates plus the auth flow.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', TemplateView.as_view(template_name='panel/home.html'), name='home'),
    path('dashboard/', TemplateView.as_view(template_name='panel/dashboard.html'),
         name='dashboard'),
    path('login/', auth_views.LoginView.as_view(template_name='panel/login.html'),
         name='login'),
    # Bug fix: this route was wired to LoginView, so "logout" rendered another
    # login form and never ended the session. LogoutView clears the session
    # and renders the logout template.
    path('logout/', auth_views.LogoutView.as_view(template_name='panel/logout.html'),
         name='logout'),
    # NOTE(review): PasswordChangeView redirects to the 'password_change_done'
    # URL name by default — confirm that route exists or set success_url.
    path('changepass/', auth_views.PasswordChangeView.as_view(template_name='panel/changepass.html'),
         name='changepass'),
]
|
996,529 | 209e087f851772b8fd197e2ad111f609eb802b0c | # 6-11. Cities: Make a dictionary called cities. Use the names of three cities as keys in your dictionary.
# Create a dictionary of information about each city and include the country that the city is in,
# its approximate population, and one fact about that city.
# The keys for each city’s dictionary should be something like country, population, and fact.
# Print the name of each city and all of the information you have stored about it.
# City fact sheets: country, approximate population and one fact per city.
cities = {
    'Saltillo': {
        'country': 'Mexico',
        'population': 1045000,
        'number of museums': '36',
    },
    'Monterrey': {
        'country': 'Mexico',
        'population': 2300000,
        'number of museums': '18',
    },
    'Guadalajara': {
        'country': 'Mexico',
        'population': 1680285,
        'number of museums': '23',
    },
}

# Print a short profile of every city.
for name, info in cities.items():
    print("\n" + name.title() + " is in " + info['country'].title() + ".")
    print(" It has a population of about " + str(info['population']) + ".")
    print(" This city has " + info['number of museums'].title() + " museums.")
996,530 | bedf1c2a597f487ef67b08ab40b571ae9c3d15e0 | def z(x, y):
return (x-5.0) ** 2.0 + (y-5.0) ** 2.0 + 7.0
if __name__ == '__main__':
    # Gradient descent on z(x, y); the partial derivatives of z are
    # dz/dx = 2x - 10 and dz/dy = 2y - 10.
    alpha = 0.01          # learning rate
    x, y = 10.0, 10.0     # starting point
    nvz = 0.0
    delta_z = 10.0
    # Iterate until the improvement per step drops below the tolerance.
    while delta_z > 0.0001:
        previous = z(x, y)
        x -= alpha * (2.0 * x - 10.0)
        y -= alpha * (2.0 * y - 10.0)
        nvz = z(x, y)
        delta_z = previous - nvz
        print(delta_z)
    print(x, y, nvz, delta_z)
996,531 | 76b3086aceb75852bae1f2bf5ef3fa08a6c03b9a | import datetime
import pathlib
import re
from typing import List, NamedTuple
from matata.util import log, UserError
class TimeSheetEntry(NamedTuple):
    """A single worked interval (start to end) on one calendar day."""
    date: datetime.date
    start_time: datetime.time
    end_time: datetime.time

    def __repr__(self):
        parts = (self.date.isoformat(),
                 self.start_time.isoformat('minutes'),
                 self.end_time.isoformat('minutes'))
        return 'TimeSheetEntry({}, {}, {})'.format(*parts)
class TimeSheet(NamedTuple):
    """All entries parsed from one time sheet file, in file order."""
    entries: List[TimeSheetEntry]
def _parse_time(time_str):
match = re.fullmatch(
'(?P<hour>[0-9]+)'
'(\\.(?P<decihour>[0-9])|:(?P<minute>[0-9]{2}))?',
time_str)
if not match:
raise UserError(f'Invalid time specification: {time_str}')
hour = int(match.group('hour'))
decihour_str = match.group('decihour')
minute_str = match.group('minute')
if decihour_str:
minute = int(decihour_str) * 6
elif minute_str:
minute = int(minute_str)
else:
minute = 0
return datetime.time(hour, minute)
def read_time_sheet(path: pathlib.Path):
    """Read a time sheet file into a TimeSheet.

    Each non-empty line is "<YYYY-MM-DD> <start> <end> [<start> <end> ...]";
    everything after '#' is a comment. A trailing unmatched time logs a
    warning and the rest of that line is skipped.
    """
    entries = []
    with path.open(encoding='utf-8') as file:
        for raw_line in file:
            # Strip the comment part, then tokenize.
            fields = raw_line.split('#', 1)[0].split()
            if not fields:
                continue
            date_str, *range_strs = fields
            date = datetime.datetime.strptime(date_str, '%Y-%m-%d').date()
            # Consume start/end pairs until the line is exhausted.
            while range_strs:
                if len(range_strs) < 2:
                    log(f'warning: Line contains an incomplete time range: {raw_line}')
                    break
                start_str, end_str, *range_strs = range_strs
                entries.append(
                    TimeSheetEntry(date, _parse_time(start_str), _parse_time(end_str)))
    return TimeSheet(entries)
|
996,532 | 1b6a65e99a843d099e3f46f17539ea9e5dc7f163 | mylist=[10,2,3,4,4,4,3,3,3,56,7]
for i in range(len(mylist)//2):
mylist[i], mylist[len(mylist)-i-1] = mylist[len(mylist)-i-1], mylist[i]
print(mylist) |
996,533 | c5571f0e8214c1faf4ad40763daedde0af0a5941 | import pyximport
pyximport.install()
from collections import defaultdict
import string
import random
from deap import base, creator, tools
import matplotlib.pyplot as plt
import numpy
import itertools
from recurse_grid import recurse_grid_external
from recurse_grid import BOARD_SIZE
import matplotlib.animation as animation
import networkx
from boggle import *
from recurse_grid import ALL_WORD_TRIE, trie_member, TrieMembership
def update_trie_grid_fig(line, path, graph, graph_axes, current_word, membership):
    """Redraw one animation frame: the grid path line and the trie-node highlights.

    line: matplotlib Line2D tracing the path on the board.
    path: list of (x, y) board coordinates visited so far.
    graph/graph_axes: networkx graph of the trie and the axes it is drawn on.
    current_word: word spelled by `path`; membership: TrieMembership of it.
    NOTE(review): NODE_TRIE, POS and word_coord_to_plot_coord are presumably
    provided by `from boggle import *` — confirm.
    """
    x, y = zip(*list(map(word_coord_to_plot_coord, path)))
    line.set_xdata(x)
    line.set_ydata(y)
    # Walk the trie along current_word, collecting node ids to highlight
    # (0 is the root); stop at the first letter not in the trie.
    nodes =[0]
    root = NODE_TRIE
    for letter in current_word:
        try:
            nodes.append(root[letter]["num"])
            root = root[letter]
        except KeyError:
            break
    # Reset all nodes to white before coloring the active path.
    networkx.draw_networkx_nodes(graph, ax=graph_axes, pos=POS, node_size=450, node_color="w", nodelist=graph.nodes())
    # Red = dead end, green = complete word, yellow = valid prefix.
    color = ""
    if membership == TrieMembership.invalid:
        color = "r"
    elif membership == TrieMembership.word:
        color = "g"
    else:
        color = "y"
    line.set_color(color)
    networkx.draw_networkx_nodes(graph, ax=graph_axes, pos=POS, node_size=450, node_color=color, nodelist=nodes)
    plt.draw()
def update_trie_fig(_, line, graph, graph_axes, update_dict_generator):
    """Animation callback: pull the next search step and redraw grid + trie."""
    step = next(update_dict_generator)
    path, word, membership = step
    return update_trie_grid_fig(line, path, graph, graph_axes, word, membership)
def recurse_grid_internal(grid, path, current_word, words_trie, found_words, debug=False):
    """Depth-first generator over `grid` for words contained in `words_trie`.

    Yields (path, word) tuples for each newly discovered word, or
    (path, word, membership) for every visited state when debug=True.
    `found_words` is a set mutated in place to dedupe across branches.
    """
    # path should be empty on the initial call
    if not path:
        # This is the initial call to ensure that a search
        # starts from each square in the grid.
        for y, row in enumerate(grid):
            for x, letter in enumerate(row):
                for this in recurse_grid_internal(grid,
                                                 [(x, y)],
                                                 letter,
                                                 words_trie,
                                                 found_words,
                                                 debug):
                    yield tuple(this)
        return
    # path is a list of coordinates, so the last one
    # in the list is our current position in the grid
    current_position = path[-1]
    # test to see how our word is contained in the word list
    membership = trie_member(words_trie, current_word)
    if debug:
        yield (path, current_word, membership)
    # We have found a new word from our list and
    # should yield the current path and the word
    if membership == TrieMembership.word and current_word not in found_words:
        found_words.add(current_word)
        if not debug:
            yield (path, current_word)
    # If it's not a full word, but a prefix to one or more words in the
    # list, continue searching by moving to a neighbor
    # and adding that letter to the current word.
    if membership >= TrieMembership.prefix:
        for nx, ny in neighbors(*current_position):
            # the same square can only be used in each word once
            if (nx, ny) not in path:
                new_letter = grid[ny][nx]
                # the Q cube in boggle has QU on it.
                new_letter = new_letter if new_letter != 'q' else 'qu'
                # add the letter on the newest cube to the current word.
                new_word = current_word + new_letter
                # if the new word is either a word or prefix,
                # continue recursively searching from that new square.
                for this in recurse_grid_internal(grid,
                                                  path + [(nx, ny)],
                                                  new_word,
                                                  words_trie,
                                                  found_words,
                                                  debug):
                    yield tuple(this)
if __name__ == "__main__":
    # generate_trie_gif presumably comes from `from boggle import *` — confirm.
    generate_trie_gif("trie.gif")
|
996,534 | 95206da57b75b0ede0bbe48074f0551a7defe849 | """
Simple Pinkcoin p2p node implementation.
"""
from asyncio import open_connection, create_task, CancelledError
from .buffer import ProtocolBuffer
from .core.serializers import Version, VerAck, Pong
from .exceptions import NodeDisconnectException, InvalidMessageChecksum
class Node:
    """
    The base class for a network node, this class
    implements utility functions to create your own class.
    :param ip: node ip address
    :param port: node port to it binds to
    """
    # Network selector used by the message serializers (magic bytes).
    network_type = "main"

    def __init__(self, ip: str, port):
        self.node_ip = ip
        self.node_port = port
        # Peers connected to the node.
        # Maps "ip:port" -> {"reader", "writer", "buffer"}.
        self.peers = {}

    def send_message(self, peer_name, message):
        """
        Serializes the message using the appropriate
        serializer based on the message command
        and sends it to the socket stream.
        :param peer_name: Peer name
        :param message: The message object to send
        """
        try:
            writer = self.peers[peer_name]["writer"]
            writer.write(message.get_message(self.network_type))
        except KeyError:
            print(f"Error: Connection to {peer_name} doesn't exist.")

    async def close_connection(self, peer_name):
        """
        Closes TCP connection and ensures it's closed.
        :param peer_name: Peer name
        """
        try:
            writer = self.peers[peer_name]["writer"]
            reader = self.peers[peer_name]["reader"]
            # feed_eof unblocks any pending read on this connection.
            reader.feed_eof()
            writer.close()
            await writer.wait_closed()
            del self.peers[peer_name]
        except KeyError:
            print(f"Error: Connection to {peer_name} doesn't exist.")

    async def handle_message_header(self, peer_name, message_header, payload):
        """
        Is called for every message before the
        message payload deserialization.
        :param peer_name: Peer name
        :param message_header: The message header
        :param payload: The payload of the message
        """

    async def connect(self, peer_ip, peer_port):
        """
        Creates TCP connection and spawns new
        task handling communication.
        :param peer_ip: Peer ip address
        :param peer_port: Peer port
        """
        peer_name = f"{peer_ip}:{peer_port}"
        try:
            reader, writer = await open_connection(peer_ip, peer_port)
            self.peers[peer_name] = {
                "reader": reader,
                "writer": writer,
                "buffer": ProtocolBuffer()
            }
            # The task is awaited immediately, so connect() runs this peer's
            # read/write loop to completion before returning.
            client_coro = create_task(self.connection_handler(peer_name))
            await client_coro
        except CancelledError:
            print(f"Warning: Task handling connection to {peer_name} canceled.")
        except NodeDisconnectException:
            print(f"Warning: Peer {peer_name} disconnected")
            await self.close_connection(peer_name)
        except ConnectionError:
            # NOTE(review): the peers entry (if created) is not cleaned up here.
            print(f"Error: connection error for peer {peer_name}")

    async def connection_handler(self, peer_name):
        """
        Handles connection to the node's peer.
        """
        # Initialize communication (send our Version message).
        self.handshake(peer_name)
        try:
            writer = self.peers[peer_name]["writer"]
            # Enters connection read/write loop.
            while not writer.is_closing():
                await self.handle_message(peer_name)
        except KeyError:
            print(f"Error: Connection to {peer_name} doesn't exist.")

    async def handle_message(self, peer_name):
        """
        Handles one message received from the peer.
        :param peer_name: Peer name
        """
        try:
            reader = self.peers[peer_name]["reader"]
            buffer = self.peers[peer_name]["buffer"]
        except KeyError:
            print(f"Error: Connection to {peer_name} doesn't exist.")
            return
        # Read up to 8 KiB; an empty read means the peer closed the socket.
        data = await reader.read(1024*8)
        if not data:
            raise NodeDisconnectException(f"Node {peer_name} disconnected.")
        buffer.write(data)
        try:
            message_header, message = buffer.receive_message()
        except InvalidMessageChecksum as ex:
            print(f"Warning: {ex} (node {peer_name}).")
            return
        if message_header is not None:
            # NOTE(review): `data` here is the raw read chunk, not the parsed
            # message payload the docstring of handle_message_header suggests.
            await self.handle_message_header(peer_name, message_header, data)
        if not message:
            return
        # Executes proper message handler (e.g. handle_version, handle_ping)
        # looked up by the message's command name.
        handle_func_name = "handle_" + message_header.command
        handle_func = getattr(self, handle_func_name, None)
        if handle_func and callable(handle_func):
            await handle_func(peer_name, message_header, message)

    def handshake(self, peer_name):
        """
        Implements the handshake of a network
        protocol. It sends the Version message.
        :param peer_name: Peer name
        """
        version = Version()
        self.send_message(peer_name, version)

    async def handle_version(self, peer_name, message_header, message):
        #pylint: disable=unused-argument
        """
        Handles the Version message and sends
        a VerAck message when it receives the Version message.
        :param peer_name: Peer name
        :param message_header: The Version message header
        :param message: The Version message
        """
        verack = VerAck()
        self.send_message(peer_name, verack)

    async def handle_ping(self, peer_name, message_header, message):
        #pylint: disable=unused-argument
        """
        Handles the Ping message and answers every
        Ping message with a Pong message using the nonce received.
        :param peer_name: Peer name
        :param message_header: The header of the Ping message
        :param message: The Ping message
        """
        pong = Pong()
        pong.nonce = message.nonce
        self.send_message(peer_name, pong)
|
996,535 | b39e5057e672a8f0d887b92f04c0685394956ab2 | #!/usr/bin/python
#-*- coding: utf-8 -*-
import os
import json
import rrdtool
import sys
from errno import EEXIST
from itertools import product
from math import ceil
from os import makedirs, remove
from os.path import abspath, dirname, expanduser, isdir, isfile, join
from time import sleep
PROJECT_ROOT = abspath(dirname(__file__))
def read_config():
    """Load configuration: defaults first, then user overrides layered on top.

    Missing files are skipped; at least one must exist (asserted).
    """
    merged = {}
    for filename in ('default_config.json', 'config.json'):
        try:
            with open(join(PROJECT_ROOT, filename)) as fd:
                merged.update(json.load(fd))
        except IOError:
            pass
    assert merged
    return merged
def write_config(data):
    """Persist `data` to the project's config.json, pretty-printed and sorted."""
    target = join(PROJECT_ROOT, 'config.json')
    with open(target, 'w') as fd:
        json.dump(data, fd, sort_keys = True, indent = 4, separators = (',', ': '))
def expand_path(path):
    """Expand '~' and resolve relative paths against the project root."""
    expanded = expanduser(path)
    if expanded.startswith('/'):
        return expanded
    return join(PROJECT_ROOT, expanded)
def mkdirp(path):
    """Create `path` and any missing parents; quiet if it already exists (mkdir -p)."""
    try:
        makedirs(path)
    except OSError as e:
        # Re-raise everything except "already exists as a directory".
        if not (e.errno == EEXIST and isdir(path)):
            raise
def sensor_graph_file(sensor, start):
    """Absolute path of the PNG graph for `sensor` at window `start` (dashes stripped)."""
    config = read_config()
    filename = '{0}-{1}.png'.format(sensor, start.replace('-', ''))
    return join(expand_path(config['graphs_path']), filename)
def do_graph(graph, sensor, database, start):
    """Render one temperature graph PNG via rrdtool.

    graph: output PNG path; sensor: title label; database: RRD file path;
    start: rrdtool start spec (e.g. '-1d').
    """
    # Called for its side effect only; the return value of rrdtool.graph was
    # previously bound to an unused local.
    rrdtool.graph(str(graph),
        '--imgformat', 'PNG',
        '--font', 'DEFAULT:0:/usr/share/fonts/truetype/ttf-dejavu/DejaVuSans.ttf',
        '--color', 'BACK#FFFFFFFF',
        '--color', 'CANVAS#FFFFFF00',
        '--border', '0',
        '--width', '775',
        '--height', '279',
        '--slope-mode',
        '--start', str(start),
        '--vertical-label', 'Temperature °C',
        '--title', str(sensor),
        '--lower-limit', '0',
        'DEF:T=%s:t:AVERAGE' % str(database),
        'LINE1:T#FF3300:Temperature \t',
        'GPRINT:T:MAX:Max\: %6.2lf°C\t',
        'GPRINT:T:MIN:Min\: %6.2lf°C\t',
        'GPRINT:T:AVERAGE:Average\: %6.2lf°C\t',
        'GPRINT:T:LAST:Current\: %6.2lf°C\c',
        )
def generate_rrdtool_create_parameters():
    """Build the argument list for `rrdtool create`.

    One 60-second-step GAUGE data source plus AVERAGE/MIN/MAX archives for
    each (slot length, retention) pair below. The unused `rra` local from
    the original has been removed.
    """
    SECONDS_IN_MINUTE = 60
    SECONDS_IN_HOUR = 3600
    HOURS_IN_DAY = 24
    SECONDS_IN_DAY = SECONDS_IN_HOUR * HOURS_IN_DAY
    # (consolidation slot length, retention period), both in seconds.
    rra_in = (
        (SECONDS_IN_MINUTE, SECONDS_IN_HOUR),
        (SECONDS_IN_MINUTE, SECONDS_IN_DAY),
        (SECONDS_IN_HOUR, SECONDS_IN_DAY * 7),
        (SECONDS_IN_HOUR * 3, SECONDS_IN_DAY * 100),
    )
    unit_seconds = 60
    parameters = [
        '-s %d' % (unit_seconds,),
        'DS:t:GAUGE:600:U:U',
    ]
    for (slot_seconds, keep_slot_for_seconds), type in product(rra_in, ('AVERAGE', 'MIN', 'MAX')):
        # Convert seconds to whole base steps, rounding up.
        slot_units = int(ceil(slot_seconds * 1.0 / unit_seconds))
        keep_slot_for_units = int(ceil(keep_slot_for_seconds * 1.0 / unit_seconds))
        parameters.append('RRA:%s:0.5:%d:%d' % (type, slot_units, keep_slot_for_units,))
    return parameters
def main():
    """One measurement cycle: read all sensors, then record and graph each."""
    handle_temperatures(get_temperatures())
def handle_temperatures(temperatures):
    """Record and graph every reading in `temperatures` ({sensor_id: celsius})."""
    for sensor_id, reading in temperatures.items():
        handle_sensor_value(sensor_id, reading)
def handle_sensor_value(sensor, value):
    # Record a single reading: register the sensor's display name, persist the
    # mapping, log the value, store the sample in the RRD and refresh graphs.
    config = read_config()
    # First-seen sensors are registered under their raw id.
    config['sensor_names'].setdefault(sensor, sensor)
    write_config(config)
    name = config['sensor_names'][sensor]  # NOTE(review): unused; the print re-reads the mapping
    print "sensor %s (%s) - %s C" % (config['sensor_names'][sensor], sensor, value)
    save_sensor_value(sensor, value)
    generate_graphs_for_sensor(sensor)
def save_sensor_value(sensor, value):
    """Append `value` to the sensor's RRD, creating the database on first use."""
    database = rrd_path(sensor)
    mkdirp(dirname(database))
    if not isfile(database):
        rrdtool.create(str(database), *generate_rrdtool_create_parameters())
    rrdtool.update(str(database), 'N:%s' % (value,))
def rrd_path(sensor):
    """Absolute path of the round-robin database file for `sensor`."""
    databases_dir = expand_path(read_config()['databases_path'])
    return join(databases_dir, "%s.rrd" % sensor)
def generate_graphs_for_sensor(sensor):
    """Render one PNG per configured time window for `sensor`."""
    config = read_config()
    name = config['sensor_names'][sensor]
    database = rrd_path(sensor)
    for start in config['graphs']:
        path = sensor_graph_file(name, start)
        mkdirp(dirname(path))
        # Reuse the path computed above; the original called
        # sensor_graph_file() a second time with the same arguments.
        do_graph(path, sensor, database, start)
def get_temperatures():
    """Run digitemp once and return {sensor_id: temperature_celsius}."""
    config = read_config()
    digitemp_config = join(PROJECT_ROOT, 'digitemp.conf')
    # NOTE(security/review): config['port'] is interpolated into a shell
    # command — confirm it is trusted local configuration only.
    output = os.popen("digitemp_DS9097 -c {conf} -s {port} -a -q -o '%R %.2C' -i".format(
        conf = digitemp_config, port = config['port'])).readlines()
    # digitemp -i (re)initializes and writes the config file; it is discarded
    # after each run.  TODO confirm this is intentional.
    remove(digitemp_config)
    lines = [line.split() for line in output]
    # Keep only "<sensor> <temperature>" rows, dropping banners/blank lines.
    lines = [line for line in lines if len(line) == 2]
    temperatures = dict((sensor, float(temperature)) for (sensor, temperature) in lines)
    return temperatures
if __name__ == '__main__':
    # Optional argv[1]: polling period in seconds; absent/0 means run once.
    period = int(sys.argv[1]) if len(sys.argv) > 1 else 0
    main()
    while period:
        sleep(period)
        main()
|
996,536 | 19f6a6f1cb6921fe62d92975a84e3350cf184a1f | #-----------------------------------------------------------------------------
# Runtime: 48ms
# Memory Usage:
# Link:
#-----------------------------------------------------------------------------
import sys
sys.path.append('LeetCode')
from ListNode import ListNode
class Solution:
    def reverseKGroup(self, head: ListNode, k: int) -> ListNode:
        """Reverse a linked list k nodes at a time (LeetCode 25).

        A final group shorter than k is left in its original order.
        """
        if head is None or k == 1:
            return head
        # Dummy node simplifies re-linking when the first group is reversed.
        dummy_head = ListNode(-1)
        dummy_head.next = head
        # p1: node before the current group; p2: current group head;
        # p3: lookahead probe, then reversal accumulator.
        p1, p2, p3 = dummy_head, head, head
        while p2 is not None and p3 is not None:
            # Probe k nodes ahead; if fewer remain, keep the tail untouched.
            for _ in range(k):
                if p3 is None:
                    return dummy_head.next
                p3 = p3.next
            # Reverse the group by prepending each node onto p3
            # (which starts as the node after the group).
            for _ in range(k):
                temp_next = p2.next
                p2.next = p3
                p3 = p2
                p2 = temp_next
            # p3 is now the reversed group's head; attach it after p1.
            p1.next = p3
            # Advance p1 to the reversed group's tail, then reset cursors.
            for _ in range(k):
                p1 = p1.next
            p2 = p1.next
            p3 = p1.next
        return dummy_head.next
|
996,537 | 9bf9558801deb3e6b85e35be1aa96ab1345755ae | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-05-03 21:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter `choice.choice_text1` on the dosirak app."""

    dependencies = [
        ('dosirak', '0007_auto_20160503_2146'),
    ]

    operations = [
        migrations.AlterField(
            model_name='choice',
            name='choice_text1',
            # Default is the UTF-8 byte sequence for the Korean word '맛' (taste).
            field=models.CharField(default=b'\xeb\xa7\x9b', max_length=201),
        ),
    ]
|
996,538 | d349dd191613ecd0dc37cba72879cecc3420b72f | #!/usr/bin/python
if __name__ == "__main__":
import sys
import cv2
import time
import logging
import ctypes
import numpy as np
from logging.config import dictConfig
from openni import openni2
from openni import _openni2 as c_api
else:
from .Common import *
logging_config = dict(
version = 1,
disable_existing_loggers = False,
formatters = {
'simple': {'format':
'%(asctime)s [%(levelname)s] [%(name)s] %(message)s'}
},
handlers = {
'console': {'class': 'logging.StreamHandler',
'formatter': 'simple',
'level': logging.DEBUG}
},
root = {
'handlers': ['console'],
'level': logging.DEBUG,
},
)
dictConfig(logging_config)
logger = logging.getLogger('Device')
'''
TODO:
QT config interface
QT device finder/selector
Multiple device viewer
OpenCV image in QT interface
PCL integration
QT GL 3d viewer
QT VTK 3d viewer
'''
'''
OpenNI Options:
IMAGE_REGISTRATION_DEPTH_TO_COLOR
IMAGE_REGISTRATION_OFF
Sensor Options:
SENSOR_COLOR
SENSOR_DEPTH
SENSOR_IR
Pixel Format Options:
PIXEL_FORMAT_DEPTH_100_UM
PIXEL_FORMAT_DEPTH_1_MM
PIXEL_FORMAT_GRAY16
PIXEL_FORMAT_GRAY8
PIXEL_FORMAT_JPEG
PIXEL_FORMAT_RGB888
PIXEL_FORMAT_SHIFT_9_2
PIXEL_FORMAT_SHIFT_9_3
PIXEL_FORMAT_YUV422
PIXEL_FORMAT_YUYV
OpenNI Functions:
configure_logging(directory=None, severity=None, console=None)
convert_depth_to_color(depthStream, colorStream, depthX, depthY, depthZ)
convert_depth_to_world(depthStream, depthX, depthY, depthZ)
convert_world_to_depth(depthStream, worldX, worldY, worldZ)
get_bytes_per_pixel(format)
get_log_filename()
get_version()
initialize(dll_directories=['.'])
is_initialized()
unload()
wait_for_any_stream(streams, timeout=None)
OpenNI.device functions:
get_device_info()
get_sensor_info(SENSOR_TYPE)
has_sensor(SENSOR_TYPE)
create_stream(SENSOR_TYPE)
is_image_registration_mode_supported(True/False)
get_image_registration_mode()
set_image_registration_mode(True/False)
.depth_color_sync
'''
STREAM_NAMES = {1: "ir", 2: "color", 3: "depth"}
def openni_init(path="."):
    """Initialize OpenNI2 once, pointing it at the SDK's Redist directory.

    Returns True on success, False when both the given path and the default
    search locations fail.
    """
    if path is None:
        path = "."
    if path:
        # Drivers live under <sdk>/Redist; append it unless already present.
        if not "Redist" in path:
            if "linux" in sys.platform:
                path = path.rstrip('/') + "/Redist"
            elif "win32" in sys.platform:
                path = path.rstrip('\\') + "\\Redist"
    try:
        if (not openni2.is_initialized()):
            logger.info("OpenNi2 is not Initialized! Initializing.")
            openni2.initialize(path)
        return True
    except Exception as e:
        logger.error(e)
        logger.warning("Openni path is: " + path)
        try:
            # Fall back to openni2's own default search locations.
            logger.warning("Resorting to standard openni2 initialization")
            openni2.initialize()
            return True
        except Exception as e:
            logger.fatal(e)
            return False
def openni_list(path=""):
    """Enumerate connected OpenNI devices as (uri, vendor, name) tuples."""
    openni_init(path)
    pdevs = ctypes.POINTER(c_api.OniDeviceInfo)()
    count = ctypes.c_int()
    c_api.oniGetDeviceList(ctypes.byref(pdevs), ctypes.byref(count))
    devices = [(pdevs[i].uri, pdevs[i].vendor, pdevs[i].name) for i in range(count.value)]
    # The device list is allocated by the C library; release it explicitly.
    c_api.oniReleaseDeviceList(pdevs)
    return devices
class VideoMode():
    """Value object describing a stream mode (pixel format, resolution, fps).

    Wraps / converts to an openni2.VideoMode and supports value equality.
    """

    def __init__(self, oniVideoMode=None, pixelFormat=None, resolutionX=None, resolutionY=None, fps=None):
        self._set_video_mode(oniVideoMode=oniVideoMode, pixelFormat=pixelFormat,
                             resolutionX=resolutionX, resolutionY=resolutionY, fps=fps)

    def _set_video_mode(self, oniVideoMode = None, pixelFormat=None, resolutionX=None, resolutionY=None, fps=None):
        # A wrapped openni2 mode takes precedence over the individual values.
        if oniVideoMode is not None:
            pixelFormat = oniVideoMode.pixelFormat
            resolutionX = oniVideoMode.resolutionX
            resolutionY = oniVideoMode.resolutionY
            fps = oniVideoMode.fps
        self.fps = fps
        self.pixelFormat = pixelFormat
        self.resolutionX = resolutionX
        self.resolutionY = resolutionY

    def __eq__(self, other):
        if not isinstance(self, other.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return '{} {}:{}@{}'.format(self.pixelFormat, self.resolutionX, self.resolutionY, self.fps)

    def __repr__(self):
        return '{} {}:{}@{}'.format(repr(self.pixelFormat), repr(self.resolutionX), repr(self.resolutionY), repr(self.fps))

    @ property
    def video_mode(self):
        """The equivalent openni2.VideoMode object."""
        return openni2.VideoMode(pixelFormat = self.pixelFormat, resolutionX = self.resolutionX, resolutionY = self.resolutionY, fps = self.fps)

    @ video_mode.setter
    def video_mode(self, method):
        self._set_video_mode(oniVideoMode=method)
        return self.video_mode
class OpenNIStream(openni2.VideoStream):
    """Base wrapper around an openni2.VideoStream for one sensor type.

    Subclasses set `default_video_mode` (before calling this __init__) and
    `ctype`, and implement getData() to reshape the raw frame buffer.
    """
    #TODO: Handle different cameras (Kinect vs Astra)
    def __init__(self, device, sensor_type):
        openni2.VideoStream.__init__(self, device, sensor_type)
        self.sensor_type = sensor_type
        self.active = False      # set True once the stream is started/configured
        self.ctype = None        # buffer element ctype, set by subclasses
        self.frame = None
        self.frame_data = None
        self.video_modes = list()
        # Cache every mode the sensor advertises, wrapped in our VideoMode.
        for mode in self.get_sensor_info().videoModes:
            video_mode = VideoMode(mode)
            self.video_modes.append(video_mode)
        #self.camera
        #self.settings = openni2.CameraSettings(self)

    def _set_video_mode(self, video_mode=None):
        """Apply `video_mode`, falling back to the subclass default or first advertised mode."""
        if video_mode is None:
            logger.debug('Setting video mode to default')
            if self.default_video_mode is not None:
                video_mode = self.default_video_mode
            else:
                video_mode = self.video_modes[0]
        try:
            if video_mode in self.video_modes:
                self.set_video_mode(video_mode.video_mode)
            else:
                raise(Exception('Video Mode not valid for {}\n{}'.format(self.sensor_type, video_mode)))
        except Exception as e:
            logger.error(e)
            return False
        finally:
            # NOTE(review): the `return True` in this finally clause overrides
            # the `return False` above, so callers always see True — confirm
            # this is intended.
            self.settings = openni2.CameraSettings(self)
            self.fov = (self.get_horizontal_fov(), self.get_vertical_fov())
            return True

    def _getData(self, ctype = None):
        """Read one frame and expose it as a flat numpy array in self.frame_data."""
        try:
            ctype = ctype if (ctype) else self.ctype
            self.frame = self.read_frame()
            self.height = self.frame.height
            self.width = self.frame.width
            frame_data_buffer = self.frame.get_buffer_as(ctype)
            if (ctype is ctypes.c_ubyte):
                dtype = np.uint8
            else:
                # FIXME: Handle this better? map pixelFormat to np/ctype?
                dtype = np.uint16
            #self.frame_data = np.ndarray((self.height,self.width),dtype=dtype,buffer=frame_data_buffer)
            self.frame_data = np.frombuffer(frame_data_buffer, dtype=dtype)
        except Exception as e:
            logger.error(e)
            return False
        finally:
            # NOTE(review): this return masks the `return False` above; on
            # error the previous (possibly stale) frame_data is returned.
            return self.frame_data
class OpenNIStream_Color(OpenNIStream):
    """Color stream: defaults to RGB888 640x480@30; frames are (H, W, 3) uint8."""

    def __init__(self, device):
        self.default_video_mode = VideoMode(pixelFormat=openni2.PIXEL_FORMAT_RGB888,
                                            resolutionX=640, resolutionY=480, fps=30)
        OpenNIStream.__init__(self, device, openni2.SENSOR_COLOR)
        self.ctype = ctypes.c_uint8

    def getData(self, ctype = None):
        """Return the latest frame reshaped to (height, width, RGB)."""
        img = self._getData(ctype)
        img.shape = (self.height, self.width, 3)
        return img
class OpenNIStream_Depth(OpenNIStream):
    """Depth stream: defaults to 100µm-unit depth 640x480@30; frames are (H, W) uint16."""

    def __init__(self, device):
        self.default_video_mode = VideoMode(pixelFormat=openni2.PIXEL_FORMAT_DEPTH_100_UM,
                                            resolutionX=640, resolutionY=480, fps=30)
        OpenNIStream.__init__(self, device, openni2.SENSOR_DEPTH)
        self.ctype = ctypes.c_uint16

    def getData(self, ctype = None):
        """Return the latest depth frame reshaped to (height, width)."""
        depth = self._getData(ctype)
        depth.shape = (self.height, self.width)
        return depth
class OpenNIStream_IR(OpenNIStream):
    """IR stream: defaults to GRAY16 640x480@30; frames are (H, W) uint16."""

    def __init__(self, device):
        self.default_video_mode = VideoMode(pixelFormat=openni2.PIXEL_FORMAT_GRAY16,
                                            resolutionX=640, resolutionY=480, fps=30)
        OpenNIStream.__init__(self, device, openni2.SENSOR_IR)
        self.ctype = ctypes.c_uint16

    def getData(self, ctype = None):
        """Return the latest IR frame reshaped to (height, width)."""
        ir = self._getData(ctype)
        ir.shape = (self.height, self.width)
        return ir
class OpenNIDevice(openni2.Device):
    """An openni2.Device with one wrapped stream per sensor (color/depth/ir)."""

    def __init__(self, uri=None, mode=None, path=None):
        # NOTE(review): `mode` is accepted but never used — confirm intended.
        openni_init(path)
        openni2.configure_logging(severity=0, console=False)
        openni2.Device.__init__(self, uri)
        self.serial = self.get_property(c_api.ONI_DEVICE_PROPERTY_SERIAL_NUMBER, (ctypes.c_char * 16)).value
        self.stream = {'color': OpenNIStream_Color(self),
                       'depth': OpenNIStream_Depth(self),
                       'ir': OpenNIStream_IR(self)}

    def stop(self):
        """Stop all streams on this device."""
        for s in self.stream:
            self.stream[s].stop()
        #openni2.unload()

    def open_stream(self, stream_type, width=None, height=None, fps=None, pixelFormat=None):
        """Start a stream and apply a video mode; returns True on success.

        A custom mode is used only when all four of width/height/fps/pixelFormat
        are given; otherwise the stream's default mode applies.
        """
        try:
            if (not self.has_sensor(stream_type)):
                logger.error("Device does not have stream type of {}".format(stream_type))
                return False
            stream_name = STREAM_NAMES[stream_type.value]
            # Double-open is treated as a failure (caught below, returns False).
            if self.stream[stream_name].active:
                raise(Exception("{} stream already active!".format(stream_name)))
            self.stream[stream_name].start()
            # TODO: No feedback if stream start was successful, evaluate?
            video_mode = None
            if width is not None and height is not None and fps is not None and pixelFormat is not None:
                video_mode = openni2.VideoMode(pixelFormat=pixelFormat, resolutionX=width, resolutionY=height, fps=fps)
            self.stream[stream_name]._set_video_mode(video_mode)
            self.stream[stream_name].active = True
        except Exception as e:
            logger.error('Failed to open stream', exc_info=True)
            return False
        return True

    def open_stream_color(self, width=None, height=None, fps=None, pixelFormat=None):
        """Convenience wrapper: open the color stream."""
        return self.open_stream(openni2.SENSOR_COLOR, width, height, fps, pixelFormat)

    def open_stream_depth(self, width=None, height=None, fps=None, pixelFormat=None):
        """Convenience wrapper: open the depth stream."""
        return self.open_stream(openni2.SENSOR_DEPTH, width, height, fps, pixelFormat)

    def open_stream_ir(self, width=None, height=None, fps=None, pixelFormat=None):
        """Convenience wrapper: open the IR stream."""
        return self.open_stream(openni2.SENSOR_IR, width, height, fps, pixelFormat)

    def get_frame(self, stream_type):
        '''
        frame_type = SENSOR_IR, SENSOR_COLOR, SENSOR_DEPTH

        Returns the stream's latest frame array, or False on error.
        '''
        try:
            if (not self.has_sensor(stream_type)):
                raise(Exception("Device does not have stream type of {}".format(stream_type)))
            stream_name = STREAM_NAMES[stream_type.value]
            if not self.stream[stream_name].active:
                raise(Exception("{} stream not active!".format(stream_name)))
            return self.stream[stream_name].getData()
        except Exception as e:
            logger.error("Failed to get frame", exc_info=True)
            return False

    def get_frame_color(self):
        """Latest color frame (see get_frame)."""
        return self.get_frame(openni2.SENSOR_COLOR)

    def get_frame_depth(self):
        """Latest depth frame (see get_frame)."""
        return self.get_frame(openni2.SENSOR_DEPTH)

    def get_frame_ir(self):
        """Latest IR frame (see get_frame)."""
        return self.get_frame(openni2.SENSOR_IR)
if __name__ == "__main__":
    # Simple live viewer: open the selected streams and display them with OpenCV.
    device = OpenNIDevice()
    # FIXME: IR doesn't work?
    # Seems to be an OpenNI driver problem
    # Corrupt frame buffer
    show_depth = False
    show_color = True
    show_ir = False
    if show_depth:
        device.open_stream_depth()
        cv2.namedWindow("Depth Image")
    if show_color:
        device.open_stream_color()
        cv2.namedWindow("Color Image")
    if show_ir:
        device.open_stream_ir()
        cv2.namedWindow("IR Image")
    if not show_ir and not show_color and not show_depth:
        logger.error("No streams enabled! Exiting")
        sys.exit(0)
    while True:
        if show_depth:
            depth_img = device.get_frame_depth()
            # Scale 16-bit depth down to 8-bit for display.
            depth_img = cv2.convertScaleAbs(depth_img, alpha=(255.0/65535.0))
            cv2.imshow("Depth Image", depth_img)
        if show_color:
            color_img = device.get_frame_color()
            color_img = color_img[...,::-1] #BGR to RGB
            cv2.imshow("Color Image", color_img)
        if show_ir:
            ir_img = device.get_frame_ir()
            ir_img = cv2.convertScaleAbs(ir_img, alpha=(255.0/65535.0))
            cv2.imshow("IR Image", ir_img)
        key = cv2.waitKey(1) & 0xFF
        # Quit on Esc, q, x or c.
        if (key == 27 or key == ord('q') or key == ord('x') or key == ord("c")):
            device.stop()
            break
    openni2.unload()
    cv2.destroyAllWindows()
|
996,539 | 414ce8da4727c87a49c83729510f1d87cc631f3f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: wj
"""
import tensorflow as tf
import glob
import os
import modeling
import tokenization
import optimization
import collections
import pickle
from sklearn.metrics import classification_report
import re
class InputFeatures(object):
    """A single set of features of data.

    Plain value container for one tokenized, padded training example.
    """

    def __init__(self, input_ids, segment_ids, label_ids, input_mask, output_mask):
        self.input_ids = input_ids        # token ids, padded to max length
        self.segment_ids = segment_ids    # segment/type ids
        self.label_ids = label_ids        # per-token tag label ids
        self.input_mask = input_mask      # 1 for real tokens, 0 for padding
        self.output_mask = output_mask    # positions that count toward loss/accuracy
class InputExample(object):
    """One raw example: an id, one or two text segments and an optional label."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
def get_labels(data_dir):
    """Return the sorted label set for the tagging task.

    If `data_dir/labels.txt` exists it is the source of truth. Otherwise the
    set is derived from the *_train.txt files — the intent name taken from
    each file name plus B_/M_ tags for every <slot> marker found in the
    text — and cached to labels.txt for subsequent runs.
    """
    labels_file = os.path.join(data_dir, 'labels.txt')
    if os.path.isfile(labels_file):
        tf.logging.info("Read labels from '{}'".format(labels_file))
        labels = []
        with open(labels_file, 'r') as f:
            for line in f:
                line = line.strip()
                if line:
                    labels.append(line)
        return labels
    files = glob.glob(os.path.join(data_dir, '*_train.txt'))
    # Base tags: 'o' (outside), 'x' (ignored piece), '<PAD>' (padding) — TODO confirm.
    labels = set(['o', 'x', '<PAD>'])
    for file in files:
        # os.path.basename is portable; the original split on "/" and broke
        # on Windows paths.
        label = os.path.basename(file).split('_')[0]
        labels.add(label)
        with open(file, 'r') as f:
            for line in f:
                for slot_name in re.findall("<([a-zA-Z]+)>", line):
                    labels.add("B_" + slot_name)
                    labels.add("M_" + slot_name)
    labels = sorted(list(labels))
    with open(labels_file, 'w') as f:
        for line in labels:
            f.write(line + '\n')
    return labels
def get_train_examples(raw_data_dir, data_mode):
    """Load InputExamples from all `*_<data_mode>.txt` files in `raw_data_dir`.

    The label for every line is the intent name taken from the file name.
    Bug fixes: the guid counter was never incremented (every example got
    guid 0); the file-name split used "/" and was not portable.
    """
    samples = []
    num = 0
    files = glob.glob(os.path.join(raw_data_dir, "*_" + data_mode + ".txt"))
    for file in files:
        label = os.path.basename(file).split('_')[0]
        with open(file, 'r') as f:
            for line in f:
                # NOTE(review): lines from iteration keep their trailing '\n',
                # so this only skips a truly empty final line — confirm.
                if not line:
                    continue
                samples.append(InputExample(guid=num, text_a=line, label=label))
                num += 1
    return samples
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,labels, output_mask,num_labels):
    """Build BERT + per-token classification head + CRF for sequence labeling.

    Returns (loss, per_example_loss, logits, probabilities, sentence_level_acc).
    """
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=False)
    output_layer = model.get_sequence_output() #[batch_size, seq_length, hidden_size]
    hidden_size = output_layer.shape[-1].value
    #batch_size= output_layer.shape[0].value
    seq_length= output_layer.shape[1].value
    output_weights = tf.get_variable("output_weights", [num_labels, hidden_size],initializer=tf.truncated_normal_initializer(stddev=0.02))
    output_bias = tf.get_variable("output_bias", [num_labels], initializer=tf.zeros_initializer())
    with tf.variable_scope("loss"):
        if is_training:
            output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
        # Project every token's hidden state to per-label logits.
        output_layer=tf.reshape(output_layer,shape=[-1,hidden_size])
        logits = tf.matmul(output_layer, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        logits=tf.reshape(logits,shape=[-1,seq_length,num_labels])
        # Actual (unpadded) length of each sequence, from the input mask.
        seq_length_list=tf.reduce_sum(input_mask,axis=-1)
        with tf.variable_scope("cft_loss"):
            transition_parameters=tf.get_variable("transitions",shape=[num_labels, num_labels],initializer=tf.contrib.layers.xavier_initializer())
            log_likelihood, _= tf.contrib.crf.crf_log_likelihood(logits, labels,seq_length_list,transition_params=transition_parameters)
            loss = tf.reduce_mean(-log_likelihood,name="loss")
            # Viterbi decode for predictions.
            decode_tags,best_scores=tf.contrib.crf.crf_decode(logits,transition_parameters,seq_length_list)
            # Zero out positions excluded from evaluation before comparing.
            pre_label=tf.cast(decode_tags,dtype=tf.int32)*tf.cast(output_mask,dtype=tf.int32)
            true_label=tf.cast(labels,dtype=tf.int32)*tf.cast(output_mask,dtype=tf.int32)
            # Count of matching positions per sentence.
            sentence_level_acc=tf.reduce_sum(tf.cast(tf.equal(pre_label,true_label),dtype=tf.int32),axis=-1)
            #sentence_acc=tf.equal(accuracy,tf.constant([accuracy.shape[-1].value]*accuracy.shape[0].value))
            per_example_loss=-log_likelihood
            probabilities=tf.nn.softmax(logits,axis=-1)
    return (loss, per_example_loss, logits, probabilities,sentence_level_acc)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps,sentence_acc_array=None):
    # Build the model_fn closure consumed by tf.estimator.Estimator.
    def model_fn(features, labels, mode):
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
        output_mask=features['output_mask']
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        (total_loss, per_example_loss, logits, probabilities,sentence_level_acc) = create_model(
            bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,output_mask,num_labels)
        batch_size=logits.shape[0].value
        seq_length=logits.shape[1].value
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        if init_checkpoint:
            # Warm-start every trainable variable present in the checkpoint.
            (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            tf.logging.info("**** Trainable Variables ****")
            init_num=0
            for var in tvars:
                #init_string = ""
                if var.name in initialized_variable_names:
                    init_num+=1
                    #init_string = ", *INIT_FROM_CKPT*"
                #tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,init_string)
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
            tf.logging.info("init from checkpoint done!var num:{}".format(init_num))
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, False)
            output_spec=tf.estimator.EstimatorSpec(mode=mode,loss=total_loss,train_op=train_op)
        elif mode == tf.estimator.ModeKeys.EVAL:
            def metric_fn(per_example_loss, label_ids, logits):
                # sentence_level_acc is the per-example count of correct
                # positions; comparing it against sentence_acc_array
                # (presumably [max_seq_length] * batch, per main()) yields
                # exact-match sentence accuracy — TODO confirm.
                print(sentence_level_acc)
                accuracy = tf.metrics.accuracy(sentence_level_acc,sentence_acc_array)
                loss = tf.metrics.mean(per_example_loss)
                return {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                }
            eval_metrics = metric_fn(per_example_loss, label_ids, logits)
            output_spec = tf.estimator.EstimatorSpec(mode=mode,loss=total_loss,eval_metric_ops=eval_metrics)
        else:
            # PREDICT: argmax over label logits (no CRF decoding here).
            pre_label=tf.argmax(probabilities,axis=-1,name='label_prediction')#batch_size,seq_length,num_labels
            predictions={'pre_label':pre_label,'real_label':label_ids}
            output_spec = tf.estimator.EstimatorSpec(mode=mode,loss=total_loss,predictions=predictions)
        return output_spec
    return model_fn
def convert_single_example(ex_index, example, label_list, max_seq_length,tokenizer):
    """Converts a single `InputExample` into a single `InputFeatures`."""
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i
    text_a=example.text_a
    labels_a=[]
    # Split the annotated text into plain segments and <slot>value</slot> spans.
    text_a=re.split("(<[a-zA-Z]+>[^<>]+</[a-zA-Z]+>)",text_a)
    tokens_a=[]
    for sub_text in text_a:
        if len(sub_text.strip())<1:
            continue
        elif re.search('<([a-zA-Z]+)>([^<>]+)<[/a-zA-Z]+>',sub_text):
            # Slot span: tag first piece B_<slot>, later pieces M_<slot>,
            # WordPiece continuations ("##...") get the dummy 'x' label.
            re_res=re.search('<([a-zA-Z]+)>([^<>]+)<[/a-zA-Z]+>',sub_text)
            slot_name=re_res.group(1)
            slot_value=re_res.group(2)
            slot_value=tokenizer.tokenize(slot_value)
            slot_labels=[]
            for i,s in enumerate(slot_value):
                if i==0:
                    slot_labels.append("B_"+slot_name)
                elif re.search("^##",s):
                    slot_labels.append("x")
                else:
                    slot_labels.append("M_"+slot_name)
            tokens_a.extend(slot_value)
            labels_a.extend(slot_labels)
        else:
            # Plain text: 'o' outside-slot label, 'x' for continuations.
            sub_text=tokenizer.tokenize(sub_text)
            sub_labels=['x' if re.search("^##",i) else 'o' for i in sub_text]
            tokens_a.extend(sub_text)
            labels_a.extend(sub_labels)
    tokens = []
    segment_ids = []
    tokens.append("[CLS]")
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)
    # Position 0 ([CLS]) carries the sentence-level intent label; the final
    # 'o' aligns with [SEP].
    labels=[example.label]
    for label in labels_a:
        labels.append(label)
    labels.append('o')
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)
    # output_mask zeroes out sub-word continuations so scoring ignores them.
    output_mask=[1 if i!='x' else 0 for i in labels]
    label_ids=[label_map[i] for i in labels]
    # Pad everything to max_seq_length.
    # NOTE(review): there is no truncation — inputs longer than
    # max_seq_length will trip the assertions below.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        segment_ids.append(0)
        input_mask.append(0)
        output_mask.append(0)
        label_ids.append(label_map['<PAD>'])
    assert len(input_ids)==max_seq_length
    assert len(segment_ids)==max_seq_length
    assert len(label_ids)==max_seq_length
    assert len(input_mask)==max_seq_length
    assert len(output_mask)==max_seq_length
    if ex_index < 5:
        # Log the first few examples for manual inspection.
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info("tokens: %s" % " ".join(tokens))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        tf.logging.info("labels: %s" % " ".join([str(x) for x in labels]))
        tf.logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
        tf.logging.info("output_mask: %s" % " ".join([str(x) for x in output_mask]))
    feature = InputFeatures(
        input_ids=input_ids,
        segment_ids=segment_ids,
        label_ids=label_ids,
        input_mask=input_mask,
        output_mask=output_mask)
    return feature
def file_based_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_file):
    """Convert a set of `InputExample`s to a TFRecord file at output_file."""
    writer = tf.python_io.TFRecordWriter(output_file)

    def create_int_feature(values):
        # Wrap a list of Python ints as a TF int64 Feature.
        f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
        return f

    try:
        for (ex_index, example) in enumerate(examples):
            if ex_index % 10000 == 0:
                tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
            feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer)
            features = collections.OrderedDict()
            features["input_ids"] = create_int_feature(feature.input_ids)
            features["input_mask"] = create_int_feature(feature.input_mask)
            features["segment_ids"] = create_int_feature(feature.segment_ids)
            features["label_ids"] = create_int_feature(feature.label_ids)
            features["output_mask"] = create_int_feature(feature.output_mask)
            tf_example = tf.train.Example(features=tf.train.Features(feature=features))
            writer.write(tf_example.SerializeToString())
    finally:
        # Bug fix: the writer was never closed, so trailing records could
        # remain unflushed and the file handle leaked.
        writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,drop_remainder,batch_size,sample_length):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""
    # Fixed-length int64 features, matching what
    # file_based_convert_examples_to_features wrote.
    name_to_features = {
        "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "label_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "output_mask": tf.FixedLenFeature([seq_length], tf.int64),
    }
    def input_fn():
        """The actual input function."""
        d = tf.data.TFRecordDataset(input_file)
        if is_training:
            # Repeat indefinitely; shuffle buffer covers the whole dataset
            # (sample_length = number of examples).
            d = d.repeat()
            d = d.shuffle(buffer_size=sample_length)
        d = d.apply(
            tf.contrib.data.map_and_batch(
                lambda record: tf.parse_single_example(record, name_to_features),
                batch_size=batch_size,
                drop_remainder=drop_remainder))
        return d
    return input_fn
def main():
    """Train, evaluate and optionally predict with the BERT+CRF tagger.

    All paths and hyper-parameters are hard-coded below; flip the do_* flags
    to select which phases run.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    # ---- configuration ----------------------------------------------------
    output_dir='./out'
    data_dir='./data'
    bert_config_file='./chinese_L-12_H-768_A-12/bert_config.json'
    vocab_file='./chinese_L-12_H-768_A-12/vocab.txt'
    do_lower_case=True
    log_steps=200
    do_train=True
    do_eval=True
    do_predict=False
    train_batch_size=8
    eval_batch_size=32
    test_batch_size=32
    num_train_epochs=2
    warmup_proportion=0.1
    init_checkpoint='./chinese_L-12_H-768_A-12/bert_model.ckpt'
    eval_checkpoint='./out/model.ckpt-3668'
    learning_rate=5e-5
    max_seq_length=64
    output_predict_file='./predict.pkl'
    bert_config = modeling.BertConfig.from_json_file(bert_config_file)
    tf.gfile.MakeDirs(output_dir)
    label_list=get_labels(data_dir)
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
    session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    session_config.gpu_options.allow_growth = True
    run_config=tf.estimator.RunConfig(model_dir=output_dir,log_step_count_steps=log_steps,session_config=session_config)
    train_examples = None
    num_train_steps = None
    num_warmup_steps = None
    if do_train:
        train_examples = get_train_examples(data_dir,'train')
        num_train_steps = int(len(train_examples) / train_batch_size * num_train_epochs)
        num_warmup_steps = int(num_train_steps * warmup_proportion)
    # A perfectly tagged sentence matches on all max_seq_length positions.
    sentence_acc_array=[max_seq_length]*eval_batch_size
    model_fn=model_fn_builder(bert_config, len(label_list), init_checkpoint, learning_rate,num_train_steps, num_warmup_steps,sentence_acc_array=sentence_acc_array)
    estimator=tf.estimator.Estimator(model_fn=model_fn,model_dir=output_dir,config=run_config)
    if do_train:
        train_file = os.path.join(output_dir, "train.tf_record")
        file_based_convert_examples_to_features(train_examples, label_list, max_seq_length, tokenizer, train_file)
        tf.logging.info("***** Running training *****")
        tf.logging.info("  Num examples = %d", len(train_examples))
        tf.logging.info("  Batch size = %d",train_batch_size)
        tf.logging.info("  Num steps = %d", num_train_steps)
        train_input_fn = file_based_input_fn_builder(
            input_file=train_file,
            seq_length=max_seq_length,
            is_training=True,
            drop_remainder=True,
            batch_size=train_batch_size,
            sample_length=len(train_examples))
        estimator.train(input_fn=train_input_fn, steps=num_train_steps)
    if do_eval:
        # Evaluation uses the 'test' split.
        eval_examples = get_train_examples(data_dir,'test')
        num_eval_steps=int(len(eval_examples)/eval_batch_size)
        eval_file = os.path.join(output_dir, "eval.tf_record")
        file_based_convert_examples_to_features(
            eval_examples, label_list, max_seq_length, tokenizer, eval_file)
        tf.logging.info("***** Running eval *****")
        tf.logging.info("  Num examples = %d", len(eval_examples))
        tf.logging.info("  Batch size = %d",eval_batch_size)
        tf.logging.info("  Num steps = %d", num_eval_steps)
        eval_input_fn=file_based_input_fn_builder(
            input_file=eval_file,
            seq_length=max_seq_length,
            is_training=False,
            drop_remainder=False,
            batch_size=eval_batch_size,
            sample_length=len(eval_examples))
        result=estimator.evaluate(input_fn=eval_input_fn,steps=num_eval_steps)#,checkpoint_path=eval_checkpoint)
        output_eval_file = os.path.join('./', "eval_results.txt")
        with tf.gfile.GFile(output_eval_file, "w") as writer:
            tf.logging.info("***** Eval results *****")
            for key in sorted(result.keys()):
                tf.logging.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    if do_predict:
        test_examples = get_train_examples(data_dir,'test')
        # NOTE(review): this reuses the eval record filename; fine while both
        # use the 'test' split, but rename if they ever diverge.
        test_file = os.path.join(output_dir, "eval.tf_record")
        file_based_convert_examples_to_features(
            test_examples, label_list, max_seq_length, tokenizer, test_file)
        tf.logging.info("***** Running test *****")
        tf.logging.info("  Num examples = %d", len(test_examples))
        tf.logging.info("  Batch size = %d",test_batch_size)
        test_input_fn=file_based_input_fn_builder(
            input_file=test_file,
            seq_length=max_seq_length,
            is_training=False,
            drop_remainder=False,
            batch_size=test_batch_size,
            # Bug fix: sample_length is a required parameter of
            # file_based_input_fn_builder; the call previously omitted it and
            # raised TypeError as soon as do_predict was enabled.
            sample_length=len(test_examples))
        result=estimator.predict(test_input_fn)
        result=[i for i in result]
        true=[i['real_label'] for i in result]
        false=[i['pre_label'] for i in result]
        with open(output_predict_file,'wb') as f:
            pickle.dump(result,f,-1)
        with open("test_tmp.txt",'w') as f:
            res=classification_report(true,false)
            print(res)
            f.write(res)
if __name__=="__main__":
    # Ad-hoc debugging snippets kept for reference:
    #vocab_file='./chinese_L-12_H-768_A-12/vocab.txt'
    #do_lower_case=True
    #tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
    #label_list=get_labels('./data')
    #example=InputExample(1,'hello newyork <artist>周杰</artist>,<song>king alen</song>小!',None,'music')
    #convert_single_example(1, example, label_list, 64,tokenizer)
    main()
|
996,540 | ea7ccf07e43f37688b96b5257674500154a71d35 | from django.contrib import admin
from django.urls import path
from . import views
# URL routes for this app; each maps a path to a view callable by name.
urlpatterns = [
    path('',views.main, name='main'),
    path('upload/', views.upload, name ='upload'),
    path('upload_DB/', views.upload_DB, name ='upload_DB'),
    path('word/', views.word, name ='word'),
    path('test/', views.test, name= 'test'),
    path('check/',views.check,name ='check')
]
|
996,541 | 23432934cedd284859c002645447e646b160422f | """Crimetime test file
CPE 101
Spring 2020
Author: Brenden Rogers
"""
import crimetime
import unittest
class MyTest(unittest.TestCase):
    """Unit tests for the crimetime module.

    Bug fix: the test bodies referenced undefined modules ``partner1`` and
    ``partner2`` (NameError at runtime) — the module actually imported at the
    top of this file is ``crimetime``, as the earlier commented-out versions
    of these tests confirm. All references now use ``crimetime``, and the
    redundant commented-out duplicates have been removed.
    """

    def test_create_crimes(self):
        # create_crimes keeps only the ROBBERY rows, one Crime per line.
        c_1 = ['13456', 'ROBBERY', 'BODILY FORCE']
        c_2 = ['13456', 'ROBBERY', 'ARMED ROBBERY']
        c_3 = ['23145', 'THEFT', 'LOOTING']
        c_4 = ['98635', 'MURDER', 'STABBING']
        c_5 = ['54313', 'ROBBERY', 'BODILY FORCE']
        c_6 = ['63527', 'VANDALISM', 'GRAFFITI']
        c_7 = ['72514', 'ROBBERY', 'ARMED']
        crime_lines = [c_1, c_2, c_3, c_4, c_5, c_6, c_7]
        rob_1 = crimetime.Crime('13456', 'ROBBERY')
        rob_2 = crimetime.Crime('54313', 'ROBBERY')
        rob_3 = crimetime.Crime('72514', 'ROBBERY')
        lst_of_crime = [rob_1, rob_2, rob_3]
        test_val = crimetime.create_crimes(crime_lines)
        expected_val = lst_of_crime
        self.assertEqual(test_val, expected_val)

    def test_sort_crimes(self):
        # sort_crimes orders crimes by their (numeric string) id.
        c_1 = crimetime.Crime('1', 'ROBBERY')
        c_2 = crimetime.Crime('2', 'ROBBERY')
        c_3 = crimetime.Crime('3', 'ROBBERY')
        c_4 = crimetime.Crime('4', 'ROBBERY')
        c_5 = crimetime.Crime('5', 'ROBBERY')
        c_6 = crimetime.Crime('6', 'ROBBERY')
        c_7 = crimetime.Crime('7', 'ROBBERY')
        c_8 = crimetime.Crime('8', 'ROBBERY')
        c_9 = crimetime.Crime('9', 'ROBBERY')
        c_10 = crimetime.Crime('10', 'ROBBERY')
        unsorted = [c_4, c_3, c_5, c_1, c_2]
        unsorted_2 = [c_7, c_9, c_6, c_10, c_8]
        test_case1 = crimetime.sort_crimes(unsorted)
        test_case2 = crimetime.sort_crimes(unsorted_2)
        expected1 = [crimetime.Crime('1', 'ROBBERY'), crimetime.Crime('2', 'ROBBERY'), crimetime.Crime('3', 'ROBBERY'),
                     crimetime.Crime('4', 'ROBBERY'), crimetime.Crime('5', 'ROBBERY')]
        expected2 = [crimetime.Crime('6', 'ROBBERY'), crimetime.Crime('7', 'ROBBERY'), crimetime.Crime('8', 'ROBBERY'),
                     crimetime.Crime('9', 'ROBBERY'), crimetime.Crime('10', 'ROBBERY')]
        self.assertEqual(test_case1, expected1)
        self.assertEqual(test_case2, expected2)

    def test_set_crimetime(self):
        # set_crimetime fills in day_of_week, month, and hour on the crime.
        c1 = crimetime.Crime('1234', 'ROBBERY')
        crimetime.set_crimetime(c1, 'Wednesday', 7, 17)
        c2 = crimetime.Crime('1745', 'ROBBERY')
        c3 = crimetime.Crime('4725', 'ROBBERY')
        c4 = crimetime.Crime('1234', 'ROBBERY')
        c4.day_of_week = 'Wednesday'
        c4.month = 'July'
        c4.hour = '5PM'
        c5 = crimetime.Crime('1745', 'ROBBERY')
        c6 = crimetime.Crime('4725', 'ROBBERY')
        self.assertEqual(c1, c4)
        self.assertEqual(c2, c5)
        self.assertEqual(c3, c6)

    def test_update_crimes(self):
        # update_crimes applies each [id, day, date, time] row to its crime.
        crimes = [crimetime.Crime('1', 'ROBBERY'), crimetime.Crime('2', 'ROBBERY'), crimetime.Crime('3', 'ROBBERY')]
        times = [['1', 'Tuesday', '01/23/16', '23:21'], ['2', 'Friday', '06/15/01', '13:26'],
                 ['3', 'Wednesday', '12/25/16', '23:56']]
        updated = crimetime.update_crimes(crimes, times)
        c1 = crimetime.Crime('1', 'ROBBERY')
        c2 = crimetime.Crime('2', 'ROBBERY')
        c3 = crimetime.Crime('3', 'ROBBERY')
        c1.day_of_week = 'Tuesday'
        c1.month = 'January'
        c1.hour = '11PM'
        c2.day_of_week = 'Friday'
        c2.month = 'June'
        c2.hour = '1PM'
        c3.day_of_week = 'Wednesday'
        c3.month = 'December'
        c3.hour = '11PM'
        expected = [c1, c2, c3]
        self.assertEqual(updated, expected)

    def test_find_crime(self):
        # find_crime returns the crime with the matching id.
        crimes = [crimetime.Crime('1', 'ROBBERY'), crimetime.Crime('2', 'ROBBERY'), crimetime.Crime('3', 'ROBBERY')]
        self.assertEqual(crimetime.find_crime(crimes, '1'), crimetime.Crime('1', 'ROBBERY'))
        self.assertEqual(crimetime.find_crime(crimes, '2'), crimetime.Crime('2', 'ROBBERY'))
        self.assertEqual(crimetime.find_crime(crimes, '3'), crimetime.Crime('3', 'ROBBERY'))
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
996,542 | edcb33cfc5ecb25c7d97bc8567dcefd5829d81c3 | import subprocess, sys, json, os
import re
import sys
import struct
from shutil import copyfile
# Virtual address range, file offset, and size of the code region to patch.
# NOTE(review): presumably the vmlinux .text section — confirm against the
# kernel build. Previous values are kept in the trailing comments.
addr_lo = 0xffffffff81000000 # 0xffffffff81609000
addr_hi = 0xffffffff819a1000 # 0xffffffff8160c000
file_offset = 0x200000 # 0x809000
text_size = 0x9a1000 # 12288
def read_file(fname):
    # Return the file's lines with leading/trailing whitespace stripped.
    with open(fname) as fh:
        return [line.strip() for line in fh]
def add_curr_pr_address(fname):
    # Python 2 code (print statement below).
    # Resolve the address of the `tracking_enabled` symbol in the kernel
    # image via llvm-nm|grep and write it as a little-endian u64 to the
    # hard-coded dt_ita_addr file.
    p1 = subprocess.Popen(["llvm-nm", fname], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(["grep", "tracking_enabled"], stdin=p1.stdout, stdout=subprocess.PIPE)
    out = p2.communicate()[0]
    with open("/home/muhammad/testing/debloating/dt_ita_addr", 'w') as fd:
        print hex(int(out.split()[0], 16))
        fd.write(struct.pack("Q", int(out.split()[0], 16)))
def get_func_names(fname, threshold):
    # Partition symbol names from the profile file into "in set" (count in
    # (threshold, 1000]) and "out of set" (count <= threshold) lists.
    # Each line is "<name> <count>".
    rows = [entry.split() for entry in read_file(fname)]
    in_set = [cols[0] for cols in rows if threshold < int(cols[1]) <= 1000]
    out_of_set = [cols[0] for cols in rows if int(cols[1]) <= threshold]
    return in_set, out_of_set
def create_copy(vmfile, suff):
    # Duplicate the image under a suffixed name and return the new path.
    target = vmfile + suff
    copyfile(vmfile, target)
    return target
def get_symbol_info(vmfile, func_names_is, func_names_oos):
    # Disassemble the image's target address range with objdump and collect,
    # for every function in either name set, a tuple of
    # (name, address-string, file-offset-string, in-set flag, [instruction sizes]).
    # Also records lines referencing _einittext, and stashes the file offset
    # of the special `ud2_call` symbol in the global switch_ctx_offset.
    p = subprocess.Popen(["objdump", "-dF", vmfile.strip()], stdout=subprocess.PIPE)
    output = p.communicate()[0]
    output = output.split('\n')
    work_section = False
    func_info = []
    func_names_is = set(func_names_is)
    func_names_oos = set(func_names_oos)
    # NOTE(review): to_exclude is built but never consulted below.
    to_exclude = ["total_mapping_size", "load_elf_binary", "chksum_update"]
    infunc = False
    init_section_list = []
    # Function headers look like: "<addr> <name> (File Offset: 0x...):"
    pattern = re.compile("([a-z0-9]+) <([_a-zA-Z0-9]+)> \(File Offset: ([a-z0-9]+)\):")
    for idx, line in enumerate(output):
        if line == "":
            # Blank line ends the current function body.
            infunc = False
            continue
        if work_section:
            toks = line.split("\t")
            if len(toks) >= 2:
                # The second tab field holds the instruction's raw bytes;
                # its whitespace-token count is the instruction size.
                code_size = len(toks[1].split())
                if infunc:
                    func_info[len(func_info) - 1][4].append(code_size)
                if line.find("_einittext") >= 0:
                    init_section_list.append((line, line[:16], code_size))
        matched = pattern.match(line)
        if not matched:
            continue
        func_name = matched.group(2)
        if(func_name == "ud2_call"):
            # Remember where ud2_call lives so the caller can patch it.
            global switch_ctx_offset
            switch_ctx_offset = int(matched.group(3), 16)
            continue
        address = int(matched.group(1), 16)
        is_is = -1
        if address < addr_lo:
            continue
        if address >= addr_hi:
            # Past the region of interest — disassembly is address-ordered.
            break
        work_section = True
        if func_name in func_names_is:
            is_is = True
        elif func_name in func_names_oos:
            is_is = False
        else:
            continue
        infunc = True
        address = matched.group(1)
        offset = matched.group(3)
        func_info.append((func_name, address, offset, is_is, []))
    return func_info, init_section_list
# ---- driver script (Python 2) ----------------------------------------------
# Usage: script.py <vmlinux> <profile-file> <threshold>
# Produces patched "_is"/"_oos" copies of the kernel image where the
# out-of-set (resp. in-set) functions are overwritten with ud2 traps, then
# dumps the three code regions and per-function metadata to fixed paths.
vmfile = sys.argv[1]
ffile = sys.argv[2]
threshold = int(sys.argv[3])
add_curr_pr_address(vmfile)
func_names_is, func_names_oos = get_func_names(ffile, threshold)
print len(func_names_is), len(func_names_oos)
func_info, init_section_list = get_symbol_info(vmfile, func_names_is, func_names_oos)
# Report any duplicate out-of-set entries.
temp = []
for tup in func_info:
    if tup[3] == False and tup in temp:
        print tup
    temp.append(tup)
print len(filter(lambda x : x[3] == False, func_info))
# Dump (address, size) pairs of instructions referencing _einittext.
with open("/home/muhammad/testing/debloating/init_section_list", 'w') as fd:
    for line, address, size in init_section_list:
        # print line, address, size
        address = struct.pack("Q", int(address, 16))
        size = struct.pack("I", size)
        fd.write(address)
        fd.write(size)
# Overwrite ud2_call in-place with a ud2 instruction (0F 0B).
with open(vmfile, "r+b") as fd:
    fd.seek(switch_ctx_offset)
    fd.write(chr(int("0F", 16)))
    fd.write(chr(int("0B", 16)))
to_store = []
iis = create_copy(vmfile, "_is")
oos = create_copy(vmfile, "_oos")
# In the "_is" image, blank out every out-of-set function: each instruction
# is replaced by ud2 pairs (0F 0B), padded with nop (90) for odd sizes;
# 1-byte instructions are kept verbatim.
with open(iis, "r+b") as fd:
    for (func_name, address, offset, is_is, inst_sizes) in func_info:
        offset = int(offset, 16)
        fd.seek(offset)
        code = fd.read(sum(inst_sizes))
        fd.seek(offset)
        code_idx = 0
        if not is_is:
            for isize in inst_sizes:
                if isize == 1:
                    fd.write(code[code_idx])
                    code_idx += 1
                    continue
                for x in xrange(isize/2):
                    fd.write(chr(int("0F", 16)))
                    fd.write(chr(int("0B", 16)))
                    code_idx += 2
                if isize % 2:
                    fd.write(chr(int("90", 16)))
                    code_idx += 1
        # if inst_sizes[0] == 1:
        #   continue
        # fd.write(chr(int("0F", 16)))
        # fd.write(chr(int("0B", 16)))
        to_store.append((func_name, address, code, is_is, inst_sizes))
# Mirror image: in the "_oos" copy, blank out the in-set functions instead.
with open(oos, "r+b") as fd:
    for (func_name, address, offset, is_is, inst_sizes) in func_info:
        offset = int(offset, 16)
        fd.seek(offset)
        code = fd.read(sum(inst_sizes))
        fd.seek(offset)
        code_idx = 0
        if is_is:
            for isize in inst_sizes:
                if isize == 1:
                    fd.write(code[code_idx])
                    code_idx += 1
                    continue
                for x in xrange(isize/2):
                    fd.write(chr(int("0F", 16)))
                    fd.write(chr(int("0B", 16)))
                    code_idx += 2
                if isize % 2:
                    fd.write(chr(int("90", 16)))
                    code_idx += 1
        # to_store.append((func_name, address, code, is_is, inst_sizes))
# Dump the code region of the original and both patched images byte-by-byte.
fd = open(vmfile, 'r+b')
with open("/home/muhammad/testing/debloating/dt_func_code_original", 'w') as fd2:
    fd.seek(file_offset)
    for i in xrange(text_size):
        byte = fd.read(1)
        fd2.write(byte)
fd.close()
fd = open(iis, 'r+b')
with open("/home/muhammad/testing/debloating/dt_func_code_is", 'w') as fd2:
    # fd.seek(0x200000)
    fd.seek(file_offset)
    for i in xrange(text_size):
        byte = fd.read(1)
        fd2.write(byte)
fd.close()
fd = open(oos, 'r+b')
with open("/home/muhammad/testing/debloating/dt_func_code_oos", 'w') as fd2:
    # fd.seek(0x200000)
    fd.seek(file_offset)
    for i in xrange(text_size):
        byte = fd.read(1)
        fd2.write(byte)
        # print hex(ord(byte))
fd.close()
# Serialize per-function metadata: name length, name, address (u64),
# code size (u32), first two instruction sizes (u32 each), original code.
# NOTE(review): inst_sizes[1] will raise IndexError for a single-instruction
# function — presumably none occur in practice; confirm.
with open("/home/muhammad/testing/debloating/dt_func_info_is", 'w') as fd:
    for (func_name, address, code, is_is, inst_sizes) in to_store:
        address = struct.pack("Q", int(address, 16))
        code_size = struct.pack("I", len(code))
        strlen = struct.pack("I", len(func_name))
        inst_size0 = struct.pack("I", inst_sizes[0])
        inst_size1 = struct.pack("I", inst_sizes[1])
        new_code = ""
        fd.write(strlen)
        fd.write(func_name)
        fd.write(address)
        fd.write(code_size)
        fd.write(inst_size0)
        fd.write(inst_size1)
        fd.write(code)
|
996,543 | 74591fe7ce082079c4903d00390164c01ef7d2cc | from django.contrib import admin
from basic_app.models import UserProfileInfo, User,UserPictureCount
# Register your models here.
# Expose the profile models in the Django admin.
# NOTE(review): User is imported above but not registered here — presumably
# django.contrib.auth registers it already; confirm that is intentional.
admin.site.register(UserProfileInfo)
admin.site.register(UserPictureCount)
|
996,544 | 5f286e36c96af123ed0571ddc9edbcc2a9eb6a29 | """This class defines a device capable to calculate a distance using RSSI values"""
from miso_beacon_ai.ranging_functions import rangedistance
class RSSIRanger:
    # Device model that estimates distance to a transmitter from RSSI values.

    def __init__(self, frecuency, gain=1):
        """Constructor"""
        # NOTE(review): "frecuency" is a typo for "frequency" but is part of
        # the public interface, so it is kept as-is.
        self.frecuency = frecuency
        self.gain = gain

    def rangedistance(self, rssi):
        """This method calculates the distance that a signal comes from using its RSSI value"""
        # Delegates to miso_beacon_ai.ranging_functions.rangedistance with
        # this device's frequency and gain.
        return rangedistance(rssi, self.frecuency, gain=self.gain)
|
996,545 | 3da9693a5c9e90229d788304a4a9fd42585597e6 | from django.shortcuts import render, redirect, HttpResponse
import random
def index(request):
    """Render the main game page with the player's gold and last message."""
    if 'gold' not in request.session:
        request.session['gold'] = 0
    context = {
        'gold': request.session['gold'],
        # Bug fix: direct indexing raised KeyError on the very first visit,
        # before any action had stored a message in the session.
        'message': request.session.get('message', ''),
    }
    return render(request, 'index.html', context)
def process(request):
    """Apply one gold-earning action from the POST form and redirect home.

    Farm/cave/house award a random amount in a fixed range; the casino can
    win or lose gold. The earnings and a message are stored in the session
    for index() to display.
    """
    action = request.POST['action']
    # (low, high) bounds for random.randrange — high is exclusive, as before.
    locations = {
        'farm': (10, 20),
        'cave': (5, 10),
        'house': (2, 5),
    }
    if action in locations:
        low, high = locations[action]
        earnings = random.randrange(low, high)
        request.session['gold'] += earnings
        request.session['earnings'] = earnings
        # Bug fix: the cave and house branches said "from the farm!"
        # (copy-paste); the message now names the actual location.
        request.session['message'] = "Earned " + str(earnings) + " gold from the " + action + "!"
        print(earnings)
        return redirect('/')
    if action == "casino":
        earnings = random.randrange(-50, 50)
        request.session['gold'] += earnings
        request.session['earnings'] = earnings
        if earnings > 0:
            request.session['message'] = "Entered a casino and won " + str(earnings) + " gold!"
        else:
            request.session['message'] = "Entered a casino and lost " + str(earnings * -1) + " gold!"
        print(earnings)
        return redirect('/')
    # Robustness: an unknown action previously fell through returning None
    # (a 500 in Django); redirect home instead.
    return redirect('/')
996,546 | 6206f37fbc3f78d3f7c0d755d9ec5b01806d3abe | print('Será que conseguiremos formar um triângulo com os lados fornecidos por você?')
l1 = float(input('Digite aqui o valor do primeiro lado: '))
l2 = float(input('Digite aqui o valor do segundo lado: '))
l3 = float(input('Digite aqui o valor do terceiro lado: '))
if (l1 + l2 > l3) and ((l2 + l3 > l1)) and (l1 + l3 > l2):
print('É possível formar um triângulo com os lados fornecidos.')
else:
print('Não é possível formar um triângulo com os lados fornecidos!')
#obs: a soma de dois lados tem que ser sempre maior que o terceiro lado para que se possa formar um triângulo |
996,547 | 8293fb0d660779bf48fd76e4e74ef171ff742df7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
红包封面小程序接口
"""
from typing import List, Optional
from fastapi import APIRouter, Depends, Body
from pydantic import BaseModel
from sqlalchemy.orm import Session
from starlette.requests import Request
from app.api import deps
from app.api.api_v1.endpoints.index import templates
from app.controller import miniapp
from app import schemas
from app.schemas import MiniAppInviteUserCreate, MiniAppInviteUser, Cps, RedCover
from app.schemas import MiniAppTip
route = APIRouter()
class Code(BaseModel):
    # Request body carrying the WeChat login temporary code.
    code: str
@route.post("/{app_id}/login", response_model=schemas.MiniAppUser)
def login(
    *,
    db: Session = Depends(deps.get_db),
    app_id: int,
    code: Code = Body(None)
):
    """
    WeChat mini-program login: exchange the login code for the user's info.
    """
    result = miniapp.get_user_info(app_id, code.code, db)
    return result
@route.post("/{app_id}/invite/track", response_model=MiniAppInviteUser)
def invite_track(
    *,
    db: Session = Depends(deps.get_db),
    app_id: int,
    invite_user: MiniAppInviteUserCreate
):
    """openid: owner of the shared link; invite_openid: openid of the invited user."""
    # print(invite_user.json())
    result = miniapp.track_invite_user(
        app_id, invite_user.openid, invite_user.invite_openid, db, invite_user.cover_id
    )
    return result
@route.get("/{app_id}/cps", response_model=List[Cps])
def get_cps_list(
    *,
    db: Session = Depends(deps.get_db),
    app_id: int
):
    # Return the CPS (affiliate) entries for this mini-app.
    result = miniapp.get_cps_list(app_id, db)
    return result
@route.get("/{app_id}/cover", response_model=List[RedCover])
def get_covers(
    *,
    db: Session = Depends(deps.get_db),
    app_id: int
):
    # Return all red-envelope covers for this mini-app.
    return miniapp.get_covers(app_id, db)
@route.get("/{app_id}/tip", response_model=List[str])
def get_tips(
    *,
    db: Session = Depends(deps.get_db),
    app_id: int,
    page: Optional[str] = None,
    item_id: Optional[int] = None
):
    # Return just the tip strings, optionally filtered by page and item.
    res = miniapp.get_tips(app_id, db, page, item_id)
    return [i.tip for i in res]
@route.get("/{app_id}/cover/detail")
def get_cover_detail(
    *,
    db: Session = Depends(deps.get_db),
    app_id: int,
    cover_id: int,
    openid: str
):
    # Detail view of one cover as seen by the given user (openid).
    return miniapp.get_cover_detail(cover_id, openid, app_id, db)
class AdTrack(BaseModel):
    # Ad-view tracking payload: who watched, whether it completed, and for
    # which cover.
    openid: str
    status: bool
    cover_id: int
@route.post("/{app_id}/ad/track")
def do_ad_track(
    *,
    db: Session = Depends(deps.get_db),
    app_id: int,
    ad_track: AdTrack = Body(None)
):
    # Persist an ad-view event for the user/cover pair.
    return miniapp.track_ad_history(
        app_id=app_id,
        openid=ad_track.openid,
        cover_id=ad_track.cover_id,
        status=ad_track.status,
        db=db
    )
@route.get("/{app_id}/cmVjZWl2ZWQ")
def user_cover_received(
    *,
    db: Session = Depends(deps.get_db),
    app_id: int,
    receive_str: str,
    request: Request,
):
    """Redeem a cover for a user and render the result page."""
    # Route segment "cmVjZWl2ZWQ" is base64 for "received" — presumably
    # light obfuscation of the redemption endpoint.
    res = miniapp.do_received(db, app_id, receive_str)
    if not res:
        # Fallback code shown when redemption fails; this is a user-facing
        # runtime string ("invalid input" in Chinese) and must stay as-is.
        res = "输入有误"
    return templates.TemplateResponse(
        "received_cover.html",
        {"request": request, "context": {"code": res}}
    )
|
996,548 | 39400d008ea7188f5be6e20426a42b17bdcfeb2a | # -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
import re
class BasicSpider(scrapy.Spider):
    # Scrapes the per-star rating breakdown from a single IMDB ratings page.
    name = "ratings_page"
    allowed_domains = ["imdb.com"]
    start_urls = (
        'http://www.imdb.com/title/tt4425200/ratings?ref_=tt_ov_rt',
    )

    def parse(self, response):
        # Cells holding a percentage are identified by a literal "%".
        percent_match = re.compile(r"%")
        #print "Crawling page..."
        #print response.body
        # NOTE(review): no parser argument — BeautifulSoup picks a default
        # and warns; "html.parser" would make this deterministic.
        bsObj = BeautifulSoup(response.body)
        #retrieve td elements with percentage ratings
        ratings = bsObj.findAll("td", attrs={"background": re.compile("rating/ruler.gif")})
        #retrieve number of votes (cell to the left of each percentage)
        rating_counts = [row.findPreviousSibling("td").get_text() for row in ratings if percent_match.search(row.get_text())]
        #retrieve rating # from 1 - 10 (cell to the right)
        rating_vals = [row.findNextSibling("td").get_text() for row in ratings if percent_match.search(row.get_text())]
        #retrieve percentage ratings
        ratings = [row.get_text() for row in ratings if percent_match.search(row.get_text())]
        #format ratings percent to remove unicode characters (non-breaking spaces)
        ratings_formatted = [rating.replace(u'\xa0',"") for rating in ratings]
        # Map "10".."1" -> (vote count, percentage).
        rating_dict = dict(zip(rating_vals, zip(rating_counts, ratings_formatted)))
        """
        for key, v in rating_dict.items():
            print key, v[0], v[1]
            yield {
                "rating" : key,
                "rating %": v[1],
                "rating count": v[0]
            }
        """
        # Emit one flat item with all ten buckets.
        yield {
            "title" : response.css("h1>a::text").extract(),
            "10 perc" : rating_dict["10"][1],
            "10 count" : rating_dict["10"][0],
            "9 perc" : rating_dict["9"][1],
            "9 count" : rating_dict["9"][0],
            "8 perc" : rating_dict["8"][1],
            "8 count" : rating_dict["8"][0],
            "7 perc" : rating_dict["7"][1],
            "7 count" : rating_dict["7"][0],
            "6 perc" : rating_dict["6"][1],
            "6 count" : rating_dict["6"][0],
            "5 perc" : rating_dict["5"][1],
            "5 count" : rating_dict["5"][0],
            "4 perc" : rating_dict["4"][1],
            "4 count" : rating_dict["4"][0],
            "3 perc" : rating_dict["3"][1],
            "3 count" : rating_dict["3"][0],
            "2 perc" : rating_dict["2"][1],
            "2 count" : rating_dict["2"][0],
            "1 perc" : rating_dict["1"][1],
            "1 count" : rating_dict["1"][0]
        }
        pass
|
996,549 | 47e4e0bb7d74136730895ea5097c4db40383d4d3 | from datetime import datetime
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import random
import string
# Locator tuples (By strategy, selector) for every page element the test
# suite touches, keyed by a human-readable name.
ELEMENTS = {
    "first name": (By.NAME, "firstname"),
    "last name": (By.NAME, "lastname"),
    "address1": (By.NAME, "address1"),
    "postcode": (By.NAME, "postcode"),
    "city": (By.NAME, "city"),
    # Country is a select2 widget: a combobox to open plus a search input.
    "country combobox": (By.CSS_SELECTOR, "span[role=combobox]"),
    "country input": (By.CSS_SELECTOR, "input.select2-search__field"),
    "email": (By.NAME, "email"),
    "phone": (By.NAME, "phone"),
    "password": (By.NAME, "password"),
    "repeat password": (By.NAME, "confirmed_password"),
    "btn create account": (By.NAME, "create_account"),
    "account created info": (By.XPATH, "//div[contains(text(), 'Your customer account has been created')]"),
    "box account": (By.ID, "box-account"),
    "btn login": (By.NAME, "login")
}
@pytest.fixture
def driver(request):
    """Provide a Chrome WebDriver that is quit automatically after the test."""
    browser = webdriver.Chrome()
    print(browser.capabilities)
    request.addfinalizer(browser.quit)
    return browser
def get_element(driver, element_tuple, timeout=10):
    """Wait (up to *timeout* seconds) for the element located by
    *element_tuple* to be present, then return it."""
    by, selector = element_tuple
    WebDriverWait(driver, timeout).until(EC.presence_of_element_located(element_tuple))
    return driver.find_element(by, selector)
def test_register_new_account(driver):
    """End-to-end flow: register a fresh account, then log out and back in."""
    driver.get("http://localhost/litecart/en/create_account")
    get_element(driver, ELEMENTS["first name"])  # wait for the form to render
    new_user = generate_new_account_data()
    fill_register_form(driver, new_user)
    # Registration succeeded when both the confirmation banner and the
    # logged-in account box are present.
    get_element(driver, ELEMENTS["account created info"])
    get_element(driver, ELEMENTS["box account"])
    logout(driver)
    login(driver, new_user)
    logout(driver)
def fill_register_form(driver, user_data):
    """Populate the litecart registration form with *user_data* and submit it."""
    # Plain text inputs, in page order: (ELEMENTS key, user_data key).
    for element_key, data_key in (
            ("first name", "firstname"),
            ("last name", "lastname"),
            ("address1", "address"),
            ("postcode", "postcode"),
            ("city", "city"),
    ):
        type_text(driver, ELEMENTS[element_key], user_data[data_key])
    # Country is a select2 widget: open the combobox, type into its search
    # box and confirm with ENTER.
    get_element(driver, ELEMENTS["country combobox"]).click()
    type_text(driver, ELEMENTS["country input"], user_data["country"] + Keys.ENTER)
    for element_key, data_key in (("email", "email"), ("phone", "phone")):
        type_text(driver, ELEMENTS[element_key], user_data[data_key])
    # Password must be entered twice.
    type_text(driver, ELEMENTS["password"], user_data["password"])
    type_text(driver, ELEMENTS["repeat password"], user_data["password"])
    get_element(driver, ELEMENTS["btn create account"]).click()
def type_text(driver, element_tuple, text_to_input, clear=True):
    """Type *text_to_input* into the located element, first clearing any
    existing value unless *clear* is False."""
    field = get_element(driver, element_tuple)
    if clear:
        field.clear()
    field.send_keys(text_to_input)
def generate_new_account_data():
    """Build a randomised-but-valid registration payload for a new user."""
    password_alphabet = string.ascii_letters + string.digits + string.punctuation
    street = random.choice(["Dluga", "Prosta", "Krzywa"])
    return {
        "firstname": random.choice(["Pedro", "Santiago", "Michael"]),
        "lastname": random.choice(["Gonzales", "De Sousa"]),
        "address": "%s %d" % (street, random.randint(1, 999)),
        "postcode": "{:05d}".format(random.randint(0, 99999)),
        "city": random.choice(["Barcelona", "Madrid"]),
        "country": "Spain",
        # Timestamped address keeps the account unique between test runs.
        "email": "user%s@testmail.com" % datetime.now().strftime("%Y%m%d%H%M%S"),
        "phone": "+%d%d" % (random.randint(0, 99), random.randint(100000000, 999999999)),
        "password": ''.join(random.choice(password_alphabet) for _ in range(8)),
    }
def logout(driver):
    """Log out via the logout URL and wait for the login email field to reappear."""
    driver.get("http://localhost/litecart/en/logout")
    get_element(driver, ELEMENTS["email"])
def login(driver, user_data):
    """Log in with the credentials in *user_data* and wait for the account box."""
    type_text(driver, ELEMENTS["email"], user_data["email"])
    type_text(driver, ELEMENTS["password"], user_data["password"])
    get_element(driver, ELEMENTS["btn login"]).click()
    get_element(driver, ELEMENTS["box account"])
|
996,550 | ac3ae792ff89d52809927e7c5f74b47fa4535460 | ENTRY_POINT = 'sum_product'
#[PROMPT]
from typing import List, Tuple
def sum_product(numbers: List[int]) -> Tuple[int, int]:
    """ For a given list of integers, return a tuple consisting of a sum and a product of all the integers in a list.
    Empty sum should be equal to 0 and empty product should be equal to 1.
    >>> sum_product([])
    (0, 1)
    >>> sum_product([1, 2, 3, 4])
    (10, 24)
    """
#[SOLUTION]
    total = 0
    product = 1
    for value in numbers:
        total += value
        product *= value
    return total, product
#[CHECK]
METADATA = {
'author': 'jt',
'dataset': 'test'
}
def check(candidate):
    """Assert that *candidate* satisfies the sum_product contract on fixed cases."""
    cases = [
        ([], (0, 1)),
        ([1, 1, 1], (3, 1)),
        ([100, 0], (100, 0)),
        ([3, 5, 7], (3 + 5 + 7, 3 * 5 * 7)),
        ([10], (10, 10)),
    ]
    for numbers, expected in cases:
        assert candidate(numbers) == expected
|
996,551 | b7cbe858fdf5caebcc6b59529dc7d9c743855f77 | from rest_framework import serializers
from .models import Movie, TVShow
class MovieSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a Movie's id, title and release flag/date."""

    class Meta:
        model = Movie
        fields = ['id', 'title', 'released']
class TVShowSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a TVShow's id, title and release flag/date."""

    class Meta:
        model = TVShow
        fields = ['id', 'title', 'released']
|
996,552 | 7ae4e851584f87966e87a3a52a8c4f7acb3c3216 | from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
from services.institutes.models import Institute
@receiver(post_save, sender=Institute)
def feed_signal_update(sender, **kwargs):
    """post_save hook for Institute saves; currently an intentional no-op
    placeholder where feed-update logic is meant to go.

    NOTE(review): ``instance`` defaults to False when absent, and the body
    does nothing even when an instance is present.
    """
    instance = kwargs.get('instance', False)
    if instance:
        pass
|
996,553 | 5dcd7b8f257c10841a5f1411d4dc7fd0be03a11b | """List utility functions."""
__author__ = "730444252"
# TODO: Implement your functions here.
def all(a: list[int], b: int) -> bool:
    """Return True when every element of ``a`` equals ``b``.

    An empty list yields False (matching this module's convention, unlike
    the shadowed builtin).
    """
    if not a:
        return False
    for value in a:
        if value != b:
            return False
    return True
def is_equal(a: list[int], b: list[int]) -> bool:
    """Return True when ``a`` and ``b`` have identical lengths and elements."""
    if len(a) != len(b):
        return False
    for left, right in zip(a, b):
        if left != right:
            return False
    return True
def max(a: list[int]) -> int:
    """Return the largest value in ``a`` without modifying it.

    Raises:
        ValueError: if ``a`` is empty.

    Bug fix: the original implementation wrote the running maximum into
    ``a[0]``, mutating the caller's list as a side effect (and left a
    declared-but-unused ``biggest`` variable). This version tracks the
    maximum in a local instead.
    """
    if len(a) == 0:
        raise ValueError("max() arg is an empty list")
    biggest: int = a[0]
    for value in a[1:]:
        if value > biggest:
            biggest = value
    return biggest
|
996,554 | e0fac845789454582f5122d3fba25cad12ab8b88 | train = pd.read_csv("../train.csv")
test = pd.read_csv("../test.csv")
t=train
y=train.Survived
X = train.copy()
X.drop(columns='Survived',axis=1,inplace=True)
X=X.fillna(-999999)
X_copy = X.copy()
for c in train.columns[train.dtypes=='object']:
X[c]=X[c].factorize()[0]
for c in test.columns[test.dtypes=='object']:
test[c]=test[c].factorize()[0]
from sklearn.ensemble import RandomForestClassifier as RF
rf = RF()
sex_pclass = pd.concat([X.Sex,X.Pclass],axis=1)
rf.fit(sex_pclass,y)
test_sex_pclass = pd.concat([test.Sex,test.Pclass],axis=1)
y_pred = rf.predict(test_sex_pclass)
y_pred_df = pd.DataFrame(y_pred)
y_pred_df.set_index(test.PassengerId,inplace=True)
y_pred_df.columns = ['Survived']
y_pred_df.to_csv("rf_sex_pclass.csv")
|
996,555 | 462bba0a10998441a98fd2441a530ebbae9db34e | '''<b>Enhance Or Suppress Features</b> enhances or suppresses certain image features
(such as speckles, ring shapes, and neurites), which can improve subsequent
identification of objects.
<hr>
This module enhances or suppresses the intensity of certain pixels relative
to the rest of the image, by applying image processing filters to the image. It
produces a grayscale image in which objects can be identified using an <b>Identify</b> module.
'''
import numpy as np
from centrosome.cpmorphology import opening, closing, white_tophat
from centrosome.filter import enhance_dark_holes, circular_hough
from centrosome.filter import hessian, median_filter
from centrosome.filter import variance_transform, line_integration
from scipy.ndimage import gaussian_filter
import cellprofiler.cpimage as cpi
import cellprofiler.cpmodule as cpm
import cellprofiler.settings as cps
from cellprofiler.gui.help import HELP_ON_MEASURING_DISTANCES, PROTIP_AVOID_ICON
ENHANCE = 'Enhance'
SUPPRESS = 'Suppress'
E_SPECKLES = 'Speckles'
E_NEURITES = 'Neurites'
E_DARK_HOLES = 'Dark holes'
E_CIRCLES = 'Circles'
E_TEXTURE = 'Texture'
E_DIC = 'DIC'
S_FAST = "Fast / hexagonal"
S_SLOW = "Slow / circular"
N_GRADIENT = "Line structures"
N_TUBENESS = "Tubeness"
class EnhanceOrSuppressFeatures(cpm.CPModule):
    """CellProfiler module that enhances or suppresses image features
    (speckles, neurites, dark holes, circles, texture, DIC shear) to aid
    subsequent object identification.

    Fixes applied in review: the GUI help for the operation choice had a
    malformed ``<li`` tag, and the texture help misspelled the module name
    ("EnanceOrSuppressFeatures").
    """
    module_name = 'EnhanceOrSuppressFeatures'
    category = "Image Processing"
    variable_revision_number = 5

    def create_settings(self):
        """Create the module's settings (UI controls plus their help text)."""
        self.image_name = cps.ImageNameSubscriber(
            'Select the input image',
            cps.NONE, doc="""
            Select the image with features to be enhanced or suppressed.""")

        self.filtered_image_name = cps.ImageNameProvider(
            'Name the output image',
            'FilteredBlue', doc="""
            Enter a name for the feature-enhanced or suppressed image.""")

        self.method = cps.Choice(
            'Select the operation',
            [ENHANCE, SUPPRESS], doc="""
            Select whether you want to enhance or suppress the features you designated.
            <ul>
            <li><i>%(ENHANCE)s:</i> Produce an image whose intensity is largely
            composed of the features of interest.</li>
            <li><i>%(SUPPRESS)s:</i> Produce an image with the features largely
            removed.</li>
            </ul>""" % globals())

        self.enhance_method = cps.Choice(
            'Feature type',
            [E_SPECKLES, E_NEURITES, E_DARK_HOLES, E_CIRCLES, E_TEXTURE, E_DIC], doc="""
            <i>(Used only if %(ENHANCE)s is selected)</i><br>
            This module can enhance three kinds of image intensity features:
            <ul>
            <li><i>%(E_SPECKLES)s:</i> A speckle is an area of enhanced intensity
            relative to its immediate neighborhood. The module enhances
            speckles using a white tophat filter, which is the image minus the
            morphological grayscale opening of the image. The opening operation
            first suppresses the speckles by applying a grayscale erosion to reduce everything
            within a given radius to the lowest value within that radius, then uses
            a grayscale dilation to restore objects larger than the radius to an
            approximation of their former shape. The white tophat filter enhances
            speckles by subtracting the effects of opening from the original image.
            </li>
            <li><i>%(E_NEURITES)s:</i> Neurites are taken to be long, thin features
            of enhanced intensity. Choose this option to enhance the intensity
            of the neurites using the %(N_GRADIENT)s or %(N_TUBENESS)s methods
            described below.</li>
            <li><i>%(E_DARK_HOLES)s:</i> The module uses morphological reconstruction
            (the rolling-ball algorithm) to identify dark holes within brighter
            areas, or brighter ring shapes. The image is inverted so that the dark holes turn into
            bright peaks. The image is successively eroded and the eroded image
            is reconstructed at each step, resulting in an image which is
            missing the peaks. Finally, the reconstructed image is subtracted
            from the previous reconstructed image. This leaves circular bright
            spots with a radius equal to the number of iterations performed.
            </li>
            <li><i>%(E_CIRCLES)s:</i> The module calculates the circular Hough transform of
            the image at the diameter given by the feature size. The Hough transform
            will have the highest intensity at points that are centered within a ring
            of high intensity pixels where the ring diameter is the feature size. You
            may want to use the <b>EnhanceEdges</b> module to find the edges of your
            circular object and then process the output by enhancing circles. You can
            use <b>IdentifyPrimaryObjects</b> to find the circle centers and then use
            these centers as seeds in <b>IdentifySecondaryObjects</b> to find whole,
            circular objects using a watershed.</li>
            <li><i>%(E_TEXTURE)s:</i> <b>EnhanceOrSuppressFeatures</b> produces an image
            whose intensity is the variance among nearby pixels. This method weights
            pixel contributions by distance using a Gaussian to calculate the weighting.
            You can use this method to separate foreground from background if the foreground
            is textured and the background is not.
            </li>
            <li><i>%(E_DIC)s:</i> This method recovers the optical density of a DIC image by
            integrating in a direction perpendicular to the shear direction of the image.
            </li>
            </ul>
            In addition, this module enables you to suppress certain features (such as speckles)
            by specifying the feature size.""" % globals())

        self.object_size = cps.Integer(
            'Feature size', 10, 2, doc="""
            <i>(Used only if circles, speckles or neurites are selected, or if suppressing features)</i><br>
            Enter the diameter of the largest speckle, the width of the circle
            or the width of the neurites to be enhanced or suppressed, which
            will be used to calculate an adequate filter size. %(HELP_ON_MEASURING_DISTANCES)s""" % globals())

        self.hole_size = cps.IntegerRange(
            'Range of hole sizes', value=(1, 10), minval=1, doc="""
            <i>(Used only if %(E_DARK_HOLES)s is selected)</i><br>
            The range of hole sizes to be enhanced. The algorithm will
            identify only holes whose diameters fall between these two
            values.""" % globals())

        self.smoothing = cps.Float(
            'Smoothing scale', value=2.0, minval=0, doc="""
            <i>(Used only for the %(E_TEXTURE)s, %(E_DIC)s or %(E_NEURITES)s methods)</i><br>
            <ul>
            <li><i>%(E_TEXTURE)s</i>: This is the scale of the texture features, roughly
            in pixels. The algorithm uses the smoothing value entered as
            the sigma of the Gaussian used to weight nearby pixels by distance
            in the variance calculation.</li>
            <li><i>%(E_DIC)s:</i> Specifies the amount of smoothing of the image in the direction parallel to the
            shear axis of the image. The line integration method will leave
            streaks in the image without smoothing as it encounters noisy
            pixels during the course of the integration. The smoothing takes
            contributions from nearby pixels which decreases the noise but
            smooths the resulting image. </li>
            <li><i>%(E_DIC)s:</i> Increase the smoothing to
            eliminate streakiness and decrease the smoothing to sharpen
            the image.</li>
            <li><i>%(E_NEURITES)s:</i> The <i>%(N_TUBENESS)s</i> option uses this scale
            as the sigma of the Gaussian used to smooth the image prior to
            gradient detection.</li>
            </ul>
            <img src="memory:%(PROTIP_AVOID_ICON)s">&nbsp;
            Smoothing can be turned off by entering a value of zero, but this
            is not recommended.""" % globals())

        self.angle = cps.Float(
            'Shear angle', value=0, doc="""
            <i>(Used only for the %(E_DIC)s method)</i><br>
            The shear angle is the direction of constant value for the
            shadows and highlights in a DIC image. The gradients in a DIC
            image run in the direction perpendicular to the shear angle.
            For example, if the shadows run diagonally from lower left
            to upper right and the highlights appear above the shadows,
            the shear angle is 45&deg;. If the shadows appear on top,
            the shear angle is 180&deg; + 45&deg; = 225&deg;.
            """ % globals())

        self.decay = cps.Float(
            'Decay', value=0.95, minval=0.1, maxval=1, doc=
            """<i>(Used only for the %(E_DIC)s method)</i><br>
            The decay setting applies an exponential decay during the process
            of integration by multiplying the accumulated sum by the decay
            at each step. This lets the integration recover from accumulated
            error during the course of the integration, but it also results
            in diminished intensities in the middle of large objects.
            Set the decay to a large value, on the order of 1 - 1/diameter
            of your objects if the intensities decrease toward the middle.
            Set the decay to a small value if there appears to be a bias
            in the integration direction.""" % globals())

        self.neurite_choice = cps.Choice(
            "Enhancement method",
            [N_TUBENESS, N_GRADIENT], doc="""
            <i>(Used only for the %(E_NEURITES)s method)</i><br>
            Two methods can be used to enhance neurites:<br>
            <ul>
            <li><i>%(N_TUBENESS)s</i>: This method is an adaptation of
            the method used by the <a href="http://www.longair.net/edinburgh/imagej/tubeness/">
            ImageJ Tubeness plugin</a>. The image
            is smoothed with a Gaussian. The Hessian is then computed at every
            point to measure the intensity gradient and the eigenvalues of the
            Hessian are computed to determine the magnitude of the intensity.
            The absolute maximum of the two eigenvalues gives a measure of
            the ratio of the intensity of the gradient in the direction of
            its most rapid descent versus in the orthogonal direction. The
            output image is the absolute magnitude of the highest eigenvalue
            if that eigenvalue is negative (white neurite on dark background),
            otherwise, zero.</li>
            <li><i>%(N_GRADIENT)s</i>: The module takes the difference of the
            white and black tophat filters (a white tophat filtering is the image minus
            the morphological grayscale opening of the image; a black tophat filtering is the
            morphological grayscale closing of the image minus the image).
            The effect is to enhance lines whose width is the "feature size".</li>
            </ul>""" % globals())

        self.speckle_accuracy = cps.Choice(
            "Speed and accuracy",
            choices=[S_FAST, S_SLOW],
            doc="""
            <i>(Used only for the %(E_SPECKLES)s method)</i><br>
            <i>%(E_SPECKLES)s</i> can use a fast or slow algorithm to find
            speckles.
            <ul>
            <li><i>%(S_FAST)s:</i> Select this option for speckles that have a large
            radius (greater than 10 pixels) and need not be exactly circular.</li>
            <li><i>%(S_SLOW)s:</i> Use for speckles of small radius or to
            maintain backwards compatibility with previous versions of
            CellProfiler.</li>
            </ul>
            """ % globals())

    def settings(self):
        """Return the settings in their save/load order."""
        return [self.image_name, self.filtered_image_name,
                self.method, self.object_size, self.enhance_method,
                self.hole_size, self.smoothing, self.angle, self.decay,
                self.neurite_choice, self.speckle_accuracy]

    def visible_settings(self):
        """Return only the settings relevant to the current method choice."""
        result = [self.image_name, self.filtered_image_name,
                  self.method]
        if self.method == ENHANCE:
            result += [self.enhance_method]
            if self.enhance_method == E_DARK_HOLES:
                result += [self.hole_size]
            elif self.enhance_method == E_TEXTURE:
                result += [self.smoothing]
            elif self.enhance_method == E_DIC:
                result += [self.smoothing, self.angle, self.decay]
            elif self.enhance_method == E_NEURITES:
                result += [self.neurite_choice]
                if self.neurite_choice == N_GRADIENT:
                    result += [self.object_size]
                else:
                    result += [self.smoothing]
            elif self.enhance_method == E_SPECKLES:
                result += [self.object_size, self.speckle_accuracy]
            else:
                result += [self.object_size]
        else:
            result += [self.object_size]
        return result

    def run(self, workspace):
        """Apply the selected enhancement/suppression to the input image and
        add the result to the image set."""
        image = workspace.image_set.get_image(self.image_name.value,
                                              must_be_grayscale=True)
        #
        # Match against Matlab's strel('disk') operation.
        #
        radius = (float(self.object_size.value) - 1.0) / 2.0
        mask = image.mask if image.has_mask else None
        pixel_data = image.pixel_data
        if self.method == ENHANCE:
            if self.enhance_method == E_SPECKLES:
                if self.speckle_accuracy == S_SLOW or radius <= 3:
                    result = white_tophat(pixel_data, radius, mask)
                else:
                    #
                    # white_tophat = img - opening
                    #              = img - dilate(erode)
                    #              = img - median_filter(median_filter(0%) 100%)
                    result = pixel_data - median_filter(
                        median_filter(pixel_data, mask, radius, percent=0),
                        mask, radius, percent=100)
                    if mask is not None:
                        result[~mask] = pixel_data[~mask]
            elif self.enhance_method == E_NEURITES:
                if self.neurite_choice == N_GRADIENT:
                    #
                    # white_tophat = img - opening
                    # black_tophat = closing - img
                    # desired effect = img + white_tophat - black_tophat
                    #                = img + img - opening - closing + img
                    #                = 3*img - opening - closing
                    result = (3 * pixel_data -
                              opening(pixel_data, radius, mask) -
                              closing(pixel_data, radius, mask))
                    result[result > 1] = 1
                    result[result < 0] = 0
                else:
                    sigma = self.smoothing.value
                    smoothed = gaussian_filter(pixel_data, sigma)
                    L = hessian(smoothed, return_hessian=False,
                                return_eigenvectors=False)
                    #
                    # The positive values are darker pixels with lighter
                    # neighbors. The original ImageJ code scales the result
                    # by sigma squared - I have a feeling this might be
                    # a first-order correction for e**(-2*sigma), possibly
                    # because the hessian is taken from one pixel away
                    # and the gradient is less as sigma gets larger.
                    #
                    result = -L[:, :, 0] * (L[:, :, 0] < 0) * sigma * sigma
                if image.has_mask:
                    result[~mask] = pixel_data[~mask]
            elif self.enhance_method == E_DARK_HOLES:
                min_radius = max(1, int(self.hole_size.min / 2))
                max_radius = int((self.hole_size.max + 1) / 2)
                result = enhance_dark_holes(pixel_data, min_radius,
                                            max_radius, mask)
            elif self.enhance_method == E_CIRCLES:
                result = circular_hough(pixel_data, radius + .5, mask=mask)
            elif self.enhance_method == E_TEXTURE:
                result = variance_transform(pixel_data,
                                            self.smoothing.value,
                                            mask=mask)
            elif self.enhance_method == E_DIC:
                result = line_integration(pixel_data,
                                          self.angle.value,
                                          self.decay.value,
                                          self.smoothing.value)
            else:
                raise NotImplementedError("Unimplemented enhance method: %s" %
                                          self.enhance_method.value)
        elif self.method == SUPPRESS:
            if image.has_mask:
                result = opening(image.pixel_data, radius, image.mask)
            else:
                result = opening(image.pixel_data, radius)
        else:
            raise ValueError("Unknown filtering method: %s" % self.method)
        result_image = cpi.Image(result, parent_image=image)
        workspace.image_set.add(self.filtered_image_name.value, result_image)
        if self.show_window:
            workspace.display_data.image = image.pixel_data
            workspace.display_data.result = result

    def display(self, workspace, figure):
        """Show the original and filtered images side by side."""
        image = workspace.display_data.image
        result = workspace.display_data.result
        figure.set_subplots((2, 1))
        figure.subplot_imshow_grayscale(0, 0, image,
                                        "Original: %s" % self.image_name.value)
        figure.subplot_imshow_grayscale(1, 0, result,
                                        "Filtered: %s" % self.filtered_image_name.value,
                                        sharexy=figure.subplot(0, 0))

    def upgrade_settings(self, setting_values, variable_revision_number,
                         module_name, from_matlab):
        '''Adjust setting values if they came from a previous revision

        setting_values - a sequence of strings representing the settings
                         for the module as stored in the pipeline
        variable_revision_number - the variable revision number of the
                         module at the time the pipeline was saved. Use this
                         to determine how the incoming setting values map
                         to those of the current module version.
        module_name - the name of the module that did the saving. This can be
                         used to import the settings from another module if
                         that module was merged into the current module
        from_matlab - True if the settings came from a Matlab pipeline, False
                         if the settings are from a CellProfiler 2.0 pipeline.

        Overriding modules should return a tuple of setting_values,
        variable_revision_number and True if upgraded to CP 2.0, otherwise
        they should leave things as-is so that the caller can report
        an error.
        '''
        if not from_matlab and variable_revision_number == 1:
            #
            # V1 -> V2, added enhance method and hole size
            #
            setting_values = setting_values + [E_SPECKLES, "1,10"]
            variable_revision_number = 2
        if not from_matlab and variable_revision_number == 2:
            #
            # V2 -> V3, added texture and DIC
            #
            setting_values = setting_values + ["2.0", "0", ".95"]
            variable_revision_number = 3
        if not from_matlab and variable_revision_number == 3:
            setting_values = setting_values + [N_GRADIENT]
            variable_revision_number = 4
        if not from_matlab and variable_revision_number == 4:
            setting_values = setting_values + [S_SLOW]
            variable_revision_number = 5
        return setting_values, variable_revision_number, from_matlab
EnhanceOrSuppressSpeckles = EnhanceOrSuppressFeatures
|
996,556 | c81c2ae61458819a0ec891cf1a262a292157086d | import csv
import random
import string
import mmap
# Join the cleaned e-mail training data against a file of blacklisted IP
# addresses and emit a reduced feature CSV for decision-tree training.
with open('cleaned_data_numerical_training.csv') as File:
    # mmap the blacklist so each lookup is a fast substring search over the
    # whole file instead of a line-by-line scan.
    with open('blacklistedIpAddresses.txt', 'rb', 0) as file, \
            mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as s:
        rows = []
        reader = csv.DictReader(File)
        for row in reader:
            blacklistedIpAddressFound = False
            possiblyMalicious = row['Possibly-Malicious']
            # Flag the e-mail if any of its three IP-bearing header fields
            # appears anywhere in the blacklist file.
            if s.find(str.encode(row['Received1'])) != -1:
                blacklistedIpAddressFound = True
            elif s.find(str.encode(row['Received2'])) != -1:
                blacklistedIpAddressFound = True
            elif s.find(str.encode(row['From-IP'])) != -1:
                blacklistedIpAddressFound = True
            # Upgrade the label when a blacklisted IP is present.
            # NOTE(review): this assigns the boolean True while other rows
            # keep the CSV strings 'True'/'False'; both render as "True" in
            # the output, but the mixed types are worth confirming.
            if possiblyMalicious == 'False':
                if blacklistedIpAddressFound == True:
                    possiblyMalicious = True
            rows.append({'To': row['To'], 'X-To': row['X-To'], 'X-cc': row['X-cc'], 'X-bcc': row['X-bcc'], 'Cc': row['Cc'], 'Bcc': row['Bcc'], 'Possibly-Spam-Subject': row['Possibly-Malicious-Spam'], 'Blacklisted-IP-Address': blacklistedIpAddressFound, 'Possibly-Malicious': possiblyMalicious})
# Write the reduced feature set used by the decision-tree trainer.
with open('output/data_for_decision_tree.csv', 'w') as csvfile:
    fieldnames = ['To', 'X-To', 'X-cc', 'X-bcc', 'Cc', 'Bcc', 'Possibly-Spam-Subject', 'Blacklisted-IP-Address', 'Possibly-Malicious']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(rows)
996,557 | ada716c1a40ba31342747c32ed0a2614f489df48 | from tkinter import *
from tkinter import ttk
def callback():
    """Print a message each time the button is pressed."""
    print("Clicked")


root = Tk()
# Wire the command at construction time instead of a later .config() call.
button = ttk.Button(root, text="Click Me", command=callback)
button.pack()

# Bug fix: without mainloop() the script creates the window and exits
# immediately; the event loop keeps the window open and dispatches clicks.
root.mainloop()
|
996,558 | 0c9d1e7ea5af94ed29dcbcc25e4b90cb05e2ffde | rule = {}
rule['include'] = {
'regText':r"""(include|require)(_once){0,1}(\s{1,5}|\s{0,5}\().{0,60}\$(?!.*(this->))\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"文件包含函数中存在变量,可能存在文件包含漏洞"
}
rule['preg_replace'] = {
'regText':r"""preg_replace\(\s{0,5}.*/[is]{0,2}e[is]{0,2}["']\s{0,5},(.*\$.*,|.*,.*\$)""",
'content':"preg_replace的/e模式,且有可控变量,可能存在代码执行漏洞"
}
rule['phpinfo'] = {
'regText':r"""phpinfo\s{0,5}\(\s{0,5}\)""",
'content':"phpinfo()函数,可能存在敏感信息泄露漏洞"
}
rule['call_user_func'] = {
'regText':r"""call_user_func(_array){0,1}\(\s{0,5}\$\w{1,15}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"call_user_func函数参数包含变量,可能存在代码执行漏洞"
}
rule['readfile'] = {
'regText':r"""(file_get_contents|fopen|readfile|fgets|fread|parse_ini_file|highlight_file|fgetss|show_source)\s{0,5}\(.{0,40}\$\w{1,15}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"读取文件函数中存在变量,可能存在任意文件读取漏洞"
}
rule['systemexec'] = {
'regText':r"""(system|passthru|pcntl_exec|shell_exec|escapeshellcmd|exec|popen|proc_open)\s{0,10}\(.{0,40}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"命令执行函数中存在变量,可能存在任意命令执行漏洞"
}
rule['parse_str'] = {
'regText':r"""(mb_){0,1}parse_str\s{0,10}\(.{0,40}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"parse_str函数中存在变量,可能存在变量覆盖漏洞"
}
rule['doublemoney'] = {
'regText':r"""\${{0,1}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}\s{0,4}=\s{0,4}.{0,20}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"双$$符号可能存在变量覆盖漏洞"
}
rule['ipinfo'] = {
'regText':r"""["'](HTTP_CLIENT_IP|HTTP_X_FORWARDED_FOR|HTTP_REFERER)["']""",
'content':"获取IP地址方式可伪造,HTTP_REFERER可伪造,常见引发SQL注入等漏洞"
}
rule['filectrol'] = {
'regText':r"""(unlink|copy|fwrite|readfile|file_put_contents|file_get_contents|bzopen)\s{0,10}\(.{0,40}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"文件操作函数中存在变量,可能存在任意文件读取/删除/修改/写入等漏洞"
}
rule['extract'] = {
'regText':r"""(extract)\s{0,5}\(.{0,30}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}\s{0,5},{0,1}\s{0,5}(EXTR_OVERWRITE){0,1}\s{0,5}\)""",
'content':"extract函数中存在变量,可能存在变量覆盖漏洞"
}
rule['codeexec'] = {
'regText':r"""\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}\s{0,5}\(\s{0,5}\$_(POST|GET|REQUEST|SERVER)\[.{1,20}\]""",
'content':"可能存在代码执行漏洞,或者此处是后门"
}
rule['urldecode'] = {
'regText':r"""^(?!.*addslashes).{0,40}((raw){0,1}urldecode|stripslashes)\s{0,5}\(.{0,60}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"urldecode绕过GPC,stripslashes会取消GPC转义字符"
}
rule['double``'] = {
'regText':r"""`\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}`""",
'content':"``反引号中包含变量,变量可控会导致命令执行漏洞"
}
rule['array_map'] = {
'regText':r"""array_map\s{0,4}\(\s{0,4}.{0,20}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}\s{0,4}.{0,20},""",
'content':"array_map参数包含变量,变量可控可能会导致代码执行漏洞"
}
rule['sql_select'] = {
'regText':r"""select\s{1,4}.{1,60}from.{1,50}where\s{1,3}.{1,50}=["\s\.]{0,10}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"SQL语句select中条件变量无单引号保护,可能存在SQL注入漏洞"
}
rule['sql_delete'] = {
'regText':r"""delete\s{1,4}from.{1,20}where\s{1,3}.{1,30}=["\s\.]{0,10}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"SQL语句delete中条件变量无单引号保护,可能存在SQL注入漏洞"
}
rule['sql_insert'] = {
'regText':r"""insert\s{1,5}into\s{1,5}.{1,60}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"SQL语句insert中插入变量无单引号保护,可能存在SQL注入漏洞"
}
rule['sql_update'] = {
'regText':r"""update\s{1,4}.{1,30}\s{1,3}set\s{1,5}.{1,60}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"SQL语句delete中条件变量无单引号保护,可能存在SQL注入漏洞"
}
rule['eval'] = {
'regText':r"""(eval|assert)\s{0,10}\(.{0,60}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"eval或者assertc函数中存在变量,可能存在代码执行漏洞"
}
rule['echo'] = {
'regText':r"""(echo|print|print_r)\s{0,5}\({0,1}.{0,60}\$_(POST|GET|REQUEST|SERVER)""",
'content':"echo等输出中存在可控变量,可能存在XSS漏洞"
}
rule['header'] = {
'regText':r"""(header\s{0,5}\(.{0,30}|window.location.href\s{0,5}=\s{0,5})\$_(POST|GET|REQUEST|SERVER)""",
'content':"header函数或者js location有可控参数,存在任意跳转或http头污染漏洞"
}
rule['upload'] = {
'regText':r"""move_uploaded_file\s{0,5}\(""",
'content':"存在文件上传,注意上传类型是否可控"
}
rule['unserialize'] = {
'regText':r"""(unserialize)\s{0,5}\({0,1}.{0,60}\$_(POST|GET|REQUEST|SERVER)""",
'content':"unserialize函数中存在可控变量,可能存在反序列化漏洞"
}
rule['unserialize2'] = {
'regText':r"""(unserialize)\s{0,10}\(.{0,60}\$\w{1,20}((\[["']|\[)\${0,1}[\w\[\]"']{0,30}){0,1}""",
'content':"unserialize函数中存在变量,可能存在反序列化漏洞"
}
rule['ssrf'] = {
'regText':r"""\b(fsockopen|curl_exec)\s{0,400}\(.{0,400}\$\w{1,100}""",
'content':"远程请求函数中存在变量,可能存在SSRF漏洞"
}
rule['PHAR_basic'] = {
'regText':r"""\b(getimagesize|exif_thumbnail|exif_imagetype|imageloadfont| hash_hmac_file|hash_file|hash_update_file|md5_file|sha1_file| get_meta_tags|get_headers)\s{0,400}\(.{0,400}\$\w{1,100}""",
'content':",可能存在PHAR反序列列化漏洞(一般类型)"
}
rule['PHAR_img'] = {
'regText':r"""\b(imagecreatefrom)\w{0,10}\s{0,400}\(.{0,400}\$\w{1,100}""",
'content':",可能存在PHAR反序列列化漏洞(图片转换)"
} |
996,559 | 2426044395df4a1ef0003707e5ca2a4fc7a3843e | #Todo: use abstract class using abc
#for more info at abc , goto : http://docs.python.org/2/library/abc.html
class BaseInfo(object):
    """Base container for a drone's descriptive properties.

    All attributes start as None and are filled in by subclasses/callers.
    """

    #constructor for basic properties
    def __init__(self):
        self.name = None                # human-readable name
        self.id = None                  # unique identifier
        self.type = None                # the type of the drone
        # dimension of the drone
        self.height = None
        self.width = None
        self.depth = None
        self.connectivityType = None    # i.e : Wifi, 5GHZ and etc..

    def Move(self):
        """Movement hook; intentionally a no-op in the base class.

        Bug fix: the original definition omitted ``self``, so calling
        ``instance.Move()`` raised TypeError.
        """
        pass
996,560 | 784cbd8090b06a9510f8e9d4473ff3a0b309916d | # BSD 3-Clause License
#
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
import world
import utils
from world import cprint
import torch
import numpy as np
from tensorboardX import SummaryWriter
import time
import Procedure
from os.path import join
# ==============================
# Training driver: builds the recommendation model, optionally restores a
# checkpoint, then runs the BPR training loop with periodic evaluation.
utils.set_seed(world.seed)
print(">>SEED:", world.seed)
# ==============================
import register
from register import dataset
from parse import parse_args
#new
from common.base import Trainer  # NOTE(review): imported but unused here — confirm before removing
args = parse_args()#new  NOTE(review): `args` is not used below; world.config drives everything
Recmodel = register.MODELS[world.model_name](world.config, dataset)
Recmodel = Recmodel.to(world.device)
bpr = utils.BPRLoss(Recmodel, world.config)

weight_file = utils.getFileName()
print(f"load and save to {weight_file}")
# Optionally warm-start from an existing checkpoint; fall back to scratch.
if world.LOAD:
    try:
        Recmodel.load_state_dict(torch.load(weight_file, map_location=torch.device('cpu')))
        world.cprint(f"loaded model weights from {weight_file}")
    except FileNotFoundError:
        print(f"{weight_file} not exists, start from beginning")
Neg_k = 1  # number of negative samples per positive pair

# init tensorboard
if world.tensorboard:
    w : SummaryWriter = SummaryWriter(
        join(world.BOARD_PATH, time.strftime("%m-%d-%Hh%Mm%Ss-") + "-" + world.comment)
    )
else:
    w = None
    world.cprint("not enable tensorflowboard")

try:
    for epoch in range(world.TRAIN_epochs):
        start = time.time()
        # Evaluate every 10 epochs (including epoch 0, i.e. before training).
        if epoch % 10 == 0:
            cprint("[TEST]")
            Procedure.Test(dataset, Recmodel, epoch, w, world.config['multicore'])
        output_information = Procedure.BPR_train_original(dataset, Recmodel, bpr, epoch, neg_k=Neg_k, w=w)
        print(f'EPOCH[{epoch+1}/{world.TRAIN_epochs}] {output_information}')
        #torch.save(Recmodel.state_dict(), weight_file)
        # Checkpoint every 100 epochs only (not every epoch).
        if epoch % 100 == 0:
            torch.save(Recmodel.state_dict(), weight_file)
finally:
    # Always flush/close the tensorboard writer, even on interrupt.
    if world.tensorboard:
        w.close()
996,561 | 41a5fe2b1d8685e370eaea424fb22081a8225b98 | ##
##
## Tuples are immutable lists: they cannot be modified after creation.
## They do not allow adding, deleting or moving elements (no append, extend, remove).
## Slicing is allowed, but the result of a slice is a new tuple.
## Searches (index) were not supported "BEFORE PYTHON VERSIONS 2.6 OR 2.7".
## Checking whether an element is contained in a tuple IS supported.
## What advantages do they have over lists?
##   faster
##   less memory (better optimized)
##   used for formatting strings
##   can be used as dictionary keys (lists can NOT)
##
## Tuple syntax:
## tupleName = (elem1, elem2, elem3, ...)
## The parentheses can be omitted.
miTupla = ("kevin", "quispe","lima",21,False)
print(miTupla[:])
### ('kevin', 'quispe', 'lima', 21, False)
print(miTupla)
### ('kevin', 'quispe', 'lima', 21, False)
print(miTupla[0])
### kevin
miLista=list(miTupla) ## converting a tuple into a list
print(miLista)
### ['kevin', 'quispe', 'lima', 21, False]
tuTupla = tuple(miLista) ## converting a list into a tuple
print(tuTupla)
### ('kevin', 'quispe', 'lima', 21, False)
print("kevin" in miTupla)
### True
print("lalalsa" in miTupla)
### False
print(miTupla.count(21)) ## how many times an element appears
### 1
print(len(miTupla)) ## HOW MANY elements the tuple holds (not the same as the last index)
### 5
laTupla = ("unico",)## SINGLE-element tuples: without the comma (,) it is not a one-element tuple
print(laTupla)
### ('unico',)
print(len(laTupla))
### 1
tupla2 = "leo",2,21,True ## tuple written without parentheses: "TUPLE PACKING"
print(tupla2)
### ('leo', 2, 21, True)
tupla3 = ("kevin",22,9,1996)
nombre, dia, mes, agno = tupla3 ## "TUPLE UNPACKING"
print(nombre)
### kevin
print(dia)
### 22
print(mes)
### 9
print(agno)
### 1996
print(miTupla.index("quispe")) ## works in recent Python versions
### 1
|
996,562 | 8ff94abc53c7f99f671a8ab12d2f6c2c1c27f078 |
def nest_paren(s):
    """Return True iff *s* is a fully nested parenthesis string.

    A fully nested string is empty, or a single '(' and ')' wrapped around
    another fully nested string (e.g. "((()))").  Any other character,
    ordering, or imbalance yields False.
    """
    # Iteratively peel one matched outer pair per step instead of recursing.
    remaining = s
    while remaining:
        if remaining[0] != '(' or remaining[-1] != ')':
            return False
        remaining = remaining[1:-1]
    return True
if __name__ == "__main__":
    # Demo: the trailing '8' breaks the nesting, so this prints False.
    s = "((()))8"
    print(nest_paren(s))
|
996,563 | 4e7298bf6682a56c9b159c1ddb9daf94b42f4045 | import train_and_val
if __name__ == '__main__':
    # Thin launcher: delegate to the project's training/validation module.
    train_and_val.train()
996,564 | 7e0f493efbde89e69d5b20b0e2ab85467ae55615 | #
# Python GUI - Buttons - Generic
#
from GUI.Properties import overridable_property
from GUI.Actions import Action
from GUI import Control
class Button(Control, Action):
    """ A pushbutton control."""

    # Declared property; allowed values are documented in the descriptor below.
    # Concrete backends supply the actual storage/behaviour.
    style = overridable_property('style',
        "One of 'normal', 'default', 'cancel'")

    def activate(self):
        """Highlight the button momentarily and then perform its action."""
        # Visual feedback first, then the Action-mixin dispatch.
        self.flash()
        self.do_action()

    def flash(self):
        """Highlight the button momentarily as though it had been clicked,
        without performing the action.

        Abstract: platform-specific subclasses must override this.
        """
        raise NotImplementedError
|
996,565 | e781280557a183e556658d768987354baff48344 | from bs4 import BeautifulSoup
import html
import logging
import re
def __parseHTML(page_html: str) -> dict:
    """Function to parse EDGAR page HTML, returning a dict of company info.

    Arguments:
        page_html {str} -- Raw HTML of page.

    Returns:
        dict -- Structured dictionary of company attributes with keys
        'addresses', 'name', 'former_names' and 'metadata'.
    """
    soup = BeautifulSoup(page_html, features='html.parser')
    # Delegate each section of the page to its dedicated extractor.
    return {
        'addresses': __getAddresses(parsed=soup),
        'name': __getCompanyName(parsed=soup),
        'former_names': __getFormerNames(parsed=soup),
        'metadata': __getCompanyMetadata(parsed=soup),
    }
def __getAddresses(parsed: BeautifulSoup) -> list:
    """Function to extract company addresses from the parsed HTML EDGAR page.
    Searches for address information in divs with class name 'mailer'.

    Arguments:
        parsed {BeautifulSoup} -- Parsed HTML from company EDGAR filing.

    Returns:
        list -- List of addresses; each entry is a dict with keys 'type',
        'street_address' and (when a phone line is present) 'phone'.
    """
    # Addresses container
    address_divs = parsed.find_all('div', class_='mailer')
    # Building RegEx for phone number
    # The following RegEx extracts phone numbers in the following formats:
    # 1. (###) ###-####
    # 2. ###-###-####
    # 3. ##########
    phone_number_regex = re.compile(
        r'(\(\d{3}\) \d{3}-\d{4}|\d{3}-\d{3}-\d{4}|\d{10})')
    # List for final addresses
    addresses = list()
    for address in address_divs:
        # Create dict for address
        address_parsed = dict()
        # Split text by newline
        address_items = address.text.split('\n')
        # Removing leading and trailing spaces
        address_items = [i.strip() for i in address_items]
        # Variable to store street address
        street_address = ''
        # Iterate through each line
        for idx, address_item in enumerate(address_items):
            # First line is address type (e.g. mailing vs. business address)
            if idx == 0:
                address_parsed['type'] = address_item
                continue
            # Check if line has phone number
            phone_matches = phone_number_regex.findall(address_item)
            if len(phone_matches) == 1:
                # Stripping non-digit characters from phone number
                phone_number = re.sub('[^0-9]', '', phone_matches[0])
                address_parsed['phone'] = phone_number
                continue
            # If no number, accumulate onto the single street-address string
            street_address += address_item.strip() + ' '
        # Adding street address to parsed address
        address_parsed['street_address'] = street_address.strip()
        # Adding parsed address to addresses master list
        addresses += [address_parsed]
    return addresses
def __getCompanyName(parsed: BeautifulSoup) -> str:
    """Extract the company name from the parsed HTML EDGAR page.

    The name is the first direct text node inside the span with class
    'companyName'.

    Arguments:
        parsed {BeautifulSoup} -- Parsed HTML from company EDGAR filing.

    Returns:
        str -- Name of company.
    """
    # Locate the span holding the name, then keep only its direct string
    # children (nested tags such as links are skipped).
    container = parsed.find('span', class_='companyName')
    text_nodes = [node for node in container.children if isinstance(node, str)]
    # The company name is the first raw text node.
    return text_nodes[0].strip()
def __getFormerNames(parsed: BeautifulSoup) -> list:
    """Extract former company names and the dates through which reports were
    filed under each name, from the parsed HTML EDGAR page.

    First finds the sentences matching the "formerly: ... (filings through
    ...)" pattern, then pulls the name and date out of each sentence.

    Arguments:
        parsed {BeautifulSoup} -- Parsed HTML from company EDGAR filing.

    Returns:
        list -- List of former names (if any), empty list otherwise.  Each
        entry is a dict with keys 'former_name' and 'filings_through'.
    """
    ident = parsed.find('p', class_='identInfo')
    # Sentence-level pattern, then a second pattern for the fields inside it.
    sentence_pattern = re.compile(r'(formerly:.+?\(filings through .+?\))')
    detail_pattern = re.compile(r'formerly:(.*)\(.*(\d{4}-\d{2}-\d{2})')
    results = []
    for sentence in sentence_pattern.findall(ident.text):
        name, through_date = detail_pattern.findall(sentence)[0]
        results.append({
            'former_name': name.strip(),
            'filings_through': through_date,
        })
    return results
def __getCompanyMetadata(parsed: BeautifulSoup) -> dict:
    """Function to extract company Standard Industrial Classification (SIC)
    code, SIC type (i.e. description), company location, state of incorporation,
    and the end of its fiscal year.

    Searches the raw HTML of the company identification section of the page
    using regular expressions.

    Arguments:
        parsed {BeautifulSoup} -- Parsed HTML from company EDGAR filing.

    Returns:
        dict -- Company metadata with keys `sic`, `sic_type`, `location`,
        `incorporation_state`, and `fiscal_year_end`.

    NOTE(review): each regex assumes its field is present in the page; a
    page missing any field raises IndexError on the `[0]` lookups below.
    """
    # Company metadata container
    metadata_container = parsed.find('p', class_='identInfo')
    # String representation of HTML (used in RegEx)
    metadata_str = str(metadata_container)
    # Dictionary for company metadata
    company_metadata = dict()
    # RegEx for extracting SIC and SIC type
    sic_re = re.compile(r'SIC.+?:.+?(\d+?)<\/a> -(.+?)<br')
    # Getting SIC and SIC type match
    sic_matches = sic_re.findall(metadata_str)
    # Saving SIC and stripped, HTML-parsed SIC type (unescape HTML entities)
    company_metadata['sic'] = sic_matches[0][0]
    company_metadata['sic_type'] = html.unescape(sic_matches[0][1]).strip()
    # RegEx for extracting company location (state)
    location_re = re.compile(r'State location:.+?>(\w+?)<\/a>')
    # Getting company location
    location_matches = location_re.findall(metadata_str)
    # Saving company location
    company_metadata['location'] = location_matches[0].strip()
    # RegEx for extracting state of incorporation
    incorp_state_re = re.compile(r'State of Inc\.:.+?>(\w+?)<\/strong>')
    # Getting state of incorporation
    incorp_match = incorp_state_re.findall(metadata_str)[0]
    # Saving state of incorporation
    company_metadata['incorporation_state'] = incorp_match.strip()
    # RegEx for extracting end of fiscal year (4 digits, mmdd)
    fiscal_year_re = re.compile(r'Fiscal Year End:.+?(\d{4})')
    # Getting end of fiscal year
    fiscal_year_match = fiscal_year_re.findall(metadata_str)[0]
    # Saving end of fiscal year (in mm-dd format)
    fy_formatted = fiscal_year_match[0:2] + '-' + fiscal_year_match[2:]
    company_metadata['fiscal_year_end'] = fy_formatted
    return company_metadata
|
# Look up each recognised face's enrollment number and POST the batch to the
# attendance upload endpoint.
# BUG FIX: `json` and `requests` were used without being imported, so this
# script raised NameError at runtime; the imports are added below.
import json

import requests

enroll = []
temp = {}
with open('enrollment.json') as f:
    data = json.load(f)
# NOTE(review): `face_names` (list of recognised names) must be defined by the
# surrounding context before this block runs -- confirm against the caller.
for name in face_names:
    temp["Name"] = name
    temp["Enrollment"] = data[name]
    enroll.append(temp)
    temp = {}  # fresh dict per entry so appended records are independent
payLoad = dict()
payLoad["data"] = enroll
r = requests.post('http://192.168.1.106:3000/upload',
                  json=payLoad)
print(enroll)
# Interactive menu for building and querying a dictionary.
# Fixes over the original:
#   * dict1 is created once, OUTSIDE the loop, so entries survive between
#     menu selections (it used to be reset to {} on every iteration).
#   * option 2 reads the key as a string (keys are entered via input(), so
#     they are strings; the old int() conversion made deletion always fail)
#     and reports missing keys instead of raising KeyError.
#   * option 3's "Not Found" message is now reachable (the old `count` flag
#     was never set to 1, so it could never print).
dict1 = {}
while True:
    print("Enter 1)Entering the data 2)Deleting the Element 3)Printing the value 4)mapping two list into dictionary :")
    c = int(input())
    if c == 1:
        # Read n key/value pairs and merge them into the dictionary.
        n = int(input("Enter the number of elements :"))
        for i in range(0, n):
            key = input("Enter the {} key : ".format(i + 1))
            value = input("Enter the {} value ".format(i + 1))
            dict1[key] = value
        for x, y in dict1.items():
            print(x, y)
        print("***************************************")
    elif c == 2:
        # Delete a key (entered as a string) if it exists.
        ch = input("Enter the key you wanna to delete :")
        if ch in dict1:
            del dict1[ch]
        else:
            print(ch, " Not Found")
        print("***************************************")
        for x, y in dict1.items():
            print(x, y)
        print("***************************************")
    elif c == 3:
        # Print the value stored under a key, or report it missing.
        print("Enter the key for which you wanna to print value :")
        d = input()
        if d in dict1:
            print(dict1[d])
        else:
            print(d, " Not Found")
        print("***************************************")
    elif c == 4:
        # Zip two user-entered lists into a brand-new dictionary.
        list1, list2 = [], []
        k1 = int(input("Enter the element for 1st list :"))
        for i in range(0, k1):
            value = input("Enter the {} value : ".format(i + 1))
            list1.append(value)
        k2 = int(input("Enter the element for 2st list :"))
        for i in range(0, k2):
            value = input("Enter the {} value : ".format(i + 1))
            list2.append(value)
        new = dict(zip(list1, list2))
        print(new)
    else:
        print("Enter the valid input")
|
996,568 | ab3edcda98d336b9016d5ccbc90fb27130c72c3c | from django import forms
class ChangeForm(forms.Form):
    '''Login validation form: requires a username and a password of at
    least 5 characters.'''
    username = forms.CharField(required=True)
    password = forms.CharField(required=True,min_length=5)
|
996,569 | fde0e11b0b37f0631192581cd9be786b926a0311 | from math import log, ceil,exp, floor
# Python 2 script: counts the 100-smooth numbers (all prime factors <= 100)
# up to 10**9 by enumerating prime-exponent combinations in log space.
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
          67, 71, 73, 79, 83, 89, 97, 101]
# logs[k] == log(k) for k >= 2; the two leading 1.0 entries pad the list so
# it can be indexed directly by integer value.
logs = [1.0,1.0] + map( log, xrange(2,101) )
hamNumbers = []   # collected smooth numbers (reconstructed as ints)
lim = 10**9       # inclusive upper bound
degree = 100      # largest prime factor allowed
loglim = log( lim )
# lims[k]: how many exponents to try for factor k so k**e stays within lim
lims = map( lambda x: int( floor( loglim/x ) ) + 1, logs )

def getNextPrime(n):
    # Return the prime following n in the `primes` table.  (For n == 101 this
    # would raise IndexError, but combine() terminates before reaching it.)
    return primes[primes.index(n)+1]

def combine(prev, p):
    # Recursively extend the partial product `prev` (held as a log-sum) with
    # powers of prime p; a prime above `degree` terminates the recursion and
    # records the number.
    if p > degree:
        hamNumbers.append(int(round(exp(prev))))
    else:
        for e in xrange(lims[p]):
            next = prev + e * logs[p]
            if next <= loglim:
                combine(next, getNextPrime(p))
            else:
                break
    return

combine(0,2)
print len(hamNumbers)
|
996,570 | 41f732487a1c1a7c04322391e0b33751882ea35f | ##no.12
print("=======Nomer 12======")
from random import*
# Number-guessing game (prompts are in Indonesian): pick a secret 1..100 and
# let the user guess until correct, hinting "too small" / "too big".
x = randint(1, 100)
print("saya menyimpan sebuah angka bulat antara 1 sampai 100. Coba tebak")
while True :
    a=int(input("masukan tebakan:>"))
    if a<x:
        print("angka terlalu kecil. Coba lagi")
    elif a>x:
        print("angka terlalu besar. Coba lagi")
    else :
        print("waow! anda benar")
        break
|
996,571 | bd9c6e119f16dead0b4d6920eaca754748faebfe | #!/usr/bin/env python3
import time
from threading import RLock
from typing import *
import epd2in9.epd2in9 as epd2in9
from PIL import Image, ImageDraw, ImageFont
class Display2in9:
    """Driver wrapper for a Waveshare 2.9" e-paper panel that renders a
    departure board (line / destination / ETA table) in landscape mode."""

    PIXEL_CLEAR = 255  # white pixel value for the 1-bit image mode
    PIXEL_SET = 0      # black pixel value

    # Clock region (top-left / bottom-right corners) used by update_time().
    POS_TIME_1 = (220, 0)
    POS_TIME_2 = (340, 30)

    def __init__(self):
        self.epd = epd2in9.EPD()
        self.epd.init(self.epd.lut_full_update)
        self.epd.Clear(Display2in9.PIXEL_CLEAR)

        self.font = ImageFont.truetype('/usr/share/fonts/truetype/lato/Lato-Semibold.ttf', 19)

        # put an update here: push one blank full-screen frame so the panel
        # starts from a known state, then re-init for full updates.
        temp_image = Image.new('1', (epd2in9.EPD_WIDTH, epd2in9.EPD_HEIGHT), Display2in9.PIXEL_CLEAR)
        self.epd.display(self.epd.getbuffer(temp_image))
        time.sleep(2)
        self.epd.init(self.epd.lut_full_update)

        # Drawing surface in landscape orientation (HEIGHT/WIDTH swapped).
        self.image = Image.new('1', (epd2in9.EPD_HEIGHT, epd2in9.EPD_WIDTH), Display2in9.PIXEL_CLEAR)
        self.draw = ImageDraw.Draw(self.image)

        # Separate buffer intended for partial (clock) updates.
        self.periodic_update_image = Image.new('1', (epd2in9.EPD_HEIGHT, epd2in9.EPD_WIDTH), Display2in9.PIXEL_CLEAR)
        self.periodic_update_draw = ImageDraw.Draw(self.periodic_update_image)

        # Serialises access to the panel between concurrent updaters.
        self.lock = RLock()

    def set_lines_of_text(self, data: List[Tuple[float, str, str]]):
        """Render rows of (eta_seconds, line, destination) as a table and
        push a full-screen refresh to the panel."""
        with self.lock:
            # full update
            self.epd.init(self.epd.lut_full_update)

            # Layout constants (pixel coordinates on the landscape canvas).
            X0 = 0
            X1 = 50
            X2 = 225
            DY = 20
            Y_LINE = 24
            Y0 = 25 # start point for data on display
            Y_MAX = 127 - DY
            WIDTH = 295
            HEIGHT = 127

            # reset: blank the whole canvas, then draw the table header.
            self.draw.rectangle(((0, 0), (WIDTH, HEIGHT)), fill=Display2in9.PIXEL_CLEAR)
            self.draw.text((X0, 0), 'Linie', font=self.font, fill=Display2in9.PIXEL_SET)
            self.draw.text((X1, 0), 'Ziel', font=self.font, fill=Display2in9.PIXEL_SET)
            self.draw.line(((X0, Y_LINE), (WIDTH, Y_LINE)), fill=Display2in9.PIXEL_SET, width=1)
            Y = Y0
            for eta_seconds, line, dest in data:
                # do not draw if not enough space is left
                if Y > Y_MAX:
                    break
                eta_minutes = eta_seconds / 60
                self.draw.text((X0, Y), line, font=self.font, fill=Display2in9.PIXEL_SET)
                self.draw.text((X1, Y), dest, font=self.font, fill=Display2in9.PIXEL_SET)
                self.draw.text((X2, Y), f"{eta_minutes:2.0f} min", font=self.font, fill=Display2in9.PIXEL_SET)
                Y += DY
            # draw the time here since the periodic partial update is disabled
            self.draw.text(Display2in9.POS_TIME_1, time.strftime('%H:%M:%S'), font=self.font,
                           fill=Display2in9.PIXEL_SET)
            self.epd.display(self.epd.getbuffer(self.image))

    def update_time(self):
        # Disabled partial-update clock refresh: the commented-out code below
        # redrew only the POS_TIME region, but was abandoned in favour of
        # drawing the time during the full update above.  Kept for reference.
        # with self.lock:
        ## partial update
        # self.epd.init(self.epd.lut_partial_update)
        #
        # self.periodic_update_draw.rectangle((Display2in9.POS_TIME_1, Display2in9.POS_TIME_2),
        # fill=Display2in9.PIXEL_CLEAR)
        # self.periodic_update_draw.text(Display2in9.POS_TIME_1, time.strftime('%H:%M:%S'), font=self.font,
        # fill=Display2in9.PIXEL_SET)
        ##image_section = self.periodic_update_image.crop([*Display2in9.POS_TIME_1, *Display2in9.POS_TIME_2])
        ##self.image.paste(image_section, Display2in9.POS_TIME_1)
        # self.epd.display(self.epd.getbuffer(self.image))
        time.sleep(0.01)
|
996,572 | 486c53c8e3ecc644af0b32115621f0bab00a1a2b | import time
import random
from discord_webhook import DiscordWebhook, DiscordEmbed
from datetime import datetime
import dateutil.relativedelta

# Contest deadline and time remaining from "now".
end_contest = datetime.fromisoformat('2020-02-29 21:00:00.000')
dt2 = datetime.fromtimestamp(time.time())
rd = dateutil.relativedelta.relativedelta (end_contest, dt2)

# Destination webhook URLs (intentionally left empty in the repository).
DISCORD_HOOK = [
]

rule = 'https://discordapp.com/channels/637075986726518794/651199972221517824/674832873857220618'
# Candidate thumbnail images; one is picked at random per webhook post.
# BUG FIX: the first two entries were fused into one malformed string by a
# missing comma (Python concatenates adjacent string literals); they are now
# two separate URLs.
post = [ 'https://cdn.discordapp.com/attachments/673381582551121920/678064291961765898/DSC00430.jpg', 'https://cdn.discordapp.com/attachments/673381582551121920/675935542516645928/FebruaryContestPic1.png',
         'https://cdn.discordapp.com/attachments/673381582551121920/676181694587338762/1060950.png', 'https://cdn.discordapp.com/attachments/673381582551121920/676187298001584128/IMG_6378.jpg',
         'https://media.discordapp.net/attachments/673381582551121920/681560905661546620/DSC_1046.JPG', 'https://cdn.discordapp.com/attachments/673381582551121920/676849507694149672/20200211_185453.JPG',
         'https://cdn.discordapp.com/attachments/673381582551121920/678064810197254154/20200210_165219.jpg', 'https://cdn.discordapp.com/attachments/673381582551121920/681754598406357020/image1.jpg',
         'https://cdn.discordapp.com/attachments/673381582551121920/681754599199342612/image3.jpg', 'https://cdn.discordapp.com/attachments/673381582551121920/678064291961765898/DSC00430.jpg',
         'https://cdn.discordapp.com/attachments/673381582551121920/682043411557384243/image2.jpg', 'https://cdn.discordapp.com/attachments/673381582551121920/682043408986144769/image0.jpg',
         'https://cdn.discordapp.com/attachments/673381582551121920/682050629555847198/IMG_1095.JPEG', 'https://cdn.discordapp.com/attachments/673381582551121920/682050653928816910/IMG_1098.JPEG',
         'https://cdn.discordapp.com/attachments/673381582551121920/682054538361831426/20200225_212329.jpg', 'https://cdn.discordapp.com/attachments/673381582551121920/682054548063518768/20200225_213117.jpg' ]
post_link = 'https://discordapp.com/channels/637075986726518794/673381582551121920/674800800429768714'

# Human-readable countdown embedded into the reminder message body.
left = "{0} days and {1} hours".format(rd.days, rd.hours)
desc = "Our 3D Printing contest is almost over!\n\nFirst prize:\n**- 1x EZAbl Kit Sponsored by TH3D Studio**\n(value of 68$USD shipping is free)\n\nAnyone can submit a print!\nYou have {0} left before deadline!\n\n[See full contest rules]({1})\n[See posted prints]({2})\n".format(left, rule, post_link)
#print(desc)

embed = DiscordEmbed(title="3DMeltdown 3D Printing contest reminder!", description=desc, color=0xa21d1d)
# embed.set_author(name='3DMeltdown - Contest reminder!', url='https://discordapp.com/channels/637075986726518794/651199972221517824/674832873857220618')
embed.set_footer(text='3DMeltdown contest friendly reminder every 6h', icon_url="https://cdn.discordapp.com/emojis/673897582375993365.png")

# Post the reminder to every configured webhook, pacing requests to stay
# clear of Discord rate limits.
for hook in DISCORD_HOOK:
    embed.set_thumbnail(url=random.choice(post))
    webhook = DiscordWebhook(url=hook)
    webhook.add_embed(embed)
    webhook.execute()
    time.sleep(15)
|
996,573 | 8f93d3ac6d9d806fbd702cd99aa1fc04621c3300 | from fake_useragent import UserAgent
import requests
import json
import settings
import proxy_manager
import cookie_manager
import urllib.parse
class PixivRequestManager(object):
    """HTTP helper for Pixiv: carries UA/cookie/proxy headers across requests
    and fetches the daily ranking lists."""

    # Ranking endpoints; {count} is the 1-based page number (50 items/page).
    _rank_page = {
        "r-18": "https://www.pixiv.net/ranking.php?mode=daily_r18&p={count}&format=json",
        "safe": "https://www.pixiv.net/ranking.php?mode=daily&p={count}&format=json"
    }
    # NOTE(review): these are class-level attributes, so the random UA, proxy
    # and cookie manager are shared by all instances and evaluated once at
    # import time -- confirm that is intended.
    headers = {"user-agent": UserAgent().random}
    session = None
    quantity = settings.QUANTITY  # how many ranked items to collect per list
    proxies = proxy_manager.get_proxy()
    cookie_manager = cookie_manager.CookieManager()

    def __init__(self):
        # Per-instance session shadows the class-level `session = None`.
        self.session = requests.session()
        self.session.keep_alive = False

    def get(self, url):
        """GET *url* with managed cookie/proxy/UA headers and persist any
        Set-Cookie header returned by the server."""
        headers = self.headers.copy()
        headers["cookie"] = self.cookie_manager.get_cookie()
        headers["host"] = urllib.parse.urlparse(url).hostname
        headers["referer"] = "https://www.pixiv.net/"
        resp = self.session.get(url, headers=headers, proxies=self.proxies)
        # Keep the cookie jar in sync with whatever the server set.
        resp_cookie = resp.headers.get("set-cookie")
        self.cookie_manager.update(resp_cookie)
        return resp

    def get_rank(self):
        """Fetch the daily rankings page by page.

        Returns:
            tuple(list, list) -- (safe illust ids, r18 illust ids); the r18
            list may be empty when the account cannot access r18 content.
        """
        print("正在拉取排行榜数据")
        count = 1
        safe_list = []
        for page in range(1, self.quantity + 1, 50):
            resp = self.get(url=self._rank_page["safe"].format(count=count))
            json_data = json.loads(resp.text)
            for item in json_data["contents"]:
                # Keep only entries ranked within the requested quantity.
                if item["rank"] < page + min(self.quantity, 50):
                    safe_list.append(item["illust_id"])
            count += 1
        count = 1
        r18_list = []
        for page in range(1, self.quantity + 1, 50):
            resp = self.get(self._rank_page["r-18"].format(count=count))
            try:
                json_data = json.loads(resp.text)
            except:
                # Non-JSON response: the account is restricted from the r18
                # ranking, so fall back to safe-only results.
                print("因账号限制,仅获取safe rank。")
                break
            for item in json_data["contents"]:
                if item["rank"] < page + min(self.quantity, 50):
                    r18_list.append(item["illust_id"])
            count += 1
        print("排行榜数据拉取完成")
        return safe_list, r18_list
|
996,574 | 5b279b1ad8d6b49ebb98b37c28bbee8bccfada7a | from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin

#Custom
# Application views wired into the URLconf below (legacy Django 1.x style).
from tbForms.admin_views import edit_formulario, add_formulario
from tbForms.admin_views import log_unidadesaude, log_unidadesaude_by_form
from tbForms.views import correct_address
from tbForms.views import ffrequired
from tbForms.views import list_forms_by_health_unit
from tbForms.views import edit_form
from tbForms.views import handle_form
from tbForms.views import show_patients
from tbForms.views import list_patients
from tbForms.views import sapem_login
from tbForms.views import sapem_logout
from tbForms.views import showPatientLastRegister
from tbForms.views import showPatientRegisters
from tbForms.views import showPatientAllRegisters
from tbForms.views import homepage_view
from tbForms.views import showFichaConteudo
from tbForms.views import retrieveTriagemName
from tbForms.views import retrieveUS
from tbForms.views import retrieveLastReportByType
from tbForms.views import db2file
from tbForms.views import art_view
from tbForms.views import showARTResult
from tbForms.views import retrieveUnidadesSaude
from tbForms.views import showFieldsXML
from tbForms.views import showSPSSfields
from tbForms.views import select_unidade_saude
from tbForms.views import jsFunctionCreateHeaderFooter
from tbForms.views import retrieveFormName

from tbForms.reports.views import create_configuration_reports
from tbForms.reports.views import view_configuration_reports
from tbForms.reports.views import remove_configuration_reports
from tbForms.reports.views import configuration_db2file
from tbForms.reports.views import show_report
from tbForms.reports.views import get_configSettingsXml
from tbForms.reports.views import get_dataXml

from adminplus import AdminSitePlus

# Replace the stock admin site with AdminSitePlus so custom admin views
# (form editor, health-unit logs) can be registered alongside model admins.
admin.site = AdminSitePlus()
admin.autodiscover()

urlpatterns = patterns('',
    # Static/media files served by Django itself (development setup).
    (r'^custom-media/(?P<path>.*)$', 'django.views.static.serve',{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),

    # Custom admin form-builder views take precedence over the admin URLs.
    (r'^admin/forms/formulario/add/$', add_formulario),
    (r'^admin/forms/formulario/(\d)/$', edit_formulario),
    (r'^admin/', include(admin.site.urls)),
#    (r'^admin/unidadesaude/log/$', log_unidadesaude),
    (r'^admin/unidadesaude/(?P<healthUnit>\d+)/log/$', log_unidadesaude_by_form),

    (r'^FirefoxRequerido/', ffrequired),
    (r'^addressService/cep/(\d{5}-\d{3})/$', correct_address),

    # Form handling: list, fill, edit and inspect patient forms ("fichas").
    (r'^showForms/(?P<healthUnit>\d)/$', list_forms_by_health_unit),
    (r'^form/(?P<formId>\d+)/(?P<patientId>\d+)/(?P<f>.*)$', handle_form),
    (r'^form/edit/(?P<fichaId>\d+)/(?P<f>.*)$', edit_form),
    (r'^form/fields/xml/(?P<formId>\d+)/', showFieldsXML),
    (r'^form/fields/spss/xml/$', showSPSSfields),
    (r'^form/names/(?P<formId>\d+)/$', retrieveFormName),
    (r'^ficha/(?P<fichaId>\d+)/$', showFichaConteudo),

    # Patient registers and lookups.
    (r'^patientLastRegister/(?P<formId>\d+)/(?P<patientId>\d+)/$', showPatientLastRegister),
    (r'^registers/(?P<formId>\d+)/(?P<patientId>\d+)/$', showPatientRegisters),
    (r'^registers/all/(?P<patientId>\d+)/$', showPatientAllRegisters),
    (r'^triagemName/(?P<patientId>\d+)/$', retrieveTriagemName),
    (r'^healthCenter/(?P<opt>\w+?)/$', retrieveUS),
    (r'^patientLastRegisterByType/(?P<patientId>\d+)/(?P<type>\w+)/$', retrieveLastReportByType),
    (r'^patients/$', show_patients),
    (r'^listPatients/$', list_patients),
    (r'^js/createHeaderFooter/$', jsFunctionCreateHeaderFooter),
    (r'^$', homepage_view),
    (r'^download/(?P<format>\w+)/$', db2file),
    (r'^login/$', sapem_login),
    (r'^logout/$', sapem_logout),
    (r'^art_image/(?P<formId>\d+)/(?P<patientId>\d+)/$', art_view),
    (r'^art/(?P<formId>\d+)/(?P<patientId>\d+)/$', showARTResult),
    (r'^unidadesSaude/json/$', retrieveUnidadesSaude),
    (r'^unidadesSaude/change/$', select_unidade_saude),

    # Report configuration and export endpoints.
    (r'^reports/create/$', create_configuration_reports),
    (r'^reports/view/$', view_configuration_reports),
    (r'^reports/removeConfig/(?P<configId>\d+)/$', remove_configuration_reports),
    (r'^reports/download/(?P<sid>\d+)/(?P<format>\w+)/$', configuration_db2file),
    (r'^reports/showReport/(?P<configId>\d+)/$', show_report),
    (r'^reports/configSettingXml/(?P<configId>\d+)/$', get_configSettingsXml),
    (r'^reports/getData/(?P<configId>\d+)/(?P<formId>\d+)/(?P<variable>\w+)/$', get_dataXml),
)
|
class IOutputEngine(object):
    """Interface for anything that can emit a string of output (console,
    file, network sink, ...)."""

    def output(self, outputString: str):
        """Write *outputString* to the underlying sink.

        Subclasses must override this; the base implementation always
        raises NotImplementedError.
        """
        # BUG FIX: the original signature omitted `self`, so calling
        # engine.output("...") on an instance raised TypeError (too many
        # arguments) instead of NotImplementedError.
        raise NotImplementedError
|
996,576 | 7fea48e28060e22ab8e171c87cb435e148d35294 | #! /usr/bin/env python
#
#
# Copyright Alex Tomkins 2010
#
# Entry point for the IA jabber bot (Python 2): parses a config file from the
# command line, connects the bot, and pumps the asyncore loop, calling
# bot.idle() roughly once per second.
import sys
import logging
import locale
import codecs
import time
import asyncore
from optparse import OptionParser
import ConfigParser

from iabot.bot import IAJabberBot
from iabot.jabber.stats import StatsThread

# Ensure we use the right encoding for the terminal; fall back to us-ascii
# when the locale gives no answer, replacing unencodable characters.
locale.setlocale(locale.LC_CTYPE, '')
encoding = locale.getlocale()[1]
if not encoding:
    encoding = 'us-ascii'
sys.stdout = codecs.getwriter(encoding)(sys.stdout, errors='replace')
sys.stderr = codecs.getwriter(encoding)(sys.stderr, errors='replace')

# PyXMPP uses `logging` module for its debug output
# applications should set it up as needed
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG) # change to DEBUG for higher verbosity

# Command line config options: exactly one positional argument, the
# configuration file path.
parser = OptionParser(usage='%prog conffile', version='%prog 0.1')
(options, args) = parser.parse_args()

if len(args) != 1:
    parser.error('Incorrect number of arguments')

config_filename = args[0]

# Load the relevant config file
config_file = ConfigParser.SafeConfigParser()
config_file.read([config_filename])

bot = IAJabberBot(config_file)
bot.connect()

try:
    last_idle = time.time()
    while True:
        # Pump one iteration of the asyncore event loop (1s timeout).
        asyncore.loop(timeout=1, count=1)

        # Try to call idle once every second
        if time.time() >= last_idle+1:
            bot.idle()
            last_idle = time.time()
except KeyboardInterrupt:
    # Ctrl-C: disconnect cleanly and stop the stats thread if running.
    print u"disconnecting..."
    bot.disconnect()
    if bot.stats_thread:
        bot.stats_thread.exit()

print u"exiting..."
996,577 | 090faca49eddd99e2a825441c5b0c9de849761a9 |
from setuptools import setup
from codecs import open
from os import path

# Package metadata for Werkzeug-Raw; the long description is read from the
# README.rst that sits next to this setup.py.
here = path.abspath(path.dirname(__file__))

with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='Werkzeug-Raw',
    version='0.0.1',
    description='Werkzeug meets Raw HTTP',
    long_description=long_description,
    url='http://pythonhosted.org/Werkzeug-Raw/',
    author='Nathan Cahill',
    author_email='nathan@nathancahill.com',
    license='MIT',
    keywords='werkzeug flask http',
    py_modules=['werkzeug_raw'],
    install_requires=['werkzeug'],
    tests_require=['nose', 'coverage'],
)
|
996,578 | 040ed371830fbf3dda4dbd043766c657e089a3fc | # Generated by Django 2.1.15 on 2020-05-27 12:05
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: restricts user_category to the four
    single-letter role choices (Student/Parent/Teacher/Management)."""

    dependencies = [
        ('authentication', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user_category',
            name='user_category',
            field=models.CharField(choices=[('S', 'Student'), ('P', 'Parent'), ('T', 'Teacher'), ('M', 'Management')], max_length=1),
        ),
    ]
|
def f(n, a, p, r):
    """Sweep prefix sums over a[1..n-1], starting from previous sum *p*,
    forcing each new prefix sum to have the opposite (strict) sign of the
    one before it.

    *r* is the adjustment cost carried in by the caller; the accumulated
    cost (total number of +1/-1 unit increments applied) is returned.
    """
    prev = p
    cost = r
    for i in range(1, n):
        cur = prev + a[i]
        if prev < 0:
            # Previous sum was negative: current must become strictly positive.
            if cur <= 0:
                cost += 1 - cur
                cur = 1
        elif prev > 0 and cur >= 0:
            # Previous sum was positive: current must become strictly negative.
            cost += cur + 1
            cur = -1
        prev = cur
    return cost
n = int(input())
a = list(map(int, input().split()))
# Case 1: force the first prefix sum to be strictly positive.
p = a[0]
r = 0
if p <= 0:
    r += 1 - p
    p = 1
result1 = f(n, a, p, r)
# Case 2: force the first prefix sum to be strictly negative.
p = a[0]
r = 0
if p >= 0:
    r += p + 1
    p = -1
result2 = f(n, a, p, r)
# Minimum total adjustment over the two starting-sign choices.
print(min(result1, result2))
|
996,580 | ad131574dff80f662b1d167ee94d0e241c79a29e | from django.shortcuts import render, redirect
from django.views import View
from store.models.customer import Customer
from django.contrib.auth.hashers import check_password
class Login(View):
    """Customer login view: renders the form on GET, authenticates on POST."""

    def get(self, request):
        return render(request, 'login.html')

    def post(self, request):
        errMsg = ''
        data = self.request.POST
        email = data.get('email')
        password = data.get('password')
        # Echo the email back so the form stays pre-filled on failure.
        value = {
            'email': email,
        }
        customer = Customer.get_user_by_email(email=email)
        if customer:
            if check_password(password, customer.password):
                # Mark the customer as logged in by storing their id in the session.
                request.session["customer"] = customer.id
                return redirect('/')
            else:
                errMsg = "Password not match !"
                value["errMsg"] = errMsg
                return render(request, 'login.html', value)
        else:
            errMsg = "Invalid Email"
            value["errMsg"] = errMsg
            return render(request, 'login.html', value)
def logout(request):
    # Drop all session data (including the "customer" key) and return home.
    request.session.clear()
    return redirect('/')
996,581 | f0e4eddeffe520fcde693f85e1f86073d166ed84 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on 01 May 2020
@author: Fleur Couvreux
Modifications:
2020-06-04, R. Roehrig: Add z0 value + some cleaning/formatting
GABLS1 original case definition
From Kosovic and Curry 2000; Cuxart et al 2006; Beare et al 2006
From Kosovic and Curry 2000: The initial conditions,surface cooling rate, and the inversion strength for these simulations were based on the measurements made during BASE on 1 October 1994 from flight number 7
In the baseline simulation, the latitude was 73.8N, the geostrophic wind was set to 8 m s-1, thesurface cooling rate was 0.25 K h-1, the overlying inversion strength was 0.01 K m-1, and the surface roughness was 0.1 m. The baseline case roughness length is higher than the typical roughness length over sea ice in the Arctic ocean. However, due to the limitations of LES resolution, using a significantly lower roughness length would result in an underresolved surface layer.
"""
import os
import sys
sys.path = ['../../utils/',] + sys.path
import netCDF4 as nc
import numpy as np
from Case import Case
################################################
# 0. General configuration of the present script
################################################
lplot = True # plot all the variables
lverbose = False # print information about variables and case
################################################
# 1. General information about the case
################################################
# This is an idealized case so date is arbritrary
# lat/lon are fixed to a lat representative of Arctic conditions
9-h duration (10:00 to 19:00 UTC) with a constant surface cooling
case = Case('GABLS1/REF',
lat=73,
lon=123.33,
startDate="20000101100000",
endDate="20000101190000",
zorog=0.,
z0=0.1)
case.set_title("Forcing and initial conditions for GABLS1 case - Original definition")
case.set_reference("Beare et al. (2006, BLM), Cuxart et al (2006, BLM), Kosovic and Curry (2000)")
case.set_author("F. Couvreux")
case.set_script("driver_DEF.py")
# time units are expected to be seconds since startDate
t0 = 0      # 10:00 UTC, 1 January 2000 (startDate)
t1 = 32400  # 19:00 UTC, 1 January 2000 (t0 + 9 h, matching endDate)
################################################
# 2. Initial state
################################################
# Surface pressure
ps = 101320.
case.add_variable('ps',[ps,])
# z (m) theta (K) rt (g kg-1) u (m s-1) v (m s-1)
init = [ 0.0, 265.0, 0.0, 0.0, 0.0,\
2.0, 265.0, 0.0, 8.0, 0.0,\
100.0, 265.0, 0.0, 8.0, 0.0,\
400.0, 268.0, 0.0, 8.0, 0.0,\
700.0, 271.0, 0.0, 8.0, 0.0]
init = np.array(init,dtype=np.float64)
z = init[0::5]
case.add_variable('theta',init[1::5], lev=z,levtype='altitude')
case.add_variable('rt', init[2::5]/1000.,lev=z,levtype='altitude') # converted in kg kg-1
case.add_variable('u', init[3::5], lev=z,levtype='altitude')
case.add_variable('v', init[4::5], lev=z,levtype='altitude')
case.add_variable('height',z,lev=z,levtype='altitude')
# Turbulent Kinetic Energy: 0.4*(1 - z/250)**3 below 250 m, zero above
ztke = range(0,400+1,10)
nztke = len(ztke)
# Vectorized form of the original element-wise loop (identical values).
_ztke = np.asarray(ztke, dtype=np.float64)
tke = np.where(_ztke < 250., 0.4*(1. - _ztke/250.)**3, 0.)
case.add_variable('tke',tke,lev=ztke,levtype='altitude')
################################################
# 3. Forcing
################################################
# Constant Geostrophic wind across the simulation
ug = np.zeros((2,5),dtype=np.float64)
ug[0,:] = 8.
ug[1,:] = 8.
vg = np.zeros((2,5),dtype=np.float64)
vg[0,:] = 0.
vg[1,:] = 0.
case.add_variable('ug',ug,time=[t0,t1],lev=z,levtype='altitude')
case.add_variable('vg',vg,time=[t0,t1],lev=z,levtype='altitude')
# Surface Forcing
# constant cooling rate 0.25K/hr from 265 K
ts=[265., 264.75, 264.5, 264.25, 264., 263.75, 263.5, 263.25, 263.0, 262.75]
timets=[0., 3600., 7200., 10800., 14400., 18000., 21600., 25200., 28800., 32400.]
case.add_variable('ts',ts,time=timets)
# The following flux-gradient relations are recommended
# du/dz= ∂v=dz u*/(Kz)*(1.+Bm(z/L))
# dtheta/dz= θ*/(Kz)*(1+Bh(z/L))
# K=0.4
# Bm=4.8
# Bh=7.8
# No advection forcing just a geostrophic wind
################################################
# 4. Attributes
################################################
# Radiation schemes are switched off
case.set_attribute("rad_theta","adv")
# Geostrophic wind forcing
case.set_attribute("forc_geo",1)
# Surface flux forcing, wind stress is computed using z0
case.set_attribute("surfaceType","land")
case.set_attribute("surfaceForcing","ts")
case.set_attribute("surfaceForcingWind","z0")
case.set_attribute("z0",0.1)
################################################
# 5. Writing file
################################################
case.write('GABLS1_REF_DEF_driver.nc',verbose=False)
if lverbose:
case.info()
################################################
# 6. Ploting, if asked
################################################
if lplot:
case.plot(rep_images='./images/driver_DEF/',timeunits='hours')
|
996,582 | 05339823adb0f2a136ce20da35eafc981b22cb0b | # http://bigocoder.com/courses/OBLUE01/OBLUE01_LEC15/BLUE_L15P08/statement
import queue
INF = 10E9
class Node:
    """Priority-queue / adjacency entry: a vertex id plus an edge weight."""

    def __init__(self, id, dist):
        self.dist = dist
        self.id = id

    def __lt__(self, other):
        # Strict comparison: the original returned `self.dist <= other.dist`,
        # which makes `a < b` true for equal priorities and violates the
        # rich-comparison contract (a < b and b < a both true).
        return self.dist < other.dist
def Prims(s):
    """Prim's MST algorithm started from vertex s.

    Operates on the module-level globals: `graph` (adjacency lists of Node),
    `dist` (cheapest known edge weight connecting each vertex to the tree),
    and `visited`.  After the call, summing dist[v] over reached vertices
    gives the MST weight (the caller does this below).
    """
    pq = queue.PriorityQueue()
    pq.put(Node(s,0))
    dist[s] = 0
    while not pq.empty():
        top = pq.get()
        u = top.id
        visited[u] = True
        for neighbor in graph[u]:
            v = neighbor.id
            w = neighbor.dist
            # Lazy deletion: stale queue entries for an already-visited u are
            # harmless -- w < dist[v] cannot hold again after relaxation.
            if not visited[v] and w < dist[v]:
                dist[v] = w
                pq.put(Node(v,w))
# Read the graph: n vertices, m undirected weighted edges (1-based input).
n,m = map(int,input().split())
graph = [[] for i in range (n)]
dist = [INF for i in range (n)]
visited = [False for i in range (n)]
for i in range (m):
    x,y,r = map(int,input().split())
    # Store each undirected edge in both adjacency lists (0-based ids).
    graph[x-1].append(Node(y-1,r))
    graph[y-1].append(Node(x-1,r))
s = int(input())
Prims(s-1)
# Total MST weight: sum the cheapest connecting edge of every reached vertex.
res = 0
for i in range (n):
    if dist[i] != INF:
        res += dist[i]
print(res)
996,583 | b17b5759b06a9251aef0261f08eb8cef4c1251bb | # Plotting results from the pen accelerometer
import serial
import matplotlib.pyplot as plt
import time
arduino = serial.Serial("COM5")
x = []
y = []
for i in range(1000):
line = str(arduino.readline()).decode("utf-8")
arr = line.split(",")
x.append(arr[0])
y.append(arr[1])
time.sleep(0.01)
plt.plot(x, y)
plt.show()
|
996,584 | 983a41b59a5af926d2b49da729c18182f5014f9d | import pandas as pd
dataset = pd.read_csv("data/crabs.csv")
dataset.drop("index", axis=1, inplace=True)
# convert M e F para numeros
dataset['sex'] = dataset['sex'].replace('M',0)
dataset['sex'] = dataset['sex'].replace('F',1)
dataset['sp'] = dataset['sp'].replace('B', 0)
dataset['sp'] = dataset['sp'].replace('O', 1)
dataset.rename(columns={'sp':'class'},inplace=True)
dataset.to_csv("data/base_crabs.csv", index=False)
# Primeira tentativa: truncar os floats para int a bruta
# {for key in dataset.keys():
# dataset[key] = dataset[key].astype(int)
#dataset.to_csv("data/truncint_crabs.csv",index=False)
|
996,585 | bd927465cc07659c1d87263d7841887e1c840fac | """wap to get the parent and process id"""
import os
print(os.getpid())
print(os.getpid())
print(os.getpid())
print('*'*18)
print(os.getppid())
print(os.getppid())
"""
output:
-------
23192
23192
23192
******************
11520
11520
"""
|
996,586 | 6c362f2b5091ecc86a31781df67ef3c136a000e5 | import tensorflow as tf
import numpy as np
class CNN(object):
    """1-D convolutional network (TensorFlow 1.x graph-mode).

    Builds placeholders, a conv+max-pool branch per filter size, a linear
    output layer, an L1 regression loss, and exposes `scores`, `loss`
    and `accuracy` as graph tensors.
    """

    def __init__(self, input_size, output_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        # placeholders for input, output, dropout
        self.input_x = tf.placeholder(tf.float32, [None, input_size], name='input')
        self.input_y = tf.placeholder(tf.float32, [None, output_size], name='output')
        self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout')
        print(self.input_x.shape)
        l2_loss = tf.constant(0.0)
        # Add a channel dimension: input becomes [batch_size, input_size, input_channels=1]
        self.input_x_expanded = tf.expand_dims(self.input_x, -1)
        # Create a convolution + maxpool branch for every filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            # Convolution layer; kernel shape is [filter_size, input_channels, num_filters]
            filter_shape = [filter_size, 1, num_filters]
            W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='W')
            b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name='b')
            cnn = tf.nn.conv1d(
                self.input_x_expanded,
                W,
                stride=1,
                padding='VALID',
                name='conv'
            )
            # Conv output shape: [batch_size, input_size - filter_size + 1, num_filters]
            # apply nonlinearity
            h = tf.nn.relu(tf.nn.bias_add(cnn, b), name='relu')
            # Max-pool over the whole temporal extent
            pooled = tf.nn.pool(
                h,
                window_shape=[input_size - filter_size + 1],
                pooling_type='MAX',
                padding='VALID',
                name='max_pool'
            )
            # pooled.shape: [batch_size, 1, num_filters]
            pooled_outputs.append(pooled)
        # Combine all pooled features
        num_filters_total = num_filters * len(filter_sizes)
        # Concatenate along the third axis; shape: [batch_size, 1, num_filters_total]
        self.h_pool = tf.concat(pooled_outputs, 2)
        # Flatten to 2-D: [batch_size, num_filters_total]
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        # add dropout layer
        self.drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
        # output layer and predictions
        with tf.name_scope('output'):
            W = tf.get_variable(
                'W',
                shape=[num_filters_total, output_size],
                initializer=tf.contrib.layers.xavier_initializer()
            )
            b = tf.Variable(tf.constant(0.1, shape=[output_size]), name='b')
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.drop, W, b, name='scores')
        with tf.name_scope('loss'):
            # L1 (absolute-error) regression loss.
            # NOTE(review): l2_reg_lambda is accepted but never applied to the
            # loss (expected: loss + l2_reg_lambda * l2_loss) -- confirm.
            self.losses = abs(self.scores - self.input_y)
            self.loss = tf.reduce_mean(self.losses)
        with tf.name_scope('accuracy'):
            # NOTE(review): `correct_error` is unused and `accuracy` is set to
            # the raw L2 norm of the output weights -- this looks like a
            # leftover; confirm the intended metric.
            correct_error = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.accuracy = l2_loss
#cnn = CNN(input_size=5, output_size=1, filter_sizes=[3,4,5], num_filters=10, l2_reg_lambda=0.0) |
996,587 | ad64c49e1ba6171b81961ba9b29b23b4dd801e78 | import requests
from bs4 import BeautifulSoup
from time import sleep
from random import choice
from termcolor import colored
from csv import DictWriter
base_url="http://quotes.toscrape.com"
def scrape_quotes():
    """Crawl every page of quotes.toscrape.com.

    Returns a list of dicts with keys "text", "author" and "bio-link".
    """
    all_quotes = []
    url = "/page/1"
    while url:
        # A timeout prevents a hung connection from blocking the crawl
        # forever (requests never times out by default).
        res = requests.get(f"{base_url}{url}", timeout=10)
        soup = BeautifulSoup(res.text, "html.parser")
        quotes = soup.find_all(class_="quote")
        for quote in quotes:
            all_quotes.append({
                "text": quote.find(class_="text").get_text(),
                "author": quote.find(class_="author").get_text(),
                "bio-link": quote.find("a")["href"]
            })
        # Follow the "Next" link until the last page (no next button).
        next_btn = soup.find(class_="next")
        url = next_btn.find("a")["href"] if next_btn else None
        sleep(1)  # be polite to the server
    return all_quotes
# write quotes to csv file
# write quotes to csv file
def write_quotes(quotes):
    """Write the scraped quotes to quotes.csv (UTF-8), one row per quote."""
    # newline="" is required by the csv module: without it every row is
    # followed by a blank line on Windows.
    with open("quotes.csv", "w", newline="", encoding="utf-8") as file:
        headers = ["text", "author", "bio-link"]
        csv_writer = DictWriter(file, fieldnames=headers)
        csv_writer.writeheader()
        csv_writer.writerows(quotes)
quotes=scrape_quotes()
write_quotes(quotes)
|
996,588 | 03ccf1a91411389f00cc766826618780ee279440 | import RPi.GPIO as GPIO
import time
'''
LLCDHelper
'''
class Display:
    """HD44780-style 16x2 character LCD driven in 4-bit mode over GPIO.

    Timing-sensitive: each nibble is latched by pulsing the Enable pin with
    the E_PULSE/E_DELAY delays below.  Pin numbers use BCM numbering.
    """
    # Define GPIO to LCD mapping
    LCD_RS = 7
    LCD_E  = 8
    LCD_D4 = 25
    LCD_D5 = 24
    LCD_D6 = 23
    LCD_D7 = 18

    # Define some device constants
    LCD_WIDTH = 16    # Maximum characters per line
    LCD_CHR = True    # RS high: byte is character data
    LCD_CMD = False   # RS low: byte is a command

    LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
    LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line

    # Timing constants
    E_PULSE = 0.0005
    E_DELAY = 0.0005

    def __init__(self):
        self.setup_gpio()
        self.initializeLcdDisplay()

    def setup_gpio(self):
        # Configure all control/data pins as outputs (BCM numbering).
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)       # Use BCM GPIO numbers
        GPIO.setup(self.LCD_E, GPIO.OUT)  # E
        GPIO.setup(self.LCD_RS, GPIO.OUT) # RS
        GPIO.setup(self.LCD_D4, GPIO.OUT) # DB4
        GPIO.setup(self.LCD_D5, GPIO.OUT) # DB5
        GPIO.setup(self.LCD_D6, GPIO.OUT) # DB6
        GPIO.setup(self.LCD_D7, GPIO.OUT) # DB7

    def initializeLcdDisplay(self):
        # Initialise display (standard HD44780 4-bit init sequence)
        self.lcd_byte(0x33,self.LCD_CMD) # 110011 Initialise
        self.lcd_byte(0x32,self.LCD_CMD) # 110010 Initialise
        self.lcd_byte(0x06,self.LCD_CMD) # 000110 Cursor move direction
        self.lcd_byte(0x0C,self.LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
        self.lcd_byte(0x28,self.LCD_CMD) # 101000 Data length, number of lines, font size
        self.lcd_byte(0x01,self.LCD_CMD) # 000001 Clear display
        time.sleep(self.E_DELAY)

    def lcd_byte(self, bits, mode):
        # Send byte to data pins, high nibble first, in 4-bit mode.
        # bits = data
        # mode = True  for character
        #        False for command
        GPIO.output(self.LCD_RS, mode) # RS

        # High bits
        GPIO.output(self.LCD_D4, False)
        GPIO.output(self.LCD_D5, False)
        GPIO.output(self.LCD_D6, False)
        GPIO.output(self.LCD_D7, False)
        if bits&0x10==0x10:
            GPIO.output(self.LCD_D4, True)
        if bits&0x20==0x20:
            GPIO.output(self.LCD_D5, True)
        if bits&0x40==0x40:
            GPIO.output(self.LCD_D6, True)
        if bits&0x80==0x80:
            GPIO.output(self.LCD_D7, True)

        # Toggle 'Enable' pin to latch the high nibble
        self.lcd_toggle_enable()

        # Low bits
        GPIO.output(self.LCD_D4, False)
        GPIO.output(self.LCD_D5, False)
        GPIO.output(self.LCD_D6, False)
        GPIO.output(self.LCD_D7, False)
        if bits&0x01==0x01:
            GPIO.output(self.LCD_D4, True)
        if bits&0x02==0x02:
            GPIO.output(self.LCD_D5, True)
        if bits&0x04==0x04:
            GPIO.output(self.LCD_D6, True)
        if bits&0x08==0x08:
            GPIO.output(self.LCD_D7, True)

        # Toggle 'Enable' pin to latch the low nibble
        self.lcd_toggle_enable()

    def lcd_toggle_enable(self):
        # Pulse the Enable line so the LCD reads the current nibble.
        time.sleep(self.E_DELAY)
        GPIO.output(self.LCD_E, True)
        time.sleep(self.E_PULSE)
        GPIO.output(self.LCD_E, False)
        time.sleep(self.E_DELAY)

    def lcd_string(self,message,line):
        # Send a string to the display; `line` is LCD_LINE_1 or LCD_LINE_2.
        # Messages are padded to (and effectively truncated at) LCD_WIDTH.
        message = message.ljust(self.LCD_WIDTH," ")
        self.lcd_byte(line, self.LCD_CMD)
        for i in range(self.LCD_WIDTH):
            self.lcd_byte(ord(message[i]),self.LCD_CHR)
|
996,589 | fa7fbd736f9e08880c23927311f5b61a1c4d0262 | import os
class Config:
    """Base Flask configuration shared by all environments."""
    # Secret key (falls back to a weak default when the env var is unset)
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'guess'
    # Database settings
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Mail (SMTP over SSL) settings
    # NOTE(review): SECURITY -- real Gmail credentials are hard-coded here;
    # they should come from environment variables, not source control.
    MAIL_SERVER = "smtp.googlemail.com"
    MAIL_PORT = 465
    MAIL_USE_TLS = False
    MAIL_USE_SSL = True
    MAIL_USERNAME = "a694190253@gmail.com"
    MAIL_PASSWORD = "sy3523802"
    # Maximum request/upload size: 2 MiB
    MAX_CONTENT_LENGTH = 2*1024*1024
    # Directory where uploaded photos are stored
    UPLOADED_PHOTOS_DEST = os.getcwd()+'/app/static/img/'

    @staticmethod
    def init_app():
        # Hook for environment-specific app initialisation (no-op in base).
        pass
# Development environment configuration
class DevelopmentConfig(Config):
    # Database URI (NOTE(review): credentials hard-coded -- move to env vars)
    SQLALCHEMY_DATABASE_URI = 'mysql://root:123456@localhost:3306/development'
# Testing environment configuration
class TestingConfig(Config):
    SQLALCHEMY_DATABASE_URI = 'mysql://root:123456@localhost:3306/testing'
# Production environment configuration
class ProductionConfig(Config):
    SQLALCHEMY_DATABASE_URI = 'mysql://root:123456@localhost:3306/production'
# Registry used by the app factory to select a configuration by name.
config = {
    'development':DevelopmentConfig,
    'testing':TestingConfig,
    'production':ProductionConfig,
    'default':DevelopmentConfig
}
996,590 | a99d5b1fd58c63bbb0e89a2a99066d93181b087e | import unittest
from werkzeug.exceptions import BadRequest
from unittest import mock
from parameterized import parameterized
from salsa.api import user_accounts as user_accounts_api
from salsa import permission
from tests.unit.api import ApiUnitTestCase, PermissionsTestCase
from tests import SalsaTestCase
from tests.factories import UserAccountFactory, UserRoleFactory
class TestUserAccountsController(ApiUnitTestCase, SalsaTestCase):
    """CRUD tests for the user_accounts API (list filters, create, update)."""

    def setUp(self):
        super(TestUserAccountsController, self).setUp()
        self.api = user_accounts_api
        self.factory = UserAccountFactory

    def test_list_with_name(self):
        # Three unrelated accounts plus one with a known name; the name
        # filter must return exactly the matching account.
        for _ in range(3):
            UserAccountFactory()

        matched = UserAccountFactory(name='test_name')

        res = self.api.retrieve_list(name=matched.name, token_info=self.user)
        self.assertIsNotNone(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.json), 1)
        self.assertEqual(res.json[0]['id'], str(matched.id))

    def test_list_with_filter(self):
        self._list_with_filter(email='test_user_email_1')

    def test_post(self):
        user_role = UserRoleFactory()
        body = {
            'name': 'test_name',
            'email': 'test_email@email.com',
            'password': 'test_pass',
            'extradata': '{"data": "me"}',
            'user_role_id': str(user_role.id)
        }
        # The password must never be echoed back in the response.
        expected = body.copy()
        expected.pop('password')
        self._post_valid(body, expected=expected, use_expected_for_body=True)

    def test_post_invalid_duplicate(self):
        # Creating a second account with an existing e-mail must fail.
        user_role = UserRoleFactory()
        UserAccountFactory(email='test_email@email.com')
        body = {
            'name': 'test_name',
            'email': 'test_email@email.com',
            'extradata': '{"data": "me"}',
            'password': 'test_pass',
            'user_role_id': str(user_role.id)
        }
        self._post_invalid(body, err='{} already exists')

    def test_post_invalid_email(self):
        user_role = UserRoleFactory()
        body = {
            'name': 'test_name',
            'email': 'invalid_email',
            'extradata': '{"data": "me"}',
            'password': 'test_pass',
            'user_role_id': str(user_role.id)
        }
        with self.assertRaises(BadRequest):
            self._post_invalid(body, err='Email is invalid')

    def test_put(self):
        body = {
            'name': 'test_name',
            'email': 'test_email@email.com'
        }
        self._put_valid(body)

    def test_put_not_found(self):
        self._put_not_found({'name': 'name1'})

    def test_put_invalid_email(self):
        body = {
            'name': 'test_name',
            'email': 'invalid_email'
        }
        with self.assertRaises(BadRequest):
            self._put_valid(body)
class TestUserAccountsPermissions(PermissionsTestCase, SalsaTestCase):
    """Authorization tests: what a normal (non-admin) user may do on accounts."""

    def setUp(self):
        super(TestUserAccountsPermissions, self).setUp()
        self.api = user_accounts_api
        self.factory = UserAccountFactory

    def tearDown(self):
        super(TestUserAccountsPermissions, self).tearDown()

    def _setup_user_himself_and_roles(self):
        # Create both roles plus a normal-role user and the token payload
        # ("prm"/"usr") that the API expects for that user.
        self.admin_role = UserRoleFactory(title=permission.ADMIN_PERM_TITLE)
        self.normal_role = UserRoleFactory(title=permission.USER_PERM_TITLE)
        self.user_himself_inst = UserAccountFactory(user_role=self.normal_role)
        self.user_himself = {
            'prm': {
                'title': permission.USER_PERM_TITLE
            },
            'usr': {
                'id': str(self.user_himself_inst.id)
            }
        }

    # Normal user.. get himself success
    # Normal user.. get someone else normal success
    # Normal user.. get someone else admin success
    # NOTE(review): retrieve_himself is True in all three rows below, so the
    # "someone else" branches (and user_type) are never exercised -- the
    # comments above suggest some rows were meant to pass False. Confirm.
    @parameterized.expand([
        ('normal', True),
        ('admin', True),
        ('normal', True),
    ])
    def test_get(self, user_type, retrieve_himself):
        self._setup_user_himself_and_roles()
        if retrieve_himself:
            user_try_to_fetch = self.user_himself_inst
        else:
            if user_type == 'normal':
                user_try_to_fetch = UserAccountFactory(user_role=self.normal_role)
            elif user_type == 'admin':
                user_try_to_fetch = UserAccountFactory(user_role=self.admin_role)

        res = self.api.retrieve(user_try_to_fetch.id, token_info=self.user_himself)
        self.assertIsNotNone(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.json['id'], str(user_try_to_fetch.id))

    def test_list(self):
        # Listing accounts is reserved for admins: a normal user gets 403.
        self._setup_user_himself_and_roles()
        res = self.api.retrieve_list(token_info=self.user_himself)
        self.assertIsNotNone(res.data)
        self.assertEqual(res.status_code, 403)
        self.assertEqual(res.json['detail'], 'Insufficient permissions')

    # Only reserved for admins.. Same as test_list
    def test_list_empty(self):
        pass

    # No auth user.. create normal success
    # No auth user.. create admin fail
    @parameterized.expand([
        ('normal', True),
        ('admin', False),
    ])
    def test_create_no_auth(self, user_type, is_successful):
        self._setup_user_himself_and_roles()
        body = {
            'name': 'name_test',
            'email': 'test_email@email.com',
            'extradata': '{"data": "me"}',
            'password': 'pass_test'
        }
        if user_type == 'normal':
            body['user_role_id'] = str(self.normal_role.id)
        elif user_type == 'admin':
            body['user_role_id'] = str(self.admin_role.id)

        res = self.api.create_no_auth(body=body, token_info={})
        if is_successful:
            self.assertIsNotNone(res.data)
            self.assertEqual(res.status_code, 201)
            self.assertEqual(res.json['email'], body['email'])
        else:
            # Self-registration with the admin role must be rejected.
            self.assertIsNotNone(res.data)
            self.assertEqual(res.status_code, 403)
            self.assertEqual(
                res.json['detail'], f'Using this role id <{str(self.admin_role.id)}> is not allowed.')

    # Normal user.. update himself success
    # Normal user.. update someone else normal fail
    # Normal user.. update someone else admin fail
    @parameterized.expand([
        ('normal', True),
        ('admin', False),
        ('normal', False),
    ])
    def test_update(self, user_type, update_himself):
        self._setup_user_himself_and_roles()
        if update_himself:
            user_try_to_update = self.user_himself_inst
        else:
            if user_type == 'normal':
                user_try_to_update = UserAccountFactory(user_role=self.normal_role)
            elif user_type == 'admin':
                user_try_to_update = UserAccountFactory(user_role=self.admin_role)

        res = self.api.update(user_try_to_update.id,
                              body={'name': 'new name'},
                              token_info=self.user_himself)
        if update_himself:
            self.assertIsNotNone(res.data)
            self.assertEqual(res.status_code, 200)
            self.assertEqual(res.json['id'], str(user_try_to_update.id))
            self.assertEqual(res.json['name'], 'new name')
        else:
            # Other users' accounts are hidden: the API answers 404, not 403.
            self.assertIsNotNone(res.data)
            self.assertEqual(res.status_code, 404)
            self.assertEqual(
                res.json['detail'], f'User account with id {str(user_try_to_update.id)} not found')
|
996,591 | 96f1bfded8984f8efa5ccbbf9f4fdcefb64de8c6 | # -*- coding: utf-8 -*-
import string
import codecs
from solvertools.util import get_dictfile
from solvertools.phonetic.arpabet import arpa_to_ipa
from collections import defaultdict
def translate_cmu_entry(entry):
    """Split a cmudict line into (word, IPA pronunciation).

    Entries look like "WORD  PHONEMES" (two-space separator); alternate
    pronunciations are written "WORD(2)" and collapsed back to "WORD".
    Hyphens are stripped from the word.
    """
    text, phonemes = entry.split('  ')
    if len(text) > 3 and text[-3] == '(' and text[-1] == ')':
        # secondary entry
        text = text[:-3]
    phonetic = arpa_to_ipa(phonemes)
    text = text.replace('-', '')
    return text, phonetic
# This transformation is ultimately unnecessary, now that I changed the
# wordlist format. Oh well.
def read_cmu_dict(infile):
    """Read an open cmudict file into {word: [ipa_pronunciation, ...]}.

    Comment lines (";;") are skipped and leading punctuation is trimmed
    from entry words.  (Python 2 module: uses string.uppercase.)
    """
    phondict = defaultdict(list)
    for line in infile:
        line = line.strip()
        if line.startswith(';;'):
            continue
        # Trim weird punctuation entries.  Guard on `line`: the original
        # tested line[0] unconditionally, which raises IndexError once the
        # line is trimmed (or stripped) down to the empty string.
        while line and line[0] not in string.uppercase:
            line = line[1:]
        if not line:
            continue
        text, phonetic = translate_cmu_entry(line)
        if phonetic not in phondict[text]:
            phondict[text].append(phonetic)
    return phondict
def make_dict():
    """Build the phonetic wordlist: read cmudict 0.7a and write "word,IPA"
    lines (one per pronunciation) to phonetic.txt, echoing each line.
    (Python 2 only: uses `print >>` and list.sort on dict.keys().)
    """
    infile = open(get_dictfile('cmudict.0.7a'))
    phondict = read_cmu_dict(infile)
    infile.close()
    outfile = codecs.open(get_dictfile('phonetic.txt'), 'w',
      encoding='utf-8')
    keys = phondict.keys()
    keys.sort()
    for word in keys:
        for phon in phondict[word]:
            print >> outfile, u"%s,%s" % (word, phon)
            print u"%s,%s" % (word, phon)
    outfile.close()
if __name__ == '__main__':
make_dict()
|
996,592 | 65e5e3fdae9fe3dea2990ac1b9500f7acb01550b | def origin(messages, index):
j = 0
for j in range(0, len(messages)):
if messages[index] == index:
return index
break
else:
index = messages[index]
if __name__ == "__main__":
with open("MessageOriginsIN.txt", "r") as f:
while True:
s = f.readline()
if s == "":
break
data = s.split(" ")
N = int(data[len(data) - 1])
messages = [int(x) for x in data[:-1]]
print origin(messages, N)
|
996,593 | d5f42748ee1de12c7a07a4e22642e905d362cd5c | # Geral
from functools import partial
# Kivy
from kivy.clock import Clock
from kivy.properties import StringProperty, ObjectProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.scrollview import ScrollView
from kivy.uix.textinput import TextInput
# Base
from plugins.db import DB
from plugins.ferramentas import Ferramentas
from gerenciador import Gerenciador
db = DB()
ge = Gerenciador()
fe = Ferramentas()
__all__ = ['db', 'ge', 'fe', 'Formulario', 'FormEntradaMascara', 'FormEntrada', 'FormConteudo', 'FormRotulo', 'FormAviso', 'FormBotao', 'FormSeletorOpcoesConteudo', 'FormSeletorOpcao', 'FormSeletor', 'FormSeletorPopupConteudo', 'MenuBotao', 'Menu', 'PesquisaBotao', 'PesquisaResultado', 'PesquisaResultadoConteudo', 'PesquisaResultadoTitulo', 'PesquisaResultadoTituloRotulo', 'PesquisaResultadoComum', 'PesquisaResultadoComumRotulo', 'PesquisaResultadoComumBotao']
# [1. Widgets dos Formulários]:
# Widget Principal
class Formulario(ScrollView):
    """Scrollable form container: collects values from its child widgets and
    submits them through the Gerenciador (manager) singleton `ge`."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # True while the form is being submitted (re-entrancy guard)
        self.enviando = False
        # Show a success notice when the submission succeeds
        self.aviso_sucesso = False
        # Holds the form's field values during submission
        self.dados = {}

    # Collect and submit the values of the form's child widgets.
    def enviar_dados(self, nome):
        # Only proceed when the form has children and is not mid-submit
        if (len(self.children) > 0 and self.enviando == False):
            self.enviando = True
            # Access the form's child widgets
            form_conteudo = self.children[0]
            form_conteudo_itens = form_conteudo.children
            # Reset the collected data
            self.dados = {}
            for i in range(len(form_conteudo_itens)):
                widget = form_conteudo_itens[i]
                # A widget contributes to self.dados only when it has both an
                # obter_valor() method and a non-empty "nome" attribute
                try:
                    if (len(widget.nome) > 0):
                        self.dados[widget.nome] = widget.obter_valor()
                except Exception as e:
                    # Duck-typing: widgets without nome/obter_valor are skipped
                    pass
            # Submit the form through Gerenciador.envio_formulario
            ge.envio_formulario(nome, self.dados, self, self.aviso_sucesso)
            self.enviando = False

    # Reset the information of all form fields.
    def resetar(self, manter_aviso=False):
        # Only when the form has child widgets
        if (len(self.children) > 0):
            # Access the form's child widgets
            form_conteudo = self.children[0]
            form_conteudo_itens = form_conteudo.children
            for i in range(len(form_conteudo_itens)):
                widget = form_conteudo_itens[i]
                # Widgets exposing resetar() are reset; when manter_aviso is
                # True the FormAviso widget keeps its current notice
                try:
                    if (manter_aviso == True):
                        # Skip the FormAviso widget
                        if (widget.__class__.__name__ != 'FormAviso'):
                            widget.resetar()
                    else:
                        widget.resetar()
                except Exception as e:
                    # Widget has no resetar() method
                    pass

    # Show the form's notice (FormAviso) with the given text/color.
    def aviso(self, texto, cor='vermelho'):
        # Only when the form has child widgets
        if (len(self.children) > 0):
            # Access the form's child widgets
            form_conteudo = self.children[0]
            form_conteudo_itens = form_conteudo.children
            for i in range(len(form_conteudo_itens)):
                widget = form_conteudo_itens[i]
                try:
                    # Only the FormAviso widget
                    if (widget.__class__.__name__ == 'FormAviso'):
                        # Reveal the notice and stop searching
                        widget.revelar(texto, cor)
                        break
                except Exception as e:
                    # Not a FormAviso widget
                    pass
# TextInput com Máscara
class FormEntradaMascara(TextInput):
    """TextInput that applies a display mask (self.mascara) to its text."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Mask format applied to the text (set by the .kv rule / caller)
        self.mascara = ''

    def resetar(self):
        # Clear the field
        self.text = ''

    def obter_valor(self):
        # Current (masked) text value
        return self.text

    # Re-mask the text whenever it changes.
    def on_text(self, instancia, texto):
        # Apply the mask helper to the raw text
        texto = fe.mascara(texto, self.mascara)
        # Replace the TextInput contents with the masked version
        self.text = texto
        # Move the cursor to the end of the text
        Clock.schedule_once(partial(self.do_cursor_movement, 'cursor_end'))
# TextInput Normal
class FormEntrada(TextInput):
    """Plain form TextInput exposing the resetar()/obter_valor() protocol."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def resetar(self):
        # Clear the field
        self.text = ''

    def obter_valor(self):
        # Current text value
        return self.text
class FormConteudo(BoxLayout):
pass
class FormRotulo(Label):
pass
class FormAviso(ButtonBehavior, BoxLayout):
    """Inline form notice: hidden (transparent) by default, colored when shown."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Available notice colors (RGBA): red, blue, green
        self.cores = {
            'vermelho': (.85, .27, .27, 1),
            'azul': (.1, .5, .85, 1),
            'verde': (.10, .85, .32, 1)
        }

    def resetar(self):
        self.ocultar()

    def ocultar(self):
        # Hide: empty text and a fully transparent color
        self.texto = ''
        self.cor = 0, 0, 0, 0

    def revelar(self, texto, cor='vermelho'):
        # Show the notice with the given text and color (no-op on empty text)
        if (len(texto) > 0):
            self.texto = texto
            self.cor = self.cores[cor]
class FormBotao(ButtonBehavior, Label):
pass
class FormSeletorOpcoesConteudo(BoxLayout):
pass
class FormSeletorOpcao(ButtonBehavior, BoxLayout):
pass
class FormSeletor(ButtonBehavior, BoxLayout):
    """Drop-down style selector: displays the chosen option and opens a popup
    listing the available options (optionally loaded from the database)."""

    # Id (key) of the chosen option
    id_escolhido = None
    # Text displayed by the selector
    texto = StringProperty('')
    opcoes = ObjectProperty({})
    # Id of the default option
    opcoes_base = None
    padrao = StringProperty('')

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Defer until properties set by the .kv rule are available
        Clock.schedule_once(self.backup)
        Clock.schedule_once(self.opcoes_via_db)
        Clock.schedule_once(self.opcao_padrao)

    # Snapshot the current settings so resetar() can restore them.
    def backup(self, *args):
        self.base = {
            'id_escolhido':self.id_escolhido,
            'texto':self.texto,
            'opcoes':self.opcoes,
            'padrao':self.padrao
        }

    def opcoes_via_db(self, *args):
        # Load options from the DB when self.opcoes carries a 'db' spec;
        # any 'extra' entries are kept alongside the DB rows.
        if (self.opcoes_base == None):
            self.opcoes_base = self.opcoes
        self.opcoes = self.opcoes_base
        try:
            if ('db' in self.opcoes):
                db.checar(self.opcoes['db'][0], self.opcoes['db'][1], self.opcoes['db'][2], self.opcoes['db'][3])
                resultado = db.resultado
                if (len(self.opcoes['extra']) > 0):
                    self.opcoes = self.opcoes['extra']
                else:
                    self.opcoes = {}
                for i in range(len(resultado)):
                    self.opcoes[str(resultado[i]['id'])] = resultado[i]['valor']
        except Exception as e:
            # NOTE(review): failures are only printed, which can leave
            # self.opcoes half-populated -- confirm this is acceptable
            print(e)

    def resetar(self):
        # Restore the snapshot taken by backup() and re-apply the default
        self.id_escolhido = self.base['id_escolhido']
        self.texto = self.base['texto']
        self.opcoes = self.base['opcoes']
        self.padrao = self.base['padrao']
        Clock.schedule_once(self.opcao_padrao)

    def obter_valor(self):
        # The form reads the chosen option's id
        return self.id_escolhido

    def opcao_padrao(self, *args):
        # Select the default option when it exists among the options
        if (self.padrao in self.opcoes):
            self.id_escolhido = self.padrao
            self.texto = self.opcoes[self.padrao]

    def opcao_escolhida(self, identificador, texto):
        # Callback from a FormSeletorOpcao: record the choice, close the popup
        self.id_escolhido = identificador
        self.texto = texto
        self.popup.dismiss()

    def exibir_opcoes(self):
        # Build and open the popup listing all options, marking the current one
        self.opcoes_via_db()
        self.conteudo = FormSeletorPopupConteudo()
        self.fsoc_scroll = ScrollView(pos_hint = {'center_x': .5, 'center_y': .5}, bar_margin=2, do_scroll_x=False, do_scroll_y=True)
        self.fsoc = FormSeletorOpcoesConteudo()
        self.fsoc.form_seletor = self
        self.fso = []
        for i in self.opcoes.items():
            ilista = list(i)
            ide = ilista[0]
            valor = ilista[1]
            self.fso.append(FormSeletorOpcao())
            self.fso[-1].identificador = ide
            self.fso[-1].texto = valor
            # Highlight the currently selected option
            if (self.id_escolhido == self.fso[-1].identificador):
                self.fso[-1].ativo = True
            else:
                self.fso[-1].ativo = False
            self.fsoc.add_widget(self.fso[-1])
        self.fsoc_scroll.add_widget(self.fsoc)
        self.conteudo.add_widget(self.fsoc_scroll)
        self.popup = Popup(title='', separator_height=0, size_hint=(.85, .85), auto_dismiss=True, content=self.conteudo, border=(0, 0, 0, 0), background='imagens/sistema/pixel_transparente.png', background_color=[0, 0, 0, .35])
        self.popup.open()
class FormSeletorPopupConteudo(BoxLayout):
pass
# Widgets do Menu
class MenuBotao(ButtonBehavior, Label):
pass
class Menu(BoxLayout):
    """Navigation menu: builds one MenuBotao per option from the Gerenciador."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Defer building until self.tela / self.ids are set by the .kv rule
        Clock.schedule_once(self.construir_menu)

    def construir_menu(self, *args):
        # The button of the currently active screen gets the highlight
        # (blue) colors and the inverted icon.
        op = ge.opcoes_menu()
        for i in range(len(op)):
            cor = (.85, .85, .85, 1) if (op[i]['tela'] != self.tela.name) else (.1, .5, .85, 1)
            cor_borda = (.7, .7, .7, 1) if (op[i]['tela'] != self.tela.name) else (.08, .43, .69, 1)
            icone = op[i]['icone'] if (op[i]['tela'] != self.tela.name) else op[i]['icone_i']
            mb = MenuBotao()
            mb.icone = icone
            mb.cor = cor
            mb.cor_borda = cor_borda
            # Pressing a button switches to that option's screen
            mb.bind(on_press=partial(self.tela.mudar_tela, op[i]['tela']))
            self.ids.opcoes.add_widget(mb)
# Widgets de exibição dos resultados (Tabela)
class PesquisaBotao(ButtonBehavior, Label):
pass
class PesquisaResultado(ScrollView):
pass
class PesquisaResultadoConteudo(BoxLayout):
pass
class PesquisaResultadoTitulo(BoxLayout):
pass
class PesquisaResultadoTituloRotulo(Label):
pass
class PesquisaResultadoComum(BoxLayout):
pass
class PesquisaResultadoComumRotulo(Label):
pass
class PesquisaResultadoComumBotao(ButtonBehavior, Label):
pass |
996,594 | 199ea19d0fb83667d1fa63e183e96de28e9e710a | # try:
# from umihico_commons.google_cloud_vision_api.request import get_text_dict
# except (Exception, ) as e:
# from .umihico_commons.google_cloud_vision_api.request import get_text_dict
#
# from threading import Thread
#
#
|
996,595 | 135053df77c4f559b5c142f6dbb38cfa2eea0860 | # homework assignment section 8-16-2
from chapter08 import printing_functions as pf
pf.sandwich('ham', 'cheddar', 'mustard') |
996,596 | 4c98d9e8933e0174ef95cbc5e5de0ef77d578aaa | # -*- coding: utf-8 -*-
"""health-insurance-cost-predicition (1).ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1jqmXZi6u-gvmF1m7dUFVM9plXZT8scis
# Prediction of Health Insurance Cost by Linear Regression
### Loading the libraries and modules
"""
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import scipy as sp
import sklearn as sk
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.model_selection import cross_val_score, KFold
from sklearn import model_selection
from sklearn import linear_model
from sklearn.metrics import mean_squared_error,mean_absolute_error
"""### Loading the data"""
from google.colab import files
uploaded = files.upload()
insurance = pd.read_csv('insurance.csv')
insurance.info()
"""#### First, we define a function to distinguish smokers and non-smokers"""
def map_smoking(column):
    """Encode smoker labels as ints: 1 for "yes", 0 for anything else.

    Accepts any iterable of labels and returns a list (comprehension form
    of the original append loop; identical output).
    """
    return [1 if row == "yes" else 0 for row in column]
insurance["smoker_norm"]=map_smoking(insurance["smoker"])
nonnum_cols=[col for col in insurance.select_dtypes(include=["object"])]
"""#### Also, we will create a new feature that distinguishes obese and non-obese individuals"""
def map_obese(column):
    """Flag BMI values above 30 as obese (1); everything else maps to 0."""
    flags = []
    for bmi in column:
        flags.append(1 if bmi > 30 else 0)
    return flags
# Binary obesity flag: BMI above 30 counts as obese.
insurance["obese"]=map_obese(insurance["bmi"])
insurance.head(5)
"""### We now explore the relation between the features given and the insurance costs"""
# Scatter-plot charges against each candidate feature (smoker/region/etc. excluded).
colnum=len(insurance.columns)-3
fig,ax=plt.subplots(colnum,1,figsize=(3,25))
ax[0].set_ylabel("charges")
p_vals={}
for ind,col in enumerate([i for i in insurance.columns if i not in ["smoker","region","charges","sex_norm"]]):
    ax[ind].scatter(insurance[col],insurance.charges,s=5)
    ax[ind].set_xlabel(col)
    ax[ind].set_ylabel("charges")
plt.show()
# Absolute Pearson correlation of every numeric column with the charges.
corr_vals=[]
collabel=[]
for col in [i for i in insurance.columns if i not in nonnum_cols]:
    p_val=sp.stats.pearsonr(insurance[col],insurance["charges"])
    corr_vals.append(np.abs(p_val[0]))
    print(col,": ",np.abs(p_val[0]))
    collabel.append(col)
plt.bar(range(1,len(corr_vals)+1),corr_vals)
plt.xticks(range(1,len(corr_vals)+1),collabel,rotation=45)
plt.ylabel("Absolute correlation")
"""### Apparently, smoking, age and obesity are the factors that contribute the most in the calculation of insurance costs. We will only use those features for our predictions."""
# Only the three features most correlated with charges are used for the regression.
cols_not_reg3=['age', 'obese', 'smoker_norm']
"""We will make our predictions using Linear Regression, for which we will model the relationship between the three variables and insurance costs by fitting a linear equation to observed data. <br>
We will assume that the model for multiple linear regression, given n=3 observations, is : <br>
y = a*x1 + b*x2 + c*x3 + i <br>
where:<br>
y is the health insurance cost <br>
a is the age penalty <br>
b is the obesity penalty, while x2 will accept a value of 1 for obese individuals and 0 for non-obese ones. <br>
c is the penalty to smokers, for which x3 will have a value of 1 <br>
i is the intercept of the equation <br>
### We will make our predictions using K-fold cross validation
In k-fold cross-validation, we create the testing and training sets by splitting the data into **k** equally sized subsets. We then treat a single subsample as the testing set, and the remaining data as the training set. We then run and test models on all **k** datasets, and average the estimates. Let’s try it out with 10 folds and using Linear Regression:
"""
# 10-fold cross-validated linear regression of charges on the three features.
kf=KFold(n_splits=10, random_state=1, shuffle=True)
intercepts=[]
mses=[]
coefs=[]
for train_index, test_index in kf.split(insurance[cols_not_reg3]):
    lr=linear_model.LinearRegression()
    lr.fit(insurance[cols_not_reg3].iloc[train_index],insurance["charges"].iloc[train_index])
    lr_predictions=lr.predict(insurance[cols_not_reg3].iloc[test_index])
    lr_mse=mean_squared_error(insurance["charges"].iloc[test_index],lr_predictions)
    intercepts.append(lr.intercept_)
    coefs.append(lr.coef_)
    mses.append(lr_mse)
# Average the per-fold error and model parameters across the 10 folds.
rmses=[x**.5 for x in mses]
avg_rmse=np.mean(rmses)
avg_intercept=np.mean(intercepts)
age_coefs=[]
obesity_coefs=[]
smoking_coefs=[]
for vals in coefs:
    # Coefficient order follows the column order of cols_not_reg3.
    age_coefs.append(vals[0])
    obesity_coefs.append(vals[1])
    smoking_coefs.append(vals[2])
age_coef=np.mean(age_coefs)
obesity_coef=np.mean(obesity_coefs)
smoking_coef=np.mean(smoking_coefs)
print("a: ",age_coef," b: ",obesity_coef," c: ",smoking_coef," intercept: ",avg_intercept)
"""### After we obtain the LR coefficients, we define a function that will automatically predict a insurance cost value given age, obesity and smoking parameters"""
def calculate_insurance(age, obesity, smoking):
    """Predict an insurance charge with the fitted linear model.

    Relies on the module-level averaged coefficients (age_coef,
    obesity_coef, smoking_coef) and intercept computed by the K-fold
    regression earlier in this script.
    """
    prediction = avg_intercept
    prediction += age_coef * age
    prediction += obesity_coef * obesity
    prediction += smoking_coef * smoking
    return prediction
"""### For example, a 34 year old, obese and smoker individual will have to pay the following price for his insurance:"""
print(calculate_insurance(34,1,1)) |
996,597 | d84b185c56939d66af615cd64e36c63739ab96f9 | #Booleans
print(10 < 8)
print(21 > 12)
print(34 == 22)
# If statements with booleans
a = 200
b = 33
if b > a:
    print("b is greater than a")
else:
    print("b is not greater than a")
# bool() evaluates any value and returns True or False
print(bool("Hello"))
print(bool(15))
# Lists (arrays)
thislist =["apple", "banana", "cherry"]
print(thislist)
# Accessing an item in the list by index
thislist =["apple", "banana", "cherry"]
print(thislist[1])
# Negative indexing counts from the end: -1 is the last item, -2 the second last, etc.
thislist =["apple", "banana", "cherry"]
print(thislist[-1])
# Slicing: grab the values within the specified range of the list
thislist = ["apple", "banana", "cherry", "orange", "kiwi", "melon", "mango"]
print(thislist[2:5])
# Looping through a list
thislist = ["apple", "banana", "cherry"]
for x in thislist:
    print(x)
# Use `in` to check whether an item is in the list.
thislist = ["apple", "chess", "triangle"]
if "apple" in thislist:
    print("Yes Apple is in this list")
# Getting the length of the list
thislist = ["apple", "Brave", "Force"]
print(len(thislist))
# Appending to a list
thislist = ["apple", "Brave", "Force"]
thislist.append("Orange")
print(thislist)
# Inserting an item at a specified position
thislist = ["apple", "Brave", "Force"]
thislist.insert(1,"Orange")
print(thislist)
# remove() and pop() delete items from a list
# Make a copy of a list with the copy() method:
thislist = ["apple", "banana", "cherry"]
mylist = thislist.copy()
print(mylist)
# Join two lists with +
list1 = ["a", "b" , "c"]
list2 = [1, 2, 3]
list3 = list1 + list2
print(list3)
# Tuples are ordered, unchangeable collections and use ()
# Syntax for tuples is similar to lists
# Items cannot be added after creation
# Sets are unordered collections of unique items and are mutable
# A dictionary is a collection which is unordered, changeable and indexed.
# In Python dictionaries are written with curly brackets,
# and they have keys and values.
# Example:
thisdict = {
    "brand": "Ford",
    "model": "Mustang",
    "year": 1964
}
print(thisdict)
# Create a dictionary that contains three nested dictionaries:
myfamily = {
    "child1" : {
        "name" : "Emil",
        "year" : 2004
    },
    "child2" : {
        "name" : "Tobias",
        "year" : 2007
    },
    "child3" : {
        "name" : "Linus",
        "year" : 2011
    }
}
print(myfamily)
# Create three dictionaries, then one dictionary that contains the other three:
child1 = {
    "name" : "Emil",
    "year" : 2004
}
child2 = {
    "name" : "Tobias",
    "year" : 2007
}
child3 = {
    "name" : "Linus",
    "year" : 2011
}
myfamily = {
    "child1" : child1,
    "child2" : child2,
    "child3" : child3
}
print(myfamily)
|
996,598 | 7449ff63b52da9b48764531d4fd9d74538cdac35 | n, l = map(int, input().split())
# k: required number of selected positions; a: candidate positions along the segment.
k = int(input())
a = list(map(int, (input().split())))
# Binary-search bounds on the answer (the spacing value i tested by check()).
li = 0
ri = 10 ** 9
def check(i, positions=None, count=None, length=None):
    """Greedily test whether `count` positions with spacing >= i can be chosen.

    A position p is taken when it is at least i away from the previously
    taken one (starting from 0) and at least i away from the right end
    `length`.

    Parameters:
        i: candidate minimum spacing.
        positions: sorted candidate positions (defaults to module-level `a`).
        count: required number of selections (defaults to module-level `k`).
        length: segment length (defaults to module-level `l`).

    The keyword parameters default to the stdin-derived globals, so the
    original zero-extra-argument call sites keep working unchanged.
    """
    pts = a if positions is None else positions
    need = k if count is None else count
    total = l if length is None else length
    taken = 0
    prev = 0
    for p in pts:
        if p - prev >= i and total - p >= i:
            taken += 1
            prev = p
        if taken >= need:
            return True
    return False
# Binary search on the answer: check(i) is monotone (harder to satisfy as i
# grows), so li converges to the largest feasible spacing (0 if none is feasible).
while ri - li > 1:
    mi = (ri + li) // 2
    if check(mi):
        li = mi
    else:
        ri = mi
print(li)
|
996,599 | d5e1c68cad6e6d73914c3b518e20b012b42c5608 | #!/usr/bin/env python3
#
"""Analyze and warn of memory leaks as well as uploading result on test machine."""
"""Usage: python3 ./memory_leak_analyser.py --file memory_info.log """
import argparse
import datetime
import json
import os
import statistics
import time
from enum import Enum
from functools import reduce
import math
import numpy as np
import requests
from elasticsearch import Elasticsearch
# Elasticsearch endpoint and WeCom (WeChat Work) webhook; secrets scrubbed to "xxxx".
ELASTIC_URI = "https://es-xxxx"
WECHAT_URL = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=xxxx"
# Mutable run configuration, populated by main().
CONTEXT = {}
TODAY = '{0:%Y-%m-%d}'.format(datetime.datetime.now())
YESTERDAY = datetime.date.today() - datetime.timedelta(days=1)
class Threshold(Enum):
    """Tunable limits used by the analysers below."""
    Notify_Peak = 0.001  # Regression slope above this triggers a notification
    Excess_Ratio = 0.2  # Allowed relative change of cache usage vs. yesterday
    Error_Ratio = 0.01  # Max disagreement between the two slope estimators
    Overshoot_Ratio = 0.25  # Allowed deviation of the last-quarter mean from the overall mean
    Invalid_Count = 20  # Series with this many samples or fewer are not analysed
def main():
    """Parse CLI options, resolve the ES index, then run the analyser."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--branch', metavar='branch', default='arsenal',
                        help='test branch for current run')
    parser.add_argument('--platform', metavar='platform', default='',
                        help='override the smoke test platform')
    parser.add_argument('--jobName', metavar='jobName', default='',
                        help='override the smoke test job name')
    parser.add_argument('--buildNumber', metavar='buildNumber', default='',
                        help='override the smoke test build number')
    parser.add_argument('--index', metavar='index', default='',
                        help='override the elastic search index')
    parser.add_argument('--file', metavar='file', default='',
                        help='override the memory usage file of smoke test')
    args = parser.parse_args()
    # Default index rotates monthly, e.g. sdk_ng_performance_2024_01.
    date = '{0:%Y_%m}'.format(datetime.datetime.now())
    index = "sdk_ng_performance_{}".format(date) if not args.index else args.index
    # Save the config into context.
    # NOTE(review): the hard-coded values below override the parsed CLI
    # arguments (the intended expressions survive in the trailing comments) —
    # this looks like leftover debugging; confirm before relying on the flags.
    CONTEXT['branch'] = "arsenal"  # args.branch
    CONTEXT['platform'] = "windows"  # args.platform if args.platform else platform
    CONTEXT['jobName'] = "Smoke_Test_Linux_Analysis_release"  # args.jobName
    CONTEXT['buildNumber'] = "4060"  # args.buildNumber
    CONTEXT['index'] = index
    CONTEXT['file'] = "memory_info.log"  # args.file
    pprint("[Index]:", index.upper())
    if not CONTEXT['file']:
        pprint("No File Input")
        return
    memory_leak_analyser(CONTEXT['file'])
def memory_leak_analyser(file):
    """Parse the memory log, upload today's metrics, and alert on anomalies.

    `file` is a text log whose relevant lines look like
    "i420_cache_usage: <int>" and "process_mem_usage: <int>".
    """
    with open(file, "r") as f:
        lines = f.readlines()
    cache, proc_mem = [], []
    for line in lines:
        if line.startswith("i420_cache_usage"):
            cache.append(int(line.split(":")[1].strip()))
            continue
        if line.startswith("process_mem_usage"):
            proc_mem.append(int(line.split(":")[1].strip()))
            continue
    # Upload today's peak slope and mean cache usage (element [1] of each result).
    upload_to_esearch(
        memory_peak_analyser(proc_mem)[1],
        memory_overshoot_analyser(cache)[1])
    # NOTE(review): each analyser runs a second time below (and the overshoot
    # analyser queries Elasticsearch on every call) — redundant work.
    exps = {
        "Memory Peak Exception": memory_peak_analyser,
        "Memory Overshoot Exception": memory_overshoot_analyser
    }
    for exp, analyser in exps.items():
        if (exp == "Memory Peak Exception" and analyser(proc_mem)[0]) or (
                exp == "Memory Overshoot Exception" and analyser(cache)[0]):
            notify_wechat(CONTEXT['branch'], CONTEXT['platform'], exp, CONTEXT['jobName'], CONTEXT['buildNumber'])
def notify_wechat(target_branch, target_os, event, job_name, build_number, wechat_url=WECHAT_URL):
    """Post a markdown alert card to the WeCom (WeChat Work) group webhook.

    `event` names the detected anomaly; the link points at the Jenkins
    console of the triggering build. (The Chinese link text reads
    "view details on CI".)
    """
    send_info = "<font color=\"red\">Smoke Test Memory Leak.</font> Please deal with them as soon as possible.\n " \
                ">** {gen_report_date} {target_branch} {target_os} **\n " \
                ">** Type : <font color=\"warning\">{event}</font> ** \n" \
                "\n[前往CI查看详情](http://localhost/job/SDK_CI/job/Daily-Test/job/{job_name}/{build_number}/console)\n".format(
        gen_report_date=TODAY, target_branch=target_branch, target_os=target_os, event=event, job_name=job_name,
        build_number=build_number)
    payload = {
        "msgtype": "markdown",
        "agentid": 1,
        "markdown": {
            "content": send_info
        },
        "safe": 0,
        "enable_id_trans": 0,
        "enable_duplicate_check": 0
    }
    # Fire-and-forget: the response is not checked.
    requests.post(wechat_url, data=json.dumps(payload))
def memory_overshoot_analyser(mem, keywords="cache_usage"):
    """Compare today's mean of `mem` against yesterday's value stored in ES.

    Returns (exceeded, today_mean): exceeded is True when the relative
    change versus yesterday's `keywords` field is at least
    Threshold.Excess_Ratio.
    """
    if len(set(mem)) == 1 and mem[0] == 0:
        # An all-zero series carries no signal; report it as-is.
        return (False, mem[0])
    tday_avg = statistics.mean(mem)
    yday_avg = search_from_esearch(
        CONTEXT['branch'], CONTEXT['platform'], keywords)
    pprint("[{} Usage]: {}".format(keywords, tday_avg))
    try:
        if abs((tday_avg - yday_avg) / yday_avg) >= Threshold.Excess_Ratio.value:
            return (True, tday_avg)
    except ZeroDivisionError:
        pass  # no baseline available for yesterday
    return (False, tday_avg)
def memory_peak_analyser(mem):
    """Decide whether the process-memory series shows a leak-like trend.

    Returns (alert, peak) where peak is the regression slope from
    calc_peak, or (False, "InvalidValue") when the series is constant
    or too short to analyse meaningfully.
    """
    if len(set(mem)) == 1:  # constant series: no trend to fit
        return (False, "InvalidValue")
    if len(mem) <= Threshold.Invalid_Count.value:  # too few samples
        return (False, "InvalidValue")
    # Compute the slope once; the original re-ran calc_peak up to three times.
    peak = calc_peak(mem)
    if if_overshoot(mem) or peak >= Threshold.Notify_Peak.value:
        print("[Peak Value]: {}".format(peak))
        return (True, peak)
    return (False, peak)
def calc_peak(mem):
    """
    Estimate the slope of `mem` regressed on its sample index.

    Solves the ordinary-least-squares normal equations
    (X^T X)^{-1} X^T y for the slope term (the original description said
    "ridge regression", but no regularisation term is added), then
    cross-checks it against the average of the two independent slope
    helpers line_trend/line_fit; when the two estimates disagree by more
    than Threshold.Error_Ratio, the helper average is preferred.

    Returns:
        The chosen slope rounded to 4 decimals, or None when the design
        matrix is singular (callers must tolerate None).
    Note:
        `mem` is converted to a design matrix like [[1.0, 0], [1.0, 1], ...].
    Website:
        http://www.cuijiahua.com/
    """
    xArr, yArr = [[1.0, space] for space in list(range(len(mem)))], mem
    xMat = np.mat(xArr)
    yMat = np.mat(yArr).T
    xTx = xMat.T * xMat
    if np.linalg.det(xTx) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return  # implicit None
    # [1, 0] selects the slope coefficient (index 1 of the 2x1 solution).
    double_peak, line_pred_avg = (
        xTx.I * (xMat.T * yMat))[1, 0], (
        line_trend(mem) + line_fit(mem)) / 2  # Regression coefficient
    peak = double_peak if abs(line_pred_avg -
                              double_peak) <= Threshold.Error_Ratio.value else line_pred_avg
    return round(peak, 4)
def upload_to_esearch(proc_mem_peak, cache):
    """Best-effort upload of today's metrics to Elasticsearch.

    Failures are logged instead of raised so that a metrics outage never
    fails the smoke-test job itself.
    """
    pprint('[INFO]: Upload to Elastic Search')
    HEADERS = {"Content-Type": "application/json"}
    AUTH = ('elastic', 'auth_xxxx')
    url = "{}/{}/_doc".format(ELASTIC_URI, CONTEXT['index'])
    with requests.Session() as s:
        s.auth = AUTH
        s.headers.update(HEADERS)
        send_info = {
            "branch": CONTEXT['branch'],
            "platform": CONTEXT['platform'],
            "date": TODAY,
            "timestamp": int(time.time() * 1000),
            "build_num": os.environ.get('BUILD_NUMBER', 0),
            "proc_mem_peak": proc_mem_peak,
            "cache_usage": cache,
        }
        try:
            s.post(url, data=json.dumps(send_info))
        except Exception as ex:
            # Deliberate best-effort: surface the failure instead of
            # silently discarding it as before, but keep the job alive.
            pprint('[WARN]: Upload failed:', ex)
    print('[INFO]: Upload Done.')
def search_from_esearch(branch, platform, keywords):
    """Fetch yesterday's average value of `keywords` for branch/platform.

    Returns 0 when the query fails or yesterday has no documents. The
    original left `res`/`avg_val` unbound on failure and crashed with a
    NameError; returning 0 lets the caller's ZeroDivisionError guard
    degrade gracefully instead.
    """
    pprint('[INFO]: Search From Elastic Search')
    es = Elasticsearch(
        ["https://es-xxxx"],
        http_auth=(
            "username",
            "password"),
        scheme="https")
    body = {'query': {'bool': {'must': [{'match': {'branch': branch}}, {
        'match': {'platform': platform}}, {'match': {'date': YESTERDAY}}]}}}
    avg_val = 0  # safe default when the lookup yields nothing
    try:
        res = es.search(index=CONTEXT['index'], body=body)
        hits = res.get("hits").get("hits")
        avg_val = sum(int(item.get("_source").get(keywords))
                      for item in hits) / len(hits)
    except ZeroDivisionError:
        pass  # no documents stored for yesterday
    except Exception as ex:
        pprint('[WARN]: Search failed:', ex)
    print('[INFO]: Search Done.')
    return avg_val
def line_fit(mem):
    """Least-squares slope of `mem` regressed on its indices 0..n-1.

    The original also computed the intercept and the correlation
    coefficient; both were unused, and the correlation term divided by
    zero for constant input, so they are dropped here.

    Parameters:
        mem: sequence of numeric samples.
    Returns:
        The fitted slope as a float.
    """
    xs = list(range(len(mem)))
    n = float(len(xs))
    sx = float(sum(xs))
    sy = float(sum(mem))
    sxx = float(sum(x * x for x in xs))
    sxy = float(sum(x * y for x, y in zip(xs, mem)))
    # Standard normal-equation form of the slope (same formula as before).
    return (sy * sx / n - sxy) / (sx * sx / n - sxx)
def line_trend(mem):
    """Slope of the degree-1 polynomial fitted to `mem` over its indices."""
    slope, _intercept = np.polyfit(np.arange(len(mem)), mem, 1)
    return slope
def if_overshoot(mem):
    """True when the mean of the last quarter of `mem` deviates from the
    overall mean by at least Threshold.Overshoot_Ratio (relative)."""
    quarter = len(mem) // 4
    overall = sum(mem) / len(mem)
    tail = sum(mem[-quarter:]) / quarter
    return abs(tail - overall) / overall >= Threshold.Overshoot_Ratio.value
def pprint(*args, **kwargs):
    """print() that always flushes, so CI logs show output immediately."""
    print(*args, flush=True, **kwargs)
if __name__ == '__main__':
main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.