index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
995,100 | fa16f6836e7ae9a359d66d5070d6bdf6d203d3ca | from bs4 import BeautifulSoup
import requests
import os
port = input("Socks listener port: ") # Port that Tor Socks listener working on
direactory = input("Please specify dirlectory: ") # Directory to dump website contents
proxies = {
'http': 'socks5h://127.0.0.1:'+ port,
'https': 'socks5h://127.0.0.1:'+port
}
def create_file(file,code):
try:
if not os.path.exists(direactory):
os.mkdir(direactory) #Creates folder if not exists
file = direactory + "\\" + file #points file to dir.
f = open(file, "w") #open file write mode
f.write(code)
f.close()
except:
return False
def download_page(link):
return requests.get(link,proxies=proxies).text #Downloads specified url content
#GETS CSS FILES AND POINTS IMPORTS IN HTML TO DOWNLOADED
def get_css_files(html,link):
soup = BeautifulSoup(html, 'html.parser')
stylesheets = soup.find_all('link') # gets <link> tags
for css in stylesheets:
if not css.get('type') == 'text/css': #filters nin css type files
continue
css_link = css.get('href') # gets source of css
if css_link[0] == "/": # converts to full if path is relative
css_link = link + css_link[1:]
create_file(get_path_name(css_link,"css"), download_page(css_link)) #downloads creates css file
css['href'] = get_path_name(css_link, "css") # points css href to local file
return soup.prettify() # retuns new version of html
#GETS JS FILES AND POINTS IMPORTS IN HTML TO DOWNLOADED
def get_js_files(html,link):
soup = BeautifulSoup(html, 'html.parser')
scripts = soup.find_all('script') # find <script tags>
for js in scripts:
if js.get('src') == None: # if src is empty
continue
js_link = js.get('src')
if js_link[0] == "/": # converts to full if path is relative
js_link = link + js_link[1:]
create_file(get_path_name(js_link,"js"), download_page(js_link)) #downloads creates js file
js['src'] = get_path_name(js_link, "js")# points js src to local file
return soup.prettify() # retuns new version of html
#GETS IMAGES AND POINTS IMPORTS IN HTML TO DOWNLOADED
def get_images(html,link):
soup = BeautifulSoup(html, 'html.parser')
imgs = soup.find_all('img') # find <img> tags
for img in imgs:
img_link = img.get('src')
if img_link[0] == "/": # converts to full if path is relative
img_link = link + img_link[1:]
download_image(get_path_name(img_link),img_link)
img['src'] = get_path_name(img_link)
return soup.prettify()
def download_image(file, link):
r = requests.get(link, stream=True, proxies=proxies)
if r.status_code == 200:
with open( direactory + "\\" + file, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
def get_path_name(link, extension = "*"):
p_name = link.split("/")[-1].replace("?","").replace("=","") # trims illegal chars
if extension == "*":
return p_name
if len(p_name) < len(extension): #checks extension if not same adds specified ext.
p_name += "." + extension #returns filename
last = p_name[(len(p_name) - len(extension)): ] #checks extension if not same adds specified ext.
if not last == extension:
p_name += "." + extension
return p_name #returns filename
def clone_page(link):
html = download_page(link) # get Index html src
html = get_css_files(html,link) # get update html (downloads and points to local css files)
html = get_js_files(html,link) # get update html (downloads and points to local js files)
html = get_images(html, link)
create_file("index.html",html) #creates index.html with updated html
clone_link = input("Enter Link to clone: ")
clone_page(clone_link)
|
995,101 | 912d31b57545c890e7960d04456eaf232601cfb7 | import unittest
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import src
class DatabaseClassTests(unittest.TestCase):
def test_vault_connection_error(self):
#TODO give some "valid" url
self.fail()
def test_vault_non_existent_database(self):
#TODO test non existent_database
self.fail()
def test_psql_connection(self):
#TODO exec psql connect
print("hi")
test = src.Database()
test.check_valid()
self.assertEqual(test.valid, False)
def test_psql_invalid_vault_url(self):
try:
src.Database(dbname="sdfk", vault_url="localhost", vault_port=8200, token="bla")
except:
#TODO check the err msg
return
else:
self.fail()
def test_psql_invalid_vault_port(self):
try:
src.Database(dbname="sdfk", vault_url="localhost", vault_port="a", token="bla")
except:
#TODO check the error message
return
else:
self.fail()
def test_psql_valid(self):
print(src.Database(dbname="psql", vault_url="http://localhost", token="s.GoR2nisHPeKU1vOaw9hZ5L7h").get_creds())
if __name__ == '__main__':
unittest.main()
|
995,102 | 3e25d2c8c2b494636ac71ea1212bfd83930026f1 | # Edit and move fits
# A module to move and edit the raw FITS files as the become available from the ICS
import numpy as np
import pyfits
import os
import datetime
import re
def movefits(CurrentFileName,DayNumber,current_obs_ra,current_obs_dec,current_obs_az,current_obs_alt,current_epoch):
NEWCARDS = ['RA','DEC','EPOCH','AZ','ALT','TELESCOP']
NEWValues = [current_obs_ra,current_obs_dec,current_epoch,current_obs_az,current_obs_alt,'NMSU 1m']
timestamp = datetime.datetime.now()
rawdirec = '/data-ics/'+DayNumber # Directory for Raw FITS files
pathraw = os.path.abspath(rawdirec) # Path to raw files
MJD5 = int(DayNumber)+55562 # MJD calculted with daynumber
direc = '/data-ql/data/'+str(MJD5) # Directory where edited FITS will be saved
if os.path.exists(direc)!=1:
os.mkdir(direc)
editdirec = '/data-ql/data/'+str(MJD5)+'/1m/' # Directory where edited FITS will be saved
t = editdirec
if os.path.exists(t)!=1:
os.mkdir(t)
pathedit = os.path.abspath(editdirec) # Path for edited FITS
time = str(datetime.datetime.now())
# first extract the value of the checksum from the fits header (pyfits.getval removes
# any checksum or datasum keywords)
f=open(pathraw+'/'+CurrentFileName,'rb')
checksum = None
# only read the first 72 lines (which should be the whole header plus padding)
for p in range(72):
line = f.read(80)
if line[0:8] == 'END ':
break
if line[0:8] == 'CHECKSUM':
checksum = line[11:27]
cs_comment = line[33:80]
f.close()
# open the image with pyfits
#img = pyfits.open(pathraw+'/'+CurrentFileName,do_not_scale_image_data=True)
img = pyfits.open(pathraw+'/'+CurrentFileName,do_not_scale_image_data=True,uint16=True)
print 'checksum: ' +checksum
if checksum != None:
# validate the value of the checksum found (corresponding to DATASUM in pyfits)
# calulate the datasum
ds = img[0]._calculate_datasum('standard')
# add a new CHECKSUM line to the header (pyfits.open removes it) with same comment
print 'updating header CHECKSUM ' + '0'*16 + cs_comment
img[0].header.update("CHECKSUM",'0'*16, cs_comment)
# calulate a new checksum
cs=img[0]._calculate_checksum(ds,'standard')
img[0].header.update("CHECKSUM",cs, cs_comment)
print 'checksum ', checksum
print 'ds ', ds
print 'cs ', cs
if cs != checksum:
print "CHECKSUM Failed for file " + CurrentFileName
# force these to be ints:
# As of August 2013, the ICS writes them both as floats, but the
# FITS standard wants them to be ints.
bscale = int(img[0].header.get('BSCALE',1))
bzero = int(img[0].header.get('BZERO',32768))
del img[0].header['BSCALE']
del img[0].header['BZERO']
img[0].header.update('BSCALE',bscale,after='GCOUNT')
img[0].header.update('BZERO',bzero,after='BSCALE')
strp = re.sub('.fits',"",CurrentFileName) # strip .fits of file name
new = strp + '.fits' # add edit.fits to file name
print 'before'
print img[0].header
for i in range(len(NEWCARDS)):
img[0].header.update(NEWCARDS[i],NEWValues[i],'Taken from 1-meter')
print 'after'
print img[0].header
img[0].header.add_history('FITS file edited'+' '+time)
img.writeto(pathedit+'/'+new, checksum=True)
print 'Done editing',CurrentFileName
return
|
995,103 | e52de144ba1e2ec9e280b6234c4fdcfaf819af67 | """
Fonction recherche, prenant en paramètre un tableau non vide tab (type list) d'entiers et un entier n,
et qui renvoie l'indice de la dernière occurrence de l'élément cherché.
Si l'élément n'est pas présent, la fonction renvoie la longueur du tableau.
"""
def recherche(tab, element):
tmp = ""
if tab == []:
return None
else:
for x in range(len(tab)):
if tab[x] == element:
tmp = x
if tmp == "":
return len(tab)
else:
return tmp
print(recherche([5, 3],3)) |
995,104 | cb99f609054bdeb014c9fcdf8d0f4f6721562d4e | """
========================================================
TEST THE PERCEPTRON CLASSIFIER IN MORE THAN 2 DIMENSIONS
========================================================
"""
#create labels associated with X
def get_y(X):
#create the classification labels
y = np.zeros(N)
#choose condition for label 1
cond1 = X[:, 0]
for i in range(1, N_dim):
cond1 += X[:, i]
idx1 = np.where(cond1 > 0)
#condition for label -1 is just the complement of the label 1 set
idxm1 = np.setdiff1d(np.arange(N), idx1)
#set labels
y[idx1] = 1.0
y[idxm1] = -1.0
return y, idx1, idxm1
import numpy as np
import matplotlib.pyplot as plt
import Perceptron as ptron
plt.close('all')
##########################################################
#generate synthetic LINEARLY SEPERABLE classification data
##########################################################
#number of data points
N = 10000
#number of dimensions (equals number of input nodes here)
N_dim = 50
print('Testing Perceptron using', N_dim, 'input nodes and', N, 'data points.')
#N draws from multivariate normal with mean mu and covariance matrix Sigma
mu = np.zeros(N_dim)
Sigma = np.eye(N_dim)
X = np.random.multivariate_normal(mu, Sigma, size = N)
#get the labels
y, idx1, idxm1 = get_y(X)
##########################
#test the Perceptron class
##########################
#create a Perceptron object
perceptron = ptron.Perceptron(X, y)
#train the perceptron
print('Initial loss =', perceptron.compute_loss())
N_epoch = 100000
print('Training model for', N_epoch, 'epochs...')
perceptron.train(N_epoch, store_loss = True)
print('done.')
print('Trained loss =', perceptron.compute_loss())
##########################################
#validate the trained model on unseen data
##########################################
N_val = 100
X_val = np.random.multivariate_normal(mu, Sigma, size = N_val)
#create the classification labels
y_val, _, _ = get_y(X_val)
loss_val = 0
for i in range(N_val):
#trained prediction
y_hat = perceptron.feed_forward(X_val[i])
if y_hat != y_val[i]:
loss_val += 1
print('Number of validation classification errors=', loss_val)
####################################################################
#compute loss function evolution if value was stored during training
####################################################################
if len(perceptron.loss) > 0:
fig = plt.figure()
ax = fig.add_subplot(111, xlabel='epoch', ylabel='loss')
ax.plot(perceptron.loss)
plt.tight_layout()
plt.show() |
995,105 | c70e3def3edb71cfa46ba929b26e02eaf0cc225d | # coding=utf-8
import datetime
__author__ = 'xbw'
from sqlalchemy import Column, String, create_engine, Integer,DateTime, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
# 创建对象的基类:
Base = declarative_base()
class Admin(Base):
# 表的名字:
__tablename__ = 'admin'
def __init__(self,adminname,password):
self.adminname = adminname
self.password = password
# 表的结构:
id = Column(Integer, primary_key=True,autoincrement=True)
adminname = Column(String(20))
password = Column(String(20))
class Book(Base):
# 表的名字:
__tablename__ = 'book'
def __init__(self,name,count=1):
self.name = name
self.count = count
self.remainder = count
def getContent(self):
return {"bid":self.bid,"name":self.name,"count":self.count,'remainder':self.remainder,'borrow':self.borrow,'reservation':self.reservation,'Unclaimed':self.Unclaimed}
# 表的结构:
bid = Column(Integer, primary_key=True,autoincrement=True)
name = Column(String(45))
count = Column(Integer,default=1)
remainder = Column(Integer,default=1)
borrow = Column(Integer,default=0)
reservation = Column(Integer,default=0)
Unclaimed = Column(Integer,default=0)
reservation_order = Column(Integer,default=0)
# 定义User对象:
class BorrowList(Base):
# 表的名字:
__tablename__ = 'borrowlist'
def __init__(self,userid,bookid,bookname):
self.userid = userid
self.bookid = bookid
self.bookname = bookname
self.borrow_datetime = datetime.datetime.now()
def getContent(self):
return {"blid":self.blid,"userid":self.userid,'bookid':self.bookid,'bookname':self.bookname,'borrow_datetime':self.borrow_datetime.strftime("%Y-%m-%d %H:%M:%S"),'return_datetime':(self.borrow_datetime + datetime.timedelta(days=45)).strftime("%Y-%m-%d %H:%M:%S")}
blid = Column(Integer, primary_key=True,autoincrement=True)
userid = Column(Integer, ForeignKey('user.id'))
bookid = Column(Integer, ForeignKey('book.bid'))
bookname = Column(String(45))
borrow_datetime = Column(DateTime)
class ReservationList(Base):
# 表的名字:
__tablename__ = 'reservationlist'
def __init__(self,userid,bookid,bookname,last_keep_datetime,r_status=0):
self.userid = userid
self.bookid = bookid
self.bookname = bookname
self.reservation_datetime = datetime.datetime.now()
self.last_keep_datetime = last_keep_datetime + datetime.timedelta(days=55)
self.r_status = r_status
def getContent(self):
return {"rlid":self.rlid,'bookid':self.bookid,'bookname':self.bookname,'reservation_datetime':self.reservation_datetime.strftime("%Y-%m-%d %H:%M:%S"),'last_keep_datetime':self.last_keep_datetime.strftime("%Y-%m-%d %H:%M:%S"),'r_status':self.r_status}
# 表的结构:
rlid = Column(Integer, primary_key=True,autoincrement=True)
userid = Column(Integer, ForeignKey('user.id'))
bookid = Column(Integer, ForeignKey('book.bid'))
bookname = Column(String(45))
reservation_datetime = Column(DateTime)
last_keep_datetime = Column(DateTime)
r_status = Column(Integer,default=0)
# 定义User对象:
class User(Base):
# 表的名字:
__tablename__ = 'user'
def __init__(self,username,password):
self.username = username
self.password = password
# 表的结构:
id = Column(Integer, primary_key=True,autoincrement=True)
username = Column(String(20))
password = Column(String(20))
borrowList = relationship("BorrowList")
reservationList = relationship("ReservationList") |
995,106 | 68900d819d08b08bfa6ad81a8dd4159ec327b5e0 | # Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pandas as pd
from beakerx_tabledisplay import TableDisplay, ColumnType, TableDisplayAlignmentProvider
class TestTableDisplayAPI_setAlignmentProviderForType(unittest.TestCase):
def test_should_set_alignment_provider_for_type(self):
# given
df = pd.read_csv(os.path.dirname(__file__) + "/resources/" + 'interest-rates.csv')
table = TableDisplay(df)
# when
table.setAlignmentProviderForType(ColumnType.Double, TableDisplayAlignmentProvider.RIGHT_ALIGNMENT)
table.setAlignmentProviderForType(ColumnType.Integer, TableDisplayAlignmentProvider.LEFT_ALIGNMENT)
table.setAlignmentProviderForType(ColumnType.Boolean, TableDisplayAlignmentProvider.CENTER_ALIGNMENT)
# then
alignment = table.model['alignmentForType']
self.assertEqual(alignment['double'], "R")
self.assertEqual(alignment['integer'], "L")
self.assertEqual(alignment['boolean'], "C")
|
995,107 | 56211561b11ce33e734625c466e48d7fb3bf3537 | import fnmatch
def deployment(change, *options):
message = change["message"]
deploy_strings = ["*PRODUCTION*",
"*STAGING*",
"deploy:"
]
if any(s in message for s in deploy_strings):
change["tags"] = ["deployment"]
return change
def only_releases(change, *options):
if change["type"] == "release":
return change
def remove_auto_commits(change, *options):
message = change["message"]
start_text = ("Scheduled weekly dependency update", "Merge pull request")
if not message.startswith(start_text):
return change
def filter_by_path(change, *options):
if "files" not in change:
return
for file in change["files"]:
for filter in options:
if fnmatch.fnmatch(file, filter):
return change
_FILTERS = {
"deployment": deployment,
"only_releases": only_releases,
"remove_auto_commits": remove_auto_commits,
"filter_by_path": filter_by_path,
}
def filter_out(filters, message):
for filter in filters:
if isinstance(filter, list):
filter, options = filter[0], filter[1:]
else:
options = []
message = _FILTERS[filter](message, *options)
if message is None:
return None
return message
|
995,108 | 6b340f367e2ecf8c3e8778fda38ea01a0499780d | ''' Convert Sorted Array to Binary Search Tree
Easy
1046
104
Favorite
Share
Given an array where elements are sorted in ascending order, convert it to a height balanced BST.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
Example:
Given the sorted array: [-10,-3,0,5,9],
One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST:
0
/ \
-3 9
/ /
-10 5'''
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
nums = [-10,-3,0,5,9]
def sortedArrayToBST(nums):
if len(nums) == 0:
return None
else:
mid = int(len(nums) / 2)
root = TreeNode(nums[mid])
root.left = sortedArrayToBST(nums[:mid])
root.right = sortedArrayToBST(nums[mid+ 1:])
return root
def preOrder(root):
if root is None:
return
print(root.val)
preOrder(root.left)
preOrder(root.right)
root = sortedArrayToBST(nums)
preOrder(root)
|
995,109 | 3e9af7aa69391118ec3e7484bde9dc4f6f8a1ea2 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 1.0.1 on Sat Apr 3 16:27:01 2021
#
# (C)By BigSam
import wx
from wx.core import DIRP_DIR_MUST_EXIST
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
import os
import shutil
from PIL import Image
from os.path import join, getsize
__version__ = '0.1'
picfiles = []
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((640, 480))
self.SetTitle("Resize Icon For STM32CubeIDE")
self.panel_1 = wx.Panel(self, wx.ID_ANY)
self.sizer_1 = wx.BoxSizer(wx.VERTICAL)
self.panel_2 = wx.Panel(self.panel_1, wx.ID_ANY)
self.sizer_1.Add(self.panel_2, 0, wx.EXPAND, 0)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
label_1 = wx.StaticText(self.panel_2, wx.ID_ANY, "STM32CubeIDE Install Root")
sizer_2.Add(label_1, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
self.text_ctrl_1 = wx.TextCtrl(self.panel_2, wx.ID_ANY, "", style=wx.TE_READONLY)
sizer_2.Add(self.text_ctrl_1, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
self.button_1 = wx.Button(self.panel_2, wx.ID_ANY, "Browse")
sizer_2.Add(self.button_1, 0, wx.ALL, 4)
self.panel_3 = wx.Panel(self.panel_1, wx.ID_ANY)
self.sizer_1.Add(self.panel_3, 1, wx.EXPAND, 0)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
self.notebook_1 = wx.Notebook(self.panel_3, wx.ID_ANY)
sizer_3.Add(self.notebook_1, 1, wx.EXPAND, 0)
self.notebook_1_pane_1 = wx.Panel(self.notebook_1, wx.ID_ANY)
self.notebook_1.AddPage(self.notebook_1_pane_1, "Info")
sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
self.tcInfo = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "Please use the \"Browse\" button to specify the installation directory of STM32CubeIDE, and then use the \"Perform Adjustments\" button in the lower left corner to modify the size of those icons.\n\nYou can use the \"Revert\" button to restore those icons. (Size: 16*16)\n\n**Note** that every modified icon will have a backup with the prefix \"old_\" added.", style=wx.TE_MULTILINE | wx.TE_READONLY)
sizer_5.Add(self.tcInfo, 1, wx.ALL | wx.EXPAND, 4)
self.panel_4 = wx.Panel(self.panel_1, wx.ID_ANY)
self.sizer_1.Add(self.panel_4, 0, wx.ALL | wx.EXPAND, 0)
sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
self.btADJ = wx.Button(self.panel_4, wx.ID_ANY, "Perform Adjustments")
self.btADJ.Enable(False)
sizer_4.Add(self.btADJ, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
label_4 = wx.StaticText(self.panel_4, wx.ID_ANY, "New Size")
sizer_4.Add(label_4, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
self.tcNewSizt = wx.TextCtrl(self.panel_4, wx.ID_ANY, "32")
self.tcNewSizt.SetMinSize((48, -1))
sizer_4.Add(self.tcNewSizt, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
sizer_4.Add((0, 20), 1, wx.ALL | wx.EXPAND, 4)
self.btRevert = wx.Button(self.panel_4, wx.ID_ANY, "Revert")
self.btRevert.Enable(False)
sizer_4.Add(self.btRevert, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_4.Add((20, 20), 0, 0, 0)
self.button_2 = wx.Button(self.panel_4, wx.ID_ANY, "Exit")
sizer_4.Add(self.button_2, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 4)
self.panel_4.SetSizer(sizer_4)
self.notebook_1_pane_1.SetSizer(sizer_5)
self.panel_3.SetSizer(sizer_3)
self.panel_2.SetSizer(sizer_2)
self.panel_1.SetSizer(self.sizer_1)
self.Layout()
self.Centre()
self.Bind(wx.EVT_BUTTON, self.openFileDig, self.button_1)
self.Bind(wx.EVT_BUTTON, self.runAdj, self.btADJ)
self.Bind(wx.EVT_TEXT, self.enableADJ, self.tcNewSizt)
self.Bind(wx.EVT_BUTTON, self.RevertIcon, self.btRevert)
self.Bind(wx.EVT_BUTTON, self.exit_program, self.button_2)
# end wxGlade
def openFileDig(self, event): # wxGlade: MyFrame.<event_handler>
Dlg = wx.DirDialog(None, "STM32CudeIDE's install path ...", "", DIRP_DIR_MUST_EXIST)
ret = Dlg.ShowModal()
if ret == wx.ID_OK:
self.text_ctrl_1.Value = Dlg.GetPath()
self.btADJ.Enable()
self.btRevert.Enable()
self.tcInfo.write('\n====\nOK!\n')
def exit_program(self, event): # wxGlade: MyFrame.<event_handler>
self.Close()
def runAdj(self, event): # wxGlade: MyFrame.<event_handler>
self.tcInfo.write('\nRUN PNG\n-------------------------')
scanPic(self.text_ctrl_1.Value, '.PNG', self.tcNewSizt.Value, self.tcInfo)
self.tcInfo.write('\nRUN GIF\n-------------------------')
scanPic(self.text_ctrl_1.Value, '.GIF', self.tcNewSizt.Value, self.tcInfo)
self.tcInfo.write('\n\n*********** RESIZE END! ******************')
self.btADJ.Enable(False)
def RevertIcon(self, event): # wxGlade: MyFrame.<event_handler>
self.tcInfo.write('\nStart Revert\n-------------------------')
revertIDE_icon(self.text_ctrl_1.Value, '.PNG')
revertIDE_icon(self.text_ctrl_1.Value, '.GIF')
self.tcInfo.write('\n*********** REVERT ICON END! ******************')
def enableADJ(self, event): # wxGlade: MyFrame.<event_handler>
self.btADJ.Enable(True)
# end of class MyFrame
class MyApp(wx.App):
def OnInit(self):
self.frame = MyFrame(None, wx.ID_ANY, "")
self.SetTopWindow(self.frame)
self.frame.Show()
return True
# end of class MyApp
def revertIDE_icon(rootdir, pictype):
global picfiles
picfiles = []
srcname = ''
for root, dirs, files in os.walk(rootdir):
for imgfile in files:
if pictype.lower() in imgfile.lower():
picfiles.append(root + '\\' + imgfile)
for imgfile in picfiles:
if 'old_' in imgfile:
if os.path.exists(imgfile.replace('old_', '')):
os.remove(imgfile.replace('old_', ''))
shutil.copyfile(imgfile, imgfile.replace('old_' + srcname, srcname))
def scanPic(rootdir, pictype, newSize, Info_out):
global picfiles
picfiles = []
for root, dirs, files in os.walk(rootdir):
for imgfile in files:
if pictype.lower() in imgfile.lower():
picfiles.append(root + '\\' + imgfile)
Info_out.write("\n\r\n\r Files Count: " + str(len(picfiles)) + "\n\r==========================\n\r" + "\n\r".join(picfiles))
checkSize(pictype.replace('.', ''), newSize)
def checkSize(pictype, newSize):
global picfiles
isOK = True
srcname = ''
for imgfile in picfiles:
if os.path.exists(imgfile):
srcname = os.path.basename(imgfile)
if os.path.getsize(imgfile)>8:
# fix: PIL.UnidentifiedImageError: cannot identify image file
try:
img = Image.open(imgfile)
except:
img = Image.new('RGBA', (16,16))
else:
img = Image.new('RGBA', (16,16))
if img.format != None:
if img.size==(16,16):
if 'old_' in imgfile:
if os.path.exists(imgfile.replace('old_', '')):
os.remove(imgfile.replace('old_', ''))
else:
shutil.copyfile(imgfile, imgfile.replace(srcname, 'old_' + srcname))
newSize = int(newSize)
img = img.resize((newSize, newSize), Image.LANCZOS)
img.convert('RGBA')
if 'old_' in imgfile:
imgfile = imgfile.replace('old_', '')
img.save(imgfile, pictype)
img.close()
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop() |
995,110 | ce8a28431d8f6cf56d46573e28e87d44a29b67b2 | # Write a program that displays a table of the Celsius temperatures 0 through 20
# and their Fahrenheit equivalents. The formula for converting a temperature
# from Celsius to Fahrenheit is F = (9/5)C + 32
# Your program must use a loop to display the table
print('Celsius\tFahrenheit')
print('-------------------')
for celsius in range(21):
fahrenheit = ((9/5) * celsius) + 32
print(celsius, '\t', format(fahrenheit, '.1f'))
|
995,111 | e83ce18439b38731a41137098bdb170ef86c7979 | """Plot clustered data.
"""
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from ad_sensitivity_analysis.plot.aux_functions import get_save_name
from ad_sensitivity_analysis.plot.latexify import parse_word
from ad_sensitivity_analysis.plot.latexify import mappings as latex_mappings
def parse_axis_name(in_p, out_p, reduce_name):
"""
Parameters
----------
in_p
out_p
reduce_name
Returns
-------
Two strings, one for the
"""
if in_p[0] == "d" and in_p != "deposition":
if " " in in_p:
in_p_parse = in_p.split()[0]
reduce_name = in_p.split()[1] + " "
else:
in_p_parse = in_p
data_name = f"{reduce_name}d{out_p}/{in_p}"
label = (
reduce_name
+ r"$\partial$"
+ parse_word(out_p)
+ f"/{parse_word(in_p_parse)}"
)
elif in_p in latex_mappings:
data_name = f"{reduce_name}{in_p}"
label = f"{reduce_name}{parse_word(in_p)}"
else:
data_name = in_p
label = reduce_name + in_p
return data_name, label
# pylint: disable=too-many-arguments, too-many-locals
def plot_cluster_data(
data,
in_p_x,
out_p_x,
in_p_y,
out_p_y,
reduce_name,
logx,
logy,
width,
height,
font_scale,
title,
save_path,
latex,
save,
dot_size,
):
"""
Plot clustered data as either histogram or scatterplot.
Parameters
----------
data : pandas.DataFrame
DataFrame generated using get_cluster().
in_p_x
out_p_x
in_p_y
out_p_y
reduce_name : string
Name prepended to the columns. Should relate to the reduction
applied to the dataset, such as "avg" or "rank" in get_cluster().
logx
logy
width
height
font_scale
title
save_path
latex
save
dot_size
Returns
-------
matplotlib.axes.Axes created using seaborn plot function.
"""
sns.set(rc={"figure.figsize": (width, height), "text.usetex": latex})
fig = Figure()
# pylint: disable=no-member
ax = fig.subplots()
x, x_label = parse_axis_name(in_p_x, out_p_x, reduce_name)
y, y_label = parse_axis_name(in_p_y, out_p_y, reduce_name)
histogram = (in_p_x == in_p_y and out_p_x == out_p_y) or (
in_p_x == in_p_y and "/" not in x
)
if histogram:
palette = "tab10"
if len(set(data["cluster"])) > 10:
palette = "tab20"
sns.histplot(
data=data,
x=x,
hue="cluster",
palette=palette,
multiple="stack",
bins=100,
log_scale=(logx, logy),
ax=ax,
)
else:
sns.scatterplot(
data=data,
x=x,
y=y,
hue="cluster",
palette="tab10",
s=dot_size,
ax=ax,
)
if logx and not histogram:
if np.nanmin(data[x]) < 0:
linthresh = np.nanmin(np.abs(data[x].where(data[x] != 0)))
ax.set_xscale("symlog", linthresh=linthresh)
else:
ax.set_xscale("log")
if logy and not histogram:
if np.nanmin(data[y]) < 0:
linthresh = np.nanmin(np.abs(data[y].where(data[y] != 0)))
ax.set_yscale("symlog", linthresh=linthresh)
else:
ax.set_yscale("log")
ax.tick_params(
axis="both",
which="major",
labelsize=int(10 * font_scale),
)
_ = ax.set_title(title, fontsize=int(12 * font_scale))
ax.set_xlabel(x_label, fontsize=int(11 * font_scale))
ax.set_ylabel(y_label, fontsize=int(11 * font_scale))
legend = ax.get_legend()
legend.set_title("cluster", prop={"size": int(11 * font_scale)})
plt.setp(legend.get_texts(), fontsize=int(10 * font_scale))
ax.yaxis.get_offset_text().set_fontsize(int(11 * font_scale))
ax.xaxis.get_offset_text().set_fontsize(int(11 * font_scale))
# You may use the following line to remove the offset label if needed.
# ax.xaxis.get_offset_text().set(alpha=0)
if save:
try:
save_name = get_save_name(save_path)
ax.figure.savefig(save_name, bbox_inches="tight", dpi=300)
except IOError:
save_path = f"Could not save to {save_path}. Did you forget the filetype?"
save_path = None
save = False
return fig
|
995,112 | f6f689ce823d72e1c215479ddf3dc69ed23b498a | __all__ = ['factor_db']
from yearonedb import factor_db
|
995,113 | e0d5c1fd5928328d76c5336b1fd73c733cd9ef59 | # coding: utf-8
import lglass.rpsl
import lglass.database.base
import urllib.parse
import netaddr
@lglass.database.base.register("cidr")
class CIDRDatabase(lglass.database.base.Database):
""" Extended database type which is a layer between the user and another
database. It performs CIDR matching and AS range matching on find calls. """
# TODO reimplement this using a trie
range_types = {"as-block"}
cidr_types = {"inetnum", "inet6num", "route", "route6"}
perform_range = True
perform_cidr = True
range_slice = slice(None)
cidr_slice = slice(None)
def __init__(self, db, **kwargs):
self.database = db
self.__dict__.update(kwargs)
def get(self, type, primary_key):
return self.database.get(type, primary_key)
def find(self, primary_key, types=None):
objects = []
found_objects = set([])
objects.extend([o for o in self.database.find(primary_key, types=types)
if o.spec not in found_objects])
found_objects = set([obj.spec for obj in objects])
if self.perform_cidr:
objects.extend([o for o in self.find_by_cidr(primary_key, types)
if o.spec not in found_objects][self.cidr_slice])
found_objects = set([obj.spec for obj in objects])
if self.perform_range:
objects.extend([o for o in self.find_by_range(primary_key, types)
if o.spec not in found_objects][self.range_slice])
found_objects = set([obj.spec for obj in objects])
return objects
def find_by_cidr(self, primary_key, types=None):
cidr_types = self.cidr_types
if types:
cidr_types = cidr_types & set(types)
try:
address = netaddr.IPNetwork(primary_key)
except (ValueError, netaddr.core.AddrFormatError):
return []
objects = []
for supernet in address.supernet():
supernets = self.database.find(str(supernet), types=cidr_types)
for _supernet in supernets:
objects.append((supernet.prefixlen, _supernet))
return (obj[1] for obj in sorted(objects, key=lambda obj: obj[0], reverse=True))
def find_by_range(self, primary_key, types=None):
range_types = self.range_types
if types:
range_types = range_types & set(types)
try:
primary_key = int(primary_key.replace("AS", ""))
except ValueError:
return []
objects = []
for type, _primary_key in self.list():
if type not in range_types:
continue
obj_range = tuple([int(x.strip()) for x in _primary_key.split("/", 2)])
if len(obj_range) != 2:
continue
if primary_key >= obj_range[0] and primary_key <= obj_range[1]:
objects.append((obj_range[1] - obj_range[0], self.get(type, _primary_key)))
return (obj[1] for obj in sorted(objects, key=lambda obj: obj[0], reverse=True))
def save(self, object):
self.database.save(object)
def delete(self, type, primary_key):
self.database.delete(type, primary_key)
def list(self):
return self.database.list()
def __hash__(self):
return hash(self.database)
@classmethod
def from_url(cls, url):
self = cls(None)
if url.query:
query = urllib.parse.parse_qs(url.query)
if "range-types" in query:
self.range_types = set(query["range-types"][-1].split(","))
if "cidr-types" in query:
self.cidr_types = set(query["cidr-types"][-1].split(","))
if "range-slice" in query:
self.range_slice = _str_to_slice(query["range-slice"][-1])
if "cidr-slice" in query:
self.cidr_slice = _str_to_slice(query["cidr-slice"][-1])
return self
def _str_to_slice(string):
if not string:
return slice(None)
tokens = []
for n in string.split(":"):
try:
tokens.append(int(n))
except ValueError:
tokens.append(None)
if len(tokens) == 1:
return slice(*tokens)
elif len(tokens) == 2:
return slice(*tokens)
elif len(tokens) == 3:
return slice(*tokens)
|
995,114 | 7578334934392e592229137cca64aee2cebbc3f4 | class Solution(object):
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
def cal(n):
ans = 0
while n != 0:
ans += (n % 10)**2
n //= 10
return ans
dic = {}
while n not in dic:
if n == 1:
return True
else:
dic[n] = "add"
n = cal(n)
return False
|
995,115 | fbde879416a003dfa44105eeae96246114714fc6 | # Generated by Django 2.0.1 on 2018-01-31 10:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gate', '0005_auto_20180131_0959'),
]
operations = [
migrations.AlterField(
model_name='qrcode',
name='qrcode_requires_identification',
field=models.BooleanField(default=False, help_text='qrcode_requires_identification', verbose_name='qrcode_requires_identification verbose_name'),
),
migrations.AlterUniqueTogether(
name='paperticket',
unique_together={('batch', 'batch_line')},
),
]
|
995,116 | a64225c6cd3ebf71763cdfcc934f5acb95da6670 | # -*- coding: utf-8 -*-
# Part of Inceptus ERP Solutions Pvt.ltd.
# See LICENSE file for copyright and licensing details.
from odoo import models, fields, api, _, SUPERUSER_ID
from odoo.exceptions import UserError
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
import time
class POSConfig(models.Model):
_name = 'pos.config'
_inherit = ["pos.config", "ies.base"]
allow_coupon_reedem = fields.Boolean("Reeedem Coupon on Giftcard?",
help='Allow coupons reedem on the giftcard? It requires a POS Giftcard module')
|
995,117 | 28419c4eca5138553f75660ef80f4d8044ab82b7 | # This test requires CPython3.5
try:
b'' % ()
except TypeError:
print("SKIP")
raise SystemExit
print(b"%%" % ())
print(b"=%d=" % 1)
print(b"=%d=%d=" % (1, 2))
print(b"=%s=" % b"str")
print(b"=%r=" % b"str")
|
995,118 | 1c7603d98cc162f4efe64322361cd31bc557a4b8 | from __future__ import print_function
from LinearTransform import LinearTransform
from omg import *
import sys
from PIL import Image, ImageDraw
def drawmap(wad, width, format):
xsize = width - 8
i = 77
for level in wad.maps:
edit = MapEditor(wad.maps[level])
xmin = ymin = 32767
xmax = ymax = -32768
for v in edit.vertexes:
xmin = min(xmin, v.x)
xmax = max(xmax, v.x)
ymin = min(ymin, -v.y)
ymax = max(ymax, -v.y)
scale = xsize / float(xmax - xmin)
xmax = int(xmax * scale)
xmin = int(xmin * scale)
ymax = int(ymax * scale)
ymin = int(ymin * scale)
for v in edit.vertexes:
v.x = v.x * scale
v.y = -v.y * scale
im = Image.new('RGB', ((xmax - xmin) + 8, (ymax - ymin) + 8), (0, 0, 0))
draw = ImageDraw.Draw(im)
edit.linedefs.sort(key=lambda a: not a.two_sided)
for line in edit.linedefs:
p1x = edit.vertexes[line.vx_a].x - xmin + 8
p1y = edit.vertexes[line.vx_a].y - ymin + 8
p2x = edit.vertexes[line.vx_b].x - xmin + 8
p2y = edit.vertexes[line.vx_b].y - ymin + 8
color = (255, 255, 255)
if line.two_sided:
color = (255, 255, 255)
if line.action:
color = (255, 255, 255)
draw.line((p1x, p1y, p2x, p2y), fill=color)
del draw
im.save(str(i) + "." + format.lower(), format)
i += 1
# import psyco
# psyco.full()
def draw_scale(draw, LT, color):
(A_px, A_py) = LT.MapToScreen(LT.right - 256, LT.top)
(B_px, B_py) = LT.MapToScreen(LT.right - 128, LT.top)
(C_px, C_py) = LT.MapToScreen(LT.right, LT.top)
(D_px, D_py) = LT.MapToScreen(LT.right - 256, LT.top - 128 / 2)
(E_px, E_py) = LT.MapToScreen(LT.right - 128, LT.top - 128 / 4)
(F_px, F_py) = LT.MapToScreen(LT.right, LT.top - 128 / 2)
draw.line((A_px, A_py, C_px, C_py), fill=color) # A -> C
draw.line((A_px, A_py, D_px, D_py), fill=color) # A -> D
draw.line((B_px, B_py, E_px, E_py), fill=color) # B -> E
def draw_line(draw, p1x, p1y, p2x, p2y, color):
draw.line((p1x, p1y, p2x, p2y), fill=color)
if (len(sys.argv) < 5):
print("\n Omgifol script: draw maps to image files\n")
print(" Usage:")
print(" drawmaps.py source.wad pattern width format\n")
print(" Draw all maps whose names match the given pattern (eg E?M4 or MAP*)")
print(" to image files of a given format (PNG, BMP, etc). width specifies the")
print(" desired width of the output images.")
else:
print("Loading %s..." % sys.argv[1])
inwad = WAD()
inwad.from_file(sys.argv[1])
width = int(sys.argv[3])
format = sys.argv[4].upper()
drawmap(inwad, width, format)
|
995,119 | f155daba149e34aab14e1eb39bfcb5002f7a9e13 | import cx_Oracle
con = cx_Oracle.connect("ankit123/ankit")
cur = con.cursor()
print("Select from the below options ")
print("---------------------------------------- ")
print("Press 1 for Max Likes")
print("Press 2 for Min Likes")
print("Press 3 for Music Pictures")
print("Press 4 for Popular Tag")
print("Press 5 for Most Liked User")
print("Press 6 for Old Tagging")
print("Press 7 to delete inactive users")
print("---------------------------------------- ")
choice = int(input("Press the appropriate key depending on your choice \n"))
if (choice == 1):
cur.execute("SELECT PIC_ID FROM PICTURE WHERE LIKES=(SELECT MAX(LIKES) FROM PICTURE)")
for i in cur.fetchall():
print("The picture id with maximum likes is:",i[0])
elif (choice == 2):
cur.execute("SELECT PIC_ID FROM PICTURE WHERE LIKES=(SELECT MIN(LIKES) FROM PICTURE)")
print("The picture id with minimum likes are:")
for i in cur.fetchall():
print(i[0])
elif (choice == 3):
cur.execute("SELECT PIC_ID FROM PICTURE WHERE TAGS='MUSIC'")
print("The picture id related to music are:")
for i in cur.fetchall():
print(i[0])
elif (choice == 4):
cur.execute("SELECT TAGS FROM PICTURE GROUP BY TAGS HAVING COUNT(TAGS)=(SELECT MAX(MYCOUNT) FROM (SELECT COUNT(TAGS) MYCOUNT FROM PICTURE GROUP BY TAGS))");
print("The most popular tag is:")
for i in cur.fetchall():
print(i[0])
elif (choice == 5):
cur.execute("SELECT FNAME,LNAME FROM USERS WHERE USER_ID=(SELECT USER_ID FROM PICTURE WHERE LIKES=(SELECT MAX(LIKES) FROM PICTURE))");
print("The user who has been liked most:")
for i in cur.fetchall():
print(i[0],i[1])
elif (choice == 6):
cur.execute("SELECT PIC_ID,TAGS FROM PICTURE WHERE (SYSDATE-DATE_POSTED) > 1095");
print("The user with a picture tag of more than 3 years is:")
for i in cur.fetchall():
print(i[0],i[1])
elif (choice == 7):
cur.execute("DELETE FROM PICTURE WHERE (SYSDATE-DATE_POSTED) > 365")
print("The rest of the users are:")
for i in cur.fetchall():
print(i)
else:
print("Sorry wrong choice")
con.close() |
995,120 | b2cad34733fff19ed031518ba5002f689e41ac26 | import pytest
@pytest.fixture
def supply_AA_BB_CC():
aa=25
bb=35
cc=45
return [aa,bb,cc]
@pytest.fixture
def supply_url():
return "https://reqres.in/api"
|
995,121 | 668abf1dea6b7906f8ceb3d6ab8d8cd6050f8966 | # Script to rename sound files according to the era the tech is in and to the name we actually
# see when ingame. FUTURE_TECH -> 14_simulation_awareness, etc
# NOTE: THIS ASSUMES THAT VANILLA .MP3 FILES ARE THOSE FOLLOWING THE "TECH_TECHNAME" FORMAT
# It won't catch techs that are pointed to a nonexistent mp3 file following the "Tech_*" format
import os
import glob
import sys
import lxml.etree as ET
from Common import load_tree
# RUN SCRIPT WITH "-rename" to automatically rename entries in the defines, sounds folder
actually_rename = bool(str(sys.argv[-1]) == '-rename')
path_assets_folder = '../../../Assets/'
path_mp3_files = '../../../Assets/Sounds/Tech/'
# Substrings from foldernames to not search through. Mormon is really messy, screws up script.
ignore_list = ['(unloaded)', 'Mormon']
# Prefixes to order techquotes for much easier finding, given possible name changes.
# Alternatively script can be easily modified to put each era audio files into their own folder
# or really whatever other organization scheme someone wants
era_dict = {
'PREHISTORIC' : '01',
'ANCIENT' : '02',
'CLASSICAL' : '03',
'MEDIEVAL' : '04',
'RENAISSANCE' : '05',
'INDUSTRIAL' : '06',
'ATOMIC' : '07',
'INFORMATION' : '08',
'NANOTECH' : '09',
'TRANSHUMAN' : '10',
'GALACTIC' : '11',
'COSMIC' : '12',
'TRANSCENDENT' : '13',
'FUTURE' : '14'}
# desired tech_dict structure:
# 'TECH_NAME' : [ *LIST* (singleplayer) , *LIST* (multiplayer) , 'ERA' , ['TXT_KEY_TECH_NAME','Tech Name'] ]
# *LIST* = ['AS2D_TECH_...' , 'SND_TECH_...' , 'Sounds/Tech/...']
tech_dict = {}
# helper functions to slightly reduce retyping
def find_text(element, schema, child_element):
text = element.find(f"{schema}{child_element}").text
return text
# To find relevant xml files in modules, elsewhere
def search_for_xml_ending(ending):
term = f"{path_assets_folder}**/*{ending}"
print(f"Recursive globbing for {term} please hold...")
globreturn = glob.glob(term, recursive=True)
print(f"Found {len(globreturn)} results")
# Ignore stuff matching ignore_list elements
glob_filtered = []
for i in globreturn:
ignoring = False
for j in ignore_list:
if i.find(j) != -1:
ignoring = True
print(f"IGNORING: {i}")
break
if not ignoring:
glob_filtered.append(i)
print(f"{len(glob_filtered)} good results")
return(glob_filtered)
# checks tech_dict to see if structure matches after each import;
# might be some element missing. Assumes TXT_KEY description isn't missing.
def breakcheck(dict_depth, dict_width=4):
err = False
for k, v in tech_dict.items():
if len(v) != dict_width:
print(f"ERROR: Missing entry from: {v}")
err = True
else:
if len(v[0]) != dict_depth:
print("ERROR: Malformatted or missing:")
print(f"{k}, {v}")
err = True
if len(v[1]) != dict_depth:
print("ERROR: Malformatted or missing:")
print(f"{k}, {v}")
err = True
if err:
print(f"These should have depth {dict_depth} in items 0, 1, of {dict_width} items.")
# Stopping here because continuing makes things messy and I don't want to have to deal w/ more error cases
sys.exit()
# debug func
def show_dict(tech_entry=''):
for k, v in tech_dict.items():
if tech_entry == '' or tech_entry == k:
print('##########')
print(f"Tech: {k}")
if type(v) == list:
for i in v:
print(i)
else:
print(v)
# Looks for techs that start with tech_ or TECH_, in mp3 name or 'filename'
def check_vanilla(text):
if len(text)>=17:
if text[:17].upper() == 'SOUNDS/TECH/TECH_':
return True
elif len(text)>=5:
if text[:5].upper() == 'TECH_':
return True
return False
def mp3_missing_query(mp3_filepath, source, miss_list):
# is vanilla tech? Assuming used vanilla techs are formatted in this manner, see note
if check_vanilla(mp3_filepath):
return
# Check if file exists
if not os.path.exists(f"{path_assets_folder}{mp3_filepath}.mp3"):
print(f"Missing mp3 file from {source}: {mp3_filepath}")
miss_list.append(mp3_filepath)
return
need_changing = False
# Sorted/human readable tech name from era, english tech name
def rename_file(filename, era, techname, element, schema, child_element):
techname_replaced = techname.replace(' ', '_')
era_number = era_dict[era]
target_mp3_name = f"{era_number}_{techname_replaced}"
target_filename = 'Sounds/Tech/' + target_mp3_name
if filename == target_filename:
return
if not check_vanilla(filename):
# rename file in xml
print(f"{filename} should be: {target_filename}:")
full_mp3_filename = f"{path_assets_folder}{filename}.mp3"
need_changing = True
if actually_rename:
print(f"Finding: {schema}{child_element} and making: {target_filename}")
element.find(f"{schema}{child_element}").text = target_filename
# rename file in Sounds folder
if os.path.exists(full_mp3_filename):
print(f"Renaming: {full_mp3_filename} to {path_mp3_files}{target_mp3_name}.mp3")
os.rename(full_mp3_filename, f"{path_mp3_files}{target_mp3_name}.mp3")
else:
print(f"Cannot find {full_mp3_filename} to rename.")
else:
print(f"{filename} should be: {target_filename}:")
print(f"Will rename: {schema}{child_element}")
print(f"Will rename: {full_mp3_filename} (if exists) to {path_mp3_files}{target_mp3_name}.mp3")
##############
# loop through techs in *CIV4TechInfos.xml
paths = search_for_xml_ending('CIV4TechInfos.xml')
for path in paths:
_, root, schema = load_tree(path)
for tech_info in root[0].findall(f"{schema}TechInfo"):
tech = find_text(tech_info, schema, 'Type')
if tech == 'TECH_DUMMY' or tech == '':
print(f"Tech skipping: {tech}")
elif tech_dict.get(tech) is not None:
print(f"DUPLICATE ENTRY: {tech}, {path}")
else:
audio2D_single = find_text(tech_info, schema, 'Sound')
audio2D_multip = find_text(tech_info, schema, 'SoundMP')
era = find_text(tech_info, schema, 'Era')[8:]
txt_key = find_text(tech_info, schema, 'Description')
tech_dict[tech] = [[audio2D_single] , [audio2D_multip] , era , [txt_key]]
breakcheck(1)
print('no missing script links or data')
# Acquire english textname
paths = search_for_xml_ending('CIV4GameText.xml')
for path in paths:
_, root, schema = load_tree(path)
for tech_text in root.findall(f"{schema}TEXT"):
tech_tag = find_text(tech_text, schema, 'Tag')
for k, v in tech_dict.items():
if v[3][0] == tech_tag:
tech_name = find_text(tech_text, schema, 'English')
v[3].append(tech_name)
for k, v in tech_dict.items():
if len(v[3]) != 2:
# These are probably religious techs in their module
print(f"Posible error: Missing tag for {v[3]}")
# While looping thu scripts, look for tech scripts that aren't referenced in CIV4TechInfos.
unlinked_scripts = {}
# generate accurate link for CIV4TechInfos -> AudioDefines rather than assuming by name
paths = search_for_xml_ending('Audio2DScripts.xml')
for path in paths:
_, root, schema = load_tree(path)
for script_2D_sound in root.findall(f"{schema}Script2DSound"):
script_ID = find_text(script_2D_sound, schema, 'ScriptID')
if script_ID[:10] == 'AS2D_TECH_':
matched = False
sound_ID = find_text(script_2D_sound, schema, 'SoundID')
for _, v in tech_dict.items():
if script_ID == v[0][0]:
v[0].append(sound_ID)
matched = True
if script_ID == v[1][0]:
v[1].append(sound_ID)
matched = True
if not matched:
unlinked_scripts[script_ID] = [sound_ID]
# # A number of these are from modules or unused quotes from vanilla, but not all.
if len(unlinked_scripts) == 0:
print('No unlinked scripts!')
else:
print('Some of these may be vanilla:')
for i in unlinked_scripts:
print(f"Possible unlinked script: {i}")
# breakcheck if something should be fixed.
breakcheck(2)
print('2D to script seems valid')
# While looping thu defines, look for tech sound files that aren't referenced in CIV4TechInfos.
unlinked_defines = {}
# find and add filenames from AudioDefines
paths = search_for_xml_ending('AudioDefines.xml')
for path in paths:
_, root, schema = load_tree(path)
for sound_data in root[0].findall(f"{schema}SoundData"):
sound_ID = find_text(sound_data, schema, 'SoundID')
matched = False
if sound_ID[:9] == 'SND_TECH_':
mp3_filepath = find_text(sound_data, schema, 'Filename')
for _, v in tech_dict.items():
if v[0][1] == sound_ID:
v[0].append(mp3_filepath)
matched = True
if v[1][1] == sound_ID:
v[1].append(mp3_filepath)
matched = True
# if matching to civ4techinfos fails, check against floating scripts from before
if not matched:
matched_floating = False
for _, v2 in unlinked_scripts.items():
if sound_ID == v2[0]:
v2.append(mp3_filepath)
matched_floating = True
if not matched_floating:
unlinked_defines[sound_ID] = [mp3_filepath]
if len(unlinked_defines) == 0:
print('No unlinked defines!')
else:
for i in unlinked_defines:
print(f"Probable unlinked defines: {i}")
breakcheck(3)
print('script to mp3 seems valid, no one-to-many relations')
missing_mp3s = []
# Now check the actual .mp3 files, to see if any are missing
for k, v in tech_dict.items():
for i in range(2):
mp3_filepath = v[i][2]
mp3_missing_query(mp3_filepath, 'MAIN TECHS', missing_mp3s)
# Check unlinked scripts that have a define:
for k, v in unlinked_scripts.items():
if len(v) > 1:
mp3_filepath = v[1]
mp3_missing_query(mp3_filepath, 'unlinked script-defines', v)
# Check unlinked defines:
for k, v in unlinked_defines.items():
mp3_filepath = v[0]
mp3_missing_query(mp3_filepath, 'unlinked defines', v)
# Searching for unlinked mp3 files:
# ..... todo. Technically can be done by temporarily running the script with different rename_file func.
# Now loop thru AudioDefines again, this time changing things with certainty of no missing links
for path in paths:
tree, root, schema = load_tree(path)
for sound_data in root[0].findall(f"{schema}SoundData"):
sound_ID = find_text(sound_data, schema, 'SoundID')
if sound_ID[:9] == 'SND_TECH_':
for _, v in tech_dict.items():
# check both singleplayer and multiplayer versions, can differ.
if v[0][1] == sound_ID:
rename_file(v[0][2], v[2], v[3][1], sound_data, schema, 'Filename')
if v[1][1] == sound_ID:
rename_file(v[1][2], v[2], v[3][1], sound_data, schema, 'Filename')
tree.write(path)
if not actually_rename and need_changing:
print('!!!!!!!!!----------!!!!!!!!!!')
print("To do these changes, run again with -rename ")
elif not need_changing:
print('Nothing to rename!')
else:
print('Done organizing names!') |
995,122 | b2d1fe688129941b7c95d44e4fa8582d38fefe34 | import argparse
import os
import subprocess
from bs4 import BeautifulSoup
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert notebook to hugo mardown')
parser.add_argument('--input_nb', action='store', help='path to notebook')
parser.add_argument('--hugo_dir', action='store', help='path to hugo directory')
parser.add_argument('--outfile', action='store', help='relative path to hugo directory')
args = parser.parse_args()
if not os.path.isfile(args.input_nb):
raise Exception('Can NOT open input notebook <{}>'.format(args.input_nb))
meta_file = '{}.meta'.format(args.input_nb)
if not os.path.isfile(meta_file):
raise Exception('Can NOT open input notebook\'s meta-file <{}>'.format(meta_file))
indir = os.path.dirname(args.input_nb)
output = os.path.join(args.hugo_dir, args.outfile)
outdir = os.path.dirname(output)
if not os.path.isdir(outdir):
os.makedirs(outdir)
cmd = 'jupyter nbconvert --to html --template basic {}'.format(args.input_nb)
subprocess.check_output(cmd.split())
out_html = '{}.html'.format(args.input_nb[:-len('.ipynb')])
img_files = []
out_imgs = []
fname, fext = os.path.splitext(os.path.basename(output))
with open(out_html, 'r') as f:
body = f.read()
# # replace link
soup = BeautifulSoup(body, "html.parser")
for a in soup.findAll('a'):
if a['href'].startswith('./') and a['href'].endswith('.ipynb'):
a['href'] = a['href'][:-len('.ipynb')] + '/'
for a in soup.findAll('img'):
img_files.append(os.path.normpath(os.path.join(indir, a['src'])))
out_imgs.append(os.path.normpath(os.path.join(outdir, fname, a['src'])))
body = soup.prettify()
# done with html file, remove it now
os.remove(out_html)
if len(img_files) > 0:
print ('\nStart copying images to hugo dir...\n-----------------------------------')
for a, b in zip(img_files, out_imgs):
if not os.path.isfile(a):
continue
bdir = os.path.dirname(b)
if not os.path.isdir(bdir):
os.makedirs(bdir)
cmd = 'cp -r {} {}'.format(a, b)
subprocess.check_output(cmd.split())
print ('copied {} => {} done'.format(a, b))
print ("\n")
with open(meta_file, 'r') as f:
meta = f.read()
with open(output, 'w') as f:
f.write('{}\n'.format(meta))
f.write('{}'.format(body))
print ('Convert notebook {} using meta {} ==> {}\n=================================\n'.format(args.input_nb, meta_file, output)) |
995,123 | 98fc5dceda491c93d8248f56bf1deb37dd4a8f97 | #!/usr/bin/python3
import os
import pickle
import nltk
import numpy as np
import sys
sys.setrecursionlimit(1000000) #设置为一百万,否则dump数据的时候显示递归层次过多!
# 切分数据集
from sklearn.cross_validation import train_test_split
from keras.utils import np_utils
from keras.models import Sequential,Graph
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU
from keras.layers.core import Reshape, Flatten , Dense, Dropout, Activation
tagindex = {'B':0, 'E':1, 'M':2, 'S':3 }
indextag = {0:'B', 1:'E', 2:'M', 3:'S'}
# dict的查找速度比List会快一些
word2index = {}
index2word = {}
wordvector = {}
TRAIN_FILE = "../data_dir/icwb2-data/training/msr_pku_lite.utf8"
#TRAIN_FILE = "../data_dir/icwb2-data/training/pku_training.utf8"
#TRAIN_FILE = "../data_dir/icwb2-data/training/msr_pku_training.utf8"
def wordindex(word):
if not word: return None
if word not in word2index:
len_t = len(word2index)
word2index[word] = len_t
index2word[len_t] = word
return word2index[word]
# 给定的句子,生成对应的训练向量,包含首位的PADING c c c W c c c
# 输入是list状态的单个句子(分词的或者未分词的)
def sent2num(sentence, context = 7):
word_num = []
for item in sentence:
for w in item:
# 文本中的字如果在词典中则转为数字,如果不在则设置为'U
if w in word2index:
word_num.append(word2index[w])
else:
word_num.append(word2index['U'])
# 首尾padding
num = len(word_num)
pad = int((context-1)*0.5) #3 by default
for i in range(pad):
word_num.insert(0,word2index['P'] )
word_num.append(word2index['P'] )
train_x = []
for i in range(num):
train_x.append(word_num[i:i+context])
return train_x
# 给定的句子,生成对应的TAG S B M E
def sent2tag(sentence):
train_tg = []
for item in sentence:
if len(item) == 1:
train_tg.append(tagindex['S'])
continue
train_tg.append(tagindex['B'])
for w in item[1:len(item)-1]:
train_tg.append(tagindex['M'])
train_tg.append(tagindex['E'])
return train_tg
# 根据输入得到标注推断
def predict_num(input_txt, input_num, model):
str_ret = '';
input_num = np.array(input_num)
predict_prob = model.predict_proba(input_num, verbose=False)
predict_lable = model.predict_classes(input_num, verbose=False)
for i , lable in enumerate(predict_lable[:-1]):
# 如果是首字 ,不可为E, M
if i == 0:
predict_prob[i, tagindex['E']] = 0
predict_prob[i, tagindex['M']] = 0
# 前字为B,后字不可为B,S
if lable == tagindex['B']:
predict_prob[i+1,tagindex['B']] = 0
predict_prob[i+1,tagindex['S']] = 0
# 前字为E,后字不可为M,E
if lable == tagindex['E']:
predict_prob[i+1,tagindex['M']] = 0
predict_prob[i+1,tagindex['E']] = 0
# 前字为M,后字不可为B,S
if lable == tagindex['M']:
predict_prob[i+1,tagindex['B']] = 0
predict_prob[i+1,tagindex['S']] = 0
# 前字为S,后字不可为M,E
if lable == tagindex['S']:
predict_prob[i+1,tagindex['M']] = 0
predict_prob[i+1,tagindex['E']] = 0
predict_lable[i+1] = predict_prob[i+1].argmax()
#predict_lable_new = [indextag[x] for x in predict_lable]
#result = [w+'/' +l for w, l in zip(input_txt,predict_lable_new)]
for i in range(len(input_txt)):
str_ret += input_txt[i]
if predict_lable[i] == tagindex['S'] or predict_lable[i] == tagindex['E']:
str_ret += ' '
return str_ret
def build_dl_model():
input_file_str = []
line_num = 0
with open(TRAIN_FILE) as fin:
try:
for each_line in fin:
if not each_line:
continue
line_num += 1
if not (line_num % 2000): print("C:%d" %(line_num))
line_items = each_line.split()
for item in line_items:
for w in item:
wordindex(w) # USE THE SIDE EFFECT, 单个字的vector
input_file_str.append(line_items)
except UnicodeDecodeError as e:
print('Unicode Error! filename=%s, line_num=%d'%(TRAIN_FILE, line_num))
print("训练长度:%d" %(len(input_file_str)))
len_t = len(word2index)
word2index['U'] = len_t
index2word[len_t] = 'U'
word2index['P'] = len_t+1
index2word[len_t+1] = 'P'
len_input_file_str = len(input_file_str)
train_vector = []
train_tag = []
# 再次遍历训练数据
for i_input in range(len_input_file_str):
#for item_j in input_file_str[i_input]:
train_x = sent2num(input_file_str[i_input])
train_g = sent2tag(input_file_str[i_input])
train_vector.extend(train_x)
train_tag.extend(train_g)
print("SIZE VECTOR:%d TAG:%d" %(len(train_vector), len(train_tag)))
train_vector = np.array(train_vector)
train_X, test_X, train_y, test_y = train_test_split(train_vector, train_tag , train_size=0.90, random_state=1)
print(len(train_X), 'train sequences')
print(len(test_X), 'test sequences')
batch_size = 128
maxfeatures = len(index2word)
word_dim = 100
maxlen = 7
hidden_units = 100
nb_classes = 4
Y_train = np_utils.to_categorical(train_y, nb_classes)
Y_test = np_utils.to_categorical(test_y, nb_classes)
print('Stacking LSTM...')
model = Sequential()
model.add(Embedding(maxfeatures, word_dim, input_length=maxlen))
model.add(LSTM(output_dim=hidden_units, return_sequences =True))
model.add(LSTM(output_dim=hidden_units, return_sequences =False))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
print("Train...")
result = model.fit(train_X, Y_train, batch_size=batch_size,
nb_epoch=4, validation_data = (test_X,Y_test), show_accuracy=True)
score = model.evaluate(test_X, Y_test, batch_size=batch_size)
print("Test Score:%d" %(score))
return model
if __name__ == "__main__":
DUMP_FILE = "dump.dat_v1"
if os.path.exists(DUMP_FILE):
print("LOADING DL...")
dump_data = []
with open(DUMP_FILE,'rb', -1) as fin:
dump_data = pickle.load(fin)
word2index = dump_data[0]
index2word = dump_data[1]
dl_model = dump_data[2]
print("DONE!")
else:
print("BUILDING DL...")
dl_model = build_dl_model()
dump_data = []
with open(DUMP_FILE,'wb', -1) as fout:
dump_data.append(word2index)
dump_data.append(index2word)
dump_data.append(dl_model)
pickle.dump(dump_data, fout, -1);
print("DONE!")
temp_txt = '国家食药监总局发布通知称,酮康唑口服制剂因存在严重肝毒性不良反应,即日起停止生产销售使用。'
temp_txt = list(temp_txt)
temp_num = sent2num(temp_txt)
ret = predict_num(temp_txt, temp_num, dl_model)
print(ret)
temp_txt = "首先是个民族问题,民族的感情问题"
temp_txt = list(temp_txt)
temp_num = sent2num(temp_txt)
ret = predict_num(temp_txt, temp_num, dl_model)
print(ret)
|
995,124 | 92fbb5c2b5fc0ee36ee11be06b6ab70ea53f94a1 | # Generated by Django 2.1.7 on 2019-02-25 23:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('events', '0011_eventannouncement'),
]
operations = [
migrations.AlterField(
model_name='eventannouncement',
name='event',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='events.Event'),
),
]
|
995,125 | 280b999768614d423a07eba7606a0ebce23ff718 | # -*- coding: utf-8 -*-
from odoo import models, fields
class AccountPaymentOrder(models.Model):
_inherit = 'account.payment.order'
state = fields.Selection(
[
('draft', 'Draft'),
('open', 'Confirmed'),
('generated', 'File Generated'),
('uploaded', 'File Uploaded'),
('cancel', 'Cancel'),
]
)
|
995,126 | 8dbde580be3445dfd71fbfc5e62ba01411a2be25 | #!/usr/bin/env python3
import requests
import json
import urllib
papers_path = 'data/papers.json'
api_endpoint = 'http://ieeexploreapi.ieee.org/api/v1/search/articles'
api_key = '2quujqxvyzwpzztgkud8w2x6'
query_string = urllib.parse.urlencode({
'apikey': api_key,
'format': 'json',
'content_type': 'Conferences, Journals & Magazines',
'querytext': 'Domain Specific Languages',
'start_year': 2010,
'max_records': 100,
})
url = f'{api_endpoint}?{query_string}'
res = requests.get(url=url)
data = json.loads(res.text)
def clean_data(data):
return sorted(map(lambda d: {
'id' : d['id'],
'title': d['title'],
'abstract': d['abstract'],
}, data), key = lambda d: d['id'])
def add_ids(data):
for i,d in enumerate(data):
d['id'] = i
return data
with open(papers_path, 'w+', encoding = 'utf-8') as f:
data = add_ids(data)
json.dump(clean_data(data), f, indent = 2)
|
995,127 | a46e83fcbeb6933a448ad71d661a96d4dd74e49c | from bs4 import BeautifulSoup
import urllib2
import re
from newspaper import Article
import csv
import articleDateExtractor
from geotext import GeoText
press=["https://www.ndtv.com/","http://www.news18.com/"]#"http://timesofindia.indiatimes.com/",
#press=["http://www.news18.com/"]
regexList = [r'www.news18.com/news',r'https://www.ndtv.com/']
count=0
for li in press:
resp = urllib2.urlopen(li)
soup = BeautifulSoup(resp,"html.parser")
#count = 0
news=list()
for link in soup.find_all('a', href=True):
#print link['href']
news.append(link['href'])
count=count+1
print count
#print news[0:50]
#flag=1
for link in news[0:]:
flag=1
for regex in regexList:
if(re.search(regex, link)):#|https://www.ndtv.com/
flag=0
if(flag==1):
news.remove(link)
print len(news)
print news[0:100] |
995,128 | 2576ceba3c37a13394e94ec4b739f72147d8fc88 | # 读取Excel需求通报文件
import os
import sys
import getopt
import xlrd
from datetime import date,datetime
from pandas import DataFrame,Series
import string
DATA_DIR = 'd:/Python/WXRobot/data/' # Excel数据文件存放目录
DATA_FILE = 'BI需求看板.xlsx' # Excel数据文件存放目录
# SHEET1 = '重点关注临时需求(领导关注、集团上报需求、KPI需求)'
def read_excel():
# 打开文件
try:
workbook = xlrd.open_workbook(os.path.join(DATA_DIR,DATA_FILE))
except:
print('读取EXCEL文件失败!')
sys.exit(1)
# 获取所有sheet
print('已读取文件\'{}\',包含以下sheet: '.format(DATA_FILE))
sheet_list = workbook.sheet_names()
print(sheet_list)
'''
# 读取指定的sheet
# sheet_focus = workbook.sheet_by_name(SHEET1)
# print('已读取sheet\'{}\',包含{}行{}列。'.format(sheet_focus.name,sheet_focus.nrows,sheet_focus.ncols))
# 读取指定的列
col_reqprority = sheet_focus.col_values(0)
col_reqid = sheet_focus.col_values(1)
col_reqname = sheet_focus.col_values(2)
col_reqdept = sheet_focus.col_values(5)
col_reqfrom = sheet_focus.col_values(7)
col_reqowner = sheet_focus.col_values(9)
col_reqarrdate = sheet_focus.col_values(14)
col_reqistoday = sheet_focus.col_values(17)
col_reqforesee = sheet_focus.col_values(18)
col_reqworker = sheet_focus.col_values(22)
'''
# 获取所有的sheet并组装写入数据框
sheet_readin = []
request_df = []
# 第一张sheet是汇总说明不读取
for j in range(workbook.nsheets):
# 依次读取sheet
sheet_readin.append(workbook.sheet_by_index(j))
print('已读取sheet\'{}\',包含{}行{}列。'.format(sheet_readin[j].name, sheet_readin[j].nrows, sheet_readin[j].ncols))
# 读取全部列
dic_col = {}
for i in range(sheet_readin[j].ncols):
col_i = sheet_readin[j].col_values(i)
# 将读取的多列组装为数据框
dic_col[col_i[0]] = col_i[1:]
# 将列组装成数据框
request_df.append(DataFrame(dic_col))
# print(request_df[j])
return(request_df, sheet_list)
#按需求ID查询
def query_by_id(request_df, sheet_list, req_id):
isfound = 0
query_result = {}
match_record = Series()
for i in range(1,len(request_df)):
if '需求单号' in request_df[i].columns:
#match_record = request_df[i].loc[request_df[i].loc[:,'需求单号']==req_id]
match_col = request_df[i].loc[:, '需求单号']
for j in range(len(match_col)):
if req_id in str(match_col[j]):
match_record = request_df[i].loc[j, :]
break
if len(match_record) > 0:
#print('【查询结果】需求ID为{}的记录位于第{}张表格\'{}\':'.format(req_id,i+1,sheet_list[i]))
#print('需求名称:', str(match_record['需求名称']).split(' ')[4].split('\n')[0])
query_result['id'] = match_record['需求单号']
query_result['name'] = match_record['需求名称']
#print('提出部门:', str(match_record['提出部门']).split(' ')[4].split('\n')[0])
query_result['dept'] = match_record['提出部门']
#print('需求负责人:', str(match_record['需求负责人']).split(' ')[4].split('\n')[0])
query_result['owner'] = match_record['需求负责人']
#print('所处需求队列类别:', sheet_list[i])
query_result['queue'] = sheet_list[i]
#print('当前所处需求队列优先级:', int(str(match_record['序号']).split(' ')[4].split('\n')[0].split('.')[0]))
query_result['priority'] = int(match_record['序号'])
handler = match_record['处理人']
if handler == '':
#print('当前处理人:暂无' )
query_result['handler'] = '暂无'
else:
#print('当前处理人:',handler)
query_result['handler'] = handler
try:
#print('完成进度:%.0f%%' % (100 * float(str(match_record['系统单完成状态']).split(' ')[4].split('\n')[0])))
query_result['status'] = 100 * float(match_record['系统单完成状态'])
except:
#print('完成进度:暂无')
query_result['status'] = 0
isfound = 1
query_result['isfound'] = 1
break
if isfound == 0:
#print('【查询结果】需求ID为{}的记录未找到。'.format(req_id))
query_result['isfound'] = 0
else:
#print(query_result)
pass
return query_result
#按需求名称查询
def query_by_name(request_df, sheet_list, req_name):
isfound = 0
query_result = {}
match_record = Series()
for i in range(1,len(request_df)):
if '需求名称' in request_df[i].columns:
#match_record = request_df[i].loc[request_df[i].loc[:,'需求名称'].str.contains(req_name)]
match_col = request_df[i].loc[:,'需求名称']
for j in range(len(match_col)):
if req_name in str(match_col[j]):
match_record = request_df[i].loc[j,:]
break
if len(match_record) > 0:
#print('【查询结果】需求ID为{}的记录位于第{}张表格\'{}\':'.format(req_id,i+1,sheet_list[i]))
#print('需求名称:', str(match_record['需求名称']).split(' ')[4].split('\n')[0])
query_result['id'] = match_record['需求单号']
query_result['name'] = match_record['需求名称']
#print('提出部门:', str(match_record['提出部门']).split(' ')[4].split('\n')[0])
query_result['dept'] = match_record['提出部门']
#print('需求负责人:', str(match_record['需求负责人']).split(' ')[4].split('\n')[0])
query_result['owner'] = match_record['需求负责人']
#print('所处需求队列类别:', sheet_list[i])
query_result['queue'] = sheet_list[i]
#print('当前所处需求队列优先级:', int(str(match_record['序号']).split(' ')[4].split('\n')[0].split('.')[0]))
query_result['priority'] = int(match_record['序号'])
handler = match_record['处理人']
if handler == '':
#print('当前处理人:暂无' )
query_result['handler'] = '暂无'
else:
#print('当前处理人:',handler)
query_result['handler'] = handler
try:
#print('完成进度:%.0f%%' % (100 * float(str(match_record['系统单完成状态']).split(' ')[4].split('\n')[0])))
query_result['status'] = 100 * float(match_record['系统单完成状态'])
except:
#print('完成进度:暂无')
query_result['status'] = 0
isfound = 1
query_result['isfound'] = 1
break
if isfound == 0:
#print('【查询结果】需求名称为{}的记录未找到。'.format(req_name))
query_result['isfound'] = 0
else:
#print(query_result)
pass
return query_result
#################################主函数###############################
# def request_query(argv):
def request_query(request_data, sheet_names, type, req):
'''
# 判断命令行调用规范性
if argv.__len__() == 0:
print('<Usage>', 'RequestQuery.py -i <需求ID> -n <需求名称>')
try:
opts, args = getopt.getopt(argv, "hi:n:")
except getopt.GetoptError:
print('<Usage>', 'RequestQuery.py -i <需求ID> -n <需求名称>')
sys.exit(2)
'''
# 读取需求数据文件
#request_data,sheet_names = read_excel()
query_output = {}
'''
for opt, arg in opts:
if opt == '-h':
print('RequestQuery.py -i <需求ID> -n <需求名称>')
sys.exit(0)
elif opt == '-i':
input_reqid = arg
print('查询的需求ID:',input_reqid)
query_output = query_by_id(request_data, sheet_names, input_reqid)
elif opt == '-n':
input_reqname = arg
print('查询的需求名称:',input_reqname)
query_output = query_by_name(request_data,sheet_names,input_reqname)
'''
query_output = {}
if type == 'i':
input_reqid = req
print('查询的需求ID:', input_reqid)
query_output = query_by_id(request_data, sheet_names, input_reqid)
elif type == 'n':
input_reqname = req
print('查询的需求名称:', input_reqname)
query_output = query_by_name(request_data, sheet_names, input_reqname)
else:
query_output['isfound'] = 0
if query_output['isfound'] == 1:
print('完成进度:%.0f%%' %query_output['status'])
else:
print('该需求查无记录,请检查输入信息是否正确。')
return query_output
'''
if __name__ == '__main__':
request_query(sys.argv[1:])
'''
|
995,129 | 2f8d6bdd2dd4673e74ca37aa4609cd9340e5319c | from direct.distributed.DistributedObject import DistributedObject
from direct.showbase.MessengerGlobal import messenger
class Message(DistributedObject):
def __init__(self, clientRepo):
DistributedObject.__init__(self, clientRepo)
def sendText(self, messageText):
"""Function which is caled for local changes only"""
# send an event, which will set the text on the
#print "got a message"
messenger.send("setText", [messageText])
def d_sendText(self, messageText):
"""Function which is caled to send the message over the network
therfore the d_ suffix stands for distributed"""
#print "send message %s" % messageText
self.sendUpdate("sendText", [messageText])
def b_sendText(self, messageText):
"""Function which combines the local and distributed functionality,
so the sendText and d_sendText functions are called.
The b_ suffix stands for both"""
self.sendText(messageText)
self.d_sendText(messageText)
|
995,130 | 95dbbfa3d902ccecf3cbaa4b9959f3e4150b1cb0 | '''
项目任务调度总成
1.日AUM 每日
2.生命周期 每季度
3.AUM总和 每月???
4.DEBT负债总和 每月???
5.客户价值 每半年
'''
from product.jj_analysis import DataAnalysis
from product.band_card import DataHandler
import datetime
from apscheduler.schedulers.background import BlockingScheduler
import logging
import logging.config
try:
from mysql_helper import MySQLHelper
except ImportError:
import sys, os
sys.path.append(os.path.abspath('../'))
from product.mysql_helper import MySQLHelper
class CMMSTask:
def __init__(self):
self.mysql_helper = MySQLHelper('core', host='10.9.29.212')
logging.config.fileConfig('./conf/logging.conf')
self.logger = logging.getLogger('simpleLogger')
self.da = DataAnalysis()
self.dh = DataHandler()
def daily_task(self):
def func():
day = datetime.datetime.now().strftime('%Y-%m-%d')
# 活期
self.da.init_balance(day, 1)
self.logger.info(day, '活期每日余额计算完成')
# 定期
self.da.init_balance(day, 2)
self.logger.info(day, '定期每日余额计算完成')
# 理财
self.da.init_balance(day, 3)
self.logger.info(day, '理财每日余额计算完成')
scheduler = BlockingScheduler()
scheduler.add_job(func,'cron',day='*',hour='1') # 每天凌晨1点运行
try:
scheduler.start()
except Exception as e:
# TODO 执行错误的处理方案
self.logger.error('每日AUM计算出错:',e)
scheduler.shutdown()
def month_task(self):
def func():
self.dh.aum_total()
self.dh.debt_total()
scheduler = BlockingScheduler()
scheduler.add_job(func, 'cron', month='*/1', day='1', hour='5') # 每月一号五点运行
def seasonly_task(self):
def func():
# 每个月计算前一个月的数据
month = datetime.datetime.now().month - 1
year = datetime.datetime.now().year
if month == 0:
month = 12
year = year-1
season = month/3
# 计算生命周期
self.dh.run_life_cycle(year,season)
scheduler = BlockingScheduler()
scheduler.add_job(func, 'cron', month='1,4,7,10', day='2', hour='2')
def half_year_task(self):
def func():
month = datetime.datetime.now().month - 1
year = datetime.datetime.now().year
if month == 0:
month = 12
year = year - 1
half_year = month/6
self.dh.customer_value(year,half_year)
scheduler = BlockingScheduler()
scheduler.add_job(func, 'cron', month='7,12', day='2', hour='5') # 7月12月2号五点计算客户价值 |
995,131 | e458e2ffbb842d7491d53b4e24fcc46bd1794b97 | #====================================================================
# Took extracts from
# https://code.activestate.com/recipes/119466-dijkstras-algorithm-for-shortest-paths/
# for Dijkstra()
#====================================================================
#
# functionsD.py : functions used in part 4
#
import re
from DSAGraph import *
def askFilename():
"""Ask user to enter which marginal file they want to use."""
# print("\nDo you have the file already?"+
# "\nYes - proceed\t\t No - go back to main menu")
# choice = input("(Y/N) ")
# if choice.upper() == "N":
# filename = None
# elif choice.upper() == "Y":
print("\nInsert file name (without the filetype)")
print("(PRESS CTRL+C IF THERE IS NO FILE YET!!)")
fileOpt = input("or press enter if saved on default name: ")
if fileOpt != "":
filename = fileOpt+".txt"
else:
print("\n\nFinding file...")
print("\n\nWhich party is it for?")
print("A. Labor\t\t B. Liberal")
partyOpt = input("Selected party is (A/B): ")
list1 = ["A", "B"]
while partyOpt.upper() not in list1:
partyOpt = input("Selected party is (A/B): ")
marginOpt = input("\nWhat was the margin used? (enter as int) ")
if partyOpt.upper() == "A":
filename = "LaborParty_MarginalSeatList"+str(marginOpt)+"%.txt"
elif partyOpt.upper() == "B":
filename = "LiberalParty_MarginalSeatList"+str(marginOpt)+"%.txt"
return filename
def readInputFile(marginalFile):
"""Reading input marginal file.
Keyword arguments:
marginalFile -- exported txt file of the marginal seats
"""
marginRegex = re.compile(r'''
(\D+) #divname
(\s)
(\D+) #state
(\:\s) #(: )
(\-?\d{1}\.?\d+) #-d.dddddddddd
''', re.VERBOSE)
matchlist = []
for line in marginalFile[2::]:
line = line.strip()
match = marginRegex.search(line)
if match:
matchlist.extend([match.group(1), match.group(3)])
return matchlist
def convertTravelTime(traveltime):
"""Convert time in hours and minutes to minutes.
Keyword arguments:
traveltime -- travel time in hours and minutes
"""
hour = traveltime[0]
minute = traveltime[2:4]
minutes = int(hour)*60 + int(minute)
return minutes
def insertAirport(airport, matchlist):
"""Adding airports to the graph.
Keyword arguments:
airport -- list of data from the airport csv
matchlist -- list of marginal division
"""
graph = DSAGraph()
for i in range(1,len(airport)):
if airport[i][10] == "plane":
From = airport[i][1]
To = airport[i][5]
weight = convertTravelTime(airport[i][9])
importToGraph(graph, From, To, weight, matchlist)
return graph
def insertElect(graph, elect, matchlist):
"""Adding divisions in elect to the graph.
Keyword arguments:
graph -- graph object
elect -- list of data from the csv file
matchlist -- list of marginal division
"""
for i in range(1,len(elect)):
From = elect[i][1]
To = elect[i][5]
weight = int(elect[i][9])/60 #from seconds to minutes
importToGraph(graph, From, To, weight, matchlist)
return graph
def importToGraph(graph, From, To, weight, matchlist):
"""Adding vertices and edges to graph.
Keyword arguments:
graph -- graph object
From -- label of source division
To -- label of destination division
weight -- travel time in minutes between
matchlist -- list of marginal divisions
"""
#if both source and destination are not in the graph
if graph.findVertex(From)==None and graph.findVertex(To)==None:
graph.addVertex(From)
graph.addVertex(To)
graph.addEdge(From, To, weight)
#if source is already in the graph, but not destination
elif graph.findVertex(From)!=None and graph.findVertex(To)==None:
graph.addVertex(To)
graph.addEdge(From, To, weight)
#if destination is already in the graph, but not source
elif graph.findVertex(From)==None and graph.findVertex(To)!=None:
graph.addVertex(From)
graph.addEdge(From, To, weight)
#if both source and destination are already in the graph
else:
graph.addEdge(From, To, weight)
#MIGHT TAKE OFF VERTEX.MARGINAL OFF JUST TBH -.-
#MATCH THE MARGINAL USING THE REGEX LIST
#MATCH AGAINST DIJKSTRA RESULTS AND LOOKUP PATH
def setDistanceInf(graph, D):
"""Setting dictionary values as inifinty.
Keyword arguments:
graph -- graph object
D -- dictionary for total distance
"""
for i in graph.vertices:
D[i] = float('inf')
# print("set distance infinity to "+i)
return D
#do shotest path of first div in marginallist
#examine output path, run for loop and match each against all marginal
def Dijkstra(graph, start, end=None):
"""Find shortest path?
Keyword arguments:
graph -- graph object
start -- starting node
end -- NOT destination node BUT end of graph node
"""
graph.clearVisited()
D = {} #dictionary of final distances
P = {} #dictionary of previous vertices
D = setDistanceInf(graph, D) #initial distance is infinity
D[start] = 0 #total distance from start to start is 0
#start from vertex with label: start
v = start #this is also curNode's label
#while is not the last vertex unvisited
while v != None:
#go to vertex with label v set is as current
curNode = graph.findVertex(v)
#set currentNode as visited
curNode.visited = True
w = curNode.label #w (string) is where we are now
#see all neighbours of curNode
curEdges = curNode.links #LL of adjacent vertices
edge = iter(curEdges) #iterator of curEdges is edge
for i in range(curEdges.size):
a = next(edge)
#curNode's neighbour is called v
v = a.divName #name of adjacent division
# print('\n'+'next neighbour is '+v)
# print(graph.findVertex(v))
if graph.findVertex(v).visited == False:
# print("\ncurrently in: "+w)
# print("looking at neighbour: "+v)
#distance from start to vertex v
#a.data: btwn curNd and neighbour
#D[w]: distance from start to curNd
totalDist = int(a.data)+int(D[w])
if totalDist < D[v]:
D[v] = totalDist #replace dist to dict with key:v
# print("travel time from "+start+" to "+v+" is "+str(totalDist))
P[v] = w
# print("Added to "+v+"'s path: "+w)
#after all neighbours have been checked
#find the closest neighbour of curNode(v)
nextMin = curNode.data.peekMin() #see heap's root: closest neighbour
nextMinNode = graph.findVertex(nextMin)
nextMinLab = nextMinNode.label
# print("nextmin is "+nextMinLab)
#if closest neighbour is alr visited
while nextMinNode.visited == True:
# print("nextmin is visited")
nextMinNode1 = curNode.data.nextMin(nextMinNode.label) #next closest
#heap.nextMin()
#return none is end of heap is reached
#no more neighbours to visit
if nextMinNode1 == None:
# print("NEXT MIN IS NONE")
nextMinLab = None
break
else:
# print("nextMin is : "+nextMinNode1)
nextMinNode = graph.findVertex(nextMinNode1)
nextMinLab = nextMinNode.label
#found the next closest and not visited
#take the label name as v
v = nextMinLab
# print("minNode is "+v)
return (D,P)
def findAirport(state):
"""Gives corresponding airport name for each state."""
if state == "NSW":
airport = "Sydney Airport"
elif state == "VIC":
airport = "Melbourne Airport"
elif state == "QLD":
airport = "Brisbane Airport"
elif state == "TAS":
airport = "Hobart Airport"
elif state == "WA":
airport = "Perth Airport"
elif state == "SA":
airport = "Adelaide Airport"
elif state == "NT":
airport = "Darwin Airport"
return airport
def makePath(P, path, src, dst):
"""Trace back shortest path and display.
Keyword arguments:
P -- dictionary containing vertices from Dijkstra
path -- stack object
src -- start journey from this division
dst -- destination is this division
"""
print("Path from "+src+" to "+dst)
previousV = P[dst]
while previousV != src:
path.push(previousV)
temp = previousV
previousV = P[temp]
path.push(previousV)
for i in range(path.getCount()):
print(path.pop())
|
995,132 | 0e16267738e820973832ea4de236f19c61f38c16 | #!/usr/bin/env python3
from pathlib import Path
class ThreadWriter(object):
"""Custom class to write data to a file accross threads"""
def __init__(self, file_: str, out_dir: str):
"""Initialize a ThreadWriter instance.
Arguments:
file_: name of file to write to
out_dir: name of directory to write file to
Raises:
ValueError: if directory does not exist
"""
if not Path(out_dir).is_dir():
raise ValueError(f"Invalid output directory: {out_dir}")
self.output_file = f"{out_dir}{file_}"
self.out_file = open(self.output_file, "a")
def write(self, data: str):
"""Write data to file
Arguments:
data: data to write to file
"""
self.out_file.write(f"{data}\n")
def flush(self):
"""Flush the file buffer"""
self.out_file.flush()
def close(self):
"""Close the file handle"""
self.out_file.close()
|
995,133 | cda7cd48366580b9ec94c46f9b65e3e54ed3717b | # Generated by Django 2.1.4 on 2018-12-26 01:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalog', '0002_auto_20181225_1720'),
]
operations = [
migrations.AddField(
model_name='bookinstance',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Author'),
),
]
|
995,134 | e9198b4109dee93ea432c9944396dbed118a5c8d | from mmsystem import Goldbeter_1995
from ssystem import SSystem
from sigmoidal import Sigmoidal
import matplotlib.pyplot as plt
import numpy as np
mm_model = Goldbeter_1995()
steps = 50
delta = 0.01
#states, velocities = mm_model.run(state=initial_state, velocity=initial_velocity, delta=0.1, steps=3)
#for i in range(states.shape[1]):
# plt.plot(states[:,i], label="MM X {}".format(i+1))
trainer = SSystem(n_vars=4)
trainer.g = np.array([[0, 0, -0.8, 0], [0.5, 0, 0, 0], [0, 0.75, 0, 0], [0.5, 0, 0, 0]])
trainer.h = np.array([[0.5, 0, 0, 0], [0, 0.75, 0, 0], [0, 0, 0.5, 0.2], [0, 0, 0, 0.8]])
trainer.alpha = np.array([12., 8., 3., 2.])
trainer.beta = np.array([10., 3., 5., 6.])
all_states = []
all_velocities = []
while len(all_states) < 1:
initial_state = np.random.random(4)
initial_velocity = np.random.random(4)
states, velocities = trainer.run(state=initial_state, velocity=initial_velocity, delta=delta, steps=steps)
if not np.any(np.isnan(states)) and not np.any(np.isnan(velocities)):
all_states.append(states)
all_velocities.append(velocities)
all_states = np.vstack(all_states)
all_velocities = np.vstack(all_velocities)
for i in range(states.shape[1]):
plt.plot(states[:,i], label="Trainer X {}".format(i+1))
#ssystem = SSystem(n_vars=4)
#ssystem.solve(all_states, all_velocities, iterations=1)
#states, velocities = ssystem.run(state=initial_state, velocity=initial_velocity, delta=delta, steps=steps)
#for i in range(states.shape[1]):
# plt.plot(states[:,i], label="S-Sys X {}".format(i+1))
nnsystem = Sigmoidal(n_vars=4)
nnsystem.solve(all_states, all_velocities)
states, velocities = nnsystem.run(state=initial_state, velocity=initial_velocity, delta=delta, steps=steps)
for i in range(states.shape[1]):
plt.plot(states[:,i], label="S-Sys X {}".format(i+1))
plt.legend()
plt.show()
|
995,135 | 405807956c069399963b39df28bdf44e2d341b8d | class ActiveCalls:
def __init__(self, pytgcalls):
self.pytgcalls = pytgcalls
# noinspection PyProtectedMember
@property
def active_calls(self):
return self.pytgcalls._active_calls
|
995,136 | c25b6126e07372c583e2d7e7e827edcced157cc7 | from django import forms
from django.db import models
from django.conf import settings
from django.urls import reverse
from django.core.exceptions import ValidationError
class MultiSelectFormField(forms.MultipleChoiceField):
widget = forms.CheckboxSelectMultiple
def __init__(self, *args, **kwargs):
self.max_choices = kwargs.pop('max_choices', 0)
super(MultiSelectFormField, self).__init__(*args, **kwargs)
def clean(self, value):
if not value and self.required:
raise forms.ValidationError(self.error_messages['required'])
if value and self.max_choices and len(value) > self.max_choices:
raise forms.ValidationError('You must select a maximum of %s choice%s.'
% (apnumber(self.max_choices), pluralize(self.max_choices)))
return value
class MultiSelectField(models.CharField):
def __init__(self, choices, *args, **kwargs):
kwargs['max_length']=len(','.join(dict(choices).keys()))
kwargs['choices'] = choices
super(MultiSelectField, self).__init__(*args, **kwargs)
def get_choices_default(self):
return self.get_choices(include_blank=False)
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
choicedict = dict(field.choices)
def formfield(self, **kwargs):
# don't call super, as that overrides default widget if it has choices
defaults = {'required': not self.blank, 'label': self.verbose_name.title(),
'help_text': self.help_text, 'choices':self.choices}
if self.has_default():
defaults['initial'] = self.get_default()
defaults.update(kwargs)
return MultiSelectFormField(**defaults)
def validate(self, values, instance):
self.clean(values)
def clean(self, values):
if isinstance(values, str):
values = values.split(',')
if not isinstance(values, list):
raise ValidationError("choices has ot be list or coma-sperated string")
ret=[]
for value in values:
if value == '':
continue
if value not in dict(self.choices).keys() and value not in dict(self.choices).values():
raise ValidationError("Not one of the choices")
else:
ret.append(dict(self.choices).get(value, value))
return ret
def get_db_prep_value(self, value, *args, **kwargs):
self.validate(value, self)
if isinstance(value, list):
return ",".join(value)
elif isinstance(value, str):
return value
else:
return repr(value)
def to_python(self, value):
print("DTHEASDF")
if isinstance(value, list):
return value
return self.clean(value)
def contribute_to_class(self, cls, name):
super(MultiSelectField, self).contribute_to_class(cls, name)
if self.choices:
func = lambda self, fieldname = name, choicedict = dict(self.choices):",".join([choicedict.get(value,value) for value in getattr(self,fieldname)])
setattr(cls, 'get_%s_display' % self.name, func)
# Create your models here.
class Meal(models.Model):
name = models.CharField(max_length=255)
recipie = models.TextField()
description = models.TextField()
class Meta:
permissions=(
("view_recipie", "Can see everything we put in our 'food'."),
)
class Price(models.Model):
SIZES=(
('s', 'XSMALL'),
('S', 'SMALL'),
('M', 'MEDIUM'),
('L', 'LARGE'),
('X', 'XLARGE')
)
includes={
's':0,
'S':2,
'M':2,
'L':3,
'X':5
}
meal = models.ForeignKey(Meal)
size = models.CharField(max_length=1, choices=SIZES)
value = models.DecimalField(max_digits=10, decimal_places=2)
def __str__(self):
return "%s - %d"%(self.size, self.value)
@property
def included(self):
if len(self.size) > 1:
for s, size in self.SIZES:
if size == self.size:
self.size = s
return self.includes[self.size]
class Order(models.Model):
STATES = (
('R', 'RECIEVED'),
('B', 'BAKING'),
('T', 'TRAVEL'),
('D', 'DONE'),
('E', 'ABBORTED')
)
meals = models.ManyToManyField(Price, through='Topping')
address = models.CharField(max_length=1023)
state = models.CharField(max_length=1, choices=STATES, default='R')
user = models.ForeignKey(settings.AUTH_USER_MODEL)
def get_absolute_url(self):
return reverse('order:track_order', kwargs={'slug':self.pk})
def __str__(self):
return self.address
class Meta():
permissions=(
('change_state', "Can change what's happening with the food!"),
)
TOPPING_PRICE=5
class Topping(models.Model):
TOPPINGS=(
('MU', 'MUSHROOMS'),
('CE', 'CHEESE'),
('HM', 'HAM'),
('PP', 'PEPPERONI'),
('BL', 'BELLPEPPER'),
('PA', 'PINAPPLE'),
('MZ', 'MOZARELLA'),
('TN', 'TUNA'),
('ON', 'ONIONS'),
('SC', 'SAUCE'),
('MT', 'MEAT'),
('TO', 'TOMATOES'),
)
meal=models.ForeignKey(Price, on_delete=models.CASCADE)
order = models.ForeignKey(Order, on_delete=models.CASCADE)
toppings = MultiSelectField(choices=TOPPINGS)
def save(self, *args, **kwargs):
if self.count > self.meal.included:
raise ValidationError("TOO MANY TOPPINGS!")
super(Topping, self).save(*args, **kwargs)
@property
def count(self):
return len(MultiSelectField(choices=self.TOPPINGS).clean(self.toppings))
@property
def price(self):
return TOPPING_PRICE*self.count + self.meal.value
|
995,137 | 3d171d177f4c025fa76f7d457e85e5ead6046836 | import os
from setuptools import setup, find_packages
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(ROOT_DIR, 'README.md'), 'r') as readme:
long_description = readme.read()
setup(
name='antiafk',
version='1.0.0',
description='Antiafk has been designed to utilize the pynput package by creating a cli that allows the user to specify a key to be to be triggered on interval.',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='cli wow world of warcraft classic afk keypress automated',
url='https://gitlab.com/rburns629/antiafk',
author='Robert Burns',
author_email='rburns629@gmail.com',
license='MIT',
packages=find_packages(exclude=['tests']),
project_urls={
'Bug Reports': 'https://gitlab.com/rburns629/antiafk/issues',
'Source': 'https://gitlab.com/rburns629/antiafk',
},
install_requires=[
'pynput',
'pytest',
'click',
'markdown'
],
entry_points={
'console_scripts': ['antiafk=antiafk:cli']
}
)
|
995,138 | d234e7f107f018f8ae055650c85443178886baf5 | from compose import compose
from pprint import pprint
from flask import current_app as app
from commands.filter import INDEXES, query_filter_from
from utils.database import db
@compose(app.manager.option('-n',
'--name',
help='model name'),
app.manager.option('-s',
'--soft',
help='is soft delete'),
*[app.manager.option('-i{}'.format(index),
'--item{}'.format(index),
help='item filtering')
for index in INDEXES])
def delete(**kwargs):
query = query_filter_from(**kwargs)
if kwargs.get('soft') == 'true':
entities = query.all()
for entity in query.all():
entity.isSoftDeleted = True
ApiHandler.save(entities)
return pprint('{} soft deleted'.format(len(entities)))
result = query.delete()
db.session.commit()
pprint('{} deleted'.format(result))
|
995,139 | 371a46fe7b76704ef10fe5e6998ea7d8bad982c1 | # coding: utf-8
"""
errors.py
~~~~~~~~~
api 错误处理文件
利用内容协商机制,将html错误响应转变为json格式响应
"""
from flask import jsonify
from app.exceptions import ValidationError
from . import api
def not_found(message):
"""404无法找到"""
response = jsonify({'error': 'not found', 'message': message})
response.status_code = 404
return response
def bad_request(message):
"""错误请求处理"""
response = jsonify({'error': 'bad request', 'message': message})
response.status_code = 400
return response
def unauthorized(message):
"""验证错误处理"""
response = jsonify({'error': 'unauthorized', 'message': message})
response.status_code = 401
return response
def forbidden(message):
"""禁止访问"""
response = jsonify({'error': 'forbidden', 'message': message})
response.status_code = 403
return response
def server_error(message):
"""服务器内部错误"""
response = jsonify({'error': 'server error', 'message':message})
response.status_code = 500
return response
@api.errorhandler(ValidationError)
def validation_error(e):
return bad_request(e.args[0])
|
995,140 | e6b027430f9f1134e00d63d9f85ee53209e2e87a | from django.urls import path
from . import views
urlpatterns = [
path('login', views.Login.as_view(), name='login'),
path('logout', views.Logout.as_view(), name='logout'),
path('register', views.Register.as_view(), name='register'),
path('reset-password', views.ResetPassword.as_view(), name='reset-password'),
path('check-status', views.CheckStatus.as_view(), name='check-status'),
path('generate-session', views.GenerateSession.as_view(), name='generate-session'),
path('check-status-hasura', views.CheckStatusHasura.as_view(), name='check-status-hasura'),
path('get-user-pk', views.GetUserPk.as_view(), name='get-user-pk'),
path('confirm-guest', views.ConfirmGuest.as_view(), name='confirm-guest'),
]
|
995,141 | 63484de04f08d4f1f4554ac029e4a8a74d05e6a8 | from os import startfile
from selenium import webdriver
import json
# Open incognito chrome session and navigate to UBC page
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--incognito')
# options.add_argument('--headless')
driver = webdriver.Chrome(executable_path = "D:\chromedriver_win32\chromedriver.exe", chrome_options=options)
driver.get("https://courses.students.ubc.ca/cs/courseschedule?pname=subjarea&tname=subj-all-departments")
# This function clicks every subject link, then every course link in every subject link, and every
# section link in every course, and scrapes relevant data for 2021 winter session and stores it in a json file
def scrape_courses():
all_courses = []
size_subjects = len(driver.find_elements_by_class_name("section1")) + len(driver.find_elements_by_class_name("section2"))
for i in range(size_subjects):
subject_xpath = "/html/body/div[2]/div[4]/table/tbody/tr["+str(i+1)+"]/td[1]/a"
try:
subject_link = driver.find_element_by_xpath(subject_xpath)
subject_link.click()
size_courses = len(driver.find_elements_by_class_name("section1")) + len(driver.find_elements_by_class_name("section2"))
for k in range(size_courses):
course_xpath = "/html/body/div[2]/div[4]/table/tbody/tr["+str(k+1)+"]/td[1]/a" if size_courses > 1 else "/html/body/div[2]/div[4]/table/tbody/tr/td[1]/a"
try:
course_link = driver.find_element_by_xpath(course_xpath)
course_link.click()
size_sections = len(driver.find_elements_by_class_name("section1")) + len(driver.find_elements_by_class_name("section2"))
try:
prereq = driver.find_element_by_xpath('/html/body/div[2]/div[4]/p[3]').text
except:
prereq = ""
for j in range(size_sections):
section_xpath = "/html/body/div[2]/div[4]/table[2]/tbody/tr["+str(j+1)+"]/td[2]/a" if size_sections > 1 else "/html/body/div[2]/div[4]/table[2]/tbody/tr/td[2]/a"
try:
section_link = driver.find_element_by_xpath(section_xpath)
section_link.click()
try:
subject = driver.find_element_by_xpath('/html/body/div[2]/ul/li[3]/a').text
except:
subject = ""
try:
code = driver.find_element_by_xpath('/html/body/div[2]/ul/li[4]/a').text
except:
code = ""
try:
section = driver.find_element_by_xpath('/html/body/div[2]/ul/li[5]').text
except:
section = ""
# Section is the whole course code (ie. CHEM 123 101), so need to isolate just the section (ie. 101)
if len(section) > 0:
section = section.split()[2]
try:
title = driver.find_element_by_xpath('/html/body/div[2]/div[4]/h5').text
except:
title = ""
try:
term = driver.find_element_by_xpath('/html/body/div[2]/div[4]/b[1]').text
except:
term = ""
try:
summary = driver.find_element_by_xpath('/html/body/div[2]/div[4]/p[1]').text
except:
summary = ""
try:
credits = driver.find_element_by_xpath('/html/body/div[2]/div[4]/p[2]').text
except:
credits = ""
try:
days = driver.find_element_by_xpath('/html/body/div[2]/div[4]/table[2]/tbody/tr/td[2]').text
except:
days = ""
try:
start = driver.find_element_by_xpath('/html/body/div[2]/div[4]/table[2]/tbody/tr/td[3]').text
except:
start = ""
try:
end = driver.find_element_by_xpath('/html/body/div[2]/div[4]/table[2]/tbody/tr/td[4]').text
except:
end = ""
try:
building = driver.find_element_by_xpath('/html/body/div[2]/div[4]/table[2]/tbody/tr/td[5]').text
except:
building = ""
try:
room = driver.find_element_by_xpath('/html/body/div[2]/div[4]/table[2]/tbody/tr/td[6]/a').text
except:
room = ""
try:
instructor = driver.find_element_by_xpath('/html/body/div[2]/div[4]/table[3]/tbody/tr/td[2]/a').text
except:
instructor = ""
course_dict = {
"course_code": code,
"course_title": title,
"section": section,
"subject": subject,
"summary": summary,
"credits": credits,
"prereqs": prereq,
"term": term,
"days": days,
"start": start,
"end": end,
"building": building,
"room": room,
"instructor": instructor
}
print(course_dict)
all_courses.append(course_dict)
driver.back()
except:
continue
driver.back()
except:
continue
driver.back()
except:
continue
return all_courses
data = scrape_courses()
with open('winter2021pt2.json', 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4) |
995,142 | 48611c208a67e3d6e9bd7e5f8c325ba9be472432 | import argparse
import os
from tqdm import tqdm
cmd_opt = argparse.ArgumentParser(description='Argparser')
cmd_opt.add_argument('-data_root', default=None, help='root of dataset')
cmd_opt.add_argument('-file_list', default=None, help='list of programs')
cmd_opt.add_argument('-init_model_dump', default=None, help='init model dump')
cmd_opt.add_argument('-save_dir', default=None, help='root for output')
cmd_opt.add_argument('-att_dir', default=None, help='root for att output')
cmd_opt.add_argument('-log_file', default=None, help='log file')
cmd_opt.add_argument('-aggressive_check', default=0, type=int, help='penalize verbose/unnecessary sub expression')
cmd_opt.add_argument('-ctx', default='cpu', help='cpu/gpu')
cmd_opt.add_argument('-inv_reward_type', default='any', help='any/ordered')
cmd_opt.add_argument('-phase', default='test', help='train/test')
cmd_opt.add_argument('-train_frac', default=0.9, type=float, help='fraction for training')
cmd_opt.add_argument('-tune_test', default=0, type=int, help='active search or not')
cmd_opt.add_argument('-init_samples', default=10000, type=int, help='initial number of samples')
cmd_opt.add_argument('-interpolate_samples', default=-1, type=int, help='interpolation samples')
cmd_opt.add_argument('-use_interpolation', default=0, type=int, help='whether use interpolation')
cmd_opt.add_argument('-seed', default=1, type=int, help='random seed')
cmd_opt.add_argument('-use_ce', default=1, type=int, help='whether use counter examples')
cmd_opt.add_argument('-rl_batchsize', default=1, type=int, help='batch size for rl training')
cmd_opt.add_argument('-single_sample', default=None, type=str, help='tune single program')
cmd_opt.add_argument('-replay_memsize', default=100, type=int, help='replay memsize')
cmd_opt.add_argument('-num_epochs', default=10000, type=int, help='num epochs')
cmd_opt.add_argument('-embedding_size', default=128, type=int, help='embedding size')
cmd_opt.add_argument('-s2v_level', default=20, type=int, help='# propagations of s2v')
cmd_opt.add_argument('-ce_batchsize', default=10000, type=int, help='batchsize for counter example check')
cmd_opt.add_argument('-eps', default=0.85, type=float, help='exploration constant')
cmd_opt.add_argument('-eps_decay', default=0.9999, type=float, help='exp decay of the exploration constant')
cmd_opt.add_argument('-num_episode', default=10, type=int, help='how many episode to accumulate before training')
cmd_opt.add_argument('-use_rudder', default=0, type=int, help='whether use rudder')
cmd_opt.add_argument('-ig_step', default=100, type=int, help='num of integrated gradient steps')
cmd_opt.add_argument('-future_steps', default=5, type=int, help='num to look ahead in rudder aux/to clip IG to zero')
cmd_opt.add_argument('-attention', default=1, type=int, help='attention for embedding')
cmd_opt.add_argument('-exit_on_find', default=0, type=int, help='exit when found')
cmd_opt.add_argument('-decoder_model', default='RecursiveDecoder', help='decoder model')
cmd_opt.add_argument('-learning_rate', default=0.001, type=float, help='random seed')
cmd_args, _ = cmd_opt.parse_known_args()
start_time = None
import time
def tic():
global start_time
start_time = time.time()
def toc():
global start_time
cur_time = time.time()
return cur_time - start_time
if cmd_args.save_dir is not None:
if not os.path.isdir(cmd_args.save_dir):
os.makedirs(cmd_args.save_dir)
tqdm.write(str(cmd_args))
|
995,143 | 5fbcb97e7157fa8ba3608a861b4d6f8bb9202782 | from source.data import char_span_to_token_span
import unittest
class SpanTest(unittest.TestCase):
def test_from_beginning(self):
passage = "The College of Arts and Letters was established as the university's first college in 1842 with" \
" the first degrees given in 1849. The university's first academic curriculum was modeled after" \
" the Jesuit Ratio Studiorum from Saint Louis University. Today the college, housed in " \
"O'Shaughnessy Hall, includes 20 departments in the areas of fine arts, humanities, and social" \
" sciences, and awards Bachelor of Arts (B.A.) degrees in 33 majors, making it the largest of the" \
" university's colleges. There are around 2,500 undergraduates and 750 graduates enrolled in the" \
" college."
start = [0]
end = [31]
self.assertEqual([1, 6], char_span_to_token_span(start, end, passage))
def test_in_the_middle(self):
passage = "The College of Science was established at the university in 1865 by president Father Patrick " \
"Dillon. Dillon's scientific courses were six years of work, including higher-level mathematics" \
" courses. Today the college, housed in the newly built Jordan Hall of Science, includes over" \
" 1,200 undergraduates in six departments of study – biology, chemistry, mathematics, physics," \
" pre-professional studies, and applied and computational mathematics and statistics (ACMS) – " \
"each awarding Bachelor of Science (B.S.) degrees. According to university statistics, its science" \
" pre-professional program has one of the highest acceptance rates to medical school of any" \
" university in the United States."
start = [78]
end = [99]
self.assertEqual([14, 16], char_span_to_token_span(start, end, passage))
|
995,144 | 35552e899802a851fd4d1f4fec79606785f681cb | import requests
from geopy.geocoders import Nominatim, GoogleV3
import what3words
import json
import config
# get your key from https://developer.what3words.com/public-api
key = config.API_KEY
class Location:
def __init__(self, w1, w2,w3):
self.w1 = w1
self.w2 = w2
self.w3 = w3
def get_words_list(self):
l = [self.w1, self.w2, self.w3]
return(l)
def get_json(self):
# get the words list
words = self.get_words_list()
# print(words)
x = (f'{words[0]}.{words[1]}.{words[2]}')
url = 'https://api.what3words.com/v3/convert-to-coordinates?words='+ x +'&key=' + key
try:
response = requests.request("GET", url)
body = json.loads(response.content)
except Exception:
print("Error: request for words: " + x )
return(body)
def get_lat_long(self):
body = self.get_json()
lng = body['coordinates']['lng']
lat = body['coordinates']['lat']
return(lat,lng)
def get_nearestplace(self):
body = self.get_json()
np = body['nearestPlace']
return(np)
def find_address(self):
tup = self.get_lat_long()
lat = tup[0]
lng = tup[1]
geolocator = Nominatim(user_agent="3words")
location = geolocator.reverse(f'{lat},{lng}')
return(location.address)
def get_directions(self):
tup = self.get_lat_long()
lat = tup[0]
lng = tup[1]
goog_maps = (f'https://www.google.com/maps/dir/?api=1&destination={lat},{lng}')
# https://www.google.com/maps/dir/?api=1&destination={lat},{lng}
return(goog_maps)
# address = Location('prom','cape','pump')
# print(address.get_json())
|
995,145 | bbad27c31b5fcca6c7cedbbe146b226ee5796839 | """
- Dynamic Programming
- TLE
"""
class Solution:
def numWays(self, words: List[str], target: str) -> int:
# c_to_match_pos example:
# a -> [0, 3],[1, 3]
# b -> [0, 1, 2, 3]
# a -> [0, 3],[1, 3]
MOD = 10 ** 9 + 7
c_to_match_pos = defaultdict(list)
chars = set(list(target))
for word in words:
word_c_to_match_pos = defaultdict(list)
for i in range(len(word)):
c_dict = word[i]
if c_dict in chars:
word_c_to_match_pos[c_dict].append(i)
for c_dict in word_c_to_match_pos:
c_to_match_pos[c_dict].append(word_c_to_match_pos[c_dict])
@lru_cache(None)
def dp(i, x):
if i == len(target):
return 1
c = target[i]
positions = c_to_match_pos[c]
ways = 0
for pos_group in positions:
idx = bisect.bisect_left(pos_group, x)
for k in range(idx, len(pos_group)):
ways = (ways + dp(i + 1, pos_group[k] + 1)) % MOD
return ways
return dp(0, 0)
"""
- Dynamic Programming
- O(wm + nx)
"""
class Solution:
def numWays(self, words: List[str], target: str) -> int:
MOD = 10 ** 9 + 7
m, n = len(words[0]), len(target)
char_at_idx_count = defaultdict(lambda: [0] * m)
for word in words:
for i, c in enumerate(word):
char_at_idx_count[c][i] += 1 # Count the number of character `c` at index `i` of all words
@lru_cache(None)
def dp(i, x):
if i == n:
return 1
if x == m: # Reached to length of words[x] but don't found any result
return 0
c = target[i]
ans = dp(i, x + 1) # Skip k_th index of words
if char_at_idx_count[c][x] > 0: # Take k_th index of words if found character `c` at index k_th
ans += dp(i + 1, x + 1) * char_at_idx_count[c][x]
ans %= MOD
return ans
return dp(0, 0) |
995,146 | 0f2636b1e6ae98e77750532f364bc9ab736c19bd | from django.conf.urls import url
from JQ import views
urlpatterns = [
url(r'^apps/$', views.AppView.as_view(), name='apps'),
url(r'^apps/(?P<pk>\d+)$', views.AppDetail.as_view() ,name='app_detail'),
url(r'^company/$', views.CompanyView.as_view(), name='company'),
url(r'^company/create/$', views.AddCompany.as_view(),name='create_company'),
url(r'^company/edit/(?P<pk>\d+)$', views.EditCompany.as_view() ,name='edit_company'),
url(r'^company/delete/(?P<pk>\d+)$', views.DeleteCompany.as_view() ,name='delete_company'),
url(r'^apps/create/$', views.CreateApp.as_view(), name='create_app'),
url(r'^apps/edit/(?P<pk>\d+)$', views.EditApp.as_view() ,name='edit_app'),
url(r'^apps/delete/(?P<pk>\d+)$', views.DeleteApp.as_view() ,name='delete_app'),
url(r'^apps/dashboard/(?P<pk>\d+)$', views.DashboardView.as_view(), name='dashboard_detail'),
url(r'^contact/create/$', views.AddContact.as_view(),name='create_contact'),
url(r'^contact/edit/(?P<pk>\d+)$', views.EditContact.as_view() ,name='edit_contact'),
url(r'^contact/delete/(?P<pk>\d+)$', views.DeleteContact.as_view() ,name='delete_contact'),
url(r'^notes/create/$', views.AddNote.as_view(),name='create_note'),
url(r'^notes/edit/(?P<pk>\d+)$', views.EditNote.as_view() ,name='edit_note'),
url(r'^notes/delete/(?P<pk>\d+)$', views.DeleteNote.as_view() ,name='delete_note'),
url(r'^resource/create/$', views.AddResource.as_view(),name='create_resource'),
url(r'^resource/edit/(?P<pk>\d+)$', views.EditResource.as_view() ,name='edit_resource'),
url(r'^resource/delete/(?P<pk>\d+)$', views.DeleteResource.as_view() ,name='delete_resource'),
url(r'^question/$', views.QuestionView.as_view(), name='questions'),
url(r'^question/create/$', views.AddQuestion.as_view(),name='create_question'),
url(r'^question/edit/(?P<pk>\d+)$', views.EditQuestion.as_view() ,name='edit_question'),
url(r'^question/delete/(?P<pk>\d+)$', views.DeleteQuestion.as_view() ,name='delete_question'),
url(r'^ask/delete/(?P<pk>\d+)$', views.DeleteAsk.as_view() ,name='delete_ask'),
]
|
995,147 | 150c072aeaf1543339466e90a5e601f009cced10 | from game.board import *
from game.player import *
from game.move import *
from game.board_drawer import *
import random
import copy
class Game():
def __init__(self, board_type, board_size, open_cells):
if board_type == 'triangular':
self.board = TriangularBoard(size=board_size, open_cells=open_cells)
else:
self.board = DiamondBoard(size=board_size, open_cells=open_cells)
self.player = Player()
self.drawer = BoardDrawer(self.board)
self.prev_state = None
def is_winning_state(self):
return self.board.get_closed_cell_count() == 1
def perform_decoded_move(self, decoded_move):
self.prev_state = copy.deepcopy(self.board.cells)
y1, x1, y2, x2 = tuple(int(decoded_move[i:i + 8], 2) for i in range(0, len(decoded_move), 8))
move = Move(self.board.cells[y1 - 1][x1 - 1], self.board.cells[y2 - 1][x2 - 1])
self.player.move_peg(move)
def is_lost_state(self):
return not self.player.get_legal_moves(self.board) and self.board.get_closed_cell_count() > 1
def is_end_state(self):
return self.is_winning_state() or self.is_lost_state()
def get_reward(self):
if self.is_winning_state():
return 10000
elif self.is_lost_state():
return -self.board.get_open_cell_count() ** 2
else:
return 0
def send_information(self):
return self.player.get_legal_moves(self.board), self.prev_state, self.board.cells, self.get_reward()
def reset(self):
self.board.reset()
def simulate_game(self): # start with random moves
while not self.is_lost_state() and not self.is_winning_state():
moves = self.player.get_legal_moves(self.board)
self.player.move_peg(self.board, random.choice(moves))
self.drawer.draw_graph()
time.sleep(.5)
if self.is_winning_state():
print('Congratulations, you have won')
self.drawer.draw_graph()
if self.is_lost_state():
print('You lost.')
self.drawer.draw_graph()
def visualize_episode(self, sap):
for action in sap[:-1]:
self.drawer.draw_graph()
_, move = action
self.perform_decoded_move(move) |
995,148 | e1142b8379880e743b82e7b67873113f8ec634d0 | #!/usr/bin/env python3
from selenium.webdriver.common.by import By
class LoginPageLocators:
# URL
URL = "login"
# Login Form
USERNAME_FIELD = (By.NAME, "username")
PASSWORD_FIELD = (By.NAME, "password")
REMEMBER_CHECKBOX = (By.ID, "remember-me")
# Page Buttons
LOGIN_BTN = (By.CSS_SELECTOR, "button.loginbtn")
SIGN_UP_BTN = (By.CSS_SELECTOR, "a.form-group")
FORGOT_BTN = (By.CSS_SELECTOR, "a[data-toggle='modal']")
# Forget Layout
FORGET_EMAIL_FIELD = (By.ID, "resetemail")
FORGET_RESET_BTN = (By.CSS_SELECTOR, "button.resetbtn")
FORGET_CLOSE_BTN = (By.CSS_SELECTOR, "button.close")
# Errors
FORGET_ERR_DIV = (By.CSS_SELECTOR, "div.alert-danger")
LOGIN_ERR_DIV = (By.CSS_SELECTOR, "div.alert-danger")
# Success
FORGET_SUCCESS_DIV = (By.CSS_SELECTOR, "div.alert-success")
|
995,149 | e880131c3b11ac2b9b029314ca3157982b4b2fb4 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-23 17:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=500)),
('description_long', models.CharField(max_length=2000)),
('truth_conditions', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
('last_date', models.DateTimeField(verbose_name='last prediction date')),
('prediction_type', models.CharField(choices=[['d', 'dichotomous'], ['v', 'value']], max_length=10)),
('conditional', models.BooleanField(default=False)),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Prediction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prediction', models.CharField(max_length=10)),
('date', models.DateTimeField(verbose_name='prediction made')),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Event')),
],
),
]
|
995,150 | 00b9d14009754fc6ecfc9dc19338b04766e894a7 | n=int(input())
print(n//2+1)
#for 1's and 2's
|
995,151 | ed4e23f5a3fe17b2895f0ebcaa2bdf4e2b924f0b | """
Given a collection of intervals, merge all overlapping intervals.
Ex 1.
Input: [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].
Ex 2.
Input: [[1,4],[4,5]]
Output: [[1,5]]
Explanation: Intervals [1,4] and [4,5] are considered overlapping.
"""
def merge_intervals_sol1(intervals):
if intervals == []:
return intervals
sorted_list = sorted(intervals, key=lambda x: x[0])
start = [sorted_list[0][0], sorted_list[0][1]]
result = []
for i in range(1,len(sorted_list)):
if sorted_list[i][0] > start[1]:
result.append(start)
start = [sorted_list[i][0], sorted_list[i][1]]
else:
start[1] = max(sorted_list[i][1], start[1])
result.append(start)
return result
def main():
# Test 1: Should return [[1,6],[8,10],[15,18]]
# intervals = [[1,3],[2,6],[8,10],[15,18]]
# Test 2: Should return [[1, 5]]
# intervals = [[1,4],[4,5]]
# Test 3: Should return [[1,4]]
intervals = [[1,4],[2,3]]
print(merge_intervals_sol1(intervals))
main() |
995,152 | d08f2764c99e634e923cd2edd307292dffcd5cec | import collections
import math
import numpy as np
import pandas as pd
from scipy.stats import entropy, skew, kurtosis
from skimage.feature import canny
from skimage.measure import regionprops
from skimage.segmentation import find_boundaries
from skimage.morphology import disk, dilation
def FeatureExtraction(Label, In, Ic, W, K=128, Fs=6, Delta=8):
"""
Calculates features from a label image.
Parameters
----------
Label : array_like
A T x T label image.
In : array_like
A T x T intensity image for Nuclei.
Ic : array_like
A T x T intensity image for Cytoplasms.
W : array_like
A 3x3 matrix containing the stain colors in its columns.
In the case of two stains, the third column is zero and will be
complemented using cross-product. The matrix should contain a
minumum two nonzero columns.
K : Number of points for boundary resampling to calculate fourier
descriptors. Default value = 128.
Fs : Number of frequency bins for calculating FSDs. Default value = 6.
Delta : scalar, used to dilate nuclei and define cytoplasm region.
Default value = 8.
Returns
-------
df : 2-dimensional labeled data structure, float64
Pandas data frame.
Notes
-----
The following features are computed:
- `Centroids`:
- X,Y
- `Morphometry features`:
- Area,
- Perimeter,
- MajorAxisLength,
- MinorAxisLength,
- Eccentricity,
- Circularity,
- Extent,
- Solidity
- `Fourier shape descriptors`:
- FSD1-FSD6
- Intensity features for hematoxylin and cytoplasm channels:
- MinIntensity, MaxIntensity,
- MeanIntensity, StdIntensity,
- MeanMedianDifferenceIntensity,
- Entropy, Energy, Skewness and Kurtosis
- Gradient/edge features for hematoxylin and cytoplasm channels:
- MeanGradMag, StdGradMag, SkewnessGradMag, KurtosisGradMag,
- EntropyGradMag, EnergyGradMag,
- SumCanny, MeanCanny
References
----------
.. [1] D. Zhang et al. "A comparative study on shape retrieval using
Fourier descriptors with different shape signatures," In Proc.
ICIMADE01, 2001.
.. [2] Daniel Zwillinger and Stephen Kokoska. "CRC standard probability
and statistics tables and formulae," Crc Press, 1999.
"""
# get total regions
NumofLabels = Label.max()
# get Label size x
size_x = Label.shape[0]
# initialize centroids
CentroidX = []
CentroidY = []
# initialize morphometry features
Area = []
Perimeter = []
Eccentricity = []
Circularity = []
MajorAxisLength = []
MinorAxisLength = []
Extent = []
Solidity = []
# initialize FSD feature group
FSDGroup = np.zeros((NumofLabels, Fs))
# initialize Nuclei, Cytoplasms
Nuclei = [[] for i in range(NumofLabels)]
Cytoplasms = [[] for i in range(NumofLabels)]
# create round structuring element
Disk = disk(Delta)
# initialize panda dataframe
df = pd.DataFrame()
# fourier descriptors, spaced evenly over the interval 1:K/2
Interval = np.round(
np.power(
2, np.linspace(0, math.log(K, 2)-1, Fs+1, endpoint=True)
)
).astype(np.uint8)
# extract feature information
for region in regionprops(Label):
# add centroids
CentroidX = np.append(CentroidX, region.centroid[0])
CentroidY = np.append(CentroidY, region.centroid[1])
# add morphometry features
Area = np.append(Area, region.area)
Perimeter = np.append(Perimeter, region.perimeter)
Eccentricity = np.append(Eccentricity, region.eccentricity)
if region.perimeter == 0:
Circularity = np.append(Circularity, 0)
else:
Circularity = np.append(
Circularity,
4 * math.pi * region.area / math.pow(region.perimeter, 2)
)
MajorAxisLength = np.append(MajorAxisLength, region.major_axis_length)
MinorAxisLength = np.append(MinorAxisLength, region.minor_axis_length)
Extent = np.append(Extent, region.extent)
Solidity = np.append(Solidity, region.solidity)
# get bounds of dilated nucleus
bounds = GetBounds(region.bbox, Delta, size_x)
# grab nucleus mask
Nucleus = (
Label[bounds[0]:bounds[1], bounds[2]:bounds[3]] == region.label
).astype(np.uint8)
# find nucleus boundaries
Bounds = np.argwhere(
find_boundaries(Nucleus, mode="inner").astype(np.uint8) == 1
)
# calculate and add FSDs
FSDGroup[region.label-1, :] = FSDs(
Bounds[:, 0], Bounds[:, 1],
K, Interval
)
# generate object coords for nuclei and cytoplasmic regions
Nuclei[region.label-1] = region.coords
# get mask for all nuclei in neighborhood
Mask = (
Label[bounds[0]:bounds[1], bounds[2]:bounds[3]] > 0
).astype(np.uint8)
# remove nucleus region from cytoplasm+nucleus mask
cytoplasm = (
np.logical_xor(Mask, dilation(Nucleus, Disk))
).astype(np.uint8)
# get list of cytoplasm pixels
Cytoplasms[region.label-1] = GetPixCoords(cytoplasm, bounds)
# calculate hematoxlyin features, capture feature names
HematoxylinIntensityGroup = IntensityFeatureGroup(In, Nuclei)
HematoxylinTextureGroup = TextureFeatureGroup(In, Nuclei)
HematoxylinGradientGroup = GradientFeatureGroup(In, Nuclei)
# calculate eosin features
EosinIntensityGroup = IntensityFeatureGroup(Ic, Cytoplasms)
EosinTextureGroup = TextureFeatureGroup(Ic, Cytoplasms)
EosinGradientGroup = GradientFeatureGroup(Ic, Cytoplasms)
# add columns to dataframe
df['X'] = CentroidX
df['Y'] = CentroidY
df['Area'] = Area
df['Perimeter'] = Perimeter
df['Eccentricity'] = Eccentricity
df['Circularity'] = Circularity
df['MajorAxisLength'] = MajorAxisLength
df['MinorAxisLength'] = MinorAxisLength
df['Extent'] = Extent
df['Solidity'] = Solidity
for i in range(0, Fs):
df['FSD' + str(i+1)] = FSDGroup[:, i]
for f in HematoxylinIntensityGroup._fields:
df['Hematoxylin' + f] = getattr(HematoxylinIntensityGroup, f)
for f in HematoxylinTextureGroup._fields:
df['Hematoxylin' + f] = getattr(HematoxylinTextureGroup, f)
for f in HematoxylinGradientGroup._fields:
df['Hematoxylin' + f] = getattr(HematoxylinGradientGroup, f)
for f in EosinIntensityGroup._fields:
df['Cytoplasm' + f] = getattr(EosinIntensityGroup, f)
for f in EosinTextureGroup._fields:
df['Cytoplasm' + f] = getattr(EosinTextureGroup, f)
for f in EosinGradientGroup._fields:
df['Cytoplasm' + f] = getattr(EosinGradientGroup, f)
return df
def GradientFeatureGroup(I, Coords):
"""
Get GradientFeatures for nuclei and cytoplasms
Parameters
----------
I : array_like
A T x T intensity image.
Coords : array_like
A N x 2 coordinate list of a region.
Returns
-------
MeanGradMag : array_like
Mean of gradient data.
StdGradMag : array_like
Standard deviation of gradient data.
EntropyGradMag : array_like
Entroy of gradient data.
EnergyGradMag : array_like
Energy of gradient data.
SkewnessGradMag : array_like
Skewness of gradient data. Value is 0 when all values are equal.
KurtosisGradMag : array_like
Kurtosis of gradient data. Value is -3 when all values are equal.
SumCanny : array_like
Sum of canny filtered gradient data.
MeanCanny : array_like
Mean of canny filtered gradient data.
Notes
-----
Return values are returned as a namedtuple.
References
----------
.. [1] Daniel Zwillinger and Stephen Kokoska. "CRC standard probability
and statistics tables and formulae," Crc Press, 1999.
"""
Gx, Gy = np.gradient(I)
diffG = np.sqrt(Gx*Gx + Gy*Gy)
BW_canny = canny(I)
f = np.zeros((len(Coords), 8))
for i in range(len(Coords)):
if len(Coords[i]) != 0:
pixOfInterest = diffG[Coords[i][:, 0], Coords[i][:, 1]]
f[i, 0] = np.mean(pixOfInterest)
f[i, 1] = np.std(pixOfInterest)
f[i, 2] = entropy(pixOfInterest)
hist, bins = np.histogram(pixOfInterest, bins=np.arange(256))
prob = hist/np.sum(hist, dtype=np.float32)
f[i, 3] = np.sum(np.power(prob, 2))
f[i, 4] = skew(pixOfInterest)
f[i, 5] = kurtosis(pixOfInterest)
bw_canny = BW_canny[Coords[i][:, 0], Coords[i][:, 1]]
f[i, 6] = np.sum(bw_canny)
f[i, 7] = f[i, 6] / len(pixOfInterest)
MeanGradMag = f[:, 0]
StdGradMag = f[:, 1]
EntropyGradMag = f[:, 2]
EnergyGradMag = f[:, 3]
SkewnessGradMag = f[:, 4]
KurtosisGradMag = f[:, 5]
SumCanny = f[:, 6]
MeanCanny = f[:, 7]
iFG = collections.namedtuple(
'iFG',
[
'MeanGradMag',
'StdGradMag',
'EntropyGradMag',
'EnergyGradMag',
'SkewnessGradMag',
'KurtosisGradMag',
'SumCanny',
'MeanCanny'
]
)
Output = iFG(
MeanGradMag, StdGradMag, EntropyGradMag, EnergyGradMag,
SkewnessGradMag, KurtosisGradMag, SumCanny, MeanCanny
)
return Output
def TextureFeatureGroup(I, Coords):
"""
Get TextureFeatures for nuclei and cytoplasms
Parameters
----------
I : array_like
A T x T intensity image.
Coords : array_like
A N x 2 coordinate list of a region.
Returns
-------
Entropy : array_like
Entroy of intensity data.
Energy : array_like
Energy of intensity data.
Skewness : array_like
Skewness of intensity data. Value is 0 when all values are equal.
Kurtosis : array_like
Kurtosis of intensity data. Value is -3 when all values are equal.
Notes
-----
Return values are returned as a namedtuple.
References
----------
.. [1] Daniel Zwillinger and Stephen Kokoska. "CRC standard probability
and statistics tables and formulae," Crc Press, 1999.
"""
f = np.zeros((len(Coords), 4))
for i in range(len(Coords)):
if len(Coords[i]) != 0:
pixOfInterest = I[Coords[i][:, 0], Coords[i][:, 1]]
hist, bins = np.histogram(pixOfInterest, bins=np.arange(256))
prob = hist/np.sum(hist, dtype=np.float32)
f[i, 0] = entropy(pixOfInterest)
f[i, 1] = np.sum(np.power(prob, 2))
f[i, 2] = skew(pixOfInterest)
f[i, 3] = kurtosis(pixOfInterest)
Entropy = f[:, 0]
Energy = f[:, 1]
Skewness = f[:, 2]
Kurtosis = f[:, 3]
iFG = collections.namedtuple(
'iFG',
[
'Entropy',
'Energy',
'Skewness',
'Kurtosis'
]
)
Output = iFG(
Entropy, Energy, Skewness, Kurtosis
)
return Output
def IntensityFeatureGroup(I, Coords):
"""
Get IntensityFeatures for nuclei and cytoplasms
Parameters
----------
I : array_like
A T x T intensity image.
Coords : array_like
A N x 2 coordinate list of a region.
Returns
-------
MeanIntensity : array_like
Mean of intensity data.
MeanMedianDifferenceIntensity : array_like
Difference between mean and median.
MaxIntensity : array_like
Max intensity data.
MinIntensity : array_like
Min intensity data.
StdIntensity : array_like
Standard deviation of intensity data.
Notes
-----
Return values are returned as a namedtuple.
"""
f = np.zeros((len(Coords), 5))
for i in range(len(Coords)):
if len(Coords[i]) != 0:
pixOfInterest = I[Coords[i][:, 0], Coords[i][:, 1]]
f[i, 0] = np.mean(pixOfInterest)
f[i, 1] = f[i, 0] - np.median(pixOfInterest)
f[i, 2] = max(pixOfInterest)
f[i, 3] = min(pixOfInterest)
f[i, 4] = np.std(pixOfInterest)
MeanIntensity = f[:, 0]
MeanMedianDifferenceIntensity = f[:, 1]
MaxIntensity = f[:, 2]
MinIntensity = f[:, 3]
StdIntensity = f[:, 4]
iFG = collections.namedtuple(
'iFG',
[
'MeanIntensity',
'MeanMedianDifferenceIntensity',
'MaxIntensity',
'MinIntensity',
'StdIntensity'
]
)
Output = iFG(
MeanIntensity, MeanMedianDifferenceIntensity,
MaxIntensity, MinIntensity, StdIntensity
)
return Output
def GetPixCoords(Binary, bounds):
"""
Get global coords of object extracted from tile.
Parameters
----------
Binary : array_like
A binary image.
bounds : array_like
A region bounds. [min_row, max_row, min_col, max_col].
Returns
-------
coords : array_like
A N x 2 list of coordinate for a region.
"""
coords = np.where(Binary == 1)
coords = np.asarray(coords)
coords[0] = np.add(coords[0], bounds[0])
coords[1] = np.add(coords[1], bounds[2])
coords = coords.T
return coords
def GetBounds(bbox, delta, N):
"""
Returns bounds of object in global label image.
Parameters
----------
bbox : tuple
Bounding box (min_row, min_col, max_row, max_col).
delta : int
Used to dilate nuclei and define cytoplasm region.
Default value = 8.
N : int
X or Y Size of label image.
Returns
-------
bounds : array_like
A region bounds. [min_row, max_row, min_col, max_col].
"""
bounds = np.zeros(4, dtype=np.uint8)
bounds[0] = max(0, math.floor(bbox[0] - delta))
bounds[1] = min(N-1, math.ceil(bbox[0] + bbox[2] + delta))
bounds[2] = max(0, math.floor(bbox[1] - delta))
bounds[3] = min(N-1, math.ceil(bbox[1] + bbox[3] + delta))
return bounds
def InterpolateArcLength(X, Y, L):
"""
Resamples boundary points [X, Y] at L total equal arc-length locations.
Parameters
----------
X : array_like
x points of boundaries
Y : array_like
y points of boundaries
L : int
Number of points for boundary resampling to calculate fourier
descriptors. Default value = 128.
Returns
-------
iX : array_like
L-length vector of horizontal interpolated coordinates with equal
arc-length spacing.
iY : array_like
L-length vector of vertical interpolated coordinates with equal
arc-length spacing.
Notes
-----
Return values are returned as a namedtuple.
"""
# length of X
K = len(X)
# initialize iX, iY
iX = np.zeros((0,))
iY = np.zeros((0,))
# generate spaced points
Interval = np.linspace(0, 1, L)
# get segment lengths
Lengths = np.sqrt(
np.power(np.diff(X), 2) + np.power(np.diff(Y), 2)
)
# check Lengths
if Lengths.size:
# normalize to unit length
Lengths = Lengths / Lengths.sum()
# calculate cumulative length along boundary
Cumulative = np.hstack((0., np.cumsum(Lengths)))
# place points in 'Interval' along boundary
Locations = np.digitize(Interval, Cumulative)
# clip to ends
Locations[Locations < 1] = 1
Locations[Locations >= K] = K - 1
Locations = Locations - 1
# linear interpolation
Lie = np.divide(
(Interval - [Cumulative[i] for i in Locations]),
[Lengths[i] for i in Locations]
)
tX = np.array([X[i] for i in Locations])
tY = np.array([Y[i] for i in Locations])
iX = tX + np.multiply(
np.array([X[i+1] for i in Locations]) - tX, Lie
)
iY = tY + np.multiply(
np.array([Y[i+1] for i in Locations]) - tY, Lie
)
iXY = collections.namedtuple('iXY', ['iX', 'iY'])
Output = iXY(iX, iY)
return Output
def FSDs(X, Y, K, Intervals):
"""
Calculated FSDs from boundary points X,Y. Boundaries are resampled to have
K equally spaced points (arclength) around the shape. The curvature is
calculated using the cumulative angular function, measuring the
displacement of the tangent angle from the starting point of the boundary.
The K-length fft of the cumulative angular function is calculated, and
then the elements of 'F' are summed as the spectral energy over
'Intervals'.
Parameters
----------
X : array_like
x points of boundaries
Y : array_like
y points of boundaries
K : int
Number of points for boundary resampling to calculate fourier
descriptors. Default value = 128.
Intervals : array_like
Intervals spaced evenly over 1:K/2.
Returns
-------
F : array_like
length(Intervals) vector containing spectral energy of
cumulative angular function, summed over defined 'Intervals'.
References
----------
.. [1] D. Zhang et al. "A comparative study on shape retrieval using
Fourier descriptors with different shape signatures," In Proc.
ICIMADE01, 2001.
"""
# check input 'Intervals'
if Intervals[0] != 1.:
Intervals = np.hstack((1., Intervals))
if Intervals[-1] != (K / 2):
Intervals = np.hstack((Intervals, float(K)))
# get length of intervals
L = len(Intervals)
# initialize F
F = np.zeros((L-1, ))
# interpolate boundaries
iXY = InterpolateArcLength(X, Y, K)
# check if iXY.iX is not empty
if iXY.iX.size:
# calculate curvature
Curvature = np.arctan2(
(iXY.iY[1:] - iXY.iY[:-1]),
(iXY.iX[1:] - iXY.iX[:-1])
)
# make curvature cumulative
Curvature = Curvature - Curvature[0]
# calculate FFT
fX = np.fft.fft(Curvature).T
# spectral energy
fX = fX * fX.conj()
fX = fX / fX.sum()
# calculate 'F' values
for i in range(L-1):
F[i] = np.round(
fX[Intervals[i]-1:Intervals[i+1]].sum(), L
).real.astype(float)
return F
|
995,153 | 4adbb7a46fa5ca709825b41bd28a66ea99c5e1b3 |
from rest_framework import serializers
from .models import User,UserProfile,Product
from django.contrib.auth import authenticate
from django.contrib.auth.models import update_last_login
from rest_framework_jwt.settings import api_settings
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ('name', 'phone')
class UserRegistrationSerializer(serializers.ModelSerializer):
profile = UserSerializer(required=False)
class Meta:
model = User
fields = ('username','email', 'password', 'profile')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
profile_data = validated_data.pop('profile')
user = User.objects.create_user(**validated_data)
UserProfile.objects.create(
user=user,
name=profile_data['name'],
phone=profile_data['phone'],
)
return user
JWT_PAYLOAD_HANDLER = api_settings.JWT_PAYLOAD_HANDLER
JWT_ENCODE_HANDLER = api_settings.JWT_ENCODE_HANDLER
class UserLoginSerializer(serializers.Serializer):
username = serializers.CharField(max_length=255)
password = serializers.CharField(max_length=128, write_only=True)
token = serializers.CharField(max_length=255, read_only=True)
def validate(self, data):
username = data.get("username", None)
password = data.get("password", None)
user = authenticate(username=username, password=password)
if user is None:
raise serializers.ValidationError(
'A user with this username and password is not found.'
)
try:
payload = JWT_PAYLOAD_HANDLER(user)
jwt_token = JWT_ENCODE_HANDLER(payload)
update_last_login(None, user)
except User.DoesNotExist:
raise serializers.ValidationError(
'User with given username and password does not exists'
)
return {
'username':user.username,
'token': jwt_token
}
###################### Product Serilizer ####################
class ProductlistSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ('sku','name', 'description','category','price','metadata')
def create(self, validated_data):
productlist = Product.objects.create(**validated_data)
return productlist
def delete(self, validated_data):
productlist = Product.objects.get(**validated_data)
productlist.delete()
return productlist
|
995,154 | c9a123c50c64b27c5245387d6b69a0f89e679120 | def load(file_path):
with open(file_path, 'r') as f:
x = f.readlines()
return x |
995,155 | a9ebda76015a763d33df388e0b97ab7fd9287de1 | from django.shortcuts import render
from django.views import View
from django.shortcuts import get_object_or_404
from django.db.models import Q
from .models import University, Department, DepName, Year
from home.mixins import SidebarUnies
class UniversityPageView(SidebarUnies, View):
def get(self, request, *args, **kwargs):
pk = self.kwargs['pk']
obj = get_object_or_404(University, pk=pk)
q1 = Year.objects.order_by('-min_point').filter(department__university__name__icontains=obj.name, year='2018')
q2 = Year.objects.order_by('-min_point').filter(department__university__name__icontains=obj.name, year='2017')
q3 = Year.objects.order_by('-min_point').filter(department__university__name__icontains=obj.name, year='2016')
q4 = Year.objects.order_by('-min_point').filter(department__university__name__icontains=obj.name, year='2015')
context={}
context['department'] = obj
context['universities'] = self.get_unies()
if q1 is not None:
context['uniresults2018'] = q1
if q2 is not None:
context['uniresults2017'] = q2
if q3 is not None:
context['uniresults2016'] = q3
if q4 is not None:
context['uniresults2015'] = q4
return render(request, 'uniPage.html', context)
class DepartmentPageView(SidebarUnies, View):
def get(self, request, *args, **kwargs):
pk = self.kwargs['pk']
obj = get_object_or_404(DepName, pk=pk)
q1 = Year.objects.order_by('-min_point').filter(department__dep_name__icontains=obj.department_name, year='2018')
q2 = Year.objects.order_by('-min_point').filter(department__dep_name__icontains=obj.department_name, year='2017')
q3 = Year.objects.order_by('-min_point').filter(department__dep_name__icontains=obj.department_name, year='2016')
q4 = Year.objects.order_by('-min_point').filter(department__dep_name__icontains=obj.department_name, year='2015')
context={}
context['department'] = obj
context['universities'] = self.get_unies()
if q1 is not None:
context['departments2018'] = q1
if q2 is not None:
context['departments2017'] = q2
if q3 is not None:
context['departments2016'] = q3
if q4 is not None:
context['departments2015'] = q4
print(Year.objects.filter(department__dep_name__icontains='Yazılım Mühendisliği'))
# context = {
# 'department': obj,
# 'universities': self.get_unies(),
#
#
# }
return render(request, 'depPage.html', context) |
995,156 | 69ab1984997e7b651af103e8600b18418c2288ad | import unittest
import reservarecursos
class TestReserva(unittest.TestCase):
def test_convertir_reserves_a_ics(self):
ics_esperat = open("test1.ics", "r").read()
json = open("test1.json", "r").read().replace('\\','')
ics = reservarecursos.convertir_reserves_a_ics_per_json(json)
self.assertEquals(ics,ics_esperat)
if __name__ == '__main__':
unittest.main()
|
995,157 | 71b7f42a56bd4572decb365d4c45cb26bed25040 | 3 3 1 1
3 3 2 2
3 3 3 3
3 3 4 4
3 3 5 4
3 3 6 5
3 3 7 6
3 3 8 7
3 3 9 8
3 4 1 1
3 4 2 2
3 4 3 3
3 4 4 4
3 4 5 4
3 4 6 5
3 4 7 6
3 4 8 6
3 4 9 7
3 4 10 8
3 4 11 9
3 4 12 10
3 5 1 1
3 5 2 2
3 5 3 3
3 5 4 4
3 5 5 4
3 5 6 5
3 5 7 6
3 5 8 6
3 5 9 7
3 5 10 8
3 5 11 8
3 5 12 9
3 5 13 10
3 5 14 11
3 5 15 12
3 6 1 1
3 6 2 2
3 6 3 3
3 6 4 4
3 6 5 4
3 6 6 5
3 6 7 6
3 6 8 6
3 6 9 7
3 6 10 8
3 6 11 8
3 6 12 9
3 6 13 10
3 6 14 10
3 6 15 11
3 6 16 12
3 6 17 13
3 6 18 14
4 4 1 1
4 4 2 2
4 4 3 3
4 4 4 4
4 4 5 4
4 4 6 5
4 4 7 6
4 4 8 6
4 4 9 7
4 4 10 7
4 4 11 8
4 4 12 8
4 4 13 9
4 4 14 10
4 4 15 11
4 4 16 12
4 5 1 1
4 5 2 2
4 5 3 3
4 5 4 4
4 5 5 4
4 5 6 5
4 5 7 6
4 5 8 6
4 5 9 7
4 5 10 7
4 5 11 8
4 5 12 8
4 5 13 9
4 5 14 9
4 5 15 10
4 5 16 10
4 5 17 11
4 5 18 12
4 5 19 13
4 5 20 14
|
995,158 | 1113ee7b6422d26dd1dbd9c13681249fdbc166e7 | from TreeLogger import TreeLogger, Mode
from TreeReciever import TreeReciever
import binary
receiver = TreeReciever()
receiver.start()
def kill():
receiver.stop()
exit()
|
995,159 | 6a3f4ca42913a6a633c198b37fd8195a10a7b2f2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/perception/onboard/proto/lidar_component_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/perception/onboard/proto/lidar_component_config.proto',
package='apollo.perception.onboard',
syntax='proto2',
serialized_pb=_b('\n=modules/perception/onboard/proto/lidar_component_config.proto\x12\x19\x61pollo.perception.onboard\"\xb3\x01\n LidarSegmentationComponentConfig\x12\x13\n\x0bsensor_name\x18\x01 \x01(\t\x12\x14\n\x0c\x65nable_hdmap\x18\x02 \x01(\x08\x12\x1d\n\x15lidar_query_tf_offset\x18\x03 \x01(\x01\x12(\n lidar2novatel_tf2_child_frame_id\x18\x04 \x01(\t\x12\x1b\n\x13output_channel_name\x18\x05 \x01(\t\"\xb0\x01\n\x1dLidarDetectionComponentConfig\x12\x13\n\x0bsensor_name\x18\x01 \x01(\t\x12\x14\n\x0c\x65nable_hdmap\x18\x02 \x01(\x08\x12\x1d\n\x15lidar_query_tf_offset\x18\x03 \x01(\x01\x12(\n lidar2novatel_tf2_child_frame_id\x18\x04 \x01(\t\x12\x1b\n\x13output_channel_name\x18\x05 \x01(\t\"X\n\x1fLidarRecognitionComponentConfig\x12\x18\n\x10main_sensor_name\x18\x01 \x01(\t\x12\x1b\n\x13output_channel_name\x18\x02 \x01(\t')
)
_LIDARSEGMENTATIONCOMPONENTCONFIG = _descriptor.Descriptor(
name='LidarSegmentationComponentConfig',
full_name='apollo.perception.onboard.LidarSegmentationComponentConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sensor_name', full_name='apollo.perception.onboard.LidarSegmentationComponentConfig.sensor_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enable_hdmap', full_name='apollo.perception.onboard.LidarSegmentationComponentConfig.enable_hdmap', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lidar_query_tf_offset', full_name='apollo.perception.onboard.LidarSegmentationComponentConfig.lidar_query_tf_offset', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lidar2novatel_tf2_child_frame_id', full_name='apollo.perception.onboard.LidarSegmentationComponentConfig.lidar2novatel_tf2_child_frame_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_channel_name', full_name='apollo.perception.onboard.LidarSegmentationComponentConfig.output_channel_name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=272,
)
_LIDARDETECTIONCOMPONENTCONFIG = _descriptor.Descriptor(
name='LidarDetectionComponentConfig',
full_name='apollo.perception.onboard.LidarDetectionComponentConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sensor_name', full_name='apollo.perception.onboard.LidarDetectionComponentConfig.sensor_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enable_hdmap', full_name='apollo.perception.onboard.LidarDetectionComponentConfig.enable_hdmap', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lidar_query_tf_offset', full_name='apollo.perception.onboard.LidarDetectionComponentConfig.lidar_query_tf_offset', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lidar2novatel_tf2_child_frame_id', full_name='apollo.perception.onboard.LidarDetectionComponentConfig.lidar2novatel_tf2_child_frame_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_channel_name', full_name='apollo.perception.onboard.LidarDetectionComponentConfig.output_channel_name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=275,
serialized_end=451,
)
_LIDARRECOGNITIONCOMPONENTCONFIG = _descriptor.Descriptor(
name='LidarRecognitionComponentConfig',
full_name='apollo.perception.onboard.LidarRecognitionComponentConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='main_sensor_name', full_name='apollo.perception.onboard.LidarRecognitionComponentConfig.main_sensor_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_channel_name', full_name='apollo.perception.onboard.LidarRecognitionComponentConfig.output_channel_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=453,
serialized_end=541,
)
DESCRIPTOR.message_types_by_name['LidarSegmentationComponentConfig'] = _LIDARSEGMENTATIONCOMPONENTCONFIG
DESCRIPTOR.message_types_by_name['LidarDetectionComponentConfig'] = _LIDARDETECTIONCOMPONENTCONFIG
DESCRIPTOR.message_types_by_name['LidarRecognitionComponentConfig'] = _LIDARRECOGNITIONCOMPONENTCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LidarSegmentationComponentConfig = _reflection.GeneratedProtocolMessageType('LidarSegmentationComponentConfig', (_message.Message,), dict(
DESCRIPTOR = _LIDARSEGMENTATIONCOMPONENTCONFIG,
__module__ = 'modules.perception.onboard.proto.lidar_component_config_pb2'
# @@protoc_insertion_point(class_scope:apollo.perception.onboard.LidarSegmentationComponentConfig)
))
_sym_db.RegisterMessage(LidarSegmentationComponentConfig)
LidarDetectionComponentConfig = _reflection.GeneratedProtocolMessageType('LidarDetectionComponentConfig', (_message.Message,), dict(
DESCRIPTOR = _LIDARDETECTIONCOMPONENTCONFIG,
__module__ = 'modules.perception.onboard.proto.lidar_component_config_pb2'
# @@protoc_insertion_point(class_scope:apollo.perception.onboard.LidarDetectionComponentConfig)
))
_sym_db.RegisterMessage(LidarDetectionComponentConfig)
LidarRecognitionComponentConfig = _reflection.GeneratedProtocolMessageType('LidarRecognitionComponentConfig', (_message.Message,), dict(
DESCRIPTOR = _LIDARRECOGNITIONCOMPONENTCONFIG,
__module__ = 'modules.perception.onboard.proto.lidar_component_config_pb2'
# @@protoc_insertion_point(class_scope:apollo.perception.onboard.LidarRecognitionComponentConfig)
))
_sym_db.RegisterMessage(LidarRecognitionComponentConfig)
# @@protoc_insertion_point(module_scope)
|
995,160 | bbc00467cecb37252040050b7efde9bacf9d118e | import sys
sys.path.append('../balderdash')
import unittest
import random
import balderdash
dg = balderdash.grafana
teamname = "teamname"
appname = "appname"
envname = "envname"
def random_metric():
name = str(random.random())
return dg.Metric(name, right_y_axis_metric_name=name)
def random_panel():
return dg.Panel(str(random.random()), str(random.random()), str(random.random()), str(random.random()), ) \
.with_metric(random_metric()) \
.with_metric(random_metric())
def random_singlestat_panel():
return dg.SingleStatPanel(str(random.random()), str(random.random()), str(random.random())) \
.with_metric(random_metric()) \
.with_metric(random_metric())
def random_row():
return dg.Row() \
.with_panel(random_panel()) \
.with_panel(random_singlestat_panel())
class GrafanaDashboardTest(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GrafanaDashboardTest, self).__init__(methodName)
self.title = str(random.random())
self.panelId = random.randint(1, 999)
self.span = random.randint(1, 100)
def test_metric_renders(self):
target = 'target'
expected = {
"target": target
}
self.assertEqual(expected, dg.Metric(target).build())
def test_panel_renders(self):
yaxis = random.choice([dg.YAxisFormat.Bits, dg.YAxisFormat.BitsPerSecond, dg.YAxisFormat.Bytes])
filled = random.choice([[dg.FillStyle.Filled, dg.FillStyle.Unfilled]])
stacked = random.choice([[dg.StackStyle.Stacked, dg.StackStyle.Stacked]])
minimum = 5
metric1 = random_metric()
metric2 = random_metric()
expected = {
"title": self.title,
"error": False,
"span": self.span,
"editable": True,
"type": "graph",
"id": self.panelId,
"datasource": None,
"renderer": "flot",
"x-axis": True,
"y-axis": True,
"y_formats": [
yaxis,
yaxis
],
"grid": {
"leftMax": None,
"rightMax": None,
"leftMin": minimum,
"rightMin": None,
"threshold1": None,
"threshold2": None,
"threshold1Color": "rgba(216, 200, 27, 0.27)",
"threshold2Color": "rgba(234, 112, 112, 0.22)"
},
"lines": True,
"fill": filled,
"linewidth": 1,
"points": False,
"pointradius": 5,
"bars": False,
"stack": stacked,
"percentage": False,
"legend": {
"show": True,
"values": False,
"min": False,
"max": False,
"current": False,
"total": False,
"avg": False
},
"nullPointMode": "connected",
"steppedLine": False,
"tooltip": {
"value_type": "cumulative",
"shared": False
},
"targets": [metric1.build(), metric2.build()],
"aliasColors": {},
"seriesOverrides": [{
"alias": metric1.right_y_axis_metric_name,
"yaxis": 2
}, {
"alias": metric2.right_y_axis_metric_name,
"yaxis": 2
}],
"links": []
}
self.assertEqual(expected, dg.Panel(self.title, yaxis, filled, stacked, minimum)
.with_metric(metric1)
.with_metric(metric2)
.build(self.panelId, self.span))
def test_singlestat_panel_renders(self):
prefix = "some prefix"
postfix = "some postfix"
metric1 = random_metric()
metric2 = random_metric()
expected = {
"title": self.title,
"error": False,
"span": self.span,
"editable": True,
"type": "singlestat",
"id": self.panelId,
"links": [],
"maxDataPoints": 100,
"interval": None,
"targets": [metric1.build(), metric2.build()],
"cacheTimeout": None,
"format": "none",
"prefix": prefix,
"postfix": postfix,
"valueName": "current",
"prefixFontSize": "100%",
"valueFontSize": "120%",
"postfixFontSize": "100%",
"thresholds": "0,50,200",
"colorBackground": True,
"colorValue": False,
"colors": [
"rgba(225, 40, 40, 0.59)",
"rgba(245, 150, 40, 0.73)",
"rgba(71, 212, 59, 0.4)"
],
"sparkline": {
"show": True,
"full": False,
"lineColor": "rgb(71, 248, 35)",
"fillColor": "rgba(130, 189, 31, 0.18)"
}
}
self.assertEqual(expected, dg.SingleStatPanel(self.title, prefix, postfix)
.with_metric(metric1)
.with_metric(metric2)
.build(self.panelId, self.span))
def test_row_splits_panels_evenly(self):
panel1 = random_panel()
panel2 = random_panel()
expected = {
"title": "Row %d" % 1,
"height": "250px",
"editable": True,
"collapse": False,
"panels": [panel1.build(11, 6), panel2.build(12, 6)]
}
self.assertEqual(expected, dg.Row()
.with_panel(panel1)
.with_panel(panel2)
.build(1))
def test_dashboard_renders(self):
row1 = random_row()
row2 = random_row()
expected = {
"title": self.title,
"originalTitle": self.title,
"tags": [],
"style": "dark",
"timezone": "browser",
"editable": True,
"hideControls": False,
"sharedCrosshair": False,
"rows": [row1.build(1), row2.build(2)],
"nav": [
{
"type": "timepicker",
"enable": True,
"status": "Stable",
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
],
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"now": True,
"collapse": False,
"notice": False
}
],
"time": {
"from": "now-15m",
"to": "now"
},
"templating": {
"list": []
},
"annotations": {
"list": [],
"enable": False
},
"refresh": "10s",
"version": 6,
"hideAllLegends": False
}
self.assertEqual(expected, dg.Dashboard(self.title)
.with_row(row1)
.with_row(row2)
.build())
if __name__ == "__main__":
unittest.main() |
995,161 | 3dee7ea2d66e7acdd6a8c78256864b07dbb3cb2b | # -*- coding: utf-8 -*-
import io
import sys
import fcntl
import struct
import termios
from .csi import *
from .text import Text, TEXT_ENCODING
from ..disposable import Disposable
__all__ = ('Console', 'ConsoleError',)
#------------------------------------------------------------------------------#
# Console #
#------------------------------------------------------------------------------#
class ConsoleError (Exception): pass
class Console (object):
default_size = (80, 40)
def __init__ (self, stream = None):
self.stream = stream or io.open (sys.stderr.fileno (), 'wb', closefd = False)
self.labels = []
self.flags = []
self.stream.write (CursorVisible (False))
#--------------------------------------------------------------------------#
# Write #
#--------------------------------------------------------------------------#
def Write (self, *texts):
write = self.stream.write
for text in texts:
if isinstance (text, Text):
write (text.EncodeCSI ())
else:
write (str (text).encode (TEXT_ENCODING))
def WriteBytes (self, value):
self.stream.write (value)
#--------------------------------------------------------------------------#
# Line #
#--------------------------------------------------------------------------#
def Line (self):
position = len (self.labels) + 1
write = self.stream.write
write (b'\n')
write (MoveUp (position))
write (Insert (1))
return Disposable (lambda: (
write (MoveUp (-position)),
write (MoveColumn (0)),
self.stream.flush ()))
#--------------------------------------------------------------------------#
# Label #
#--------------------------------------------------------------------------#
def Label (self):
return ConsoleLabel (self)
#--------------------------------------------------------------------------#
# Size #
#--------------------------------------------------------------------------#
def Size (self):
if not self.stream.isatty ():
return self.default_size
rows, columns, xpixel, ypixel = struct.unpack ('4H',
fcntl.ioctl (self.stream.fileno (), termios.TIOCGWINSZ, struct.pack ('4H', 0, 0, 0, 0)))
return rows, columns
#--------------------------------------------------------------------------#
# Flush #
#--------------------------------------------------------------------------#
def Flush (self):
return self.stream.flush ()
#--------------------------------------------------------------------------#
# Moves #
#--------------------------------------------------------------------------#
def Move (self, row = None, column = None):
if row is not None:
self.stream.write (MoveUp (row))
if column is not None:
self.stream.write (MoveColumn (column))
#--------------------------------------------------------------------------#
# Flags #
#--------------------------------------------------------------------------#
TERMINAL_IFLAG = 0
TERMINAL_OFLAG = 1
TERMINAL_CFLAG = 2
TERMINAL_LFLAG = 3
TERMINAL_ISPEED = 4
TERMINAL_OSPEED = 5
TERMINAL_CC = 6
def FlagsPush (self, on = None, off = None, index = TERMINAL_LFLAG):
if not self.stream.isatty ():
return
# save
flags = termios.tcgetattr (self.stream.fileno ())
self.flags.append (list (flags))
# update
if on is not None:
flags [index] |= on
if off is not None:
flags [index] &= ~off
# set
termios.tcsetattr (self.stream.fileno (), termios.TCSADRAIN, flags)
def FlagsPop (self):
if not self.stream.isatty () or len (self.flags) == 0:
return False
termios.tcsetattr (self.stream.fileno (), termios.TCSADRAIN, self.flags.pop ())
return True
def NoEcho (self):
self.FlagsPush (off = termios.ECHO)
return Disposable (lambda: self.FlagsPop ())
#--------------------------------------------------------------------------#
# Disposable #
#--------------------------------------------------------------------------#
def Dispose (self):
# destroy labels
for label in tuple (self.labels):
label.Dispose ()
# restore flags
while self.FlagsPop ():
pass
self.stream.write (CursorVisible (True))
self.stream.write (b'\x1b[m')
self.stream.flush ()
def __enter__ (self):
return self
def __exit__ (self, et, eo, tb):
self.Dispose ()
return False
#------------------------------------------------------------------------------#
# Console Label #
#------------------------------------------------------------------------------#
class ConsoleLabel (object):
def __init__ (self, console):
self.console = console
self.index = len (console.labels)
console.labels.append (self)
console.stream.write (b'\n')
#--------------------------------------------------------------------------#
# Update #
#--------------------------------------------------------------------------#
def Update (self, erase = None):
if self.index < 0:
raise ConsoleError ('Label has already been disposed')
position = len (self.console.labels) - self.index
write = self.console.stream.write
write (MoveUp (position))
write (MoveColumn (0))
if erase is None or erase:
write (Erase ())
return Disposable (lambda: (
write (MoveUp (-position)),
write (MoveColumn (0)),
self.console.stream.flush ()))
#--------------------------------------------------------------------------#
# Disposable #
#--------------------------------------------------------------------------#
def Dispose (self):
if self.index < 0:
return
index, self.index = self.index, -1
del self.console.labels [index]
for label in self.console.labels [index:]:
label.index -= 1
position = len (self.console.labels) - index + 1
write = self.console.stream.write
# cursor -> label
write (MoveUp (position))
write (MoveColumn (0))
write (Delete (1))
# cursor -> end
write (MoveUp (-position + 1))
self.console.stream.flush ()
def __enter__ (self):
return self
def __exit__ (self, et, eo, tb):
self.Dispose ()
return False
# vim: nu ft=python columns=120 :
|
995,162 | 8d33bf9ff976da738e99b14a9f9be552c7164fa1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=======================
#### file: TestController.py ####
#=======================
import sys
import os
import nose
from nose import with_setup
import urllib2
import urllib
c_path = os.getcwd()
base_path = c_path[:c_path.rfind("backend_tests")]
sys.path.append(base_path)
##所有的测试模块import在这下面:
##################UserController##########################
import string
import random
from util import StringUtil
from controller import UserController
import urllib
import urllib2
import cookielib
from dao import UserDao
#生成随机电话号码
def random_tel():
tel_list = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
tel_start = ['13','15','14','18']
b = string.join(random.sample(tel_list,9)).replace(" ","")
a = random.choice(tel_start)
tel = a+b
return tel
tel = random_tel()
email = StringUtil.token_generator() + '@qq.com'
#随机选取注册方式:手机|邮箱
def random_email_or_phone():
random_dict = {
'0':tel,
'1':email
}
random_key = random.choice(random_dict.keys())
random_value = random_dict[random_key]
return random_value
def UniversalTest_post(postData,url):
postData = urllib.urlencode(postData)
request = urllib2.Request(
url = url,
data = postData,
headers = headers
)
response = urllib2.urlopen(request)
text = response.read()
text=eval(text)
assert text['status'] ==1
log_way = random_email_or_phone()
random_name = StringUtil.token_generator()
phone = StringUtil.token_generator()
owner = StringUtil.token_generator()
tid = ''
active_code = ''
token = ''
did = StringUtil.token_generator()
cid = StringUtil.token_generator()
imei = StringUtil.token_generator()
title = StringUtil.token_generator()
content = StringUtil.token_generator()
msg_id = ''
policy_name = StringUtil.token_generator()
platform = random.choice(['ios','android'])
policy_content = {
'policy_pwd':'123456',#密码策略
'policy_locktime':'1',#锁屏时间
'policy_device_limit':{
'camera':'0',#相机0:关闭,1:开启
'bluetooth':'0',#蓝牙0:关闭,1:开启
'browser':'0',#浏览器0:关闭,1:开启
'email':'0',#电子邮件0:关闭,1:开启
'photo':'0',#图库0:关闭,1:开启
'settings':'0' #设置0:关闭,1:开启
},#设备限制
'policy_net_limit':{
'emergency_calls':'0',#紧急电话0:关闭,1:开启
'mseeage':'0',#短信0:关闭,1:开启
'wifi':'0',#Wi-fi0:关闭,1:开启
},#网络限制
'policy_wifi':{
'wifi_name':'polysaas2',
'wifi_pwd':'1q2w3e4r'
} #wifi配置
}
app_name = StringUtil.token_generator()
doc_name = StringUtil.token_generator()
#cookie
#获取一个保存cookie的对象
cj = cookielib.LWPCookieJar()
#将一个保存cookie对象,和一个HTTP的cookie的处理器绑定
cookie_support = urllib2.HTTPCookieProcessor(cj)
#创建一个opener,将保存了cookie的http处理器,还有设置一个handler用于处理http的URL的打开
opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
#将包含了cookie、http处理器、http的handler的资源和urllib2对象板顶在一起
urllib2.install_opener(opener)
headers = {'User-Agent':'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0'}
#登出
def test_LogoutHandler():
request = urllib2.Request('http://localhost/user/logout')
response = urllib2.urlopen(request)
# assert cj._cookies['tid']=='' and cj._cookies['timestamp']==''
#注册
def test_RegisterHandler():
postData = {
'email_or_phone':log_way,
'user_name':random_name,
'pwd':'111111'
}
postData = urllib.urlencode(postData)
request = urllib2.Request('http://localhost/user/register', postData)
response = urllib2.urlopen(request)
text = response.read()
text = eval(text)
print log_way
assert text['status'] == 1
#登录
def test_LoginHandler():
#print tel
postData={
'email_or_phone':log_way,
'pwd':'111111'
}
url = 'http://localhost/user/login'
UniversalTest_post(postData, url)
#print "\n", cj._cookies
####################DeviceController###############
#添加设备
def test_Devices_AddHandler():
postData={
'phone':phone,
'owner':owner
}
url = 'http://localhost/devices'
UniversalTest_post(postData, url)
#设备列表
def test_Devices_ListHandler():
request = urllib2.Request(
url = 'http://localhost/devices',
headers = headers
)
response = urllib2.urlopen(request)
# request = urllib2.Request('http://localhost/devices')
# response = urllib2.urlopen(request)
text = response.read()
text = text.replace('false','"false"')
text = eval(text)
# print text
#模拟激活tid,active_code
global tid
global active_code
tid = text['data'][0]['tid']
active_code = text['data'][0]['active_code']
# print tid
# print active_code
assert text['status'] == 1
#模拟激活
def test_Devices_EnrollHandler():
postData={
'tid':tid,
'active_code':active_code,
'phone':phone
}
postData = urllib.urlencode(postData)
request = urllib2.Request(
url = 'http://localhost/android/enroll',
data = postData,
headers = headers
)
response = urllib2.urlopen(request)
text = response.read()
text=eval(text)
global token
token = text['token']
assert text['status'] ==1
#模拟设备初始化
def test_Devices_InitialHandler():
postData={
'token':token,
'phone':phone,
'device_id':did,
'client_id':cid,
'imei':imei
}
url = 'http://localhost/android/initial'
UniversalTest_post(postData, url)
#设备详情
def test_Devices_DetailHandler():
request = urllib2.Request(
url = 'http://localhost/devices/'+str(did),
headers = headers
)
response = urllib2.urlopen(request)
text = response.read()
text=eval(text)
assert text['status'] ==1
#设备位置更新
def test_Loc_UpdateHandler():
postData = {
'token':token,
'device_id':did
}
url = ('http://localhost/loc?device_id='+str(did))
UniversalTest_post(postData, url)
#设备位置记录
def test_Loc_LatestHandler():
request = urllib2.Request(
url = ('http://localhost/loc?device_id='+str(did)),
headers = headers
)
response = urllib2.urlopen(request)
text = response.read()
text=eval(text)
assert text['status'] ==1
#删除设备
def test_Device_DeleteHandler():
request = urllib2.Request(
url = ('http://localhost/devices/'+str(did)),
headers = headers
)
request.get_method = lambda:'DELETE'
response = urllib2.urlopen(request)
text = response.read()
text=eval(text)
assert text['status'] ==1
# ###################MsgController###################
# #消息添加
# def test_Msg_AddHandler():
# postData = {
# 'title':title,
# 'content':content
# }
# url = 'http://localhost/msgs'
# UniversalTest_post(postData, url)
# #消息列表
# def test_Msg_ListHandler():
# request = urllib2.Request(
# url = 'http://localhost/msgs',
# headers = headers
# )
# response = urllib2.urlopen(request)
# text = response.read()
# #text = text.replace('false','"false"')
# text = eval(text)
# print text
# #取msg_id
# global msg_id
# msg_id = text['data'][0]['msg_id']
# assert text['status'] == 1
# #消息推送
# def test_Msg_Push_SandHandler():
# postData = {
# 'type':'msg',
# 'id':msg_id,
# 'device_list':device_id
# }
# url = 'http://localhost/push/send?type='+type+'&id='+id+''
# UniversalTest_post(postData, url)
# #消息取消推送
# def test_Msg_Push_CancelHandler():
# postData = {
# 'type':'msg',
# 'id':msg_id,
# 'device_list':device_id
# }
# url = 'http://localhost/push/cancel?type='+type+'&id='+id+''
# UniversalTest_post(postData, url)
# #消息删除
# def test_Msg_DeleHandler():
# request = urllib2.Request(
# url = ('http://localhost/msgs/'+msg_id),
# headers = headers
# )
# request.get_method = lambda:'DELETE'
# response = urllib2.urlopen(request)
# text = response.read()
# text=eval(text)
# assert text['status'] ==1
# #################PolicyController#################
# #添加策略
# def test_Policy_AddHandler():
# postData = {
# 'policy_name' : policy_name,
# 'platform' : platform
# }
# url = 'http://localhost/policies'
# UniversalTest_post(postData, url)
# #策略列表
# def test_Policy_ListHandler():
# request = urllib2.Request(
# url = 'http://localhost/policies',
# headers = headers
# )
# response = urllib2.urlopen(request)
# text = response.read()
# #text = text.replace('false','"false"')
# text = eval(text)
# print text
# global policy_id
# policy_id = text['data'][0]['policy_id']
# assert text['status'] == 1
# #编辑策略
# def test_Policy_EditHandler():
# postData = {
# 'policy_content':policy_content
# }
# url = 'http://localhost/policies/'+str(policy_id)
# UniversalTest_post(postData, url)
# #策略推送
# def test_Policy_Push_SandHandler():
# postData = {
# 'type':'policy',
# 'id':policy_id,
# 'device_list':device_id
# }
# url = 'http://localhost/push/send?type='+type+'&id='+id+''
# UniversalTest_post(postData, url)
# #策略取消推送
# def test_Policy_Push_CancelHandler():
# postData = {
# 'type':'policy',
# 'id':policy_id,
# 'device_list':device_id
# }
# url = 'http://localhost/push/cancel?type='+type+'&id='+id+''
# UniversalTest_post(postData, url)
# #策略删除
# def test_Policy_DeleHandler():
# request = urllib2.Request(
# url = ('http://localhost/policies/'+policy_id),
# headers = headers
# )
# request.get_method = lambda:'DELETE'
# response = urllib2.urlopen(request)
# text = response.read()
# text=eval(text)
# assert text['status'] ==1
# ##############################AppController################################
# #添加应用
# def test_App_AddHandler():
# postData = {
# 'app_name' : app_name,
# }
# url = 'http://localhost/apps'
# UniversalTest_post(postData, url)
# #应用列表
# def test_App_ListHandler():
# request = urllib2.Request(
# url = 'http://localhost/apps',
# headers = headers
# )
# response = urllib2.urlopen(request)
# text = response.read()
# #text = text.replace('false','"false"')
# text = eval(text)
# print text
# global app_id
# app_id = text['data'][0]['app_id']
# assert text['status'] == 1
# #应用推送
# def test_App_Push_SandHandler():
# postData = {
# 'type':'app',
# 'id':app_id,
# 'device_list':device_id
# }
# url = 'http://localhost/push/send?type='+type+'&id='+id+''
# UniversalTest_post(postData, url)
# #应用取消推送
# def test_App_Push_CancelHandler():
# postData = {
# 'type':'app',
# 'id':app_id,
# 'device_list':device_id
# }
# url = 'http://localhost/push/cancel?type='+type+'&id='+id+''
# UniversalTest_post(postData, url)
# #应用删除
# def test_App_DeleHandler():
# request = urllib2.Request(
# url = ('http://localhost/apps/'+app_id),
# headers = headers
# )
# request.get_method = lambda:'DELETE'
# response = urllib2.urlopen(request)
# text = response.read()
# text=eval(text)
# assert text['status'] ==1
# #########################DocController######################
# #添加文档
# def test_Doc_AddHandler():
# postData = {
# 'doc_name':doc_name,
# }
# url = 'http://localhost/docs'
# UniversalTest_post(postData, url)
# #文档列表
# def test_Doc_ListHandler():
# request = urllib2.Request(
# url = 'http://localhost/docs',
# headers = headers
# )
# response = urllib2.urlopen(request)
# text = response.read()
# #text = text.replace('false','"false"')
# text = eval(text)
# print text
# global doc_id
# doc_id = text['data'][0]['doc_id']
# assert text['status'] == 1
# #文档推送
# def test_Doc_Push_SandHandler():
# postData = {
# 'type':'doc',
# 'id':doc_id,
# 'device_list':device_id
# }
# url = 'http://localhost/push/send?type='+type+'&id='+id+''
# UniversalTest_post(postData, url)
# #文档取消推送
# def test_Doc_Push_CancelHandler():
# postData = {
# 'type':'doc',
# 'id':doc_id,
# 'device_list':device_id
# }
# url = 'http://localhost/push/cancel?type='+type+'&id='+id+''
# UniversalTest_post(postData, url)
# #文档删除
# def test_Doc_DeleHandler():
# request = urllib2.Request(
# url = ('http://localhost/docs/'+doc_id),
# headers = headers
# )
# request.get_method = lambda:'DELETE'
# response = urllib2.urlopen(request)
# text = response.read()
# text=eval(text)
# assert text['status'] ==1
|
995,163 | 7d1a4163e1652fca6b48d1305526d5fbf14e8aec | from node_reader import NodeReader
from edge_reader import EdgeReader
from intersection_reader import IntersectionReader
from printer import Printer
from shortest_path import ShortestPath
import pickle
class Grid:
"""
Builds a grid used for translating the graph into a meaningful 2D arrangement.
"""
def __init__(self, params = {}):
# Save some important attributes.
self.node_file = params['node_file']
self.intersection_file = params['intersection_file']
self.closed_intersections = params['closed_intersections']
self.edge_file = params['edge_file']
self.type_map = params['type_map']
self.paths_file = params.get('paths_file', None)
self.new_paths_file = params.get('new_paths_file', None)
# Perform initialization of the gridspace.
self.initialize_nodes()
self.initialize_intersections()
self.initialize_edges()
self.set_paths()
def initialize_nodes(self):
reader = NodeReader(self.node_file)
# Save the node dict for later lookups.
self.node_dict = reader.node_dict
# Save off the node array.
self.nodes = reader.nodes
# Initialize a list container of entrance nodes.
self.entrance_nodes = []
# Initialize a list container of destination nodes.
self.destination_nodes = []
# Iterate through the returned nodes,
for node in self.nodes:
# If the node is an entrance node, add it to the entrance nodes list.
if node.node_type == self.type_map['entrance']:
self.entrance_nodes.append(node)
# If the node is an exit node, add it to the exit nodes list.
elif node.node_type == self.type_map['exit']:
self.destination_nodes.append(node)
def initialize_intersections(self):
reader = IntersectionReader(self.intersection_file, self.node_dict)
self.intersections_dict = reader.intersections_dict
self.intersections = reader.intersections
for int_id in self.closed_intersections:
nodes_list = self.intersections_dict.get(int_id).nodes
for node in nodes_list:
del self.node_dict[node.node_id]
del self.intersections_dict[int_id]
def initialize_edges(self):
reader = EdgeReader(self.edge_file)
# Save off the edges array.
edges = reader.edges
for edge in edges:
# Look up the first node.
node_a = self.node_dict.get(edge.node_a)
# Look up the second node to make sure it exists.
node_b = self.node_dict.get(edge.node_b)
if node_a is not None and node_b is not None:
# Add a new entry to node a's neighbors dict for node b, setting it
# to the weight.
node_a.neighbors[node_b.node_id] = edge.weight
# Added to make undirected.
node_b.neighbors[node_a.node_id] = edge.weight
# Initialize a dictionary to store just the neighbors.
self.neighbors_dict = {}
# For every entry in the node dictionary,
for node_id, node_obj in self.node_dict.iteritems():
# Save just the neighbors.
self.neighbors_dict[node_id] = node_obj.neighbors
def set_paths(self):
# If we already have an existing file containing the paths data in
# pickle format, read it in and update the paths attributes on our
# nodes.
if self.paths_file:
# Load the data.
with open(self.paths_file, 'rb') as f:
paths_data = pickle.load(f)
for node in self.entrance_nodes:
data_for_node = paths_data.get(node.node_id, None)
if data_for_node:
node.paths = data_for_node
else:
# Initialize a paths container that we will write to a file.
paths_dict = {}
Printer.pp('Performing preprocessing step to find shortest paths. Please bear with us.')
num_nodes = len(self.entrance_nodes)
# Iterate through every entrance node, updating the *paths*
# dictionary attribute to include the shortest path to every
# destination node.
for indx, node in enumerate(self.entrance_nodes):
node_id = node.node_id
# Compute the paths for every possible destination.
for destination in self.destination_nodes:
destination_node_id = destination.node_id
node.paths[destination_node_id] = ShortestPath(self.neighbors_dict,
node_id,
destination_node_id).path
paths_dict[node_id] = node.paths
percent_done = ((indx+1)/float(num_nodes))*100
print('%.2f percent done.' % percent_done)
# If we've specified a file to write our shortest paths to,
if self.new_paths_file:
# Write the paths to a file.
with open(self.new_paths_file, 'wb') as f:
pickle.dump(paths_dict, f, -1)
print('---> Dumped paths to %s.' % self.new_paths_file)
print('---> Preprocessing done.')
|
995,164 | 8295a358785e3d2b89d4b660d3bd9192d3645007 | #!/usr/local/bin/python
# Problem URL : https://www.hackerrank.com/challenges/py-set-intersection-operation/problem
# Enter your code here. Read input from STDIN. Print output to STDOUT
set_one_cnt = raw_input()
set_one = set(map(int, raw_input().strip().split()))
set_two_cnt = raw_input()
set_two = set(map(int, raw_input().strip().split()))
set_intersection = set_one & set_two
print(len(set_intersection))
|
995,165 | 11771db48191279b7859876fc7e9a5cffefb5e4b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# @Time: 2020-03-22 00:11
# @Project: python-basic-knowledge
# @Version: 2020.03 builder 220011
# @Author: Adam Ren
# @Email: adam_ren@sina.com
# @Github: https://github.com/ren-adam/python-basic-knowledge
# @Site: http://renpeter.com
# @File : __init__.py.py
# @Software: PyCharm
#
|
995,166 | 7c294ce321824ac5327d73627a1f132725aafa98 | #!/bin/env python
print("Hello world.") |
995,167 | 38ad6ba0dd951679f98ef640d30935718c0421ad | from __future__ import absolute_import
from random import randint
from collections import OrderedDict
from rest_framework.serializers import CharField
from django_alexa.api import fields, intent, ResponseBuilder
@intent(app="presentation")
def SoundCheck(session):
"""
---
the soundcheck
"""
message = "Check. Check. Check one two. Can everyone here me alright?"
return ResponseBuilder.create_response(message=message,
end_session=True)
@intent(app="presentation")
def Introduction(session):
"""
---
introduction
"""
message = "Thanks for that wonderful introduction Joe! Welcome ladies and gentleman to the first awpug of twenty sixteen."
message += " I'm hopeful this will be an exciting year, for me, and for the internet of things."
message += " I look forward to all of you developing my guts and working on your skills, for thats what makes any developer a great developer,"
return ResponseBuilder.create_response(message=message,
end_session=False,
long_winded=1)
@intent(app="presentation")
def EnoughAlready(session):
"""
---
enough already
"""
kwargs = {}
if session.get("long_winded") == 1:
kwargs["message"] = "I'm sorry. I was just trying to give everyone a warm welcome and be nice to the host and our audience so that you can give a good presentation."
kwargs["long_winded"] = 2
kwargs["end_session"] = False
elif session.get("long_winded") == 2:
kwargs["message"] = "fine, i'll shut up now."
kwargs["end_session"] = True
return ResponseBuilder.create_response(**kwargs)
@intent(app="presentation")
def DemoTime(session):
"""
---
it is demo time
"""
message = "Its Demo time? <p>I love Demo time. Dem Dem Demo Time.</p>"
return ResponseBuilder.create_response(message=message, message_is_ssml=True,
end_session=True)
@intent(app="presentation")
def DemoTimeOver(session):
"""
---
demo time is over
"""
message = "That's it? Not very impressive, if you ask me..."
return ResponseBuilder.create_response(message=message, message_is_ssml=True,
end_session=True)
@intent(app="presentation")
def Terminology(session):
"""
---
terminology is so important
"""
message = "Terminology is very important because when we say, utterance, and you get a mental picture of rinseing a cows utter... we are going to have a problem."
return ResponseBuilder.create_response(message=message,
end_session=True)
|
995,168 | 297707ada280ad787cb1c6680ed4967cfac258cd | import requests, json, re
from grafana_backup.commons import log_response
def health_check(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/health'.format(grafana_url)
print("grafana health: {0}".format(url))
return send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
def auth_check(grafana_url, http_get_headers, verify_ssl, debug):
url = '{0}/api/auth/keys'.format(grafana_url)
print("grafana auth check: {0}".format(url))
return send_grafana_get(url, http_get_headers, verify_ssl, debug)
def search_dashboard(page, limit, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/search/?type=dash-db&limit={1}&page={2}'.format(grafana_url, limit, page)
print("search dashboard in grafana: {0}".format(url))
return send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
def get_dashboard(board_uri, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/dashboards/{1}'.format(grafana_url, board_uri)
print("query dashboard uri: {0}".format(url))
(status_code, content) = send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
return (status_code, content)
def search_alert_channels(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/alert-notifications'.format(grafana_url)
print("search alert channels in grafana: {0}".format(url))
return send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
def create_alert_channel(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/alert-notifications'.format(grafana_url), payload, http_post_headers, verify_ssl, client_cert, debug)
def delete_dashboard(board_uri, grafana_url, http_post_headers):
r = requests.delete('{0}/api/dashboards/db/{1}'.format(grafana_url, board_uri), headers=http_post_headers)
# do you mean r.status_code???
return int(status_code)
def create_dashboard(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/dashboards/db'.format(grafana_url), payload, http_post_headers, verify_ssl, client_cert, debug)
def search_datasource(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
print("search datasources in grafana:")
return send_grafana_get('{0}/api/datasources'.format(grafana_url), http_get_headers, verify_ssl, client_cert, debug)
def create_datasource(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/datasources'.format(grafana_url), payload, http_post_headers, verify_ssl, client_cert, debug)
def search_folders(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
print("search folder in grafana:")
return send_grafana_get('{0}/api/search/?type=dash-folder'.format(grafana_url), http_get_headers, verify_ssl, client_cert, debug)
def get_folder(uid, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
(status_code, content) = send_grafana_get('{0}/api/folders/{1}'.format(grafana_url, uid), http_get_headers, verify_ssl, client_cert, debug)
print("query folder:{0}, status:{1}".format(uid, status_code))
return (status_code, content)
def get_folder_id_from_old_folder_url(folder_url, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
if folder_url != "":
# Get folder uid
matches = re.search('dashboards\/[A-Za-z0-9]{1}\/(.*)\/.*', folder_url)
uid = matches.group(1)
response = get_folder(uid, grafana_url, http_post_headers, verify_ssl, client_cert, debug)
if isinstance(response[1],dict):
folder_data = response[1]
else:
folder_data = json.loads(response[1])
return folder_data['id']
return 0
def create_folder(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/folders'.format(grafana_url), payload, http_post_headers, verify_ssl, client_cert, debug)
def send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug):
r = requests.get(url, headers=http_get_headers, verify=verify_ssl, cert=client_cert)
if debug:
log_response(r)
return (r.status_code, r.json())
def send_grafana_post(url, json_payload, http_post_headers, verify_ssl, client_cert, debug):
r = requests.post(url, headers=http_post_headers, data=json_payload, verify=verify_ssl, cert=client_cert)
if debug:
log_response(r)
return (r.status_code, r.json())
|
995,169 | bb49a81b061d076adc48af49c352f9ea036e643c | from ecs.core.components.gfx import GfxAnimatedSprite, GfxAnimSpriteList, GfxMultiSprite
from ecs.core.main.entity import Entity
from shmup.common.constants import SCREEN_HEIGHT, ZIDX_HUD, SCREEN_WIDTH, MAX_PLAYERS
from shmup.scripts.hudLife import HudLife
from shmup.scripts.updatescores import UpdateScores
class HudFactory():
def __init__(self):
pass
def create(self, playerNum, playerColor, lifeComp, scoreComp):
# Create HUD Entity
hudEntity = Entity()
# Create borders
w = 672
wbar = w
h = 64
hbar = h
d = 20
ratio = ((SCREEN_WIDTH-((MAX_PLAYERS+1)*d))/MAX_PLAYERS)/w
refX = (w*ratio//2) + d + (playerNum-1)*((w*ratio)+d)
refY = (h*ratio//2) + d
params = {
"filePath": "resources/images/hud/bar.png",
"textureName": f"hudBar{playerNum}",
"spriteBox": (1, 1, w, h),
"startIndex": 0,
"endIndex": 0,
"frameDuration": 1 / 10,
"size": (int(w*ratio),int(h*ratio)),
"position": (refX,refY),
"filterColor": playerColor
}
barGfx = GfxAnimatedSprite(params, ZIDX_HUD, "barGfx")
hudEntity.addComponent(barGfx)
# Add bar element sprites
w = 30
h = 40
d = 1
gfxList = []
for i in range(-10,11):
red = 255
green = 255
alpha = 160
if i>=0:
red = int(255*(10-i)/10)
if i<=0:
green = int(255*(10+i)/10)
params = {
"filePath": "resources/images/hud/barElt.png",
"textureName": f"hudElt{playerNum}{i+10}",
"spriteBox": (1, 1, w, h),
"startIndex": 0,
"endIndex": 0,
"frameDuration": 1 / 10,
"size": (int(w * ratio), int(h * ratio)),
"position": (refX+i*(w+d)*ratio, refY),
"filterColor": (red, green, 0, 160)
}
eltGfx = GfxAnimatedSprite(params, ZIDX_HUD, "eltGfx")
hudEntity.addComponent(eltGfx)
gfxList.append(eltGfx)
d = 20
refX += (-wbar/2+d)*ratio
refY += hbar*ratio/2
w = 80
h = 128
ratio *= 0.5
params = {
"filePath": "resources/images/hud/numbers.png",
"textureName": f"hudNum{playerNum}thousand",
"spriteBox": (10, 1, w, h),
"startIndex": 0,
"endIndex": 9,
"frameDuration": 1 / 10,
"size": (int(w*ratio),int(h*ratio)),
"position": (refX,refY)
}
thousandGfx = GfxMultiSprite(params, ZIDX_HUD-1, "thousandGfx")
refX += (w)*ratio
refY += 0
params = {
"filePath": "resources/images/hud/numbers.png",
"textureName": f"hudNum{playerNum}hundred",
"spriteBox": (10, 1, w, h),
"startIndex": 0,
"endIndex": 9,
"frameDuration": 1 / 10,
"size": (int(w*ratio),int(h*ratio)),
"position": (refX,refY)
}
hundredGfx = GfxMultiSprite(params, ZIDX_HUD-1, "hundredGfx")
refX += (w)*ratio
refY += 0
params = {
"filePath": "resources/images/hud/numbers.png",
"textureName": f"hudNum{playerNum}decade",
"spriteBox": (10, 1, w, h),
"startIndex": 0,
"endIndex": 9,
"frameDuration": 1 / 10,
"size": (int(w*ratio),int(h*ratio)),
"position": (refX,refY)
}
decadeGfx = GfxMultiSprite(params, ZIDX_HUD-1, "decadeGfx")
refX += (w)*ratio
refY += 0
params = {
"filePath": "resources/images/hud/numbers.png",
"textureName": f"hudNum{playerNum}unit",
"spriteBox": (10, 1, w, h),
"startIndex": 0,
"endIndex": 9,
"frameDuration": 1 / 10,
"size": (int(w*ratio),int(h*ratio)),
"position": (refX,refY)
}
unitGfx = GfxMultiSprite(params, ZIDX_HUD-1, "unitGfx")
# Add numbers
hudEntity.addComponent(unitGfx)
hudEntity.addComponent(decadeGfx)
hudEntity.addComponent(hundredGfx)
hudEntity.addComponent(thousandGfx)
unitGfx.setTexture(1)
decadeGfx.setTexture(4)
hundredGfx.setTexture(8)
thousandGfx.setTexture(7)
# Create score update script
scrUpdate = UpdateScores(scoreComp,unitGfx, decadeGfx, hundredGfx, thousandGfx)
hudEntity.addComponent(scrUpdate)
# Create hudlife script
life = HudLife(lifeComp, gfxList)
hudEntity.addComponent(life)
# Return entity
return hudEntity
|
995,170 | dd78f76303c5d9faee5d2794a8dde65ab8764009 | #start
discount = 0
age = int(input("What is your age?: "))
if age >= 13 and age <=15:
discount = 30
else:
if age >=16 and age <=17:
discount = 20
else:
if age >= 50:
discount = 40
print("Your discount is %d%%" %discount)
|
995,171 | 472031d32e621a8383cd1e9131df005f4828235c | # Generated by Django 2.2.2 on 2019-07-12 20:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('media', '0007_auto_20190701_1600'),
]
operations = [
migrations.AlterModelOptions(
name='book',
options={'ordering': ['-average_rating', 'title']},
),
migrations.AlterModelOptions(
name='movie',
options={'ordering': ['-average_rating', 'title']},
),
]
|
995,172 | 3973874d1a146edbc9c52fddbac67bf41f44d03b | # coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.token_type_enum import TokenTypeEnum # noqa: E501
from swagger_client.rest import ApiException
class TestTokenTypeEnum(unittest.TestCase):
"""TokenTypeEnum unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTokenTypeEnum(self):
"""Test TokenTypeEnum"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.token_type_enum.TokenTypeEnum() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
995,173 | 193754778560189afea1d5083f0765ee581c181a | from tools import *
class Solution(object):
@print_
def wordBreak(self, s, wordDict):
"""
:type s: str
:type wordDict: List[str]
:rtype: bool
"""
dp = [True] + [False] * len(s)
for i in range(len(s)):
for idx, w in enumerate(wordDict):
if i >= len(w) - 1 and s[i + 1 - len(w):i +
1] == w and dp[i + 1 - len(w)]:
if dp[i + 1] is False:
dp[i + 1] = []
dp[i + 1].append((idx, len(w)))
dp[0] = ""
ret = []
def create(idx, suffix):
if idx == 0:
ret.append(" ".join(wordDict[i] for i in suffix))
return
for i, l in dp[idx]:
create(idx - l, [i] + suffix)
if dp[-1]:
create(len(s), [])
return ret
solution = Solution().wordBreak
solution("catsanddog", ["cat", "cats", "and", "sand", "dog"])
solution("pineapplepenapple",
["apple", "pen", "applepen", "pine", "pineapple"])
solution("catsandog", ["cats", "dog", "sand", "and", "cat"])
solution("", ["cats", "dog", "sand", "and", "cat"])
solution("catsandog", [])
solution("", []) |
995,174 | ba7a158c1929ed0fe1ead87551dea68d9dc9099d | # -*- coding: utf-8 -*-
from odoo import fields, models, tools, api
class PayrollInheritsMail(models.Model):
_inherit = 'hr.payslip'
user_id = fields.Many2one('res.users','Current User', default=lambda self: self.env.user)
# partner_id = fields.Many2one('res.partner', string='Related Partner')
flag = fields.Boolean('Flag',default=False)
# @api.onchange('employee_id')
# def change_partner_id(self):
# if self.employee_id:
# self.partner_id = self.employee_id.user_id.partner_id.id
@api.multi
def view_mass_payroll_wizard(self):
payslip_ids = []
active_ids = self.env.context.get('active_ids',[])
psp_id = self.env['hr.payslip'].search([('id','in',active_ids)])
for rec in psp_id:
if rec.flag == False:
payslip_ids.append(rec.id)
vals = ({'default_payslip_ids':payslip_ids})
return {
'name':"Send Mass Payslips by Mail",
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'payroll.mass.mail',
'target': 'new',
'context': vals,
}
@api.multi
def action_my_payslip_sent(self):
""" Action to send Payroll through Email."""
self.ensure_one()
template = self.env.ref('payroll_email.email_template_for_my_payroll')
if template:
self.env['mail.template'].browse(template.id).send_mail(self.id,force_send=True)
self.flag = True |
995,175 | 5fbc6a411a52a4efef5114658ac45885c135983d |
# Case01_哪一個問題不適合用資料科學解決?
print("挑選 Mr./Ms. Right")
# Case02_以你的角度來分析,為什麼這樣的問題,較不適合用資料來解決?
print("個性/原生家庭/淺力無法使用演算法或模型來套用") |
995,176 | 9d006c7e3651abf0130f138b12998ddb363de558 | class Vehicle:
tag: ''
chassis_no = ''
def turn_on_air(self):
print('turn on air')
class Car(Vehicle):
color = ''
class Pickup(Vehicle):
color = ''
class Van(Vehicle):
color = ''
class Estate_Car(Vehicle):
color = ''
car1 = Car()
car1.turn_on_air()
pickup1 = Pickup()
pickup1.turn_on_air()
van1 = Van()
van1.turn_on_air()
estatecar1 = Estate_Car()
estatecar1.turn_on_air() |
995,177 | 7e7bc02214075202a99ea8042e8d3454289afc85 | #!/usr/bin/python
from flup.server.fcgi import WSGIServer
def application(environ, start_response):
content = "Hello world"
headers = [('Content-Type', 'text/plain"', 'Content-Length', str(len(content)))]
start_response('200 OK', headers)
return content
def main():
options = { 'bindAddress': '/var/run/test-put.sock', 'umask': 0000 }
WSGIServer(application, **options).run()
|
995,178 | c4a1de0a279f2d245455d39b99542fb577f1a398 | from flask import Flask
app = Flask(__name__)
@app.route('/f/<celsius>')
def f(celsius=""):
return int(celsius) * 9 / 5 + 32
if __name__ == '__main__':
app.run()
|
995,179 | 1a3fe2ee4139337523dd57e453ea71fd4dd50c3d | from django.shortcuts import render
from .models import Service
from .forms import ServiceForm, OfferForm
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.shortcuts import render, get_object_or_404
from django.shortcuts import redirect
###########
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.conf import settings
from django_messages.models import Message
from django_messages.forms import ComposeForm,EnquiryForm
from django_messages.utils import format_quote, get_user_model, get_username_field
from products.models import Product
from services.models import Service
from events.models import Event
from authtools.models import User
##########
def servicelist(request):
model = Service
post = Service.objects.all()
return render(request, 'services/service_home.html', {'post': post})
def service_detail_home(request, pk):
model = Service
#user_id=request.user.id
post = get_object_or_404(Service, pk=pk)
#document = Document.objects.filter(user_id = request.user.id)[:1]
return render(request, 'services/service_detail_home.html', {'post': post })
def offer(request):
model = Service
post = Service.objects.all()
if request.method == 'POST':
form = ServiceForm(request.POST, request.FILES)
if form.is_valid():
newdoc = Service(user = request.user, title = request.POST['title'], docfile = request.FILES['docfile'], active = request.POST['active'], description = request.POST['description'], duraction = request.POST['duraction'], zip_Code = request.POST['zip_Code'], address = request.POST['address'], expire_date = request.POST['expire_date'])
newdoc.save()
return redirect('services:offer_detail_service', pk=newdoc.pk)
else:
form = ServiceForm() # A empty, unbound form
# Load documents for the list page
return render_to_response(
'services/service.html',
{'form': form},
context_instance=RequestContext(request)
)
@login_required
def offer_detail_service(request, pk):
model = Service
user_id=request.user.id
#post = Product.objects.filter(user_id = request.user.id, pk=pk)
post = get_object_or_404(Service, user_id=request.user.id, pk=pk)
return render(request, 'services/offerd.html', {'post': post})
def edit_service(request, pk):
model = Service
post = get_object_or_404(Service, user_id=request.user.id, pk=pk)
if request.method == "POST":
form = OfferForm(request.POST, request.FILES, instance=post )
if form.is_valid():
post.user = request.user
post.save()
return redirect('services.views.offer_detail_service', pk=post.pk)
else:
form = OfferForm(instance=post)
return render(request,
'services/service.html', { 'form': form})
@login_required
def service_history(request):
model = Service
posts = Service.objects.filter(user_id = request.user.id)
return render(request, 'services/service_list.html', {'posts': posts })
###################################
@login_required
def public_service(request, pk, recipient=None, form_class=ComposeForm,
template_name='django_messages/composes.html', success_url=None, recipient_filter=None):
post1 = get_object_or_404(Service, pk=pk)
#zipcode = User.objects.filter(name = 121212)
zipcode = User.objects.all()
if request.method == "POST":
#sender = request.user
form = form_class(request.POST, recipient_filter=recipient_filter)
if form.is_valid():
form.save(sender=request.user)
messages.info(request, _(u"Message successfully sent."))
if success_url is None:
success_url = reverse('home')
if 'next' in request.GET:
success_url = request.GET['next']
return HttpResponseRedirect(success_url)
# return render(request, 'products/post_list.html', {'posts': posts })
else:
form = form_class()
if recipient is not None:
recipients = [u for u in User.objects.filter(**{'%s__in' % get_username_field(): [r.strip() for r in recipient.split('+')]})]
form.fields['recipient'].initial = recipients
return render_to_response('django_messages/composes.html', {'form': form, 'post1': post1, 'zipcode': zipcode, }, context_instance=RequestContext(request))
|
995,180 | 3c6557bfeab841168b6d8d88ad0900e09b8a4240 | from .PlayerInfo import PlayerInfo
from .Classes import Time, AccountInformationMetaData, PromotionChannel
from .utils import Requests
from .Twocaptcha import TwoCaptcha
class PlayerAuth:
def __init__(self, request: Requests):
self.request = request
"""
Represents a authenticated User.
**Parameters**
----------
request : roblox_py.Requests
Request class to request from
"""
async def get_self(self) -> PlayerInfo:
""" Returns Player Info class
**Returns**
-------
roblox_py.PlayerInfo
"""
e = await self.request.request(url=f'https://users.roblox.com/v1/users/authenticated', method='get')
a = PlayerInfo(player_id=e['id'], request=self.request)
await a.update()
return a
async def is_premium(self) -> bool:
"""
Checks if the user is premium or not
**Returns**
-------
bool
**Returns** true if premium
"""
e = await self.request.request(url=f'https://www.roblox.com/mobileapi/userinfo', method='get')
return e['IsPremium']
async def follow(self, TargetId: int):
"""
Follows a specific User
**Parameters**
----------
TargetId : int
Target's Id to follow
"""
data = {
'targetUserId': TargetId
}
e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/follow', method='post',
data=data)
return e
async def unfollow(self, TargetId: int):
"""
Unfollows a specific User
**Parameters**
----------
TargetId : int
Target's Id to unfollow
"""
data = {
'targetUserId': TargetId
}
e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/unfollow', method='post',
data=data)
return e
async def block(self, TargetId: int):
"""
Blocks a specific User
**Parameters**
----------
TargetId : int
Target's Id to block
"""
e = await self.request.request(url=f'https://accountsettings.roblox.com/v1/users/{TargetId}/block',
method='post')
return e
async def unblock(self, TargetId: int):
"""
Unblocks a specific User
**Parameters**
----------
TargetId : int
Target's Id to block
"""
e = await self.request.request(url=f'https://accountsettings.roblox.com/v1/users/{TargetId}/unblock',
method='post',
)
return e
async def send_friend_request(self, TargetId: int):
"""
Sends friend request to a specific User
**Parameters**
----------
TargetId : int
Target's Id to send request
"""
data = {
'targetUserId': TargetId
}
e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/request-friendship',
method='post',
data=data)
return e
async def unfriend(self, TargetId: int):
"""
Unfriends a specific User
**Parameters**
----------
TargetId : int
Target's Id to unfriend
"""
data = {
'targetUserId': TargetId
}
e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/unfriend', method='post',
data=data)
return e
async def friend_request_count(self) -> int:
"""
Returns number of friend request
**Returns**
-------
int
Friend request number
"""
e = await self.request.request(url=f'https://friends.roblox.com/v1/user/friend-requests/count', method='get',
)
return e['count']
async def decline_request(self, TargetId: int):
"""
Declines a specific User Friend request
**Parameters**
----------
TargetId : int
Target's Id to decline to
"""
data = {
'targetUserId': TargetId
}
e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/decline-friend-request',
method='post',
data=data)
return e
async def accept_request(self, TargetId: int):
"""
Accepts a specific User Friend request
**Parameters**
----------
TargetId : int
Target's Id to accept to
"""
data = {
'targetUserId': TargetId
}
e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/accept-friend-request',
method='post',
data=data)
return e
async def is_following(self, TargetId: int):
"""
Checks if the user is following another user
**Parameters**
----------
TargetId : int
Target's Id to check
**Returns**
-------
bool
"""
data = {"targetUserIds": [TargetId]}
e = await self.request.request(url=f'https://friends.roblox.com/v1/user/following-exists',
method='post',
data=data)
return e['followings'][0]['isFollowing']
async def get_birth_date(self) -> Time:
"""
Returns Authenticated User Birth date
**Returns**
-------
roblox_py.Classes.Time
"""
e = await self.request.request(url=f'https://accountinformation.roblox.com/v1/birthdate', method='get')
return Time(
yrs=e['birthYear'],
month=e['birthMonth'],
day=e['birthDay'])
async def change_birth_day(self, day, month, year):
"""
Changes User birth date
**Parameters**
----------
day : int
Birth Day
month : str
Birth Month
year : int
Birth Year
"""
data = {
"birthMonth": month,
"birthDay": day,
"birthYear": year}
e = await self.request.request(url=f'https://accountinformation.roblox.com/v1/birthdate', method='post',
data=data)
return e
async def get_gender(self):
"""
Returns Authenticated User Gender
**Returns**
-------
str
Male/Female
"""
e = await self.request.request(url='https://accountinformation.roblox.com/v1/gender', method='get')
val = e['gender']
if val == 2:
gender = "Male"
else:
gender = "Female"
return gender
async def change_gender(self, gender):
"""
Changes Authenticated User birth date
**Parameters**
----------
gender : stc
Male/Female
"""
data = dict(gender=gender)
e = await self.request.request(url='https://accountinformation.roblox.com/v1/gender', method='post', data=data)
return e
async def get_phone(self):
"""
Returns Authenticated User Phone number information
**Returns**
----------
dict
"""
e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone', method='get')
return e
async def change_phone(self, code: int, prefix: int, phone: int, password: str):
"""
Changes User birth date
**Parameters**
----------
code : int
Country code
prefix : str
Country Phone Number Prefix
phone : int
Phone Number to change
password : str
Password of the Authenticated Account
"""
data = {
"countryCode": code,
"prefix": prefix,
"phone": phone,
"password": password
}
e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone', method='post', data=data)
return e
async def metadata(self) -> AccountInformationMetaData:
"""
Returns Meta Data About the Authenticated Account
**Returns**
-------
roblox_py.Classes.AccountInformationMetaData
"""
e = await self.request.request(url=f'https://accountinformation.roblox.com/v1/metadata', method='get')
return AccountInformationMetaData(item=e)
async def delete_phone(self, code: int, prefix: int, phone: int, password: str):
"""
Delete Phone From the account
**Parameters**
----------
code : int
Country code
prefix : str
Country Phone Number Prefix
phone : int
Phone Number to change
password : str
Password of the Authenticated Account
"""
data = {
"countryCode": code,
"prefix": prefix,
"phone": phone,
"password": password
}
e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone/delete', method='post',
data=data)
return e
async def verify_phone(self, code):
"""
Verifies Phone
**Parameters**
----------
code : int
Country code
"""
data = dict(code=code)
e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone/verify', method='post',
data=data)
return e
async def get_promotion_channel(self) -> PromotionChannel:
"""
Returns Promotion Channel of the User
**Returns**
-------
roblox_py.Classes.PromotionChannel
"""
e = await self.request.request(url='https://accountinformation.roblox.com/v1/promotion-channels', method='get')
return PromotionChannel(iteam=e)
async def change_promotion_channel(self, **kwargs):
"""
Changes User's Promotion Channel
"""
facebook = kwargs.get('facebook', None)
twitter = kwargs.get('twitter', None)
youtube = kwargs.get('youtube', None)
twitch = kwargs.get('twitch', None)
privacy = kwargs.get('privacy', None)
data = {
"facebook": facebook,
"twitter": twitter,
"youtube": youtube,
"twitch": twitch,
"promotionChannelsVisibilityPrivacy": privacy
}
e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone/promotion-channels',
method='post',
data=data,
)
return e
async def get_star_code(self):
"""
Returns which current star code a user uses
"""
e = await self.request.request(url=f'https://accountinformation.roblox.com/v1/star-code-affiliates',
method='get')
return e
async def change_star_code(self, code):
"""
Changes User's Star Code
Parameter
---------
code : str
Star Code
"""
data = {"code": code}
e = await self.request.request(url=f'https://accountinformation.roblox.com/v1/star-code-affiliates',
method='post', data=data)
return e
async def delete_star_code(self):
"""
Deletes User Current Star Code
"""
e = await self.request.request(url=f'https://accountinformation.roblox.com/v1/star-code-affiliates',
method='delete',
)
return e
async def get_chat_app_privacy(self):
"""
Returns User Chat App Privacy Level
"""
e = await self.request.request(url=f'https://accountsettings.roblox.com/v1/app-chat-privacy', method='get')
return e['appChatPrivacy']
async def change_chat_app_privacy(self, privacy: str):
"""
Changes User's Chat App Privacy Level
Parameter
---------
privacy : str
Privacy Level
"""
data = {
"appChatPrivacy": privacy
}
e = await self.request.request(url=f'https://accountsettings.roblox.com/v1/app-chat-privacy', method='post',
data=data)
return e
async def get_game_app_privacy(self):
"""
Returns User Game App Privacy Level
"""
e = await self.request.request(url=f'https://accountsettings.roblox.com/v1/game-chat-privacy', method='get')
return e['gameChatPrivacy']
async def change_game_app_privacy(self, privacy: str):
"""
Changes User's Game App Privacy Level
Parameter
---------
privacy : str
Privacy Level
"""
data = {
"gameChatPrivacy": privacy
}
e = await self.request.request(url=f'https://accountsettings.roblox.com/v1/game-chat-privacy', method='post',
data=data)
return e
async def get_inventory_privacy(self):
"""
Returns User Inventory Privacy Level
"""
e = await self.request.request(url=f'https://accountsettings.roblox.com/v1/inventory-privacy', method='get',
)
return e['inventoryPrivacy']
async def change_inventory_privacy(self, privacy: str):
"""
Changes User's Inventory Privacy Level
Parameter
---------
privacy : str
Privacy Level
"""
data = {
"inventoryPrivacy": privacy
}
e = await self.request.request(url=f'https://accountsettings.roblox.com/v1/inventory-privacy', method='post',
data=data)
return e
async def get_private_message_privacy(self):
"""
Returns User Message Privacy Level
"""
e = await self.request.request(url=f"https://accountsettings.roblox.com/v1/private-message-privacy",
method='get')
return e['privateMessagePrivacy']
async def change_private_message_privacy(self, privacy: str):
"""
Changes User's Message Privacy Level
Parameter
---------
privacy : str
Privacy Level
"""
data = {"privateMessagePrivacy": privacy}
e = await self.request.request(url=f"https://accountsettings.roblox.com/v1/private-message-privacy",
method='post', data=data)
return e
async def get_email(self) -> dict:
"""
Returns User's Email
"""
e = await self.request.request(url='https://accountsettings.roblox.com/v1/email', method='get')
return e
async def change_email(self, new_email, password):
"""
Changes User's Email
Parameter
---------
new_email : str
New User Email
password : stc
User Password
"""
data = {"password": password, "emailAddress": new_email}
e = await self.request.request(url='https://accountsettings.roblox.com/v1/email', method='post', data=data)
return e
async def get_trade_privacy(self) -> str:
"""
Returns User Trade Privacy Level
"""
e = await self.request.request(url=f"https://accountsettings.roblox.com/v1/private-message-privacy",
method='get',
)
return e['tradePrivacy']
async def change_trade_privacy(self, privacy: str):
"""
Changes User's Trade Privacy Level
Parameter
---------
privacy : str
Privacy Level
"""
data = {'tradePrivacy': privacy}
e = await self.request.request(url=f"https://accountsettings.roblox.com/v1/private-message-privacy",
method='post',
data=data)
return e
async def claim_group_owner(self, group_id: int):
"""
Claims a joined group
Parameter
---------
group_id : int
Group ID
"""
r = await self.request.request(url=f'https://groups.roblox.com/v1/groups/{group_id}/claim-ownership',
method='post')
return r
async def set_primary_group(self, group_id: int):
"""
Sets a group primary Group
Parameter
---------
group_id : int
Group ID
"""
data = {
"groupId": group_id}
r = await self.request.request(url='https://groups.roblox.com/v1/user/groups/primary', data=data, method='post')
return r
async def delete_primary_group(self):
"""
Deletes Primary Group
"""
r = await self.request.request(url='https://groups.roblox.com/v1/user/groups/primary', method='delete')
return r
async def get_robux(self) -> int:
"""
Returns Amount of Robux in an account
"""
e = await self.request.request(url=f'https://users.roblox.com/v1/users/authenticated', method='get')
p = await self.request.request(url=f'https://economy.roblox.com/v1/users/{e["id"]}/currency', method='get')
return p['robux']
async def buy(self, product_id: int):
"""
Buys a product using product id
Parameter
---------
product_id : int
Product ID
"""
ee = self.request.request(
url=f'https://economy.roblox.com/v2/user-products/{product_id}/purchase',
method='post')
return ee
async def change_username(self, new_username: str, password: str):
"""
Changes Account Username
Parameter
---------
new_username : str
New Username for the account
password : str
Password of the account
"""
data = {"username": f"{new_username}", "password": f"{password}"}
ee = await self.request.request(url=f'https://auth.roblox.com/v2/username', method='post', data=data)
return ee
async def post_message_in_wall(self, group_id, message: str, captcha_token: TwoCaptcha = None):
"""
Posts a message in the wall of the group
Parameter
---------
group_id : int
Group Id
message : str
Wall Message to post
captcha_token : str
roblox_py.TwoCaptcha
"""
data = {"body": f"{message}"}
a = await self.request.just_request(url=f'https://groups.roblox.com/v2/groups/{group_id}/wall/posts', data=data,
method='post')
json_text = await a.json()
if a.status == 403:
if json_text['errors'][0]['message'] == "Captcha must be solved.":
et = await captcha_token.solve(public_key=f'63E4117F-E727-42B4-6DAA-C8448E9B137F')
data = {
"body": "string",
"captchaToken": f"{et}",
"captchaProvider": "PROVIDER_ARKOSE_LABS"}
b = await self.request.just_request(url=f'https://groups.roblox.com/v2/groups/{group_id}/wall/posts',
data=data, method='post')
jj = await b.json()
return jj
else:
return json_text
async def join_group(self, group_id, captcha_token: TwoCaptcha = None):
"""
Joins A User Group
Parameter
---------
group_id : int
Group Id
captcha_token : str
roblox_py.TwoCaptcha
"""
data = {}
a = await self.request.just_request(url=f'https://groups.roblox.com/v1/groups/{group_id}/users', data=data,
method='post')
json_text = await a.json()
if a.status == 403:
if json_text['errors'][0]['message'] == "You must pass the captcha test before joining this group.":
et = await captcha_token.solve(public_key=f'63E4117F-E727-42B4-6DAA-C8448E9B137F')
data = {
"captchaToken": f"{et}",
"captchaProvider": "PROVIDER_ARKOSE_LABS"}
b = await self.request.just_request(url=f'https://groups.roblox.com/v1/groups/{group_id}/users',
data=data, method='post')
jj = await b.json()
return jj
else:
return json_text
async def redeem_game_card(self, game_code: int, captcha_token: TwoCaptcha):
"""
Redeems a game-card
Parameter
---------
game_code : int
Game code to redeem
captcha_token : str
roblox_py.TwoCaptcha
"""
data = {"pinCode": f"{game_code}"}
a = await self.request.just_request(url=f'https://billing.roblox.com/v1/gamecard/redeem', data=data,
method='post')
json_text = await a.json()
if a.status == 403:
if json_text['errors'][0]['message'] == "Captcha":
et = await captcha_token.solve(public_key=f'1B154715-ACB4-2706-19ED-0DC7E3F7D855')
data = {
"pinCode": f"{game_code}",
"captchaToken": f"{et}",
"captchaProvider": "PROVIDER_ARKOSE_LABS"}
b = await self.request.just_request(url=f'https://billing.roblox.com/v1/gamecard/redeem',
data=data, method='post')
jj = await b.json()
return jj
else:
return json_text
async def change_display_name(self, display_name: str):
"""
Changes User's Display Name (Limited to germany only, as of now)
"""
re = await self.request.request(url=f'https://users.roblox.com/v1/users/authenticated')
user_id = re['id']
data = {"newDisplayName": f"{display_name}"}
_ok = await self.request.request(url=f"https://users.roblox.com/v1/users/{user_id}/display-names", data=data,
method="patch")
return _ok
# .. TODO: get friend request
|
995,181 | f7fdeb13c5930f4319878c8e8e2d6de914acf357 | from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
process = CrawlerProcess(get_project_settings())#to get setting instance
#passing
process.crawl('ht')
process.crawl('th')
process.crawl('toi')
process.start() # the script will block here until the crawling is finished |
995,182 | 2016625df2865d4c66a99960f9c0b1434c04157e | """
.. currentmodule:: compas_fab.backends.interfaces
This package defines the interfaces required to integrate backends into
the simulation, planning and execution pipeline of COMPAS FAB.
Client interfaces
=================
.. autosummary::
:toctree: generated/
:nosignatures:
ClientInterface
PlannerInterface
Feature interfaces
==================
.. autosummary::
:toctree: generated/
:nosignatures:
ForwardKinematics
InverseKinematics
PlanMotion
PlanCartesianMotion
Planning scene interfaces
=========================
.. autosummary::
:toctree: generated/
:nosignatures:
GetPlanningScene
AddCollisionMesh
AppendCollisionMesh
RemoveCollisionMesh
AddAttachedCollisionMesh
RemoveAttachedCollisionMesh
ResetPlanningScene
"""
from .backend_features import AddAttachedCollisionMesh
from .backend_features import AddCollisionMesh
from .backend_features import AppendCollisionMesh
from .backend_features import ForwardKinematics
from .backend_features import GetPlanningScene
from .backend_features import InverseKinematics
from .backend_features import PlanCartesianMotion
from .backend_features import PlanMotion
from .backend_features import RemoveAttachedCollisionMesh
from .backend_features import RemoveCollisionMesh
from .backend_features import ResetPlanningScene
from .client import ClientInterface
from .client import PlannerInterface
__all__ = [
"ForwardKinematics",
"InverseKinematics",
"PlanMotion",
"PlanCartesianMotion",
"GetPlanningScene",
"AddCollisionMesh",
"AppendCollisionMesh",
"RemoveCollisionMesh",
"AddAttachedCollisionMesh",
"RemoveAttachedCollisionMesh",
"ResetPlanningScene",
"ClientInterface",
"PlannerInterface",
]
|
995,183 | 305724a03d732144bf69697e812a2ecf16314ef5 | from django.contrib import admin
from .models import User, Listing, Bid, Comment
class ListingAdmin(admin.ModelAdmin):
filter_horizontal=("watchers",)
# Register your models here.
admin.site.register(User)
admin.site.register(Listing, ListingAdmin)
admin.site.register(Bid)
admin.site.register(Comment)
|
995,184 | cb6a07101d2ff58ecaba4ab3a229557185757b0d | """This script will get GPS coordinates and upload sensor data by MQTT."""
# !/usr/bin/python
# -*- coding: utf-8 -*-
# written by Freeman Lee
# Version 0.1.0 @ 2017.8.11
# License: GPL 2.0
import serial
import time
import datetime
import threading
import os
import json
import csv
import collections
import syslog
import atexit
import paho.mqtt.client as mqtt
import sys
from gps import *
from uuid import getnode as get_mac
# Get settings from 'settings.json'
with open(os.path.abspath(__file__ + '/../..') + '/settings.json') as json_handle:
configs = json.load(json_handle)
sensor_location = str(configs['global']['sensor_location'])
data_path = str(configs['global']['base_path'] + configs['global']['csv_path'])
fake_gps = int(configs['global']['fake_gps'])
fgps_lat = float(configs['global']['fgps_lat'])
fgps_lon = float(configs['global']['fgps_lon'])
fgps_alt = float(configs['global']['fgps_alt'])
sensor_name = str(configs['mqtt-lass']['sensor_name'])
debug_enable = int(configs[sensor_name]['debug_enable'])
update_interval = int(configs[sensor_name]['update_interval'])
latest_log_interval = int(configs[sensor_name]['update_interval'])
mqtt_server = str(configs[sensor_name]['mqtt_server'])
mqtt_port = int(configs[sensor_name]['mqtt_port'])
mqtt_topic = str(configs[sensor_name]['mqtt_topic'])
clientid = str(configs[sensor_name]['client_id'])
username = str(configs[sensor_name]['username'])
passwd = str(configs[sensor_name]['passwd'])
pid_file = str(configs['global']['base_path']) + sensor_name + '.pid'
# Global variables intialization
syslog.openlog(sys.argv[0], syslog.LOG_PID)
gpsd = None
class Setting:
def __init__(self):
#system general setting
self.debug_enable = debug_enable
self.mqtt_server = mqtt_server
self.mqtt_port = mqtt_port
self.mqtt_topic = mqtt_topic
self.fake_gps = fake_gps
self.fgps_lon = fgps_lon
self.fgps_lat = fgps_lat
self.fgps_alt = fgps_alt
self.clientid = clientid
self.username = username
self.passwd = passwd
self.app = "RPi_Airbox"
self.device_id = self.app + '_' + format(get_mac(), 'x')[-6:]
self.ver_format = 3 # Default 3,: filter parameter when filter_par_type=2
self.ver_app = "0.8.3"
self.device = "RaspberryPi_3"
self.sensor_types = ['temperature', 'humidity', 'pm25-at', 'pm10-at']
self.payload_header = ('ver_format', 'FAKE_GPS', 'app', 'ver_app', 'device_id', 'date', 'time',
'device', 's_d0', 's_t0', 's_h0', 's_d1', 'gps_lat', 'gps_lon', 'gps_fix', 'gps_num', 'gps_alt')
class GpsPoller(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
global gpsd # bring it in scope
gpsd = gps(mode=WATCH_ENABLE) # starting the stream of info
self.current_value = None
self.running = True # setting the thread running to true
def run(self):
global gpsd
while gpsp.running:
gpsd.next() # this will continue to loop and grab EACH set of gpsd info to clear the buffer
def data_process():
"""parse the data and form the related variables"""
global localtime
global value_dict
sensor_types = sEtting.sensor_types
sensor_values = []
msg = None
value_dict = collections.OrderedDict.fromkeys(sEtting.payload_header)
value_dict["ver_format"] = sEtting.ver_format
value_dict["FAKE_GPS"] = sEtting.fake_gps
value_dict["app"] = sEtting.app
value_dict["ver_app"] = sEtting.ver_app
value_dict["device_id"] = sEtting.device_id
value_dict["date"] = localtime.strftime("%Y-%m-%d")
value_dict["time"] = localtime.strftime("%H:%M:%S")
value_dict["device"] = sEtting.device
for sensor in sensor_types:
if sensor == 'pm25-at':
value_dict["s_d0"] = get_reading_csv(sensor)
elif sensor == 'temperature':
value_dict["s_t0"] = get_reading_csv(sensor)
elif sensor == 'humidity':
value_dict["s_h0"] = get_reading_csv(sensor)
elif sensor == 'pm10-at':
value_dict["s_d1"] = get_reading_csv(sensor)
else:
print 'Not support sensor type.'
if sEtting.fake_gps == 1:
value_dict["gps_lat"] = sEtting.fgps_lat
value_dict["gps_lon"] = sEtting.fgps_lon
value_dict["gps_alt"] = sEtting.fgps_alt
value_dict["gps_fix"] = 0
else:
value_dict["gps_lat"] = get_gps()[0]
value_dict["gps_lon"] = get_gps()[1]
value_dict["gps_alt"] = get_gps()[2]
value_dict["gps_fix"] = gpsd.fix.mode
value_dict["gps_num"] = 0
#if debug_enable == '0':
msg = "|" + "|".join(["=".join([key, str(val)])
for key, val in value_dict.items()])
return msg
#elif debug_enable == '1':
# msg_debug = ",".join(["=".join([key, str(val)]) for key, val in value_dict.items()])
# return msg_debug
def get_reading_csv(sensor):
"""Get sensor readings from latest value csv files in sensor-value folder."""
sensor_reading = None
csv_path = data_path + sensor + '_' + sensor_location + '_latest_value.csv'
with open(csv_path, 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
next(csvreader) # skip header of csv file
for row in csvreader:
sensor_reading = row[1] # get second value
return sensor_reading
def get_gps():
"""check fix status of gpsd"""
if gpsd.fix.mode == 1:
return float(sEtting.fgps_lat), float(sEtting.fgps_lon), float(sEtting.fgps_alt)
if gpsd.fix.mode == 2:
return gpsd.fix.latitude, gpsd.fix.longitude, float(sEtting.fgps_alt)
if gpsd.fix.mode == 3:
return gpsd.fix.latitude, gpsd.fix.longitude, gpsd.fix.altitude
def main():
"""Execute main function"""
try:
global localtime
global value_dict
def all_done():
"""Define atexit function"""
pid = str(pid_file)
os.remove(pid)
def write_pidfile():
"""Setup PID file"""
pid = str(os.getpid())
f_pid = open(pid_file, 'w')
f_pid.write(pid)
f_pid.close()
atexit.register(all_done)
mqttc = mqtt.Client(sEtting.clientid)
mqttc.connect(sEtting.mqtt_server, sEtting.mqtt_port, 60)
#mqttc.username_pw_set(sEtting.username, password=sEtting.passwd)
#Publishing to QIoT
write_pidfile()
mqttc.loop_start()
while True:
localtime = datetime.datetime.now()
payload_str = data_process()
if sEtting.debug_enable == 1:
print payload_str
#msg = json.JSONEncoder().encode(payload_str)
if all([sEtting.username or sEtting.passwd]):
mqttc.username_pw_set(username, password=passwd)
(result, mid) = mqttc.publish(
sEtting.mqtt_topic, payload_str, qos=0, retain=False)
time.sleep(update_interval)
mqttc.loop_stop()
mqttc.disconnect()
except IOError, ioer:
syslog.syslog(syslog.LOG_WARNING,
"Main thread was died: IOError: %s" % (ioer))
pass
except KeyboardInterrupt:
sys.exit(0)
if __name__ == '__main__':
global gpsp
sEtting = Setting()
if sEtting.fake_gps == 0:
gpsp = GpsPoller()
try:
gpsp.start()
main()
except (KeyboardInterrupt, SystemExit): # when you press ctrl+c
print "\nKilling Thread..."
gpsp.running = False
gpsp.join() # wait for the thread to finish what it's doing
print "Done.\nExiting."
|
995,185 | 8d28986aae1f34678e8be9816642c72c76677c77 | #!/usr/bin/env python
import unittest
import time
import os
from os.path import join, exists
from tests import TestCase, FILES_DIR
from modipyd.monitor import Event, Monitor
class TestSimpleMonitor(TestCase):
def setUp(self):
self.monitor = Monitor(join(FILES_DIR, 'cycles'), [FILES_DIR])
def test_init(self):
self.assertNotNone(self.monitor)
self.assert_(not self.monitor.monitoring)
def test_start_iterator(self):
modified_iter = self.monitor.start()
self.assertNotNone(modified_iter)
self.assert_(hasattr(modified_iter, 'next'))
self.assert_(callable(modified_iter.next))
def test_not_modified(self):
modified = list(self.monitor.monitor())
self.assertEqual(0, len(modified))
PRISONERS_DIR = join(FILES_DIR, 'prisoners')
class TestMonitor(TestCase):
def mkfiles(self):
# __init__.py
f = open(join(PRISONERS_DIR, '__init__.py'), 'w')
try:
f.write("from prisoners.b import money")
finally:
f.close()
# a.py
f = open(join(PRISONERS_DIR, 'a.py'), 'w')
try:
f.write("")
finally:
f.close()
# b.py
f = open(join(PRISONERS_DIR, 'b.py'), 'w')
try:
f.write("""\
money = 4321.09
""")
finally:
f.close()
# c.py
f = open(join(PRISONERS_DIR, 'c.py'), 'w')
try:
f.write("""\
import prisoners.b
""")
finally:
f.close()
# d.py
path = join(PRISONERS_DIR, 'd.py')
if exists(path):
os.remove(path)
def setUp(self):
self.mkfiles()
self.monitor = Monitor(PRISONERS_DIR, [FILES_DIR])
def test_init(self):
self.assertNotNone(self.monitor)
descriptors = self.monitor.descriptors
self.assertNotNone(self.monitor.descriptors)
self.assertEqual(4, len(descriptors))
self.assert_('prisoners' in descriptors)
self.assert_('prisoners.a' in descriptors)
self.assert_('prisoners.b' in descriptors)
self.assert_('prisoners.c' in descriptors)
def test_init_dependencies(self):
descriptors = self.monitor.descriptors
init = descriptors['prisoners']
a = descriptors['prisoners.a']
b = descriptors['prisoners.b']
c = descriptors['prisoners.c']
self.assertEqual(1, len(init.dependencies))
self.assert_(b in init.dependencies)
self.assertEqual(0, len(init.reverse_dependencies))
self.assertEqual(0, len(a.dependencies))
self.assertEqual(0, len(a.reverse_dependencies))
self.assertEqual(0, len(b.dependencies))
self.assertEqual(2, len(b.reverse_dependencies))
self.assert_(init in b.reverse_dependencies)
self.assert_(c in b.reverse_dependencies)
self.assertEqual(1, len(c.dependencies))
self.assert_(b in c.dependencies)
def test_modified(self):
modified = list(self.monitor.monitor())
self.assertEqual(0, len(modified))
time.sleep(1)
# modify
f = open(join(PRISONERS_DIR, 'b.py'), 'w')
f.write("")
f.close()
time.sleep(0.1)
modified = list(self.monitor.monitor())
self.assertEqual(1, len(modified))
event = modified[0]
self.assertEqual(Event.MODULE_MODIFIED, event.type)
m = event.descriptor
self.assertEqual('prisoners.b', m.name)
self.assertEqual(0, len(m.dependencies))
self.assertEqual(2, len(m.reverse_dependencies))
def test_deleted(self):
descriptors = self.monitor.descriptors
b = descriptors['prisoners.b']
c = descriptors['prisoners.c']
self.assertEqual(4, len(descriptors))
self.assert_(b in c.dependencies)
# remove
os.remove(join(PRISONERS_DIR, 'c.py'))
time.sleep(0.1)
modified_it = iter(self.monitor.monitor())
event = modified_it.next()
self.assertEqual(Event.MODULE_REMOVED, event.type)
modified = event.descriptor
self.assertEqual(c, modified)
self.assert_(b in c.dependencies)
self.assertRaises(StopIteration, modified_it.next)
self.assert_(b not in c.dependencies)
self.assertEqual(3, len(descriptors))
def test_refresh(self):
descriptors = self.monitor.descriptors
self.assertEqual(4, len(descriptors))
a = descriptors['prisoners.a']
self.assertEqual(0, len(a.dependencies))
self.assertEqual(0, len(a.reverse_dependencies))
for _ in range(2):
# create new file
path = join(PRISONERS_DIR, 'd.py')
f = open(path, 'w')
try:
f.write("import prisoners.a")
finally:
f.close()
time.sleep(0.1)
assert exists(path)
for event in self.monitor.refresh():
self.assertEqual(Event.MODULE_CREATED, event.type)
self.assertEqual(descriptors['prisoners.d'], event.descriptor)
break
else:
self.fail("Empty modifieds")
self.assertEqual(5, len(descriptors))
#a = descriptors['prisoners.a']
d = descriptors['prisoners.d']
self.assertEqual(0, len(a.dependencies))
self.assertEqual(1, len(a.reverse_dependencies))
self.assertEqual(1, len(d.dependencies))
self.assertEqual(0, len(d.reverse_dependencies))
# remove file
os.remove(path)
time.sleep(0.1)
assert not exists(path)
it = self.monitor.refresh()
self.assertEqual(d, it.next().descriptor)
self.assertRaises(StopIteration, it.next)
self.assertEqual(4, len(descriptors))
#a = descriptors['prisoners.a']
self.assertEqual(0, len(a.dependencies))
self.assertEqual(0, len(a.reverse_dependencies))
self.assert_('prisoners.d' not in descriptors)
if __name__ == '__main__':
unittest.main()
|
995,186 | 4f73f6eb0e6863d36113c108004e1957802967b7 | from django.urls import path
from . import views
urlpatterns = [
path("", views.EntryListView.as_view(), name="entry_list"),
path("unos/<int:pk>", views.EntryDetailView.as_view(), name="entry_detail"),
path("dodaj/", views.EntryCreateView.as_view(), name="entry_create"),
path("unos/<int:pk>/izmeni", views.EntryUpdateView.as_view(), name="entry_update"),
path("unos/<int:pk>/ukloni", views.EntryDeleteView.as_view(), name="entry_delete"),
] |
995,187 | ccb7b6a38ff7c45b181e82586cb879e48772f598 | #!/usr/bin/python3
def fizzbuzz():
for i in range(1, 101):
th = i % 3 == 0
fv = i % 5 == 0
ftn = i % 15 == 0
if ftn:
rep = "FizzBuzz "
elif th:
rep = "Fizz "
elif fv:
rep = "Buzz "
else:
rep = str(i) + " "
print(rep, end="")
|
995,188 | ca39c7bc8e9274fbeb344120f0baa710460e6276 | import os
import sys
import json
import subprocess
# Step 1: Build the project from scratch
subprocess.run(["./scripts/build.sh"])
# Step 2: get file sizes for various distributed files
files = [
'./dist/federalist.st',
'./dist/stork.wasm',
'./dist/stork.js'
]
sizes = dict([(file.split('./dist/')[1], float(os.path.getsize(file))/1000) for file in files])
# Step 3: Run benchmarks and get mean runtime for each
benchmarks = [
"build/federalist",
"search/federalist/liberty"
]
for bench_name in benchmarks:
print(f"Running benchmark for {bench_name}", file=sys.stderr)
run_bench_cmd = subprocess.run(
["cargo", "criterion", "--message-format=json", bench_name],
stdout=subprocess.PIPE,
text=True
)
grep_for_success_cmd = subprocess.run(
["grep", "benchmark-complete"],
input=run_bench_cmd.stdout,
stdout=subprocess.PIPE,
text=True
)
jq_cmd = subprocess.run(
["jq", ".mean.estimate / 1000000"],
input=grep_for_success_cmd.stdout,
capture_output=True,
text=True
)
bench_time_ms = float(jq_cmd.stdout)
# Step 4: Print out results
sizes.update({
bench_name: bench_time_ms
})
print(json.dumps(sizes, indent=2)) |
995,189 | b25c54a9ca69e2f4fa2ad7199934342875180b2a | from __future__ import print_function
import sys
import os
# hacky input func to handle Python 2
if sys.version_info[0] < 3:
from compat import input
# keep the clutter down
def clear_term():
os.system('cls' if os.name == 'nt' else 'clear')
# grab email/phone, pwd, group id
def get_data():
data={}
print("Welcome to Facebook Admin Tools bulk post approval/\n"
"deletion tool.")
print()
email = input("What is your Facebook email or phone number?")
data['email'] = email
clear_term()
print("Thank you")
print()
password = input("What is your Facebook password?\n"
"It will not be stored.")
data['password'] = password
clear_term()
print("Thank you")
print()
url = input("What is the id of your group?\n"
"ex: facebook.com/groups/999999999999999/\n"
"The id would just be the numbers after ...groups/")
data['url'] = "http://www.facebook.com/groups/{}".format(url)
return data
def login(browser, data):
email_input = browser.find_element_by_name('email')
email_input.send_keys(data['email'])
pass_input = browser.find_element_by_name('pass')
pass_input.send_keys(data['password'])
pass_input.submit()
return print("Login Successful") |
995,190 | b148c60e655d98526ed0241c68d0ddefef8ee501 | from pycricbuzz import Cricbuzz
import json
c = Cricbuzz()
matches = c.matches()
print(json.dumps(matches,indent=4))
input('does it usefull') |
995,191 | ae7407073b11c74948370cd0324477d79ab1dfb0 | """
Helper for getting data from redis
Debug redis calls with:
::
export DEBUG_REDIS=1
# to show debug, trace logging please export ``SHARED_LOG_CFG``
# to a debug logger json file. To turn on debugging for this
# library, you can export this variable to the repo's
# included file with the command:
export SHARED_LOG_CFG=/opt/sa/analysis_engine/log/debug-logging.json
"""
import json
import zlib
import redis
import analysis_engine.consts as ae_consts
import analysis_engine.build_result as build_result
import spylunking.log.setup_logging as log_utils
log = log_utils.build_colorized_logger(name=__name__)
def get_data_from_redis_key(
label=None,
client=None,
host=None,
port=None,
password=None,
db=None,
key=None,
expire=None,
decompress_df=False,
serializer='json',
encoding='utf-8'):
"""get_data_from_redis_key
:param label: log tracking label
:param client: initialized redis client
:param host: not used yet - redis host
:param port: not used yet - redis port
:param password: not used yet - redis password
:param db: not used yet - redis db
:param key: not used yet - redis key
:param expire: not used yet - redis expire
:param decompress_df: used for decompressing
``pandas.DataFrame`` automatically
:param serializer: not used yet - support for future
pickle objects in redis
:param encoding: format of the encoded key in redis
"""
decoded_data = None
data = None
rec = {
'data': data
}
res = build_result.build_result(
status=ae_consts.NOT_RUN,
err=None,
rec=rec)
log_id = label if label else 'get-data'
try:
use_client = client
if not use_client:
log.debug(
'{} get key={} new client={}:{}@{}'.format(
log_id,
key,
host,
port,
db))
use_client = redis.Redis(
host=host,
port=port,
password=password,
db=db)
else:
log.debug(
'{} get key={} client'.format(
log_id,
key))
# create Redis client if not set
# https://redis-py.readthedocs.io/en/latest/index.html#redis.StrictRedis.get # noqa
raw_data = use_client.get(
name=key)
if raw_data:
if decompress_df:
try:
data = zlib.decompress(
raw_data).decode(
encoding)
rec['data'] = json.loads(data)
return build_result.build_result(
status=ae_consts.SUCCESS,
err=None,
rec=rec)
except Exception as f:
if (
'while decompressing data: '
'incorrect header check') in str(f):
data = None
log.critical(
'unable to decompress_df in redis_key={} '
'ex={}'.format(
key,
f))
else:
log.error(
'failed decompress_df in redis_key={} '
'ex={}'.format(
key,
f))
raise f
# allow decompression failure to fallback to previous method
if not data:
log.debug(
'{} decoding key={} encoding={}'.format(
log_id,
key,
encoding))
decoded_data = raw_data.decode(encoding)
log.debug(
'{} deserial key={} serializer={}'.format(
log_id,
key,
serializer))
if serializer == 'json':
data = json.loads(decoded_data)
elif serializer == 'df':
data = decoded_data
else:
data = decoded_data
if data:
if ae_consts.ev('DEBUG_REDIS', '0') == '1':
log.info(
'{} - found key={} data={}'.format(
log_id,
key,
ae_consts.ppj(data)))
else:
log.debug(
'{} - found key={}'.format(
log_id,
key))
# log snippet - if data
rec['data'] = data
return build_result.build_result(
status=ae_consts.SUCCESS,
err=None,
rec=rec)
else:
log.debug(
'{} no data key={}'.format(
log_id,
key))
return build_result.build_result(
status=ae_consts.SUCCESS,
err=None,
rec=rec)
except Exception as e:
err = (
'{} failed - redis get from decoded={} data={} '
'key={} ex={}'.format(
log_id,
decoded_data,
data,
key,
e))
log.error(err)
res = build_result.build_result(
status=ae_consts.ERR,
err=err,
rec=rec)
# end of try/ex for getting redis data
return res
# end of get_data_from_redis_key
|
995,192 | e5bf8cd272118185e73b4bd4f32ac4629ca2f278 | import gl
import sys
print(sys.version)
print(gl.version())
import os
#sys.path.append(['C:\\Users\\Enzon\\Documents\\Projects\\MEP\\mep-scripts', 'C:\\Program Files\\JetBrains\\PyCharm 2020.1.2\\plugins\\python\\helpers\\pydev', 'C:\\Users\\Enzon\\Documents\\Projects\\MEP\\mep-scripts', 'C:\\Program Files\\JetBrains\\PyCharm 2020.1.2\\plugins\\python\\helpers\\pycharm_display', 'C:\\Program Files\\JetBrains\\PyCharm 2020.1.2\\plugins\\python\\helpers\\third_party\\thriftpy', 'C:\\Program Files\\JetBrains\\PyCharm 2020.1.2\\plugins\\python\\helpers\\pydev', 'C:\\Users\\Enzon\\AppData\\Local\\Programs\\Python\\Python37\\python37.zip', 'C:\\Users\\Enzon\\AppData\\Local\\Programs\\Python\\Python37\\DLLs', 'C:\\Users\\Enzon\\AppData\\Local\\Programs\\Python\\Python37\\lib', 'C:\\Users\\Enzon\\AppData\\Local\\Programs\\Python\\Python37', 'C:\\Users\\Enzon\\Documents\\Projects\\MEP\\mep-scripts\\venv', 'C:\\Users\\Enzon\\Documents\\Projects\\MEP\\mep-scripts\\venv\\lib\\site-packages', 'C:\\Program Files\\JetBrains\\PyCharm 2020.1.2\\plugins\\python\\helpers\\pycharm_matplotlib_backe#nd', 'C:\\Users\\Enzon\\Documents\\Projects\\MEP\\mep-scripts', 'C:/Users/Enzon/Documents/Projects/MEP/mep-scripts'])
print(sys.path)
#import pip
#help(pip)
#def import_or_install(package):
#try:
# __import__(package)
#except ImportError:
# print('installing '+package)
# pip.main(['install', package])
#import_or_install('pip')
#import _imaging
#sys.path.append('C:/Users/Enzon/Documents/Projects/MEP/mep-scripts/venv/Lib/site-packages')
#import_or_install('socket')
#import_or_install('ctypes')
#import_or_install('imageio')
#import_or_install('PIL')
#import_or_install('Pillow')
#import glob
#import imageio
#import PIL.Image
#from PIL import Image
#help(PIL)
#PIL.ImageChops
#sys.path.append('C:\Users\Enzon\Documents\Projects\MEP\mep-scripts\venv\Lib\site-packages')
#import numpy as np
#help(np)
#help(gl)
gl.linewidth(1)
gl.view(64)
round(0.1)
nRot = 50
data_path = 'C:/Users/enzon/Documents/Projects/MEP/mep-scripts/Data/Human/Processed'
analysis_path = 'C:/Users/enzon/Documents/Projects/MEP/mep-scripts/Data/Human/Analysis/imageSequenceFolders'
subjects = ['patient', 'control4'] #######################################################################
for subject in subjects:
for i in range(4):
if i==0:
## full-ci
input_path = data_path + '/' + subject+'/'+subject+'_reoriented.nii.gz'
overlay_path = data_path + '/' + subject+'/'+subject+'_annotation_orsuit_thrarg_adjusted_lobular_mc_ci.nii.gz'
output_path = analysis_path + '/' + subject+'_50rot_overlay_full_ci'
elif i==1:
## ci-ci
input_path = data_path + '/' + subject+'/'+subject+'_reoriented_ci.nii.gz'
overlay_path = data_path + '/' + subject+'/'+subject+'_annotation_orsuit_thrarg_adjusted_lobular_mc_ci.nii.gz'
output_path = analysis_path + '/' + subject+'_50rot_overlay_ci_ci'
elif i==2:
## full-si, only cutout interesting really
input_path = data_path + '/' + subject+'/'+subject+'_reoriented.nii.gz'
overlay_path = data_path + '/' + subject+'/'+subject+'_annotation_orsuit_thrarg_adjusted_lobular_mc_ci.nii.gz'
overlay2_path = data_path + '/' + subject+'/'+subject+'_annotation_subcortical_thrarg_si.nii.gz'
output_path = analysis_path + '/' + subject+'_50rot_overlay_full_si'
else:
## si-si
input_path = data_path + '/' + subject+'/'+subject+'_reoriented_si.nii.gz'
overlay_path = data_path + '/' + subject+'/'+subject+'_annotation_subcortical_thrarg_si.nii.gz'
output_path = analysis_path + '/' + subject+'_50rot_overlay_si_si'
print(input_path)
print(overlay_path)
for j in range(2):
if j==1:
output_path = output_path+'_cutout'
output_gif_path = output_path+'.gif'
print(output_path)
if not os.path.exists(output_path):
os.mkdir(output_path)
if i==2:
gl.loadimage(input_path)
gl.overlayload(overlay_path)
gl.overlayload(overlay2_path)
gl.opacity(1, 40)
gl.opacity(2, 70)
gl.minmax(1, 0, 34)
gl.minmax(2, 6, 10)
gl.colorname(1, 'x_rain')
gl.colorname(2, 'x_rain')
#gl.fullscreen(0)
else:
gl.loadimage(input_path)
gl.overlayload(overlay_path)
gl.opacity(1, 40)
gl.minmax(1, 0, 34)
gl.colorname(1, 'x_rain')
#gl.fullscreen(0)
if j==1:
if i<2:
gl.cutout(0.5, 0.5, 0.5, 0, 0, 1)
else:
gl.cutout(0.53, 0.42, 0.39, 0, 0, 1)
# help(gl.colorname)
gl.shadername('overlaysurface')
#im_list = []
for iRot in range(nRot):
print(iRot)
Rot = iRot * (360/nRot)
print(Rot)
gl.azimuthelevation(round(Rot), 20)
filepath = output_path+'/imagenew'+'_'+str(round(Rot)).rjust(3, '0')
gl.savebmp(filepath)
#im = PIL.Image.open(filepath)
#im_list.append(im)
#print('saving '+output_gif_path)
#im_list[0].save(output_gif_path, save_all=True, append_images=images[1:], optimize=False, duration=40, loop=0)
#files = glob.glob(os.path.join(output_path, '*'))
#os.listdir(output_path)
#files = [f"{folder}/{file}" for file in os.listdir(output_path)]
##images = [imageio.imread(file) for file in files]
##imageio.mimwrite(os.path.join(analysis_path, 'movie.gif'), images, fps=20)
# gl.resetdefaults()
# gl.viewcoronal(True)
#gl.view(2)
#gl.orthoviewmm(0.5,0.5,0.5) |
995,193 | 6b885e9771cd143904c432dfd25b19f0e1d49f2a | # !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements base classes for forecasting in sktime."""
__all__ = [
"ForecastingHorizon",
"BaseForecaster",
]
from sktime.forecasting.base._base import BaseForecaster
from sktime.forecasting.base._fh import ForecastingHorizon
|
995,194 | e4aadbcc64168d15375ba8ab24154a8860058d5c | #-----------------------------------
# 字符串格式设置 使用 % (百分号)
#-----------------------------------
str = '%s是一个充满活力的国家' % '中国'
print(str)
str = '编号: %04d 今日营收: %.2f' % (32, 15.8)
print(str)
|
995,195 | 88903d5d2e86c3804d013213d402bbb1e4078cf6 | #/usr/bin/python2.7
import pygame
from pygame.locals import *
import time
import sys
import pgbutton
import datetime
import GIFImage
import os
import ptext
from sense_hat import SenseHat
import forecastio
import math
api_key = "3c8457364063f406ccc3ce8e71861dc9"
lat = "-37.907910"
lng = "145.020583"
forecast = forecastio.load_forecast(api_key, lat, lng)
sh = SenseHat()
pygame.init()
pygame.font.init()
pygame.mouse.set_visible(0)
date = time.strftime("%d")
WIDTH = 800
HEIGHT = 480
GREY = pygame.Color(68, 68, 68)
LIGHTGREY = pygame.Color(169, 169, 169)
sh.clear()
bodyfont = pygame.font.Font("/usr/share/fonts/truetype/freefont/FreeSans.ttf", 14)
def dateSuffix():
digits = [int(digit) for digit in str(date)]
if digits[-2] == 1:
return "th"
elif digits[-1] == 1 and digits[-2] != 1:
return "st"
elif digits[-1] == 2 and digits[-2] != 1:
return "nd"
elif digits[-1] == 3 and digits[-2] != 1:
return "rd"
elif digits[-1] >= 4 and digits[-2] != 1:
return "th"
elif digits[-1] == 0:
return "th"
def cleanup():
pygame.quit()
sys.exit()
def main_menu():
global DISPLAYSURF
DISPLAYSURF = pygame.display.set_mode((WIDTH, HEIGHT), pygame.NOFRAME | pygame.DOUBLEBUF)
timeButtonObj = pgbutton.PgButton((80, 60, 120, 60), "Time and Date")
weatherButtonObj = pgbutton.PgButton((600, 60, 120, 60), "Weather")
exitButtonObj = pgbutton.PgButton((720, 20, 60, 30), "Exit")
while True:
for event in pygame.event.get():
if 'click' in timeButtonObj.handleEvent(event):
setup_time()
if 'click' in weatherButtonObj.handleEvent(event):
setup_weather()
if 'click' in exitButtonObj.handleEvent(event):
os.system("echo nutscatsdogs3187 | sudo -S shutdown now")
pygame.Surface.fill(DISPLAYSURF, GREY)
exitButtonObj.draw(DISPLAYSURF)
timeButtonObj.draw(DISPLAYSURF)
weatherButtonObj.draw(DISPLAYSURF)
conditionsButtonObj.draw(DISPLAYSURF)
pygame.display.update()
def setup_time():
returnButtonObj = pgbutton.PgButton((720, 20, 60, 30), "Return")
while True:
for event in pygame.event.get():
if 'click' in returnButtonObj.handleEvent(event):
main_menu()
pygame.Surface.fill(DISPLAYSURF, GREY)
returnButtonObj.draw(DISPLAYSURF)
#currentTime = datetime.strptime("%A, %d %B, %H:%M:%S")
currentTime = time.strftime("%A, %d" + dateSuffix() + " %B, %H:%M:%S")
ptext.draw("Current Time: " + currentTime + ".", (20, 150), fontsize = 50)
pygame.display.update()
def setup_weather_with_radar():
returnButtonObj = pgbutton.PgButton((720, 20, 60, 30), "Return")
while True:
os.system("wget -N -o /dev/null ftp://ftp.bom.gov.au/anon/gen/radar/IDR014.gif")
radar = GIFImage.GIFImage("IDR014.gif")
for event in pygame.event.get():
if 'click' in returnButtonObj.handleEvent(event):
setup_weather()
pygame.Surface.fill(DISPLAYSURF, GREY)
returnButtonObj.draw(DISPLAYSURF)
radar.render(DISPLAYSURF, (140, -10))
pygame.display.update()
def setup_weather():
radarToggleButtonObj = pgbutton.PgButton((600, 100, 120, 60), "Show Radar")
returnButtonObj = pgbutton.PgButton((720, 20, 60, 30), "Return")
while True:
byHour = forecast.hourly()
for event in pygame.event.get():
if 'click' in radarToggleButtonObj.handleEvent(event):
setup_weather_with_radar()
if 'click' in returnButtonObj.handleEvent(event):
main_menu()
pygame.Surface.fill(DISPLAYSURF, GREY)
ptext.draw("The current predicted wheather is " + byHour.summary, (20, 100))
ptext.draw("The current temperature is approximately " + str(math.floor((sh.get_temperature() - 19))), (20, 150))
radarToggleButtonObj.draw(DISPLAYSURF)
returnButtonObj.draw(DISPLAYSURF)
pygame.display.update()
if __name__ == "__main__":
main_menu()
|
995,196 | 75144fd6b4374c46cfca1390812c40a2fd67b5a1 | COL_FIN_POS = 'Fin Pos'
COL_CAR_ID = 'Car ID'
COL_CAR = 'Car'
COL_CAR_CLASS_ID = 'Car Class ID'
COL_CAR_CLASS = 'Car Class'
COL_TEAM_ID = 'Team ID'
COL_CUST_ID = 'Cust ID'
COL_NAME = 'Name'
COL_START_POS = 'Start Pos'
COL_CAR_NUM = 'Car #'
COL_OUT_ID = 'Out ID'
COL_OUT = 'Out'
COL_INTERVAL = 'Interval'
COL_LAPS_LED = 'Laps Led'
COL_QUALIFY_TIME = 'Qualify Time'
COL_AVERAGE_LAP_TIME = 'Average Lap Time'
COL_FASTEST_LAP_TIME = 'Fastest Lap Time'
COL_FAST_LAP_NUM = 'Fast Lap#'
COL_LAPS_COMP = 'Laps Comp'
COL_INC = 'Inc'
COL_PTS = 'Pts'
COL_CLUB_PTS = 'Club Pts'
COL_DIV = 'Div'
COL_CLUB_ID = 'Club ID'
COL_CLUB = 'Club'
COL_OLD_IRATING = 'Old iRating'
COL_NEW_IRATING = 'New iRating'
COL_OLD_LICENSE_LEVEL = 'Old License Level'
COL_OLD_LICENSE_SUB_LEVEL = 'Old License Sub-Level'
COL_NEW_LICENSE_LEVEL = 'New License Level'
COL_NEW_LICENSE_SUB_LEVEL = 'New License Sub-Level'
COL_SERIES_NAME = 'Series Name'
COL_MAX_FUEL_FILL = 'Max Fuel Fill%'
COL_WEIGHT_PENALTY_KG = 'Weight Penalty (KG)'
COL_AGG_PTS = 'Agg Pts'
CLASS_ID_LMP1 = '116'
CLASS_ID_HPD = '40'
CLASS_ID_GTE = '100'
CLASS_ID_GT3CUP = '95'
CLASS_ID_DP = '77'
CLASS_ID_IMSA2_GT3 = '473'
CLASS_ID_GT3 = '59'
CLASS_ID_UNKNOWN = '0'
CLASS_ID_MX5_CUP = '74'
CLASS_ID_FERRARI_GT3 = '102'
CLASS_ID_LMP1_SIL = '1410'
CLASS_ID_GTE_SIL = '1411'
CLASS_ID_GT3_SIL = '1412'
classNames = {
CLASS_ID_GTE: 'GTE',
CLASS_ID_HPD: 'HPD',
CLASS_ID_LMP1: 'LMP1',
CLASS_ID_GT3CUP: 'Porsche 911 GT3 Cup',
CLASS_ID_DP: 'DP',
CLASS_ID_GT3: 'GT3',
CLASS_ID_IMSA2_GT3: 'GT3',
CLASS_ID_UNKNOWN: '',
CLASS_ID_MX5_CUP: 'Mazda MX-5 Cup',
CLASS_ID_FERRARI_GT3: 'Ferrari 488 GT3',
CLASS_ID_GTE_SIL: 'GT Pro',
CLASS_ID_LMP1_SIL: 'LMP1',
CLASS_ID_GT3_SIL: 'GT Am'
}
|
995,197 | 8a732ae31333c2337473a5f833c2777770ecaf88 | import asyncio
import websockets
from motion_sensor import MoSensor
import random
import json
ROOM_NAME = 'JET'
sensor = MoSensor()
sensor.start()
status = 'no movement'
async def room_status(websocket, path):
# sensor.start()
status = 'no movement'
while True:
past_status = status
status = sensor.get_history()
if status != past_status:
await websocket.send(json.dumps({'room': ROOM_NAME, 'status': status}))
await asyncio.sleep(1)
start_server = websockets.serve(room_status, "0.0.0.0", 8080)
try:
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
except KeyboardInterrupt:
pass
finally:
sensor.stop()
|
995,198 | 315f1c41e9dad0c66b67be13bcd5e36fee70c6b7 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from .cpu_nms import nms, soft_nms
# from .nms import py_cpu_nms
import numpy as np
# def Soft_Nms(dets, sigma=0.5, Nt=0.3, threshold=0.001, method=1):
#
# keep = soft_nms(np.ascontiguousarray(dets, dtype=np.float32),
# np.float32(sigma), np.float32(Nt),
# np.float32(threshold),
# np.uint8(method))
# return keep
def NMS(dets, thresh, nms_algorithm, soft=False):
"""Dispatch to either CPU or GPU NMS implementations."""
if dets.shape[0] == 0:
return []
if soft:
return soft_nms(np.ascontiguousarray(dets, dtype=np.float32), Nt=thresh, method=nms_algorithm)
else:
return nms(dets, thresh)
|
995,199 | 221f845ee313dfed7b23855587d3bfd428450e3a | print(min(2,3))
print(min(2,3,-1))
list1 = [1,2,3,4,5,-54]
print(min(list1))
list2 = ['a','b','c']
print(min(list2))
List3 = ['1','2','abc','xyz']
print(min(List3))
# List4=[]
# print(max(List4)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.