text stringlengths 38 1.54M |
|---|
#! /usr/bin/env python3
# Module imageImport.py
#################################################
# #
# Python functions for importing image files #
# #
# Author: Robert Thomas #
# Username: rjst20 #
# #
#################################################
# OpenCV used for loading images
from cv2 import *
# Modules below used for listing available images in the current directory
from os import getcwd
from os.path import join, split
from glob import glob
# Modules for function termination and writing to file
from sys import exit
import sys
import numpy as np
# Allow for plotting histogram graphs
import matplotlib.pyplot as plt
class Image:
    # Define a class that contains an image and its associated name
    'Common class for an image'
    # NOTE(review): only the single-quoted string above is the class
    # docstring; the triple-quoted block below is a free-standing string
    # expression (a no-op), kept as informal attribute documentation.
    """
    Attributes:
    name: A string containing the name of the original image
    image: An array of the individual pixels of the image
    transforms: A list of all performed transforms done on the image
    """
    def __init__(self, *args):
        # Constructor for the class.
        # Accepts either no argument (empty image), the sentinel string
        # 'empty', or the filename of a .png present in the current directory.
        # Generate a list of files within the current directory for validation
        listOfFiles = self._listFiles()
        # No transforms have been yet done on the image
        self.transforms = []
        if (0 == len(args)):  # Validate that there is an argument to import, if not generate an empty image
            [self.image, self.name] = [ [], "" ]
        else:  # If a string is provided check that it exists within the current directory
            args = args[0]  # Select only the first argument provided if it exists
            if (args) in listOfFiles:  # If filename is present within the current directory import the file and return it
                imageImported = self._importPic(args)
                __printSpacer__('Image file ' + args + ' succesfully loaded')
                [self.image, self.name] = imageImported, args
            elif 'empty' == args:
                # Sentinel used by duplicate() to build a blank shell instance
                self.image = []
                self.name = 'emptyImage'
            else:  # If file does not exist within the directory exit the scripts
                __printSpacer__(args + ': file not found')
                self._printFileList(listOfFiles)
                exit()
    ##########################################
    ##                                      ##
    ##        Methods (Internal):           ##
    ##                                      ##
    ##########################################
    def _listFiles(*args):
        # Function to list available files for processing in the current directory.
        # NOTE: declared with *args, so args[0] receives 'self' (unused here).
        # Define the current working directory
        cwd = getcwd()
        # Create a glob-able parser
        pngFilter = join(cwd,'*.png')
        # Extract an array of all pngs in the current working directory
        pngGlob = glob(pngFilter)
        # Initialise an empty array to contain all valid png image files in the directory
        pngFiles = []
        # For each image detected in the current working directory remove the
        # directory path and add the bare file name to the pngFiles array
        for image in pngGlob:
            _ , fileName = split(image)
            pngFiles.append(fileName)
        # return the populated array of file names
        return pngFiles
    def _importPic(*args):
        # Function to import an image corresponding to a pre-validated string.
        # NOTE: declared with *args, so args[0] is 'self' and args[1] the filename.
        # OpenCV's 'imread' flag values:
        #  1 = color image without alpha channel
        #  0 = grayscale image
        # -1 = unchanged image including alpha channel
        imageImported = imread(args[1],0)
        # Return imported image (grayscale numpy array)
        return imageImported
    def _printFileList(_,listOfFiles):
        # Function to print the files within the list.
        # The first parameter '_' receives 'self' and is unused.
        __printSpacer__()
        print('No input file selected, please enter one of the following files as a function input:')
        print('')
        for entry in listOfFiles:
            print(entry)
        __printSpacer__()
    ##########################################
    ##                                      ##
    ##        Methods (External):           ##
    ##                                      ##
    ##########################################
    def imageSize(self):
        # Return the (rows, columns) shape of the stored image array.
        imageM, imageN = self.image.shape
        return imageM, imageN
    def updateImage(self, newImage, transform):
        # Update the image after a new transform and record the transform name
        self.image = newImage
        self.transforms.append(transform)
    def duplicate(self):
        # Function to create a duplicate of the current structure
        newStruct = Image('empty')
        # Hard assigns (fresh array via np.add) to stop altering the original
        imageM, imageN = self.imageSize()
        newStruct.image = np.add(np.zeros([imageM, imageN],'uint8'), self.image)
        newStruct.name = 'Duplicate' + self.name
        newStruct.transforms = self.transforms + ['Duplicate']
        return newStruct
    def mask(self, mask):
        # Function to apply a mask provided to the image.
        # Pixel-wise minimum: black (0) mask pixels blank the image.
        print('Applying mask')
        self.image = np.minimum(self.image, mask.image)
        print('Mask done')
    def overlay(self, imageOver, mask):
        # Function to overlay two images of the same size using a defined mask.
        # NOTE(review): mutates self, imageOver AND mask in place.
        # Apply mask to the base image
        self.mask(mask)
        # Invert the mask
        mask.POnegImage()
        # Apply the inverted mask to the image to overlay
        imageOver.mask(mask)
        # Combine the two masked images
        self.image = self.image + imageOver.image
    ##########################################
    ##                                      ##
    ##        Methods (Outputs)             ##
    ##                                      ##
    ##########################################
    def showImage(self):
        # Shows the contained image until a key is pressed
        imshow(self.name, self.image)
        waitKey(0)
        destroyAllWindows()
    def saveImage(self):
        # Function to save an image to file after processing; the output name
        # is the original name plus the joined list of applied transforms.
        imwrite(self.name + "_".join(self.transforms) + ".png", self.image)
    def histogram(self):
        # Function to create a histogram of an image's greyscale levels using the formula:
        # p(r) = n / MN
        # Find the total number of elements in the image
        imageM, imageN = self.imageSize()
        imageMN = imageM * imageN *1.0
        print(imageMN)
        # Initialise an array for counting the occurences of each grey value.
        # NOTE(review): dtype 'uint8' means each bin wraps around at 256 —
        # counts are wrong for any realistically sized image; verify intent.
        nkCount = np.zeros(256, 'uint8')
        for i in range(imageM):
            for j in range(imageN):
                nkCount[self.image[i,j]] = nkCount[self.image[i,j]] + 1.0
        # Calculate probability of occurence
        probRk = nkCount / imageMN
        plt.plot(probRk)
        plt.xlim(0,255)
        plt.show()
    ##########################################
    ##                                      ##
    ##   Methods (Point Operator Filters)   ##
    ##                                      ##
    ##########################################
    def POnormalise(self, nMin, nMax):
        # Function to utilise contrast stretching for a given image using the formula:
        # f(x,y) = [f(x,y) - Omin] x (Nmax-Nmin)/(Omax-Omin) + Nmin
        # Ensure that the boundaries are floats so that the conversion ratio is a float
        nMin = float(nMin)
        nMax = float(nMax)
        # Find Omax and Omin Values
        oMin = np.amin(self.image)
        oMax = np.amax(self.image)
        # Generate the conversion ratio to reduce the amount of divisions required
        conversionRatio = ((nMax - nMin)/(oMax-oMin))
        # Create an emtpy array to populate with the new image information
        imageM, imageN = self.imageSize()
        newImage = np.zeros([imageM, imageN],'uint8')
        for i in range(imageM):
            for j in range(imageN):
                newImage[i,j] = int((self.image[i,j] - oMin) * conversionRatio + nMin)
        self.updateImage(newImage, 'normaliseHistogram' + str(nMin) + '_' + str(nMax))
    def POequalise(self):
        # NOTE(review): unfinished — allocates the output array but performs
        # no equalisation and never calls updateImage.
        # Create an empty array to populate with the new iamge information
        imageM, imageN = self.imageSize()
        newImage = np.zeros([imageM, imageN],'uint8')
    def PObitSlice(self, lMin, lMax):
        # Function to produce the parts of the image within a certain bit range,
        # Returns a new image structure so that it can be used for generating masks.
        # Pixels inside [lMin, lMax] become 255; everything else becomes 0.
        newImageStruct = self.duplicate()
        newImage = newImageStruct.image
        lowerSubset = newImage < lMin
        midSubset = (newImage >= lMin) & (newImage <= lMax)  # We want to include the values specified
        upperSubset = newImage > lMax
        newImage[lowerSubset] = 0
        newImage[midSubset] = 255
        newImage[upperSubset] = 0
        newImageStruct.updateImage(newImage,'GreySlice')
        return newImageStruct
    def POnegImage(self):
        # Function to invert the colours (or greyscale) of the image
        initialImage = self.image
        newImage = 255 - initialImage
        self.updateImage(newImage, 'Invert')
def __printSpacer__(*args):
    # Function created to print a line of asterix, made seperate to make code neater.
    # NOTE(review): reconstructed at module level — the bare __printSpacer__(...)
    # calls inside Image's methods require a module-global function, not a method.
    if (0 == len(args)):  # If no arguments are included then print an asterix line spacer
        print('')
        print('************************************************************************************')
        print('')
    else:  # If arguments are provided then print the argument surrounded by asterix'
        print('')
        print('************************************************************************************')
        print(args[0])
        print('************************************************************************************')
        print('')
# a = Image('foetus.png')
# #b = Image()
# a.showImage()
# imshow('image', a.image)
# waitKey(0)
# destroyAllWindows()
#a = Image('NZjers1.png')
#b = Image('NZjers1.png')
# Module-level smoke test: load the sample scan.
# NOTE(review): runs on import and exits if foetus.png is absent from the cwd.
a = Image('foetus.png')
#b = Image('foetus.png')
#makeHistogram(a)
#a.showImage()
def removeBlack(arg):
    # Strip the pure-black pixels out of 'arg' in place, then display it.
    # Slice out grey level 0 only: black pixels become 255, all others 0.
    zero_slice = arg.PObitSlice(0, 0)
    # Invert the slice so originally-black pixels map to 0, the rest to 255.
    zero_slice.POnegImage()
    # Masking with the inverted slice zeroes exactly the black pixels.
    arg.mask(zero_slice)
    arg.showImage()
# Demo pipeline: keep only grey levels in [0, 100], show the result, then
# stretch the surviving histogram across the full 0-255 range and show again.
mask = a.PObitSlice(0,100)
#b.POnegImage()
#a.overlay(b,mask)
#combineImages(a,b,mask)
a.mask(mask)
a.showImage()
a.histogram()
a.POnormalise(0,255)
a.histogram()
a.showImage()
|
#Boa:Frame:Frame2
import wx
import MySQLdb
def create(parent):
    # Boa-generated factory: build the main frame owned by `parent`.
    return Frame2(parent)
# Boa-generated wx ID constants: 11 for the frame/panel/buttons, 7 for timers.
[wxID_FRAME2, wxID_FRAME2BUTTON1, wxID_FRAME2BUTTON2, wxID_FRAME2BUTTON3,
 wxID_FRAME2BUTTON4, wxID_FRAME2BUTTON5, wxID_FRAME2BUTTON6,
 wxID_FRAME2BUTTON7, wxID_FRAME2BUTTON8, wxID_FRAME2PANEL1, wxID_FRAME2START,
] = [wx.NewId() for _init_ctrls in range(11)]
[wxID_FRAME2TIMER1, wxID_FRAME2TIMER2, wxID_FRAME2TIMER3, wxID_FRAME2TIMER4,
 wxID_FRAME2TIMER5, wxID_FRAME2TIMER6, wxID_FRAME2TIMER7,
] = [wx.NewId() for _init_utils in range(7)]
class Frame2(wx.Frame):
    """Boa-generated frame (Python 2): 'Start' kicks off a chain of seven
    timers, each polling a MySQL 'colors' table and recolouring one button."""
    # NOTE(review): 'global db' in a class body has no effect on the methods
    # below; each method's 'db' is resolved independently — see OnStartButton.
    global db
    def _init_utils(self):
        # generated method, don't edit
        # Seven one-shot-style timers, each bound to its own handler.
        self.timer1 = wx.Timer(id=wxID_FRAME2TIMER1, owner=self)
        self.Bind(wx.EVT_TIMER, self.OnTimer1Timer, id=wxID_FRAME2TIMER1)
        self.timer2 = wx.Timer(id=wxID_FRAME2TIMER2, owner=self)
        self.Bind(wx.EVT_TIMER, self.OnTimer2Timer, id=wxID_FRAME2TIMER2)
        self.timer3 = wx.Timer(id=wxID_FRAME2TIMER3, owner=self)
        self.Bind(wx.EVT_TIMER, self.OnTimer3Timer, id=wxID_FRAME2TIMER3)
        self.timer4 = wx.Timer(id=wxID_FRAME2TIMER4, owner=self)
        self.Bind(wx.EVT_TIMER, self.OnTimer4Timer, id=wxID_FRAME2TIMER4)
        self.timer5 = wx.Timer(id=wxID_FRAME2TIMER5, owner=self)
        self.Bind(wx.EVT_TIMER, self.OnTimer5Timer, id=wxID_FRAME2TIMER5)
        self.timer6 = wx.Timer(id=wxID_FRAME2TIMER6, owner=self)
        self.Bind(wx.EVT_TIMER, self.OnTimer6Timer, id=wxID_FRAME2TIMER6)
        self.timer7 = wx.Timer(id=wxID_FRAME2TIMER7, owner=self)
        self.Bind(wx.EVT_TIMER, self.OnTimer7Timer, id=wxID_FRAME2TIMER7)
    def _init_ctrls(self, prnt):
        # generated method, don't edit
        # Frame, panel, the Start button and eight colourable buttons.
        wx.Frame.__init__(self, id=wxID_FRAME2, name='', parent=prnt,
              pos=wx.Point(413, 180), size=wx.Size(400, 453),
              style=wx.DEFAULT_FRAME_STYLE, title='Frame2')
        self._init_utils()
        self.SetClientSize(wx.Size(384, 415))
        self.panel1 = wx.Panel(id=wxID_FRAME2PANEL1, name='panel1', parent=self,
              pos=wx.Point(0, 0), size=wx.Size(384, 415),
              style=wx.TAB_TRAVERSAL)
        self.Start = wx.Button(id=wxID_FRAME2START, label='Start', name='Start',
              parent=self.panel1, pos=wx.Point(144, 96), size=wx.Size(75, 23),
              style=0)
        self.Start.Bind(wx.EVT_BUTTON, self.OnStartButton, id=wxID_FRAME2START)
        self.button1 = wx.Button(id=wxID_FRAME2BUTTON1, label='button1',
              name='button1', parent=self.panel1, pos=wx.Point(8, 16),
              size=wx.Size(75, 23), style=0)
        self.button2 = wx.Button(id=wxID_FRAME2BUTTON2, label='button2',
              name='button2', parent=self.panel1, pos=wx.Point(96, 16),
              size=wx.Size(75, 23), style=0)
        self.button3 = wx.Button(id=wxID_FRAME2BUTTON3, label='button3',
              name='button3', parent=self.panel1, pos=wx.Point(184, 16),
              size=wx.Size(75, 23), style=0)
        self.button4 = wx.Button(id=wxID_FRAME2BUTTON4, label='button4',
              name='button4', parent=self.panel1, pos=wx.Point(272, 16),
              size=wx.Size(75, 23), style=0)
        self.button5 = wx.Button(id=wxID_FRAME2BUTTON5, label='button5',
              name='button5', parent=self.panel1, pos=wx.Point(8, 56),
              size=wx.Size(75, 23), style=0)
        self.button6 = wx.Button(id=wxID_FRAME2BUTTON6, label='button6',
              name='button6', parent=self.panel1, pos=wx.Point(96, 56),
              size=wx.Size(75, 23), style=0)
        self.button7 = wx.Button(id=wxID_FRAME2BUTTON7, label='button7',
              name='button7', parent=self.panel1, pos=wx.Point(184, 56),
              size=wx.Size(75, 23), style=0)
        self.button8 = wx.Button(id=wxID_FRAME2BUTTON8, label='button8',
              name='button8', parent=self.panel1, pos=wx.Point(272, 56),
              size=wx.Size(75, 23), style=0)
    def __init__(self, parent):
        # Build all controls (which also wires up the timers).
        self._init_ctrls(parent)
    def OnStartButton(self, event):
        # Connect, poll row for Button = 1, recolour button1, start timer1.
        # NOTE(review): 'db' is a LOCAL here, while every OnTimer*Timer handler
        # reads an (unassigned) global 'db' — the timers will raise NameError
        # unless 'db' exists at module level; verify at runtime.
        db = MySQLdb.connect("127.0.0.1","root","asdf","fetch_data" )
        try:
            db.query("""SELECT * FROM colors where Button = 1""")
            r = db.store_result()
            print r.fetch_row()
            # NOTE(review): the condition below is the one-element tuple
            # ((r.fetch_row() == 'Red'),), which is ALWAYS truthy, so the else
            # branch is unreachable; also this second fetch_row() consumes the
            # row AFTER the one printed above. Same pattern in every handler.
            if (r.fetch_row() == 'Red',):
                self.button1.SetBackgroundColour('Blue')
                self.Refresh()
                self.timer1.Start(2000)
            else:
                print "Fetching not possible"
        except:
            # NOTE(review): bare except hides all errors, including the
            # NameError/connection problems mentioned above.
            print "Error: unable to fetch data"
    def OnTimer1Timer(self, event):
        # Poll Button = 2, recolour button2, chain to timer2.
        try:
            db.query("""SELECT * FROM colors where Button = 2""")
            r1 = db.store_result()
            print r1.fetch_row()
            if (r1.fetch_row() == 'Green',):
                self.button2.SetBackgroundColour('Green')
                self.Refresh()
                self.timer1.Stop()
                self.timer2.Start(2000)
            else:
                print "Fetching not possible"
        except:
            print "Error: unable to fetch data"
    def OnTimer2Timer(self, event):
        # Poll Button = 3, recolour button3, chain to timer3.
        try:
            db.query("""SELECT * FROM colors where Button = 3""")
            r2 = db.store_result()
            print r2.fetch_row()
            if (r2.fetch_row() == 'Red',):
                self.button3.SetBackgroundColour('Red')
                self.Refresh()
                self.timer2.Stop()
                self.timer3.Start(2000)
            else:
                print "Fetching not possible"
        except:
            print "Error: unable to fetch data"
    def OnTimer3Timer(self, event):
        # Poll Button = 4, recolour button4, chain to timer4.
        try:
            db.query("""SELECT * FROM colors where Button = 4""")
            r3 = db.store_result()
            print r3.fetch_row()
            if (r3.fetch_row() == 'Yellow',):
                self.button4.SetBackgroundColour('Yellow')
                self.Refresh()
                self.timer3.Stop()
                self.timer4.Start(2000)
            else:
                print "Fetching not possible"
        except:
            print "Error: unable to fetch data"
    def OnTimer4Timer(self, event):
        # Poll Button = 5, recolour button5, chain to timer5.
        try:
            db.query("""SELECT * FROM colors where Button = 5""")
            r4 = db.store_result()
            print r4.fetch_row()
            if (r4.fetch_row() == 'Orange',):
                self.button5.SetBackgroundColour('Orange')
                self.Refresh()
                self.timer4.Stop()
                self.timer5.Start(2000)
            else:
                print "Fetching not possible"
        except:
            print "Error: unable to fetch data"
    def OnTimer5Timer(self, event):
        # Poll Button = 6, recolour button6, chain to timer6.
        try:
            db.query("""SELECT * FROM colors where Button = 6""")
            r5 = db.store_result()
            print r5.fetch_row()
            if (r5.fetch_row() == 'Grey',):
                self.button6.SetBackgroundColour('Grey')
                self.Refresh()
                self.timer5.Stop()
                self.timer6.Start(2000)
            else:
                print "Fetching not possible"
        except:
            print "Error: unable to fetch data"
    def OnTimer6Timer(self, event):
        # Poll Button = 7, recolour button7, chain to timer7.
        try:
            db.query("""SELECT * FROM colors where Button = 7""")
            r6 = db.store_result()
            print r6.fetch_row()
            if (r6.fetch_row() == 'Black',):
                self.button7.SetBackgroundColour('Black')
                self.Refresh()
                self.timer6.Stop()
                self.timer7.Start(2000)
            else:
                print "Fetching not possible"
        except:
            print "Error: unable to fetch data"
    def OnTimer7Timer(self, event):
        # Poll Button = 8, recolour button8, end of the chain.
        try:
            db.query("""SELECT * FROM colors where Button = 8""")
            r7 = db.store_result()
            print r7.fetch_row()
            # NOTE(review): fetch_row(0) here (maxrows=0) differs from the
            # other handlers' fetch_row() — confirm whether intentional.
            if (r7.fetch_row(0) == 'White',):
                self.button8.SetBackgroundColour('White')
                self.Refresh()
                self.timer7.Stop()
            else:
                print "Fetching not possible"
        except:
            print "Error: unable to fetch data"
|
import argparse
import logging
import apache_beam as beam
from apache_beam.io.gcp.internal.clients import bigquery
class BigQueryToBigQuery(beam.DoFn):
    """Placeholder DoFn for a BigQuery-to-BigQuery transform.

    Processing logic is not implemented yet: elements are consumed and
    nothing is emitted.
    """

    def __init__(self):
        """No configuration required."""

    def process(self, element):
        """Consume `element` and emit nothing (implicitly returns None)."""
def run(argv=None, save_main_session=True):
    """Parse the pipeline's command line arguments.

    Args:
        argv: optional list of argument strings; ``None`` lets argparse
            read ``sys.argv`` as usual.
        save_main_session: accepted for Dataflow-style compatibility;
            currently unused.

    Returns:
        Tuple ``(known_args, pipeline_args)`` — the parsed known arguments
        namespace and the leftover arguments for the pipeline runner.
        (Previously nothing was returned; returning the parse result is
        backward compatible and makes the function usable/testable.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input',
                        dest='input',
                        default='cb-dataflow-python:dataflow-output',
                        help='Input table to process')
    # BUG FIX: dest previously contained hyphens ('output-main'/'output-reject'),
    # which creates namespace attributes unreachable via normal attribute
    # access (known_args.output_main would raise AttributeError).
    parser.add_argument('--output-main',
                        dest='output_main',
                        required=True,
                        help='Output table to write record results to.')
    parser.add_argument('--output-reject',
                        dest='output_reject',
                        required=True,
                        help='Output table to write reject record results to.')
    known_args, pipeline_args = parser.parse_known_args(argv)
    return known_args, pipeline_args
if __name__ == '__main__':
    # Emit INFO-level logs when executed as a script.
    logging.getLogger().setLevel(logging.INFO)
    run()
import logging
from collections import deque
from itertools import permutations
import networkx as nx
import numpy as np
from tqdm import trange
from LearningWithExpertKnowledge.expert import *
from LearningWithExpertKnowledge.graph import DAG
class Estimator:
    """Score-based Bayesian-network structure learner.

    Hill-climbs over DAGs with a tabu list, scoring each local change as
    log-likelihood plus an expert-knowledge score. Comments translated to
    English; runtime/log message strings are left as written.
    """
    def __init__(self, data: pd.DataFrame, expert: ExpertKnowledge, k=10000):
        # NOTE(review): 'pd' and 'ExpertKnowledge' are presumably re-exported
        # by the star import from LearningWithExpertKnowledge.expert — confirm.
        self.data = data
        self.expert = expert
        # k scales the expert score relative to the sample size (see expert_score).
        self.k = k
        self.DAG = DAG()
        self.vars = data.columns
        # Per-variable sorted list of observed states.
        self.state_names = {
            var: self._collect_state_names(var) for var in self.vars
        }
        # Check that the expert's columns agree with data's columns.
        # NOTE(review): self.vars is taken FROM data.columns, so this check is
        # tautological — checking expert's variables was probably intended.
        for var in self.vars:
            if var in data.columns:
                continue
            else:
                raise ValueError("专家信息与data不符!")
        # Logging setup: everything goes to log.txt, truncated on each run.
        logging.basicConfig(filename='log.txt', level=0, filemode="w", format="")
        logging.info("*****日志文件*****")
        logging.info("数据预览:")
        logging.info(self.data.head(5))
        logging.info("专家知识预览:")
        logging.info(self.expert.data)
    def _collect_state_names(self, variable):
        """
        Collect the sorted unique (non-NaN) states observed for `variable`.
        :param variable: column name in self.data
        :return: sorted list of states
        """
        states = sorted(list(self.data.loc[:, variable].dropna().unique()))
        return states
    def state_counts(self, variable, parents=None):
        """
        Count occurrences of each state of `variable`, optionally conditioned
        on every state combination of `parents`.
        :param variable: column name to count
        :param parents: iterable of parent column names (default: none)
        :return: DataFrame of (conditional) state counts
        """
        if parents is None:
            parents = []
        parents = list(parents)
        # ignores either any row containing NaN, or only those where the variable or its parents is NaN
        data = self.data
        if not parents:
            # count how often each state of 'variable' occurred
            state_count_data = data.loc[:, variable].value_counts()
            state_counts = (
                state_count_data.reindex(self.state_names[variable]).fillna(0).to_frame()
            )
        else:
            parents_states = [self.state_names[parent] for parent in parents]
            # count how often each state of 'variable' occurred, conditional on parents' states
            state_count_data = (
                data.groupby([variable] + parents).size().unstack(parents)
            )
            if not isinstance(state_count_data.columns, pd.MultiIndex):
                state_count_data.columns = pd.MultiIndex.from_arrays(
                    [state_count_data.columns]
                )
            # reindex rows & columns to sort them and to add missing ones
            # missing row = some state of 'variable' did not occur in data
            # missing column = some state configuration of current 'variable's parents
            # did not occur in data
            row_index = self.state_names[variable]
            column_index = pd.MultiIndex.from_product(parents_states, names=parents)
            state_counts = state_count_data.reindex(
                index=row_index, columns=column_index
            ).fillna(0)
        return state_counts
    def expert_score(self, variable, parents):
        """
        Expert-knowledge score for `variable` having exactly `parents`.
        :param variable: child variable
        :param parents: iterable of proposed parent variables
        :return: polarised, sample-size-scaled expert score
        """
        parents = set(parents)
        sample_size = len(self.data)
        # Multiply the expert's belief for every other node:
        # thinks[1] = belief the node IS a parent, thinks[2] = belief it is not
        # (indices per ExpertKnowledge.think — confirm against that class).
        score = 1
        for node in self.vars:
            thinks = self.expert.think(variable, node)
            if node == variable:
                continue
            elif node in parents:
                score *= thinks[1]
            else:
                score *= thinks[2]
        # Polarise the score around the neutral point:
        # if every expert entry were 0.333, score == 0.333**(len(self.vars)-1).
        zero_point = 0.333 ** (len(self.vars) - 1)
        if score > zero_point:
            score = 10e17/(1-zero_point) * (score - zero_point)
        else:
            score = (-10e17)/(0-zero_point) * (score - zero_point)
        # Account for the sample size: more data -> expert opinion matters less.
        score *= self.k / sample_size
        return score
    def score_function(self, variable, parents):
        """
        Local score = log-likelihood of `variable` given `parents` plus the
        expert score. Logged for every evaluation.
        :param variable: child variable
        :param parents: MUST be a list of parent variables
        :return: combined score
        """
        var_states = self.state_names[variable]
        var_cardinality = len(var_states)
        state_counts = self.state_counts(variable, parents)
        sample_size = len(self.data)
        num_parents_states = float(state_counts.shape[1])
        counts = np.asarray(state_counts)
        log_likelihoods = np.zeros_like(counts, dtype=np.float_)
        # Compute the log-counts
        np.log(counts, out=log_likelihoods, where=counts > 0)
        # Compute the log-conditional sample size
        log_conditionals = np.sum(counts, axis=0, dtype=np.float_)
        np.log(log_conditionals, out=log_conditionals, where=log_conditionals > 0)
        # Compute the log-likelihoods
        log_likelihoods -= log_conditionals
        log_likelihoods *= counts
        likelihood_score = np.sum(log_likelihoods)
        ################
        #
        # Log-likelihood computation.
        # Adding the line below would turn this into the BIC score:
        # score -= 0.5 * log(sample_size) * num_parents_states * (var_cardinality - 1)
        #
        ################
        expert_score = self.expert_score(variable=variable, parents=parents)
        score = likelihood_score + expert_score
        logging.info("{}与{}组成的部分结构,得分为:{}+{}={}".format(variable, parents, likelihood_score, expert_score, score))
        return score
    def legal_operations(self, tabu_list):
        """Yield every legal (operation, score_delta) pair: edge additions,
        removals and flips that keep the graph acyclic and are not tabu."""
        tabu_list = set(tabu_list)
        potential_new_edges = (
            set(permutations(self.vars, 2))
            - set(self.DAG.edges())
            - set([(Y, X) for (X, Y) in self.DAG.edges()])
        )
        for (X, Y) in potential_new_edges:
            # Check if adding (X, Y) will create a cycle.
            if not nx.has_path(self.DAG, Y, X):
                operation = ("+", (X, Y))
                if operation not in tabu_list:
                    old_parents = self.DAG.get_parents(Y)
                    new_parents = old_parents + [X]
                    score_delta = self.score_function(Y, new_parents) - self.score_function(Y, old_parents)
                    yield (operation, score_delta)
        for (X, Y) in self.DAG.edges():
            operation = ("-", (X, Y))
            if operation not in tabu_list:
                old_parents = self.DAG.get_parents(Y)
                new_parents = old_parents[:]
                new_parents.remove(X)
                score_delta = self.score_function(Y, new_parents) - self.score_function(Y, old_parents)
                yield (operation, score_delta)
        for (X, Y) in self.DAG.edges():
            # Check if flipping creates any cycles
            if not any(
                    map(lambda path: len(path) > 2, nx.all_simple_paths(self.DAG, X, Y))
            ):
                operation = ("flip", (X, Y))
                if operation not in tabu_list:
                    old_X_parents = self.DAG.get_parents(X)
                    old_Y_parents = self.DAG.get_parents(Y)
                    new_X_parents = old_X_parents + [Y]
                    new_Y_parents = old_Y_parents[:]
                    new_Y_parents.remove(X)
                    score_delta = (
                        self.score_function(X, new_X_parents)
                        + self.score_function(Y, new_Y_parents)
                        - self.score_function(X, old_X_parents)
                        - self.score_function(Y, old_Y_parents)
                    )
                    yield (operation, score_delta)
    def run(self, epsilon=1e-4, max_iter=1e6):
        """
        Greedy hill climbing with a tabu list.
        :param epsilon: stop when the best score improvement falls below this
        :param max_iter: maximum number of iterations
        :return: the learned DAG
        """
        ########
        # Initial validation: omitted
        ########
        # Initialisation
        start_dag = self.DAG
        start_dag.add_nodes_from(self.vars)
        tabu_list = deque(maxlen=100)
        current_model = start_dag
        # Each iteration applies the best (operation, score_delta) found.
        iteration = trange(int(max_iter))
        for _ in iteration:
            logging.debug(current_model.edges)
            best_operation, best_score_delta = max(
                self.legal_operations(tabu_list),
                key=lambda t: t[1],
            )
            logging.info("搜索到的最佳操作为:{}".format(best_operation))
            if best_operation is None or best_score_delta < epsilon:
                break
            elif best_operation[0] == "+":
                current_model.add_edge(*best_operation[1])
                tabu_list.append(("-", best_operation[1]))
            elif best_operation[0] == "-":
                current_model.remove_edge(*best_operation[1])
                tabu_list.append(("+", best_operation[1]))
            elif best_operation[0] == "flip":
                X, Y = best_operation[1]
                current_model.remove_edge(X, Y)
                current_model.add_edge(Y, X)
                tabu_list.append(best_operation)
        return current_model
    def mic_of_edge(self, u, v):
        """
        Maximal Information Coefficient (MIC) between two nodes.
        Reference: Detecting novel associations in large data sets[J].
        science, 2011, 334(6062): 1518-1524.
        NOTE(review): not implemented — always returns None.
        :param u: first node
        :param v: second node
        :return: None (stub)
        """
        pass
    def corr_of_edges(self, u, v):
        """
        Pearson correlation coefficient between two nodes' data columns.
        The coefficient lies in [-1, 1]; larger absolute values indicate a
        stronger linear relationship (1 / -1 = perfect positive / negative).
        :param u: first column name
        :param v: second column name
        :return: correlation coefficient
        """
        var1 = self.data[u].values
        var2 = self.data[v].values
        corr = np.corrcoef(var1, var2)[0][1]
        return corr
    def add_weight_to_edges(self):
        """
        Attach a 'weight' to every edge derived from the correlation:
        100 = farthest (weakest correlation), 0 = closest (strongest).
        :return: None
        """
        # NOTE(review): a networkx edge view is never None — this guard likely
        # never triggers; an emptiness check may have been intended.
        if self.DAG.edges is None:
            print("No edge was found!")
            return None
        for edge in self.DAG.edges:
            weight = (1 - abs(self.corr_of_edges(edge[0], edge[1]))) * 100
            self.DAG[edge[0]][edge[1]]["weight"] = weight
    def importance_of_node(self, node):
        """
        Node importance via the node-contraction method.
        Reference: node-contraction evaluation of node importance in complex
        networks [D], 2006.
        NOTE(review): incomplete — the contraction step and return value are
        missing, so this currently computes the initial cohesion and returns None.
        :param node: node to evaluate
        :return: None (incomplete implementation)
        """
        # Weighted all-pairs shortest-path distance matrix.
        distance_matrix = nx.floyd_warshall_numpy(self.DAG, weight="weight")
        # Cohesion of the initial network; unreachable pairs (inf) count as 0.
        where_are_inf = np.isinf(distance_matrix)
        _distance_matrix = distance_matrix
        _distance_matrix[where_are_inf] = 0
        cohesion_of_initial_network = (len(self.DAG.nodes) - 1) / _distance_matrix.sum()
        # Contract 'node': contraction makes the distance from all of the
        # node's neighbours to the node equal to 0.
    def centrality_of_nodes(self):
        # Weighted Katz centrality of every node in the learned DAG.
        centrality = nx.katz_centrality(self.DAG, weight="weight")
        return centrality
if __name__ == '__main__':
    # Demo driver: toy 4x4 expert matrix, then learn a structure from data.xlsx.
    # NOTE(review): 'pd' is not imported here directly — presumably re-exported
    # by the star import from LearningWithExpertKnowledge.expert; confirm.
    chen_data = pd.DataFrame({
        "A": [0, 0.8, 0, 0.3],
        "B": [0.1, 0, 0.3, 0.9],
        "C": [1, 0.2, 0, 0.1],
        "D": [0.3, 0.2, 0.1, 0]
    }, index=["A", "B", "C", "D"])
    print(chen_data)
    chen = ExpertKnowledge(data=chen_data)
    data = pd.read_excel(r"./data/data.xlsx")
    a = Estimator(data=data, expert=chen)
    a.run()
    print(a.corr_of_edges('A', 'B'))
    a.add_weight_to_edges()
    print(a.DAG.edges.data())
    print(a.centrality_of_nodes())
|
# Copyright (c) 2021, Manfred Moitzi
# License: MIT License
from typing import Union, List, Dict, Tuple
import math
from matplotlib.textpath import TextPath
from matplotlib.font_manager import FontProperties, findfont
from ezdxf.entities import Text, Attrib, Hatch
from ezdxf.lldxf import const
from ezdxf.math import Matrix44, BoundingBox, Vec3
from ezdxf import path
from ezdxf.path import Path
from ezdxf.tools import fonts
from ezdxf.query import EntityQuery
AnyText = Union[Text, Attrib]
def make_paths_from_str(s: str,
                        font: fonts.FontFace,
                        size: float = 1.0,
                        align: str = 'LEFT',
                        length: float = 0,
                        m: Matrix44 = None) -> List[Path]:
    """ Convert a single line string `s` into a list of
    :class:`~ezdxf.path.Path` objects. All paths are returned in a single
    list. The text `size` is the height of the uppercase letter "X" (cap height).
    The paths are aligned about the insertion point at (0, 0).
    BASELINE means the bottom of the letter "X".

    Args:
        s: text to convert
        font: font face definition
        size: text size (cap height) in drawing units
        align: alignment as string, default is "LEFT"
        length: target length for the "ALIGNED" and "FIT" alignments
        m: transformation :class:`~ezdxf.math.Matrix44`

    """
    if len(s) == 0:
        return []
    font_properties, font_measurements = _get_font_data(font)
    # Scale factor so the rendered cap height equals the requested `size`.
    scaled_size = size / font_measurements.cap_height
    scaled_fm = font_measurements.scale_from_baseline(scaled_size)
    paths = _str_to_paths(s, font_properties, scaled_size)
    bbox = path.bbox(paths, precise=False)
    halign, valign = const.TEXT_ALIGN_FLAGS[align.upper()]
    matrix = get_alignment_transformation(scaled_fm, bbox, halign, valign)
    # ALIGNED stretches both axes to hit `length`; FIT stretches x only.
    stretch_x = 1.0
    stretch_y = 1.0
    if align == 'ALIGNED':
        stretch_x = length / bbox.size.x
        stretch_y = stretch_x
    elif align == 'FIT':
        stretch_x = length / bbox.size.x
    if stretch_x != 1.0:
        matrix *= Matrix44.scale(stretch_x, stretch_y, 1.0)
    if m is not None:
        matrix *= m
    return list(path.transform_paths(paths, matrix))
def _get_font_data(
        font: fonts.FontFace) -> Tuple[FontProperties, fonts.FontMeasurements]:
    # Resolve the abstract font face into concrete matplotlib FontProperties
    # and look up the cached measurements for the matching ttf file.
    fp = FontProperties(
        family=font.family,
        style=font.style,
        stretch=font.stretch,
        weight=font.weight,
    )
    ttf_path = findfont(fp)
    fonts.load()  # not expensive if already loaded
    # The ttf file path is the cache key for font measurements:
    fm = fonts.get_font_measurements(ttf_path)
    return fp, fm
def _str_to_paths(s: str, fp: FontProperties, size: float = 1.0) -> List[Path]:
    # Let matplotlib trace the glyph outlines, then convert them to ezdxf paths.
    text_path = TextPath((0, 0), s, size=size, prop=fp, usetex=False)
    return list(path.from_matplotlib_path(text_path))
def get_alignment_transformation(fm: fonts.FontMeasurements, bbox: BoundingBox,
                                 halign: int, valign: int) -> Matrix44:
    """Build the translation that shifts the rendered text bounding box so
    that the insertion point (0, 0) honours the given alignment flags.

    Raises:
        ValueError: for an unknown `halign` or `valign` value.
    """
    # --- horizontal shift ---
    if halign == const.LEFT:
        offset_x = 0
    elif halign == const.RIGHT:
        offset_x = -bbox.extmax.x
    elif halign == const.CENTER or halign > 2:  # ALIGNED, MIDDLE, FIT
        offset_x = -bbox.center.x
    else:
        raise ValueError(f'invalid halign argument: {halign}')

    # Effective cap/descender heights: at least as tall as the real outline.
    effective_cap = max(fm.cap_height, bbox.extmax.y)
    effective_descender = max(fm.descender_height, abs(bbox.extmin.y))

    # --- vertical shift ---
    if valign == const.BASELINE:
        offset_y = 0
    elif valign == const.TOP:
        offset_y = -effective_cap
    elif valign == const.MIDDLE:
        offset_y = -effective_cap / 2
    elif valign == const.BOTTOM:
        offset_y = effective_descender
    else:
        raise ValueError(f'invalid valign argument: {valign}')

    if halign == 4:  # MIDDLE: center on the total height, overriding valign
        offset_y = max(fm.total_height, bbox.size.y) / -2.0
    return Matrix44.translate(offset_x, offset_y, 0)
def make_hatches_from_str(s: str,
                          font: fonts.FontFace,
                          size: float = 1.0,
                          align: str = 'LEFT',
                          length: float = 0,
                          dxfattribs: Dict = None,
                          m: Matrix44 = None) -> List[Hatch]:
    """ Convert a single line string `s` into a list of virtual
    :class:`~ezdxf.entities.Hatch` entities.
    The text `size` is the height of the uppercase letter "X" (cap height).
    The paths are aligned about the insertion point at (0, 0).
    The HATCH entities are aligned to this insertion point. BASELINE means the
    bottom of the letter "X".

    Args:
        s: text to convert
        font: font face definition
        size: text size (cap height) in drawing units
        align: alignment as string, default is "LEFT"
        length: target length for the "ALIGNED" and "FIT" alignments
        dxfattribs: additional DXF attributes
        m: transformation :class:`~ezdxf.math.Matrix44`

    """
    # HATCH is an OCS entity, transforming just the polyline paths
    # is not correct! The Hatch has to be created in the xy-plane!
    paths = make_paths_from_str(s, font, size, align, length)
    # Default to a solid, by-layer-coloured fill unless the caller overrides.
    dxfattribs = dxfattribs or dict()
    dxfattribs.setdefault('solid_fill', 1)
    dxfattribs.setdefault('pattern_name', 'SOLID')
    dxfattribs.setdefault('color', const.BYLAYER)
    hatches = path.to_hatches(
        paths, edge_path=True, dxfattribs=dxfattribs)
    if m is not None:
        # Transform HATCH entities as a unit:
        return [hatch.transform(m) for hatch in hatches]
    else:
        return list(hatches)
def make_paths_from_entity(entity: AnyText) -> List[Path]:
    """ Convert text content from DXF entities TEXT and ATTRIB into a
    list of :class:`~ezdxf.path.Path` objects. All paths are returned in a
    single list.
    The paths are located at the location of the source entity, but don't expect
    a 100% match compared to CAD applications.

    Raises:
        TypeError: if `entity` is not a TEXT or ATTRIB entity.
    """
    def get_font_name():
        # Resolve the ttf font from the entity's text style; fall back to
        # Arial when the style or its owning document is unavailable.
        font_name = 'arial.ttf'
        style_name = entity.dxf.style
        if entity.doc:
            try:
                style = entity.doc.styles.get(style_name)
                font_name = style.dxf.font
            except ValueError:
                pass
        return font_name

    def get_transformation():
        """ Apply rotation, width factor, translation to the insertion point
        and if necessary transformation from OCS to WCS.
        """
        # TODO: text generation flags - mirror-x and mirror-y
        angle = math.radians(entity.dxf.rotation)
        width_factor = entity.dxf.width
        if align == 'LEFT':
            location = p1
        elif align in ('ALIGNED', 'FIT'):
            width_factor = 1.0  # text goes from p1 to p2, no stretching applied
            location = p1.lerp(p2, factor=0.5)
            angle = (p2 - p1).angle  # override stored angle
        else:
            location = p2
        # Order matters: scale in place, rotate, then move to the location.
        m = Matrix44.chain(
            Matrix44.scale(width_factor, 1, 1),
            Matrix44.z_rotate(angle),
            Matrix44.translate(location.x, location.y, location.z),
        )
        ocs = entity.ocs()
        if ocs.transform:
            m *= ocs.matrix
        return m

    if not entity.dxftype() in ('TEXT', 'ATTRIB'):
        raise TypeError(f'unsupported entity type: {entity.dxftype()}')
    fonts.load()  # not expensive if already loaded
    text = entity.plain_text()
    align = entity.get_align()
    # p1 = insertion point, p2 = second alignment point (if present).
    p1 = Vec3(entity.dxf.insert)
    if entity.dxf.hasattr('align_point'):
        p2 = Vec3(entity.dxf.align_point)
    else:
        p2 = p1
    length = 0
    if align in ('FIT', 'ALIGNED'):
        # text is stretch between p1 and p2
        length = p1.distance(p2)
    paths = make_paths_from_str(
        text, fonts.get_font_face(get_font_name()),
        size=entity.dxf.height,  # cap height in drawing units
        align=align,
        length=length,
    )
    m = get_transformation()
    return path.transform_paths(paths, m)
def make_hatches_from_entity(entity: AnyText) -> List[Hatch]:
    """ Convert text content from DXF entities TEXT and ATTRIB into a
    list of virtual :class:`~ezdxf.entities.Hatch` entities.
    The hatches are located at the location of the source entity, but don't
    expect a 100% match compared to CAD applications.

    NOTE(review): stub — currently returns an empty list.
    """
    return []
def explode(entity: AnyText, kind: int = 1, target=None) -> EntityQuery:
    """ Explode the text content of DXF entities TEXT and ATTRIB into
    LWPOLYLINE entities as outlines as HATCH entities as fillings.
    The target layout is given by the `target` argument or the same layout as
    the source entity reside, if `target` is ``None``.
    The `kind` argument defines the DXF types to create:

    === ===============================================
    1   :class:`~ezdxf.entities.Hatch` as filling
    2   :class:`~ezdxf.entities.LWPolyline` as outline
    3   :class:`~ezdxf.entities.Hatch` and :class:`~ezdxf.entities.LWPolyline`
    === ===============================================

    Returns the created DXF entities as an :class:`~ezdxf.query.EntityQuery`
    object.
    The source entity will be destroyed and don't expect a 100% match compared
    to CAD applications.

    Args:
        entity: TEXT or ATTRIB entity to explode
        kind: kind of entities to create, 1=HATCH, 2=LWPOLYLINE, 3=BOTH
        target: target layout for new created DXF entities, ``None`` for the
            same layout as the source entity.

    NOTE(review): stub — no entities are created or destroyed yet; always
    returns an empty EntityQuery.
    """
    entities = []
    return EntityQuery(entities)
|
import collections
class Node():
    """One trie node: children auto-create on access via defaultdict."""
    def __init__(self):
        self.cnt = 0        # pass-through counter maintained by the owning trie
        self.isWord = False  # True when a stored word terminates at this node
        self.s = ""          # the full word ending here (filled in on insert)
        # Missing children materialise lazily as fresh Node instances.
        self.child = collections.defaultdict(Node)
class WordDictionary:
def __init__(self):
self.root = Node()
def addWord(self, word: str) -> None:
cur = self.root
for ch in word:
cur.cnt += 1
cur = cur.child[ch]
cur.s = word
cur.isWord = True
def delWord(self, word: str) -> None:
cur = self.root
for ch in word:
pre = cur
cur = cur.child.get(ch)
if pre.cnt > 0:
pre.cnt -= 1
else:
pre.child.pop(ch)
def search2(self, word: str) -> bool:
cur = self.root
for ch in word:
cur = cur.child.get(ch)
if not ch:
return False
return True
def findWords(board, words):
    """Find all `words` reachable on `board` by adjacent (4-directional)
    moves without reusing a cell; prints and collects each word found.

    Uses the WordDictionary trie defined above; visited cells are marked
    in place with "#" and restored after recursion.
    """
    #root = Trie()
    #for word in words:
    #    root.insert(word)
    wd = WordDictionary()
    for word in words:
        wd.addWord(word)

    def helper(x, y, node):
        # Stop at board edges, a dead trie branch, or an already-visited cell.
        if x < 0 or x >= len(board) or y < 0 or y >= len(board[0]) or not node or board[x][y] == "#":
            return
        dx = [1, 0, -1, 0]
        dy = [0, 1, 0, -1]
        temp = board[x][y]
        cur_node = node.child.get(temp)
        if cur_node:
            if cur_node.isWord:
                # Found a complete word; clear the flag so it is reported once.
                print(cur_node.s)
                res.append(cur_node.s)
                cur_node.isWord = False
                #root.delete(cur_node.s)
                wd.delWord(cur_node.s)
        # Recurse in all four directions, masking the current cell with "#".
        # NOTE(review): recursion proceeds even when cur_node is None; the
        # guard at the top then aborts the child call.
        for i in range(4):
            board[x][y] = "#"
            helper(x + dx[i], y + dy[i], node.child.get(temp))
            board[x][y] = temp

    res = []
    for x in range(len(board)):
        for y in range(len(board[0])):
            helper(x, y, wd.root)
    return res
def findWords2(board, words):
    """
    output: ["oath","oathf","oathfi","oathfii","oathi","oathk", "oate","eat"]
    expect: ["oath","oathk","oathf","oathfi","oathfii","oathi","oathii","oate","eat"]

    NOTE(review): known-incomplete alternative to findWords -- as the
    output/expect lines above record, it misses some words.  It also
    removes entries from `words` while iterating over that same list,
    which skips elements.
    """
    words = list(set(words))
    m, n = len(board), len(board[0])
    seen = [[0]*n for _ in range(m)]

    def srch(i, j, path):
        # Extend the current path with this cell's letter.
        path += board[i][j]
        f = False
        for word in words:
            if path==word and path not in res: res.append(path); words.remove(path)
            if path in word and path!=word: f = True
        # Abandon the branch when no word extends the current prefix.
        if not f: return False
        if seen[i][j]: return False
        seen[i][j] = 1
        for dx, dy in [(-1,0),(1,0),(0,-1),(0,1)]:
            x,y = i+dx,j+dy
            if 0<=x<m and 0<=y<n and not seen[x][y]:
                if not srch(x, y, path):
                    continue
        seen[i][j] = 0
        return False

    res = []
    for i in range(m):
        for j in range(n):
            if not srch(i,j,''):
                continue
    return res
# Demo: run both word-search implementations on the same fixture board.
board = [["o","a","a","n"],["e","t","a","e"],["i","h","k","r"],["i","f","l","v"]]
words = ["oath","pea","eat","rain","oathi","oathk","oathf","oate","oathii","oathfi","oathfii"]
#board = [["o","a","b","n"],["o","t","a","e"],["a","h","k","r"],["a","f","l","v"]]
#words = ["oa","oaa"]
print(findWords(board, words))
print(findWords2(board, words))
|
import os

# Bulk-rename every file under c:\tmp\csv, replacing the substring
# '10秒' in the file name with '10s_20180521_20180523'.
path = 'c:\\tmp\\csv'
s = '10秒'
d = '10s_20180521_20180523'
for name in os.listdir(path):
    src = path + '\\' + name
    dst = path + '\\' + name.replace(s, d)
    os.rename(src, dst)
# Basic training configuration file
from pathlib import Path
from torchvision.transforms import RandomVerticalFlip, RandomHorizontalFlip
from torchvision.transforms import RandomResizedCrop
from torchvision.transforms import ToTensor, Normalize
from common.dataset import get_test_data_loader
# Reproducibility / run flags.
SEED = 12345
DEBUG = True

# Where validation probabilities are written.
OUTPUT_PATH = Path("output") / "val_probas"
dataset_path = Path("/home/fast_storage/imaterialist-challenge-furniture-2018/")
SAVE_PROBAS = True
# SAMPLE_SUBMISSION_PATH = dataset_path / "sample_submission_randomlabel.csv"

# Test-time augmentation pipeline (random crops/flips -> tensor -> normalize).
TEST_TRANSFORMS = [
    RandomResizedCrop(350, scale=(0.8, 1.0), interpolation=3),
    RandomVerticalFlip(p=0.5),
    RandomHorizontalFlip(p=0.5),
    ToTensor(),
    Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
]

N_CLASSES = 128
BATCH_SIZE = 24
NUM_WORKERS = 15

# Loader over the validation split (project helper; constructed at import).
TEST_LOADER = get_test_data_loader(
    dataset_path=dataset_path / "validation",
    test_data_transform=TEST_TRANSFORMS,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    pin_memory=True)

# Checkpoint to evaluate; N_TTA = number of test-time-augmentation passes.
MODEL = (Path("output") / "train" / "train_inceptionv4_350_fc_random_resized_crop" / "20180506_2103" /
         "model_FurnitureInceptionV4_350_FC_10_val_loss=0.5576787.pth").as_posix()
N_TTA = 12
|
from django.urls import path
from . import views
# URL routes: search landing page, per-word detail, and PDF opener.
urlpatterns = [
    path('', views.search, name='search'),
    path('<str:word>/', views.word_detail, name='wordDetail'),
    path('wordDetail/pdf/<str:document>/',views.pdf_openner, name='pdfOpenner')
]
import os
import cv2
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dropout, Activation, Dense
from keras.layers import Flatten, Convolution2D, MaxPooling2D
from keras.callbacks import EarlyStopping
from sklearn.model_selection import KFold, train_test_split, GridSearchCV
import json
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
groups_folder_path = 'C:/Users/USER/Desktop/hyeonji/MachineLearning/Data/'
categories = ["Eunbi", "Minju", "Wonyoung", "Sakura", "Yuri", "Yena",
"Chaewon", "Chaeyeon", "Nako", "Hitomi", "Yujin", "Hyewon"]
num_classes = len(categories)
'''
image_w = 64
image_h = 64
X = []
Y = []
# 전처리
for idex, categorie in enumerate(categories):
label = [0 for i in range(num_classes)]
label[idex] = 1
image_dir = groups_folder_path + categorie + "/"
for top, dir, f in os.walk(image_dir):
for filename in f:
print(image_dir+filename)
img = cv2.imread(image_dir+filename)
img = cv2.resize(img,None,fx=image_w/img.shape[1],fy=image_h/img.shape[0])
X.append(img/256)
Y.append(label)
X = np.array(X)
Y = np.array(Y)
X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=0.2, random_state=1)
xy = (X_train, Y_train, X_validation, Y_validation)
print(xy[0].shape)
print(xy[1].shape)
print(xy[2].shape)
print(xy[3].shape)
np.save("C:/Users/USER/Desktop/hyeonji/MachineLearning/result/tst.npy", xy)
print()
'''
print("======================== Train Start ==========================")
print()
early_stopping = EarlyStopping(monitor='loss', patience=10, verbose=1)
numpy_path = 'C:/Users/USER/Desktop/hyeonji/MachineLearning_npy/tst.npy'
result_path = 'C:/Users/USER/Desktop/hyeonji/MachineLearning/result/'
X_train, Y_train, X_validation, Y_validation = np.load(numpy_path, allow_pickle=True)
num_classes = len(categories)
accuracy = []
skf = KFold(n_splits=5, shuffle=True)
def create_model():
    """Build and compile the CNN classifier (3 conv blocks + dense head).

    Reads the module-level `X_train` for the input shape and `num_classes`
    for the output width.

    NOTE(review): `Convolution2D(16, 3, 3, border_mode=...)` is the Keras 1
    API; Keras 2 uses `Conv2D(16, (3, 3), padding=...)` -- confirm which
    Keras version this environment pins.
    """
    model = Sequential()
    model.add(Convolution2D(16, 3, 3, border_mode='same', activation='relu', input_shape=X_train.shape[1:]))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Convolution2D(32, 3, 3, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
    return model
# BUG FIX: KerasClassifier expects the model *factory*, not an already-built
# model -- the original passed `create_model()` (called), which breaks
# cloning/refitting inside GridSearchCV.
model = KerasClassifier(build_fn=create_model, verbose=0)

# Hyper-parameter grid over batch size and epoch count.
batch_size = [32, 64, 100, 200, 300]
epochs = [10, 100, 300]
param_grid = dict(batch_size=batch_size, epochs=epochs)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=skf)
grid_result = grid.fit(X_train, Y_train)

print("==============Grid Search================")
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))

# BUG FIX: the scikit-learn wrapper has no to_json()/save_weights();
# serialize the underlying Keras model of the best (refitted) estimator.
best_model = grid_result.best_estimator_.model
model_json = best_model.to_json()
with open(result_path + 'tst.json',
          "w") as json_file:
    json.dump(model_json, json_file)
best_model.save_weights(result_path + 'tst.h5')
print("saved model to disk")
|
q = 0


def endQuestion():
    """Advance the exercise counter and print a footer for the finished one."""
    global q
    q = q + 1
    print("End of question", q)
# 1. Biggie Size - Given a list, write a function that changes all positive numbers in the list to "big".
# Example: biggie_size([-1, 3, 5, -5]) returns that same list, but whose values are now [-1, "big", "big", -5]
def posBig(list):
    """Replace every positive value of `list` with the string "big", in
    place, and return the same list."""
    for idx, val in enumerate(list):
        if val > 0:
            list[idx] = "big"
    return list
# Demo: positives become "big".
print(posBig([-1, 3, 5, -5]))
endQuestion()
# 2. Count Positives - Given a list of numbers, create a function to replace the last value with the number of positive values. (Note that zero is not considered to be a positive number).
# Example: count_positives([-1,1,1,1]) changes the original list to [-1,1,1,3] and returns it
# Example: count_positives([1,6,-4,-2,-7,-2]) changes the list to [1,6,-4,-2,-7,2] and returns it
def posCount(list):
    """Replace the last element of `list` with the count of positive values
    (zero is not positive); mutates and returns `list`.

    An empty list is returned unchanged.
    """
    pCount = 0
    for val in list:
        if val > 0:
            # BUG FIX: the original printed pCount here instead of
            # incrementing it, so the stored count was always 0.
            pCount += 1
    if list:
        list[-1] = pCount
    return list
# Demo: last element becomes the positive count.
print(posCount([-1,1,1,1]))
endQuestion()
# 3. Sum Total - Create a function that takes a list and returns the sum of all the values in the list.
# Example: sum_total([1,2,3,4]) should return 10
# Example: sum_total([6,3,-2]) should return 7
def totalSum(list):
    """Return the sum of all values in `list` (0 for an empty list)."""
    return sum(list)
# Demo: sum of the sample list.
print("Sum of list is:", totalSum([1,2,3,4]))
endQuestion()
# 4. Average - Create a function that takes a list and returns the average of all the values.x
# Example: average([1,2,3,4]) should return 2.5
def avg(list):
    """Return the arithmetic mean of `list`.

    Raises ZeroDivisionError for an empty list (matches prior behavior).
    """
    return sum(list) / len(list)
# Demo: average of the sample list.
print("Average is:", avg([1,2,3,4]))
endQuestion()
# 5. Length - Create a function that takes a list and returns the length of the list.
# Example: length([37,2,1,-9]) should return 4
# Example: length([]) should return 0
def length(list):
    """Return the number of elements in `list`."""
    return len(list)
# Demo: length of the sample list.
print("Length is:", length([1,2,3,4]))
endQuestion()
# 6. Minimum - Create a function that takes a list of numbers and returns the minimum value in the list. If the list is empty, have the function return False.
# Example: minimum([37,2,1,-9]) should return -9
# Example: minimum([]) should return False
def minimum(list):
    """Return the smallest value in `list`, or False for an empty list.

    BUG FIX: the original indexed list[0] *before* the length check, so an
    empty list raised IndexError instead of returning False; it also
    returned the string "False" rather than the boolean the spec asks for.
    """
    if len(list) == 0:
        return False
    low = list[0]
    for val in list:
        if val < low:
            low = val
    return low
# Demo: minimum of the sample list.
print("Lowest number is:", minimum([37,2,1,-9]))
endQuestion()
# 7. Maximum - Create a function that takes a list and returns the maximum value in the list. If the list is empty, have the function return False.
# Example: maximum([37,2,1,-9]) should return 37
# Example: maximum([]) should return False
def maximum(list):
    """Return the largest value in `list`, or False for an empty list.

    BUG FIX: the original indexed list[0] *before* the length check, so an
    empty list raised IndexError instead of returning False; it also
    returned the string "False" rather than the boolean the spec asks for.
    """
    if len(list) == 0:
        return False
    high = list[0]
    for val in list:
        if val > high:
            high = val
    return high
# Demo: maximum of the sample list.
print("Highest number is:", maximum([37,2,1,-9]))
endQuestion()
# 8. Ultimate Analysis - Create a function that takes a list and returns a dictionary that has the sumTotal, average, minimum, maximum and length of the list.
# Example: ultimate_analysis([37,2,1,-9]) should return {'sumTotal': 31, 'average': 7.75, 'minimum': -9, 'maximum': 37, 'length': 4 }
def analysis(list):
    """Return a dict with the sum, average, minimum, maximum and length of
    `list` (raises IndexError/ZeroDivisionError on an empty list, as
    before)."""
    total = 0
    low = list[0]
    high = list[0]
    for val in list:
        total += val
        if val < low:
            low = val
        if val > high:
            high = val
    mean = total / len(list)
    return {"Sum Total": total, "Average": mean,
            "Minimum": low, "Maximum": high, "Length": len(list)}
# Demo: full stats for the sample list.
print(analysis([37,2,1,-9]))
endQuestion()
# 9. Reverse List - Create a function that takes a list and return that list with values reversed. Do this without creating a second list. (This challenge is known to appear during basic technical interviews.)
# Example: reverse_list([37,2,1,-9]) should return [-9,1,2,37]
def reverse(list):
    """Reverse `list` in place (no second list) and return it.

    BUG FIX: the original shifted elements with list[x] = list[x-1], which
    *rotates* the list -- reverse([37,2,1,-9]) produced [-9,37,2,1] instead
    of [-9,1,2,37] -- and printed a debug value.  Classic two-pointer swap.
    """
    i = 0
    j = len(list) - 1
    while i < j:
        list[i], list[j] = list[j], list[i]
        i += 1
        j -= 1
    return list
# Demo: reversed sample list.
print(reverse([37,2,1,-9]))
endQuestion()
# For each test case, read a binary string and print YES when its '1's form
# one contiguous block (i.e. at most one 0->1->0 transition pattern).
for i in range(int(input())):
    s = input()
    if '1' not in s:
        print("NO")
    else:
        # Count character changes, stopping once more than two are seen.
        # NOTE(review): the inner `i` shadows the outer loop variable;
        # harmless here because the outer index is never used afterwards.
        chng = 0
        i = 1
        while i<len(s) and chng <= 2:
            if s[i] != s[i-1]:
                chng += 1
            i+=1
        if chng == 2 and s[0] == '0':
            print("YES")
        elif chng <= 1:
            print("YES")
        else:
            print("NO")
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class user_master(models.Model):
    # Profile row keyed 1:1 to the Django auth user.
    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key = True)
    # NOTE(review): username/email duplicate fields already on auth.User.
    username = models.CharField(max_length=50)
    email = models.EmailField(max_length=50, unique=True)
    address = models.CharField(max_length=200)
    # SECURITY NOTE(review): this stores a password as plain text next to
    # auth.User's hashed one -- confirm this field is really needed.
    password = models.CharField(max_length=50)
"""
Write a program that asks the user how many people are in their dinner group.
"""
# Groups larger than 8 must wait for a table.
seating = input('How many people are in your dinner group?\n')
seating = int(seating)
if seating > 8:
    print("You need to wait for a table.")
else:
    print("Table is ready!")
|
# Keep prompting until the correct password ('2002') is entered.
entry = input()
while entry != '2002':
    print('Senha Invalida')
    entry = input()
print('Acesso Permitido')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md).
# Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Derived from:
# https://github.com/godotengine/godot/blob/4.0/misc/scripts/copyright_headers.py
import sys, os
# Template header to install; $filename is replaced by the target file name.
header = """\
# Copyright (c) 2018-present. This file is part of V-Sekai https://v-sekai.org/.
# SaracenOne & K. S. Ernest (Fire) Lee & Lyuma & MMMaellon & Contributors
# $filename
# SPDX-License-Identifier: MIT
"""

fname = sys.argv[1]

# Handle replacing $filename with actual filename and keep alignment
fsingle = fname.strip()
fsingle = os.path.basename(fsingle)
if fsingle.find("#") != -1:
    # Keep only the part after the last '#' in the basename.
    fsingle = fsingle[fsingle.rfind("#") + 1 :]
rep_fl = "$filename"
rep_fi = fsingle
len_fl = len(rep_fl)
len_fi = len(rep_fi)
# Pad with spaces to keep alignment
if len_fi < len_fl:
    for x in range(len_fl - len_fi):
        rep_fi += " "
elif len_fl < len_fi:
    for x in range(len_fi - len_fl):
        rep_fl += " "
if header.find(rep_fl) != -1:
    text = header.replace(rep_fl, rep_fi)
else:
    text = header.replace("$filename", fsingle)
text += "\n"

# We now have the proper header, so we want to ignore the one in the original file
# and potentially empty lines and badly formatted lines, while keeping comments that
# come after the header, and then keep everything non-header unchanged.
# To do so, we skip empty lines that may be at the top in a first pass.
# In a second pass, we skip all consecutive comment lines starting with "/*",
# then we can append the rest (step 2).
fileread = open(fname.strip(), "r")
line = fileread.readline()
header_done = False
while line.strip() == "":  # Skip empty lines at the top
    line = fileread.readline()
if line.find("# Copyright (c) 2018-present. This file is part of V-Sekai") == -1:  # Header starts this way
    # Maybe starting with a non-comment, abort header magic
    header_done = True
while not header_done:  # Handle header now
    if line.find("#") != 0:  # No more starting with a comment
        header_done = True
        if line.strip() != "":
            text += line
    line = fileread.readline()
while line != "":  # Dump everything until EOF
    text += line
    line = fileread.readline()
fileread.close()

# Write the rebuilt contents back over the original file.
filewrite = open(fname.strip(), "w")
filewrite.write(text)
filewrite.close()
|
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.datasets import cifar10
# Generate dummy data
import numpy as np
# x_train = np.random.random((1000, 20))
# y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
# x_test = np.random.random((100, 20))
# y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
batch_size = 32
num_classes = 10

# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# flattening images (32, 32, 3) to 3072 vector
# NOTE(review): num_pixels = 32*32 = 1024, not 3072 -- the hidden layers are
# 1024 wide even though the input is 3072-dimensional; confirm intent.
num_pixels = x_train.shape[1] * x_train.shape[2]
x_train = x_train.reshape(-1, 3072)
x_test = x_test.reshape(-1, 3072)

# normalize inputs from 0-255 to 0.0-1.0
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train = x_train / 255.0
x_test = x_test / 255.0

# Fully-connected classifier: two hidden ReLU layers with dropout.
model = Sequential()
model.add(Dense(num_pixels, activation='relu', input_dim=3072))
model.add(Dropout(0.5))
model.add(Dense(num_pixels, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

# SGD with momentum and learning-rate decay.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
model.fit(x_train, y_train,
          epochs=20,
          batch_size=batch_size)
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score = ',score)
print('Test accuracy = ', acc)
|
# Register your models here.
from django.contrib import admin

# BUG FIX: Python 3 removed implicit relative imports, so "import models"
# fails inside an app package; use an explicit relative import.
from . import models

admin.site.register(models.Restaurant)
class instruction:
    """Base class for assembler instructions.

    `binary` holds the rendered instruction text; str() returns it.
    """

    binary = ''

    def __str__(self):
        return self.binary

    def parse(self, line):
        """Split `line` on '<'/'>' delimiters and return the third field
        (e.g. 'a<b>c' -> 'c')."""
        normalized = line.strip().replace('<', ';').replace('>', ';')
        return str(normalized.split(';')[2])
class a_instruction(instruction):
    """A-instruction: '@' followed by the constant field parsed from input."""

    def __init__(self, constant):
        value = self.parse(constant)
        self.binary = '@' + value
class c_instruction(instruction):
    """C-instruction: renders 'dest=comp;jump' text from the three fields
    using the Hack machine-language lookup tables below."""

    def __init__(self, comp, dest, jump):
        try:
            # Interpret the comp field as a hex number, keep the low 7 bits
            # and translate through the comp table.
            temp_bin = bin(int(self.parse(comp),16))[2:].zfill(8)
            temp_bin = str(temp_bin)[1:]
            print(temp_bin)
            self.comp = self.comp_dict[temp_bin]
        except Exception:
            # Fallback for comp fields that are not valid table entries:
            # strip the first two characters and mark with an 'X' prefix.
            self.comp = self.parse(comp)
            self.comp = self.comp[2:]
            self.comp = 'X' + self.comp
        self.dest = self.dest_dict[self.parse(dest)]
        self.jump = self.jump_dict[self.parse(jump)]
        if self.dest != 'null':
            self.binary += self.dest + '='
        if self.comp != 'null':
            self.binary += self.comp
        if self.jump != 'null':
            self.binary += ';' + self.jump

    # dest bits -> destination register mnemonic.
    dest_dict = {'000': 'null',
                 '001': 'M',
                 '010': 'D',
                 '011': 'MD',
                 '100': 'A',
                 '101': 'AM',
                 '110': 'AD',
                 '111': 'AMD',}

    # jump bits -> jump mnemonic.
    jump_dict = {'000': 'null',
                 '001': 'JGT',
                 '010': 'JEQ',
                 '011': 'JGE',
                 '100': 'JLT',
                 '101': 'JNE',
                 '110': 'JLE',
                 '111': 'JMP',}

    # comp bits (a + c1..c6) -> computation mnemonic.
    # BUG FIX: '!A' was keyed '0110011', a duplicate of '-A' (the later
    # entry silently overwrote it, losing '!A'); per the Hack spec !A is
    # a=0, c=110001 -> '0110001'.
    comp_dict = {'0101010': '0',
                 '0111111': '1',
                 '0111010': '-1',
                 '0001100': 'D',
                 '0110000': 'A',
                 '0001101': '!D',
                 '0110001': '!A',
                 '0001111': '-D',
                 '0110011': '-A',
                 '0011111': 'D+1',
                 '0110111': 'A+1',
                 '0001110': 'D-1',
                 '0110010': 'A-1',
                 '0000010': 'D+A',
                 '0010011': 'D-A',
                 '0000111': 'A-D',
                 '0000000': 'D&A',
                 '0010101': 'D|A',
                 '1110000': 'M',
                 '1110001': '!M',
                 '1110011': '-M',
                 '1110111': 'M+1',
                 '1110010': 'M-1',
                 '1000010': 'D+M',
                 '1010011': 'D-M',
                 '1000111': 'M-D',
                 '1000000': 'D&M',
                 '1010101': 'D|M',}
|
# Databricks notebook source
# Read one column from an Azure SQL DW table via the spark-sqldw connector,
# staging through blob storage.
# SECURITY NOTE(review): this notebook contains a hardcoded access key
# placeholder and, in the %scala cell below, a hardcoded user password --
# move both into dbutils.secrets before sharing/committing.
# Set credentials for blob storage
spark.conf.set("fs.azure.account.key.databricksstoragedevweu.blob.core.windows.net", "******accesskey******")
username = "guido.tournois@icemobile.com" # AAD user with READ permission on database
password = dbutils.secrets.get("guido.tournois@icemobile.com","password")
jdbcString = (
    "jdbc:sqlserver://af-cbi-dev-weu.database.windows.net:1433;database=AF;encrypt=false;"+
    "trustServerCertificate=true;hostNameInCertificate=*.database.windows.net;"+
    "Authentication=ActiveDirectoryPassword;"
)
df = (spark.read.format("com.databricks.spark.sqldw")
      .option("url", jdbcString)
      .option("tempDir", "wasbs://data@databricksstoragedevweu.blob.core.windows.net/tmp")
      .option("forward_spark_azure_storage_credentials", "true")
      .option("user","guido.tournois@icemobile.com")
      .option("password",password)
      .option("query", "select ProgramName from mgt.programparameter")
      .load())
# COMMAND ----------
df.show()
# COMMAND ----------
# COMMAND ----------
# COMMAND ----------
# COMMAND ----------
# COMMAND ----------
# MAGIC %%bash
# MAGIC ls -la ../../dbfs/FileStore/tables/
# MAGIC # rm ../../dbfs/FileStore/tables/azure_sqldb_spark_1_0_0_jar_with_dependencies-d8798.jar
# MAGIC # rm ../../dbfs/FileStore/tables/junit_4_8_1-aac34.jar
# MAGIC # rm ../../dbfs/FileStore/tables/mssql_jdbc_6_4_0_jre8-ebbc3.jar
# COMMAND ----------
# MAGIC %scala
# MAGIC
# MAGIC import java.sql.Connection;
# MAGIC import java.sql.ResultSet;
# MAGIC import java.sql.Statement;
# MAGIC
# MAGIC import com.microsoft.sqlserver.jdbc.SQLServerDataSource;
# MAGIC import com.microsoft.sqlserver.jdbc.SQLServerDriver
# MAGIC
# MAGIC var ds = new SQLServerDataSource();
# MAGIC ds.setServerName("af-cbi-dev-weu.database.windows.net"); // Replace with your server name
# MAGIC ds.setDatabaseName("AF"); // Replace with your database
# MAGIC ds.setUser("guido.tournois@icemobile.com"); // Replace with your user name
# MAGIC ds.setPassword("gT22071988"); // Replace with your password
# MAGIC ds.setAuthentication("ActiveDirectoryPassword");
# MAGIC ds.setHostNameInCertificate("*.database.windows.net");
# MAGIC // ds.setPortNumber(1433);
# MAGIC ds.setTrustServerCertificate(true);
# MAGIC
# MAGIC var connection = ds.getConnection();
# MAGIC var stmt = connection.createStatement();
# MAGIC var res = stmt.executeQuery("SELECT SUSER_SNAME()")
# MAGIC res.
# COMMAND ----------
# COMMAND ----------
|
# 读取id、姓名、成绩
# aa.xlsx:
# id||name||score
# 1||mike||88.5
# 2||amy||60.8
# 3||bob||79.6
import xlrd
# region (1)将excel内容存于student类型的list
class student():
    """Plain record for one spreadsheet row (filled in by the reader)."""

    def __init__(self):
        # Defaults before the row values are assigned.
        self.id = 0
        self.name = 0
        self.age = 0
def read_student(filename):
    """Load `filename` (first sheet, skipping the header row) into a list
    of student objects."""
    workbook = xlrd.open_workbook(filename)
    sheet = workbook.sheet_by_index(0)  # first sheet
    records = []
    # Row 0 is the header; data starts at row 1.
    for row_idx in range(1, sheet.nrows):
        row = sheet.row_values(row_idx)
        rec = student()
        rec.id = int(row[0])
        rec.name = row[1]
        # NOTE(review): the column holds the score but the attribute is
        # named `age` -- kept as-is to preserve behavior.
        rec.age = row[2]
        records.append(rec)
    return records
# Load the workbook into student objects.
Students_lst = read_student("aa.xlsx")
# endregion
# region (2) store the excel content as a list of dicts
def read_dict(filename):
    """Load `filename` (first sheet, skipping the header row) into a list
    of {'id', 'name', 'score'} dicts."""
    workbook = xlrd.open_workbook(filename)
    sheet = workbook.sheet_by_index(0)  # first sheet
    records = []
    # Row 0 is the header; data starts at row 1.
    for row_idx in range(1, sheet.nrows):
        row = sheet.row_values(row_idx)
        records.append({
            'id': int(row[0]),
            'name': row[1],
            'score': float(row[2]),
        })
    return records
# Load the same workbook as a list of dicts.
Students_dict = read_dict("aa.xlsx")
# endregion
print()
|
import numpy
from ltp_core.datamodules.components.srl import Srl
from ltp_core.datamodules.utils.datasets import load_dataset
def tokenize(examples, tokenizer, max_length):
    """Tokenize SRL examples and build word indices and role-label matrices.

    Returns the tokenizer output dict extended with `overflow`,
    `word_index`, `word_attention_mask` and `labels` columns.

    NOTE(review): assumes `examples` carries "form" (pre-split words),
    "predicate" and "arguments" columns -- confirm against the Srl
    datamodule schema.
    """
    res = tokenizer(
        examples["form"],
        is_split_into_words=True,
        max_length=max_length,
        truncation=True,
    )
    # For each sentence, record the sub-token position of each word's first
    # piece (skipping the leading CLS and trailing SEP via [1:-1]).
    word_index = []
    for encoding in res.encodings:
        word_index.append([])
        last_word_idx = -1
        current_length = 0
        for word_idx in encoding.words[1:-1]:
            if word_idx != last_word_idx:
                word_index[-1].append(current_length)
            current_length += 1
            last_word_idx = word_idx
    # Build an (n_words x n_words) label matrix per sentence: the row of
    # each predicate (marker value 1) is filled with its argument roles,
    # consumed in order from the front of `roles`.
    labels = []
    for predicates, roles in zip(examples["predicate"], examples["arguments"]):
        sentence_len = len(predicates)
        labels.append(numpy.zeros((sentence_len, sentence_len), dtype=numpy.int64))
        for idx, predicate in enumerate(predicates):
            if predicate == 1:
                srl = numpy.asarray(roles.pop(0), dtype=numpy.int64)
                labels[-1][idx, :] = srl
    result = res.data
    # Force the first/last ids to CLS/SEP.
    for ids in result["input_ids"]:
        ids[0] = tokenizer.cls_token_id
        ids[-1] = tokenizer.sep_token_id
    # Flag sentences that were truncated so they can be filtered out later.
    result["overflow"] = [len(encoding.overflowing) > 0 for encoding in res.encodings]
    result["word_index"] = word_index
    result["word_attention_mask"] = [[True] * len(index) for index in word_index]
    result["labels"] = labels
    return result
def build_dataset(data_dir, task_name, tokenizer, max_length=512, **kwargs):
    """Load the SRL dataset, tokenize it, drop truncated examples and
    return it formatted as torch tensors.

    NOTE(review): `task_name` and `**kwargs` are accepted but unused here;
    presumably kept for a shared builder signature -- confirm with callers.
    """
    import os

    os.environ["TOKENIZERS_PARALLELISM"] = "true"
    dataset = load_dataset(Srl, data_dir=data_dir, cache_dir=data_dir)
    dataset = dataset.map(lambda examples: tokenize(examples, tokenizer, max_length), batched=True)
    # Remove sentences flagged as truncated during tokenization.
    dataset = dataset.filter(lambda x: not x["overflow"])
    dataset.set_format(
        type="torch",
        columns=[
            "input_ids",
            "token_type_ids",
            "attention_mask",
            "word_index",
            "word_attention_mask",
            "labels",
        ],
    )
    return dataset
|
#-*-coding:UTF_8-*-
# Parse a Chinese address line of the form "<level>!<name>,<address><phone>."
# into a JSON object with name, phone and a multi-level address list.
import re
import json

sf= input('')
sf=sf[:-1]  # drop the trailing '.'
pf={
    '姓名':'',
    '手机':'',
    '地址':[],
}
# Extract the difficulty level digit and strip the "<level>!" prefix.
level=sf[0]
sf=sf.split(r'!')
sf=sf[1]
# Extract the 11-digit phone number and remove it.
telnum=re.findall("\d{11}",sf)
telnum=telnum[0]
sf=re.sub(r'\d{11}','',sf)
# Extract the person's name (text before the first comma) and remove it.
name=re.sub(r',.*$',"",sf)
sf=re.sub(name,'',sf)
sf=re.sub(r',','',sf)  # drop the comma
pf['姓名']=name
pf['手机']=telnum
# First-level address: province / autonomous region / municipality.
# NOTE(review): branch order matters and looks buggy -- the bare '天津'
# test below fires before the later '天津' branch (unreachable) and the
# first-appearing '天津' check precedes nothing for '天津市'; review
# against the intended precedence.
direct_cities=['北京','上海','重庆','天津']
if '省' in sf:
    first=re.sub(r'省.*$',"",sf)
    first+='省'
    sf=sf.replace(first,'',1)  # remove the first-level part
elif '自治区' in sf:
    first = re.sub(r'自治区.*$',"",sf)
    first+='自治区'
    sf=sf.replace(first,'',1)
elif '北京市' in sf:
    first='北京市'
    sf=sf.replace(first,'',1)
    first='北京'
elif '上海市' in sf:
    first='上海市'
    sf=sf.replace(first,'',1)
    first='上海'
elif '重庆市' in sf:
    first='重庆市'
    sf=sf.replace(first,'',1)
    first='重庆'
elif '天津' in sf:
    first='天津市'
    sf=sf.replace(first,'',1)
    first='天津'
elif '北京' in sf:
    first='北京'
    sf=sf.replace(first,'',1)
elif '上海' in sf:
    first='上海'
    sf=sf.replace(first,'',1)
elif '重庆' in sf:
    first='重庆'
    sf=sf.replace(first,'',1)
elif '天津' in sf:
    # NOTE(review): unreachable -- the earlier '天津' branch always wins.
    first='天津'
    sf=sf.replace(first,'',1)
elif '内蒙古' in sf:
    first='内蒙古自治区'
    one='内蒙古'
    sf=sf.replace(one,'',1)
elif '宁夏' in sf:
    first='宁夏回族自治区'
    one='宁夏'
    sf=sf.replace(one,'',1)
elif '广西' in sf:
    first='广西壮族自治区'
    one='广西'
    sf=sf.replace(one,'',1)
elif '新疆' in sf:
    first='新疆维吾尔族自治区'
    one='新疆'
    sf=sf.replace(one,'',1)
elif '西藏' in sf:
    first='西藏自治区'
    one='西藏'
    sf=sf.replace(one,'',1)
elif '黑龙江' in sf:
    first='黑龙江省'
    one='黑龙江'
    sf=sf.replace(one,'',1)
else:
    # Fallback: take the first two characters as the province name.
    first=sf[:2]
    sf=sf.replace(first,'',1)
    first+='省'
pf['地址'].append(first)
# Second-level address: city / prefecture / league / autonomous prefecture.
city={'市','地区','盟','自治州'}
# NOTE(review): this loop iterates the *characters* of `first`, so a
# single character can never equal a two-character entry of
# direct_cities -- the branch never fires; presumably the intent was
# `if first in direct_cities`.
for b in first:
    if b in direct_cities:
        second=b
        second+='市'
        break
for c in city:
    if c in sf:
        second=re.sub(c+'.*$',"",sf)
        second+=c
        sf=sf.replace(second,'',1)  # remove the second-level part
        break
else:
    second=''
pf['地址'].append(second)
# Third-level address: district / county / banner / forestry or special zone.
county=['区','市','县','旗','自治县','自治旗','林区','特区']
for d in county:
    if d in sf:
        third=re.sub(d+'.*$',"",sf)
        third+=d
        sf=sf.replace(third,'',1)
        break
else:
    third=""
pf['地址'].append(third)
# Fourth-level address: town / township / sub-district / sumu.
town=['镇','乡','街道','民族乡','苏木','民族苏木']
for e in town:
    if e in sf:
        forth=re.sub(e+'.*$',"",sf)
        forth+=e
        sf=sf.replace(forth,'',1)
        break
else:
    forth=""
pf['地址'].append(forth)
# Fifth-level address: street / village / road.
street=['街','村','路']
if level=='1':
    # Level 1: whatever remains is the street-level address.
    fifth=sf
    pf['地址'].append(fifth)
elif level in ('2', '3'):
    # BUG FIX: the original condition was `level=='2' or '3'`, which is
    # always true because the bare string '3' is truthy.
    for f in street:
        if f in sf:
            fifth=re.sub(f+'.*$',"",sf)
            fifth+=f
            pf['地址'].append(fifth)
            sf=sf.replace(fifth,'',1)
            break
    else:
        fifth=""
# Sixth-level address: house number (text up to and including '号').
if '号' not in sf :
    sixth=""
else:
    sixth=re.sub(r'号.*$',"",sf)
    sixth+='号'
    sf=sf.replace(sixth,'',1)
pf['地址'].append(sixth)
# Seventh-level address: whatever text remains.
seventh=sf
pf['地址'].append(seventh)
# Emit the result as JSON, keeping the Chinese characters unescaped.
json_str=json.dumps(pf,ensure_ascii=False)
print(json_str)
|
from datetime import datetime
'''
print(datetime.now())
print(datetime.now().day)
print(datetime.now().month)
print(datetime.now().year)
print(datetime.now().time())
# criar data
lancamento_ap = datetime(2021,5,6)
print(f'Data de Lançamento: {lancamento_ap}')
receberdata = datetime.strptime(input("Quando devemos lançar o app? "),'%d/%m/%Y')
print(type(receberdata))
contagem = receberdata - datetime.now()
print(contagem)'''
# Ask for a birthday (dd/mm/yyyy) and print the timedelta from now.
# NOTE(review): a date in the past yields a negative delta.
aniversário = datetime.strptime(input("Diga o dia mes e ano do seu aniversário"),'%d/%m/%Y')
calcular = aniversário - datetime.now()
print(calcular)
|
# Generated by Django 2.2.6 on 2019-12-16 19:52
from django.db import migrations
class Migration(migrations.Migration):
    # Sets Portuguese human-readable names for the Speaker model's options.

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='speaker',
            options={'verbose_name': 'palestrante', 'verbose_name_plural': 'palestrantes'},
        ),
    ]
|
import yaml, os
class GetData:
    """Test-data loading helpers."""

    def get_yaml_data(self, name):
        """Return the parsed contents of a YAML file from ./Data.

        :param name: file name to read
        :return: the deserialized YAML content
        """
        path = "./Data" + os.sep + name
        # Open, parse and return in one step; the file is closed on exit.
        with open(path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)
import calendar
# Prompt for a year/month and print that month's calendar (weeks start Sunday).
y = int(input("Enter Year"))
m = int(input("Enter month"))
c = calendar.TextCalendar(calendar.SUNDAY)
# FIX: don't shadow the builtin `str` with the rendered month text.
month_text = c.formatmonth(y, m)
print(month_text)
|
from django.shortcuts import render
from django.http import Http404
from django import forms
import markdown2
from random import randrange
from . import util
class SearchForm(forms.Form): # lhs search form
    # Single search box shown in the sidebar of every page.
    search = forms.CharField(label='',
                             widget=forms.TextInput(attrs={'placeholder':'Search Encyclopedia'}))


class TitleForm(forms.Form):
    # Title input for creating a new entry.
    title = forms.CharField(label='',
                            widget=forms.TextInput(attrs={'placeholder':'Enter title'}))


class ContentForm(forms.Form):
    # Markdown content textarea for creating/editing an entry.
    content = forms.CharField(label='',
                              widget=forms.Textarea(attrs={'placeholder':'Enter content'}))
def index(request):
    """Home page and search handler.

    GET lists all entries; POST runs the sidebar search -- an exact title
    match renders that entry, substring matches render a results page,
    and no match falls through to the 404 page via wiki_title.
    """
    query = ""
    res = []
    if request.method == "POST": # search bar
        form = SearchForm(request.POST)
        if form.is_valid():
            query = form.cleaned_data["search"]
            for title in util.list_entries():
                if query.lower() == title.lower(): # found exact match
                    return wiki_title(request, title) # redirect to wiki/[query]
                if query.lower() in title.lower(): # substring match - find all matches
                    res.append(title)
            if res != []: # all results with substring
                return render(request, "encyclopedia/search.html", {
                    "results":res,
                    "form": SearchForm()
                })
            else:
                return wiki_title(request, query) # error 404
    else: # home page. list all entries
        return render(request, "encyclopedia/index.html", {
            "entries": util.list_entries(),
            "form": SearchForm()
        })
# entry page for wiki/[title]
# entry page for wiki/[title]
def wiki_title(request, title):
    """Render the entry page for `title`, or the 404 page when missing."""
    if util.get_entry(title) is None:
        return render(request, "encyclopedia/404.html", {
            "message": "Requested page was not found"}) # resource is already available
    else:
        # Convert the stored markdown to HTML before rendering.
        return render(request, "encyclopedia/title.html", {
            "title":title.capitalize(),
            "entry":markdown2.markdown(util.get_entry(title)),
            "form": SearchForm()
        })
def new(request):
    """Create a new encyclopedia entry.

    GET renders the creation form; POST validates title/content, rejects
    duplicate titles, then saves and shows the new entry.
    """
    if request.method == "POST":
        title_form = TitleForm(request.POST)
        content_form = ContentForm(request.POST)
        if title_form.is_valid() and content_form.is_valid():
            title = title_form.cleaned_data["title"]
            content = content_form.cleaned_data["content"]
            if util.get_entry(title) is not None:
                return render(request, "encyclopedia/404.html", {
                    "message": "Entry has already been created for this topic"}) # resource is already available
            # Prepend a markdown H1 with the title before saving.
            util.save_entry(title, '#' + title + '\n' + content)
            return wiki_title(request, title)
    # GET -- or a POST whose forms failed validation.  BUG FIX: the
    # original returned None on an invalid POST, which made Django raise
    # a 500; re-render the creation page instead.
    return render(request, "encyclopedia/new.html", {
        "form": SearchForm(),
        "new_title": TitleForm(),
        "new_content": ContentForm()
    })
def edit(request, title):
    """Edit an existing entry: GET pre-fills the form, POST saves it."""
    if request.method == "GET":
        content = util.get_entry(title)
        return render(request, "encyclopedia/edit.html", {
            "form": SearchForm(),
            "text_edit": ContentForm(initial={'content':content})
        })
    content_form = ContentForm(request.POST)
    if content_form.is_valid():
        content = content_form.cleaned_data["content"]
        util.save_entry(title, content)
        return wiki_title(request, title)
    # BUG FIX: an invalid POST previously fell off the end returning None
    # (HTTP 500); re-render the edit form with the submitted data instead.
    return render(request, "encyclopedia/edit.html", {
        "form": SearchForm(),
        "text_edit": content_form
    })
def random(request):
    """Render a randomly chosen entry's page."""
    entries = util.list_entries()
    pick = entries[randrange(len(entries))]
    return wiki_title(request, pick)
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 7 22:42:11 2018
@author: gehui
"""
# Collect the even numbers from 2 through 10 and display them
# (equivalent to list(range(2, 11, 2))).
even_numbers = [n for n in range(2, 11) if n % 2 == 0]
print(even_numbers)
#!/usr/bin/python
'''The third step of metagene_analysis, metagene_plot_only.py uses R to create
the metagene plot (as a PDF).
Please see README for full details and examples.
Requires:
python 2 (https://www.python.org/downloads/)
R (http://cran.us.r-project.org/)
Joy-El R.B. Talbot Copyright (c) 2014
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import sys
import subprocess
import re
import datetime
import os
import argparse # to parse the command line arguments
# Script identity reported by the --version flag.
PROGRAM = "metagene_plot_only.py"
VERSION = "0.1.0"
UPDATED = "140407 JRBT"  # last-updated stamp (YYMMDD + author initials)
def get_arguments():
    '''Collect and parse information from the user's command line arguments.

    Returns an argparse.Namespace with:
      data_set        -- list of comma-delimited data-set descriptor strings
                         (option is repeatable; required)
      output_prefix   -- prefix for the output files (required)
      feature_counted -- name of the feature examined (required)
    '''
    # NOTE(review): the original computed a timestamp local here that was
    # never used; the dead assignment has been removed.
    parser = argparse.ArgumentParser(description=
    '''The third step of metagene_analysis, metagene_plot.py uses R to create the
metagene plot (as a PDF) and its associated statistics.
Please see README for full details and examples.
Requires:
    python 2 (https://www.python.org/downloads/)
    R (http://cran.us.r-project.org/)
''')
    parser.add_argument("-v", "--version",
                        action='version',
                        version="{} {}\tUpdated {}".format(PROGRAM, VERSION, UPDATED))
    parser.add_argument("-d", "--data_set",
                        help="comma-dilimited values of file.1.sense(or ungapped),file.1.antisene(or gapped),normalization.factor,color(for plotting),name(for plot legend)",
                        metavar='DATA_SET',
                        required=True,
                        action='append')
    parser.add_argument("-o", "--output_prefix",
                        help="Prefix for output files",
                        required=True)
    parser.add_argument("--feature_counted",
                        help="Name of feature examined, eg TSS, Start, End, Gene, Intron",
                        required=True)
    arguments = parser.parse_args()
    return arguments
if __name__ == "__main__":
    arguments = get_arguments()
    total_sets = len(arguments.data_set)
    # Flatten every comma-delimited -d option into one flat list of parts.
    data_sets = []
    for data in arguments.data_set:
        for part in data.split(","):
            data_sets.append(str(part))
    # identify window.size, top and bottom labels from input filenames
    # NOTE(review): these patterns should be raw strings (r'...'); '\d'
    # works but is a deprecated escape in Python 3.
    try:
        (window_size, top1, top2) = re.findall('.(\d+)bpX\d+bp.([a-zA-Z]+)_([a-zA-Z]+).csv\Z', data_sets[0])[0]
        (bottom1, bottom2) = re.findall('.\d+bpX\d+bp.([a-zA-Z]+)_([a-zA-Z]+).csv\Z', data_sets[1])[0]
    except IndexError as err:
        # NOTE(review): MetageneError is not defined or imported in this
        # file as shown -- raising here would itself fail with a NameError.
        raise MetageneError(err, "You must specify two files in each data_set -d option")
    # The label shared by both filenames is discarded; the differing labels
    # become the top/bottom plot annotations.
    if top1 == bottom1:
        top = top2
        bottom = bottom2
    else:
        top = top1
        bottom = bottom1
    # extract metagene information from first file
    # The header line encodes e.g. "Upstream:50 - Interval:200 - Downstream:50".
    with open(data_sets[0]) as inf:
        metagene = re.split('[\s-]+', inf.readline().strip())
    metagene_parts = {}
    for part in metagene:
        search = re.search('([A-Za-z]+):(\d+)', part)
        if search is not None:
            metagene_parts[search.group(1)] = int(search.group(2))
    # Plot coordinates expressed relative to the interval start.
    total_start = -metagene_parts['Upstream']
    interval_start = 0
    interval_end = metagene_parts['Interval'] - 1
    total_end = metagene_parts['Interval'] + metagene_parts['Downstream'] - 1
    # Hand everything over to the companion R script living next to this file.
    path_to_script = os.path.dirname(os.path.realpath(__file__))
    path_to_script += "/plot_only.R"
    call = ['Rscript', "--vanilla", "--verbose",
            path_to_script,
            str(arguments.output_prefix),
            str(arguments.feature_counted),
            str(window_size),
            str(interval_start),
            str(interval_end),
            str(total_start),
            str(total_end),
            str(top),
            str(bottom),
            str(total_sets)]
    for data in data_sets:
        call.append(str(data))
    subprocess.call(call)
    # Python 2 print statement (file targets Python 2).
    print "Finished plotting"
|
import os
import glob
"""
train.zipを解凍したtrainから
https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
にあるように訓練データを振り分ける
"""
imgs_path = "./image_dir/*.png"
source_dir = "./train"
train_dir = "./data/train"
valid_dir = "./data/validation"
os.makedirs("%s/dogs" % train_dir)
os.makedirs("%s/cats" % train_dir)
os.makedirs("%s/dogs" % valid_dir)
os.makedirs("%s/cats" % valid_dir)
"""
list_imgs_path = glob.glob(imgs_path)
for i, imgs_file in enumerate(list_imgs_path):
os.rename(imgs_file ,"./image_dir/" + str(i) + ".png" )
"""
# 最初の1000枚の画像をtrain_dirに移動
for i in range(1000):
os.rename("%s/dog.%d.jpg" % (source_dir, i + 1),
"%s/dogs/dog%04d.jpg" % (train_dir, i + 1))
os.rename("%s/cat.%d.jpg" % (source_dir, i + 1),
"%s/cats/cat%04d.jpg" % (train_dir, i + 1))
# 次の400枚の画像をvalid_dirに移動
for i in range(400):
os.rename("%s/dog.%d.jpg" % (source_dir, 1000 + i + 1),
"%s/dogs/dog%04d.jpg" % (valid_dir, i + 1))
os.rename("%s/cat.%d.jpg" % (source_dir, 1000 + i + 1),
"%s/cats/cat%04d.jpg" % (valid_dir, i + 1))
|
# Read n, build the permutation 1..n, repeatedly swap a[i] with a[i // 2],
# and write the result space-separated to antiqs.out.
# Fixes: files are now closed via context managers even on error, and the
# built-in names `input`/`output` are no longer shadowed.
with open('antiqs.in', 'r') as fin, open('antiqs.out', 'w') as fout:
    n = int(fin.readline())
    a = list(range(1, n + 1))
    for i in range(n):
        a[i], a[i // 2] = a[i // 2], a[i]
    for value in a:
        fout.write(str(value))
        fout.write(' ')
# Grid/canvas configuration constants -- presumably for a cell-drawing UI
# (TODO confirm against the module that imports these).
rows = 20
cols = 20
size = 18  # pixel size of one cell
width = 650
height = 550
outline_gray = "gray"
outline_black = "black"
fill_empty = ""  # empty fill string: cell drawn without a fill colour
fill_black = "#000000"
fill_white = "#ffffff"
import cudf
import cuml
import numpy as np
import pandas as pd
df = pd.read_csv("data/data.csv")
columns=['name', 'artists', 'acousticness', 'danceability', 'energy', 'instrumentalness', 'key', 'liveness', 'loudness', 'speechiness', 'tempo', 'valence']
df_mod = df[columns]
keys = df_mod.iloc[:,:2].values.tolist()
features = df_mod.iloc[:,2:].to_numpy()
features = (features-features.min())/(features.max()-features.min())
df = cudf.DataFrame(features)
embed = cuml.UMAP(n_neighbors=20, n_epochs=100,
min_dist=0.1, init='spectral').fit_transform(df)
np_embed = embed.to_pandas().to_numpy()
np.save("result/embeddings.npy", np_embed) |
import os
import json
import urllib2
#import jmsCode # JMS STOMP connection wrapper - needs stomp.py
import datetime
#///////////////////////////////////////////////////////////////////////////////////////////////
#
# Set of functions to handle the update payload from an instagram subscription update POST.
#
# The main() seems a bit convoluted, but it handles the possibility of multiple updates in a
# single POST. And then it handles each media item (photo) returned from the GET call the the
# relevant search endpoint.
#
# It also handles the recording of the next URL, so that each call only gets the most recent
# content that has not been retrieved before. It does that by retrieving either a 'min_id' (in the
# case of the geography) or a 'next_url' (in the case of a tag) and storing this for the next time.
#
# The next URL (from geog and from tag) is stored in a text file named according to the object_id
# for the subscription in the /config directory. The code attempts to open this for every update
# and read the next url. If it can't, it just proceeds in getting all that is available.
#
# Media metadata is either put out over JMS (not tested yet) or dumped straight to a file as JSON.
#
# If on dotcloud, check /var/log/supervisor/uwsgi.log for any print outs/errors.
# Also, note that if deploying on dotcloud you will need a custom build to ensure nginx can
# accept big enough POST payloads.
#
#
#///////////////////////////////////////////////////////////////////////////////////////////////
def getNextUrl(p, object_id):
    ''' Return the previously saved "next" URL for this subscription, or None.

    The URL is stored in a file named after object_id in the directory that
    also holds the config file.  Fix: the file is now opened with a context
    manager so the handle is always closed.
    '''
    outDir = os.path.dirname(p.configFile)
    if str(object_id) in os.listdir(outDir):
        with open(os.path.join(outDir, str(object_id)), 'r') as f:
            return f.read()
    return None
#------------------------------------------------------------------------------------------------
def formatMetadata(mediaMeta):
    ''' Retrieves those fields that would usefully be stored in the same format as twitter do it.
    This allows downstream processors to (hopefully) handle the data irrespective of source.
    It stores the original too, but duplicates fields containing time/geo/hashtags/text.

    Fix: the bare except clauses are narrowed to the lookup/conversion
    errors that actually occur for missing or null fields, so unrelated
    bugs are no longer swallowed silently.
    '''
    # Get the data list
    eventsIn = mediaMeta['data']
    eventsOut = []
    for data in eventsIn:
        # Caption text -> 'text'; captions may be absent or null.
        try:
            data['text'] = data['caption']['text']
        except (KeyError, TypeError):
            data['text'] = None
        # Unix timestamp -> Twitter-style created_at string.
        # NOTE(review): fromtimestamp() uses local time but the format
        # hard-codes '+0000'; confirm the host runs in UTC.
        dt = datetime.datetime.fromtimestamp(float(data['created_time']))
        data['created_at'] = dt.strftime('%a %b %d %H:%M:%S +0000 %Y')
        # Deal with entities/tags - put each of the tags into the hashtag structure
        entities = {"urls": [], "hashtags": [], "user_mentions": []}
        for tag in data['tags']:
            try:
                entities['hashtags'].append({'text': str(tag), 'indices': []})
            except (TypeError, UnicodeEncodeError):
                # Tags that cannot be coerced to str are skipped.
                pass
        data['entities'] = entities
        # Geolocation; location may be absent, null, or non-numeric.
        try:
            lat = float(data['location']['latitude'])
            lon = float(data['location']['longitude'])
        except (KeyError, TypeError, ValueError):
            lat, lon = None, None
        # Note the deliberate lat/lon order switcheroo between these 2 keys.
        data["geo"] = {"type": "Point", "coordinates": [lat, lon]}
        data["coordinates"] = {"type": "Point", "coordinates": [lon, lat]}
        eventsOut.append(data)
    # Return the original photo metadata with some fields added (duplicated)
    # to ensure they're in the twitter format
    return eventsOut
#------------------------------------------------------------------------------------------------
def getMediaUpdates(url):
    ''' Reads and parses the subscription updates'''
    # Returns the decoded JSON payload, or None when the request or the
    # parsing fails (best-effort: the failure is only logged to stdout).
    # NOTE: urllib2 and the print statement make this Python-2-only code.
    try:
        response = urllib2.urlopen(url)
        mediaMeta = json.loads(response.read())
    except:
        mediaMeta = None
        print "Failed to open this url: \n %s" %url
    return mediaMeta
#------------------------------------------------------------------------------------------------
def handleMediaPagination(p, url, object_id, mediaMeta):
    ''' Extracts the pagination information relating to the next set of update data'''
    # Persists the "next" URL to a file named after object_id so the next
    # poll only fetches content not retrieved before.
    # NOTE: dict.has_key() is Python-2-only.
    nextUrl = None
    # See if there is a pagincation key in the media metadata
    if mediaMeta and mediaMeta.has_key('pagination'):
        pagination = mediaMeta['pagination']
        # If it has a next_url, then get that for the next time this gets updated - they tell you what its going to be
        if pagination.has_key('next_url') and pagination['next_url'] != None:
            nextUrl = pagination['next_url']
        # Geography subscriptions, just have a next_min_id, which is used to get the next data.
        elif pagination.has_key('next_min_id') and pagination['next_min_id'] != None:
            minId = pagination['next_min_id']
            # Strip out the base url. Catch the first instance where it shouldn't have an & in it
            amp = url.find('&')
            if amp != -1:
                url = url[:amp+1]
            nextUrl = "%s&min_id=%s" %(url, minId)
        else:
            pass
    else:
        # NOTE(review): "too" is a typo for "to" in this runtime message;
        # left unchanged here because it is program output.
        print "Failed too retrieve either mediaMeta or the pagination key."
    # Where we've been successful getting the next url, dump it out to a file for next time
    if nextUrl:
        try:
            outDir = os.path.dirname(p.configFile)
            outName = os.path.join(outDir, str(object_id))
            fOut = open(outName, 'w')
            fOut.write(nextUrl)
            fOut.close()
        except:
            print "Failed to write out next URL for object_id : %s \n %s" %(object_id, nextUrl)
    return
#------------------------------------------------------------------------------------------------
def buildUrl(p, obj, objectId):
    ''' Submits the request to the SEARCH api for the actual media update.
    This gets called if the pagination function doesn't get used.
    The pagination function gets the 'next' url from the current message,
    That url ensures you don't get dupes.

    Raises ValueError for unknown subscription object types; the original
    fell through and crashed with an unbound-local NameError instead.
    '''
    # Swap out the geography id / tag placeholder for the object identifier.
    if obj == 'geography':
        url = p.geoUrl.replace('<geo-id>', str(objectId))
    elif obj == 'tag':
        url = p.tagUrl.replace('<tag>', str(objectId))
    else:
        raise ValueError("Unsupported subscription object type: %s" % obj)
    # Sub out the client id for authorisation
    return url.replace('<client-id>', str(p.client))
#------------------------------------------------------------------------------------------------
def main(p, response):
    '''Handles the subscription updates, including making the call to the endpoint and dumping to jms/text.

    p        -- config object (jmsBase/fileBase flags, JMS settings, configFile path)
    response -- JSON string: the list of update dicts POSTed by instagram
    '''
    # Make the JMS connection via STOMP and the jmsCode class
    if p.jmsBase == True:
        import jmsCode
        jms = jmsCode.jmsHandler(p.jmsHost, p.jmsPort, verbose=True)
        jms.connect()
    # If the config says save it out to file, do so
    if p.fileBase == True:
        outDir = os.path.dirname(p.configFile)
    # Accepts a list of dictionaries - the update message
    updates = json.loads(response)
    # Format the url and get the media metadata
    for upd in updates:
        # Does the next URL already exist for this object?
        url = getNextUrl(p, upd['object_id'])
        # If the next (ie this) url hasn't been written to a file, build it from the config file
        if url == None:
            url = buildUrl(p, upd['object'], upd['object_id'])
        # Get the media that has changed since the last time
        mediaMeta = getMediaUpdates(url)
        # Find the pagination info and save out info that concerning next url for this subscription
        handleMediaPagination(p, url, upd['object_id'], mediaMeta)
        # Format the content - make it like a tweet
        # NOTE(review): getMediaUpdates can return None on failure, which
        # would make formatMetadata raise -- confirm intended behaviour.
        data = formatMetadata(mediaMeta)
        # Loop each photo that is referenced by this update and either save it out or message it
        for photo in data:
            # Dump the media metadata out to a string
            jPhoto = json.dumps(photo, ensure_ascii=True)
            # Write the json for this photo out to file
            if p.fileBase == True:
                f = open(os.path.join(outDir, str(photo['id'])+'.json'), 'w')
                f.write(jPhoto)
                f.close()
            # Put the metadata onto the JMS
            if p.jmsBase == True:
                jms.sendData(p.jmsDest, jPhoto, photo['id'])
    # Close the jms connection
    if p.jmsBase == True:
        jms.disConnect()
|
# 8.3 power set of a set
class Solution(object):
    """Three ways to compute the power set of a list of distinct numbers.

    Note: xrange was replaced by range, which behaves identically for
    iteration on both Python 2 and Python 3.
    """

    def subsets(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        Recursive solution.  BUG FIX: the original recursed inside a loop,
        recomputing the identical subproblem n times at every level
        (super-exponential work) and using only the last result; a single
        recursion on nums[:-1] produces the same output.
        """
        n = len(nums)
        if n == 0:
            return nums
        if n == 1:
            return [nums, []]
        # Power set of everything but the last element...
        exc = self.subsets(nums[:-1])
        # ...plus those same subsets with the last element prepended.
        return exc + [[nums[-1]] + item for item in exc]

    def subsets2(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        Iterative: for each new element, extend every subset found so far.
        """
        n = len(nums)
        if n == 0:
            return nums
        l = [[], [nums[0]]]
        for i in range(1, n):
            m = len(l)
            for j in range(m):
                l.append(l[j] + [nums[i]])
        return l

    def subsets3(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        Same doubling idea as subsets2, building each batch at once.
        """
        n = len(nums)
        if n == 0:
            return nums
        l = [[], [nums[0]]]
        m = 2
        for i in range(1, n):
            li = [[nums[i]] + l[j] for j in range(m)]
            m *= 2
            l.extend(li)
        return l
# Quick manual check: power set of a 10-element list (1024 subsets).
# Python 2 print statements; the recursive variant is left commented out.
s = Solution()
l = [1,2,3,4,5,6,7,8,9,10]
#print s.subsets(l)
print s.subsets2(l)
print s.subsets3(l)
|
from transformers import TFAlbertForSequenceClassification
class NN:
    """Thin wrapper around a pre-trained ALBERT sequence-classification model."""

    def __init__(self):
        # Build the underlying model once at construction time.
        self._nn = self._create_nn()

    def _create_nn(self) -> TFAlbertForSequenceClassification:
        # Loads (downloading to the cache if needed) albert-base-v2 with a
        # single-output classification head (num_labels=1).
        return TFAlbertForSequenceClassification.from_pretrained('albert-base-v2', num_labels=1)

    def get_nn(self):
        # Accessor for the wrapped model instance.
        return self._nn
|
import praw
from config_bot import *
# Reddit stuff
# Authenticate against Reddit; ID/SECRET/REDDIT_USERNAME/REDDIT_PASS come
# from config_bot (star-imported above).
r = praw.Reddit(user_agent = "ARTCbot 1.3.0 by herumph",
                client_id = ID,
                client_secret = SECRET,
                username = REDDIT_USERNAME,
                password = REDDIT_PASS)
# The submission whose comments will be scraped for race results.
submission = r.submission(url='https://www.reddit.com/r/RumphyBot/comments/8gb347/mooseleague_test/')
def get_first_index(keyword, text):
    """Return the index of the first element of `text` containing `keyword`
    as a substring, or False when nothing matches.

    Fix: the bare try/except (which would also have hidden unrelated
    errors) is replaced by an explicit emptiness check.

    NOTE(review): callers test the result for truthiness, so a match at
    index 0 is indistinguishable from "not found" -- contract kept as-is.
    """
    indices = [i for i, x in enumerate(text) if x.count(keyword)]
    return indices[0] if indices else False
def main():
    """Scrape the submission's comments for race results ("time:", plus an
    optional "strava:"/"youtube:" link) and write them pipe-delimited to
    raceResults.txt as author|time(s)|url(s)."""
    authors = []
    times = []
    urls = []
    for comment in submission.comments:
        # Normalise to lowercase whitespace-separated tokens.
        body = str(comment.body).lower()
        body = body.split()
        # handling extra spaces
        body = list(filter(None, body))
        if (body.count('time:')):
            # getting position in the list of a certain string
            # NOTE(review): get_first_index returns False for "not found",
            # which collides with a genuine match at index 0.
            index1 = get_first_index('time:',body)+1
            # if there is a comma in the same position or one position over from the first time,
            # there are two times
            index2 = get_first_index(',',body)
            # no space between first time and comma
            if (index2 and index2 == index1):
                times.append(body[index1][:-1]+','+body[index2+1])
            # space between first time and comma
            elif (index2 and index2 == index1+1):
                times.append(body[index1]+','+body[index2+1])
            else:
                times.append(body[index1])
            authors.append(str(comment.author))
            # url handling
            # strava but no youtube
            if (body.count('strava:') and not body.count('youtube:')):
                index = get_first_index('strava:',body)+1
                urls.append(body[index])
            # youtube but no strava
            elif (body.count('youtube:') and not body.count('strava:')):
                index = get_first_index('youtube:',body)+1
                urls.append(body[index])
            # both
            elif (body.count('strava:') and body.count('youtube:')):
                index1 = get_first_index('strava:',body)+1
                index2 = get_first_index('youtube:',body)+1
                urls.append(body[index1]+','+body[index2])
            # neither: placeholder so the three lists stay aligned
            else:
                urls.append('1')
    # writing out to txt
    with open ("raceResults.txt", "w") as f:
        for i in range(0,len(authors)):
            f.write(authors[i]+"|"+times[i]+"|"+urls[i]+'\n')
    return
main()
|
# Generated by Django 2.2.6 on 2020-05-13 16:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes Event.event_type a PROTECTed
    # foreign key to companies.EventType, defaulting to pk=1.

    dependencies = [
        ('companies', '0008_auto_20200513_0930'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='event_type',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='companies.EventType', verbose_name='Вид мероприятия'),
        ),
    ]
|
from ..models import Block
from ..models import Phase
from ..models import DrawingStatus
from ..models import Department
from ..models import Discipline
from ..models import DrawingKind
from ..models import Drawing
from ..models import Revision
from ..models import Comment
from ..models import Reply
from ..models import Project
import os
import datetime
from django.utils import timezone
import pytz
# Directory containing this module; the CSV seed files live alongside it.
location = os.path.dirname(os.path.realpath(__file__))

# CSV seed-file names consumed by the add_* loaders below.
block_file = 'blocks.csv'
phase_file = 'phases.csv'
project_file = 'projects.csv'
department_file = 'departments.csv'
discipline_file = 'disciplines.csv'
drawing_file = 'drawings.csv'
drawing_kinds_file = 'drawing_kinds.csv'
drawing_status_file = 'drawing_statuses.csv'
expected_dates_file = 'expected_drawing_dates.csv'
def available():
    """Print the seed-data directory path and every file inside it."""
    print('\n{}:'.format(location))
    for entry in os.listdir(location):
        full_path = os.path.join(location, entry)
        if not os.path.isfile(full_path):
            continue
        print(' -> {}'.format(entry))
def _pack_info(keys, info_raw):
info = {}
for line in info_raw:
items = line.split(',')
for i, key in enumerate(keys):
try:
info[key].append(items[i].strip().lower())
except KeyError:
info[key] = []
return info
def _parse_file(name=None, headers=True):
    """Read a CSV seed file from `location` and return a column dict.

    With headers=True the first row names the columns; otherwise a single
    column named after the file stem holds every line.  Returns None when
    no name is given.
    """
    if not name:
        return None
    file_path = os.path.join(location, name)
    with open(file_path, 'r') as f:
        rows = []
        for raw in f:
            cleaned = raw.strip('\n').strip()
            if cleaned != '':
                rows.append(cleaned)
    if headers:
        header_row = rows.pop(0)
        column_names = [cell.strip().lower() for cell in header_row.split(',')]
        return _pack_info(column_names, rows)
    stem = name.split('.')[0]
    return {stem: [row.strip().lower() for row in rows]}
def add_blocks():
    """Insert any Block names from blocks.csv that are not yet stored."""
    print('Populating Blocks...')
    info = _parse_file(name=block_file, headers=False)
    keyval = block_file.split('.')[0]
    existing = [blk.name for blk in Block.objects.all()]
    count = len(existing)
    seen = set(existing)
    print('->> Total already in: {}'.format(count))
    for name in info[keyval]:
        if name in seen:
            continue
        Block(name=name).save()
        seen.add(name)
        count += 1
        print(' -> Added Block: {}'.format(name))
    print('->> Total added: {}'.format(count - len(existing)))
def add_projects():
    """Insert any Project names from projects.csv that are not yet stored."""
    print('Populating Projects...')
    info = _parse_file(name=project_file, headers=False)
    keyval = project_file.split('.')[0]
    existing_names = [p.name for p in Project.objects.all()]
    seen = set(existing_names)
    total = len(existing_names)
    print('->> Total already in: {}'.format(total))
    for name in info[keyval]:
        if name not in seen:
            Project(name=name).save()
            seen.add(name)
            total += 1
            print(' -> Added Project: {}'.format(name))
    print('->> Total added: {}'.format(total - len(existing_names)))
def add_drawing_statuses():
    """Insert any DrawingStatus values from drawing_statuses.csv not stored yet.

    Consistency/perf fix: membership is tracked in a set (as add_blocks
    does) instead of scanning a growing list for every item.
    """
    info = _parse_file(name=drawing_status_file, headers=False)
    keyval = drawing_status_file.split('.')[0]
    already = DrawingStatus.objects.all()
    prev = [dwg_st.status for dwg_st in already]
    added = len(prev)
    seen = set(prev)
    print('->> Total already in: {}'.format(len(prev)))
    for item in info[keyval]:
        if item not in seen:
            new_dwg_status = DrawingStatus(status=item)
            new_dwg_status.save()
            seen.add(item)
            added += 1
            print(' -> Added Dwg Status: {}'.format(item))
    print('->> Total added: {}'.format(added - len(prev)))
def add_departments():
    """Insert any Department names from departments.csv not stored yet.

    Consistency/perf fix: membership is tracked in a set (as add_blocks
    does) instead of scanning a growing list for every item.
    """
    info = _parse_file(name=department_file, headers=False)
    keyval = department_file.split('.')[0]
    already = Department.objects.all()
    prev = [dep.name for dep in already]
    added = len(prev)
    seen = set(prev)
    print('->> Total already in: {}'.format(len(prev)))
    for item in info[keyval]:
        if item not in seen:
            new_dep = Department(name=item)
            new_dep.save()
            seen.add(item)
            added += 1
            print(' -> Added Department: {}'.format(item))
    print('->> Total added: {}'.format(added - len(prev)))
def add_disciplines():
    """Insert any Discipline names from disciplines.csv not stored yet.

    Consistency/perf fix: membership is tracked in a set (as add_blocks
    does) instead of scanning a growing list for every item.
    """
    info = _parse_file(name=discipline_file, headers=False)
    keyval = discipline_file.split('.')[0]
    already = Discipline.objects.all()
    prev = [disc.name for disc in already]
    added = len(prev)
    seen = set(prev)
    print('->> Total already in: {}'.format(len(prev)))
    for item in info[keyval]:
        if item not in seen:
            new_disc = Discipline(name=item)
            new_disc.save()
            seen.add(item)
            added += 1
            print(' -> Added Discipline: {}'.format(item))
    print('->> Total added: {}'.format(added - len(prev)))
def add_drawing_kinds():
    """Insert any DrawingKind names from drawing_kinds.csv not stored yet.

    Consistency/perf fix: membership is tracked in a set (as add_blocks
    does) instead of scanning a growing list for every item.
    """
    info = _parse_file(name=drawing_kinds_file, headers=False)
    keyval = drawing_kinds_file.split('.')[0]
    already = DrawingKind.objects.all()
    prev = [dwg_kind.name for dwg_kind in already]
    added = len(prev)
    seen = set(prev)
    print('->> Total already in: {}'.format(len(prev)))
    for item in info[keyval]:
        if item not in seen:
            new_dwg_kind = DrawingKind(name=item)
            new_dwg_kind.save()
            seen.add(item)
            added += 1
            print(' -> Added Dwg Kind: {}'.format(item))
    print('->> Total added: {}'.format(added - len(prev)))
def find_phases():
    # Derive phases.csv from the distinct 'phase' values found in
    # drawings.csv (fallback used when phases.csv does not exist yet).
    print('finding phases in drawings.csv')
    info = _parse_file(name=drawing_file, headers=True)
    phases = set()
    # Iterate row indices; any column works for the row count.
    for i in range(len(info[list(info.keys())[0]])):
        ph = info['phase'][i].lower()
        phases.add(ph)
    with open(os.path.join(location, 'phases.csv'), 'w') as pfile:
        for item in phases:
            print(item)
            pfile.write('{}\n'.format(item))
def add_phases():
    """Insert any Phase numbers from phases.csv (generating that file from
    drawings.csv first if necessary) that are not already stored.

    Consistency/perf fix: membership is tracked in a set (as add_blocks
    does) instead of scanning a growing list for every item.
    """
    print('Looking for phase file')
    if phase_file not in os.listdir(location):
        print('phase file not found...')
        find_phases()
    print('Populating Phases...')
    info = _parse_file(name=phase_file, headers=False)
    keyval = phase_file.split('.')[0]
    already = Phase.objects.all()
    prev = [phase.number for phase in already]
    added = len(prev)
    seen = set(prev)
    print('->> Total already in: {}'.format(len(prev)))
    for item in info[keyval]:
        if item not in seen:
            new_phase = Phase(number=item)
            new_phase.save()
            seen.add(item)
            added += 1
            print(' -> Added Phase: {}'.format(item))
    print('->> Total added: {}'.format(added - len(prev)))
def add_drawings():
    """Load drawings.csv (with headers) and create a Drawing row for every
    name not already stored, resolving the related objects
    (block/status/department/discipline/kind/phase/project) by name."""
    info = _parse_file(name=drawing_file, headers=True)
    # Show one sample value per column as a sanity check.
    print(' | '.join(['{}:{}'.format(key, val[0]) for key, val in info.items()]))
    total = len(info[[i for i in info.keys()][0]])
    added = 0
    for i in range(total):
        name = info['name'][i].lower()
        if not Drawing.objects.filter(name=name).exists():
            print('-> {}'.format(info['block'][i]), end='')
            # Block '0' is treated as an alias for the 'misc' block.
            if info['block'][i] == '0':
                info['block'][i] = 'misc'
            # NOTE(review): the != '0' test below can never be False now,
            # because '0' was just rewritten to 'misc' -- dead condition.
            block = Block.objects.get(name=info['block'][i]) if info['block'][i] \
                and info['block'][i] != '0'\
                and info['block'][i] != 'none'\
                else None
            status = DrawingStatus.objects.get(status='new')
            dep = Department.objects.get(name=info['department'][i]) if info['department'][i] else None
            disc = Discipline.objects.get(name=info['discipline'][i]) if info['discipline'][i] else None
            kind = DrawingKind.objects.get(name=info['kind'][i]) if info['kind'][i] else None
            phase = Phase.objects.get(number=info['phase'][i]) if info['phase'][i] else None
            # Default to project cv3600 when the CSV has no project column.
            if 'project' in info:
                proj = Project.objects.get(name=info['project'][i]) if info['project'][i] else None
            else:
                proj = Project.objects.get(name='cv3600')
            new_dwg = Drawing(name=name,
                              desc=info['desc'][i] if info['desc'][i] else None,
                              phase=phase,
                              project=proj,
                              #block=block,
                              status=status,
                              department=dep,
                              discipline=disc,
                              kind=kind,
                              )
            new_dwg.save()
            # block is attached through a related manager, so the drawing is
            # saved first (needs a pk before .add()).
            new_dwg.block.add(block)
            new_dwg.save()
            added += 1
            print(' -> Added Drawing: {}'.format(name))
    print('->> Total Added: {}'.format(added))
def add_expected_dates():
    """Read expected_drawing_dates.csv and set Drawing.expected for every
    listed drawing name; dates are parsed as m/d/Y in America/New_York."""
    info = _parse_file(name=expected_dates_file, headers=True)
    current_tz = pytz.timezone("America/New_York")
    for i in range(len(info['name'])):
        name = info['name'][i]
        if name:
            date = None
            exp_date = None
            if info['date'][i]:
                date = info['date'][i]
                # Localize the naive parsed datetime to the local timezone.
                exp_date = current_tz.localize(datetime.datetime.strptime(date, '%m/%d/%Y'), is_dst=None)
            # Rows whose date cell is empty clear the expected date (None).
            if Drawing.objects.filter(name=name).exists():
                d = Drawing.objects.get(name=name)
                d.expected = exp_date
                d.save()
                #update(expected=exp_date)
                print(' -> updated {} with date {}'.format(name, exp_date))
|
import sys
def get_expression(s):
    """Return every expression obtainable by inserting '+', '-', or nothing
    between consecutive characters of s (the empty string is returned
    unchanged)."""
    if not s:
        return s
    expressions = [s[0]]
    for ch in s[1:]:
        count = len(expressions)
        for idx in range(count):
            prefix = expressions[idx]
            # Replace in place with '+', then append the '-' and join forms.
            expressions[idx] = prefix + '+' + ch
            expressions.append(prefix + '-' + ch)
            expressions.append(prefix + ch)
    return expressions
def calculate(exp):
    """Evaluate a flat expression of non-negative integers joined by '+'
    and '-'; the empty string evaluates to 0."""
    if len(exp) == 0:
        return 0
    total = 0
    sign = '+'
    digits = ''
    for ch in exp:
        if ch == '+' or ch == '-':
            # A sign terminates the number accumulated so far.
            total = total - int(digits) if sign == '-' else total + int(digits)
            sign = ch
            digits = ''
        else:
            digits += ch
    # Fold in the trailing number.
    return total - int(digits) if sign == '-' else total + int(digits)
def is_ugly(number):
    """True when the number is divisible by 2, 3, 5 or 7."""
    return any(number % p == 0 for p in (2, 3, 5, 7))
def count_ugly(s):
    """Count how many expressions generated from s evaluate to an 'ugly'
    number (divisible by 2, 3, 5 or 7)."""
    return sum(1 for expr in get_expression(s) if is_ugly(calculate(expr)))
if __name__ == '__main__':
    # Each line of the input file (path given as argv[1]) is one digit
    # string; print the ugly-expression count for each.
    with open(sys.argv[1]) as tests:
        for test in tests:
            test = test.replace('\n', '')
            print(count_ugly(test))
|
import picamera
import RPi.GPIO as GPIO
import threading
import os
import time
import logging
# Log to a file and mirror everything to the console via a second handler.
logging.basicConfig(filename='timelapse.log', level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.getLogger().addHandler(logging.StreamHandler())

# Event gating the recording loop on the GPIO input state.
event_start = threading.Event()

GPIO_INPUT_BCM = 4 # pin 7
GPIO.setmode(GPIO.BCM) # BCM mode is required to easily control camera LED
GPIO.setup(GPIO_INPUT_BCM, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def event_update():
    # Mirror the GPIO input level into event_start: high allows recording,
    # low blocks the recording loop at its next wait().
    if GPIO.input(GPIO_INPUT_BCM) == 1:
        logging.info('Event set - recording allowed')
        event_start.set()
    else:
        logging.info('Event cleared - recording blocked')
        # Clearing does not interrupt a chunk already being recorded.
        if camera.recording:
            logging.info('...but still recording')
        event_start.clear()
def toggle_cb(channel):
    # GPIO edge callback: settle briefly, then re-sync the event state.
    time.sleep(0.1) # It seems like a small delay can be helpful...
    event_update()
# Add a callback for GPIO input changes
GPIO.add_event_detect(GPIO_INPUT_BCM, GPIO.BOTH, callback=toggle_cb, bouncetime=200)

# Configure camera and start preview
camera = picamera.PiCamera()
camera.framerate = 5
camera.resolution = (1296, 730)
camera.video_stabilization = True
#camera.resolution = (1920, 1080)
camera.start_preview()
camera.led = False

# Make sure initial state of event_start is correct
event_update()

# Exception handling loop - just try again if something fails
# (also folder creation as new folder is required to prevent overwrites)
while 1:
    try:
        if not os.path.exists('videos'):
            os.makedirs('videos')
        # Each retry records into a new numbered sub-folder.
        contents = os.listdir('videos/')
        dir_name = 'videos/%05d' % len(contents)
        os.makedirs(dir_name)
        # Unlimited recording loop: one 60-second file per iteration.
        for filename in camera.record_sequence(
                (dir_name + '/%05d.h264' % i for i in range(1, 99999))):
            camera.led = False
            if not event_start.is_set():
                logging.info('Loop: Blocked')
                event_start.wait()
            camera.led = True # LED should only be enabled when recording - not for preview
            logging.info('Loop: Recording to %s' % filename)
            camera.wait_recording(60)
            logging.info('Loop: Recording complete')
    except Exception as e:
        # Broad on purpose: log and retry whatever went wrong.
        logging.error('ERROR - retrying: %s' % (str(e)))
        time.sleep(1) # Prevents crazy loop and allows for double CTRL-C to quit
# NOTE(review): the while-loop above never breaks, so this close() is
# unreachable; a KeyboardInterrupt propagates past it.
camera.close()
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 13 16:57:34 2016
@author: Stanley
"""
rom_num = (input("Please enter a number in the simplfied Roman System:"))
# Additive (simplified) Roman numerals: sum the value of each letter.
# BUG FIX: the original advanced a cursor through chained if-statements,
# which indexed past the end of the string (IndexError) for most inputs and
# looped forever on unrecognised characters; a single pass over the
# characters fixes both.  Unknown characters contribute 0.
roman_values = {"M": 1000, "D": 500, "C": 100, "L": 50, "X": 10, "V": 5, "I": 1}
return_num = 0
for symbol in rom_num:
    return_num += roman_values.get(symbol, 0)
print (return_num)
import numpy as np
import cv2
def color_predicts():
    '''
    Colorize the class-index prediction image (image_3_predict.png) and
    write the result to color.png.
    '''
    # BUG FIX: cv2.CAP_MODE_GRAY is a VideoCapture constant, not an imread
    # flag; cv2.IMREAD_GRAYSCALE is the correct way to load single-channel.
    img = cv2.imread("image_3_predict.png", cv2.IMREAD_GRAYSCALE)
    color = np.ones([img.shape[0], img.shape[1], 3])
    # BGR colours per class index.
    color[img==0] = [0, 0, 0]    # other: black (class 0)
    color[img==1] = [255,0, 0]   # flue-cured tobacco: red (class 1)
    color[img==2] = [0, 255, 0]  # corn: green (class 2)
    color[img==3] = [0,0,255]    # coix seed: blue (class 3)
    cv2.imwrite("color.png", color)
    #return color

color_predicts()
from .hexutil import long_to_bytes
import itertools
class StreamReader:
    """Cursor-based reader over an in-memory string/bytes buffer."""

    def __init__(self, data):
        self.str = data
        self.pos = 0

    def read(self, length=1):
        """Return up to `length` items from the cursor and advance it;
        reads past the end are truncated."""
        remaining = len(self.str) - self.pos
        if length > remaining:
            length = remaining
        start = self.pos
        self.pos = start + length
        return self.str[start:self.pos]

    def seek(self, pos):
        """Move the cursor to an absolute in-range position."""
        assert 0 <= pos < len(self.str)
        self.pos = pos

    def len(self):
        return len(self.str)

    def pos(self):
        # NOTE: shadowed by the instance attribute of the same name set in
        # __init__, so this method is only reachable via the class object.
        return self.pos

    def __len__(self):
        return self.len()
def xorstr(s, key):
    """XOR a string with a (cycled) string key and return a str; an int key
    is first converted via long_to_bytes and decoded.

    Fix: removed the dead local `out` (assigned but never used).
    """
    if isinstance(key, int):
        key = long_to_bytes(key).decode()
    return mapstr(s, key, lambda x, y: chr(ord(x) ^ ord(y)))
def xorbytes(s, key):
    """XOR a bytes-like value with a (cycled) bytes key and return a
    bytearray; an int key is first converted via long_to_bytes.

    Fix: removed the dead local `out` (assigned but never used).
    """
    if isinstance(key, int):
        key = long_to_bytes(key)
    return mapbytes(s, key, lambda x, y: x ^ y)
def mapbytes(s, t, func):
    """Apply func pairwise over s and the endlessly repeated t, collecting
    the integer results into a bytearray."""
    pairs = zip(s, itertools.cycle(t))
    return bytearray(func(x, y) for x, y in pairs)
def mapstr(s, t, func):
    """Apply func pairwise over s and the endlessly repeated t, joining the
    resulting characters into a string."""
    return "".join(func(x, y) for x, y in zip(s, itertools.cycle(t)))
def transpose(s):
    """Transpose an iterable of equal-length strings: rows become columns."""
    columns = zip(*s)
    return "".join("".join(col) for col in columns)
def nth_split(s, n):
    """Split s into consecutive chunks of length n; the final chunk may be
    shorter, and an empty s yields an empty list."""
    return [s[start:start + n] for start in range(0, len(s), n)]
|
import requests
from log import *
import json
def wavySendMessage(data):
    # POST the prepared payload to wavy's WhatsApp send endpoint and log
    # the JSON response.
    # NOTE(review/security): the username and authentication token are
    # hard-coded below; they should be moved to configuration/secrets.
    log('***Invocando API send message de wavy***\n')
    url = 'https://api-messaging.wavy.global/v1/whatsapp/send'
    urlHeaders = {'Content-type': 'application/json','UserName': 'wa_telectronicperusac_pe','AuthenticationToken': 'nouxAVNgqztEWAgVyYfj1qI2i8g-DToSty6bGz1P'}
    response = requests.post(url, json.dumps(data), headers = urlHeaders)
    log(f'Json devuelto: {response.json()}')
    print(log('*Evento finalizado*\n'))
def wavyTextJson(data):
    """Build and send the wavy payload for a plain-text WhatsApp message."""
    log('***Preparando JSON para wavy***\n')
    destination = {
        "correlationId": f"{data['messageId']}",
        "destination": f"{data['externalId']}"
    }
    payload = {
        "destinations": [destination],
        "message": {
            "messageText": f"{data['text']}"
        }
    }
    log(f'Json a enviar: {payload}\n')
    wavySendMessage(payload)
def wavyImageJson(data, url):
    """Build and send the wavy payload for a JPG image message."""
    log('***Preparando JSON para wavy***\n')
    destination = {
        "correlationId": f"{data['messageId']}",
        "destination": f"{data['externalId']}"
    }
    payload = {
        "destinations": [destination],
        "message": {
            "image": {
                "type": "JPG",
                "url": f"{url}"
            }
        }
    }
    log(f'Json a enviar: {payload}\n')
    wavySendMessage(payload)
def wavyVideoJson(data, url):
    """Build and send a Wavy WhatsApp payload for an MP4 attachment.

    NOTE(review): despite the function name, the payload uses the "audio"
    message key with type MP4 (same shape as wavyMP3Json below).  If a video
    message was intended, the key should probably be "video" - confirm
    against the Wavy WhatsApp API before changing.
    """
    log('***Preparando JSON para wavy***\n')
    number = data['externalId']
    correlationId = data['messageId']
    cjson = {
        "destinations": [{
            "correlationId": f"{correlationId}",
            "destination": f"{number}"
        }],
        "message": {
            "audio": {
                "type": "MP4",
                "url": f"{url}"
            }
        }
    }
    log(f'Json a enviar: {cjson}\n')
    wavySendMessage(cjson)
def wavyPDFJson(data, url):
    """Build a Wavy WhatsApp PDF-document payload pointing at *url* and send it."""
    log('***Preparando JSON para wavy***\n')
    payload = {
        "destinations": [{
            "correlationId": f"{data['messageId']}",
            "destination": f"{data['externalId']}"
        }],
        "message": {
            "document": {
                "type": "PDF",
                "url": f"{url}",
                "caption": "PDF"
            }
        }
    }
    log(f'Json a enviar: {payload}\n')
    wavySendMessage(payload)
def wavyMP3Json(data, url):
    """Build a Wavy WhatsApp MP3-audio payload pointing at *url* and send it."""
    log('***Preparando JSON para wavy***\n')
    payload = {
        "destinations": [{
            "correlationId": f"{data['messageId']}",
            "destination": f"{data['externalId']}"
        }],
        "message": {
            "audio": {
                "type": "MP3",
                "url": f"{url}"
            }
        }
    }
    log(f'Json a enviar: {payload}\n')
    wavySendMessage(payload)
|
from django.core.management.base import BaseCommand
import requests
from mentions.models import Mention
class Command(BaseCommand):
    """Import new quoted mentions from the electionmentions stream API."""

    def handle(self, **options):
        response = requests.get(
            "https://www.electionmentions.com/api/stream?since=2016-01-01")
        for item in response.json()['stream_items']:
            # Skip unquoted items and New Statesman articles.
            if not item['quote'] or "http://www.newstatesman.com/" in item['url']:
                continue
            obj, created = Mention.objects.update_or_create_from_em(item)
            if created:
                print("Added new mention: {0}".format(item['title']))
|
# Bigram statistics over text.txt.
# Bug fix: the original called `f.close` without parentheses, so the file was
# never actually closed; a with-block closes it deterministically.
with open('text.txt', 'r') as f:
    text = f.read()
text1 = text
mas = []    # distinct ';'-terminated bigram chunks, in order of first appearance
q_mas = []  # parallel list of occurrence counts
#task 1 with ' '
'''
abcdefgh
a[0:2] + ';' + [2:]
ab;cdefgh
a[3:5]
REPORT4 - bigrams without ' ' | не сквозные
pos1 = 0
pos2 = 2
text2 = ''
while 1:
    text2 = text2 + text1[pos1:pos2] + ';'
    pos1 = pos2
    pos2 = pos1 + 2
    if (pos2 >= len(text1)):
        break
#print(text2)
pos1 = 0
pos2 = 3
while 1:
    element = text2[pos1:pos2]
    if element not in mas:
        mas.append(element)
        q_mas.append(text2.count(element))
    pos1 = pos2
    pos2 = pos1 + 3
    if (pos2 > len(text2)):
        break
for i in range(len(mas)):
    print(mas[i][0:2] + ' ' + str(q_mas[i]))
'''
#task2 without ' ' - REPORT5
text1 = text1.replace(' ', '')
# Concatenate consecutive bigrams as 'XY;' chunks.  Stepping the start index
# by 2 up to len-1 also fixes the original loop's off-by-one, which silently
# dropped the final complete bigram of the text.
text2 = ''.join(text1[i:i + 2] + ';' for i in range(0, len(text1) - 1, 2))
# Record each distinct 3-char chunk ('XY;') and how often it occurs.
# NOTE(review): str.count can also match misaligned occurrences if the source
# text itself contains ';' characters - confirm the input alphabet.
for j in range(0, len(text2), 3):
    element = text2[j:j + 3]
    if element not in mas:
        mas.append(element)
        q_mas.append(text2.count(element))
for bigram, count in zip(mas, q_mas):
    print(bigram[0:2] + ' ' + str(count))
|
# -*- coding: utf-8 -*-
import os, time, sys
import Tool
import random
param = {'Capacitance':'pF'}  # channel name -> measurement unit
class Instrument(Tool.MeasInstr):
    # Driver for the AH2550A capacitance bridge.  Python 2 code throughout
    # (print statements, dict.has_key) - do not run under Python 3 unmodified.
    def __init__(self, resource_name, debug = False,**keyw):
        super(Instrument, self).__init__(resource_name,'AH2550A',debug,**keyw)
        self.identify("Hello, this is ")
    def measure(self,channel):
        # Return a reading for `channel`; random data in debug mode.
        # NOTE(review): if not in debug mode and channel != 'Capacitance',
        # `answer` is never assigned and the method raises NameError - confirm
        # intended channel set.
        if self.last_measure.has_key(channel):
            if not self.debug:
                if channel=='Capacitance':
                    answer=self.capacitance(self.single())
            else:
                answer=random.random()
            self.last_measure[channel]=answer
        else:
            print "you are trying to measure a non existent channel : " +channel
            print "existing channels :", self.channels
            answer=None
        return answer
    def average(self, average): #4 is default, higher the number slower the measurement
        if self.debug == False:
            self.write('AVERAGE ' + str(average))
        else:
            print "average - debug mode"
    def capacitance(self, msg): #parses the string and only returns capacitance
        # Extracts the fixed-width capacitance field from the bridge's reply.
        if self.debug == False:
            return float(msg[3:13])
        else:
            print "capacitance - debug mode"
    def continuousON(self):
        if self.debug == False:
            self.write('CONTINUOUS')
        else:
            print "continuous - debug mode"
    def continuousOFF(self):
        if self.debug == False:
            self.write('CONTINUOUS OFF')
        else:
            print "continuousOFF - debug mode"
    def frequency(self, freq): #broken function. Something wrong with capacitance bridge
        # NOTE(review): no space between 'FREQUENCY' and the value - possibly
        # why this command is reported broken; confirm instrument syntax.
        if self.debug == False:
            self.write('FREQUENCY' + str(freq))
        else:
            print "frequency - debug mode"
    def showFrequency(self):
        if self.debug == False:
            return self.ask('SHOW FREQUENCY')
        else:
            print "show frequency - debug mode"
    def single(self):
        # Trigger one measurement and return the raw reply string.
        if self.debug == False:
            return self.ask('SINGLE')
        else:
            print "single - debug mode"
#following code has been tested and known to work.
# Smoke test: take 10 averaged continuous readings from GPIB address 28.
if (__name__ == '__main__'):
    myinst=Instrument('GPIB::28')
    myinst.average(8)
    myinst.continuousON()
    for i in range(10):
        capacitance=myinst.capacitance(myinst.read()) #a float, displays in pF
        print capacitance
        time.sleep(3)
    myinst.continuousOFF()
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import TemplateView
import contracts
from utils import contract
from .models import Account
# Create your views here.
class IndexView(TemplateView):
    """Contract dashboard: token price, total supply and per-account balances."""
    template_name = 'contracts/index.html'

    def get_context_data(self, **kwargs):
        # Bug fix: the original called super(TemplateView, self), which skips
        # TemplateView itself in the MRO, and dropped **kwargs.
        context = super(IndexView, self).get_context_data(**kwargs)
        # Contract values are fixed-point integers: price has 8 decimals,
        # token amounts have 5 decimals.
        context['ether_price'] = contract.call_function('getThePrice()') / 10 ** 8
        context['tot_sup'] = contract.call_function('totalSupply()') / 10 ** 5
        accounts = []
        for account in Account.objects.all():
            account.ethers = contract.get_balance(account.address) / 10 ** 18  # wei -> ether
            account.tokens = contract.call_function(
                'balanceOf(address)', account.address) / 10 ** 5
            accounts.append({
                'name': account.name,
                'ethers': account.ethers,
                'tokens': account.tokens,
            })
            account.save()  # persist the refreshed balances
        context['accounts'] = accounts
        return context
class TransactView(TemplateView):
    """Like IndexView, but also triggers a deposit() transaction on the contract."""
    template_name = 'contracts/index.html'

    def get_context_data(self, **kwargs):
        # Bug fix: the original called super(TemplateView, self), which skips
        # TemplateView itself in the MRO, and dropped **kwargs.
        context = super(TransactView, self).get_context_data(**kwargs)
        # Contract values are fixed-point integers: price has 8 decimals,
        # token amounts have 5 decimals.
        context['ether_price'] = contract.call_function('getThePrice()') / 10 ** 8
        context['tot_sup'] = contract.call_function('totalSupply()') / 10 ** 5
        # Side effect: performs a deposit before balances are read back.
        contract.transact('deposit()')
        accounts = []
        for account in Account.objects.all():
            account.ethers = contract.get_balance(account.address) / 10 ** 18  # wei -> ether
            account.tokens = contract.call_function(
                'balanceOf(address)', account.address) / 10 ** 5
            accounts.append({
                'name': account.name,
                'ethers': account.ethers,
                'tokens': account.tokens,
            })
            account.save()  # persist the refreshed balances
        context['accounts'] = accounts
        return context
|
import json
import os
import tkinter as tk
from tkinter import filedialog
class ConfigHandler:
    # Reads (or interactively creates) config.json holding the Mr.Book data
    # path and a limit value.
    __CONFIG_FILE = "config.json"
    __PATH = "path"
    __LIMIT = "limit"
    def __init__(self): # TODO: this should be done better, for the moment it will suffice
        # NOTE(review): if config.json does not exist or is unreadable, the
        # outer `if` body is skipped, self.__json is never set, and the final
        # config_file.close() raises NameError - confirm and add an else path.
        if os.path.isfile(self.__CONFIG_FILE) and os.access(self.__CONFIG_FILE, os.R_OK):
            config_file = open(self.__CONFIG_FILE, "r")
            try:
                self.__json = json.load(config_file)
            except Exception as e:
                # Config file corrupt/empty: rebuild it interactively.
                # NOTE(review): the read handle above is dropped unclosed here.
                config_file = open(self.__CONFIG_FILE, "w")
                print("Creazione file di configurazione...")
                data = {self.__LIMIT: 180}
                # Hidden Tk root so only the file-picker dialog shows.
                root = tk.Tk()
                root.withdraw()
                directory_path = ""
                # Ask until the user picks an existing CLIENTI.DBF file.
                while not os.path.isfile(f"{directory_path}") or 'CLIENTI.DBF' not in directory_path:
                    print("Selezionare il file 'CLIENTI.DBF' di Mr.Book")
                    directory_path = filedialog.askopenfilename()
                    print(directory_path)
                    if directory_path == "":
                        # Dialog cancelled: abort (the open write handle leaks).
                        print("Operazione annullata")
                        raise FileNotFoundError
                # Store only the containing directory, not the DBF file itself.
                directory_path = directory_path.replace('/CLIENTI.DBF', '')
                print(directory_path)
                data[self.__PATH] = directory_path
                json.dump(data, config_file)
                config_file.flush()
                config_file.close()
                # Re-open read-only and cache the parsed configuration.
                config_file = open(self.__CONFIG_FILE, "r")
                self.__json = json.load(config_file)
                print("File di configurazione creato")
            try:
                self.path = self.__json[self.__PATH]
                self.limit = self.__json[self.__LIMIT]
            except Exception as e:
                print(e)
            config_file.close()
    def __update_value(self, new_value, key):
        # Update one key and rewrite the whole config file.
        self.__json[key] = new_value
        config_file = open(self.__CONFIG_FILE, "w")
        json.dump(self.__json, config_file)
        config_file.close()
    def update_path(self, new_path):
        self.__update_value(new_path, self.__PATH)
    def update_limit(self, new_limit):
        self.__update_value(new_limit, self.__LIMIT)
    def get_path(self):
        return self.path
|
import requests
from django.db import models
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from django.utils.text import slugify
class Location(models.Model):
    """A street address, geocoded to a Point on first save."""
    address = models.CharField(max_length=200)
    city = models.CharField(max_length=50)
    state = models.CharField(max_length=2)
    zip_code = models.CharField(max_length=5)
    address_string = models.CharField(max_length=500)
    location = models.PointField(blank=True, null=True)

    def save(self, *args, **kwargs):
        """Geocode address_string via the Google Maps API when no point is set,
        then save normally.  Geocoding failures leave location as None."""
        if self.location is None:
            params = {
                "address": self.address_string,
                # NOTE(security): API key hardcoded in source - move to
                # settings/environment and rotate it.
                "key": "AIzaSyCRB2jA_b4InjlQtslR5g5NO9n8dUTdJ0Q"
            }
            resp = requests.get(
                "https://maps.googleapis.com/maps/api/geocode/json",
                params=params,
                timeout=10,  # robustness: never hang a model save on the network
            )
            if resp.ok:
                # Bug fix: replaced the bare `except: pass` with the specific
                # failures a malformed/empty geocode response can produce.
                try:
                    loc = resp.json()['results'][0]['geometry']['location']
                    self.location = Point(x=loc['lng'], y=loc['lat'])
                except (KeyError, IndexError, ValueError):
                    pass  # no result / bad JSON: save without a location
        super().save(*args, **kwargs)

    def __str__(self):
        return self.address_string
class MeetingCode(models.Model):
    # A short meeting attribute code (e.g. accessibility flags) with its
    # human-readable description.
    code = models.CharField(max_length=5)
    description = models.CharField(max_length=100)
    # slug = models.SlugField()

    # def save(self, *args, **kwargs):
    #     if not self.slug:
    #         self.slug = slugify(self.code)
    #     super().save(*args, **kwargs)
    def __str__(self):
        return self.description
class MeetingType(models.Model):
    """A meeting type, with a slug derived from its name on first save."""
    type = models.CharField(max_length=50)
    slug = models.SlugField()

    def save(self, *args, **kwargs):
        # Auto-populate the slug when it has not been set yet.
        self.slug = self.slug or slugify(self.type)
        super().save(*args, **kwargs)

    def __str__(self):
        return self.type
class MeetingArea(models.Model):
    """A geographic meeting area, with a slug derived from its name on first save."""
    area = models.CharField(max_length=100)
    slug = models.SlugField()

    def save(self, *args, **kwargs):
        # Auto-populate the slug when it has not been set yet.
        self.slug = self.slug or slugify(self.area)
        super().save(*args, **kwargs)

    def __str__(self):
        return self.area
class Meeting(models.Model):
    # A meeting occurrence: where, when, plus its codes/types and the raw
    # source row it was imported from.
    name = models.CharField(max_length=200)
    location = models.ForeignKey(Location, on_delete=models.CASCADE)
    time = models.TimeField(auto_now=False, auto_now_add=False, db_index=True)
    url = models.URLField(max_length=500)
    area = models.ForeignKey(
        MeetingArea, on_delete=models.CASCADE, blank=True, null=True)
    codes = models.ManyToManyField(MeetingCode) #, through='CodeMap')
    types = models.ManyToManyField(MeetingType) #, through='TypeMap')
    # Provenance of the imported row, for debugging bad imports.
    row_src = models.TextField(blank=True)
    orig_filename = models.TextField(blank=True)
    notes = models.TextField(blank=True)

    def __str__(self):
        return self.name
#class CodeMap(models.Model):
# meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
# meetingcode = models.ForeignKey(MeetingCode, on_delete=models.CASCADE)
#
# class Meta:
# db_table = 'aafinder_meeting_codes'
#class TypeMap(models.Model):
# meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
# meetingtype = models.ForeignKey(MeetingType, on_delete=models.CASCADE)
#
# class Meta:
# db_table = 'aafinder_meeting_types'
|
#!python3
# SimPy model for a fault_injector.
# The fault_injector injects faults into devices
# at a predetermined time (via a SimPy interrupt)
#
# Author: Neha Karanjkar
from __future__ import print_function
import os, sys
import threading
from queue import Queue
import simpy
import time
import json
import logging
logger = logging.getLogger(__name__)
class FaultInjector(object):
    """
    A FaultInjector injects faults into devices
    at specific times (by means of a SimPy interrupt).
    """

    def __init__(self, env):
        self.env = env
        # Registered devices, keyed by name: <device_name>: <device_pointer>
        self.device_instances = {}
        # Main behavior runs as its own SimPy process.
        self.behavior_process = self.env.process(self.behavior())

    def behavior(self):
        """Wait until T=5, then interrupt every registered device with "FAULT"."""
        yield self.env.timeout(5)
        assert len(self.device_instances) > 0
        for name, device in self.device_instances.items():
            device.behavior_process.interrupt("FAULT")
            logger.info("SIM_TIME:{} FaultInjector injected a fault into device {}".format(self.env.now, name))
|
from flask import Flask, render_template
from flask import request
import networkx as nx
import json
import pickle
from aminer.graph import setup_graph
from aminer.util import get_attached_subgraph, graph_to_d3tree_json, \
graph_to_d3nodelink_json, mst_from_graph, bfs_from_tree, neighborhood, \
deep_subgraph, build_tech_index, nodes_by_affil
app = Flask(__name__)
# FIXME: this is a hack for demo purposes
# TODO - Move data into graph database
AUTHOR_FILE = "/brokenwillow/AMiner/AMiner-Author.txt"
COAUTHOR_FILE = "/brokenwillow/AMiner/AMiner-Coauthor.txt"
#app.G = setup_graph(AUTHOR_FILE, COAUTHOR_FILE)
# Load a pre-built (pickled) coauthor graph instead of parsing the raw files.
# NOTE(review): Python-2-era idiom - the handle is never closed and is opened
# in text mode; under py3 this needs open('littleG.pickle', 'rb') - confirm.
app.G = pickle.load(open('littleG.pickle'))
app.tech_index = build_tech_index(app.G)
@app.route('/')
def index():
    """Landing page."""
    return render_template('index.html')
@app.route('/tech/')
def get_techs():
    """List every technology term in the index."""
    return render_template('tech_list.html', tech=app.tech_index)
@app.route('/tech/<tech_guid>')
def get_tech(tech_guid):
    """Detail page for one technology term: its name and associated nodes."""
    name, nodes = app.tech_index[tech_guid]
    return render_template('tech_detail.html', tech_name=name, nodes=nodes)
@app.route('/search/tech/<term>')
def search_tech(term):
    """Substring search over technology names; renders the matching subset."""
    # NOTE(review): dict.iteritems is Python-2-only; use .items() under py3.
    terms = list()
    for k,v in app.tech_index.iteritems():
        if term in v[0]:
            terms.append((k,app.tech_index[k]))
    return render_template('tech_list.html', tech=terms)
    #return str(terms)
@app.route('/search/affil/<term>')
def search_affil(term):
    """Find authors by affiliation substring."""
    nodes = nodes_by_affil(app.G, term)
    #return str(nodes)
    return render_template('search/affil_results.html', nodes=nodes, affil_term=term )
@app.route('/authors/')
def get_authors():
    """List every author node in the graph."""
    return render_template('author_list.html', nodes=app.G.nodes())
@app.route('/authors/<idx>.nodejson')
def get_neighbors_nodejson(idx):
    """d3 node-link JSON for the subgraph attached to author idx."""
    subgraph = get_attached_subgraph(app.G, int(idx))
    return graph_to_d3nodelink_json(subgraph)
@app.route('/authors/<idx>.treejson')
def get_neighbors_treejson(idx):
    """d3 tree JSON: BFS tree over the 3-hop neighborhood of author idx."""
    #subgraph = get_attached_subgraph(app.G, int(idx))
    subgraph = deep_subgraph(app.G, neighborhood(app.G, int(idx), 3))
    # Convert to tree
    tree = nx.bfs_tree(subgraph, int(idx))
    # populate attributes
    # NOTE(review): Graph.node is the networkx-1.x API (nodes[] in 2.x) - confirm version.
    for index in tree.nodes():
        tree.node[index] = app.G.node[index]
    return graph_to_d3tree_json(tree, int(idx))
@app.route('/authors/<idx>.d3force')
def get_neighbors_d3force(idx):
    """Force-directed graph view for author idx (data fetched client-side)."""
    return render_template('node_forcegraph.html', idx=int(idx))
@app.route('/authors/<idx>.d3tree')
def get_neighbors_d3tree(idx):
    """Radial tree view for author idx (data fetched client-side)."""
    return render_template('node_radialtree.html', idx=int(idx))
@app.route('/authors/<idx>')
def get_author(idx):
    """Detail page for author idx, with its direct coauthors."""
    record = app.G.node[int(idx)]
    return render_template('author_detail.html', node=record, neighbors=app.G[int(idx)].keys())
# Development server entry point (debug mode; not for production use).
if __name__ == '__main__':
    app.debug=True
    app.run(host='0.0.0.0', port=8000)
from Rule import Rule
import json
class RulesList:
    """Holds chatbot rules loaded from a JSON file on disk."""

    def __init__(self, filename: str, rules: list = None):
        """Bug fix: the original used a mutable default (rules=[]), which is
        shared by every instance constructed without an explicit list."""
        self.filename = filename
        self.rules = [] if rules is None else rules

    def readRules(self) -> None:
        """Load the rules JSON from self.filename into self.rules."""
        try:
            with open(self.filename) as data:
                self.rules = json.load(data)
        except Exception as e:
            print('Error while reading', self.filename, ':', e)

    def getRule(self) -> "Rule":
        """Yield each rule as a Rule object (string annotation avoids an
        import-time dependency on the Rule module)."""
        for rule in self.rules['rules']:
            yield Rule(rule['ruleName'], rule['patterns'], rule['responses'])

    def getUnknown(self) -> list:
        """Return the fallback responses for unrecognized input."""
        return self.rules['unknown']
|
import re
# import datetime
import mdiag
# from collections import defaultdict
class GroupMdiagTests:
    """Host-health checks over a group of mdiag documents.

    Convention: every test returns {'ok': <bool>, 'payload': ...}.  When 'ok'
    is True the payload is {'pass': <bool>, 'ids': [...]}, where 'ids' lists
    offending document ids and 'pass' is True iff none were found.
    """

    @classmethod
    def testTransparentHugepages(cls, groupMdiag):
        """Fail hosts where transparent hugepages are set to 'always'."""
        # NOTE can also use section transparent_hugepage
        files = ['/sys/kernel/mm/transparent_hugepage/enabled',
                 '/sys/kernel/mm/redhat_transparent_hugepage/enabled']
        # NOTE is this 'exists': True redundant?
        match = {'filename': {"$in": files}, 'exists': True}
        ids = []
        for doc in groupMdiag.getc(match):
            md = mdiag.Mdiag(doc)
            if md.getSysfsSelection() == "always":
                ids.append(md.doc['_id'])
        return {'ok': True, 'payload': {'pass': len(ids) == 0, 'ids': ids}}

    @classmethod
    def testNuma(cls, groupMdiag):
        """On multi-NUMA-node hosts, fail those with vm.zone_reclaim_mode != 0."""
        # NOTE(review): .count()/.next() are pymongo-2-era cursor APIs - confirm.
        c = groupMdiag.getc({'filename': "/proc/zoneinfo"})
        numaNodes = None
        if c.count() > 0:
            md = mdiag.Mdiag(c.next())
            numaNodes = md.getNumaNodes()
        if numaNodes is None:
            # Bug fix: the original left numNumaNodes unbound (NameError) when
            # getNumaNodes() returned None; report the failure instead.
            return {'ok': False, 'payload': "unable to get /proc/zoneinfo"}
        if len(numaNodes) <= 1:
            return {'ok': True, 'payload': {'pass': True, 'ids': []}}
        # there is > 1 numa node: zone_reclaim_mode must be 0 everywhere
        ids = []
        for doc in groupMdiag.getc({'section': "sysctl"}):
            md = mdiag.Mdiag(doc)
            sysctl = md.getSysctl()
            if "vm" in sysctl and "zone_reclaim_mode" in sysctl['vm'] and \
                    sysctl['vm']['zone_reclaim_mode'] != 0:
                ids.append(doc['_id'])
        return {'ok': True, 'payload': {'pass': len(ids) == 0, 'ids': ids}}

    @classmethod
    def testLimits(cls, groupMdiag):
        """Fail mongod/mongos processes whose ulimits are below recommendations."""
        pidLimits = groupMdiag.getProcPidLimits()

        def checkLimit(limits, name, type, threshold):
            # True (pass) iff the limit meets the threshold.
            return limits[name][type] >= threshold

        pidIdMap = groupMdiag.getMongoProcesses()
        c = groupMdiag.getc({'_id': {"$in": list(pidIdMap.values())}})
        ids = []
        for doc in c:
            md = mdiag.Mdiag(doc)
            name = md.getProcessName()
            if name in ("mongod", "mongos"):
                pid = md.getProcessPid()
                if pid is not None:
                    limits = pidLimits[pid]
                    # nproc and nofile, soft and hard, all >= 32000.
                    for lname in ("nproc", "nofile"):
                        for ltype in ("soft", "hard"):
                            if not checkLimit(limits, lname, ltype, 32000):
                                ids.append(md.doc['_id'])
        return {'ok': True, 'payload': {'pass': len(ids) == 0, 'ids': ids}}

    @classmethod
    def testKernelMaxPid(cls, groupMdiag):
        """kernel.pid_max should be at least 32768."""
        section = "sysctl"
        match = "^kernel\\.pid_max\\s*=\\s*(\\d+)"
        conds = { "1": { "$gte": 32768 } }
        return cls._sectionMatchConditionals(groupMdiag, section, match, conds)

    @classmethod
    def testKernelMaxThreads(cls, groupMdiag):
        """kernel.threads-max should be at least 64000."""
        section = "sysctl"
        match = "^kernel\\.threads-max\\s*=\\s*(\\d+)"
        conds = { "1": { "$gte": 64000 } }
        return cls._sectionMatchConditionals(groupMdiag, section, match, conds)

    @classmethod
    def testKernelMaxOpenFiles(cls, groupMdiag):
        """fs.file-max should be at least 98000."""
        section = "sysctl"
        match = "^fs\\.file-max\\s*=\\s*(\\d+)"
        conds = { "1": { "$gte": 98000 } }
        return cls._sectionMatchConditionals(groupMdiag, section, match, conds)

    @classmethod
    def _sectionMatchConditionals(cls, groupMdiag, section, match, conditionals):
        """For each output line of `section` matching regex `match`, apply the
        mongo-style `conditionals` ({group_index: {op: expected}}) to the
        captured groups; a violation records the document id.  With
        conditionals=None, any matching line is itself the violation.
        """
        ids = []
        query = {'section': section, 'output': {"$exists": 1}}
        pattern = re.compile(match)
        for doc in groupMdiag.getc(query):
            md = mdiag.Mdiag(doc)
            output = md.getOutput()
            if output is None:
                continue
            for line in output:
                m = pattern.match(line)
                if m is None:
                    continue
                if conditionals is None:
                    # that we're here IS the conditional ;)
                    ids.append(md.doc['_id'])
                    continue
                for field in conditionals:
                    if m.lastindex is None or m.lastindex < int(field):
                        continue
                    captured = m.group(int(field))
                    for op, expected in conditionals[field].items():
                        if op == '$eq':
                            # NOTE(review): string comparison; numeric $eq
                            # values would need a cast - confirm usage.
                            if captured != expected:
                                ids.append(md.doc['_id'])
                        elif op == '$gte':
                            # Bug fix: the original compared str to int
                            # (TypeError on py3, always-false on py2).
                            if float(captured) < expected:
                                ids.append(md.doc['_id'])
                        elif op == '$lte':
                            if float(captured) > expected:
                                ids.append(md.doc['_id'])
                        elif op == '$regex':
                            # Bug fix: the original indexed the op-name string
                            # (TypeError) and clobbered the `match` parameter;
                            # test the captured group against the pattern.
                            if re.match(expected, captured) is None:
                                ids.append(md.doc['_id'])
        return {'ok': True, 'payload': {'pass': len(ids) == 0, 'ids': ids}}

    @classmethod
    def testDiskReadahead(cls, groupMdiag):
        """Block-device readahead should not exceed 64 sectors."""
        # rw   256   512  4096          0      8650752   /dev/sda
        # NOTE can also use read_ahead_kb section
        section = "blockdev"
        match = "^\\S+\\s+(\\d+).*(\\/.*)"
        conds = { "1": { "$lte": 64 } }
        return cls._sectionMatchConditionals(groupMdiag, section, match, conds)

    @classmethod
    def testRedhatVersion57(cls, groupMdiag):
        """Red Hat hosts should be at version 5.7 or newer."""
        # NOTE can also use distro section
        section = "/etc/system-release"
        match = "Red Hat.+\\s(\\d+\\.\\d+)\\s"
        conds = { "1": { "$gte": 5.7 } }
        return cls._sectionMatchConditionals(groupMdiag, section, match, conds)

    @classmethod
    def testSuseVersion11(cls, groupMdiag):
        """SUSE hosts should be at version 11 or newer."""
        # NOTE can also use distro section
        section = "/etc/system-release"
        match = "SUSE.+\\s(\\d+)\\s"
        conds = { "1": { "$gte": 11 } }
        return cls._sectionMatchConditionals(groupMdiag, section, match, conds)

    @classmethod
    def testDmesg(cls, groupMdiag):
        """Flag hosts with error/fail/warn/blocked messages in dmesg."""
        section = "dmesg"
        match = "error|fail|warn|blocked"
        conds = None
        return cls._sectionMatchConditionals(groupMdiag, section, match, conds)

    @classmethod
    def testVmwareBallooning(cls, groupMdiag):
        """Flag hosts with the vmware_balloon module loaded."""
        # vmware_balloon         7199  0 - Live 0xffffffffa0016000
        section = "procinfo"
        match = "vmware_balloon.*Live"
        conds = None
        return cls._sectionMatchConditionals(groupMdiag, section, match, conds)

    @classmethod
    def testSwapInProcSwaps(cls, groupMdiag):
        """Flag hosts with an active swap device in /proc/swaps.

        Bug fix: the original defined this method twice with identical bodies;
        the duplicate has been removed.
        """
        section = "proc/swaps"
        match = "/dev"
        conds = None
        return cls._sectionMatchConditionals(groupMdiag, section, match, conds)

    @classmethod
    def testSwapInEtcFstab(cls, groupMdiag):
        """Flag hosts with a swap entry in /etc/fstab."""
        # NOTE can also check in section mount,fstab
        section = "etc/fstab"
        match = "\\sswap\\s"
        conds = None
        return cls._sectionMatchConditionals(groupMdiag, section, match, conds)
|
def conRebanadas(c, i, n):
    """Return the n characters of c starting at index i, using slicing."""
    return c[i:i + n]
def sinRebanadas(c, i, n):
    """Return the n characters of c starting at index i, built character by
    character (no slicing)."""
    resultado = ""
    for k in range(i, i + n):
        resultado += c[k]
    return resultado
# MAIN PROGRAM: both implementations should print the same substring.
c = "Oid mortales el grito sagrado"
i = int(input("Índice: "))
n = int(input("Cantidad de caracteres: "))
print(conRebanadas(c, i, n))
print(sinRebanadas(c, i, n))
# Print the product of the input numbers, or -1 if it exceeds 10**18.
# NOTE(review): n (the declared count) is read but never validated against
# the actual list length - confirm the input contract.
n = int(input())
a = list(map(int, input().split()))
# A zero factor makes the product exactly 0, regardless of magnitude.
if a.count(0) > 0:
    ans = 0
else:
    ans = 1
    for i in a:
        ans *= i
        # Stop multiplying once the limit is exceeded (answer will be -1).
        if ans > (10 ** 18):
            break
if not ans > (10 ** 18):
    print(ans)
else:
    print(-1)
import numpy as np
import sqlalchemy as sq
import bs4
import requests as rq
import pandas as pd
import time as t
import datetime
import random
print('import successful')
# Scrape bina.az listing pages by sequential item id, extracting the price and
# contact phone, and append each row to a local sqlite database.
nom=900291
while nom<999999:
    # NOTE(review): randrange(1, 2) always returns 1, so the sleep below is
    # constant - confirm whether a wider range was intended.
    r=random.randrange(1,2)
    res=rq.get('https://bina.az/items/'+str(nom))
    # String-compare the response repr to detect non-200 statuses.
    if str(res)!='<Response [200]>':
        print('request error',str(res))
        pass
    else:
        # Re-fetch and parse the page; a "flash" paragraph marks a dead listing.
        res=rq.get('https://bina.az/items/' +str(nom))
        soup=bs4.BeautifulSoup(res.text,'lxml')
        alert=soup.find_all('p',{'class':"flash"})
        print(datetime.datetime.now(),nom)
        # An IndexError here means no flash alert, i.e. a live listing.
        # NOTE(review): when alert[0] succeeds (no exception), `error` may be
        # unbound and the while below raises NameError - confirm.
        try:
            alert[0].text
        except Exception as d:
            error=str(d)
            print(error)
        price_full=""
        id=0
        while error.find("index out of range")!=-1 and id<1:
            ad,phone,lables=[],[],[]
            print("while isleyir")
            name=soup.find('li',{'class':"name"})
            tel=soup.find_all('a',{'class':"phone"})
            price=soup.find_all('span',{'class':"price-val"})
            price_cur=soup.find_all('span',{'class':"price-cur"})
            error=""
            # Price value + currency; missing elements leave price_full empty.
            try:
                price_full= price[0].text+" "+price_cur[0].text
            except Exception as l:
                error=str(l)
            if error.find("index out of range")!=-1:
                pass
            else:
                pass
            error=""
            # Phone number; None recorded when absent.
            try:
                phone.append(tel[0].text)
            except Exception as s:
                error=str(s)
            if error.find("index out of range")!=-1:
                phone.append(None)
                pass
            else:
                #phone.append(tel[0].text)
                pass
            print(price_full,phone,nom)
            table_df = pd.DataFrame({'Load_date': datetime.datetime.now(),'Amount':price_full,
                                     'Phone':phone,'Elan_nom':nom})
            engine=sq.create_engine('sqlite:///direction') # point this at your own database location
            table_df.to_sql('table_data',con=engine,if_exists='append')
            id=id+1
    nom=nom+1
    t.sleep(r)
    # Force a garbage-collection pass each iteration (import inside the loop).
    from gc import collect
    collect()
|
#In the 20x20,grid below, four numbers along a diagonal line have been marked in red.
from operator import mul
from functools import reduce
# The 20x20 grid of Project Euler problem 11.
tab = [[ 8, 2,22,97,38,15, 0,40, 0,75, 4, 5, 7,78,52,12,50,77,91, 8],
       [ 49,49,99,40,17,81,18,57,60,87,17,40,98,43,69,48, 4,56,62, 0],
       [ 81,49,31,73,55,79,14,29,93,71,40,67,53,88,30, 3,49,13,36,65],
       [ 52,70,95,23, 4,60,11,42,69,24,68,56, 1,32,56,71,37, 2,36,91],
       [ 22,31,16,71,51,67,63,89,41,92,36,54,22,40,40,28,66,33,13,80],
       [ 24,47,32,60,99, 3,45, 2,44,75,33,53,78,36,84,20,35,17,12,50],
       [ 32,98,81,28,64,23,67,10,26,38,40,67,59,54,70,66,18,38,64,70],
       [ 67,26,20,68, 2,62,12,20,95,63,94,39,63, 8,40,91,66,49,94,21],
       [ 24,55,58, 5,66,73,99,26,97,17,78,78,96,83,14,88,34,89,63,72],
       [ 21,36,23, 9,75, 0,76,44,20,45,35,14, 0,61,33,97,34,31,33,95],
       [ 78,17,53,28,22,75,31,67,15,94, 3,80, 4,62,16,14, 9,53,56,92],
       [ 16,39, 5,42,96,35,31,47,55,58,88,24, 0,17,54,24,36,29,85,57],
       [ 86,56, 0,48,35,71,89, 7, 5,44,44,37,44,60,21,58,51,54,17,58],
       [ 19,80,81,68, 5,94,47,69,28,73,92,13,86,52,17,77, 4,89,55,40],
       [ 4,52, 8,83,97,35,99,16, 7,97,57,32,16,26,26,79,33,27,98,66],
       [ 88,36,68,87,57,62,20,72, 3,46,33,67,46,55,12,32,63,93,53,69],
       [ 4,42,16,73,38,25,39,11,24,94,72,18, 8,46,29,32,40,62,76,36],
       [ 20,69,36,41,72,30,23,88,34,62,99,69,82,67,59,85,74, 4,36,16],
       [ 20,73,35,29,78,31,90, 1,74,31,49,71,48,86,81,16,23,57, 5,54],
       [ 1,70,54,71,83,51,54,69,16,92,33,48,61,43,52, 1,89,19,67,48]]
#The product of these numbers is 26× 63× 78× 14= 1788696,
#What is the greatest product of four adjacent numbers in any direction (up, down, left, right, or diagonally) in the 20,?0,grid?
def findGreatestProd(table, howMany):
    """Return ((row, col, direction), factors, product) for the greatest
    product of `howMany` adjacent entries in the square grid `table`.

    Directions checked from each starting cell, in order: R (right),
    RD (down-right diagonal), D (down), LD (down-left diagonal).  The
    returned row/col are 1-based; on ties the first candidate found wins.

    Bug fix: the original bounds-checked only the row for RD and only the
    column for LD, so calls near the right/bottom edges raised IndexError.
    """
    length = len(table)
    maxProd = 0
    ret = ()
    maxCoordAndOrientation = ()

    def run(i, j, di, dj):
        # The `howMany` values starting at (i, j), stepping by (di, dj).
        return [table[i + k * di][j + k * dj] for k in range(howMany)]

    for i in range(length):
        for j in range(length):
            candidates = []
            if j + howMany <= length:                                  # R
                candidates.append(('R', run(i, j, 0, 1)))
            if i + howMany <= length and j + howMany <= length:        # RD
                candidates.append(('RD', run(i, j, 1, 1)))
            if i + howMany <= length:                                  # D
                candidates.append(('D', run(i, j, 1, 0)))
            if i + howMany <= length and j + 1 - howMany >= 0:         # LD
                candidates.append(('LD', run(i, j, 1, -1)))
            for direction, vals in candidates:
                newProd = reduce(mul, vals)
                if newProd > maxProd:
                    maxProd = newProd
                    ret = vals
                    maxCoordAndOrientation = (i + 1, j + 1, direction)
    return (maxCoordAndOrientation, ret, maxProd)
|
import cv2
import time
import os
import HandTrackingModule as htm
#webcam setting
# Count raised fingers from a webcam feed and overlay the count on the frame.
cap = cv2.VideoCapture(2)
hcam, wcam = 480, 640
cap.set(3, wcam)   # property 3 = frame width
cap.set(4, hcam)   # property 4 = frame height
#fps parameters
curTime = 0
prevTime = 0
# Pre-load the per-count overlay images, sorted so index == finger count.
folderPath = "Finger Images"
myList = os.listdir(folderPath)
myList.sort()
imgList = []
for imPath in myList:
    image = cv2.imread(f'{folderPath}/{imPath}')
    imgList.append(image)
# Landmark ids of the five fingertips (thumb..pinky) in the hand model.
tipIds = [4, 8, 12, 16, 20]
detector = htm.handDetector(detectionConf = 0.7)
while True:
    success, img = cap.read()
    img = detector.findHands(img)
    lmList = detector.findPosition(img, draw = False)
    if len(lmList) != 0:
        fingers = []
        #special case for thumb: compared on the x axis, not the y axis
        #Right hand or inverted left hand
        if lmList[ tipIds[4] ][1] < lmList[ tipIds[0] ][1]:
            if lmList[ tipIds[0] ][1] > lmList[ tipIds[0] - 1][1]:
                fingers.append(1)
            else:
                fingers.append(0)
        # Left hand or inverted right hand
        else:
            if lmList[ tipIds[0] ][1] < lmList[ tipIds[0] - 1][1]:
                fingers.append(1)
            else:
                fingers.append(0)
        #remaining 4 fingers: raised when the tip is above the middle joint
        for id in range(1, 5):
            if lmList[ tipIds[id] ][2] < lmList[ tipIds[id] - 2 ][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        totalFingers = fingers.count(1)
        # Paste the matching overlay image into the top-left corner.
        h, w, c = imgList[totalFingers].shape
        img[0:h, 0:w] = imgList[totalFingers]
        cv2.rectangle(img, (0, 300), (100, 450), (0, 255, 0), cv2.FILLED)
        cv2.putText(img, f'{str(int(totalFingers))}', (0, 425), cv2.FONT_HERSHEY_PLAIN, 10,(0, 0, 255), 5)
    #frames per second, from the time delta between iterations
    curTime = time.time()
    fps = 1 / (curTime - prevTime)
    prevTime = curTime
    cv2.putText(img, str(int(fps)), (550, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3)
    cv2.imshow("WebCam", img)
    if cv2.waitKey(1) & 0xff == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
# Register bundled third-party libraries with the GAE runtime's sys.path.
from google.appengine.ext import vendor
vendor.add('certifi')
vendor.add('chardet')
vendor.add('urllib3')
vendor.add('requests')
vendor.add('requests-toolbelt')
vendor.add('prawcore')
vendor.add('praw')
vendor.add('dateutil')
def webapp_add_wsgi_middleware(app):
    """Wrap the WSGI app with App Engine appstats recording middleware."""
    from google.appengine.ext.appstats import recording
    app = recording.appstats_wsgi_middleware(app)
    # NOTE(review): this only binds a local variable; to take effect the
    # appstats_CALC_RPC_COSTS flag should probably be set at module level
    # in appengine_config - confirm.
    appstats_CALC_RPC_COSTS = True
    return app
class HeaderMixin:
    """View mixin that exposes `header_path` to the template context."""
    header_path = ''

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update(header_path=self.header_path)
        return ctx
class SecondHeaderMixin(HeaderMixin):
    """Adds secondary-menu data (url name, title, courses) to the context."""
    url_name = None
    menu_title = ''

    def get_menu_queryset(self):
        # Hook: subclasses may return a custom menu queryset (default None).
        return None

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['url_name'] = self.url_name
        ctx['menu_title'] = self.menu_title
        # Fall back to the view's own queryset when no menu queryset is given.
        ctx['menu_courses'] = self.get_menu_queryset() or self.get_queryset()
        return ctx
|
# Generated by Django 3.1.4 on 2021-02-05 14:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django; renames Post.timestamp to created and adjusts
    # two fields.  Do not hand-edit applied migrations.

    dependencies = [
        ('posts', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='post',
            old_name='timestamp',
            new_name='created',
        ),
        migrations.AlterField(
            model_name='author',
            name='profile_picture',
            field=models.ImageField(upload_to='profile_images/'),
        ),
        migrations.AlterField(
            model_name='post',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.author'),
        ),
    ]
|
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.3 on Tue May 26 09:15:05 2009
import wx, __builtin__
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class AboutDialog(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: AboutDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.bitmap_1 = wx.StaticBitmap(self, -1, wx.Bitmap(__builtin__.application_path + "/res/ab.jpg", wx.BITMAP_TYPE_ANY))
self.htmlw_about = wx.html.HtmlWindow(self, -1, style=wx.SIMPLE_BORDER)
self.__set_properties()
self.__do_layout()
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyDown)
self.htmlw_about.SetFocus()
self.Center()
def __set_properties(self):
# begin wxGlade: AboutDialog.__set_properties
self.SetTitle(_("Sobre"))
self.SetSize((779, 260))
self.bitmap_1.SetMinSize((380, 253))
self.htmlw_about.SetBackgroundColour(wx.Colour(192, 192, 192))
self.htmlw_about.SetScrollRate(10, 10)
# end wxGlade
self.htmlw_about.SetPage(u'''
<p>
<font color='blue'><strong>E-Dictor</strong></font><br>
<small>v%s</small>
</p>
<p>
E-Dictor is a tool for transcription and coding of text corpora in XML format, so it can be edited and used in many ways (as for
linguistic analises - morphology, syntax, etc.). The XML tag set defined was meant to incorporate edition and POS information as well
as text layout information (titles, subtitles, page/line/column breaks, header, footer, etc.).<br>
<br>
Home page: <a href='https://oncoto.dyndns.org:44883/projects/edictor/' target='_blank'>https://oncoto.dyndns.org:44883/projects/edictor/</a>
</p>
<p>
<b>Authors:</b><br><br>
Pablo Faria <pablofaria@gmail.com><br>
Fábio Kepler <fabio.kepler@gmail.com><br>
Maria C. Paixão e Souza <mariaclara.ps@gmail.com><br>
</p>
<p>
This software is freely available under the terms of <b>MIT</b> public licence:<br>
<br>
<font color='gray'>
Copyright © 2009 Pablo Faria<br>
<br>
Portions copyright Fábio Kepler e Maria C. Paixão e Souza.<br>
<br>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:<br>
<br>
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.<br>
<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
</font>
</p>
'''%(__builtin__.version))
    def __do_layout(self):
        """Lay out the About dialog: bitmap on the left, HTML blurb on the right.

        wxGlade-generated; keep the begin/end markers so the designer can
        re-generate this method.
        """
        # begin wxGlade: AboutDialog.__do_layout
        sizer_60 = wx.BoxSizer(wx.HORIZONTAL)
        # Fixed-size logo, centered vertically/horizontally, 2 px border.
        sizer_60.Add(self.bitmap_1, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 2)
        # The HTML window stretches to fill the remaining width.
        sizer_60.Add(self.htmlw_about, 1, wx.ALL|wx.EXPAND, 2)
        self.SetSizer(sizer_60)
        self.Layout()
        # end wxGlade
    def OnClose(self, event):
        """
        Handles the user clicking the window/dialog "close" button/icon.

        Ends the modal event loop, returning wx.ID_CLOSE to the
        ShowModal() caller.
        """
        self.EndModal(wx.ID_CLOSE)
def OnKeyDown(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_ESCAPE:
self.Close(True)
event.Skip()
# end of class AboutDialog
|
from os.path import dirname, join
from pathlib import Path
from tokenizers import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
model_name = 'dabert'
file_dir = dirname(__file__)

# Training corpus: every .txt file below ./data, searched recursively.
data_folder = join(file_dir, 'data')
data_paths = [str(x) for x in Path(data_folder).glob("**/*.txt")]

# Output locations.  save_model(directory, prefix) writes
# '<prefix>-vocab.json' and '<prefix>-merges.txt', so the reload paths
# must carry the model-name prefix (previously they pointed at the
# unprefixed 'vocab.json'/'merges.txt', which save_model never writes).
output_folder = join(file_dir, f'models/{model_name}/')
vocab_file = join(output_folder, f'{model_name}-vocab.json')
merges_file = join(output_folder, f'{model_name}-merges.txt')

if __name__ == "__main__":
    # save_model() does not create the directory itself.
    Path(output_folder).mkdir(parents=True, exist_ok=True)

    # Train a byte-level BPE vocabulary from scratch on the corpus.
    tokenizer = ByteLevelBPETokenizer()
    tokenizer.train(
        files=data_paths,
        vocab_size=52000,
        min_frequency=2,  # drop merges observed fewer than twice
        special_tokens=[
            '<s>',
            '<pad>',
            '</s>',
            '<unk>',
            '<mask>',
        ]
    )
    tokenizer.save_model(output_folder, model_name)

    # Reload the trained files and attach RoBERTa-style post-processing so
    # encoded sequences are wrapped in <s> ... </s>.
    tokenizer = ByteLevelBPETokenizer(vocab_file, merges_file)
    tokenizer._tokenizer.post_processor = BertProcessing(
        ("</s>", tokenizer.token_to_id("</s>")),
        ("<s>", tokenizer.token_to_id("<s>")),
    )
from argparse import ArgumentParser, ArgumentTypeError
from locale import getdefaultlocale
from multiprocessing import Pool
from contextlib import redirect_stdout
from io import StringIO
from zdict import constants, utils, easter_eggs
from zdict.api import dump
from zdict.completer import DictCompleter
from zdict.loader import get_dictionary_map
from zdict.utils import readline, check_zdict_dir_and_db
def user_set_encoding_and_is_utf8():
    """Return True when the user's locale encoding is UTF-8.

    Prints guidance and returns False when the locale cannot be detected
    or is not UTF-8.
    """
    try:
        lang, enc = getdefaultlocale()
    except ValueError:
        print("Didn't detect your LC_ALL environment variable.")
        print("Please export LC_ALL with some UTF-8 encoding.")
        return False

    if enc != "UTF-8":
        print("zdict only works with encoding=UTF-8, ")
        print("but your encoding is: {} {}".format(lang, enc))
        print("Please export LC_ALL with some UTF-8 encoding.")
        return False
    return True
def get_args():
    """Build the zdict argument parser and parse sys.argv.

    Reads the module-global ``dictionary_map`` (populated in main()) to
    offer the available dictionary names for -dt/--dict.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = ArgumentParser(prog='zdict')
    parser.add_argument(
        'words',
        metavar='word',
        type=str,
        nargs='*',
        help='Words for searching its translation'
    )
    parser.add_argument(
        "-v", "--version",
        action="version",
        version='%(prog)s-' + constants.VERSION
    )
    parser.add_argument(
        "-d", "--disable-db-cache",
        default=False,
        action="store_true",
        help="Temporarily not using the result from db cache.\
              (still save the result into db)"
    )
    parser.add_argument(
        "-t", "--query-timeout",
        type=float,
        default=5.0,
        action="store",
        help="Set timeout for every query. default is 5 seconds."
    )

    def positive_int_only(value):
        # argparse `type` hook: accept only strictly positive integers.
        ivalue = int(value)
        if ivalue <= 0:
            raise ArgumentTypeError(
                "%s is an invalid positive int value" % value
            )
        return ivalue

    parser.add_argument(
        "-j", "--jobs",
        type=positive_int_only,
        nargs="?",
        default=0,  # 0: not using, None: auto, N (1, 2, ...): N jobs
        action="store",
        help="""
        Allow N jobs at once.
        Do not pass any argument to use the number of CPUs in the system.
        """
    )
    parser.add_argument(
        "-sp", "--show-provider",
        default=False,
        action="store_true",
        help="Show the dictionary provider of the queried word"
    )
    parser.add_argument(
        "-su", "--show-url",
        default=False,
        action="store_true",
        help="Show the url of the queried word"
    )

    available_dictionaries = list(dictionary_map.keys())
    available_dictionaries.append('all')
    # NOTE(review): `choices` validates the raw value, so a comma-separated
    # multi-dictionary value may be rejected before set_args() splits it --
    # confirm against the intended CLI behaviour.
    parser.add_argument(
        "-dt", "--dict",
        default="yahoo",
        action="store",
        choices=available_dictionaries,
        metavar=','.join(available_dictionaries),
        help="""
        Must be separated by comma and no spaces after each comma.
        Choose the dictionary you want. (default: yahoo)
        Use 'all' for querying all dictionaries.
        If 'all' or more than 1 dictionaries been chosen,
        --show-provider will be set to True in order to
        provide more understandable output.
        """
    )
    parser.add_argument(
        "-ld", "--list-dicts",
        default=False,
        action="store_true",
        help="Show currently supported dictionaries."
    )
    parser.add_argument(
        "-V", "--verbose",
        default=False,
        action="store_true",
        help="Show more information for the queried word.\
              (If the chosen dictionary has implemented verbose related functions)"
    )
    parser.add_argument(
        "-c", "--force-color",
        default=False,
        action="store_true",
        help="Force color printing (zdict automatically disable color printing \
              when output is not a tty, use this option to force color printing)"
    )
    parser.add_argument(
        '--dump', dest='pattern',
        nargs='?',
        default=None, const=r'^.*$',
        help='Dump the querying history, can be filtered with regex'
    )
    parser.add_argument(
        "-D", "--debug",
        default=False,
        action="store_true",
        help="Print raw html prettified by BeautifulSoup for debugging."
    )
    return parser.parse_args()
def set_args(args):
    """Normalise parsed CLI arguments in place and return them."""
    if args.force_color:
        utils.Color.set_force_color()

    requested = args.dict.split(',')
    if 'all' in requested:
        args.dict = tuple(dictionary_map.keys())
    else:
        # De-duplicate, drop unsupported names, sort for stable output.
        args.dict = sorted({name for name in requested if name in dictionary_map})

    # Multiple dictionaries => label each result with its provider.
    if len(args.dict) > 1:
        args.show_provider = True
    return args
def lookup_string_wrapper(dict_class, word, args):
    """Run one dictionary lookup and return its printed output as a string.

    Intended to run inside worker processes, so colour state is
    re-initialised here from this process's stdout.
    """
    import sys

    if args.force_color:
        utils.Color.set_force_color()
    else:
        utils.Color.set_force_color(sys.stdout.isatty())

    captured = StringIO()
    with redirect_stdout(captured):
        dict_class(args).lookup(word)
    return captured.getvalue()
def init_worker():
    """Pool initialiser: make worker processes ignore SIGINT.

    Ctrl-C is reported once by the parent; without this every worker
    would print its own KeyboardInterrupt traceback.
    """
    import signal
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def normal_mode(args):
    """Look up every word from the command line in every chosen dictionary.

    With -j/--jobs, per-dictionary lookups for each word run in a process
    pool and their captured output is printed in submission order.
    """
    if args.jobs == 0:
        # user didn't use `-j`
        for word in args.words:
            for d in args.dict:
                zdict = dictionary_map[d](args)
                zdict.lookup(word)
    else:
        # user did use `-j`
        # If processes is None, os.cpu_count() is used.
        pool = Pool(args.jobs, init_worker)
        for word in args.words:
            futures = [
                pool.apply_async(lookup_string_wrapper,
                                 (dictionary_map[d], word, args))
                for d in args.dict
            ]
            results = [i.get() for i in futures]
            print(''.join(results))
    # NOTE(review): runs once after the loops, i.e. with the *last* word
    # only -- confirm that is the intended easter-egg trigger.
    easter_eggs.lookup_pyjokes(word)
class MetaInteractivePrompt():
    """Interactive REPL that queries every selected dictionary per input.

    With -j/--jobs the per-dictionary lookups run in a process pool and
    their captured output is printed in submission order; otherwise the
    lookups run sequentially in-process.
    """

    def __init__(self, args):
        self.args = args
        # Pre-instantiated dictionaries for the sequential path.
        self.dicts = tuple(
            dictionary_map[d](self.args) for d in self.args.dict
        )
        # Bare classes for the pool path; instances are created inside the
        # workers by lookup_string_wrapper().
        self.dict_classes = tuple(dictionary_map[d] for d in self.args.dict)
        if self.args.jobs == 0:
            # user didn't use `-j`
            self.pool = None
        else:
            # user did use `-j`
            # If processes is None, os.cpu_count() is used.
            self.pool = Pool(self.args.jobs, init_worker)

    def __del__(self):
        del self.dicts

    def prompt(self):
        """Read one line from the user and look it up; no-op on empty input.

        EOFError/KeyboardInterrupt from input() propagate to the caller.
        """
        user_input = input('[zDict]: ').strip()
        if user_input:
            if self.pool:
                futures = [
                    self.pool.apply_async(lookup_string_wrapper,
                                          (d, user_input, self.args))
                    for d in self.dict_classes
                ]
                results = [i.get() for i in futures]
                print(''.join(results))
            else:
                for dictionary_instance in self.dicts:
                    dictionary_instance.lookup(user_input)
        else:
            return

    def loop_prompt(self):
        """Prompt forever; exits only via an exception from prompt()."""
        while True:
            self.prompt()
def interactive_mode(args):
    """Start the interactive [zDict] prompt with tab completion.

    Loops until KeyboardInterrupt/EOFError propagates to the caller.
    """
    # configure readline and completer
    readline.parse_and_bind("tab: complete")
    readline.set_completer(DictCompleter().complete)
    zdict = MetaInteractivePrompt(args)
    zdict.loop_prompt()
def execute_zdict(args):
    """Dispatch to the action selected by the parsed CLI arguments."""
    if args.list_dicts:
        # Keep yahoo (the default) first and pyjokes last; everything
        # else keeps its sorted position in between.
        rank = {'yahoo': 0, 'pyjokes': 2}
        for provider in sorted(dictionary_map, key=lambda name: rank.get(name, 1)):
            title = dictionary_map[provider](args).title
            print('{}: {}'.format(provider, title))
        exit()

    if args.pattern:
        # --dump: print the matching part of the query history.
        for word in dump(pattern=args.pattern):
            print(word)
        exit()

    try:
        if args.words:
            normal_mode(args)
        else:
            interactive_mode(args)
    except (KeyboardInterrupt, EOFError):
        # Leave the shell prompt on a fresh line after Ctrl-C / Ctrl-D.
        print()
def main():
    """Entry point: verify the locale, load dictionaries, then run."""
    if not user_set_encoding_and_is_utf8():
        exit()

    check_zdict_dir_and_db()

    # The registry is read as a module-level global by the rest of this
    # module (get_args, set_args, the lookup modes).
    global dictionary_map
    dictionary_map = get_dictionary_map()

    execute_zdict(set_args(get_args()))
import ROOT
from ROOT import TFile, TTree
from ROOT import TCanvas, TGraph
from ROOT import gROOT
import numpy as np
import matplotlib.colors as colors
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import matplotlib.colorbar as cbar
from scipy.optimize import curve_fit
from array import array
import glob, os, sys
sys.path.insert(0, "/home/christoph/Documents/MasterThesis/Analyse/Utlities/")
import python_utilities as util
# Toggle for writing plot output to disk (currently unused below).
save_output = 0
# Pixel labels for the 4x4 detector array (row letter, column number).
mapping = ("A1", "A2", "A3", "A4", "B1", "B2", "B3", "B4", "C1", "C2", "C3", "C4", "D1", "D2", "D3", "D4")
# NOTE(review): `dir` shadows the Python builtin of the same name.
dir = "/home/christoph/Documents/MasterThesis/Data/Absorption/"
# One calibration run per absorber material/date.
measurements = ["Al/Calibration_20190529", "Al/Calibration_20190603", "Al/Calibration_20190604_0", "Al/Calibration_20190604_1", "Al/Calibration_20190611", "Cu/Calibration_20190701",
                "Fe/Calibration_20190624", "H2O/Calibration_20190617"]
# Per-measurement arrays of 16 per-pixel rates (and their errors).
allNeutronRates=[]
allNeutronRatesErr=[]
allGammaRates=[]
allGammaRatesErr=[]
# Collect the neutron/gamma rates from each measurement's results file.
for measurement in measurements:
    thisfile = dir + measurement + "/Results/" + measurement.split("/")[1] + "_Results.root";
    print "reading:", thisfile
    file1 = ROOT.TFile.Open(thisfile)
    neutronRates = (np.array(file1.Get("neutronRates")))
    neutronRates_err = np.array( file1.Get("neutronRates_err"))
    gammaRates = (np.array(file1.Get("gammaRates")))
    gammaRates_err = np.array( file1.Get("gammaRates_err"))
    # print "abs"
    # neutronRates_abs = np.array(file1.Get("neutronRates_abs"))
    # neutronRates_abs_err = np.array(file1.Get("neutronRates_abs_err"))
    # print neutronRates_abs_err[0] / neutronRates_abs[0] * 100.
    # print "calib"
    # neutronRates_calib = np.array(file1.Get("neutronRates_calib"))
    # neutronRates_calib_err = np.array(file1.Get("neutronRates_calib_err"))
    # print neutronRates_calib_err[0] / neutronRates_calib[0] * 100.
    # print "total: ", np.sqrt( (neutronRates_abs_err[0] / neutronRates_abs[0])**2 + (neutronRates_calib_err[0] / neutronRates_calib[0])**2 ) * 100.
    allNeutronRates.append(neutronRates)
    allNeutronRatesErr.append(neutronRates_err)
    allGammaRates.append(gammaRates)
    allGammaRatesErr.append(gammaRates_err)
# allNeutronRates /= np.mean(allNeutronRates[0])
# allNeutronRatesErr /= np.mean(allNeutronRates[0])
# allGammaRates /= np.mean(allGammaRates[0])
# allGammaRatesErr /= np.mean(allGammaRates[0])
# Two stacked panels: neutron rates on top, gamma rates below.
f1 = plt.figure("f1")
axN = f1.add_subplot(211)
axG = f1.add_subplot(212)
axN.set_title("Neutrons")
axG.set_title("Gammas")
axG.set_xlabel("Pixel")
axN.set_ylabel("Relative rate")
axG.set_ylabel("Relative rate")
# Only the bottom panel gets the 16 pixel ticks.
axN.set_xticks([])
axG.set_xticks(range(16))
# One errorbar series per measurement, across the 16 pixels.
for i in range(len(measurements)):
    axN.errorbar(range(16), allNeutronRates[i], yerr=allNeutronRatesErr[i], label=measurements[i], marker="o", markersize=5, linestyle="--", linewidth=0.5)
    axG.errorbar(range(16), allGammaRates[i], yerr=allGammaRatesErr[i], label=measurements[i], marker="o", markersize=5, linestyle="--", linewidth=0.5)
# axN.legend(loc='upper center', bbox_to_anchor=(0.5, -1.4), fancybox=True, shadow=True, ncol=4)
# f1.tight_layout()
axG.legend()
# print np.mean(allNeutronRates, axis=1)
# f2 = plt.figure("f2")
# axN_mean = f2.add_subplot(111)
# # axN_mean = f2.add_subplot(211)
# # axG_mean = f2.add_subplot(212)
# x = np.arange(1,len(measurements)+1)
# # axN_mean.errorbar(x, allNeutronRates[0], yerr=allNeutronRatesErr[0], marker="o", ls="", label="Neutrons")
# axN_mean.errorbar(x, np.mean(allNeutronRates, axis=1), yerr=np.std(allNeutronRates, axis=1), marker="o", ls="", label="Neutrons")
# axN_mean.errorbar(x, np.mean(allGammaRates, axis=1), yerr=np.std(allGammaRates, axis=1), marker="o", ls="", label="Gammas")
#
# if not fitWithOffset:
# poptN, pcovN = curve_fit(exponential, x, np.mean(allNeutronRates, axis=1), sigma=np.std(allNeutronRates, axis=1), p0=[0.9,8.])
# poptG, pcovG = curve_fit(exponential, x, np.mean(allGammaRates, axis=1), sigma=np.std(allGammaRates, axis=1), p0=[0.9,8.])
# axN_mean.plot(x, exponential(x, *poptN), 'r-', label="Neutrons:\nA=%5.1f\n$x_{1/2}$=%5.1f cm" % tuple([poptN[0], poptN[1]]))
# axN_mean.plot(x, exponential(x, *poptG), 'r-', label="Gammas:\nA=%5.1f\n$x_{1/2}$=%5.1f cm" % tuple([poptG[0], poptG[1]]))
# print chisq(x, np.mean(allNeutronRates, axis=1), np.std(allNeutronRates, axis=1), exponential, poptN)
# print chisq(x, np.mean(allGammaRates, axis=1), np.std(allGammaRates, axis=1), exponential, poptG)
# axN_mean.text(6, 0.7, r"$f(x)=A\cdot \exp(-x/x_0)$")
#
# if fitWithOffset:
# poptN, pcovN = curve_fit(exponential_offset, x, np.mean(allNeutronRates, axis=1), sigma=np.std(allNeutronRates, axis=1), p0=[0.9,8.,0.])
# poptG, pcovG = curve_fit(exponential_offset, x, np.mean(allGammaRates, axis=1), sigma=np.std(allGammaRates, axis=1), p0=[0.9,8.,0.])
# axN_mean.plot(x, exponential_offset(x, *poptN), 'r-', label="Neutrons:\nA=%5.1f\n$x_{1/2}$=%5.1f cm\nA_0=%5.1f" % tuple([poptN[0], poptN[1]*np.log(2.), poptN[2]]))
# axN_mean.plot(x, exponential_offset(x, *poptG), 'r-', label="Gammas:\nA=%5.1f\n$x_{1/2}$=%5.1f cm\nA_0=%5.1f" % tuple([poptG[0], poptG[1]*np.log(2.), poptG[2]]))
# print "chi square n", chisq(x, np.mean(allNeutronRates, axis=1), np.std(allNeutronRates, axis=1), exponential_offset, poptN)
# print "chi square g", chisq(x, np.mean(allGammaRates, axis=1), np.std(allGammaRates, axis=1), exponential_offset, poptG)
# axN_mean.text(6, 0.7, r"$f(x)=A\cdot \exp(-x/x_0) + A_0$")
#
#
#
# axN_mean.set_title("")
# axN_mean.set_xlabel("Thickness / cm")
# # axN_mean.set_ylabel("Decrease in rate / %")
# axN_mean.set_ylabel("Relative rate")
# axN_mean.set_xticks(range(1,len(measurements)+1))
# axN_mean.legend()
# # axG_mean.set_title("Neutrons")
# Render the figure in an interactive window (blocks until closed).
plt.show()
|
from typing import List
import functools
def jump(nums: List[int]) -> int:
    """Minimum number of jumps to reach the last index (greedy).

    Scan left to right, tracking the farthest index reachable with the
    current number of jumps; whenever the scan reaches the current jump's
    boundary, commit one more jump out to that farthest point.
    """
    farthest = current_end = jumps = 0
    for idx, reach in enumerate(nums[:-1]):
        farthest = max(farthest, idx + reach)
        if idx == current_end:  # boundary of the current jump reached
            current_end = farthest
            jumps += 1
    return jumps
def jump2(nums: List[int]) -> int:
    """Minimum number of jumps to reach the last index (dynamic programming).

    Bottom-up O(n * max_reach) formulation of the original memoised
    recursion.  Fixes: no RecursionError on long inputs, the empty list
    returns 0 instead of raising, and reachability is judged over *all*
    predecessors (the original's lazy min() could raise ValueError as soon
    as one unreachable predecessor was touched).

    Raises:
        ValueError: when the last index is unreachable (mirrors the
            original's min() over an empty sequence).
    """
    if len(nums) <= 1:
        return 0
    INF = float('inf')
    dp = [INF] * len(nums)  # dp[i] = fewest jumps needed to reach index i
    dp[0] = 0
    last = len(nums) - 1
    for j, reach in enumerate(nums):
        if dp[j] == INF:
            continue  # index j itself is unreachable
        # Index j can reach every index up to j + nums[j].
        for i in range(j + 1, min(j + reach, last) + 1):
            if dp[j] + 1 < dp[i]:
                dp[i] = dp[j] + 1
    if dp[last] == INF:
        raise ValueError("last index is unreachable")
    return dp[last]
if __name__ == '__main__':
    # Sanity check: both strategies must agree on a 500-element worst case.
    sample = [1] * 500
    print(jump(sample) == jump2(sample))
import hashlib
import hmac
import json
import requests
from flask import jsonify
import time
# Load the Slack app configuration (signing secret etc.) once at import
# time; verify_signature() reads SLACK_SECRET from it.
with open("slack_config.json", "r") as f:
    data = f.read()
config = json.loads(data)
#VERIFY SLACK WEBHOOK
def verify_signature(request):
    """Authenticate an incoming Slack request (v0 signing-secret scheme).

    Recomputes the HMAC-SHA256 of "v0:<timestamp>:<raw body>" with our
    signing secret and compares it (constant time) against the
    X-Slack-Signature header.

    Raises:
        ValueError: when the signature does not match.
    """
    timestamp = request.headers.get("X-Slack-Request-Timestamp", "")
    signature = request.headers.get("X-Slack-Signature", "")

    basestring = "v0:{}:".format(timestamp).encode() + request.get_data()
    digest = hmac.new(
        config["SLACK_SECRET"].encode(),
        basestring,
        hashlib.sha256,
    ).hexdigest()

    if not hmac.compare_digest("v0={}".format(digest), signature):
        raise ValueError("Invalid request/credentials.")
#BLOCK OPTIONS FOR STATE
def state_options():
    """Build Slack static-select options for every non-empty state.

    Fetches the state metadata from Firebase; entries whose type is
    "Empty" are skipped.
    """
    meta = requests.get('https://covidwire.firebaseio.com/meta.json').json()
    return [
        {
            "text": {
                "type": "plain_text",
                "text": entry['name'],
            },
            "value": entry['name']
        }
        for entry in meta.values()
        if not entry["type"] == "Empty"
    ]
#BLOCK OPTIONS FOR GENERAL
def general_options():
    """Build Slack static-select options for the non-state regions."""
    return [
        {
            "text": {
                "type": "plain_text",
                "text": region,
            },
            "value": region
        }
        for region in ("Global", "National")
    ]
#LISTEN TO SLASH COMMANDS
def slash_commands(request):
    """Handle the /update_state and /update_general Slack slash commands.

    Verifies the request signature, then answers with a static-select
    block asking which state/region to update.

    Returns:
        A flask JSON response with the select block, or a
        (message, status) tuple for non-POST requests and unknown
        commands.
    """
    if request.method != "POST":
        return "Only POST requests are accepted", 405
    verify_signature(request)

    update_type = request.form['command']
    if update_type == "/update_state":
        options = state_options()
        prompt = "Select the state you want to update"
        placeholder = "Select state"
    elif update_type == '/update_general':
        options = general_options()
        prompt = "Select the region you want to update"
        placeholder = "Select region"
    else:
        # Previously an unrecognised command fell through with `options`,
        # `prompt` and `placeholder` undefined and crashed with a
        # NameError (HTTP 500); reject it explicitly instead.
        return "Unknown command: {}".format(update_type), 400

    response = {
        "text": prompt,
        "blocks": [{
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": prompt
            },
            "accessory": {
                "type": "static_select",
                "placeholder": {
                    "type": "plain_text",
                    "text": placeholder,
                },
                "options": options
            }
        }]
    }
    return jsonify(response)
|
from sdk.color_print import c_print
from tqdm import tqdm
def update_login_ips(session, ips, dst_ips, logger):
    """Push updated login allow-list IP entries to the destination tenant.

    Args:
        session: API session for the destination tenant (provides
            ``.tenant`` and ``.request``).
        ips: source login-IP objects (dicts) to push.
        dst_ips: login-IP objects already on the destination; source
            entries are matched to them by name to recover the
            destination ID.
        logger: logger for progress/debug output.

    Returns:
        Number of entries updated.  Source entries with no same-named
        destination entry are skipped.
    """
    updated = 0
    if not ips:
        logger.debug(f'No Login IPs to update for tenant: \'{session.tenant}\'')
        return updated

    logger.info(f'Updating Login IPs for tenant: \'{session.tenant}\'')
    # Build the name -> destination-ID map once instead of rescanning
    # dst_ips twice for every source entry (was O(n*m)).  On duplicate
    # names the first destination entry wins, matching the original's
    # `[...][0]` selection.
    dst_id_by_name = {}
    for dst in dst_ips:
        dst_id_by_name.setdefault(dst.get('name'), dst.get('id'))

    for ip in tqdm(ips, desc='Updating Login IPs', leave=False):
        name = ip.get('name')
        if name not in dst_id_by_name:
            continue
        # Strip read-only fields before PUTting the object back; pop with a
        # default so entries missing either key no longer raise KeyError.
        ip.pop('id', None)
        ip.pop('lastModifiedTs', None)
        logger.debug('API - Update login allow IP')
        session.request('PUT', f'/ip_allow_list_login/{dst_id_by_name[name]}', json=ip)
        updated += 1
    return updated
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from logging.handlers import RotatingFileHandler
# création de l'objet logger qui va nous servir à écrire dans les logs
# Create the logger object we will use to write to the logs
logger = logging.getLogger()
# Set the logger level to DEBUG so that it writes everything
logger.setLevel(logging.DEBUG)
# Create a formatter that prepends the time and the level
# to every message written to the log
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
# Create a handler that redirects log writes to a file opened in
# 'append' mode, with 1 backup and a maximum size of 1 MB
file_handler = RotatingFileHandler('activity.log', 'a', 1000000, 1)
# Set its level to DEBUG, tell it to use the formatter created
# above, and attach this handler to the logger
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# Create a second handler that mirrors every log write to the console
steam_handler = logging.StreamHandler()
steam_handler.setLevel(logging.DEBUG)
logger.addHandler(steam_handler)
# After three hours we can finally log --
# time to spam your code with logs everywhere:
logger.info('Hello')
logger.warning('Testing %s', 'foo')
# And afterwards the log file is easy to filter
from datetime import datetime
# Split each line into (date, level, message); keep only warnings/criticals
# that fall inside the [before, after] date window.
lines = (ligne.split(' :: ') for ligne in open('activity.log'))
errors = ((date, mes) for date, lvl, mes in lines if lvl in ('WARNING', 'CRITICAL'))
before, after = datetime(2013, 1, 12), datetime(2013, 3, 24)
parse = lambda d: datetime.strptime(d, '%Y-%m-%d %H:%M:%S,%f')
dated_line = ((date, mes) for date, mes in errors if before <= parse(date) <= after)
for date, message in dated_line:
    print date, message
# Well, admittedly, you will only get a single entry here...
from shorty.common.exceptions.shorty_exception import ShortyException
# HTTP status used for every validation failure (422 Unprocessable Entity).
HTTP_STATUS = 422


class ValidationException(ShortyException):
    """Request-validation error, rendered as an HTTP 422 response.

    Args:
        code: machine-readable error code.
        detail: human-readable description of the failure.
    """

    def __init__(self, code: str, detail: str):
        super().__init__(HTTP_STATUS, code, detail)
|
import numpy as np
from math import pi
import math
class SwingPendulum(object):
    """Swing-up pendulum control task.

    The state is [angle, angular velocity]; the agent applies a bounded
    torque and tries to hold the pendulum upright (|angle| <= up_range)
    for required_up_time seconds.  The per-step reward is cos(angle):
    +1 upright, -1 hanging down.
    """

    # The angle is wrapped into [-pi, pi) by adjustTheta().
    min_pos = -pi
    max_pos = pi
    # Maximum torque magnitude the agent may apply.
    umax = 2.0
    mass = 1.0
    length = 1.0
    G = 9.8
    # Euler-integration step, in seconds.
    timestep = 0.01
    # Seconds the pendulum must stay upright for the episode to succeed.
    required_up_time = 10.0
    # "Upright" means |angle| <= pi/4.
    up_range = pi/4.0
    # Angular velocity is clipped to +/- this value.
    max_speed = (pi/16.0)/timestep
    # Default initial state: horizontal, at rest.
    pos_start = pi/2.0
    vel_start = 0.0
    damping = 0.2
    # [low, high] bounds of the (angle, velocity) state space.
    state_range =[ np.array([min_pos, -max_speed]),
                   np.array([max_pos, max_speed])]
    action_range = [[-umax], [umax]]
    # Name-mangled; exposed read-only through the discrete_actions property.
    __discrete_actions = [np.array([-umax]),
                          np.array([0]),
                          np.array([umax])]

    def __init__(self,
                 random_start = False,
                 max_episode_length = 10000,
                 required_up_time=10.0,
                 **argk):
        # random_start: draw the initial angle uniformly from the state range.
        # max_episode_length: hard cap on steps before the episode ends.
        # **argk: extra keyword arguments are accepted and ignored.
        self.state= np.zeros(2)
        self.random_start = random_start
        self.required_up_time = required_up_time
        self.max_episode_length = max_episode_length
        self.reset()

    def step(self, action):
        """Advance one timestep under `action`.

        Returns:
            (reward, next_state): reward is cos(angle); next_state is a
            copy of the state, or None when the episode terminates (goal
            reached or step limit exceeded).
        """
        self.update(action)
        if self.inGoal() or self.count>self.max_episode_length:
            next_state = None
        else:
            next_state = self.state.copy()
        self.count += 1
        return np.cos(self.state[0]), next_state

    def reset(self):
        """Reset to the start state; returns (initial reward, state copy)."""
        if self.random_start:
            # Random angle across the full range, zero initial velocity.
            self.state[:] = [np.random.uniform(self.state_range[0][0], self.state_range[1][0]),
                             0]
        else:
            self.state[:] = [self.pos_start, self.vel_start]
        self.uptime = 0
        self.count = 0
        return np.cos(self.state[0]), self.state.copy()

    def update(self, action):
        """Euler-integrate the pendulum dynamics for one timestep."""
        # Clamp the requested torque to the actuator limits.
        torque = np.clip(action, *self.action_range)
        # Velocity increment: viscous damping + gravity + control torque.
        theta_acc = self.timestep *( -self.state[1]*self.damping
                     + self.mass * self.G * self.length * math.sin(self.state[0])
                     + torque)
        self.state[1] = np.clip(self.state[1] + theta_acc, self.state_range[0][1], self.state_range[1][1])
        self.state[0] += self.state[1] * self.timestep
        self.adjustTheta()
        # Reset the upright timer whenever the pendulum leaves the up range.
        self.uptime = 0 if np.abs(self.state[0]) > self.up_range else self.uptime + self.timestep

    def adjustTheta(self):
        # Wrap the angle into [-pi, pi).
        if self.state[0] >= pi:
            self.state[0] -= 2*pi
        if self.state[0] < -pi:
            self.state[0] += 2*pi

    def inGoal(self):
        # Goal: stayed upright continuously for required_up_time seconds.
        return self.uptime >= self.required_up_time

    def copy(self):
        """Return a new SwingPendulum with the same state.

        NOTE(review): only `random_start` and `state` are copied --
        custom max_episode_length/required_up_time are not carried over.
        """
        newpendulum = SwingPendulum(random_start = self.random_start)
        newpendulum.state[:] = self.state
        return newpendulum

    @property
    def discrete_actions(self):
        # The three discrete torque choices: full left, none, full right.
        return self.__discrete_actions

    @property
    def state_dim(self):
        return len(self.state_range[0])

    @property
    def action_dim(self):
        return len(self.action_range[0])
|
import time
def find(start, end):
    """Print every number from start to end (inclusive), one per line.

    Counts downward when start > end and upward when start < end.
    Fix: the equal case now prints the single number once -- previously
    neither branch fired and nothing was printed.
    """
    if start >= end:
        countdownfrom(start, end)
    else:
        countupfrom(start, end)
def countdownfrom(start, end):
    """Print start, start-1, ... down to end (inclusive), one per line."""
    while start >= end:
        print start
        start -= 1
def countupfrom(start, end):
    """Print start, start+1, ... up to end (inclusive), one per line."""
    while start <= end:
        print start
        start += 1
def main():
    """Read the two endpoints from the user and print the count between them."""
    # float() so fractional endpoints are accepted too (Python 2 raw_input).
    start = float(raw_input("Insert the starting number: "))
    end = float(raw_input("Insert the ending number: "))
    find(start, end)

# Runs immediately -- this script has no __main__ guard.
main()
|
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import face_recognition
import imutils
import pickle
import time
import cv2
import pandas as pd
import paho.mqtt.client as mqtt
def messageFunction (client, userdata, message):
    """MQTT message callback: print "<topic>,<payload>" for each message."""
    payload_text = str(message.payload.decode("utf-8"))
    print(str(message.topic) + "," + payload_text)
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: report whether the broker accepted us (rc == 0)."""
    status = "Connect success" if rc == 0 else f"Connected fail with code {rc}"
    print(status)
# Connect to the local MQTT broker used for the criminal-detection alerts.
# NOTE(review): connects at import time -- raises if the broker is down.
client = mqtt.Client()
client.on_connect = on_connect
broker = "192.168.0.107"
client.connect(broker)
#Initialize 'currentname' to trigger only when a new person is identified.
currentname = "unknown"
#Determine faces from encodings.pickle file model created from train_model.py
encodingsP = "encodings.pickle"
#use this xml file
#https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
cascade = "haarcascade_frontalface_default.xml"
# load the known faces and embeddings along with OpenCV's Haar
# cascade for face detection
print("[INFO] loading encodings + face detector...")
# NOTE(review): pickle is unsafe on untrusted data -- only load encodings
# generated by your own train_model.py.
data = pickle.loads(open(encodingsP, "rb").read())
detector = cv2.CascadeClassifier(cascade)
#Read csv file containing users information (Name, Past, ... columns)
user_data = pd.read_csv('name_data.csv')
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
#vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
# start the FPS counter
fps = FPS().start()
# Start the MQTT network loop in its own thread
client.loop_start()
# Main capture loop: detect faces, identify them, publish an MQTT alert for
# flagged individuals, and show the annotated frame.
while True:
    # grab the frame from the threaded video stream and resize it
    # to 500px (to speedup processing)
    frame = vs.read()
    frame = imutils.resize(frame, width=500)
    # Convert to gray scale for face detection, and to RGB for face recognition
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Set the bounding box to color green for normal individuals
    color =(0,255,0)
    # detect faces in grayscale
    rects = detector.detectMultiScale(gray, scaleFactor=1.1,
        minNeighbors=5, minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)
    # bounding box coordinates in (x, y, w, h) order for each face detected
    boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]
    # compute the facial embeddings for each face bounding box
    encodings = face_recognition.face_encodings(rgb, boxes)
    names = []
    # loop over the facial embeddings
    for encoding in encodings:
        # attempt to match each face in the input image to our known faces
        matches = face_recognition.compare_faces(data["encodings"],encoding)
        name = "Unknown" #if face is not recognized, then print Unknown
        # check to see if we have found a match
        if True in matches:
            # find the indexes of all matched faces then initialize a
            # dictionary to count the total number of times each face
            # was matched
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}
            # loop over the matched indexes and maintain a count for
            # each recognized face
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1
            # determine the recognized face with the largest number
            # of votes (note: in the event of an unlikely tie Python
            # will select first entry in the dictionary)
            name = max(counts, key=counts.get)
            # If someone in your dataset is identified, print their name on the screen
            if currentname != name:
                currentname = name
                # Cross-reference the recognized name against the CSV records;
                # a "Murder" past triggers the MQTT alert flow.
                for i, x in user_data.iterrows():
                    if currentname == x["Name"]:
                        if x["Past"] == "Murder":
                            print("The criminal has found which is {}".format(currentname))
                            client.publish("alert", "criminal")
                            client.subscribe("button")
                            client.on_message = messageFunction
                print(currentname)
        # update the list of names
        names.append(name)
    # loop over the recognized faces and draw their boxes
    for ((top, right, bottom, left), name) in zip(boxes, names):
        # NOTE(review): `color` is reset only once per frame, so after one
        # "Lucy" box every later box in the SAME frame also turns red --
        # confirm whether that is intended.
        if name == "Lucy":
            color = (0,0,255)
        # draw the predicted face name on the image - color is in BGR
        cv2.rectangle(frame, (left, top), (right, bottom),color, 2)
        y = top - 15 if top - 15 > 15 else top + 15
        cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,.8, (255, 0, 0), 2)
    # display the image to our screen
    cv2.imshow("Facial Recognition is Running", frame)
    key = cv2.waitKey(1) & 0xFF
    # quit when 'q' key is pressed
    if key == ord("q"):
        break
    # update the FPS counter
    fps.update()

# stop the timer and display FPS information and MQTT
fps.stop()
client.loop_stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
from django.core import serializers
from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
from django.views.generic import View, CreateView, UpdateView, DeleteView
from django.urls import reverse, reverse_lazy
from ..models import Driver
from ..form import DriverForm
from django.contrib import messages
# Create your views here.
class DriversView(View):
    """Render the driver index page, newest driver first."""

    def get(self, request):
        driver_list = Driver.objects.all().order_by('-id')
        return render(request, 'driver/index.html', {'drivers': driver_list})
class AddDriverView(CreateView):
    """AJAX endpoint that creates a driver and returns it serialized."""

    model = Driver
    form_class = DriverForm
    template_name = 'driver/create.html'
    success_url = reverse_lazy('drivers')

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST)
        if not form.is_valid():
            # Hand the field errors back to the client for display.
            return JsonResponse({"error": form.errors}, json_dumps_params={'indent': 2})
        saved = form.save()
        payload = serializers.serialize('json', [saved, ])
        return JsonResponse({"instance": payload}, status=200)
class EditDriverView(UpdateView):
    """Standard full-page driver edit form (non-AJAX counterpart of
    DriverUpdateView)."""

    model = Driver
    template_name = 'driver/update.html'
    context_object_name = 'driver'
    fields = ('name', 'phone', 'driving_licence', 'driving_licence_validity', 'age', 'gender', 'address')
    success_url = reverse_lazy('drivers')
class DriverUpdateView(View):
    """AJAX endpoint that applies posted field values to an existing driver."""

    form_class = DriverForm

    def post(self, request):
        form = self.form_class(request.POST)
        if not form.is_valid():
            return JsonResponse({"error": form.errors}, json_dumps_params={'indent': 2})

        # Look the driver up defensively: a missing/stale driver_id
        # previously escaped as an unhandled Driver.DoesNotExist (HTTP 500).
        try:
            driver = Driver.objects.get(pk=request.POST.get('driver_id'))
        except Driver.DoesNotExist:
            return JsonResponse({"error": "Driver not found"}, status=404)

        # Copy the editable fields from the POST data onto the record.
        for field in ('name', 'phone', 'driving_licence', 'driving_licence_validity',
                      'age', 'gender', 'address'):
            setattr(driver, field, request.POST.get(field))
        driver.save()
        return JsonResponse({"instance": 'messages'}, status=200)
class DeleteDriverView(DeleteView):
    """AJAX endpoint that deletes a driver identified by ?id=<pk>."""

    def get(self, request):
        id1 = request.GET.get('id', None)
        # Report failure instead of crashing when the id is missing or
        # unknown (previously Driver.DoesNotExist escaped as an HTTP 500).
        try:
            Driver.objects.get(id=id1).delete()
        except Driver.DoesNotExist:
            return JsonResponse({'deleted': False}, status=404)
        return JsonResponse({'deleted': True})
|
#!/usr/bin/env python
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""@TEST_NAME@
"""
import sys
import unittest
import os
import glob
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from integration.test import geopm_context
import geopmpy.io
import geopmpy.error
from integration.test import util
# The launcher import needs a compute-node environment, so it is only
# pulled in when a launch was actually requested on the command line.
if util.do_launch():
    # Note: this import may be moved outside of do_launch if needed to run
    # commands on compute nodes such as geopm_test_launcher.geopmread
    from integration.test import geopm_test_launcher
geopmpy.error.exc_clear()
class AppConf(object):
    """Stand-in for geopmpy.io.BenchConf when launching the @test_name@
    benchmark; the test launcher only needs the three methods below.
    """

    def write(self):
        """No configuration files are required by this benchmark."""
        pass

    def get_exec_path(self):
        """Absolute path of the benchmark binary, resolved next to this script."""
        here = os.path.dirname(os.path.realpath(__file__))
        return os.path.join(here, '.libs', 'test_@test_name@')

    def get_exec_args(self):
        """Command line arguments for the next run (none are needed)."""
        return []
class TestIntegration_@test_name@(unittest.TestCase):
    """Integration pyunit for the @test_name@ benchmark.

    Template file: @test_name@ placeholders are substituted when the
    concrete test is generated.
    """

    @classmethod
    def setUpClass(cls):
        """Create launcher, execute benchmark and set up class variables.
        """
        sys.stdout.write('(' + os.path.basename(__file__).split('.')[0] +
                         '.' + cls.__name__ + ') ...')
        test_name = '@test_name@'
        cls._report_path = 'test_{}.report'.format(test_name)
        cls._trace_path = 'test_{}.trace'.format(test_name)
        cls._image_path = 'test_{}.png'.format(test_name)
        cls._skip_launch = not util.do_launch()
        cls._agent_conf_path = 'test_' + test_name + '-agent-config.json'
        # Clear out exception record for python 2 support
        geopmpy.error.exc_clear()
        if not cls._skip_launch:
            # Set the job size parameters
            num_node = 1
            num_rank = 1
            time_limit = 6000
            # Configure the test application
            app_conf = AppConf()
            # Configure the agent
            # Query for the min and sticker frequency and run the
            # energy efficient agent over this range.
            freq_min = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
            freq_sticker = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
            agent_conf_dict = {'FREQ_MIN': freq_min,
                               'FREQ_MAX': freq_sticker}
            # NOTE(review): geopmpy.agent is not imported at the top of this
            # template -- confirm the generated test provides that import.
            agent_conf = geopmpy.agent.AgentConf(cls._agent_conf_path,
                                                 'energy_efficient',
                                                 agent_conf_dict)
            # Create the test launcher with the above configuration
            launcher = geopm_test_launcher.TestLauncher(app_conf,
                                                        agent_conf,
                                                        cls._report_path,
                                                        cls._trace_path,
                                                        time_limit=time_limit)
            launcher.set_num_node(num_node)
            launcher.set_num_rank(num_rank)
            # Run the test application
            launcher.run('test_' + test_name)

    def tearDown(self):
        # Keep the output files for post-mortem inspection when a test failed.
        if sys.exc_info() != (None, None, None):
            TestIntegration_@test_name@._keep_files = True

    def test_load_report(self):
        """Test that the report can be loaded
        """
        report = geopmpy.io.RawReport(self._report_path)
# Script entry point: strip launcher-specific CLI options, then hand the
# remaining arguments to pyunit.
if __name__ == '__main__':
    # Call do_launch to clear non-pyunit command line option
    util.do_launch()
    unittest.main()
|
# Ported from a Java benchmark whose history is :
# This is adapted from a benchmark written by John Ellis and Pete Kovac
# of Post Communications.
# It was modified by Hans Boehm of Silicon Graphics.
#
# This is no substitute for real applications. No actual application
# is likely to behave in exactly this way. However, this benchmark was
# designed to be more representative of real applications than other
# Java GC benchmarks of which we are aware.
# It attempts to model those properties of allocation requests that
# are important to current GC techniques.
# It is designed to be used either to obtain a single overall performance
# number, or to give a more detailed estimate of how collector
# performance varies with object lifetimes. It prints the time
# required to allocate and collect balanced binary trees of various
# sizes. Smaller trees result in shorter object lifetimes. Each cycle
# allocates roughly the same amount of memory.
# Two data structures are kept around during the entire process, so
# that the measured performance is representative of applications
# that maintain some live in-memory data. One of these is a tree
# containing many pointers. The other is a large array containing
# double precision floating point numbers. Both should be of comparable
# size.
#
# The results are only really meaningful together with a specification
# of how much memory was used. It is possible to trade memory for
# better time performance. This benchmark should be run in a 32 MB
# heap, though we don't currently know how to enforce that uniformly.
#
# Unlike the original Ellis and Kovac benchmark, we do not attempt to
# measure pause times. This facility should eventually be added back
# in. There are several reasons for omitting it for now. The original
# implementation depended on assumptions about the thread scheduler
# that don't hold uniformly. The results really measure both the
# scheduler and GC. Pause time measurements tend to not fit well with
# current benchmark suites. As far as we know, none of the current
# commercial Java implementations seriously attempt to minimize GC pause
# times.
#
# Known deficiencies:
# - No way to check on memory use
# - No cyclic data structures
# - No attempt to measure variation with object size
# - Results are sensitive to locking cost, but we don't
# check for proper locking
import time
USAGE = """gcbench [num_repetitions] [--depths=N,N,N..] [--threads=N]"""
ENABLE_THREADS = True
class Node(object):
    """A binary tree node; ``l``/``r`` become the left/right children."""
    def __init__(self, l=None, r=None):
        self.left = l
        self.right = r
kStretchTreeDepth = 18 # about 16Mb (for Java)
kLongLivedTreeDepth = 16 # about 4Mb (for Java)
kArraySize = 500000 # about 4Mb
kMinTreeDepth = 4
kMaxTreeDepth = 16
def tree_size(i):
    """Number of nodes in a complete binary tree of depth ``i``."""
    return 2 ** (i + 1) - 1
def num_iters(i):
    """Number of iterations to use for a given tree depth.

    Uses floor division so the result is an integer on both Python 2 and
    Python 3; the original true division (plus a stray semicolon) would
    yield a float under Python 3 and break ``range(niters)`` in the
    timing loops.
    """
    return 2 * tree_size(kStretchTreeDepth) // tree_size(i)
def populate(depth, node):
    "Build tree top down, assigning to older objects."
    # Guard clause instead of the original if/else nesting.
    if depth <= 0:
        return
    node.left = Node()
    node.right = Node()
    populate(depth - 1, node.left)
    populate(depth - 1, node.right)
def make_tree(depth):
    "Build tree bottom-up"
    if depth > 0:
        return Node(make_tree(depth - 1), make_tree(depth - 1))
    return Node()
def print_diagnostics():
    "ought to print free/total memory"
    # Intentionally a no-op: Python has no portable free/total-heap query
    # comparable to the Java original's Runtime.freeMemory/totalMemory.
    pass
def time_construction(depth):
    """Time top-down and bottom-up construction of num_iters(depth) trees
    of the given depth (Python 2 print syntax)."""
    niters = num_iters(depth)
    print "Creating %d trees of depth %d" % (niters, depth)
    t_start = time.time()
    for i in range(niters):
        temp_tree = Node()
        populate(depth, temp_tree)
        temp_tree = None  # drop the reference so the tree becomes garbage
    t_finish = time.time()
    # NOTE(review): "constrution" typo is part of the program's output
    # string, so it is deliberately left untouched here.
    print "\tTop down constrution took %f ms" % ((t_finish-t_start)*1000.)
    t_start = time.time()
    for i in range(niters):
        temp_tree = make_tree(depth)
        temp_tree = None
    t_finish = time.time()
    print "\tBottom up constrution took %f ms" % ((t_finish-t_start)*1000.)
DEFAULT_DEPTHS = range(kMinTreeDepth, kMaxTreeDepth+1, 2)
def time_constructions(depths):
    """Run the construction timing benchmark for every requested depth."""
    for tree_depth in depths:
        time_construction(tree_depth)
def time_parallel_constructions(depths, nthreads):
    """Run time_constructions concurrently in *nthreads* threads and wait
    for all of them to finish (Python 2 print syntax)."""
    import threading
    threadlist = []
    print "Starting %d parallel threads..." % (nthreads,)
    for n in range(nthreads):
        t = threading.Thread(target=time_constructions, args=(depths,))
        t.start()
        threadlist.append(t)
    # Join in start order; total wall time is bounded by the slowest thread.
    for t in threadlist:
        t.join()
    print "All %d threads finished" % (nthreads,)
def main(depths=DEFAULT_DEPTHS, threads=0):
    """Run the GC benchmark: stretch the heap with a large temporary tree,
    allocate long-lived data, then time tree construction for every depth
    in *depths* (optionally across *threads* parallel threads)."""
    print "Garbage Collector Test"
    print " Stretching memory with a binary tree of depth %d" % kStretchTreeDepth
    print_diagnostics()
    t_start = time.time()
    temp_tree = make_tree(kStretchTreeDepth)
    temp_tree = None
    # Create a long lived object
    print " Creating a long-lived binary tree of depth %d" % kLongLivedTreeDepth
    long_lived_tree = Node()
    populate(kLongLivedTreeDepth, long_lived_tree)
    # Create long-lived array, filling half of it
    print " Creating a long-lived array of %d doubles" % kArraySize
    array = [0.0] * kArraySize
    i = 1
    while i < kArraySize/2:
        array[i] = 1.0/i
        i += 1
    print_diagnostics()
    if threads:
        time_parallel_constructions(depths, threads)
    else:
        time_constructions(depths)
    # Sanity check that the long-lived data survived the whole benchmark.
    if long_lived_tree is None or array[1024] != 1.0/1024:
        raise Failed
    t_finish = time.time()
    print_diagnostics()
    print "Completed in %f ms." % ((t_finish-t_start)*1000.)
class Failed(Exception):
    """Raised when the long-lived data was corrupted or collected."""
    pass
def argerror():
    """Print usage information and return exit status 2."""
    print "Usage:"
    print "    ", USAGE
    return 2
def entry_point(argv):
    """Parse command-line arguments and run the benchmark.

    Supported arguments: a bare repetition count, --depths=N,N,... and
    --threads=N.  Returns a process exit status (0 on success, 1 when
    threads are requested but disabled, 2 on malformed arguments).
    """
    depths = DEFAULT_DEPTHS
    threads = 0
    repeatcount = 1
    for arg in argv[1:]:
        if arg.startswith('--threads='):
            arg = arg[len('--threads='):]
            if not ENABLE_THREADS:
                print "threads disabled (they cannot be translated)"
                return 1
            try:
                threads = int(arg)
            except ValueError:
                return argerror()
        elif arg.startswith('--depths='):
            arg = arg[len('--depths='):].split(',')
            try:
                depths = [int(s) for s in arg]
            except ValueError:
                return argerror()
        else:
            # Any other bare argument is the repetition count.
            try:
                repeatcount = int(arg)
            except ValueError:
                return argerror()
    for i in range(repeatcount):
        main(depths, threads)
    return 0
# Exit with the status code produced by entry_point.
if __name__ == '__main__':
    import sys
    sys.exit(entry_point(sys.argv))
|
# Compare sentences by cosine distance in a bag-of-words representation.
import re
import numpy as np
from scipy.spatial import distance
# NOTE(review): this handle is never closed; the script re-opens the same
# file for every pass.
f = open('../sentenceCosinusDistance/sentences.txt', 'r')
def split_low(line):
    """Lower-case *line* and split on every non-letter character
    (this produces empty strings between adjacent separators)."""
    return re.split('[^a-z]', line.lower())
not_empty_words = list(
    filter(lambda word: word,
           [word for sentence in f.readlines() for word in split_low(sentence)]))
print(not_empty_words)
# sentence1: I(0) am(1) Anton(2) and(3) I(0) work(4) at(5) EPAM(6) - 7 unique words
# sentence2: Oleski(8) works(9) with(10) me(11) at(5) EPAM(6) - 4 unique words
# (Translated from Russian:) Walk over all sentences; keep a dictionary and
# a word counter.  For each word of each sentence, skip it if it is already
# in the dictionary, otherwise store it with the next free index and
# increment the counter.
# Build the word -> unique-index mapping across all sentences.  The file is
# opened with a with-statement so the handle is closed deterministically
# (the original leaked it).  Comments translated from Russian.
with open('../sentenceCosinusDistance/sentences.txt', 'r') as f:
    unique_word_occurrence = {}
    word_unique_index = 0
    for sentence in f.readlines():
        for word in split_low(sentence):
            if word and word not in unique_word_occurrence:
                unique_word_occurrence[word] = word_unique_index
                word_unique_index += 1
print(word_unique_index)
# could be refactored to a functional style
# Occurrence matrix of size n*d (e.g. 2*11):
#   n - number of sentences (2)
#   d - number of unique words (indices 0..11)
# matrix[i][j] = number of occurrences of unique word j in sentence i, e.g.
#   2,1,1,1, ... counts for all unique words
#   0,0,0,0
# Algorithm: create an (n, d) matrix; for each sentence and each unique
# word, count how often the word appears and store the count at (i, j).
# Count unique-word occurrences per sentence, then rank all other sentences
# by cosine distance to the first one (ascending).
with open('../sentenceCosinusDistance/sentences.txt', 'r') as f:
    sentences = f.readlines()
n = len(sentences)       # number of sentences (len() instead of __len__())
d = word_unique_index    # number of unique words
unique_word_occurrences_matrix = np.zeros(shape=(n, d), dtype=int)
for current_sentence, sentence in enumerate(sentences):
    for word in split_low(sentence):
        if word:
            unique_word_occurrences_matrix[current_sentence, unique_word_occurrence[word]] += 1
# Cosine distance of every row after the first to the first row.
dists = []
for current_row_index in range(1, n):
    dists.append((current_row_index,
                  distance.cosine(unique_word_occurrences_matrix[0],
                                  unique_word_occurrences_matrix[current_row_index])))
dists.sort(key=lambda tup: tup[1])
print(dists)
|
"""Tests for the ``cogctl bundle enable`` command."""
import pytest
import cogctl.cli.bundle.enable as bundle # TODO: Eww, this import
pytestmark = pytest.mark.usefixtures("mocks")
# TODO: What happens when you try to enable a bundle that's already enabled?
def test_enable_no_version_enables_latest(cogctl):
    # Omitting the version argument enables the newest available version.
    result = cogctl(bundle.enable, ["disabled_bundle"])
    assert result.exit_code == 0
    assert result.output == """\
Enabled disabled_bundle 0.0.6
"""
def test_enable_with_version(cogctl):
    # An explicit version enables exactly that version.
    result = cogctl(bundle.enable, ["disabled_bundle", "0.0.4"])
    assert result.exit_code == 0
    assert result.output == """\
Enabled disabled_bundle 0.0.4
"""
def test_enable_nonexistent_version(cogctl):
    # A well-formed but unknown version is a usage error (exit code 2).
    result = cogctl(bundle.enable, ["enabled_bundle", "100.0.0"])
    assert result.exit_code == 2
    assert result.output == """\
Usage: enable [OPTIONS] NAME [VERSION]
Error: Invalid value for "version": No version 100.0.0 found for enabled_bundle
"""
def test_enable_nonexistent_bundle(cogctl):
    # An unknown bundle name is a usage error (exit code 2).
    result = cogctl(bundle.enable, ["not_a_bundle"])
    assert result.exit_code == 2
    assert result.output == """\
Usage: enable [OPTIONS] NAME [VERSION]
Error: Invalid value for "name": Bundle 'not_a_bundle' not found
"""
def test_enable_invalid_version(cogctl):
    # A malformed version string is rejected before any lookup happens.
    result = cogctl(bundle.enable, ["enabled_bundle", "not_a_version"])
    assert result.exit_code == 2
    assert result.output == """\
Usage: enable [OPTIONS] NAME [VERSION]
Error: Invalid value for "version": Versions must be of the form 'major.minor.patch'
""" # noqa: E501
|
from PIL import Image, ImageFile
import torchvision.transforms as transforms
import torch.utils.data as data
import os
Image.MAX_IMAGE_PIXELS = 933120000
ImageFile.LOAD_TRUNCATED_IMAGES = True
def is_img_file(filename):
    """Return True if *filename* has a recognised image extension.

    Matches case-insensitively and requires the dot, so a name merely
    *ending* in the letters "jpg" (e.g. "myjpg") is no longer treated as
    an image - the original suffix test had that false positive.
    """
    return filename.lower().endswith(('.jpg', '.png', '.jpeg'))
def default_loader(path):
    """Open the image at *path* and return it converted to RGB."""
    img = Image.open(path)
    return img.convert('RGB')
class Resize(object):
    """Resize an image so its shorter side becomes ``fine_size`` (train
    mode) or keeps its own length (test mode), with both output dimensions
    rounded down to multiples of 8."""
    def __init__(self, fine_size, is_test=False, interpolation=Image.BICUBIC):
        super(Resize, self).__init__()
        # fine_size: target length of the shorter side when is_test is False
        self.fine_size = fine_size
        # is_test: when True, keep the image's own size (rounded down to /8)
        self.is_test = is_test
        # interpolation: PIL resampling filter passed to torchvision's resize
        self.interpolation = interpolation
    def __call__(self, image):
        # PIL reports (width, height); scale relative to the shorter side
        # so the aspect ratio is preserved.
        w, h = image.size
        if w < h:
            scaled_w = self.fine_size if not self.is_test else w // 8 * 8
            scaled_h = (h / w) * scaled_w
            scaled_h = scaled_h // 8 * 8  # round down to a multiple of 8
        else:
            scaled_h = self.fine_size if not self.is_test else h // 8 * 8
            scaled_w = (w / h) * scaled_h
            scaled_w = scaled_w // 8 * 8
        scaled_h, scaled_w = int(scaled_h), int(scaled_w)
        scaled_image = transforms.functional.resize(image, (scaled_h, scaled_w), interpolation=self.interpolation)
        return scaled_image
def build_transform(cfg, train=False, interpolation=Image.BICUBIC, normalize=True):
    """Compose the image transform pipeline.

    train:     add random crop + horizontal flip and resize the short side
               to cfg.INPUT.FINE_SIZE; in test mode only resize (rounded
               to multiples of 8, see Resize).
    normalize: append ToTensor() so the output is a tensor in [0, 1].

    The original spelled out all four train/normalize combinations as
    separate Compose() calls; building the step list incrementally is
    behaviorally identical and removes the duplication.
    """
    steps = [Resize(cfg.INPUT.FINE_SIZE, is_test=not train, interpolation=interpolation)]
    if train:
        steps.append(transforms.RandomCrop(cfg.INPUT.FINE_SIZE))
        steps.append(transforms.RandomHorizontalFlip())
    if normalize:
        steps.append(transforms.ToTensor())
    return transforms.Compose(steps)
"""
WCT, FastPhotoStyle - learning-free
AdaIN - learning required
For learning-free methods, we do inference only, i.e., the desired content and style
images should be given as a pair.
For learning-required methods, we should pair content images with different kinds of
style images during training. It is done by building separate datasets for content
and style images - DatasetNoSeg. However, in test mode, the content and style images
should be paired.
"""
class DatasetNoSeg(data.Dataset):
    """Flat-directory image dataset (no segmentation maps).

    Lists every image file in *directory* (sorted for a deterministic
    order) and applies the transform built from *cfg* on load.
    """
    def __init__(self, cfg, directory, train=True):
        super(DatasetNoSeg, self).__init__()
        self.img_list = [x for x in os.listdir(directory) if is_img_file(x)]
        self.img_list = sorted(self.img_list)
        self.dir = directory
        # build transform
        self.transform = build_transform(cfg, train=train)
    def __len__(self):
        return len(self.img_list)
    def __getitem__(self, index):
        img_path = os.path.join(self.dir, self.img_list[index])
        try:
            img = default_loader(img_path)
        except OSError as e:
            # NOTE(review): exit() kills the whole process on one unreadable
            # file; re-raising or skipping the sample may be preferable.
            print(e)
            print(img_path)
            exit()
        img = self.transform(img)
        return img
|
def get_content_info(file):
    """Return simple statistics about *file* (a text blob): line count,
    occurrences of the substring 'else', and non-newline character count."""
    newline_count = file.count('\n')
    return {
        'lines': newline_count + 1,
        'else': file.count('else'),
        'characters': len(file) - newline_count,
    }
def print_file_info(info):
    """Print one 'number of <stat> in file: <value>' line per statistic."""
    for stat_name in info:
        print(f'number of {stat_name} in file: {info[stat_name]}')
# Read the JS source and print its statistics.  A with-statement both
# guarantees the handle is closed and avoids the original try/finally bug:
# if open() failed, the finally clause raised NameError because syntax_js
# was never bound.
with open('syntax.js', 'r') as syntax_js:
    content = syntax_js.read()
print_file_info(get_content_info(content))
# reference material: https://www.programiz.com/python-programming/file-operation
import sys
DEBUG=True if len(sys.argv) > 1 else False
def debug(*texts):
    """Print *texts* with a [DEBUG] prefix when DEBUG mode is enabled."""
    if not DEBUG:
        return
    print("[DEBUG]", *texts, flush=True)
def solve(senators):
    """Greedily evacuate senators (dict: party index -> count), up to two
    per step from the largest remaining parties, emitting party letters
    ('A' = party 1) separated by spaces; a second evacuation in a step is
    rolled back if it would leave one party with a strict majority."""
    sol = ""
    while True:
        r = sum(senators.values())
        if not r:
            break
        # Evacuate one senator from the currently largest party.
        k = max(senators, key = senators.get)
        sol += chr(64 + k)
        senators[k] -= 1
        r = sum(senators.values())
        if not r:
            break
        # Tentatively evacuate a second senator in the same step.
        k = max(senators, key = senators.get)
        senators[k] -= 1
        new_r = sum(senators.values())
        # NOTE(review): this compares the *party index* returned by max()
        # against half the remaining count; the majority check presumably
        # should compare senators[max(...)] (the largest remaining party
        # size) instead - TODO confirm intended semantics.
        if new_r > 0 and max(senators, key = senators.get) > sum(senators.values())/2:
            senators[k] += 1
        else:
            sol += chr(64 + k)
        sol += " "
    return sol
# Driver: read the number of cases from stdin, then for each case read the
# party sizes and print the evacuation order.
for case in range(1, int(sys.stdin.readline())+1):
    _ = sys.stdin.readline()  # party count line (unused; inferred from list)
    senators = dict(enumerate(map(int, sys.stdin.readline().split()), start=1))
    debug(senators)
    solution = solve(senators)
    print("Case #{0}: {1}".format(case, solution))
|
import numpy as np
from pylab import ylim, title, ylabel, xlabel
import matplotlib.pyplot as plt
from kalman import SingleStateKalmanFilter
import pandas as pd
from collections import defaultdict
import seaborn as sns
# ------------------------------------------- Filtering: PARAMETERS : (START) --------------------------------------
"""
The Parameters for the Filter are outlined below.
Q: The Process Covariance weights the final Kalman Output either towards the actual measurement or Kalman
Prediction. The lower this value is, for example, 0.005, the less noise/volatility in the output.
You must use judgement when setting this variable.
A: Process Innovation: I tend to leave as 1 (has no affect). If >1 then the graph will drift upwards by the scaling
factor.
x: We make an arbitrary guess at the beginning. After a certain amount of data, the model increases to the correct
value. This is similar to the Posterior in Bayesian Updating.
P: Again this is arbitrary and will update to the correct values after multiple data is passed into the Filter.
"""
# Initialise the Kalman Filter
A = 1 # No process innovation - use 1
C = 1 # Measurement
B = 0 # No control input
Q = 0.3 # Process covariance
R = 1 # Measurement covariance
x = 100 # Initial state_estimate
P = 1 # Initial covariance/ prob_estimate
# ------------------------------------------- Filtering: PARAMETERS : (END) --------------------------------------
real_time = False
if real_time:
""" Probably Don't Need This. It was designed to update incoming SLM Data in Real Time With the Filter"""
# This CSV will be extracted from the Cloud
df = pd.read_csv('kalman_data.csv')
col = df.columns
# Previous State Estimate
x = float(col[0])
# Previous Probability Estimate
P = float(col[1])
# Initialise The Kalman Object With Parameters
kalman_filter = SingleStateKalmanFilter(A, B, C, x, P, Q, R)
# Empty List For Kalman Filter Estimates
kalman_filter_estimates = []
# Need to read the incoming data for new iterations in real time.
if real_time:
""" If Real_Time is True then read in real time data from the Crontab. Else Just Run all the
data through the Filter.
"""
csv = 'kalman_real_time.csv'
else:
csv = '16090_CSO_SLM_data.csv'
df = pd.read_csv(csv)
level_data = df["Level"]
time_stamp = df["Time/Date"]
Value = df["Value"]
OFlow = df["OFlow"]
kalman_filtered_data_dict = defaultdict()
kalman_filtered_data_dict["Time/Date"] = ["Level","Value","OFlow"]
# ------------------------------------------- Benching: View Distribution (START) --------------------------------------
"""
We First need to inspect the distribution of the data points to identify benching locations.
Change show_distribution to True to view the plot.
Once Complete change this to False.
"""
df.Level = df.Level.round(1)
# Create Distribution
show_distribution = False
if show_distribution:
sns.displot(df, x="Level")
plt.show()
# ------------------------------------------- Benching: View Distribution ( END) ---------------------------------------
# ------------------------------------------- Benching: Benching Tool (START) --------------------------------
"""
Change Benching_Tool to True to view the data points that are most likely to be benching.
"""
Benching_Tool = False
# Threshold count of occurrences of a value above which we wish to erase it.
# The higher the count, the more likely benching is at this location.
count_to_erase = 50
if Benching_Tool:
    # Count how often each (rounded) level value occurs.
    df_benching_loc = df['Level'].value_counts()
    df_benching_loc = df_benching_loc.to_frame()
    df_benching_loc.reset_index(level=0, inplace=True)
    df_benching_loc.rename(columns={'index': 'Levels'}, inplace=True)
    df_benching_loc.rename(columns={'Level': 'Count'}, inplace=True)
    # Choose benching corridors.  Hint: run the show_distribution plot to
    # identify benching regions.  (The original listed the 95-97 corridor
    # twice; the duplicate clause was redundant and has been removed.)
    df_benching_loc = df_benching_loc[((df_benching_loc['Levels'] >= 24.) & (df_benching_loc['Levels'] <= 26.5)) \
                                      | ((df_benching_loc['Levels'] >= 95) & (df_benching_loc['Levels'] <= 97)) \
                                      | ((df_benching_loc['Levels'] >= 108) & (df_benching_loc['Levels'] <= 112)) \
                                      | ((df_benching_loc['Levels'] >= 66) & (df_benching_loc['Levels'] <= 68))]
    df_benching_loc = df_benching_loc[(df_benching_loc['Count'] >= count_to_erase)]
    # Blank out (NaN) every level value flagged as benching.
    level_series = df['Level']
    for level_to_remove in df_benching_loc['Levels']:
        level_series = level_series.replace(level_to_remove, np.nan)
    df = df.assign(Level=level_series)
    #print(df)
    level_data = df["Level"]
# ------------------------------------------- Benching: View Benching Tool (END) --------------------------------
# ------------------------------------------- Filtering: MAIN : (START) --------------------------------------
# Simulate the data arriving sequentially
for i in range(len(level_data)):
    temp = []
    #print(f'level data before: {level_data[i]}')
    # Feed the next level sample into the filter (control input is 0).
    kalman_filter.step(0, level_data[i])
    kalman_prediction = round(kalman_filter.current_state(),2)
    #print(f'Prediction After: {kalman_prediction}')
    if kalman_prediction < 0:
        print("negative!!!!")
    # Negative Data Should be Zerod
    kalman_prediction = max(kalman_prediction,0)
    kalman_filter_estimates.append(kalman_prediction)
    # Row layout: [filtered level, raw Value, raw OFlow], keyed by timestamp.
    temp.append(kalman_prediction)
    temp.append(Value[i])
    temp.append(OFlow[i])
    kalman_filtered_data_dict[time_stamp[i]] = temp
# kalman_filtered_data_dict should contain all of the filtered data.
#print(kalman_filtered_data_dict)
# ------------------------------------------- Filtering: MAIN : (END) --------------------------------------
# ------------------------------------------- Real-Time (Crontab) Variable Storing (START) -----------------------------
# Write and Save Variables For Real Time Updated
if real_time:
store_variables = False
else:
store_variables = True
if store_variables == True:
with open('kalman_data.csv', 'w') as f:
current_state_estimate, current_prob_estimate = kalman_filter.store_variables()
f.write("%s, %s\n" % (current_state_estimate, current_prob_estimate))
# ------------------------------------------- Real-Time (Crontab) Variable Storing (END) -----------------------------
# ------------------------------------------- View Filtered Data On Graph (START) -----------------------------
# Plot Seaborn
"""
Change Sea_Plot to True
"""
Sea_Plot = True
if Sea_Plot == True:
df["Time/Date"] = pd.to_datetime(df["Time/Date"])
start_date = '2021-01-01'
end_date = '2021-03-30'
start_index_loc = df[df['Time/Date'] == start_date].index
end_index_loc = df[df['Time/Date'] == end_date].index
level_data = level_data[start_index_loc[0]:end_index_loc[0]]
level_data = level_data.reset_index()
level_data = level_data["Level"]
# Format Seaborn Data
g = sns.lineplot(data=level_data, label = "Unfiltered Sensor Data").set_title(f"Sensor Data Between \n {start_date} & {end_date}")
g = sns.lineplot(data=kalman_filter_estimates[start_index_loc[0]:end_index_loc[0]], label = "Filtered Data")
g.set_xticklabels([start_date])
ylabel('CS0 Water Level (%)')
ylim([0,150])
plt.legend()
plt.show()
# ------------------------------------------- View Filtered Data On Graph (END) -----------------------------
use_matlib = False
if use_matlib:
plt.plot(level_data)
title("Filtering Real-Time Data")
ylabel('CS0 Water Level (%)')
xlabel('Dates')
ylim([0,100])
plt.plot(kalman_filter_estimates, 'k', linewidth=2.0)
# Show the plot
plt.show()
# ------------------------------------------- Create Filtered Data CSV (START)-----------------------------------------
create_csv = True
if create_csv:
with open('16090_CSO_SLM_data_KF.csv', 'w') as f:
for i, key in enumerate(kalman_filtered_data_dict.keys()):
if i == 0:
f.write("%s,%s,%s,%s\n" % (key, kalman_filtered_data_dict[key][0], kalman_filtered_data_dict[key][1], kalman_filtered_data_dict[key][2]))
else:
f.write("%s,%.2f,%.2f,%i\n"%(key, kalman_filtered_data_dict[key][0],kalman_filtered_data_dict[key][1], 1 if kalman_filtered_data_dict[key][0] > 100 else 0))
# ------------------------------------------- Create Filtered Data CSV (END)--------------------------------------------
|
# Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
from contextlib import contextmanager
from giza.command import command
logger = logging.getLogger('giza.git')
class GitRepo(object):
    """Thin wrapper around the ``git`` command line for one repository."""
    def __init__(self, path=None):
        """Manage the repository at *path* (default: current directory)."""
        if path is None:
            self.path = os.getcwd()
        else:
            self.path = path
        logger.debug("created git repository management object for {0}".format(self.path))
    def cmd(self, *args):
        """Run ``git <args>`` inside the repository, capturing output."""
        args = ' '.join(args)
        return command(command='cd {0} ; git {1}'.format(self.path, args), capture=True)
    def remotes(self):
        """Return the list of configured remote names."""
        return self.cmd('remote').out.split('\n')
    def branch_file(self, path, branch='master'):
        """Return the contents of *path* as of *branch*."""
        return self.cmd('show {branch}:{path}'.format(branch=branch, path=path)).out
    def checkout(self, ref):
        """Check out the given branch, tag, or commit."""
        return self.cmd('checkout', ref)
    def hard_reset(self, ref='HEAD'):
        """Reset the working tree and index to *ref*, discarding changes."""
        return self.cmd('reset', '--hard', ref)
    def reset(self, ref='HEAD'):
        """Reset the index (but not the working tree) to *ref*."""
        return self.cmd('reset', ref)
    def fetch(self, remote='origin'):
        """Fetch refs from *remote*."""
        return self.cmd('fetch', remote)
    def update(self):
        """Pull with rebase from the default upstream."""
        return self.cmd('pull', '--rebase')
    def pull(self, remote='origin', branch='master'):
        """Pull *branch* from *remote*."""
        return self.cmd('pull', remote, branch)
    def current_branch(self):
        """Return the name of the currently checked-out branch."""
        return self.cmd('symbolic-ref', 'HEAD').out.split('/')[2]
    def sha(self, ref='HEAD'):
        """Return the commit sha that *ref* resolves to."""
        return self.cmd('rev-parse', '--verify', ref).out
    def clone(self, remote, repo_path=None, branch=None):
        """Clone *remote*, optionally into *repo_path* at *branch*."""
        args = ['clone', remote]
        if branch is not None:
            args.extend(['--branch', branch])
        if repo_path is not None:
            args.append(repo_path)
        return self.cmd(*args)
    def cherry_pick(self, *args):
        """Cherry-pick one or more commits.

        Accepts either ``cherry_pick('sha1', 'sha2')`` or
        ``cherry_pick(['sha1', 'sha2'])``.  The original unconditionally
        unpacked a single argument, so a lone sha *string* was iterated
        character by character; unpack only when it is not a string.
        """
        if len(args) == 1 and not isinstance(args[0], str):
            args = args[0]
        for commit in args:
            self.cmd('cherry-pick', commit)
            logger.info('cherry picked ' + commit)
    def am(self, patches, repo=None, sign=False):
        """Apply each patch in *patches* with ``git am --3way``.

        Entries may be full patch URLs, commit-ish strings (resolved
        against *repo*), or pull-request numbers (also against *repo*).
        """
        cmd_base = 'curl {path} | git am --3way'
        if sign is True:
            cmd_base += ' --signoff'
        for obj in patches:
            if obj.startswith('http'):
                if not obj.endswith('.patch'):
                    obj += '.patch'
                command(cmd_base.format(path=obj))
                logger.info("applied {0}".format(obj))
            elif re.search('[a-zA-Z]+', obj):
                # Contains letters: treat it as a commit sha on *repo*.
                path = '/'.join([ repo, 'commit', obj ]) + '.patch'
                command(cmd_base.format(path=path))
                logger.info('merged commit {0} for {1} into {2}'.format(obj, repo, self.current_branch()))
            else:
                # Digits only: treat it as a pull-request number on *repo*.
                if repo is None:
                    logger.warning('not applying "{0}", because of missing repo'.format(obj))
                else:
                    path = '/'.join([ repo, 'pull', obj ]) + '.patch'
                    command(cmd_base.format(path=path))
                    logger.info("applied {0}".format(obj))
    @contextmanager
    def branch(self, name):
        """Context manager: switch to branch *name*, then switch back."""
        starting_branch = self.current_branch()
        if name != starting_branch:
            self.checkout(name)
        yield
        if name != starting_branch:
            self.checkout(starting_branch)
|
# Author: ambiguoustexture
# Date: 2020-03-11
# Word-analogy demo: compute vec(Spain) - vec(Madrid) + vec(Athens) over a
# PCA-reduced word-context matrix and print the ten most similar words.
import pickle
import numpy as np
from scipy import io
from similarity_cosine import sim_cos
file_context_matrix_X_PC = './context_matrix_X_PC'
file_t_index_dict = './t_index_dict'
# NOTE(review): t_index_dict is first the open file handle and is then
# rebound to the unpickled dict - confusing but harmless.
with open(file_t_index_dict, 'rb') as t_index_dict:
    t_index_dict = pickle.load(t_index_dict)
context_matrix_X_PC = io.loadmat(file_context_matrix_X_PC)['context_matrix_X_PC']
words_additive = context_matrix_X_PC[t_index_dict['Spain']] \
                - context_matrix_X_PC[t_index_dict['Madrid']] \
                + context_matrix_X_PC[t_index_dict['Athens']]
# Cosine similarity of the analogy vector against every word vector.
words_similarities = [sim_cos(words_additive, context_matrix_X_PC[i])
                        for i in range(len(t_index_dict))]
# argsort is ascending; walk the last ten indices in reverse for the top 10.
words_similarities_sorted = np.argsort(words_similarities)
words = list(t_index_dict.keys())
for index in words_similarities_sorted[:-11:-1]:
    print(words[index].ljust(14, ' '), words_similarities[index])
|
'''
1. Given an NxN matrix, write a function that would rotate it
by 90 degrees clock-wise or counter clockwise.
'''
# Rotate n clockwise or counter-clockwise
# If d == 0, clockwise
# If d == 1, counter-clockwise
# Else no change
def rotate(n, d):
    """Rotate the NxN matrix *n* by 90 degrees.

    d == 0 rotates clockwise, d == 1 counter-clockwise; any other value
    returns the matrix unchanged.  Always returns a list of lists - the
    original inconsistently returned a lazy zip object (clockwise) or a
    list of tuples (counter-clockwise).
    """
    if d == 0:
        return [list(row) for row in zip(*n[::-1])]
    if d == 1:
        return [list(row) for row in zip(*n)][::-1]
    return n
def matrixPrint(m):
    """Print the matrix one row per line, cells separated (and trailed) by
    a space, with a blank line after every row."""
    for row in m:
        rendered = ''.join(str(cell) + ' ' for cell in row)
        print(rendered)
        print()
# Demo: print a 3x3 matrix and both of its 90-degree rotations.
m = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]]
matrixPrint(m)
print('Clockwise:')
matrixPrint(rotate(m, 0))
print('Counter-Clockwise:')
matrixPrint(rotate(m, 1))
|
import argparse
import os
import cv2
import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tf_inferencer import TFInferencer
# The following variables are used to configure the compression analysis
COMPRESSION_LEVELS = [100, 90, 80, 70, 60, 50, 40, 30, 20, 10]
ENCODING = "JPG" # If this is changed, be sure to change the corresponding OpenCV encoding parameter as well
def _image_id(image_name):
    """Extract the integer COCO image id from a file name such as
    'COCO_val2017_000000123456.jpg' (third underscore-separated field)."""
    return int(image_name.split('_')[2].split('.')[0].lstrip('0'))
def main():
    """Re-encode a directory of images at several JPEG quality levels, run
    the detector on each, dump per-level COCO result files, and print the
    COCO bbox metrics per compression level.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--dir", "-d", help="Directory of images to use for comparison", required=True)
    parser.add_argument("--model", "-m", help="Path of machine learning model", required=True)
    parser.add_argument("--annotations", "-a", help="Path of annotations file", default="annotations.json")
    args = parser.parse_args()
    inferencer = TFInferencer(args.model)
    images = os.listdir(args.dir)
    imgIds = [_image_id(image_name) for image_name in images]
    for comp_level in COMPRESSION_LEVELS:
        # Reset per level: the original accumulated `results` across all
        # levels, so each results{level}.json also contained every previous
        # level's detections and the evaluation double-counted them.
        results = []
        for i, image_name in enumerate(images):
            # Round-trip the image through lossy encoding at this quality.
            img = cv2.imread(os.path.join(args.dir, image_name))
            _, img = cv2.imencode("." + ENCODING, img, [cv2.IMWRITE_JPEG_QUALITY, comp_level])
            img = cv2.imdecode(img, 1)
            inference = inferencer.inference(img)
            for res in inference:
                entry = {"image_id": _image_id(image_name),
                         "category_id": int(res[1]),
                         "bbox": [round(float(x), 2) for x in res[0]],
                         "score": round(float(res[2]), 3)}
                results.append(entry)
            print("Inferenced image", i, "| Compression level", comp_level)
        with open("results/results{}.json".format(comp_level), "w") as f:
            f.write(json.dumps(results))
    inferencer.sess.close()
    cocoGT = COCO(args.annotations)
    for comp_level in COMPRESSION_LEVELS:
        print("COMPRESSION LEVEL", comp_level)
        cocoDT = cocoGT.loadRes("results/results{}.json".format(comp_level))
        # Renamed from `eval`, which shadowed the builtin.
        coco_eval = COCOeval(cocoGT, cocoDT, 'bbox')
        coco_eval.params.imgIds = imgIds
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
if __name__ == '__main__':
main()
|
# coding=utf-8
# Demo: binding handlers via the Button `command` option, and using a lambda
# to pass arguments to the handler.  (Original comments were in Chinese;
# the runtime strings are deliberately left in Chinese.)
from tkinter import *
root = Tk()
root.geometry("270x50")
def mouseTest1():
    # Simple case: no event object is needed, so a plain `command=` works.
    print("command方式,简单情况:不涉及获取event对象,可以使用")
def mouseTest2(a,b):
    # Handler that takes arguments; needs the lambda below to be bound.
    print("a={0},b={1}".format(a, b))
Button(root, text="测试command1", command=mouseTest1).pack(side="left")
# The lambda lets us pass fixed arguments to mouseTest2 at click time.
Button(root, text="测试command2", command=lambda: mouseTest2("gaoqi", "xixi")).pack(side="left")
root.mainloop()
|
# -*- coding: utf-8 -*-
#
import numpy
import fastfunc
import asciiplotlib as apl
def print_stats(mesh, extra_cols=None):
    """Print angle and cell-quality histograms and statistics for *mesh*.

    extra_cols: optional list of extra text columns appended to the grid.
    """
    extra_cols = [] if extra_cols is None else extra_cols
    # Angles in degrees.
    angles = mesh.angles / numpy.pi * 180
    angles_hist, angles_bin_edges = numpy.histogram(
        angles, bins=numpy.linspace(0.0, 180.0, num=73, endpoint=True)
    )
    q = mesh.cell_quality
    q_hist, q_bin_edges = numpy.histogram(
        q, bins=numpy.linspace(0.0, 1.0, num=41, endpoint=True)
    )
    grid = apl.subplot_grid(
        (1, 4 + len(extra_cols)), column_widths=None, border_style=None
    )
    grid[0, 0].hist(angles_hist, angles_bin_edges, grid=[24], bar_width=1, strip=True)
    grid[0, 1].aprint("min angle:     {:7.3f}".format(numpy.min(angles)))
    # Compute the average instead of the original hard-coded 60 (which is
    # exact only for purely triangular meshes, where each cell's angles
    # sum to 180 degrees).
    grid[0, 1].aprint("avg angle:     {:7.3f}".format(numpy.average(angles)))
    grid[0, 1].aprint("max angle:     {:7.3f}".format(numpy.max(angles)))
    grid[0, 1].aprint("std dev angle: {:7.3f}".format(numpy.std(angles)))
    grid[0, 2].hist(q_hist, q_bin_edges, bar_width=1, strip=True)
    grid[0, 3].aprint("min quality: {:5.3f}".format(numpy.min(q)))
    grid[0, 3].aprint("avg quality: {:5.3f}".format(numpy.average(q)))
    grid[0, 3].aprint("max quality: {:5.3f}".format(numpy.max(q)))
    for k, col in enumerate(extra_cols):
        grid[0, 4 + k].aprint(col)
    grid.show()
    return
def runner(
    get_new_points,
    mesh,
    tol,
    max_num_steps,
    omega=1.0,
    method_name=None,
    verbose=False,
    callback=None,
    step_filename_format=None,
    uniform_density=False,
    get_stats_mesh=lambda mesh: mesh,
):
    """Generic fixed-point smoothing loop.

    Repeatedly moves the mesh nodes toward ``get_new_points(mesh)``,
    relaxed by *omega*, until the largest squared step falls below
    ``tol**2`` or *max_num_steps* is reached.  Prints statistics before
    and after (and per step when *verbose*), optionally saves an image per
    step via *step_filename_format*, and invokes *callback* each
    iteration.  *get_stats_mesh* maps the working mesh to the mesh used
    for statistics/plotting.
    """
    k = 0
    stats_mesh = get_stats_mesh(mesh)
    print("\nBefore:")
    print_stats(stats_mesh)
    if step_filename_format:
        stats_mesh.save(
            step_filename_format.format(k),
            show_coedges=False,
            show_axes=False,
            cell_quality_coloring=("viridis", 0.0, 1.0, False),
        )
    if callback:
        callback(k, mesh)
    mesh.flip_until_delaunay()
    while True:
        k += 1
        new_points = get_new_points(mesh)
        # Relaxed update toward the proposed node positions.
        diff = omega * (new_points - mesh.node_coords)
        # Abort the loop if the update is small
        is_final = (
            numpy.all(numpy.einsum("ij,ij->i", diff, diff) < tol ** 2)
            or k >= max_num_steps
        )
        # The code once checked here if the orientation of any cell changes and reduced
        # the step size if it did. Computing the orientation is unnecessarily costly
        # though and doesn't easily translate to shell meshes. Since orientation changes
        # cannot occur, e.g., with CPT, advise the user to apply a few steps of a robust
        # smoother first (CPT) if the method crashes, or use relaxation.
        mesh.node_coords += diff
        mesh.update_values()
        mesh.flip_until_delaunay()
        if verbose or is_final or step_filename_format:
            stats_mesh = get_stats_mesh(mesh)
            if verbose and not is_final:
                print("\nstep {}:".format(k))
                print_stats(stats_mesh)
            elif is_final:
                info = "{} steps".format(k)
                if method_name is not None:
                    if abs(omega - 1.0) > 1.0e-10:
                        method_name += ", relaxation parameter {}".format(omega)
                    info += " of " + method_name
                print("\nFinal ({}):".format(info))
                print_stats(stats_mesh)
            if step_filename_format:
                stats_mesh.save(
                    step_filename_format.format(k),
                    show_coedges=False,
                    show_axes=False,
                    cell_quality_coloring=("viridis", 0.0, 1.0, False),
                )
        if callback:
            callback(k, mesh)
        if is_final:
            break
    return
def get_new_points_volume_averaged(mesh, reference_points):
    """Volume-weighted average of cell reference points around each node.

    Each node moves to the cell-volume-weighted mean of the reference
    points (e.g. barycenters/circumcenters) of its adjacent cells.
    Boundary nodes are kept fixed.
    """
    # Weight every cell's reference point by the cell volume.
    weighted = (reference_points.T * mesh.cell_volumes).T

    numerator = numpy.zeros(mesh.node_coords.shape)
    denominator = numpy.zeros(len(mesh.node_coords))
    # Scatter-add cell contributions to each of the cell's nodes.
    for node_indices in mesh.cells["nodes"].T:
        fastfunc.add.at(numerator, node_indices, weighted)
        fastfunc.add.at(denominator, node_indices, mesh.cell_volumes)

    averaged = numerator / denominator[:, None]

    # Boundary nodes stay where they are.
    boundary = mesh.is_boundary_node
    averaged[boundary] = mesh.node_coords[boundary]
    return averaged
def get_new_points_count_averaged(mesh, reference_points):
    """Plain (count-)averaged reference points in each node's star.

    Estimating the density as 1/|tau| leads to a simplification: the new
    point is simply the unweighted mean of the reference points
    (barycenters/circumcenters) of the adjacent cells. Boundary nodes are
    kept fixed.
    """
    totals = numpy.zeros(mesh.node_coords.shape)
    counts = numpy.zeros(len(mesh.node_coords), dtype=int)
    # Scatter-add each cell's reference point (and a count of 1) to the
    # cell's nodes.
    for node_indices in mesh.cells["nodes"].T:
        fastfunc.add.at(totals, node_indices, reference_points)
        fastfunc.add.at(counts, node_indices, numpy.ones(node_indices.shape, dtype=int))

    averaged = totals / counts[:, None]

    # Boundary nodes stay where they are.
    boundary = mesh.is_boundary_node
    averaged[boundary] = mesh.node_coords[boundary]
    return averaged
|
import math

# Read a number from the user and print its square root.
s = input('请输入一个数 ')  # prompt: "enter a number"
s = float(s)
# BUG FIX: the original used `s > 0`, wrongly reporting "no square root"
# for 0 even though sqrt(0) == 0. Only negative numbers lack a real root.
if s >= 0:
    print(math.sqrt(s))
else:
    print('没有平方根')  # "no square root"
# !/usr/bin/python3
# Demonstrates Python's arithmetic operators on sample operands.
a = 21
b = 10
c = 0

c = a + b  # addition
print("1 - c 的值为:", c)

c = a - b  # subtraction
print("2 - c 的值为:", c)

c = a * b  # multiplication
print("3 - c 的值为:", c)

c = a / b  # true division (always a float in Python 3)
print("4 - c 的值为:", c)

c = a % b  # modulo (remainder)
print("5 - c 的值为:", c)

# Change the operands a, b and recompute c
a = 2
b = 3
c = a ** b  # exponentiation
print("6 - c 的值为:", c)

a = 10
b = 5
c = a // b  # floor division
print("7 - c 的值为:", c)
|
# Convert a Colombian-peso amount to whole US dollars at a fixed rate.
exchange_rate = 3875  # pesos per dollar
amount_pesos = float(input("Ingrese un monto: "))
usd = round(amount_pesos / exchange_rate)
print("Tienes " + " USD$ " + str(usd))
|
def genWaveHeader(data):
    """Return the canonical 44-byte RIFF/WAVE header for *data*.

    The format is fixed: PCM, mono, 16 kHz sample rate, 16 bits per sample
    (hence byte rate 32000 and block align 2). Only the length of *data*
    is used; the payload itself is not inspected.

    Replaces the original per-byte chr()/concatenation code with a single
    struct.pack call producing byte-identical output.
    """
    import struct

    length = len(data)
    header = struct.pack(
        "<4sI4s4sIHHIIHH4sI",
        b"RIFF",
        36 + length,  # RIFF chunk size: "WAVE" + (8+16) fmt chunk + (8+length) data chunk
        b"WAVE",
        b"fmt ",
        16,           # fmt chunk size for PCM
        1,            # audio format: 1 = PCM
        1,            # channels: mono
        16000,        # sample rate (Hz)
        32000,        # byte rate = 16000 Hz * 1 channel * 2 bytes/sample
        2,            # block align = channels * bytes/sample
        16,           # bits per sample
        b"data",
        length,       # data chunk size
    )
    # Keep the original return type (bytearray) for callers.
    return bytearray(header)
# this is the program for socket communication
# it can be used both for server and client
# python socket_communication.py -server for server
# python socket_communication.py -client for client
import wx
import socket
import time
import sys
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = sys.argv[0]
print 'port=', sys.argv[0]
# for server point
if '-server' in sys.argv: #server
print 'server program running'
sock.bind(('localhost', 16898))
sock.listen(5)
connection, address = sock.accept()
#connection.settimeout(5)
while True:
try:
buf = connection.recv(1024)
print buf
#if buf == '1':
# connection.send('Welcome to server!')
#else:
# connection.send('Please go out!')
connection.send(buf)
except socket.timeout:
print 'time out'
# for client point
if '-client' in sys.argv: #client
print 'client program running'
sock.connect(('localhost', 16898))
time.sleep(1)
while True:
user_input = raw_input('input anything:')
sock.send(user_input)
print sock.recv(1024)
#both server point and client point need this operation
sock.close
|
from builtins import range
import numpy as np
# from skimage.io import imread
import tensorflow as tf
import sys
import cv2
def main():
    """Bilateral-filter an image with the custom lattice_filter TF op.

    Reads ./img/face.png, builds a 5-D (x, y, r, g, b) feature position per
    pixel, runs the permutohedral lattice filter, normalizes the result to
    [0, 1] and writes it to ./results/filtered_face.png.
    """
    # Read in image with the shape (rows, cols, channels), scaled to [0, 1].
    im = cv2.imread('./img/face.png')
    im = np.array(im) / 255.

    inv_spatial_stdev = 1. / 5.
    inv_color_stdev = 1. / .125

    rows = im.shape[0]
    cols = im.shape[1]
    nb_points = rows * cols

    # Construct the position vectors out of x, y, r, g, and b.
    # Vectorized with numpy instead of the original per-pixel Python loop.
    grid_r, grid_c = np.meshgrid(np.arange(rows), np.arange(cols), indexing="ij")
    flat_colors = im.reshape(nb_points, 3)
    positions = np.empty((nb_points, 5), dtype='float32')
    positions[:, 0] = inv_spatial_stdev * grid_c.ravel()
    positions[:, 1] = inv_spatial_stdev * grid_r.ravel()
    positions[:, 2:] = inv_color_stdev * flat_colors
    color = flat_colors.astype('float32')

    # Fixed typo in the original local name ("fllter").
    lattice_filter_module = tf.load_op_library("./my_op.so")
    position_tensor = tf.placeholder(dtype=tf.float32, shape=positions.shape, name="pos_input")
    value_tensor = tf.placeholder(dtype=tf.float32, shape=color.shape, name="val_input")
    out_tensor = lattice_filter_module.lattice_filter(value_tensor, position_tensor)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
        out_ = sess.run(out_tensor, feed_dict={position_tensor: positions, value_tensor: color})

    # Back to image shape, then min-max normalize to [0, 1] for writing.
    output = out_.reshape(rows, cols, 3).astype("float32")
    output -= output.min()
    output /= output.max()
    cv2.imwrite("./results/filtered_face.png", (255 * output).astype(np.uint8))


if __name__ == '__main__':
    main()
|
import torch
import argparse
import docker
import os
import sys
import logging
import torchvision.models as models
from cloudpickle import CloudPickler
import tempfile
import tarfile
# Torchvision architectures deployed when "-m all" is requested.
trained_models = ['resnet18', 'densenet201']

# Module-level logger; handler/level configuration is left to the caller.
logger = logging.getLogger(__name__)
class ClipperException(Exception):
    """A generic exception indicating that Clipper encountered a problem."""

    def __init__(self, msg, *args):
        # Keep the message available as an attribute in addition to args.
        self.msg = msg
        # BUG FIX: the original called super(Exception, self).__init__,
        # which starts the MRO lookup *after* Exception and therefore
        # skips Exception's own initializer; anchor the call at this class.
        super(ClipperException, self).__init__(msg, *args)
# Py2/Py3 compatibility shim: pick an in-memory file class suitable for
# holding the (byte-encoded) Dockerfile contents.
if sys.version_info < (3, 0):
    try:
        # Prefer the faster C implementation when available.
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    PY3 = False
else:
    # On Python 3 the Dockerfile text is encoded to bytes before being
    # written, so BytesIO is the right stand-in (aliased to keep one name).
    from io import BytesIO as StringIO
    PY3 = True
def build_model(name,
                base_image,
                container_registry=None,
                pkgs_to_install=None):
    """Build a Docker image for model *name* on top of *base_image*.

    A build context containing only a generated Dockerfile is tarred up
    in memory and handed to the Docker daemon.

    name -- model name; used as the image tag and baked into the entrypoint
    base_image -- base image for the generated Dockerfile's FROM line
    container_registry -- optional registry prefix for the image tag
    pkgs_to_install -- optional list of pip packages; when given, the
        generated RUN line installs them

    Returns the image tag that was built.
    """
    # run_cmd = ''
    run_cmd = 'RUN python deploy.py -m {name}'.format(name=name)
    entrypoint = 'ENTRYPOINT ["python", "pytorch_container.py", "-m", "{name}"]'.format(name=name)
    if pkgs_to_install:
        # NOTE(review): this REPLACES run_cmd, so deploy.py is NOT run when
        # extra packages are requested -- confirm this is intended.
        run_as_lst = 'RUN apt-get -y install build-essential && pip install'.split(
            ' ')
        run_cmd = ' '.join(run_as_lst + pkgs_to_install)
    # NOTE(review): suffix="tar" lacks a dot, so the temp file name ends in
    # "...tar" rather than ".tar" (harmless; the file is passed by handle).
    with tempfile.NamedTemporaryFile(
            mode="w+b", suffix="tar") as context_file:
        # Create build context tarfile
        with tarfile.TarFile(
                fileobj=context_file, mode="w") as context_tar:
            # context_tar.add(model_data_path)
            # From https://stackoverflow.com/a/740854/814642
            try:
                # Python 3 path: StringIO is io.BytesIO here, so encode the
                # Dockerfile text to bytes first.
                df_contents = StringIO(
                    str.encode(
                        # "FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
                        "FROM {container_name}\n{run_command}\n{entrypoint}".
                        format(
                            container_name=base_image,
                            # data_path=model_data_path,
                            run_command=run_cmd,
                            entrypoint=entrypoint)))
                df_tarinfo = tarfile.TarInfo('Dockerfile')
                df_contents.seek(0, os.SEEK_END)
                df_tarinfo.size = df_contents.tell()
                df_contents.seek(0)
                context_tar.addfile(df_tarinfo, df_contents)
            except TypeError:
                # Python 2 fallback: cStringIO takes a str directly.
                # NOTE(review): this branch omits the ENTRYPOINT line --
                # confirm whether that difference is intentional.
                df_contents = StringIO(
                    # "FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
                    "FROM {container_name}\n{run_command}\n".
                    format(
                        container_name=base_image,
                        # data_path=model_data_path,
                        run_command=run_cmd))
                df_tarinfo = tarfile.TarInfo('Dockerfile')
                df_contents.seek(0, os.SEEK_END)
                df_tarinfo.size = df_contents.tell()
                df_contents.seek(0)
                context_tar.addfile(df_tarinfo, df_contents)
        # Exit Tarfile context manager to finish the tar file
        # Seek back to beginning of file for reading
        context_file.seek(0)
        image = "{name}".format(name=name)
        print(image)
        if container_registry is not None:
            image = "{reg}/{image}".format(
                reg=container_registry, image=image)
        docker_client = docker.from_env()
        # logger.info("Building model Docker image with model data from {}".format(model_data_path))
        image_result, build_logs = docker_client.images.build(
            fileobj=context_file, custom_context=True, tag=image)
        for b in build_logs:
            if 'stream' in b and b['stream'] != '\n':  # log build steps only
                logger.info(b['stream'].rstrip())
    return image
def build_and_deploy_model(name,
                           base_image,
                           container_registry=None,
                           pkgs_to_install=None):
    """Build the model's Docker image and return its tag.

    Thin wrapper around build_model with an identical signature. The
    original discarded build_model's return value; return it so callers
    can use the image tag (backward compatible -- callers previously
    received None and ignored it).
    """
    image = build_model(name, base_image,
                        container_registry, pkgs_to_install)
    return image
def save_python_function(name):
    """Return the filesystem path used to serialize model *name*."""
    return "/tmpfs/model/{}.model".format(name)
def deploy_pytorch_model(name,
                         pytorch_model,
                         base_image="default",
                         pkgs_to_install=None):
    """Serialize *pytorch_model* and build/deploy its container image.

    name -- model name; used for the save path and the image tag
    pytorch_model -- a torch.nn.Module; only its state_dict is saved
    base_image -- Docker base image, or "default" to pick one based on the
        running Python version
    pkgs_to_install -- optional list of pip packages to install in the image

    Raises ClipperException on unsupported Python versions or when saving
    or building fails.
    """
    try:
        serialization_dir = save_python_function(name)
        # Save only the model parameters, not the pickled module object.
        torch.save(pytorch_model.state_dict(), serialization_dir)
        py_minor_version = (sys.version_info.major, sys.version_info.minor)
        # Check if Python 2 or Python 3 image
        if base_image == "default":
            if py_minor_version < (3, 0):
                logger.info("Using Python 2 base image")
                base_image = "pytorch-container"
            elif py_minor_version == (3, 5):
                logger.info("Using Python 3.5 base image")
                base_image = "pytorch-container"
            elif py_minor_version == (3, 6):
                logger.info("Using Python 3.6 base image")
                # BUG FIX: the original never assigned base_image in this
                # branch, leaving the literal "default" as the Docker FROM
                # image; use the same container as the other versions.
                base_image = "pytorch-container"
            else:
                msg = (
                    "PyTorch deployer only supports Python 2.7, 3.5, and 3.6. "
                    "Detected {major}.{minor}").format(
                        major=sys.version_info.major,
                        minor=sys.version_info.minor)
                logger.error(msg)
                raise ClipperException(msg)
        # Deploy model
        # BUG FIX: pkgs_to_install was previously passed positionally and
        # landed in the container_registry parameter; pass it by keyword.
        build_and_deploy_model(
            name, base_image,
            pkgs_to_install=pkgs_to_install)
    except ClipperException:
        # Already a meaningful Clipper error (e.g. unsupported Python
        # version); don't re-wrap it as a "saving" failure.
        raise
    except Exception as e:
        raise ClipperException("Error saving torch model: %s" % e)
def deploy_and_test_model(model,
                          model_name):
    # Convenience wrapper: deploy *model* under *model_name* with defaults.
    # NOTE(review): despite the name, no testing happens here.
    deploy_pytorch_model(model_name, model)
def main():
    """Parse the -m flag and deploy the requested torchvision model(s)."""
    arg_parser = argparse.ArgumentParser("Deploy models")
    arg_parser.add_argument("-m", dest="model_name", type=str, required=True)
    parsed = arg_parser.parse_args()

    # Deterministic weights for freshly constructed models.
    torch.manual_seed(0)

    if parsed.model_name == "all":
        names = trained_models
    else:
        names = [parsed.model_name]
    for model_name in names:
        deploy_and_test_model(getattr(models, model_name)(), model_name)


if __name__ == '__main__':
    main()
"""
Stripping Filtering file for B->D*munuX where D0->K3pi
@author Philip Hunt
@date 2013-07-26
"""
from Gaudi.Configuration import *
MessageSvc().Format = "% F%60W%S%7W%R%T %0W%M"
#
# Build the streams and stripping object
#
from StrippingConf.Configuration import StrippingConf, StrippingStream
from StrippingSettings.Utils import strippingConfiguration
from StrippingArchive.Utils import buildStream, cloneLinesFromStream
from StrippingArchive import strippingArchive
# import our modified stripping line module (no default PID, IP chi2 and pt cut son D daughters)
import CharmConfig.StrippingB2DMuNuX_AllNoPIDsHadrons as myStripModule
stripping='stripping20'
stripConf='B2DMuNuX'
#get the configuration dictionary from the database
config = strippingConfiguration(stripping)
# Make the necessary changes to the configuration dictionary
cnf=config[stripConf]['CONFIG']
# remove the kaon cuts (up to the value in StdLooseKaons)
cnf['KaonPIDK']=-1.0e6
cnf['KaonPIDKTight']=-1.0e6
# remove the pion cuts (up to the value in StdLoosePions)
cnf['PionPIDK']=1.0e6
cnf['PionPIDKTight']=1.0e6
# remove the D daughter IP chi2 cuts
cnf['MINIPCHI2']=0
myConf=myStripModule.B2DMuNuXAllLinesConf(stripConf, cnf)
streams = []
###########################################
###########################################
# Lines demanded by analyst (Philip Hunt)
#Strippingb2D0MuXK3PiB2DMuNuX
_filterlines = StrippingStream('Semileptonic')
_filterlines.appendLines(myConf.lines())
# Select lines you want
# Stream name will control name in book-keeping - make it something descriptive
MyStream = StrippingStream("B2DstMuNuX_D02K3Pi.StripTrig")
# Select lines by name
MyLines = [ 'Strippingb2D0MuXK3PiDstB2DMuNuXLine' ]
for line in _filterlines.lines :
if line.name() in MyLines:
line._prescale = 1.0
MyStream.appendLines( [ line ] )
# Configure Stripping
from Configurables import ProcStatusCheck
filterBadEvents = ProcStatusCheck()
sc = StrippingConf( Streams = [ MyStream ],
MaxCandidates = 2000,
AcceptBadEvents = False,
BadEventSelection = filterBadEvents )
MyStream.sequence().IgnoreFilterPassed = False # so that we get only selected events written out
###########################################
###########################################
from DSTWriters.microdstelements import *
from DSTWriters.Configuration import (SelDSTWriter,
stripDSTStreamConf,
stripDSTElements
)
#
# Configuration of SelDSTWriter (DST)
#
SelDSTWriterElements = {
'default' : stripDSTElements()
}
SelDSTWriterConf = {
'default' : stripDSTStreamConf()
}
#
# Uncomment to use MicroDST writing
#
# from DSTWriters.Configuration import (stripMicroDSTStreamConf,
# stripMicroDSTElements
# )
## SelDSTWriterElements = {
## 'default' : stripMicroDSTElements(pack=True,
## isMC=True)
## }
## SelDSTWriterConf = {
## 'default' : stripMicroDSTStreamConf(pack=True,
## isMC=True)
## }
for stream in sc.activeStreams() :
print "there is a stream called " + stream.name() + " active"
dstWriter = SelDSTWriter( "MyDSTWriter",
StreamConf = SelDSTWriterConf,
MicroDSTElements = SelDSTWriterElements,
OutputFileSuffix ='',
SelectionSequences = sc.activeStreams()
)
#----------Include trigger filtering---------------------------
# Bring in the filter
from PhysConf.Filters import LoKi_Filters
trigfltrs = LoKi_Filters (
HLT_Code = "HLT_PASS_RE('Hlt1Track.*Decision') & (HLT_PASS_RE('Hlt2.*Topo.*Decision') | HLT_PASS_RE('Hlt2SingleMuon.*Decision'))"
)
#----------------------------------------------------------------
#
# DaVinci Configuration
#
from Configurables import DaVinci
DaVinci().InputType = 'DST'
DaVinci().DataType = "2012"
DaVinci().Simulation = True
DaVinci().EvtMax = -1
DaVinci().HistogramFile = "DVHistos.root"
#
# Need to add bank-killer for testing
##from Configurables import EventNodeKiller
##eventNodeKiller = EventNodeKiller('StripKiller')
##eventNodeKiller.Nodes = [ '/Event/AllStreams', '/Event/Strip' ]
##DaVinci().appendToMainSequence( [ eventNodeKiller ] ) # Kill old stripping banks first
#
DaVinci().appendToMainSequence( [ sc.sequence() ] )
DaVinci().appendToMainSequence( [ dstWriter.sequence() ] )
DaVinci().EventPreFilters = trigfltrs.filters('TrigFilters')
#DaVinci().UseTrigRawEvent=True #
# Bring in some local test data
## Signal test data
## B0->D*munu(D0->K3pi) MC
##EventSelector().Input = [
## "DATAFILE='PFN:/data/lhcb/users/hunt/ALLSTREAMS.DST/Sim08-Reco14-Stripping20Flagged/11676001/00025040_00000030_1.allstreams.dst' TYP='POOL_ROOTTREE' OPT='READ'",
## "DATAFILE='PFN:/data/lhcb/users/hunt/ALLSTREAMS.DST/Sim08-Reco14-Stripping20Flagged/11676001/00025040_00000115_1.allstreams.dst' TYP='POOL_ROOTTREE' OPT='READ'",
## "DATAFILE='PFN:/data/lhcb/users/hunt/ALLSTREAMS.DST/Sim08-Reco14-Stripping20Flagged/11676001/00025040_00000118_1.allstreams.dst' TYP='POOL_ROOTTREE' OPT='READ'"
##]
|
##Problem Name : Leetcode Week 2 problem
## Problem Name: Valid Perfect Square
##Time 32 ms
class Solution:
    def isPerfectSquare(self, num: int) -> bool:
        """Return True if *num* is a perfect square, without using sqrt.

        Binary search over candidate roots: O(log num) instead of the
        original odd-number summation, which was O(sqrt(num)). Returns
        False for num <= 0, matching the original behavior.
        """
        lo, hi = 1, num
        while lo <= hi:
            mid = (lo + hi) // 2
            square = mid * mid
            if square == num:
                return True
            if square < num:
                lo = mid + 1
            else:
                hi = mid - 1
        return False
##Time 16 MS
##class Solution:
## def isPerfectSquare(self, num: int) -> bool:
## l = 1
## r = num
## while(r >= l):
## mid = int((l + r) / 2)
## if mid ** 2 == num:
## return True
## elif mid ** 2 > num:
## r = mid - 1
## else:
## l = mid + 1
## return False
|
import pandas as pd
import numpy as np
import json
import os
import re
import copy
import openpyxl as xl
import xlsxwriter
from paper_functions import integrated_paper_file_generator
from Patent_Functions import integrated_patent_file_generator
from typing import List, Dict, Optional, Union, Tuple
from pandas.api.types import is_numeric_dtype
from FileProcessor import *
def convert_text_to_dictionary(text_file):
    """Read "CODE CODE_NAME" lines from an open text file into a dict.

    The first space-separated token of each line is the code; the rest is
    the code name. When the name itself contains spaces, the pieces are
    joined back together WITHOUT separators (matching the original
    behavior). Closes *text_file* before returning.
    """
    mapping = {}
    # readline() returns "" at EOF, which terminates the iterator.
    for raw_line in iter(text_file.readline, ""):
        line = raw_line[:-1] if raw_line.endswith("\n") else raw_line
        parts = line.split(" ")
        # Code name with spaces: concatenate the remaining tokens.
        mapping[parts[0]] = "".join(parts[1:])
    text_file.close()
    return mapping
def generate_stat_file(settings):
    # Load the CODE -> CODE_NAME mapping shipped alongside the scripts;
    # convert_text_to_dictionary closes the file for us.
    text_file = open("CODE별 CODE_NAME.txt", "r", encoding='UTF-8')
    dict_info = convert_text_to_dictionary(text_file)
    # Dispatch to the paper or patent generator based on the result type;
    # any value other than "PAPER" is treated as patent data.
    if settings["result_type"] == "PAPER":
        paper = integrated_paper_file_generator(settings, dict_info)
        paper.generate_excel_file()
    else:
        patent = integrated_patent_file_generator(settings, dict_info)
        patent.generate_excel_file()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.