text stringlengths 38 1.54M |
|---|
# -*- coding:utf8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
from tutorial.items import QiuShiItem
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem
from tutorial.models import db_connect, create_qiushi_table, insert_into_db
class TutorialPipeline(object):
    """Default no-op pipeline: passes every scraped item through unchanged."""
    def process_item(self, item, spider):
        # nothing to transform; return the item so the next pipeline stage runs
        return item
# 百科附件图片处理管道
class QiuShiThumbPipeline(ImagesPipeline):
    """Downloads an item's attachment thumbnails (QiuShi encyclopedia images).

    item['thumb'] is expected to hold scheme-relative URLs ("//host/path");
    after completion it holds the list of downloaded file paths, or the
    empty-string sentinel when nothing was downloaded.
    """
    def get_media_requests(self, item, info):
        # URLs are scheme-relative, so prepend an explicit scheme
        for thumb in item['thumb']:
            yield scrapy.Request('http:' + thumb)
    def item_completed(self, results, item, info):
        # keep only the paths of successfully downloaded images
        image_paths = [x['path'] for status, x in results if status]
        if not image_paths:
            # BUG FIX: the original assigned '' and then unconditionally
            # overwrote it with the empty list; keep the '' sentinel instead.
            item['thumb'] = ''
        else:
            item['thumb'] = image_paths
        return item
# 百科用户头像处理管道
class QiuShiHeaderPipeline(ImagesPipeline):
    """Downloads an item's user-avatar images (QiuShi encyclopedia headers).

    item['header'] is expected to hold scheme-relative URLs ("//host/path");
    after completion it holds the list of downloaded file paths, or the
    empty-string sentinel when nothing was downloaded.
    """
    def get_media_requests(self, item, info):
        # URLs are scheme-relative, so prepend an explicit scheme
        for header in item['header']:
            yield scrapy.Request('http:' + header)
    def item_completed(self, results, item, info):
        # keep only the paths of successfully downloaded images
        image_paths = [x['path'] for status, x in results if status]
        if not image_paths:
            # BUG FIX: the original assigned '' and then unconditionally
            # overwrote it with the empty list; keep the '' sentinel instead.
            item['header'] = ''
        else:
            item['header'] = image_paths
        return item
# 百科内容存储在数据库与文件中管道
class QiuShiPipeline(object):
    """Stores QiuShi encyclopedia items both in a text file and the database."""
    def __init__(self):
        # output file that mirrors everything written to the database
        # NOTE(review): file is opened in text mode, but process_item writes
        # encoded bytes -- this only works on Python 2 (the file imports
        # scrapy.contrib, which is Python-2 era); confirm the target runtime.
        self.f = open("QiuShiBaike.txt", 'w+')
        self.connect = db_connect()
        cursor = self.connect.cursor()
        # ensure the destination table exists before the first insert
        create_qiushi_table(cursor)
    def process_item(self, item, spider):
        """Write one item to the text file, then insert it into the database.

        Raises whatever the DB layer raises on insert failure (after rollback).
        """
        # flatten the downloaded-image path lists to a single path (or "")
        header = ""
        if type(item['header']) == list and len(item['header']) > 0 :
            header = item['header'][0]
        thumb = ""
        if type(item['thumb']) == list and len(item['thumb']) > 0:
            thumb = item['thumb'][0]
        author = ""
        if item['author']:
            author = item['author']
        content = ""
        if item['content']:
            # join non-empty content lines with HTML line breaks
            content = "<br />".join([x.strip() for x in item['content'] if x.strip()])
        self.f.write((author + "\r\n" + header + "\r\n" + content + "\r\n" + thumb + "\r\n" + item['created_at'] + "\r\n").encode('UTF-8'))
        self.f.write("="*30+"\r\n")
        # persist the same fields to the database
        datas = {
            "header": header,
            "author": author,
            "content": content,
            "created_at": item['created_at'],
            "thumb": thumb
        }
        try:
            cursor = self.connect.cursor()
            insert_into_db(cursor, datas)
            self.connect.commit()
        except:
            # undo the partial transaction, then re-raise so the failure is logged
            self.connect.rollback()
            raise
        finally:
            cursor.close()
        return item
|
import pymysql
# Open the spider's database connection.
# NOTE(review): credentials are hard-coded; consider moving them to config.
try:
    db = pymysql.connect(host="localhost", user="root", password="root",
                         database="spider_1", charset='utf8')
except pymysql.Error:
    # BUG FIX: the original bare `except` swallowed every error and left `db`
    # undefined, causing a NameError at first use; narrow the exception and
    # define a sentinel so callers can test `db is None`.
    db = None
    print("数据库连接失败")
from .graph import Graph
from PIL import Image
import pydot
import tempfile
def display_graph(graph, graph_name=None):
    """Render *graph* to a PNG with pydot/Graphviz and display it with PIL.

    Parameters:
        graph: project Graph object exposing is_directed(), get_vertices()
            (dict of vertices with get_label()) and get_edges() (edges with
            get_start_vertex()/get_end_vertex()/get_weight()).
        graph_name: optional caption drawn on the generated image.
    """
    import os
    graph_type = "digraph" if graph.is_directed() else "graph"
    pydot_graph = pydot.Dot(graph_type=graph_type)
    if graph_name:
        pydot_graph.set_label(graph_name)
    # draw vertices (filled light-green nodes)
    for vertex in graph.get_vertices().values():
        node = pydot.Node(vertex.get_label())
        node.set_style("filled")
        node.set_fillcolor("#a1eacd")
        pydot_graph.add_node(node)
    # draw edges; the edge label shows the weight
    for edge in graph.get_edges():
        pydot_edge = pydot.Edge(edge.get_start_vertex().get_label(),
                                edge.get_end_vertex().get_label())
        pydot_edge.set_label(str(edge.get_weight()))
        pydot_graph.add_edge(pydot_edge)
    # BUG FIX: write to a closed delete=False temp file. On Windows an open
    # NamedTemporaryFile cannot be reopened by pydot, and PIL opens files
    # lazily, so the image must be fully loaded before the file is removed.
    temp = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    temp.close()
    try:
        pydot_graph.write_png(temp.name)
        image = Image.open(temp.name)
        image.load()  # force the full read before unlinking
    finally:
        os.unlink(temp.name)
    image.show()
|
# web2py/pyDAL table definition: one row per stored image.
# NOTE(review): assumes `db` (a DAL connection) and `Field` are injected by the
# framework environment -- confirm against the app's models setup.
db.define_table('images',
    Field('represent', type='string', length=100, required=True),
    Field('file_name', type='string', length=100, required=True))
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2018-05-07 14:39:40
# Project: MTime
from pyspider.libs.base_handler import *
import re
class Handler(BaseHandler):
    """pyspider crawler for Mtime: Xiamen theater index -> movie detail pages."""
    # spoof a desktop Chrome UA so the site serves the normal page
    headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'
    }
    crawl_config = {
    }
    @every(minutes=24 * 60)
    def on_start(self):
        # entry point: the theater index page (JS-rendered, hence fetch_type='js')
        self.crawl('http://theater.mtime.com/China_Fujian_Province_Xiamen/',callback=self.index_page,headers=self.headers,fetch_type='js')
    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        # follow only movie-detail URLs of the form http://movie.mtime.com/<id>/
        for each in response.doc('a[href^="http"]').items():
            # BUG FIX: use a raw string and escape the dots -- the original
            # pattern "http://movie.mtime.com/\d+/$" let '.' match any
            # character and used '\d' inside a non-raw string.
            if re.match(r"http://movie\.mtime\.com/\d+/$", each.attr.href):
                self.crawl(each.attr.href,callback=self.detail_page,headers=self.headers,fetch_type='js')
    @config(priority=2)
    def detail_page(self, response):
        """Scrape one movie page and return the fields as a dict."""
        title = response.doc('.clearfix > h1').text()            # movie title
        publish_time = response.doc('.db_year > a').text()       # release year
        marketed = response.doc('.__r_c_ > b').text()            # rating score
        ticket_num = response.doc('.only').text()                # box office
        markered_people = response.doc('#ratingCountRegion').text()   # number of raters
        wanted_people = response.doc('#attitudeCountRegion').text()   # want-to-see count
        message_details = response.doc('.info_l > dd > a').text()     # movie info links
        message = response.doc('.lh18').text()                   # synopsis
        main_actor = response.doc('.main_actor p > a').text()    # main cast
        return {
            "title": title,
            "publish_time": publish_time,
            "marketed":marketed,
            "ticket_num":ticket_num,
            "markered_people":markered_people,
            "wanted_people":wanted_people,
            "message_details":message_details,
            "message":message,
            "main_actor":main_actor,
        }
import matplotlib.pyplot as pyplt
import numpy as np
import math
import sys
from scipy.signal import argrelmax, argrelmin
#this library contains different CFD methods that may be used for on-the-fly processing
###########################################
#Ave CFD section
###########################################
def aveGattonCFD_main(dataIn_Amplitude):
    """CFD-style hit finder for a single 1-D waveform.

    Parameters:
        dataIn_Amplitude: 1-D numpy array of raw waveform samples.

    Returns:
        (dataIn_Amplitude, time_locs): the untouched input array and the
        1-based sample indices at which zero crossings (candidate hits)
        were located.
    """
    CFD_TH = 10. # the noise is scaled to approximately 5. This sets the threshold to 10x the noise level.
    CFD_offset = 5  # sample shift used by the offset-and-subtract CFD stage
    conv_len = 3  # length of the boxcar part of the smoothing kernel
    wf_amp = dataIn_Amplitude
    # scale the waveform to zero mean; the 0.5 factor halves sigma, so the
    # normalized noise ends up around amplitude ~2 per unit of true sigma
    wf_amp_avg = np.sum(wf_amp) / wf_amp.shape[0]
    wf_amp_sigma = .5 * np.sqrt(np.sum((wf_amp - wf_amp_avg) ** 2) / wf_amp.shape[0])
    wf_amp = (wf_amp - wf_amp_avg) / wf_amp_sigma
    # this loop executes exactly once (np.arange(1) has a single element);
    # not apparent why it is a loop -- kept as written
    for _ in np.arange(1):
        # asymmetric smoothing kernel: a boxcar followed by an equal run of zeros
        wf_amp = np.convolve(wf_amp, np.concatenate((np.ones(conv_len), np.zeros(conv_len))) / conv_len,
                             mode='same')
    # estimate the noise width from a 5th-degree polynomial fit to the
    # log-histogram of sample amplitudes
    wf_histo, wf_bins = np.histogram(wf_amp, bins=100)
    wf_log_histo = np.log(wf_histo)  # empty bins give -inf; masked out below
    good_value_cut = np.logical_not(np.isinf(wf_log_histo))
    coeff = np.polynomial.polynomial.polyfit(x=wf_bins[:-1][good_value_cut], y=wf_log_histo[good_value_cut], deg=5)
    # for a gaussian log-histogram the quadratic coefficient is -1/sigma^2
    # NOTE(review): if coeff[2] > 0 this sqrt yields NaN -- confirm inputs
    # are noise-dominated before relying on this threshold
    sigma = np.sqrt(-1. / coeff[2])
    CFD_TH = 3.0 * sigma  # overrides the initial CFD_TH with a 3-sigma threshold
    # mark samples whose magnitude exceeds the threshold (either polarity)
    boolean_threshold = np.logical_or(wf_amp > CFD_TH, wf_amp < -CFD_TH)
    # mark samples that sit within CFD_offset samples before a thresholded run
    boolean_zero_padding = np.logical_and(np.append(np.logical_not(boolean_threshold), np.zeros(CFD_offset)),
                                          np.append(np.zeros(CFD_offset), boolean_threshold))[0:-CFD_offset]
    wf_zeroed = wf_amp.copy()
    wf_zeroed[boolean_zero_padding] = 0
    # boolean_select is True at samples belonging to a candidate hit region
    boolean_select = np.logical_or(boolean_threshold, boolean_zero_padding)
    th_wave = wf_zeroed[boolean_select]
    indexList = np.arange(1, (len(wf_amp) + 1))  # 1-based sample indices
    th_time_wave = indexList[boolean_select]
    # traditional CFD: subtract a time-shifted copy and look for sign changes
    CFD_input = np.append(th_wave, np.zeros(CFD_offset))
    CFD_shift_scale = np.append(np.zeros(CFD_offset), th_wave)
    CFD_wave = CFD_input - CFD_shift_scale
    CFD_wave_pos = np.where(CFD_wave > 0, True, False)
    CFD_wave_neg = np.where(CFD_wave <= 0, True, False)
    # a crossing is a positive sample immediately followed by a non-positive one
    zero_points = np.logical_and(CFD_wave_pos[:-1], CFD_wave_neg[1:])
    # NOTE(review): the [5:] presumably compensates for the CFD_offset padding
    # prepended above -- confirm it tracks CFD_offset if that constant changes
    time_locs = th_time_wave[:-1][zero_points[5:]]
    return dataIn_Amplitude, time_locs
##################################################
#Andrei CFD section
##################################################
def andreiKamalovCFD_main(dataIn):
    """Threshold-seeded CFD hit finder.

    Centers the trace, finds above-threshold regions, smooths with a
    triangular filter, builds the classic CFD comparison trace (offset copy
    plus scaled inverse copy), and validates the zero crossing of each
    candidate region.

    Parameters:
        dataIn: 1-D numpy array with the raw trace.

    Returns:
        (dataIn_Centered, hitIndices): the mean-subtracted trace and a list
        of sample indices (into dataIn_Centered) at which hits were found.
    """
    #initialize 'hitIndices', which will contain the indices of any hits found in the supplied trace
    hitIndices = []
    #subtract a mean offset
    dataIn_Centered = dataIn - np.mean(dataIn)
    #calculate the variance of the trace
    sigma = np.std(dataIn_Centered)
    #calculate an upper threshold above which to look for peaks in the raw trace
    threshold = 4*sigma
    #return the indices for which the raw data exceeds the threshold.
    dataIn_AboveThreshold_Indices = np.flatnonzero(dataIn_Centered > threshold)
    #if it's likely that there are zero hits in this trace, there's no need to perform the remainder of the CFD processing.
    if(len(dataIn_AboveThreshold_Indices) == 0):
        #create an empty array of found hits
        #NOT IMPLEMENTED YET BUT SHOULD BE
        return dataIn_Centered, hitIndices
    #convolve the raw data with a triangular filter
    convFilterLength = 35#this must be an odd value
    convolvedData = convoluteByTriangle(dataIn_Centered, convFilterLength)
    #add up an inverse and an offset. this is the type of approach an electronic CFD performs.
    lengthTrace = len(convolvedData)
    CFDOffset = 20
    inverseMultiplier = -0.75
    offsetTrace = convolvedData[0:(lengthTrace - CFDOffset)]
    inverseTrace = inverseMultiplier * convolvedData[CFDOffset:lengthTrace]
    #traditional CFD adds a time-offset copy of the trace with an inverse copy of the original trace.
    comparedTrace = offsetTrace + inverseTrace
    #shift the region with zero-point crossing to be more centered on the zero cross. The initial array is found based on being above some amount of standard deviations
    indicesShift = round(CFDOffset * (1 + inverseMultiplier))
    dataIn_AboveThreshold_Indices -= indicesShift
    #split the index array into a tuple of arrays, each a continuous run of integers
    tupleOfRegionIndicesArrays = separateArrayIntoTupleOfContinuousArrays(dataIn_AboveThreshold_Indices)
    #findZeroCrossings for each array of continuous integers
    for ind in range(len(tupleOfRegionIndicesArrays)):
        seriesToProcess = tupleOfRegionIndicesArrays[ind]
        #'findZeroCrossings' validates a series: a good zero crossing returns (True, index); a bad series returns (False, 0)
        validSeriesFlag, hitIndex = findZeroCrossings(seriesToProcess, comparedTrace)
        #append good hits to the array 'hitIndices'
        if(validSeriesFlag):
            hitIndices.append(hitIndex)
    #found hitIndices are relative to the processed comparedTrace; un-shift them so they index the actual trace (dataIn_Centered)
    hitIndices = [x + indicesShift for x in hitIndices]
    #control whether to do diagnostic plots or not (disabled; flip to True when debugging)
    if(False):
        halfSpan = 200
        for ind in range(len(hitIndices)):
            #diagnostic plots around each found hit
            lowBound = hitIndices[ind].item() - halfSpan
            highBound = hitIndices[ind].item() + halfSpan
            pyplt.plot(range(lowBound, highBound), convolvedData[lowBound:highBound])
            if (len(hitIndices) > 0):
                pyplt.scatter(hitIndices[ind].item(), convolvedData[hitIndices[ind].item()])
            pyplt.show()
            pyplt.plot(range(lowBound, highBound), dataIn_Centered[lowBound:highBound])
            if (len(hitIndices) > 0):
                pyplt.scatter(hitIndices[ind].item(), dataIn_Centered[hitIndices[ind].item()])
            pyplt.show()
    return dataIn_Centered, hitIndices
#CFD that finds hits based on statistical analysis of the raw trace. The main idea is to look at the stastitical probability of each trace having it's magnitude. Look for clusters that are above some threshold, and consider those clusters as unique. Then look through the cluster to see if theres multiple separate hits within it.
# Combined z-score significance threshold shared by the statistical CFD
# routines below. NOTE(review): the name looks like a typo of "Universal",
# but it is kept because other modules may reference it.
zScoreUnversal = 6
def andreiKamalovCFD_statistical(dataIn, noiseRegionLimitLow=0, noiseRegionLimitHigh=1000):
    """Statistical CFD: find hits by z-scoring the differentiated trace.

    The trace is normalized and differentiated; per-sample z-scores are
    computed against the noise level estimated from the window
    [noiseRegionLimitLow:noiseRegionLimitHigh], then summed over a sliding
    window.  Window maxima that clear the module-level zScoreUnversal
    threshold are grown into full peaks via isolatePeakBy_zScores.

    Returns:
        (dataIn, hitIndices, hitLimitsHigh, convPeakMax): the original trace,
        and arrays of peak start indices, peak end indices, and the positions
        of the convolved z-score maxima.
    """
    hitIndices = []
    hitLimitsHigh = []
    convPeakMax = []
    #normalize the trace to be positive, and have max value of +1
    normedTrace = normalizeTrace(dataIn)
    # dataInNormalize = normedTrace
    dataInNormalize = np.diff(normedTrace)
    #use the suggested noise region to establish some understanding of the trace and its signal/noise ratio
    stdDev = np.std(dataInNormalize[noiseRegionLimitLow:noiseRegionLimitHigh])
    #convert trace to a series of z-scores
    zScoresArray = dataInNormalize/stdDev
    #convolve zScoresArray across some length
    minimumWidthOfHit = 9
    convolved_zScores = np.convolve(zScoresArray, np.ones(minimumWidthOfHit), 'same')
    #use convolved z-scores array to look for local maxima
    findMaxIndices = argrelmax(convolved_zScores)
    findMaxIndices = findMaxIndices[0]#unwrap the output of argrelmax
    firstRealPeakZScore = 0
    firstPeakEstablished = False
    #look through each local maxima
    for localMax in findMaxIndices:
        combined_zScoreHere = convolved_zScores[localMax]
        #check whether the current local maxima meets criteria for certainty that a peak is found
        if combined_zScoreHere/math.sqrt(minimumWidthOfHit) > zScoreUnversal:
            #check if localMax is already accounted for in between a previously located low and high limit pair
            if not checkIfAlreadyConsidered(localMax, hitIndices, hitLimitsHigh):
                if not firstPeakEstablished:
                    firstRealPeakZScore = combined_zScoreHere
                    firstPeakEstablished = True
                # reject maxima much weaker than the first accepted peak
                if combined_zScoreHere > 0.2*firstRealPeakZScore:
                    #current maxima believed to be a legitimate peak. Process it to isolate the domain of the peak.
                    peakLimitLow, peakLimitHigh = isolatePeakBy_zScores(zScoresArray, localMax, minimumWidthOfHit)
                    #check if this is possible ringing - the start is negative valued and the positive value is not much larger in magnitude
                    if (normedTrace[peakLimitLow] < 0) and (np.absolute(normedTrace[peakLimitHigh]) <= 3.5*np.absolute(normedTrace[peakLimitLow])):
                        #this could be a ringing peak, better to drop it
                        pass
                    else:
                        hitIndices.append(peakLimitLow)
                        hitLimitsHigh.append(peakLimitHigh)
                        convPeakMax.append(localMax)
    # #go backwards from end to start, and eliminate peaks that begin too soon after the end of the previous peak. This helps eliminate ripples that the algorithm claims are separate peaks, but are actually part of the previous peak, just separated by a short burst of low z-score
    # for i in range(len(hitIndices)-1, 0, -1):
    #     if ((hitIndices[i] - hitLimitsHigh[i-1]) < 7):
    #         #hit is too close to previous hit. remove it from the list
    #         hitIndices.pop(i)
    #         hitLimitsHigh.pop(i)
    #         convPeakMax.pop(i)
    #convert the result lists into arrays
    hitIndices = np.asarray(hitIndices)
    hitLimitsHigh = np.asarray(hitLimitsHigh)
    convPeakMax = np.asarray(convPeakMax)
    return dataIn, hitIndices, hitLimitsHigh, convPeakMax
#quick hack to apply CFD to MCP direct output. This is a quick hack to use the normal CFD but negate the data to help it find the hits associated with the MCP readout.
def andreiKamalovCFD_MCPHack(dataIn):
    """Run the standard CFD on an MCP trace.

    MCP readout pulses are negative-going, so the trace is negated before
    being handed to andreiKamalovCFD_main; the (centered trace, hit indices)
    pair from that call is returned directly.
    """
    return andreiKamalovCFD_main(-1 * dataIn)
#####################################################################################
#support methods for andrei's CFD
#convolute the array 'signalIn' by a triangular waveform, with true width (binWidth + 2). the two extra bits are for the 'zero' value of the triangles convolution. the max height occurs at the central bin. think of the convolution filter as a sawtooth.
def convoluteByTriangle(signalIn, binWidth):
    """Smooth *signalIn* with a normalized triangular (sawtooth) filter.

    Parameters:
        signalIn: 1-D numpy array to smooth.
        binWidth: length of the triangular kernel; must be ODD so the apex
            falls on the central bin.

    Returns:
        A numpy array of the same length as signalIn.
    """
    # number of non-zero points on each ramp of the triangle (apex inclusive)
    numTriangleSide = (binWidth + 1) // 2
    # rising ramp 1/k, 2/k, ..., 1 followed by its mirror (apex not repeated)
    ramp = np.arange(1, numTriangleSide + 1) / numTriangleSide
    multipliers = np.concatenate((ramp, ramp[-2::-1]))
    # normalize so the kernel preserves the signal's scale
    multipliers = multipliers / np.sum(multipliers)
    # 'same' mode returns the centered portion of the full convolution,
    # exactly matching the original hand-rolled shift-and-add loop
    return np.convolve(signalIn, multipliers, mode='same')
#this method is designed to take an array of integers, some of which are continuous, and separate it into a set of arrays wherein each array is a continuous set of integers. these individual arrays are placed into a tuple that is then returned.
def separateArrayIntoTupleOfContinuousArrays(dataIn_AboveThreshold_Indices):
    """Split a sorted array of integer indices into runs of consecutive values.

    Parameters:
        dataIn_AboveThreshold_Indices: sorted 1-D sequence of integers.

    Returns:
        A tuple of lists, each list being one maximal run of consecutive
        integers; empty tuple for empty input.
    """
    tupleOfLists = ()
    # BUG FIX: the original raised IndexError on empty input (it always
    # accessed [-1]) and raised TypeError for single-element arrays (it used
    # `list += scalar` instead of append); the general path below handles
    # both lengths 1 and N correctly.
    if len(dataIn_AboveThreshold_Indices) == 0:
        return tupleOfLists
    currentList = []
    for ind in range(len(dataIn_AboveThreshold_Indices) - 1):
        currentList.append(dataIn_AboveThreshold_Indices[ind])
        # a gap before the next index closes out the current run
        if (dataIn_AboveThreshold_Indices[ind + 1] - dataIn_AboveThreshold_Indices[ind]) != 1:
            tupleOfLists += (currentList,)
            currentList = []
    # the final index always terminates the last run
    currentList.append(dataIn_AboveThreshold_Indices[-1])
    tupleOfLists += (currentList,)
    return tupleOfLists
#method findZeroCrossings inspects the index series in seriesToProcess, and verifies that the associated y-values in comparedTrace are an appropriate rising edge. if it's a good series, return true and the zero crossing index. if false, return False and 0
def findZeroCrossings(seriesToProcess, comparedTrace):
    """Validate a candidate zero crossing of the CFD comparison trace.

    seriesToProcess is a run of consecutive indices into comparedTrace. The
    run is a valid crossing when the trace goes monotonically from negative
    to positive across it.

    Returns:
        (True, crossIndex) where crossIndex is the last non-positive sample
        before the crossing, or (False, 0) for an invalid run.
    """
    if len(seriesToProcess) <= 1:
        # a single index cannot bracket a proper zero crossing
        return False, 0
    lowIdx = seriesToProcess[0]
    highIdx = seriesToProcess[-1]
    # Reject runs touching the trace edges: the scans below may step one
    # sample beyond either end of the run, so both neighbors must exist.
    if (lowIdx - 1) < 0 or highIdx < 0:
        return False, 0
    if (highIdx + 1) >= len(comparedTrace) or lowIdx >= len(comparedTrace):
        return False, 0
    # walk up from the bottom until the trace stops being negative
    risingEdge = lowIdx
    while (comparedTrace[risingEdge] < 0) and (risingEdge <= highIdx):
        risingEdge += 1
    # walk down from the top until the trace stops being positive
    fallingEdge = highIdx
    while (comparedTrace[fallingEdge] > 0) and (fallingEdge >= lowIdx):
        fallingEdge -= 1
    # a monotonic crossing leaves the two scans on adjacent samples
    if (fallingEdge + 1) == risingEdge:
        return True, fallingEdge
    return False, 0
#function to normalize a trace to be positive, such that the max of the trace is 1.
def normalizeTrace(dataIn):
    """Median-center a trace and scale the dominant peak to magnitude 1.

    Negative-going traces (where the downward extreme dominates, ties
    included) are flipped so the returned peak is positive.
    """
    # median-centering is robust against the peak itself shifting the baseline
    centered = dataIn - np.median(dataIn)
    posPeak = np.absolute(np.max(centered))
    negPeak = np.absolute(np.min(centered))
    # scale by whichever extreme dominates; negate downward-going traces
    if posPeak > negPeak:
        return centered / posPeak
    return -1 * centered / negPeak
#a suspected peak was found based on combined z-scores. find a good length for it based on the middle.
def isolatePeakBy_zScores(zScoresArray, localMaxIndex, minimumWidthOfHit):
    """Grow a window around a candidate peak to its statistically best width.

    Starting from a minimal window centered on localMaxIndex, each side is
    expanded while the cumulative metric sum/sqrt(length) keeps improving
    (allowing up to streakBreakerCount consecutive failed expansions), then
    short add-on segments are appended while they remain significant.

    Parameters:
        zScoresArray: 1-D array of per-sample z-scores.
        localMaxIndex: index of the candidate peak (from the convolved score).
        minimumWidthOfHit: minimal starting width; if odd, the rising edge
            gets the extra sample.

    Returns:
        (indexCutoffLow, indexCutoffHigh): inclusive x-limits of the hit.
    """
    # BUG FIX: removed a leftover debugger `breakpoint()` call (dead code
    # after a `break`) and the unused local `thresholdScore`.
    streakBreakerCount = 3
    # starting boundaries: localMaxIndex +/- roughly half the minimal width
    indexCutoffLow = localMaxIndex - int(np.ceil(minimumWidthOfHit/2))
    indexCutoffHigh = localMaxIndex + int(np.floor(minimumWidthOfHit/2)) - 1
    if indexCutoffHigh >= len(zScoresArray):
        indexCutoffHigh = len(zScoresArray) - 1
    current_zScoreSum = np.sum(zScoresArray[indexCutoffLow:(indexCutoffHigh + 1)])
    currentLength = indexCutoffHigh - indexCutoffLow + 1
    lastImproved_zScore = (current_zScoreSum/math.sqrt(currentLength))
    lastImproved_zScoreSum = current_zScoreSum
    currentStrikes = 0
    # expand the upper limit until streakBreakerCount consecutive samples
    # fail to improve the cumulative z-score metric
    while currentStrikes < streakBreakerCount:
        if (indexCutoffHigh + 1) == len(zScoresArray):
            # reached the end of the trace; stop expanding
            break
        indexCutoffHigh += 1
        currentLength += 1
        current_zScoreSum = current_zScoreSum + zScoresArray[indexCutoffHigh]
        new_zScore = (current_zScoreSum/math.sqrt(currentLength))
        if new_zScore >= lastImproved_zScore:
            # this sample benefits the metric; it is likely part of the hit
            lastImproved_zScore = new_zScore
            lastImproved_zScoreSum = current_zScoreSum
            currentStrikes = 0
        else:
            # this sample hurts the metric; count a strike
            currentStrikes += 1
    # drop the trailing samples that only accumulated strikes
    # NOTE(review): this subtraction is unconditional, so an early `break`
    # above also trims streakBreakerCount samples -- preserved original behavior
    indexCutoffHigh -= streakBreakerCount
    currentLength -= streakBreakerCount
    # perform the symmetric expansion on the lower limit
    currentStrikes = 0
    current_zScoreSum = lastImproved_zScoreSum
    # NOTE(review): no lower-bound guard here; indexCutoffLow can go negative
    # and wrap via numpy negative indexing for peaks near the trace start
    while currentStrikes < streakBreakerCount:
        indexCutoffLow -= 1
        currentLength += 1
        current_zScoreSum = current_zScoreSum + zScoresArray[indexCutoffLow]
        new_zScore = (current_zScoreSum/math.sqrt(currentLength))
        if new_zScore >= lastImproved_zScore:
            lastImproved_zScore = new_zScore
            lastImproved_zScoreSum = current_zScoreSum
            currentStrikes = 0
        else:
            currentStrikes += 1
    # undo the failed lower-side expansions
    indexCutoffLow += streakBreakerCount
    currentLength -= streakBreakerCount
    # The limits found so far tend to under-represent the peak width because
    # the noise floor is small relative to a good peak. Append incrementally
    # shorter add-on segments while they still look significant.
    lengthsToCheck = 3
    keepAddingHighSide = True
    addOnScoreThresh = 2
    while lengthsToCheck > 0:
        while keepAddingHighSide:
            if(indexCutoffHigh + lengthsToCheck >= len(zScoresArray)):
                break
            zScoreSumAddOn = np.sum(zScoresArray[indexCutoffHigh:(indexCutoffHigh + lengthsToCheck)])
            if (zScoreSumAddOn/math.sqrt(lengthsToCheck)) > addOnScoreThresh:
                indexCutoffHigh += lengthsToCheck
                currentLength += lengthsToCheck
            else:
                keepAddingHighSide = False
        # reset for the next, shorter checking iteration
        lengthsToCheck -= 1
        keepAddingHighSide = True
    # repeat the add-on process for the lower cutoff index
    # NOTE(review): unlike the high side, this has no start-of-trace guard
    lengthsToCheck = 3
    keepAddingLowSide = True
    addOnScoreThresh = 2
    while lengthsToCheck > 0:
        while keepAddingLowSide:
            zScoreSumAddOn = np.sum(zScoresArray[(indexCutoffLow-lengthsToCheck):indexCutoffLow])
            if (zScoreSumAddOn/math.sqrt(lengthsToCheck)) > addOnScoreThresh:
                indexCutoffLow -= lengthsToCheck
                currentLength += lengthsToCheck
            else:
                keepAddingLowSide = False
        # reset for the next, shorter checking iteration
        lengthsToCheck -= 1
        keepAddingLowSide = True
    # return the inclusive x-limits of the found hit
    return indexCutoffLow, indexCutoffHigh
#method for checking whether a peak is in fact a peak and not probable ringing
def validatePeak(dataIn, peakIndex):
    """Accept a candidate peak when the local integral around it is non-negative.

    Integrates dataIn over up to 30 samples on each side of peakIndex (clamped
    to the trace limits); probable ringing integrates to a negative value.
    Returns True for a plausible peak, False otherwise.
    """
    span = 30
    lowEdge = max(peakIndex - span, 0)
    # upper slice bound is exclusive and clamped to the last valid index
    highEdge = min(peakIndex + span, len(dataIn) - 1)
    return bool(np.sum(dataIn[lowEdge:highEdge]) >= 0)
# #test whether there is a double peak that is about to be reported.
# def postFindValidate(dataIn, peakLimitLow, peakLimitHigh):
# convolvedTrace = np.convolve(dataIn[peakLimitLow:peakLimitHigh], np.ones(7), 'same')
# #test whether theres more than one peak, and handle the situation if so.
# foundPeaks = argrelmax(convolvedTrace)
# if len(foundPeaks[0]) == 1:
# return True
# else:
# return False
#This method will take a supplied range and inspect it to be either a single peak or a cluster of multiple peaks that never return to noise floor level. IF it is a multipeak, it goes ahead and separates the conglomerate by separating based on local minima.
def separateStructureIntoUniquePeaks(normalizedData, startOfStructure, endOfStructure):
    """Split one above-noise structure into its constituent peaks.

    The sub-trace [startOfStructure:endOfStructure] is smoothed; its local
    minima are used as split points between adjacent sub-peaks. When the
    extrema counts don't match expectations (exactly one more maximum than
    minima), the whole structure is returned as a single peak.

    Returns:
        (peakStarts, peakEnds, peakMaxima): integer arrays of per-peak start
        indices, end indices, and smoothed-trace maxima positions (all in
        absolute trace coordinates). In the fallback case the third element
        duplicates peakStarts, matching the original behavior.
    """
    # smooth the sub-trace so ripple doesn't create spurious extrema
    convolvedSubTrace = np.convolve(normalizedData[startOfStructure:endOfStructure], np.ones(31), 'same')
    # BUG FIX: argrelmax/argrelmin return a 1-tuple of index arrays; the
    # original added startOfStructure to the tuple itself, raising TypeError.
    # Unwrap with [0] before shifting to absolute indices.
    peakMaxima = argrelmax(convolvedSubTrace)[0] + startOfStructure
    foundMinima = argrelmin(convolvedSubTrace)[0] + startOfStructure
    if peakMaxima.size != foundMinima.size + 1:
        # extrema finders disagree with the expected structure; fall back to
        # reporting the whole span as a single peak
        peakStarts = np.array([startOfStructure], dtype=int)
        peakEnds = np.array([endOfStructure], dtype=int)
        return peakStarts, peakEnds, peakStarts
    if(len(peakMaxima) == 1):
        # a single peak: report the structure limits unchanged
        peakStarts = np.array([startOfStructure], dtype=int)
        peakEnds = np.array([endOfStructure], dtype=int)
        return peakStarts, peakEnds, peakMaxima
    # genuinely multiple peaks: split the structure at the local minima
    numPeaks = foundMinima.size + 1
    peakStarts = np.zeros(numPeaks, dtype=int)
    peakEnds = np.zeros(numPeaks, dtype=int)
    for i in range(numPeaks):
        # first peak starts at the structure start; later peaks at a minimum
        peakStarts[i] = startOfStructure if i == 0 else foundMinima[i - 1]
        # -1 so one peak's end never overlaps the next peak's start; the
        # final peak ends at the structure end
        peakEnds[i] = endOfStructure if i == (numPeaks - 1) else foundMinima[i] - 1
    return peakStarts, peakEnds, peakMaxima
#check if index localMax is already accounted for, by seeing if there's a pair of corresponding hitStarts and hitEnds that encompass localMax's index
def checkIfAlreadyConsidered(localMax, hitStarts, hitEnds):
    """Report whether localMax falls inside an already-recorded hit.

    hitStarts and hitEnds are parallel lists of previously accepted peak
    limits. Returns True when localMax lies STRICTLY between some pair
    (hitStarts[i], hitEnds[i]), False otherwise.
    """
    # strict inequalities: an index equal to a recorded limit is NOT covered
    return any(start < localMax < end for start, end in zip(hitStarts, hitEnds))
# Generated by Django 2.1b1 on 2019-01-04 12:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``profile_pic`` ImageField to the signup app's Profile model."""
    dependencies = [
        ('signup', '0007_auto_20190103_2016'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='profile_pic',
            # blank=True makes the picture optional; upload_to='' stores files
            # at the MEDIA_ROOT top level.
            field=models.ImageField(blank=True, help_text='Select Your Profile Image', upload_to=''),
        ),
    ]
|
# Base inventory: item name -> quantity held.
inventory = {'rope':1,'torch':6,'gold coin':42,'dagger':1,'arrow':12}
# Loot to be merged into the player's inventory by addtoinventory().
Alarm = {'rope':12,'gold coin':420,'arrow':12,'bottle':10}
def displayinventory(player):
    """Print the raw key list, one 'count name' line per item, then a total."""
    keys = list(player)
    print(keys)
    total = 0
    for name in keys:
        print(str(player[name]) + ' ' + str(name))
        total += player[name]
    print('Total number of items: ' + str(total))
# Demo calls left disabled; the bare print() just emits a separator line.
#displayinventory(inventory)
print()
#displayinventory(Alarm)
def displayInventory(player):
    """Print one 'count name' line per item followed by the grand total."""
    total = 0
    for item, count in player.items():
        print(str(count) + ' ' + str(item))
        total += count
    print('Total number of items: ' + str(total))
# Demo calls for the dict-iteration variant, also left disabled.
#displayInventory(inventory)
print()
#displayInventory(Alarm)
def addtoinventory(player, addeditems):
    """Merge addeditems' counts into a copy of player; print and return it.

    FIX: the original iterated only ``player.items()``, so items present
    solely in ``addeditems`` (e.g. 'bottle') were silently dropped from the
    merged result. The merge now covers both dicts. The merged dict is also
    returned (the original implicitly returned None, so callers ignoring the
    return value are unaffected).
    """
    newInventory = dict(player)
    for name, count in addeditems.items():
        newInventory[name] = newInventory.get(name, 0) + count
    print(newInventory)
    return newInventory
addtoinventory(inventory,Alarm) |
"""Google_Drive URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import re_path
from gdriveApp import views
from django.conf import settings
from django.views.static import serve
urlpatterns = [
    path('admin/', admin.site.urls),
    re_path(r'^$', views.index,name='Default Page'),
    # NOTE(review): the patterns below have no leading '^' anchor, so they
    # match anywhere in the path (e.g. 'xlogin/' also reaches the login
    # view) — confirm whether anchoring was intended.
    # NOTE(review): several route names contain spaces ('Registeration
    # Page', ...); they work with reverse() but are unconventional, and
    # 'Registeration' is a typo kept as-is because templates may reference it.
    re_path(r'register/', views.register,name='Registeration Page'),
    re_path(r'login/', views.login,name='Login Page'),
    re_path(r'upload/', views.upload,name='Upload Page'),
    re_path(r'logout/', views.log_out,name='Log Out'),
    re_path(r'myUploads/', views.myUploads,name='myUploads'),
    re_path(r'about/', views.aboutUs,name='AboutUs'),
]
# Serve uploaded media through Django itself, but only while DEBUG is on.
if settings.DEBUG:
    urlpatterns += [
        re_path(r'^media/(?P<path>.*)$', serve, {
            'document_root': settings.MEDIA_ROOT,
        }),
    ]
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create (forwards) / drop (backwards) the
    books_bookdistance table holding pairwise distances between books."""

    def forwards(self, orm):
        """Create the BookDistance table: two Book FKs plus a float distance."""
        # Adding model 'BookDistance'
        db.create_table(u'books_bookdistance', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('first_book', self.gf('django.db.models.fields.related.ForeignKey')(related_name='first_book', to=orm['books.Book'])),
            ('second_book', self.gf('django.db.models.fields.related.ForeignKey')(related_name='second_book', to=orm['books.Book'])),
            ('distance', self.gf('django.db.models.fields.FloatField')()),
        ))
        db.send_create_signal(u'books', ['BookDistance'])

    def backwards(self, orm):
        """Reverse of forwards(): drop the BookDistance table."""
        # Deleting model 'BookDistance'
        db.delete_table(u'books_bookdistance')

    # Frozen ORM snapshot South uses to reconstruct the models at migration time.
    models = {
        u'books.author': {
            'Meta': {'object_name': 'Author'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        u'books.book': {
            'Meta': {'object_name': 'Book'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['books.Author']"}),
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['books.BookGenre']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text_file': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
        },
        u'books.bookdistance': {
            'Meta': {'object_name': 'BookDistance'},
            'distance': ('django.db.models.fields.FloatField', [], {}),
            'first_book': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'first_book'", 'to': u"orm['books.Book']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'second_book': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'second_book'", 'to': u"orm['books.Book']"})
        },
        u'books.bookgenre': {
            'Meta': {'object_name': 'BookGenre'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        }
    }
    complete_apps = ['books']
import sys
import main
from PyQt5 import QtGui, QtCore, QtMultimedia
from PyQt5.QtWidgets import *
from PyQt5.QtCore import qDebug, QTimer, QUrl, QFile, QFileInfo, QDir
from PyQt5.QtGui import QColor, QPixmap, QScreen
from PyQt5.QtMultimedia import *
from PIL.ImageQt import ImageQt
from PIL import Image
app = QApplication(sys.argv)
class MessageBox(QWidget):
    """Always-on-top "local watcher" window.

    Every 300 ms it grabs a rectangle from the window located via
    ``main.get_hwnd`` and shows the grab. While the alarm checkbox is
    ticked it also scans a pixel column of the grab for colours close to
    pure red and plays an mp3 alert on a match.
    """

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setWindowTitle('Local watcher')
        self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
        # media content: preload the alert sound at half volume
        self.screen = app.primaryScreen()
        self.mediaPLayer = QMediaPlayer()
        url = QUrl.fromLocalFile(QFileInfo("D:\\1.mp3").absoluteFilePath())
        qDebug(QDir.currentPath())
        content = QMediaContent(url)
        self.mediaPLayer.setMedia(content)
        self.mediaPLayer.setVolume(50)
        ##
        # Watched sub-rectangle of the target window, in pixels.
        self.rect = {
            'x': 424,
            'y': 42,
            'w': 88,
            'h': 400
        }
        ## Settings
        self.initUi()
        # Poll the watched window every 300 ms.
        self.timer = QTimer()
        self.timer.timeout.connect(self.onTimer)
        self.timer.start(300)

    def initUi(self):
        """Build the settings form: alarm checkbox, name, pin toggle and
        the four spinboxes defining the watched rectangle."""
        formLo = QFormLayout()
        labelAlarm = QLabel("Alarm?")
        self.cb = QCheckBox(self)
        formLo.addRow(labelAlarm, self.cb)
        labelName = QLabel("Name")
        self.inputName = QLineEdit(self)
        formLo.addRow(labelName, self.inputName)
        labelPin = QLabel("Pin")
        self.toggleButton = QPushButton("Toggle", self)
        self.toggleButton.setCheckable(True)
        self.toggleButton.clicked[bool].connect(self.toggleSettings)
        formLo.addRow(labelPin, self.toggleButton)
        self.labelX = QLabel("X:")
        self.sbX = QSpinBox(self)
        self.sbX.setRange(0, 1600)
        self.sbX.setValue(self.rect['x'])
        self.sbX.valueChanged[int].connect(self.changeValue)
        formLo.addRow(self.labelX, self.sbX)
        self.labelW = QLabel("W:")
        self.sbW = QSpinBox(self)
        self.sbW.setRange(50, 1600)
        self.sbW.setValue(self.rect['w'])
        self.sbW.valueChanged[int].connect(self.changeValue)
        formLo.addRow(self.labelW, self.sbW)
        self.labelY = QLabel("Y:")
        self.sbY = QSpinBox(self)
        self.sbY.setRange(10, 900)
        self.sbY.setValue(self.rect['y'])
        self.sbY.valueChanged[int].connect(self.changeValue)
        formLo.addRow(self.labelY, self.sbY)
        self.labelH = QLabel("H:")
        self.sbH = QSpinBox(self)
        self.sbH.setRange(10, 900)
        self.sbH.setValue(self.rect['h'])
        self.sbH.valueChanged[int].connect(self.changeValue)
        formLo.addRow(self.labelH, self.sbH)
        mainlo = QVBoxLayout(self)
        self.character = 'sublime'
        self.labelName = QLabel(self)
        self.labelName.setText(self.character)
        self.labelImage = QLabel()
        #self.labelImage.show()
        self.hwnd = main.get_hwnd(self.character)
        mainlo.addLayout(formLo)
        mainlo.addWidget(self.labelName)
        mainlo.addWidget(self.labelImage)
        self.setLayout(mainlo)

    def changeValue(self):
        """Copy the four spinbox values into the watched rectangle."""
        self.rect['x'] = self.sbX.value()
        self.rect['w'] = self.sbW.value()
        self.rect['y'] = self.sbY.value()
        self.rect['h'] = self.sbH.value()

    def closeEvent(self, event):
        """Ask for confirmation before quitting.

        FIX: the original passed QMessageBox.Yes as the `buttons` argument
        and QMessageBox.No as `defaultButton`, so the dialog offered only a
        Yes button and the user could never decline. The two flags are now
        OR-ed together as the button set, with No as the default.
        """
        reply = QMessageBox.question(self, 'Message',
            "Are you sure to quit?", QMessageBox.Yes | QMessageBox.No,
            QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()

    def toggleSettings(self, isVisible):
        """Hide/show the region controls and toggle the frameless-window hint."""
        flags = self.windowFlags()
        for widget in (self.sbX, self.sbY, self.sbW, self.sbH,
                       self.labelX, self.labelY, self.labelW, self.labelH):
            widget.setVisible(not isVisible)
        if isVisible:
            qDebug("Visible")
            self.setWindowFlags(flags | QtCore.Qt.FramelessWindowHint)
            self.show()
        else:
            qDebug("Invisible")
            self.setWindowFlags(flags ^ QtCore.Qt.FramelessWindowHint)
            self.show()

    def onTimer(self):
        """Grab the watched rectangle; when armed, alert on near-red pixels."""
        pixmap = self.screen.grabWindow(self.hwnd,
                                        self.rect['x'],
                                        self.rect['y'],
                                        self.rect['w'],
                                        self.rect['h'])
        if self.cb.isChecked():
            img = pixmap.toImage()
            # Sample every 5th row in the fixed column x == 5.
            gridY = [y for y in range(img.height()) if y % 5 == 0]
            x = 5
            targetColor = QtGui.QColor(255, 0, 0)
            minDelta = 99999999
            for i in gridY:
                pixel = QColor(img.pixel(x, i))
                # Manhattan distance in RGB space to pure red.
                delta = (abs(targetColor.red() - pixel.red())
                         + abs(targetColor.green() - pixel.green())
                         + abs(targetColor.blue() - pixel.blue()))
                if delta < minDelta:
                    minDelta = delta
            # qDebug(str(minDelta))
            if minDelta < 200:
                QtCore.qDebug("ALERT!!!")
                self.mediaPLayer.play()
                # Disarm until the checkbox is ticked again.
                self.cb.setChecked(False)
        # img.setPixel(x, i, targetColor.rgb())
        # pixmap = QtGui.QPixmap.fromImage(img)
        self.labelImage.setPixmap(pixmap)
# Instantiate the watcher window and enter the Qt event loop.
qb = MessageBox()
qb.show()
sys.exit(app.exec_())
|
import os
import json
import asyncio
import aio_pika
import datetime
from dateutil.parser import parse
from pyawad.request import RouteRequest, FareRequest, RequestException
# Broker connection string and the request/response queue names.
RABBIT_URL = 'amqp://data:passx@127.0.0.1:5672'
# NOTE(review): the environment variable names spell 'AMPQ' (probably meant
# 'AMQP'); kept as-is because deployments may already set these exact names.
REQUEST_QUEUE_NAME = os.environ.get('AMPQ-QUEUE-REQUEST', 'route-request')
RESPONSE_QUEUE_NAME = os.environ.get('AMPQ-QUEUE-RESPONSE', 'route-response')
async def main(loop):
    """Consume route-request tasks from RabbitMQ and publish fare results.

    For each task, both the outbound (there) and the return (back) legs are
    resolved through RouteRequest; the accumulated routes/fares payload is
    then published to the response queue.
    """
    connection = await aio_pika.connect_robust(RABBIT_URL, loop=loop)
    async with connection:
        # Creating channel
        channel = await connection.channel()
        # Declaring the request queue (consumed below)
        queue = await channel.declare_queue(
            REQUEST_QUEUE_NAME,
            durable=True,
            auto_delete=False
        )
        # Declare the response queue up front so publishes are never dropped.
        await channel.declare_queue(
            RESPONSE_QUEUE_NAME,
            durable=True,
            auto_delete=False
        )
        print('Connected to {0} ({1}).'.format(RABBIT_URL, REQUEST_QUEUE_NAME))
        async with queue.iterator() as queue_iter:
            async for message in queue_iter:
                async with message.process():
                    query = json.loads(message.body)
                    task_response = {
                        'status': None,
                        'task_id': query['task_id'],
                        'result': {
                            'fares': [],
                            'routes': [],
                        },
                    }
                    print('Got a task: {city_from} ({date_from}) — {city_to} ({date_till})'.format(**query))
                    query_there = {
                        'date': parse(query.get('date_from')),
                        'departure': query.get('city_from'),
                        'arrival': query.get('city_to'),
                    }
                    query_back = {
                        'date': parse(query.get('date_till')),
                        'departure': query.get('city_to'),
                        'arrival': query.get('city_from'),
                    }
                    # Get fares data and prepare response.
                    # FIX: loop variable renamed from `query`, which shadowed
                    # (and clobbered) the task payload dict above.
                    for leg_query in [query_there, query_back]:
                        route = RouteRequest(**leg_query)
                        await route.create()
                        print('Route created: {0.uid}'.format(route))
                        # Add route data to response.
                        task_response['result']['routes'].append(route.to_dict())
                        async for fare in route.find_fares():
                            task_response['result']['fares'].append(fare.to_dict())
                    # FIX: set the status BEFORE serializing. The original
                    # called json.dumps() first and assigned 'ok' afterwards,
                    # so consumers always received status None.
                    task_response['status'] = 'ok'
                    result = json.dumps(task_response).encode('utf8')
                    # Publish results to the response queue.
                    await channel.default_exchange.publish(
                        aio_pika.Message(body=result),
                        routing_key=RESPONSE_QUEUE_NAME,
                    )
                    print('Result with {0} fares is sent'.format(len(task_response['result']['fares'])))
if __name__ == "__main__":
    # Run the consumer until the connection context exits, then clean up.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(loop))
    loop.close()
    print('Connection closed.')
|
import platform
import os
# Relative directory (as path components) where input/output files live.
dirPathLst = ['..', '..','InOut']
# dirPathLst = ['..', 'InOut']
# NOTE(review): `type` shadows the builtin of the same name in this module;
# renaming it would touch every helper below, so it is only flagged here.
type = '.txt'
outPrefix = '_out'
# Join the components with the platform separator, then prefix one more.
dirPath = ''.join(s + os.sep for s in dirPathLst);
dirPath = os.sep + dirPath
def getFullPathInput(fileName):
    """Full path of an input file: <module dir><dirPath><fileName><type>."""
    base = os.path.dirname(__file__)
    return base + dirPath + fileName + type
def getFullPathOutput(fileName):
    """Full path of an output file: like the input path but with the
    '_out' suffix inserted before the extension."""
    base = os.path.dirname(__file__)
    return base + dirPath + fileName + outPrefix + type
def getFullPathOther(fileName):
    """Path of *fileName* directly inside this module's directory."""
    base = os.path.dirname(__file__)
    return base + os.sep + fileName
from typing import List, Union
class Cell:
    """Single maze cell / graph node of the Pac-Man-style field."""

    def __init__(self, x, y, cell_type: Union[int, str] = 0, food: int = 1):
        '''cell_type: 0 or "field" for walkable floor, 1 or "wall" for a wall (default 0).
        food: 1 for regular food, 10 for an energizer; default 1 when the cell is a field.'''
        self.x, self.y = x, y  # cell coordinates
        self.previous = None  # previous node on the best path found so far
        self.cost = 58  # node cost used by the path search
        if isinstance(cell_type, str):
            # 'field' for a walkable cell, 'wall' for a blocked one.
            self.type = cell_type
        else:
            self.type = 'field' if cell_type == 0 else 'wall'
        # Walls carry no food/energy information, hence None for them.
        # (Idiom fix: the original wrapped these comparisons in the redundant
        # `True if ... else False` form.)
        self.has_food = (food == 1) if self.type != 'wall' else None
        self.has_energy = (food == 10) if self.type != 'wall' else None

    def reset(self):
        # Clear path-search state so the next path can be built from scratch.
        self.previous = None
        self.cost = 58

    def __repr__(self) -> str:
        # Zero-padded "xx-yy" coordinate tag.
        return ('0' if self.x < 10 else '') + str(self.x) + '-' + ('0' if self.y < 10 else '') + str(self.y)

    def __str__(self) -> str:
        # '-' wall, 'E' energizer, 'F' food, '0' empty floor.
        return '-' if self.type == 'wall' else ('E' if self.has_energy else ('F' if self.has_food else '0'))
# Shorthand so the huge matrix literal below stays (slightly) readable.
r = range
# 31-row x 28-column maze grid. The ASCII-art comment above each row shows
# its layout: '-' wall, 'F' food, 'E' energizer, '0'/' ' empty floor.
nodes_matrix: List[List[Cell]] = [
    # ----------------------------
    [Cell(x, 0, 1) for x in r(28)],
    # -FFFFFFFFFFFF--FFFFFFFFFFFF-
    [Cell(0, 1, 1), *(Cell(x, 1) for x in r(1, 13)), Cell(13, 1, 1),
     Cell(14, 1, 1), *(Cell(x, 1) for x in r(15, 27)), Cell(27, 1, 1)],
    # -F----F-----F--F-----F----F-
    [Cell(0, 2, 1), Cell(1, 2), *(Cell(x, 2, 1) for x in r(2, 6)), Cell(6, 2), *(Cell(x, 2, 1) for x in r(7, 12)),
     Cell(12, 2), Cell(13, 2, 1), Cell(14, 2, 1), Cell(15, 2), *(Cell(x, 2, 1) for x in r(16, 21)), Cell(21, 2),
     *(Cell(x, 2, 1) for x in r(22, 26)), Cell(26, 2), Cell(27, 2, 1)],
    # -E----F-----F--F-----F----E-
    [Cell(0, 3, 1), Cell(1, 3, 0, 10), *(Cell(x, 3, 1) for x in r(2, 6)), Cell(6, 3), *(Cell(x, 3, 1) for x in r(7, 12)),
     Cell(12, 3), Cell(13, 3, 1), Cell(14, 3, 1), Cell(15, 3), *(Cell(x, 3, 1) for x in r(16, 21)), Cell(21, 3),
     *(Cell(x, 3, 1) for x in r(22, 26)), Cell(26, 3, 0, 10), Cell(27, 3, 1)],
    # -F----F-----F--F-----F----F-
    [Cell(0, 4, 1), Cell(1, 4), *(Cell(x, 4, 1) for x in r(2, 6)), Cell(6, 4), *(Cell(x, 4, 1) for x in r(7, 12)),
     Cell(12, 4), Cell(13, 4, 1), Cell(14, 4, 1), Cell(15, 4), *(Cell(x, 4, 1) for x in r(16, 21)), Cell(21, 4),
     *(Cell(x, 4, 1) for x in r(22, 26)), Cell(26, 4), Cell(27, 4, 1)],
    # -FFFFFFFFFFFFFFFFFFFFFFFFFF-
    [Cell(0, 5, 1), *(Cell(x, 5) for x in r(1, 27)), Cell(27, 5, 1)],
    # -F----F--F--------F--F----F-
    [Cell(0, 6, 1), Cell(1, 6), *(Cell(x, 6, 1) for x in r(2, 6)), Cell(6, 6), Cell(7, 6, 1), Cell(8, 6, 1),
     Cell(9, 6), *(Cell(x, 6, 1) for x in r(10, 18)), Cell(18, 6), Cell(19, 6, 1), Cell(20, 6, 1), Cell(21, 6),
     *(Cell(x, 6, 1) for x in r(22, 26)), Cell(26, 6), Cell(27, 6, 1)],
    # -F----F--F--------F--F----F-
    [Cell(0, 7, 1), Cell(1, 7), *(Cell(x, 7, 1) for x in r(2, 6)), Cell(6, 7), Cell(7, 7, 1), Cell(8, 7, 1),
     Cell(9, 7), *(Cell(x, 7, 1) for x in r(10, 18)), Cell(18, 7), Cell(19, 7, 1), Cell(20, 7, 1), Cell(21, 7),
     *(Cell(x, 7, 1) for x in r(22, 26)), Cell(26, 7), Cell(27, 7, 1)],
    # -FFFFFF--FFFF--FFFF--FFFFFF-
    [Cell(0, 8, 1), *(Cell(x, 8) for x in r(1, 7)), Cell(7, 8, 1), Cell(8, 8, 1), *(Cell(x, 8) for x in r(9, 13)),
     Cell(13, 8, 1), Cell(14, 8, 1), *(Cell(x, 8) for x in r(15, 19)), Cell(19, 8, 1), Cell(20, 8, 1),
     *(Cell(x, 8) for x in r(21, 27)), Cell(27, 8, 1)],
    # ------F----- -- -----F------
    [*(Cell(x, 9, 1) for x in r(0, 6)), Cell(6, 9), *(Cell(x, 9, 1) for x in r(7, 12)), Cell(12, 9, 0, 0),
     Cell(13, 9, 1), Cell(14, 9, 1), Cell(15, 9, 0, 0), *(Cell(x, 9, 1) for x in r(16, 21)),
     Cell(21, 9), *(Cell(x, 9, 1) for x in r(22, 28))],
    # ------F----- -- -----F------
    [*(Cell(x, 10, 1) for x in r(0, 6)), Cell(6, 10), *(Cell(x, 10, 1) for x in r(7, 12)), Cell(12, 10, 0, 0),
     Cell(13, 10, 1), Cell(14, 10, 1), Cell(15, 10, 0, 0), *(Cell(x, 10, 1) for x in r(16, 21)), Cell(21, 10),
     *(Cell(x, 10, 1) for x in r(22, 28))],
    # ------F--          --F------
    [*(Cell(x, 11, 1) for x in r(0, 6)), Cell(6, 11), Cell(7, 11, 1), Cell(8, 11, 1),
     *(Cell(x, 11, 0, 0) for x in r(9, 19)), Cell(19, 11, 1), Cell(20, 11, 1), Cell(21, 11),
     *(Cell(x, 11, 1) for x in r(22, 28))],
    # ------F-- ---    --- --F------
    [*(Cell(x, 12, 1) for x in r(0, 6)), Cell(6, 12), Cell(7, 12, 1), Cell(8, 12, 1), Cell(9, 12, 0, 0),
     *(Cell(x, 12, 1) for x in r(10, 13)), Cell(13, 12, 0, 0), Cell(14, 12, 0, 0), *(Cell(x, 12, 1) for x in r(15, 18)),
     Cell(18, 12, 0, 0), Cell(19, 12, 1), Cell(20, 12, 1), Cell(21, 12), *(Cell(x, 12, 1) for x in r(22, 28))],
    # ------F-- -      - --F------
    [*(Cell(x, 13, 1) for x in r(0, 6)), Cell(6, 13), Cell(7, 13, 1), Cell(8, 13, 1), Cell(9, 13, 0, 0), Cell(10, 13, 1),
     *(Cell(x, 13, 0, 0) for x in r(11, 17)), Cell(17, 13, 1), Cell(18, 13, 0, 0), Cell(19, 13, 1), Cell(20, 13, 1),
     Cell(21, 13), *(Cell(x, 13, 1) for x in r(22, 28))],
    #       F   -      -   F
    [*(Cell(x, 14, 0, 0) for x in r(0, 6)), Cell(6, 14), *(Cell(x, 14, 0, 0) for x in r(7, 10)), Cell(10, 14, 1),
     *(Cell(x, 14, 0, 0) for x in r(11, 17)), Cell(17, 14, 1), *(Cell(x, 14, 0, 0) for x in r(18, 21)), Cell(21, 14),
     *(Cell(x, 14, 0, 0) for x in r(22, 28)), Cell(28, 14, 0, 0)],
    # ------F-- -      - --F------
    [*(Cell(x, 15, 1) for x in r(0, 6)), Cell(6, 15), Cell(7, 15, 1), Cell(8, 15, 1), Cell(9, 15, 0, 0), Cell(10, 15, 1),
     *(Cell(x, 15, 0, 0) for x in r(11, 17)), Cell(17, 15, 1), Cell(18, 15, 0, 0), Cell(19, 15, 1), Cell(20, 15, 1),
     Cell(21, 15), *(Cell(x, 15, 1) for x in r(22, 28))],
    # ------F-- -------- --F------
    [*(Cell(x, 16, 1) for x in r(0, 6)), Cell(6, 16), Cell(7, 16, 1), Cell(8, 16, 1), Cell(9, 16, 0, 0),
     *(Cell(x, 16, 1) for x in r(10, 18)), Cell(18, 16, 0, 0), Cell(19, 16, 1), Cell(20, 16, 1), Cell(21, 16),
     *(Cell(x, 16, 1) for x in r(22, 28))],
    # ------F--          --F------
    [*(Cell(x, 17, 1) for x in r(0, 6)), Cell(6, 17), Cell(7, 17, 1), Cell(8, 17, 1),
     *(Cell(x, 17, 0, 0) for x in r(9, 19)), Cell(19, 17, 1), Cell(20, 17, 1), Cell(21, 17),
     *(Cell(x, 17, 1) for x in r(22, 28))],
    # ------F-- -------- --F----F-
    [*(Cell(x, 18, 1) for x in r(0, 6)), Cell(6, 18), Cell(7, 18, 1), Cell(8, 18, 1), Cell(9, 18, 0, 0),
     *(Cell(x, 18, 1) for x in r(10, 18)), Cell(18, 18, 0, 0), Cell(19, 18, 1), Cell(20, 18, 1), Cell(21, 18),
     *(Cell(x, 18, 1) for x in r(22, 28))],
    # ------F-- -------- --F----F-
    [*(Cell(x, 19, 1) for x in r(0, 6)), Cell(6, 19), Cell(7, 19, 1), Cell(8, 19, 1), Cell(9, 19, 0, 0),
     *(Cell(x, 19, 1) for x in r(10, 18)), Cell(18, 19, 0, 0), Cell(19, 19, 1), Cell(20, 19, 1), Cell(21, 19),
     *(Cell(x, 19, 1) for x in r(22, 28))],
    # -FFFFFFFFFFFF--FFFFFFFFFFFF-
    [Cell(0, 20, 1), *(Cell(x, 20) for x in r(1, 13)), Cell(13, 20, 1),
     Cell(14, 20, 1), *(Cell(x, 20) for x in r(15, 27)), Cell(27, 20, 1)],
    # -F----F-----F--F-----F----F-
    [Cell(0, 21, 1), Cell(1, 21), *(Cell(x, 21, 1) for x in r(2, 6)), Cell(6, 21), *(Cell(x, 21, 1) for x in r(7, 12)),
     Cell(12, 21), Cell(13, 21, 1), Cell(14, 21, 1), Cell(15, 21), *(Cell(x, 21, 1) for x in r(16, 21)), Cell(21, 21),
     *(Cell(x, 21, 1) for x in r(22, 26)), Cell(26, 21), Cell(27, 21, 1)],
    # -F----F-----F--F-----F----F-
    [Cell(0, 22, 1), Cell(1, 22), *(Cell(x, 22, 1) for x in r(2, 6)), Cell(6, 22), *(Cell(x, 22, 1) for x in r(7, 12)),
     Cell(12, 22), Cell(13, 22, 1), Cell(14, 22, 1), Cell(15, 22), *(Cell(x, 22, 1) for x in r(16, 21)),
     Cell(21, 22), *(Cell(x, 22, 1) for x in r(22, 26)), Cell(26, 22), Cell(27, 22, 1)],
    # -EFF--FFFFFFF00FFFFFFF--FFE-
    [Cell(0, 23, 1), Cell(1, 23, 0, 10), Cell(2, 23), Cell(3, 23), Cell(4, 23, 1), Cell(5, 23, 1),
     *(Cell(x, 23) for x in r(6, 13)), Cell(13, 23, 0, 0), Cell(14, 23, 0, 0), *(Cell(x, 23) for x in r(15, 22)),
     Cell(22, 23, 1), Cell(23, 23, 1), Cell(24, 23), Cell(25, 23), Cell(26, 23, 0, 10), Cell(27, 23, 1)],
    # ---F--F--F--------F--F--F---
    [*(Cell(x, 24, 1) for x in r(0, 3)), Cell(3, 24), Cell(4, 24, 1), Cell(5, 24, 1), Cell(6, 24), Cell(7, 24, 1),
     Cell(8, 24, 1), Cell(9, 24), *(Cell(x, 24, 1) for x in r(10, 18)), Cell(18, 24), Cell(19, 24, 1), Cell(20, 24, 1),
     Cell(21, 24), Cell(22, 24, 1), Cell(23, 24, 1), Cell(24, 24), *(Cell(x, 24, 1) for x in r(25, 28))],
    # ---F--F--F--------F--F--F---
    [*(Cell(x, 25, 1) for x in r(0, 3)), Cell(3, 25), Cell(4, 25, 1), Cell(5, 25, 1), Cell(6, 25), Cell(7, 25, 1),
     Cell(8, 25, 1), Cell(9, 25), *(Cell(x, 25, 1) for x in r(10, 18)), Cell(18, 25), Cell(19, 25, 1), Cell(20, 25, 1),
     Cell(21, 25), Cell(22, 25, 1), Cell(23, 25, 1), Cell(24, 25), *(Cell(x, 25, 1) for x in r(25, 28))],
    # -FFFFFF--FFFF--FFFF--FFFFFF-
    [Cell(0, 26, 1), *(Cell(x, 26) for x in r(1, 7)), Cell(7, 26, 1), Cell(8, 26, 1), *(Cell(x, 26) for x in r(9, 13)),
     Cell(13, 26, 1), Cell(14, 26, 1), *(Cell(x, 26) for x in r(15, 19)), Cell(19, 26, 1), Cell(20, 26, 1),
     *(Cell(x, 26) for x in r(21, 27)), Cell(27, 26, 1)],
    # -F----------F--F----------F-
    [Cell(0, 27, 1), Cell(1, 27), *(Cell(x, 27, 1) for x in r(2, 12)), Cell(12, 27), Cell(13, 27, 1),
     Cell(14, 27, 1), Cell(15, 27), *(Cell(x, 27, 1) for x in r(16, 26)), Cell(26, 27), Cell(27, 27, 1)],
    # -F----------F--F----------F-
    [Cell(0, 28, 1), Cell(1, 28), *(Cell(x, 28, 1) for x in r(2, 12)), Cell(12, 28), Cell(13, 28, 1),
     Cell(14, 28, 1), Cell(15, 28), *(Cell(x, 28, 1) for x in r(16, 26)), Cell(26, 28), Cell(27, 28, 1)],
    # -FFFFFFFFFFFFFFFFFFFFFFFFFF-
    [Cell(0, 29, 1), *(Cell(x, 29) for x in r(1, 27)), Cell(27, 29, 1)],
    # ----------------------------
    [Cell(x, 30, 1) for x in r(28)]
]
if __name__ == '__main__':
    # Render the maze as text, one line per matrix row.
    print(*[''.join([str(el) for el in l])for l in nodes_matrix], sep='\n')
|
# #自定义函数体
# def my_abs(x):
# if not isinstance(x, (int, float)):
# raise TypeError('bad operand type')
# if x >= 0:
# return x
# else:
# return -x
# Release channel identifiers for the various app markets/stores.
# NOTE: "YYBM" appears twice, exactly as in the original list.
channels_release = (
    "YYBA", "YYBM", "HUAWEIM", "JLGLWBM",
    "YYBM", "MIM", "BDM", "360M", "WDJM", "UCAPPM", "MXM", "ANZHUOM", "91M",
    "ANZHIM", "LENOVOM", "OPPOM",
    "SGPM", "YYHM", "YOUYIM", "YIYONGHUI", "LIQUM",
    "YOUYUEM", "VIVOM",
    "LEM",
    "CHUIZIM",
)
# Print each channel on its own line.
for channel in channels_release:
    print(channel)
|
from base import JiraBaseAction
class JiraRemoveVote(JiraBaseAction):
    """Action that removes the current user's vote from the given JIRA issue."""
    def _run(self, issue):
        # Delegate straight to the underlying JIRA client provided by the base class.
        return self.jira.remove_vote(issue)
|
'''
5. Дан список чисел. Определите, сколько в этом списке элементов, которые
больше двух своих соседей, и выведите количество таких элементов. Крайние
элементы списка никогда не учитываются, поскольку у них недостаточно соседей.
'''
from random import randint

# Count elements strictly greater than BOTH neighbours in a random list.
m = int(input('Enter the number of list item: '))
a = [randint(1, 30) for _ in range(m)]
b = []  # the qualifying elements themselves
count = 0  # renamed from `max`, which shadowed the builtin
print(a)
# FIX: interior indices run 1 .. m-2 inclusive, i.e. range(1, m-1); the
# original range(1, m-2) skipped the last interior element. Edge elements
# are excluded by design — they lack one neighbour.
for i in range(1, m-1):
    if a[i-1] < a[i] > a[i+1]:
        count += 1
        b.append(a[i])
print('the number of items that are larger than their neighbors =', count)
print('volume of item', b)
#Embedded file name: ACEStream\Core\CacheDB\Notifier.pyo
import sys
import threading
from traceback import print_exc, print_stack
from ACEStream.Core.simpledefs import *
class Notifier:
    """Observer registry for cache-DB change events (Python 2 code).

    Observers subscribe to one subject (optionally restricted to specific
    change types and a single object id). notify() collects the matching
    callbacks under a lock and then invokes them, either directly or via
    the optional task pool.
    """
    # All subjects an observer may subscribe to (constants come from
    # ACEStream.Core.simpledefs via the star import at file top).
    SUBJECTS = [NTFY_PEERS,
                NTFY_TORRENTS,
                NTFY_PLAYLISTS,
                NTFY_COMMENTS,
                NTFY_PREFERENCES,
                NTFY_MYPREFERENCES,
                NTFY_ACTIVITIES,
                NTFY_REACHABLE,
                NTFY_CHANNELCAST,
                NTFY_VOTECAST,
                NTFY_RICH_METADATA,
                NTFY_SUBTITLE_CONTENTS,
                NTFY_DISPERSY]
    # Singleton instance slot (name-mangled to _Notifier__single).
    __single = None

    def __init__(self, pool = None):
        # Enforce the singleton: construct only through getInstance().
        if Notifier.__single:
            raise RuntimeError, 'Notifier is singleton'
        self.pool = pool
        self.observers = []
        self.observerLock = threading.Lock()
        Notifier.__single = self

    def getInstance(*args, **kw):
        # Lazily create and return the shared Notifier instance.
        if Notifier.__single is None:
            Notifier(*args, **kw)
        return Notifier.__single

    getInstance = staticmethod(getInstance)

    # NOTE(review): the mutable default list for changeTypes is shared
    # across calls; it is never mutated here, so it is only a latent hazard.
    def add_observer(self, func, subject, changeTypes = [NTFY_UPDATE, NTFY_INSERT, NTFY_DELETE], id = None):
        # Register func for (subject, changeTypes, id) under the lock.
        obs = (func,
               subject,
               changeTypes,
               id)
        self.observerLock.acquire()
        self.observers.append(obs)
        self.observerLock.release()

    def remove_observer(self, func):
        # Remove every registration of func; manual index walk because the
        # list shrinks in place while scanning.
        self.observerLock.acquire()
        i = 0
        while i < len(self.observers):
            ofunc = self.observers[i][0]
            if ofunc == func:
                del self.observers[i]
            else:
                i += 1
        self.observerLock.release()

    def notify(self, subject, changeType, obj_id, *args):
        # Collect matching callbacks under the lock, then invoke them
        # outside it (directly, or queued on the task pool if present).
        tasks = []
        self.observerLock.acquire()
        for ofunc, osubject, ochangeTypes, oid in self.observers:
            try:
                # oid is None means "any object id".
                if subject == osubject and changeType in ochangeTypes and (oid is None or oid == obj_id):
                    tasks.append(ofunc)
            except:
                print_stack()
                print_exc()
                print >> sys.stderr, 'notify: OIDs were', `oid`, `obj_id`
        self.observerLock.release()
        args = [subject, changeType, obj_id] + list(args)
        for task in tasks:
            if self.pool:
                self.pool.queueTask(task, args)
            else:
                task(*args)
|
def gcd(a, b):
    """Greatest common divisor, iterative Euclidean algorithm.

    Mirrors the recursive original exactly: gcd(0, b) == b.
    """
    while a != 0:
        a, b = b % a, a
    return b
def lcm(a, b):
    """Least common multiple of a and b.

    FIX: uses floor division (//) so the result stays an integer; the
    original plain `/` would produce a float under true division, and the
    quotient is always exact since gcd(a, b) divides a*b.
    """
    return a*b//gcd(a,b)
# Smallest number evenly divisible by every integer from 1 to 19.
ans = 1
for i in range(1, 20):
    ans = lcm(ans, i)
# FIX: call-style print works on both Python 2 and 3; the bare
# `print ans` statement was Python-2-only syntax.
print(ans)
|
# Read two integer intervals (a, b) and (c, d); print the length of their overlap.
a,b,c,d=map(int, input().split())
if b<=c or d<=a:
    # Disjoint (or merely touching) intervals: no overlap.
    ans = 0
elif a<=c:
    if b<=d:
        ans = b-c
    elif d<=b:
        # (c, d) lies entirely inside (a, b).
        ans = d-c
elif c<=a:
    if d<=b:
        ans = d-a
    elif b<=d:
        # (a, b) lies entirely inside (c, d).
        ans = b-a
# NOTE(review): this branch is unreachable — a == c is already captured by
# the a <= c branch above.
elif a==c and b==d:
    ans = b-a
print(ans)
#############
# Cleaner form: the overlap length is min(b, d) - max(a, c), clamped at zero.
a, b, c, d = map(int, input().split())
print(max(0, (min(b, d) - max(a,c))))
|
from pymongo import MongoClient
# Connect to the local MongoDB and select the dbsparta database.
client = MongoClient('localhost', 27017)
db = client.dbsparta
# Fetch every document from the movies collection.
all_movies = list(db.movies.find())
#print(all_movies)
# Users whose star rating equals the string '9.60' (ratings are stored as strings).
same_stars = list(db.users.find({'star':'9.60'}))
print(same_stars)
|
import os
import speak
import datetime as dt
import commands as cmd
from colorama import Fore, Style
# Time
def tell_time():
    """Print the current HH:MM in green and speak the full HH:MM:SS."""
    now = dt.datetime.now()
    print(Fore.GREEN + now.strftime("%H:%M") + Style.RESET_ALL)
    speak.speak_only(now.strftime("%H:%M:%S"))
# Date
def tell_date():
    """Speak today's date."""
    speak.speak(dt.datetime.now().date())
# Greeting
def greeting(name, real_name):
    """Print (with real_name) and speak (with name) a time-of-day greeting,
    then ask how to help."""
    hour = dt.datetime.now().hour
    # Pick the salutation once; output strings match the originals exactly.
    if hour < 4:
        salutation = 'Hello'
    elif hour < 12:
        salutation = 'Good Morning'
    elif hour < 16:
        salutation = 'Good Afternoon'
    else:
        salutation = 'Good Evening'
    print(Fore.GREEN + f'{salutation} {real_name}' + Style.RESET_ALL)
    speak.speak_only(f'{salutation} {name}')
    speak.speak('How can I help you?')
# Go offline
def go_offline(name, real_name):
    """Say goodbye (printed with real_name, spoken with name) and exit."""
    farewell = 'Good bye {}, Have a nice day!'
    print(Fore.GREEN + farewell.format(real_name) + Style.RESET_ALL)
    speak.speak_only(farewell.format(name))
    quit()
# Shutdown Computer
def shutdown():
    """Ask for spoken confirmation, then shut Windows down immediately."""
    speak.speak('Do you want to shutdown your computer?')
    answer = cmd.takeCommand().lower()
    if any(word in answer for word in ('yes', 'sure')):
        speak.speak('Shutting down computer!')
        os.system("shutdown /s /t 1")
# Restart Computer
def restart():
    """Ask for spoken confirmation, then restart Windows immediately."""
    speak.speak('Do you want to restart your computer?')
    answer = cmd.takeCommand().lower()
    if any(word in answer for word in ('yes', 'sure')):
        speak.speak('Restarting computer!')
        os.system("shutdown /r /t 1")
def logout():
    """Log the current Windows user off.

    FIX: the original ran ``shutdown -1`` — the digit one is not a valid
    shutdown flag (almost certainly a typo for lowercase L). Use /l, the
    log-off flag, matching the /s and /r style used elsewhere in this file.
    """
    os.system("shutdown /l")
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Enterprise Management Solution
# GRP Estado Uruguay
# Copyright (C) 2017 Quanam (ATEL SA., Uruguay)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'GRP - Contratos de Proveedores',
'version': '1.0',
'author': 'Quanam',
'website': 'www.quanam.com',
'category': 'Accounting & Finance',
'images': [],
'depends': [
'base',
'account',
'mail',
'grp_seguridad',
'grp_tesoreria',
'contracts_pro',
'report_xls',
'grp_activo_fijo'
],
'description': """
Mantenimiento de contratos de proveedores
""",
'demo': [],
'test': [],
'data': [
'security/grp_contrato_proveedores_security.xml',
'wizard/grp_crear_adenda_wizard.xml', # TODO C SPRING 12 GAP_75_76
'wizard/grp_crear_prorroga_wizard.xml', # TODO: K SPRING 12 GAP 70, 71, 73, 74
'wizard/grp_crear_renovacion_wizard.xml', # TODO: K SPRING 12 GAP 70, 71, 73, 74
'views/grp_contrato_proveedores_view.xml',
'wizard/grp_invoice_contract_cession_wizard_view.xml', # TODO: M SPRING 12 GAP 77
'views/grp_account_invoice_view.xml',# TODO: L SPRING 12 GAP 499
'views/grp_tipo_contrato_view.xml', # TODO: K SPRING 12 GAP 67
'views/grp_cotizaciones_view.xml', # TODO: K SPRING 12 GAP 67
'views/grp_afectacion_view.xml', # TODO: K SPRING 12 GAP 205
'views/grp_compromiso_view.xml', # TODO: K SPRING 12 GAP 205
'views/grp_valores_custodia_view.xml', # TODO C SPRING 12 GAP_315
'views/grp_historial_contratos_view.xml', # TODO L SPRING 12 GAP 84
'views/grp_contrato_historial_parametrica_report_view.xml', # TODO C SPRING 12 GAP_360
'wizard/grp_crear_contratos_wizard.xml', # TODO: K SPRING 12 GAP 67
'wizard/grp_motivo_desvios_montos_wizard.xml', # TODO: K SPRING 12 GAP 205
'wizard/grp_resumen_ejecucion_contrato_wizard.xml', # TODO: K SPRING 13 GAP 452
'wizard/grp_ejecucion_futura_contrato_wizard.xml', # TODO: K SPRING 13 GAP 452
'data/sequence_data.xml',
'data/grp_contrato_proveedores_alertas_data.xml', # TODO: L SPRING 12 GAP 85
'security/ir.model.access.csv',
'report/contract_account_cession_report.xml',# TODO: M SPRING 12 GAP 79
'report/grp_resumen_ejecucion_contrato.xml',# TODO: K SPRING 13 GAP 452
'report/grp_estimado_ejecutar_contrato_view.xml',# TODO: K SPRING 13 GAP 452
'report/grp_ejecucion_futura_contrato.xml',# TODO: K SPRING 13 GAP 452
'report/grp_registro_ejecucion_futura_contrato_view.xml',# TODO: K SPRING 13 GAP 452
'views/account_asset_asset_view.xml',
'views/invite_view.xml',
],
'installable': True,
'auto_install': False,
}
|
import numpy as np
from .grad1D import grad1D
from scipy import sparse
from scipy.sparse import csr_matrix
def grad2D(k, m, dx, n, dy):
    """Computes a two-dimensional mimetic gradient operator

    Arguments:
        k (int): Order of accuracy
        m (int): Number of cells along x-axis
        dx (float): Step size along x-axis
        n (int): Number of cells along y-axis
        dy (float): Step size along y-axis

    Returns:
        :obj:`ndarray` containing discrete gradient operator
    """
    # One-dimensional gradient operators along each axis.
    Gx = grad1D(k, m, dx)
    Gy = grad1D(k, n, dy)
    # (m+2) x m and (n+2) x n "padded identity" matrices: an identity block
    # framed by a zero row above and below.
    # FIX: np.float (a deprecated alias of the builtin float) was removed in
    # NumPy 1.24; np.float64 is the equivalent concrete dtype.
    Im = csr_matrix((m + 2, m), dtype=np.float64)
    In = csr_matrix((n + 2, n), dtype=np.float64)
    Im[1:m+1, :] = sparse.eye(m, m, dtype=np.float64, format='csr')
    In[1:n+1, :] = sparse.eye(n, n, dtype=np.float64, format='csr')
    # Kronecker products extend the 1-D operators to the 2-D grid.
    Sx = sparse.kron(In.T, Gx, format='csr')
    Sy = sparse.kron(Gy, Im.T, format='csr')
    return sparse.vstack([Sx, Sy], format='csr')
if __name__ == '__main__':
    # Smoke test: 2nd-order operator on a 5x6-cell grid with unit spacing.
    print(grad2D(2, 5, 1, 6, 1))
|
# Read a year and report (in Chinese) whether it is a leap year.
year = int(input("请输入一个年份"))
# Leap year rule: divisible by 4 but not by 100, or divisible by 400.
if(year%4 == 0 and year%100 != 0):
    print("%d年是闰年"%year)
elif year%400 == 0:
    print("%d年是闰年"%year)
else:
    print("%d年是平年"%year)
|
from django.shortcuts import render, redirect
from .forms import UserRegistrationForm, ProfileForm
from django.contrib.auth import login, authenticate
from .models import Profile
from store.utils import cartData
def create(request):
    """Register a new user; on success, authenticate, log in and go to the store.

    On GET (or an invalid POST) the registration form is re-rendered.
    """
    form = UserRegistrationForm()
    if request.method == "POST":
        form = UserRegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.data.get('username')
            password = form.data.get('password1')
            # FIX: authenticate() only verifies credentials; the first_name/
            # last_name kwargs the original passed were silently ignored by
            # the backend, so they are no longer collected or passed.
            user = authenticate(request, username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('Store')
    return render(request, 'users/create.html', {'form': form})
def profile_page(request):
    """Render the logged-in user's profile page, with cart badge data."""
    profile = Profile.objects.get(user=request.user.id)
    cart = cartData(request)
    context = {'cartItems': cart['cartItems'], 'profile': profile}
    return render(request, "users/profile.html", context)
def profile_update(request):
    """Edit the logged-in user's profile; handles an optional image upload.

    On a valid POST the form is saved, a newly uploaded image (if any) is
    attached to the profile, and the user is redirected to the profile page.
    """
    profile = Profile.objects.get(user=request.user.id)
    form = ProfileForm(instance=profile)
    if request.method == "POST":
        form = ProfileForm(request.POST, instance=profile)
        if form.is_valid():
            form.save()
            # Idiomatic None check; the debug print of request.FILES removed.
            image = request.FILES.get('image')
            if image is not None:
                profile.image = image
                profile.save()
            return redirect('ProfilePage')
    data = cartData(request)
    cartItems = data['cartItems']
    return render(request, "users/profile_update.html", {'cartItems': cartItems, 'form': form})
def open_calculator(x: int, y: int, z: int, N: int) -> int:
    """Return how many distinct digits of N are not among x, y and z."""
    working = {x, y, z}
    return sum(1 for digit in set(str(N)) if int(digit) not in working)
# Read the three working digits and the target number, then report.
x, y, z = (int(v) for v in input().split())
N = int(input())
print(open_calculator(x, y, z, N))
|
import math
import wave
import struct
import uuid
from apps.texthandler.models import TextBlock, Audio
FRAME_SIZE = 100000
def split_file(audio_uuid, file_dir):
    """Split an uploaded WAV file into sequentially numbered chunk files.

    Looks up the Audio row by UUID, reads ``<file_dir><name>.wav`` one frame
    at a time, halves each sample's amplitude, and writes the result into
    ``<name>_<j>.wav`` chunk files, creating a TextBlock row per chunk.

    Args:
        audio_uuid (str): UUID string identifying the Audio record.
        file_dir (str): Directory prefix (with trailing slash) for the WAV.
    """
    audio = Audio.objects.filter(uuid=uuid.UUID(audio_uuid))[0]
    filename = audio.filename
    # strip the extension (everything after the first dot)
    filename = filename.split(".")[0]
    ifile = wave.open(file_dir + filename + ".wav")
    sampwidth = ifile.getsampwidth()
    # struct format and DC offset per sample width in bytes (1, 2 or 4);
    # 8-bit WAV is unsigned, hence the 128 offset
    fmts = (None, "=B", "=h", None, "=l")
    fmt = fmts[sampwidth]
    dcs = (None, 128, 0, None, 0)
    dc = dcs[sampwidth]
    j = 1
    first_file = "{}{}_{}.wav".format(file_dir,filename, j)
    ofile = wave.open(first_file, "w")
    text_block = TextBlock(sequence_number=j, audio=audio, filename=first_file.split('/')[-1])
    text_block.save()
    ofile.setparams(ifile.getparams())
    # NOTE(review): rate/width/channels are overridden only for the FIRST
    # chunk; later chunks keep the source parameters — confirm intended.
    ofile.setframerate(16000)
    ofile.setsampwidth(2)
    ofile.setnchannels(1)
    for i in range(ifile.getnframes()):
        # NOTE(review): floor(i / j) == FRAME_SIZE makes chunk boundaries
        # grow (j*FRAME_SIZE apart) rather than fixed-size — confirm.
        if math.floor(i / j) == FRAME_SIZE:
            j += 1
            ofile.close()
            new_filename = "{}{}_{}.wav".format(file_dir,filename, j)
            text_block = TextBlock(sequence_number=j, audio=audio, filename=new_filename.split('/')[-1])
            text_block.save()
            ofile = wave.open(new_filename, "w")
            ofile.setparams(ifile.getparams())
        # assumes a mono source: readframes(1) is unpacked as ONE sample —
        # TODO confirm; multi-channel input would break struct.unpack here
        iframe = ifile.readframes(1)
        iframe = struct.unpack(fmt, iframe)[0]
        # halve the amplitude around the format's DC offset
        iframe -= dc
        oframe = iframe / 2
        oframe += dc
        oframe = struct.pack(fmt, math.ceil(oframe))
        # NOTE(review): each frame is written twice (doubling duration) —
        # confirm this is deliberate
        ofile.writeframes(oframe)
        ofile.writeframes(oframe)
    ifile.close()
    ofile.close()
|
# This file contains the Politician, Button, and Point classes
# A Choice has a name, Twitter username, party, boolean value chosen,
# and position coords
class Choice(object):
    """A selectable politician entry: name, Twitter username, party
    ("red" or "blue"), selection state, and bounding-box coordinates."""

    def __init__(self, name, username, party, x0, y0, x1, y1):
        self.name = name
        self.party = party          # "red" or "blue"
        self.username = username    # Twitter username
        self.chosen = False
        self.x0, self.y0 = x0, y0
        self.x1, self.y1 = x1, y1

    def clicked(self):
        """Toggle the selection state."""
        self.chosen = not self.chosen
# An Arrow has coordinates and corresponding image
class Arrow(object):
    """An arrow: bounding-box coordinates plus the image drawn there."""

    def __init__(self, x0, y0, x1, y1, image):
        self.x0, self.y0 = x0, y0
        self.x1, self.y1 = x1, y1
        self.image = image
# A Politician has a name, Twitter username, and party, and count of tweets
class Politician(object):
    """A politician: name, Twitter username, party, and a tweet count."""

    def __init__(self, name, username, party):
        self.name = name
        self.party = party          # "red" or "blue"
        self.username = username    # Twitter username
        self.count = 0              # matching-tweet count, set later

    def setCount(self, count):
        """Replace the stored tweet count."""
        self.count = count
# A Button has a center, radius, and Politician object
class Button(object):
    """A circular button: center (x, y), radius, and its Politician."""

    def __init__(self, x, y, r, politician):
        self.x, self.y = x, y
        self.r = r
        self.politician = politician
# A Point has a center, radius, and list of tweets that match the query
class Point(object):
    """A plotted point: center, radius, x-axis label slot, and the list of
    tweets that matched the query."""

    def __init__(self, tweets):
        self.tweets = tweets
        self.x = self.y = self.r = 0
        self.xLabel = 0

    def setAttributes(self, x, y, r, xLabel):
        """Set position, radius and label slot in one call."""
        self.x = x
        self.y = y
        self.r = r
        self.xLabel = xLabel
# A TweetBox has coords, tweet, display (formatted tweet), and a header
class TweetBox(object):
    """A rendered tweet: bounding box, raw tweet, formatted display text,
    and a header line."""

    def __init__(self, x0, y0, x1, y1, tweet, display, header):
        self.position(x0, y0, x1, y1)
        self.tweet = tweet
        self.display = display
        self.header = header

    def position(self, x0, y0, x1, y1):
        """Move the box to a new bounding rectangle."""
        self.x0, self.y0 = x0, y0
        self.x1, self.y1 = x1, y1

    def setWidth(self, x0, x1):
        """Change only the horizontal extent."""
        self.x0 = x0
        self.x1 = x1
# To avoid trivial solutions, try to solve this problem without the
# function int(s, base=16)
import unittest
from hexadecimal import hexa
class HexadecimalTest(unittest.TestCase):
    """Exercises hexa(): a string of hex digits -> int, ValueError on
    invalid input.  Case-mixed inputs appear in the fixtures below."""

    def test_valid_hexa1(self):
        self.assertEqual(hexa('1'), 1)
    def test_valid_hexa2(self):
        self.assertEqual(hexa('c'), 12)
    def test_valid_hexa3(self):
        self.assertEqual(hexa('10'), 16)
    def test_valid_hexa4(self):
        self.assertEqual(hexa('af'), 175)
    def test_valid_hexa5(self):
        self.assertEqual(hexa('100'), 256)
    def test_valid_hexa6(self):
        self.assertEqual(hexa('19ACE'), 105166)
    def test_valid_hexa7(self):
        self.assertEqual(hexa('000000'), 0)
    def test_valid_hexa8(self):
        self.assertEqual(hexa('ffff00'), 16776960)
    def test_valid_hexa9(self):
        self.assertEqual(hexa('00fff0'), 65520)
    def test_invalid_hexa(self):
        # non-hex characters must raise ValueError with a non-empty message
        with self.assertRaisesWithMessage(ValueError):
            hexa('carrot')
    # Utility functions
    def assertRaisesWithMessage(self, exception):
        # delegates to assertRaisesRegex: any non-empty message is accepted
        return self.assertRaisesRegex(exception, r".+")


if __name__ == '__main__':
    unittest.main()
|
"""Error DTOs"""
import sys
from typing import Union
if sys.version_info < (3, 11): # pragma: no cover
from typing_extensions import TypedDict
else: # pragma: no cover
from typing import TypedDict
class MetisErrorErrorDTO(TypedDict):
    """Error's error payload DTO: the nested object carrying the message."""

    message: str
class MetisErrorDTO(TypedDict):
    """Error DTO: HTTP-style status plus either a structured error payload
    or a bare message string."""

    status: int
    error: Union[MetisErrorErrorDTO, str]
|
# Straightforward first version, written without worrying about efficiency.
# n's upper bound is small, so time/space cost is lower than expected.
# Store value, original index, and post-sort index together, then pick the
# appropriate element for each situation when sorting.
n = int(input())
arr = list(map(int, input().split()))
# attach each value's original position
for i in range(n):
    arr[i] = [arr[i], i]
arr.sort()
# after sorting by value, attach each element's rank
for i in range(n):
    arr[i].append(i)
# restore original order, then print every element's rank
arr = sorted(arr, key=lambda x: x[1])
for i in range(n):
    print(arr[i][2], end=' ')
#!/usr/bin/python3
def class_to_json(obj):
    """Return the dictionary description (simple data structures only)
    of an object, suitable for JSON serialization.

    obj: an instance of a class
    """
    return obj.__dict__
|
from typing import List
# complexity is O(k * 2 ^ n')
def backtrack(nums: List[int], partialsol: List[int], target: int, currsum: int):
    """Yield (possibly duplicated) sorted tuples of numbers from `nums`,
    with repetition, that sum exactly to `target`.

    `partialsol` is the mutable working combination; `currsum` is its sum.
    Complexity is O(k * 2^n') over the pruned search tree.
    """
    # BUG FIX: removed the leftover debug print that fired on every call.
    if currsum == target:
        yield tuple(sorted(partialsol))
    else:
        for num in nums:
            if currsum + num <= target:
                # make move
                partialsol.append(num)
                yield from backtrack(nums, partialsol, target, currsum + num)
                # unmake move
                partialsol.pop()
class Solution:
    """LeetCode 39 — Combination Sum."""

    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return every unique combination of candidates summing to target."""
        unique = {combo for combo in backtrack(candidates, [], target, 0)}
        return [list(combo) for combo in unique]
# Demo run against the classic example input.
candidates = [2, 3, 6, 7]
target = 7
s = Solution()
print(s.combinationSum(candidates, target))
|
#!/usr/bin/env python3
import re
double_letter_matcher = re.compile(r"(.)\1")


def is_nice(name):
    """Return True if the string is "nice" (AoC 2015 day 5, part 1):
    at least three vowels, no forbidden pair, and a doubled letter."""
    # rule 1: at least three vowels (repeats count)
    if len([c for c in name if c in 'aeiou']) < 3:
        return False
    # rule 2: none of the forbidden substrings
    if 'ab' in name or 'cd' in name or 'pq' in name or 'xy' in name:
        return False
    # rule 3: some letter appears twice in a row.  Return a plain bool
    # instead of the Match/None object the search yields.
    return bool(double_letter_matcher.search(name))
def test_is_nice():
    """Examples taken from the puzzle statement."""
    for word in ('ugknbfddgicrmopn', 'aaa'):
        assert is_nice(word)
    for word in ('jchzalrnumimnmhp', 'haegwjzuvuyypxyu', 'dvszwmarrgswjxmb'):
        assert not is_nice(word)
if __name__ == "__main__":
    # Count nice lines in the puzzle input (newlines don't affect the rules).
    with open("input.txt", "r") as f:
        nice = [line for line in f if is_nice(line)]
    print(len(nice))
|
rules = []
my_ticket = []
nearby_tickets = []
data_type = 'rules'
# Walk the input once; the two header lines switch which section is filled.
for line in [raw[:-1] for raw in open('data.txt')]:
    if line == '':
        continue
    if line == 'your ticket:':
        data_type = 'your ticket'
        continue
    if line == 'nearby tickets:':
        data_type = 'nearby tickets'
        continue
    if data_type == 'rules':
        # shape: "name: a-b or c-d"
        name, _, spec = line.partition(':')
        first, second = spec.split(' or ')
        lo1, hi1 = first.split('-')
        lo2, hi2 = second.split('-')
        rules.append({
            'name': name,
            'ranges': [
                {'low': int(lo1), 'high': int(hi1)},
                {'low': int(lo2), 'high': int(hi2)},
            ]
        })
    if data_type == 'your ticket':
        my_ticket = [int(j) for j in line.split(',')]
    if data_type == 'nearby tickets':
        nearby_tickets.append([int(j) for j in line.split(',')])
def invalid_values(values):
    """Return the ticket values that satisfy no range of any rule."""
    def matches_any_rule(value):
        return any(
            rng['low'] <= value <= rng['high']
            for rule in rules
            for rng in rule['ranges']
        )

    return [value for value in values if not matches_any_rule(value)]
# Part 1: the ticket scanning error rate is the sum of all invalid values.
invalids = [value for ticket in nearby_tickets for value in invalid_values(ticket)]
print(sum(invalids))
from __future__ import print_function
import base64
import json
import logging
import cv2
import face_recognition
import grpc
import numpy as np
import cctv_stream_pb2
import cctv_stream_pb2_grpc
from core.face_recognition_lib import face_identification
def face_recognition_v1(stub):
    """Fetch one frame via the unary SendFrame RPC and run identification.

    NOTE(review): the RPC is called once *outside* the loop, so the same
    frame is decoded and processed forever — the call probably belongs
    inside the loop.  (Unused: run() calls the streaming variant.)
    Relies on module-level known_face_encodings/known_face_names being set
    in the __main__ block before this runs.
    """
    response = stub.SendFrame(cctv_stream_pb2.Request())
    while True:
        data = base64.b64decode(response.data)
        # reshape order is (width, high, ch) as received; 'high' is
        # presumably the frame height — TODO confirm proto field semantics
        frame = np.frombuffer(data, dtype=np.uint8).reshape(response.width, response.high, response.ch)
        face_identification(frame, known_face_encodings, known_face_names)
def face_recognition_stream(stub):
    """Consume the server-streaming SendFrameStream RPC, identify faces in
    each frame, and display it; press 'q' in the window to stop.

    Relies on module-level known_face_encodings/known_face_names being set
    in the __main__ block before this runs.
    """
    print("face_recognition_stream started")
    responses = stub.SendFrameStream(cctv_stream_pb2.Request())
    for response in responses:
        data = base64.b64decode(response.data)
        # 'high' is presumably the frame height — TODO confirm proto fields
        frame = np.frombuffer(data, dtype=np.uint8).reshape(response.width, response.high, response.ch)
        face_identification(frame, known_face_encodings, known_face_names)
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
def run():
    """Connect to the local CCTV gRPC service and consume its frame stream."""
    with grpc.insecure_channel('localhost:52021') as channel:
        # face_recognition_v1(stub) is the unary single-frame variant
        face_recognition_stream(cctv_stream_pb2_grpc.CCTVStreamStub(channel))
if __name__ == '__main__':
    logging.basicConfig()
    # Build the reference gallery at startup; face_encodings(...)[0] assumes
    # exactly one face per reference image (IndexError otherwise).
    obama_image = face_recognition.load_image_file("obama.png")
    obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
    biden_image = face_recognition.load_image_file("biden.jpeg")
    biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
    jihar_image = face_recognition.load_image_file("jihar.jpg")
    jihar_face_encoding = face_recognition.face_encodings(jihar_image)[0]
    # Globals read by the recognition functions above.
    known_face_encodings = [
        obama_face_encoding,
        biden_face_encoding,
        jihar_face_encoding,
    ]
    known_face_names = [
        "Barack Obama",
        "Joe Biden",
        "Jihar",
    ]
    # NOTE(review): these four appear unused in this file — presumably state
    # for face_identification or leftovers from the upstream example; verify.
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    run()
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.ftp.hooks.ftp import FTPHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class S3ToFTPOperator(BaseOperator):
    """
    This operator enables the transferring of files from S3 to a FTP server.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:S3ToFTPOperator`

    :param s3_bucket: The targeted s3 bucket. This is the S3 bucket from
        where the file is downloaded.
    :param s3_key: The targeted s3 key. This is the specified file path for
        downloading the file from S3.
    :param ftp_path: The ftp remote path. This is the specified file path for
        uploading file to the FTP server.
    :param aws_conn_id: reference to a specific AWS connection
    :param ftp_conn_id: The ftp connection id. The name or identifier for
        establishing a connection to the FTP server.
    """

    template_fields: Sequence[str] = ("s3_bucket", "s3_key", "ftp_path")

    def __init__(
        self,
        *,
        s3_bucket,
        s3_key,
        ftp_path,
        aws_conn_id="aws_default",
        ftp_conn_id="ftp_default",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.s3_bucket = s3_bucket
        self.s3_key = s3_key
        self.ftp_path = ftp_path
        self.aws_conn_id = aws_conn_id
        self.ftp_conn_id = ftp_conn_id

    def execute(self, context: Context):
        s3_hook = S3Hook(self.aws_conn_id)
        ftp_hook = FTPHook(ftp_conn_id=self.ftp_conn_id)
        s3_obj = s3_hook.get_key(self.s3_key, self.s3_bucket)
        # Stream the object through a local temp file, then push it to FTP.
        with NamedTemporaryFile() as local_tmp_file:
            self.log.info("Downloading file from %s", self.s3_key)
            s3_obj.download_fileobj(local_tmp_file)
            local_tmp_file.seek(0)
            ftp_hook.store_file(self.ftp_path, local_tmp_file.name)
            # BUG FIX: the message argument used to be wrapped in a set
            # literal ({self.ftp_path}) — a leftover f-string conversion.
            self.log.info("File stored in %s", self.ftp_path)
|
import tftables
import tensorflow as tf
with tf.device('/cpu:0'):
    # This function preprocesses the batches before they
    # are loaded into the internal queue.
    # You can cast data, or do one-hot transforms.
    # If the dataset is a table, this function is required.
    def input_transform(tbl_batch):
        labels = tbl_batch['label']
        data = tbl_batch['data']
        # NOTE(review): num_labels is not defined in this snippet — it must
        # exist before this runs.  tf.to_float is TF1-era API (removed in
        # TF2); this file reads like a tftables documentation example.
        truth = tf.to_float(tf.one_hot(labels, num_labels, 1, 0))
        data_float = tf.to_float(data)
        return truth, data_float
    # Open the HDF5 file and create a loader for a dataset.
    # The batch_size defines the length (in the outer dimension)
    # of the elements (batches) returned by the reader.
    # Takes a function as input that pre-processes the data.
    loader = tftables.load_dataset(filename='bottleneck_fc_model.h5',
                                   dataset_path='/data',
                                   input_transform=input_transform,
                                   batch_size=16)
    # To get the data, we dequeue it from the loader.
    # Tensorflow tensors are returned in the same order as input_transformation
    truth_batch, data_batch = loader.dequeue()
    # The placeholder can then be used in your network
    # NOTE(review): my_network and num_iterations are also undefined here.
    result = my_network(truth_batch, data_batch)
    with tf.Session() as sess:
        # This context manager starts and stops the internal threads and
        # processes used to read the data from disk and store it in the queue.
        with loader.begin(sess):
            for _ in range(num_iterations):
                sess.run(result)
# iterate over a heterogeneous list (strings, ints, a float)
for item in ['Mosh', 'john', 'sarah', 'michel', 1, 2, 5.6, ]:
    print(item)
# counter: prints 0, 2, 4, 6, 8
for i in range(0, 10, 2):
    print(i)
# countdown: prints 50, 45, ..., 10
for i in range(50, 5, -5):
    print(i)
# drawing: one row of "x" characters per entry
numbers = [5, 2, 5, 2, 2]
for i in numbers:
    print("x" * i)
# list
prices = [15, 20, 50]  # product prices
total = 0
for i in prices:  # i takes each element of prices in turn (loop variable)
    prix = i
    print(f"prix = {prix} euros")  # print every element of prices
    total += i
print(f"Total des prix = {total} euros")
# nested loops: all (x, y) coordinate pairs
for x in range(3):
    for y in range(2):
        print(f"({x}, {y})")
|
#!/usr/bin/env python
import sys
from EPPs.common import StepEPP
# the freezer location of the sample entering the step should be updated to match the step UDFs. The script checks
# if the sample is a submitted sample or aderived sample and updates the corresponding UDFs
class UpdateFreezerLocation(StepEPP):
    """Copy the step's freezer-location UDFs onto each sample in the step:
    submitted samples get their sample UDFs updated, derived samples get
    their artifact UDFs updated."""

    def _run(self):
        # NOTE(review): bare attribute access — presumably warms a cached
        # samples list; it has no other visible effect.  Confirm or remove.
        self.samples
        # process each artifact in the step
        for artifact in self.artifacts:
            # if the sample is a submitted sample then the artifact in the step will match the artifact obtained if
            # we obtain the submitted sample and then its equivalent artifact
            if artifact.id == artifact.samples[0].artifact.id:
                artifact.samples[0].udf['Freezer'] = self.process.udf.get('New Freezer Location')
                # NOTE(review): 'Shelf' is filled from 'New Freezer Location';
                # this looks like a copy-paste of the line above — probably
                # should read 'New Shelf Location'.  Confirm the step UDFs.
                artifact.samples[0].udf['Shelf'] = self.process.udf.get('New Freezer Location')
                artifact.samples[0].put()
            # if the sample is a derived sample then the artifact in the step will not match the artifact obtained if
            # we obtain the submitted sample and then its equivalent artifact
            elif artifact.id != artifact.samples[0].artifact.id:
                artifact.udf['Freezer'] = self.process.udf.get('New Freezer Location')
                # NOTE(review): same suspected 'New Shelf Location' issue here
                artifact.udf['Shelf'] = self.process.udf.get('New Freezer Location')
                artifact.put()


if __name__ == '__main__':
    sys.exit(UpdateFreezerLocation().run())
|
from pprint import pprint
import logging
import random
import uuid
from datetime import datetime

import boto3
import yaml
from botocore.exceptions import ClientError
from flask import Flask, render_template
from flask_ask import Ask, question, statement, session
logger = logging.getLogger("flask_ask")
logger.setLevel(logging.DEBUG)
app = Flask(__name__)
ask = Ask(app, "/")
#####################################################################
# Session helper functions
#####################################################################
class AlexaSession():
    """Thin helpers over the flask-ask session: track which prompt the user
    is answering, and expose the Alexa user id."""

    @staticmethod
    def set_handler(handler):
        """Remember which handler produced the last prompt."""
        session.attributes['handler'] = handler

    @staticmethod
    def get_handler():
        """Return the stored handler name, or None when nothing was set."""
        return session.attributes.get('handler')

    @staticmethod
    def user_id():
        """Return the Alexa user id for the current session."""
        return session.user.userId
#####################################################################
# Classes
#####################################################################
class ThreatModelGame:
    """Per-user game state persisted in the DynamoDB table
    ``threat_model_games``.

    ``user_data`` mirrors one table item::

        {user_id, current_game_id,
         games: {game_id: {name, seed, index, created, updated}}}
    """

    def __init__(self):
        self.user_data = {}

    def load_table(self):
        """Bind ``self.table``, creating the DynamoDB table on first use."""
        table_name = 'threat_model_games'
        logger.debug("loading table {0}".format(table_name))
        client = boto3.client('dynamodb')
        resource = boto3.resource('dynamodb')
        try:
            client.describe_table(TableName=table_name)
        except Exception as e:
            # the "not found" case is detected from the error message text
            if "Requested resource not found: Table" in str(e):
                self.table = resource.create_table(
                    TableName=table_name,
                    KeySchema=[
                        {'AttributeName': 'user_id', 'KeyType': 'HASH'}
                    ],
                    AttributeDefinitions=[
                        {'AttributeName': 'user_id', 'AttributeType': 'S'}
                    ],
                    ProvisionedThroughput={
                        'ReadCapacityUnits': 5,
                        'WriteCapacityUnits': 5
                    }
                )
                # block until the new table is actually queryable
                self.table.meta.client.get_waiter('table_exists').wait(TableName=table_name)
            else:
                raise
        self.table = resource.Table(table_name)

    def load_data(self):
        """Load this user's item, creating a default "Quick Start" game when
        no item exists yet."""
        logger.info("loading data")
        try:
            response = self.table.get_item(Key={'user_id': AlexaSession.user_id()})
        except ClientError as e:
            # BUG FIX: ClientError was referenced without being imported
            # (NameError at runtime); now imported from botocore.exceptions.
            logger.error(e.response['Error']['Message'])
        else:
            if 'Item' in response:
                self.user_data = response['Item']
                logger.debug("loaded existing data for user_id {0}".format(AlexaSession.user_id()))
            else:
                game_id = self.new_game_id()
                game_name = "Quick Start"
                self.user_data['user_id'] = AlexaSession.user_id()
                self.user_data['current_game_id'] = game_id
                self.user_data['games'] = {
                    game_id: {
                        'name': game_name,
                        'seed': self.new_seed(),
                        'index': 0,
                        'created': datetime.now().isoformat(),
                        'updated': datetime.now().isoformat()
                    }
                }
                self.table.put_item(Item=self.user_data)
                # BUG FIX: this log call was missing its {0} placeholder
                logger.debug("created new data for user_id {0}".format(AlexaSession.user_id()))

    def load(self):
        """Bind the table and load the user's data in one call."""
        self.load_table()
        self.load_data()

    def save(self):
        """Persist the in-memory state back to DynamoDB."""
        self.table.put_item(Item=self.user_data)

    def game_id(self):
        """Id of the game the user is currently playing."""
        return self.user_data['current_game_id']

    def current_game(self):
        """State dict of the current game."""
        return self.user_data['games'][self.game_id()]

    def new_game_id(self):
        """Fresh random game identifier."""
        return str(uuid.uuid4())

    def new_seed(self):
        """Fresh random 32-bit shuffle seed."""
        return random.randint(0, 2 ** 32 - 1)

    def seed(self):
        return self.current_game()['seed']

    def reset_seed(self):
        """Store a fresh seed, so the deck re-shuffles on next restore."""
        self.user_data['games'][self.game_id()]['seed'] = self.new_seed()
        self.save()

    def name(self):
        return self.current_game()['name']

    def index(self):
        # int() because DynamoDB numbers come back as Decimal
        return int(self.current_game()['index'])

    def reset_index(self):
        """Move back to the first card."""
        self.user_data['games'][self.game_id()]['index'] = 0
        self.save()

    def next_index(self):
        """Advance one card and return the new index."""
        self.user_data['games'][self.game_id()]['index'] += 1
        self.save()
        return self.index()

    def previous_index(self):
        """Step back one card and return the new index."""
        self.user_data['games'][self.game_id()]['index'] -= 1
        self.save()
        return self.index()
class ThreatModelCardDeck:
    """The deck of threat-model cards, shuffled deterministically from the
    attached game's stored seed."""

    def load_cards(self):
        """Read cards.yaml into self.cards in canonical suit/rank order."""
        self.cards = []
        logger.debug("reading cards.yaml")
        with open("cards.yaml") as fh:
            # BUG/SEC FIX: yaml.load without a Loader is deprecated and can
            # construct arbitrary objects; safe_load is sufficient here.
            card_data = yaml.safe_load(fh)
        for suit in card_data["suit_order"]:
            for rank in card_data["rank_order"]:
                if rank in card_data["suits"][suit]:
                    self.cards.append({
                        "rank": rank,
                        "rank_word": card_data["ranks"][rank],
                        "description": card_data["suits"][suit][rank],
                        "suit": suit
                    })

    def load(self, game):
        """Attach a game and restore its deck order."""
        self.game = game
        self.load_cards()
        self.restore()

    def shuffle(self, seed):
        """Shuffle a fresh copy of the cards with the given seed."""
        self.deck = list(self.cards)
        random.Random(seed).shuffle(self.deck)

    def restore(self):
        """Rebuild the deck order from the attached game's stored seed."""
        seed = self.game.seed()
        logger.debug("restoring deck with seed {0}".format(seed))
        self.shuffle(seed)

    def card_at_index(self, index):
        """Return the card at an arbitrary position in the shuffled deck."""
        return self.deck[index]

    def card(self):
        """Return the card at the game's current position."""
        index = self.game.index()
        logger.debug("return card at index {0}".format(index))
        return self.deck[index]

    def next_card(self):
        """Advance (unless at the last card) and return the current card."""
        if self.game.index() < len(self.cards) - 1:
            self.game.next_index()
        return self.card()

    def previous_card(self):
        """Step back (unless at the first card) and return the current card."""
        if self.game.index() > 0:
            self.game.previous_index()
        return self.card()
# Module-level singletons shared by every intent handler below.
game = ThreatModelGame()
deck = ThreatModelCardDeck()
#####################################################################
# Intent functions
#####################################################################
@ask.intent("AMAZON.YesIntent")
def alexa_yes():
    """Route a "yes" answer based on which prompt the user is replying to."""
    dispatch = {
        'help_info': alexa_how_to_play,
        'how_to_play_question': alexa_how_to_play,
        'how_to_play_info': alexa_threat_modelling,
        'threat_modelling_question': alexa_threat_modelling,
        'threat_modelling_info': alexa_about_game,
        'about_game_question': alexa_about_game,
    }
    target = dispatch.get(AlexaSession.get_handler())
    if target is None:
        return statement(render_template('nohandler'))
    return target()
@ask.intent("AMAZON.NoIntent")
def alexa_no():
    """Route a "no" answer based on which prompt the user is replying to."""
    end_of_help = lambda: statement(render_template('end_of_help'))
    dispatch = {
        'help_info': threat_modelling_question,
        'how_to_play_question': threat_modelling_question,
        'how_to_play_info': about_game_question,
        'threat_modelling_question': about_game_question,
        'threat_modelling_info': end_of_help,
        'about_game_question': end_of_help,
    }
    target = dispatch.get(AlexaSession.get_handler())
    if target is None:
        return statement(render_template('nohandler'))
    return target()
@ask.intent("AMAZON.HelpIntent")
def alexa_help():
    """Speak the top-level help prompt."""
    AlexaSession.set_handler('help_info')
    prompt = render_template('help_info')
    return question(prompt)
@ask.intent("HowToPlayIntent")
def alexa_how_to_play():
    """Explain how the game is played."""
    AlexaSession.set_handler('how_to_play_info')
    prompt = render_template('how_to_play_info')
    return question(prompt)
@ask.intent("ThreatModellingIntent")
def alexa_threat_modelling():
    """Explain what threat modelling is."""
    AlexaSession.set_handler('threat_modelling_info')
    prompt = render_template('threat_modelling_info')
    return question(prompt)
def threat_modelling_question():
    """Ask whether the user wants to hear about threat modelling."""
    AlexaSession.set_handler('threat_modelling_question')
    prompt = render_template('threat_modelling_question')
    return question(prompt)
@ask.intent('AboutGameIntent')
def alexa_about_game():
    """Describe the game; a statement, so the session ends afterwards."""
    AlexaSession.set_handler('about_game_info')
    prompt = render_template('about_game_info')
    return statement(prompt)
def about_game_question():
    """Ask whether the user wants to hear about the game itself."""
    AlexaSession.set_handler('about_game_question')
    prompt = render_template('about_game_question')
    return question(prompt)
@ask.intent("RandomCardIntent")
def alexa_random_card():
    """Speak one card drawn at random, without touching saved game state."""
    AlexaSession.set_handler('random_card')
    # FIX: dropped the no-op `global game/deck` declarations — the globals
    # are only read (and mutated in place), never rebound.
    deck.load_cards()
    deck.shuffle(game.new_seed())
    msg = render_template('random_card', card=deck.card_at_index(0))
    return statement(msg)
@ask.launch
def alexa_launch():
    """Skill launch: load the saved game and speak the current card."""
    AlexaSession.set_handler('launch')
    # FIX: dropped the no-op `global game/deck` declarations (read-only use).
    game.load()
    deck.load(game)
    msg = render_template('welcome', name=game.name(), prefix='first', card=deck.card())
    return statement(msg)
@ask.intent("CurrentCardIntent")
def alexa_current_card():
    """Repeat the card at the game's current position."""
    AlexaSession.set_handler('current_card')
    # FIX: dropped the no-op `global game/deck` declarations (read-only use).
    game.load()
    deck.load(game)
    msg = render_template('card', prefix='current', card=deck.card())
    return statement(msg)
@ask.intent("AMAZON.NextIntent")
@ask.intent("NextCardIntent")
def alexa_next_card():
    """Advance to the next card, or report that the deck is exhausted."""
    AlexaSession.set_handler('next_card')
    # FIX: dropped the no-op `global game/deck` declarations (read-only use).
    game.load()
    deck.load(game)
    current_card = deck.card()
    new_card = deck.next_card()
    # next_card() returns the same card when already at the end of the deck
    if new_card == current_card:
        msg = render_template('no_cards', prefix='last', card=current_card)
    else:
        msg = render_template('next_card', prefix='new', card=new_card)
    return statement(msg)
@ask.intent("AMAZON.PreviousIntent")
@ask.intent("PreviousCardIntent")
def alexa_previous_card():
    """Step back one card, or report that we are already at the first."""
    AlexaSession.set_handler('previous_card')
    # FIX: dropped the no-op `global game/deck` declarations (read-only use).
    game.load()
    deck.load(game)
    current_card = deck.card()
    new_card = deck.previous_card()
    # previous_card() returns the same card when already at the start
    if new_card == current_card:
        msg = render_template('first_card', prefix='first', card=current_card)
    else:
        msg = render_template('previous_card', prefix='new', card=new_card)
    return statement(msg)
@ask.intent("RestartGameIntent")
def alexa_restart_game():
    """Reset position and seed, re-shuffle, and speak the new first card."""
    AlexaSession.set_handler('restart_game')
    # FIX: dropped the no-op `global game/deck` declarations (read-only use).
    game.load()
    deck.load(game)
    game.reset_index()
    game.reset_seed()
    deck.restore()
    msg = render_template('restart_game', name=game.name(), prefix='first', card=deck.card())
    return statement(msg)
#####################################################################
# Main
#####################################################################
# Local development entry point; production runs behind a WSGI/Lambda shim.
if __name__ == '__main__':
    app.run(debug=True)
|
import random
def test(t):
if t == 1:
b = '9am,12am,5pm,12pm'
else:
b = 'fuckpm,all,any,open,str'
return b
print(test(2))
|
"""
import baseclasses for pytraj
"""
from __future__ import absolute_import
from .datasets.cast_dataset import cast_dataset
from .frame import Frame
from .core.topology_objects import Atom, Residue, Molecule
from .datafiles.datafiles import DataFileList
from .c_action.actionlist import ActionList
from .core.c_core import CpptrajState
from .datasets.c_datasetlist import DatasetList
from .core.c_core import AtomMask
from .trajectory import Trajectory
from .topology import Topology
from .core.c_core import ArgList
from .trajectory_iterator import TrajectoryIterator
from .c_traj.c_trajout import TrajectoryWriter
from . import c_dict
__all__ = ['Atom', 'Residue', 'Molecule', 'Topology', 'Frame', 'Trajectory',
'TrajectoryIterator', 'AtomMask', 'ArgList', 'CpptrajState',
'DatasetList', 'DataFileList', 'ActionList', 'TrajectoryWriter',
'cast_dataset', 'c_dict']
|
import sys
import requests
import json
import threading
import time
from dcusb.driver import LEDMessageBoard
leds = LEDMessageBoard()
# shared flag: the clock thread draws only while this is True
clock_on = True
# 7-row custom glyph (a musical note) for the LED board, one list per row
note = {
    1 : [1, 0, 0, 0, 0, 1, 1],
    2 : [1, 0, 0, 0, 0, 1, 1],
    3 : [1, 0, 1, 1, 0, 1, 1],
    4 : [1, 0, 1, 1, 0, 1, 1],
    5 : [1, 0, 1, 0, 0, 1, 1],
    6 : [0, 0, 1, 0, 0, 1, 1],
    7 : [0, 0, 1, 1, 1, 1, 1],
}
def clock():
    # Show HH:MM on the LED board, alternating ':' and ' ' as the separator
    # (i cycles 0..10 in steps of 2, switching format past 8) so the colon
    # appears to blink.  At exactly 13:00 a "hometime" banner scrolls.
    global clock_on
    i = 0
    while(True):
        try:
            if clock_on == True:
                format_string = "%H:%M"
                if i > 8:
                    format_string = "%H %M"
                if i == 10:
                    i = 0
                localtime = time.localtime()
                time_string = time.strftime(format_string, localtime)
                if time_string == '13:00':
                    leds.clear_screen()
                    leds.scroll_message('!!! hometime !!!')
                else:
                    leds.write_string(time_string, 0)
                    leds.push_screen()
                time.sleep(0.2)
                i += 2
            else:
                # the lastfm thread has taken over the display; idle
                time.sleep(1)
        except KeyboardInterrupt:
            # NOTE(review): KeyboardInterrupt is delivered to the main
            # thread, so this handler is unlikely to fire in a worker.
            sys.exit(1)
def lastfm():
    # Python 2 code (bare `print` statements).  Polls last.fm roughly every
    # 15s (api_counter advances by 2 per 0.2s sleep, modulo 150) for the
    # now-playing track; when the track changes, it pauses the clock, draws
    # three note glyphs and scrolls "artist - title [plays: N]".
    global clock_on
    # proxy_config = {'http': 'http://proxy.bauer-uk.bauermedia.group:3128'}
    # proxy = urllib2.ProxyHandler(proxy_config)
    # opener = urllib2.build_opener(proxy)
    # urllib2.install_opener(opener)
    api_counter = 0
    now_playing_mbid = None
    # NOTE(review): API key and username are hard-coded into the URL —
    # consider moving them to configuration.
    track_api_url = "http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=5805551071db81a56b74eecbced7318c&username=jackrabbitslim&format=json&mbid="
    while(True):
        try:
            if api_counter % 150 == 0:
                api_url = "http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&user=jackrabbitslim&api_key=5805551071db81a56b74eecbced7318c&format=json"
                try:
                    # local name shadows this function; harmless but confusing
                    lastfm = requests.get(api_url)
                    # `.json` as a property is pre-1.0 requests — in modern
                    # requests it is a method; confirm the pinned version
                    tracks = lastfm.json
                    now_playing = tracks['recenttracks']['track'][0]
                    if '@attr' in now_playing:
                        playing_now = now_playing['@attr']['nowplaying']
                    else:
                        playing_now = False
                    # NOTE(review): playing_now is computed but never used
                    song_changed = (now_playing_mbid != now_playing['mbid'])
                    if song_changed and api_counter != 0:
                        clock_on = False
                        now_playing_mbid = now_playing['mbid']
                        song = now_playing['artist']['#text'] + ' - ' + now_playing['name']
                        # song = song.lower()
                        now_playing_url = track_api_url + now_playing_mbid
                        lastfm = requests.get(now_playing_url)
                        track_info = lastfm.json
                        plays = track_info['track']['userplaycount']
                        song += ' [plays: ' + plays + ']'
                        print song
                        leds.write_char(None, 0, glyph=note)
                        leds.write_char(None, 7, glyph=note)
                        leds.write_char(None, 14, glyph=note)
                        leds.push_screen()
                        leds.scroll_message(song, 28)
                        clock_on = True
                except Exception as e:
                    # best-effort: log and hand the display back to the clock
                    print e
                    clock_on = True
                    pass
            time.sleep(0.2)
            api_counter += 2
        except KeyboardInterrupt:
            sys.exit(1)
print "Running clock code now..."
t_clock = threading.Thread(target=clock)
t_clock.daemon = True
t_clock.start()
t_lastfm = threading.Thread(target=lastfm)
t_lastfm.daemon = True
t_lastfm.start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
sys.exit(1)
# from Pubnub import Pubnub
# pubnub = Pubnub.Pubnub(
# '',
# "sub-eff4f180-d0c2-11e1-bee3-1b5222fb6268", ## SUBSCRIBE_KEY
# None, ## SECRET_KEY
# False ## SSL_ON?
# )
# pubnub.set_proxy('http://proxy.bauer-uk.bauermedia.group', '3128')
# def receive(message):
# global clock_on
# print message
# if message['artist'] != '' and message['title'] != '':
# clock_on = False
# song = message['artist'] + ' - ' + message['title']
# print song
# song = song.lower()
# leds.clear_screen()
# leds.write_char(None, 0, f=note)
# leds.write_char(None, 7, f=note)
# leds.write_char(None, 14, f=note)
# leds.push_screen()
# time.sleep(1)
# leds.push_screen()
# time.sleep(1)
# leds.scroll_message(song)
# clock_on = True
# return True
# print "Waiting for songs..."
# pubnub.subscribe({
# 'channel' : 'np_99',
# 'callback' : receive
# })
# leds.scroll_message('abcdefghijklmnopqrstuvwxyz?!-. ')
|
from django.db import models
# (stored value, human-readable label) choices for Employee.designation
DESIGNATIONS = (
    ('SE','Sales Executive'),
    ('MGR','Manager')
)
class Employee(models.Model):
    # A staff member: identity, dates, designation (see DESIGNATIONS), pay.
    name = models.CharField('Employee Name',max_length=64)
    employee_ID = models.CharField(max_length=16)
    dob = models.DateTimeField('Date Of Birth')
    doj = models.DateTimeField('Date Of Joining')
    designation = models.CharField(max_length=3, choices=DESIGNATIONS)
    basic_salary = models.IntegerField(default=0)
# NOTE(review): module-level model instantiation at import time; it is never
# saved or referenced in this file — looks like leftover debug code. Confirm
# nothing imports it, then remove.
dummy_employee = Employee()
class Item(models.Model):
    # A sellable product.  NOTE(review): monetary values use FloatField;
    # DecimalField is the usual choice, but changing it needs a migration.
    item_name = models.CharField('Item Name',max_length=64)
    item_ID = models.CharField(max_length=16)
    sale_price = models.FloatField('Item Sale Price',default=0.0)
    item_cost = models.FloatField('Item Cost',default=0.0)
    manufacturer = models.CharField('Manufacturing Company name',max_length=64)
    mfd_date = models.DateTimeField('Manufacturing Date')
    exp_date = models.DateTimeField('Expiry Date')
    points = models.FloatField(default=0.0)
class Branch(models.Model):
    # A store branch; its manager link survives employee deletion (SET_NULL).
    branch_ID = models.CharField(max_length=16)
    branch_manager = models.ForeignKey(Employee, on_delete=models.SET_NULL, null=True)
    location = models.CharField(max_length=32)
class Inventory(models.Model):
    # Stock level of one item at one branch; rows vanish with either parent.
    branch = models.ForeignKey(Branch, on_delete=models.CASCADE)
    item = models.ForeignKey(Item, on_delete=models.CASCADE)
    qty = models.IntegerField('Quantity Left')
class Transaction(models.Model):
    # One sale event; keeps its row if the sales executive is deleted.
    transaction_ID = models.CharField(max_length=16)
    sales_executive = models.ForeignKey(Employee, on_delete=models.SET_NULL, null=True)
    branch = models.ForeignKey(Branch, on_delete=models.CASCADE)
    date = models.DateField('Transaction Date')
class Cart(models.Model):
    # A line item of a transaction: which item and how many were sold.
    transaction = models.ForeignKey(Transaction, on_delete=models.CASCADE)
    item = models.ForeignKey(Item, on_delete=models.CASCADE)
    qty = models.IntegerField('Quantity Sold')
|
import sys
import cv2
import numpy as np
def main():
    """Parse -s/-t/-o flags, run colour transfer, and write the result.

    Usage: python this.py -s source.jpg -t target.jpg [-o output.jpg]
    """
    # BUG FIX: the old check required 7 argv entries even though the usage
    # text says -o is optional; 5 entries (script + -s/-t pairs) suffice.
    if len(sys.argv) < 5:
        print(f'Error: Expect more arguments.\n'
              f'Usage: python {__file__} -s source.jpg -t target.jpg -o output.jpg\n'
              f'if output filename is not provided, \'output.jpg\' is default.')
        exit()
    sourcefilename = ''
    targetfilename = ''
    outfilename = ''
    # range stops one short so sys.argv[i + 1] can never raise IndexError
    for i in range(len(sys.argv) - 1):
        if sys.argv[i] == '-s':
            sourcefilename = sys.argv[i + 1]
        elif sys.argv[i] == '-t':
            targetfilename = sys.argv[i + 1]
        elif sys.argv[i] == '-o':
            outfilename = sys.argv[i + 1]
    # BUG FIX: missing -s or -t used to cause a NameError further down
    if not sourcefilename or not targetfilename:
        print('Error: both -s and -t must be given.')
        exit()
    if outfilename == '':
        outfilename = 'output.jpg'
    sourcefile = cv2.imread(sourcefilename)
    targetfile = cv2.imread(targetfilename)
    outputfile = color_transfer(sourcefile, targetfile)
    cv2.imwrite(outfilename, outputfile)
def color_transfer(source, target, sideinfodeci='sideinfodeci.txt'):
    '''
    Transfer the global per-channel colour statistics of *target* onto *source*.

    source, target: both are np.ndarray (H x W x 3, BGR channel order)
    sideinfodeci: path of a text file that receives the 12 channel statistics
        (source means r/g/b, source stds r/g/b, then the same for target),
        one '%.4f' value per line, for a later decoding step.

    Returns the recoloured image as a uint8 BGR ndarray.

    BUG FIX: the original cast the source channels to uint8 *before* the
    float arithmetic and never clipped the result, so out-of-range values
    wrapped around when written; we now compute in float64, clip to [0, 255],
    round, and return a proper uint8 image.
    '''
    src = source.astype(np.float64)
    tgt = target.astype(np.float64)
    # Per-channel statistics, in the image's native B, G, R channel order.
    src_mean = src.reshape(-1, 3).mean(axis=0)
    src_std = src.reshape(-1, 3).std(axis=0)
    tgt_mean = tgt.reshape(-1, 3).mean(axis=0)
    tgt_std = tgt.reshape(-1, 3).std(axis=0)
    with open(sideinfodeci, 'w') as f:
        # File layout is R, G, B ordered (means then stds), source then target.
        f.write('%.4f\n%.4f\n%.4f\n%.4f\n%.4f\n%.4f\n'
                '%.4f\n%.4f\n%.4f\n%.4f\n%.4f\n%.4f'
                % (src_mean[2], src_mean[1], src_mean[0],
                   src_std[2], src_std[1], src_std[0],
                   tgt_mean[2], tgt_mean[1], tgt_mean[0],
                   tgt_std[2], tgt_std[1], tgt_std[0]))
    # Classic Reinhard-style transfer, broadcast over the channel axis:
    # out = (tgt_std / src_std) * (pixel - src_mean) + tgt_mean
    out = (tgt_std / src_std) * (src - src_mean) + tgt_mean
    return np.rint(np.clip(out, 0, 255)).astype(np.uint8)
def _get_img_properties(img):
'''
helper function for color_transform
'''
b, g, r = cv2.split(img)
mean_r = r.mean()
std_r = r.std()
mean_g = g.mean()
std_g = g.std()
mean_b = b.mean()
std_b = b.std()
return mean_r, std_r, mean_g, std_g, mean_b, std_b
# Run the CLI only when executed directly (not on import).
if __name__ == '__main__':
    main()
|
# Generated by Django 2.2.13 on 2021-01-24 13:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Department.batches optional (blank=True) and give it an explicit
    related_name to avoid reverse-accessor clashes."""
    dependencies = [
        ('academics', '0012_auto_20210118_1633'),
    ]
    operations = [
        migrations.AlterField(
            model_name='department',
            name='batches',
            field=models.ManyToManyField(blank=True, related_name='department_batches', to='academics.Batch'),
        ),
    ]
|
from app import db
from datetime import datetime
import mistune
class Post(db.Model):
    """A blog post stored both as raw Markdown and as rendered HTML."""
    post_title = db.Column(db.Text)
    post_md = db.Column(db.Text)
    post_html = db.Column(db.Text)
    post_timestamp = db.Column(db.DateTime)
    post_id = db.Column(db.Integer, primary_key=True,
                        unique=True)

    def __init__(self, post_id, content, title):
        """Build a post from a Markdown file.

        post_id: primary key for the row
        content: filesystem path to the Markdown source file
        title: the post's title
        """
        # BUG FIXES: the original assigned the builtin ``id`` instead of the
        # ``post_id`` parameter, referenced an undefined ``post_title`` name
        # instead of ``title``, and rendered the nonexistent attribute
        # ``self.md_content`` instead of ``self.post_md``.
        self.post_id = post_id
        self.post_title = title
        self.post_md = self.get_md(content)
        self.post_html = self.get_html(self.post_md)
        self.post_timestamp = datetime.utcnow()

    def __repr__(self):
        # BUG FIX: the attribute is ``post_title``, not ``title``.
        return '<Title %r>' % (self.post_title)

    def get_md(self, content):
        """Read and return the Markdown source from the file at *content*."""
        with open(content, 'r') as f:
            md = f.read()
        return md

    def get_html(self, md_string):
        """Render *md_string* to HTML via mistune."""
        m = mistune.Markdown()
        return m(md_string)
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import typing
from typing import Tuple
from pyflink.ml.param import Param, FloatArrayArrayParam
from pyflink.ml.wrapper import JavaWithParams
from pyflink.ml.feature.common import JavaFeatureTransformer
from pyflink.ml.common.param import HasInputCols, HasOutputCols, HasHandleInvalid
class _BucketizerParams(
    JavaWithParams,
    HasInputCols,
    HasOutputCols,
    HasHandleInvalid
):
    """
    Params for :class:`Bucketizer`.
    """
    # One tuple of split points per input column; each inner tuple of k points
    # maps one continuous column into k - 1 buckets.
    SPLITS_ARRAY: Param[Tuple[Tuple[float, ...], ...]] = FloatArrayArrayParam(
        "splits_array",
        "Array of split points for mapping continuous features into buckets.",
        None)
    def __init__(self, java_params):
        super(_BucketizerParams, self).__init__(java_params)
    def set_splits_array(self, value: Tuple[Tuple[float, ...], ...]) -> '_BucketizerParams':
        """Set the per-column split points; returns self for chaining."""
        return typing.cast(_BucketizerParams, self.set(self.SPLITS_ARRAY, value))
    def get_split_array(self) -> Tuple[Tuple[float, ...], ...]:
        """Return the per-column split points."""
        return self.get(self.SPLITS_ARRAY)
    @property
    def split_array(self):
        return self.get_split_array()
class Bucketizer(JavaFeatureTransformer, _BucketizerParams):
    """
    A Transformer mapping multiple columns of continuous features to multiple
    columns of discrete features (bucket indices).  Indices lie in
    [0, numSplitsInThisColumn - 1].

    With the `keep` option of HasHandleInvalid, invalid data lands in an
    extra last bucket whose index equals the number of buckets.
    """

    def __init__(self, java_model=None):
        super(Bucketizer, self).__init__(java_model)

    @classmethod
    def _java_transformer_package_name(cls) -> str:
        # Package fragment locating the Java-side implementation.
        return "bucketizer"

    @classmethod
    def _java_transformer_class_name(cls) -> str:
        # Java class name of the transformer.
        return "Bucketizer"
|
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier,VotingClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn import preprocessing
from sklearn.metrics import precision_recall_curve, average_precision_score, precision_recall_fscore_support, classification_report
from sklearn.metrics import roc_curve, auc, roc_auc_score, f1_score, log_loss, precision_score, recall_score
import matplotlib.pyplot as plt
from sklearn import metrics
# Fixed seed so every run builds identical folds and model initialisations.
seed = 1603
np.random.seed(seed)
# Three variants of the validation data (ADASYN- and SMOTE-oversampled plus
# the untouched original), all sorted by the target column 'Group'.
adaData = pd.read_csv("./validationDatasets/ADASYN_validationData.csv", index_col = 0)
adaData = adaData.sort_values('Group')
smoteData = pd.read_csv("./validationDatasets/SMOTE_validationData.csv", index_col = 0)
smoteData = smoteData.sort_values('Group')
expData = pd.read_csv("./validationDatasets/Original_validationData.csv", index_col = 0)
expData = expData.sort_values('Group')
print(expData.head())
# Soft-voting ensemble of five classifiers with default hyper-parameters.
rf_default = RandomForestClassifier(random_state=seed)
svm_default = SVC(probability=True, random_state=seed)
lg_default = LogisticRegression(random_state=seed)
knn_default = KNeighborsClassifier()
lda_default = LinearDiscriminantAnalysis()
vote = VotingClassifier(estimators=[('SVM', svm_default), ('Random Forests', rf_default), ('LogReg', lg_default), ('KNN', knn_default), ('LDA',lda_default)], voting='soft')
##LDA
# Ensemble with hyper-parameters tuned on the ADASYN-oversampled data
# (tuning method presumably PSO, per the labels below — confirm).
rf_ADA = RandomForestClassifier(n_estimators = 11,random_state=seed)
svm_ADA = SVC(kernel='poly', gamma = 'auto',C =4.12481631472 ,probability=True, random_state=seed)
lg_ADA = LogisticRegression(solver = 'newton-cg',max_iter = 235,C = 135.6688, random_state=seed)
knn_ADA = KNeighborsClassifier(n_neighbors = 5)
lda_ADA = LinearDiscriminantAnalysis(tol = 0.000584259)
adasyn = VotingClassifier(estimators=[('SVM', svm_ADA), ('Random Forests', rf_ADA), ('LogReg', lg_ADA), ('KNN', knn_ADA), ('LDA',lda_ADA)], voting='soft')
# Ensemble tuned on the SMOTE-oversampled data.
rf_SMOTE = RandomForestClassifier(n_estimators = 84,random_state=seed)
svm_SMOTE = SVC(kernel='poly', gamma = 'auto',C =12.6360380346 ,probability=True, random_state=seed)
lg_SMOTE = LogisticRegression(solver = 'newton-cg',max_iter = 432,C = 50.99227570850435, random_state=seed)
knn_SMOTE = KNeighborsClassifier(n_neighbors = 5)
lda_SMOTE = LinearDiscriminantAnalysis(tol = 9.25895394348e-06)
smote = VotingClassifier(estimators=[('SVM', svm_SMOTE), ('Random Forests', rf_SMOTE), ('LogReg', lg_SMOTE), ('KNN', knn_SMOTE), ('LDA',lda_SMOTE)], voting='soft')
# Ensemble tuned on the original data ("Amazon" run).
rf_Amazon = RandomForestClassifier(n_estimators = 389,random_state=seed)
svm_Amazon = SVC(kernel='poly', gamma = 'auto',C = 2.48906112826 ,probability=True, random_state=seed)
lg_Amazon = LogisticRegression(solver = 'newton-cg',max_iter = 1022,C = 0.0224618581186563, random_state=seed)
knn_Amazon = KNeighborsClassifier(n_neighbors = 5)
lda_Amazon = LinearDiscriminantAnalysis(tol = 0.000785350859773)
pso = VotingClassifier(estimators=[('SVM', svm_Amazon), ('Random Forests', rf_Amazon), ('LogReg', lg_Amazon), ('KNN', knn_Amazon), ('LDA',lda_Amazon)], voting='soft')
##LDA
def metrics(exp, clf, name, imp, cv):
    """Run stratified k-fold cross-validation on *clf* and persist per-fold scores.

    NOTE: this function shadows the module-level ``from sklearn import metrics``
    import; the name is kept because call sites below use it.

    exp: DataFrame containing the features and a 'Group' target column
    clf: fitted-able estimator (here: a VotingClassifier)
    name, imp: labels used only to build the output CSV filename
    cv: number of stratified folds

    Writes one CSV row per scoring metric ("label,score1,...,scoreN") to
    ./validationDatasets/cvResults/<cv>_<name>_<imp>.csv and prints each
    metric's mean to stdout.
    """
    y = exp['Group'].values
    print(y)
    X = exp.drop('Group', axis=1)
    # BUG FIX: random_state is only honoured when shuffle=True; modern
    # scikit-learn raises a ValueError for random_state without shuffle.
    skf = StratifiedKFold(n_splits=cv, shuffle=True, random_state=seed)
    # (print label, CSV row label, sklearn scoring name) — the original
    # printed "roc" but wrote "roc_auc"; both spellings are preserved.
    scorings = [
        ("logloss", "logloss", "neg_log_loss"),
        ("accuracy", "accuracy", "accuracy"),
        ("f1", "f1", "f1"),
        ("precision", "precision", "precision"),
        ("recall", "recall", "recall"),
        ("roc", "roc_auc", "roc_auc"),
        ("balanced accuracy", "balanced accuracy", "balanced_accuracy"),
        ("average_precision", "average_precision", "average_precision"),
    ]
    path = "./validationDatasets/cvResults/"+str(cv)+"_"+str(name)+"_"+str(imp)+".csv"
    # `with` guarantees the file is closed even if cross-validation raises.
    with open(path, "w") as out:
        for print_label, row_label, scoring in scorings:
            print(print_label)
            scores = cross_val_score(clf, X, y, cv=skf.split(X, y), scoring=scoring)
            print(str(scores.mean())+"\n")
            out.write(row_label)
            for s in scores:
                out.write(","+str(s))
            out.write("\n")
# Run the full 16-fold CV battery: each data variant is evaluated with both
# its tuned ensemble and the default ensemble, plus the original data with
# the "pso" ensemble.
print("ADASYN PSO")
metrics(adaData, adasyn, "LDA_ADASYN", "PSO",16)
print("ADASYN Default")
metrics(adaData, vote, "LDA_ADASYN", "default",16)
print("SMOTE PSO")
metrics(smoteData, smote, "LDA_SMOTE", "PSO",16)
print("SMOTE Default")
metrics(smoteData, vote, "LDA_SMOTE", "default",16)
print("Vote")
metrics(expData, vote, "LDA_Vote", "default",16)
print("PSO")
metrics(expData, pso, "LDA_PSO", "1603",16)
|
import sys

# Read N dice rolls (two ints per line) and report whether any three
# consecutive rolls are doubles.
n = int(input())
rolls = [list(map(int, input().split())) for _ in range(n)]
streak = 0
for roll in rolls:
    # Extend the run of consecutive doubles, or reset it to zero.
    streak = streak + 1 if roll[0] == roll[1] else 0
    if streak == 3:
        print("Yes")
        sys.exit()
print("No")
|
from flask import render_template, redirect, url_for, request, flash, session
from flask_login import login_user, current_user, logout_user, login_required
from projectmanagement import app, db
from projectmanagement import bcrypt
from projectmanagement.forms import (LoginForm, RegistrationForm, ProjectForm, TaskForm,
UpdateAccountForm, UpdateStatusForm, RequestResetForm,
ResetPasswordForm)
from projectmanagement.models import User, Project, Task, Attendance
from datetime import date, datetime, timedelta
from projectmanagement import mail
from flask_mail import Message
@app.route('/')
@app.route('/home', methods=['GET','POST'])
def home():
    """Dashboard view that also drives the attendance start/pause/stop timer.

    A POST carries one of the form keys 'start', 'pause' or 'stop'; session
    flags mirror the last pressed button so the template can render the timer
    state.  One Attendance row is kept per user per day.
    """
    projects = Project.query.all()
    users = User.query.all()
    tasks = Task.query.all()
    if request.method == 'POST':
        # Today's attendance row for the logged-in user (None before 'start').
        user = Attendance.query.filter_by(user_id=session['user_id'], date=date.today()).first()
        result = request.form.to_dict()
        if result.get('pause'):
            session['start'] = ''
            session['stop'] = ''
            session['pause'] = result.get('pause')
            # Remember when the break began, truncated to 'HH:MM:SS'.
            session['break_start_time'] = datetime.now().time().isoformat()
            session['break_start_time'] = session['break_start_time'][:8]
            print(session['break_start_time'])
        if result.get('start'):
            session['stop'] = ''
            session['pause'] = ''
            session['start'] = result.get('start')
            if user:
                pass
            else:
                # First 'start' of the day: open the attendance record.
                add_user = Attendance(user_id=session['user_id'],date=date.today(),
                                      start_time=datetime.now().time())
                db.session.add(add_user)
                db.session.commit()
            # Resuming from a break: fold its length into user.break_time.
            # NOTE(review): this assumes 'start' preceded 'pause' today, so
            # `user` is not None here — otherwise this raises AttributeError.
            if session.get('break_start_time'):
                if user.break_time:
                    bst = datetime.strptime(session['break_start_time'], '%H:%M:%S').time()
                    bet = datetime.now().time()
                    h1, m1, s1 = bet.hour, bet.minute, bet.second
                    h2, m2, s2 = bst.hour, bst.minute, bst.second
                    # Convert both clock times to seconds-since-midnight.
                    t1_secs = s1 + 60 * (m1 + 60*h1)
                    t2_secs = s2 + 60 * (m2 + 60*h2)
                    breaktime = t1_secs - t2_secs
                    bt = user.break_time
                    h, m, s = bt.hour, bt.minute, bt.second
                    add_time = s + 60 * (m + 60*h)
                    user.break_time = timedelta(seconds=add_time + breaktime)
                    db.session.commit()
                    session.pop('break_start_time')
                else:
                    # First break of the day: no previous break_time to add to.
                    bst = datetime.strptime(session['break_start_time'], '%H:%M:%S').time()
                    bet = datetime.now().time()
                    h1, m1, s1 = bet.hour, bet.minute, bet.second
                    h2, m2, s2 = bst.hour, bst.minute, bst.second
                    t1_secs = s1 + 60 * (m1 + 60*h1)
                    t2_secs = s2 + 60 * (m2 + 60*h2)
                    user.break_time = timedelta(seconds= t1_secs - t2_secs)
                    db.session.commit()
                    session.pop('break_start_time')
        if result.get('stop'):
            session['start'] = ''
            session['pause'] = ''
            session['stop'] = result.get('stop')
            # Only the first 'stop' of the day records end_time / total_time.
            if user.end_time:
                pass
            else:
                user.end_time = datetime.now().time()
                db.session.commit()
            if user.total_time:
                pass
            else:
                h1, m1, s1 = user.end_time.hour, user.end_time.minute, user.end_time.second
                h2, m2, s2 = user.start_time.hour, user.start_time.minute, user.start_time.second
                t1_secs = s1 + 60 * (m1 + 60*h1)
                t2_secs = s2 + 60 * (m2 + 60*h2)
                user.total_time = timedelta(seconds= t1_secs - t2_secs)
                db.session.commit()
        return render_template('home.html',title='Home', projects=projects, users=users, tasks=tasks)
    else:
        return render_template('home.html',title='Home', projects=projects, users=users, tasks=tasks)
@app.route("/register", methods=['GET', 'POST'])
def register():
    """Create a new user account and redirect to the login page.

    The selected role string maps to the numeric priority stored on User
    (1 = admin, 2 = operations, 3 = team_lead, 4 = developer).
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RegistrationForm()
    if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        # Dict dispatch replaces the if/elif ladder.  The original left `role`
        # unbound (NameError) for an unexpected value; this raises KeyError
        # instead — still a hard failure, but an explicit one.
        role = {'admin': 1, 'operations': 2, 'team_lead': 3, 'developer': 4}[form.role.data]
        user = User(username=form.username.data, email=form.email.data, password=hashed_password, priority=role)
        db.session.add(user)
        db.session.commit()
        flash('Congratulations, Your account has been created! You are now able to log in!', 'success')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Authenticate a user and start their session."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if not (user and bcrypt.check_password_hash(user.password, form.password.data)):
            flash('Login Unsuccessful! Please check your email and password.', 'danger')
        else:
            # Mirror key identity fields into the session for views/templates.
            session['email'] = user.email
            session['user_id'] = user.id
            session['username'] = user.username
            login_user(user, remember=form.remember.data)
            next_page = request.args.get('next')
            return redirect(next_page) if next_page else redirect(url_for('home'))
    return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
    """Clear the session, log the user out, and return to the home page."""
    session.clear()
    logout_user()
    return redirect(url_for('home'))
@app.route("/account", methods=['GET', 'POST'])
@login_required
def account():
    """View and edit the logged-in user's profile."""
    form = UpdateAccountForm()
    if form.validate_on_submit():
        # Persist the edited profile fields.
        current_user.username = form.username.data
        current_user.email = form.email.data
        current_user.designation = form.designation.data
        db.session.commit()
        flash('Your account has been updated!', 'success')
        return redirect(url_for('account'))
    if request.method == 'GET':
        # Pre-fill the form from the current user.
        form.username.data = current_user.username
        form.email.data = current_user.email
        if current_user.designation:
            form.designation.data = current_user.designation
    return render_template('account.html', title='Account', form=form)
@app.route("/project", methods=['GET', 'POST'])
@login_required
def project():
    """Create a new project from the ProjectForm.

    BUG FIX: @login_required sat *above* @app.route, so Flask registered the
    unwrapped view and the login check never ran; the route decorator must be
    outermost.
    """
    form = ProjectForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.team_lead.data).first()
        user_id = user.id
        project = Project(title=form.title.data, description=form.description.data,
                          team_lead=user_id, start_date=form.start_date.data,
                          end_date=form.end_date.data, status=form.status.data)
        db.session.add(project)
        db.session.commit()
        flash('Congratulations, Your project has been added!', 'success')
        return redirect(url_for('home'))
    return render_template('project.html', title='Project', form=form)
@app.route("/project/<int:project_id>", methods=['GET', 'POST'])
@login_required
def project_task(project_id):
    """List the tasks belonging to one project.

    BUG FIX: decorator order swapped so @login_required is actually applied
    (it must sit below @app.route).
    """
    project = Project.query.filter_by(id=project_id).first()
    tasks = Task.query.filter_by(project_id=project_id)
    users = User.query.all()
    return render_template('project_task.html', title='Task', tasks=tasks, users=users, project=project)
@app.route("/project_update/<int:project_id>", methods=['GET', 'POST'])
@login_required
def project_update(project_id):
    """Edit an existing project.

    BUG FIX: decorator order swapped so @login_required is actually applied
    (it must sit below @app.route).
    """
    form = ProjectForm()
    project = Project.query.get(project_id)
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.team_lead.data).first()
        user_id = user.id
        project.title = form.title.data
        project.description = form.description.data
        project.team_lead = user_id
        project.start_date = form.start_date.data
        project.end_date = form.end_date.data
        project.status = form.status.data
        db.session.commit()
        flash('Your project has been updated!', 'success')
        return redirect(url_for('home'))
    elif request.method == 'GET':
        # Pre-fill the form with the project's current values.
        user = User.query.filter_by(id=project.team_lead).first()
        form.title.data = project.title
        form.description.data = project.description
        form.team_lead.data = user.username
        form.start_date.data = project.start_date
        form.end_date.data = project.end_date
        form.status.data = project.status
    return render_template('project_update.html', title='Project Update', form=form)
@app.route("/task", methods=['GET', 'POST'])
@login_required
def task():
    """Create a new task under an existing project.

    BUG FIX: decorator order swapped so @login_required is actually applied
    (it must sit below @app.route).
    """
    form = TaskForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.developer.data).first()
        user_id = user.id
        project = Project.query.filter_by(title=form.project.data).first()
        project_id = project.id
        task = Task(project_id=project_id ,title=form.title.data, details=form.detail.data,
                    developer=user_id, start_date=form.start_date.data,
                    end_date=form.end_date.data, status=form.status.data)
        db.session.add(task)
        db.session.commit()
        flash('Congratulations, Your task has been added!', 'success')
        return redirect(url_for('home'))
    return render_template('task.html', title='Task', form=form)
@app.route("/task_update/<int:task_id>", methods=['GET', 'POST'])
@login_required
def task_update(task_id):
    """Edit an existing task.

    BUG FIX: decorator order swapped so @login_required is actually applied
    (it must sit below @app.route).
    """
    form = TaskForm()
    task = Task.query.get(task_id)
    project = Project.query.filter_by(id=task.project_id).first()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.developer.data).first()
        user_id = user.id
        project = Project.query.filter_by(title=form.project.data).first()
        project_id = project.id
        # NOTE(review): the original assigns ``task.project``; confirm the Task
        # model attribute — other code reads ``task.project_id``.
        task.project = project_id
        task.title = form.title.data
        task.details = form.detail.data
        task.developer = user_id
        task.start_date = form.start_date.data
        task.end_date = form.end_date.data
        task.status = form.status.data
        db.session.commit()
        flash('Your task has been updated!', 'success')
        return redirect(url_for('project_task', project_id=project_id))
    elif request.method == 'GET':
        # Pre-fill the form with the task's current values.
        user = User.query.filter_by(id=task.developer).first()
        project1 = Project.query.get(project.id)
        project_title = project1.title
        form.project.data = project_title
        form.title.data = task.title
        form.detail.data = task.details
        form.developer.data = user.username
        form.start_date.data = task.start_date
        form.end_date.data = task.end_date
        form.status.data = task.status
    return render_template('task_update.html', title='Task Update', form=form)
@app.route('/status_update/<int:task_id>', methods=['GET', 'POST'])
@login_required
def status_update(task_id):
    """Update only the status field of a task.

    BUG FIX: decorator order swapped so @login_required is actually applied
    (it must sit below @app.route).
    """
    form = UpdateStatusForm()
    task = Task.query.get(task_id)
    project = Project.query.get(task.project_id)
    if form.validate_on_submit():
        task.status = form.status.data
        db.session.commit()
        flash('Your status has been updated!', 'success')
        return redirect(url_for('home'))
    elif request.method == 'GET':
        form.status.data = task.status
    return render_template('status_update.html', title='Update Status', form=form, task=task, project=project)
def send_reset_email(user):
    """Email *user* a time-limited password-reset link built from a signed token."""
    token = user.get_reset_token()
    msg = Message('Password Reset Request',
                  sender='noreply@demo.com',
                  recipients=[user.email])
    # The link embeds the token; _external=True makes it an absolute URL.
    msg.body = f'''To reset your password, visit the following link:
{url_for('reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
    mail.send(msg)
@app.route("/reset_password", methods=['GET', 'POST'])
def reset_request():
    """Request a password-reset email.

    BUG FIX: an unknown email made ``first()`` return None and
    ``send_reset_email(None)`` crashed.  The flash message is identical either
    way, so the form does not reveal which addresses are registered.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RequestResetForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user:
            send_reset_email(user)
        flash('An email has been sent with instructions to reset your password.', 'info')
        return redirect(url_for('login'))
    return render_template('reset_request.html', title='Reset Password', form=form)
@app.route("/reset_password/<token>", methods=['GET', 'POST'])
def reset_token(token):
    """Set a new password using a signed reset token from the email link."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    user = User.verify_reset_token(token)
    if user is None:
        flash('That is an invalid or expired token', 'warning')
        return redirect(url_for('reset_request'))
    form = ResetPasswordForm()
    if form.validate_on_submit():
        # Store only the bcrypt hash, never the plain-text password.
        user.password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        db.session.commit()
        flash('Your password has been updated! You are now able to log in', 'success')
        return redirect(url_for('login'))
    return render_template('reset_token.html', title='Reset Password', form=form)
@app.route('/attendance')
@login_required
def attendance():
    """Show every attendance record.

    BUG FIX: @login_required moved below @app.route so it is applied to the
    view Flask actually registers.
    """
    records = Attendance.query.all()
    return render_template('attendance.html', title='Attendance', attendance=records)
# Streamlit UI for the autism screening questionnaire; answers feed a
# pre-trained SVM model loaded from disk.
import pandas as pd
import numpy as np
import joblib
import seaborn as sns
import streamlit as st
import sklearn
# loading joblib files
asd_svm = joblib.load("asd_svm.joblib")
# Creating the UI for the application:
st.markdown("<h1 style='text-align: center; color: red;'>Analysis of Autism Trend data</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; color: black;'>Modeller : Archana Girinath </h3>", unsafe_allow_html=True)
st.markdown("<h4 style='text-align: center; color: blue;'><href>https://github.com/gsarchu/autismML</href></h4>",unsafe_allow_html=True)
st.markdown("<h2 style='text-align: center; color: black;'>Autism Screening Questionnaire </h2>", unsafe_allow_html=True)
A1= st.selectbox("1. Does your child look at you when you call his/her name? ", [False, True])
A2= st.selectbox("2. How easy is it for you to get eye contact with your child? ", [False, True])
A3= st.selectbox("3. Does your child point to indicate that s/he wants something? (e.g. a toy that is out of reach) ", [False, True])
A4= st.selectbox("4. Does your child point to share interest with you? (e.g. poin9ng at an interes9ng sight) ", [False, True])
A5= st.selectbox("5. Does your child pretend? (e.g. care for dolls, talk on a toy phone) ", [False, True])
A6= st.selectbox("6. Does your child follow where you’re looking? ", [False, True])
A7= st.selectbox("7. If you or someone else in the family is visibly upset, does your child show signs of wan9ng to comfort them? ", [False, True])
A8= st.selectbox("8. Would you describe your child’s first words as: ", [False, True])
A9= st.selectbox("9. Does your child use simple gestures? (e.g. wave goodbye) ", [False, True])
A10= st.selectbox("10. Does your child stare at nothing with no apparent purpose? ", [False, True])
# IMPROVEMENT: the ten copy-pasted if/else ladders collapsed to one line —
# the selectbox answers are booleans, and int() maps False->0, True->1.
A1, A2, A3, A4, A5, A6, A7, A8, A9, A10 = (
    int(answer) for answer in (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)
)
submit = st.button('Submit')
if submit:
    # Feature order must match the order used when the model was trained.
    # NOTE(review): A10 is collected but never fed to the model — confirm
    # this matches the training feature set.
    prediction = asd_svm.predict([[A2, A5, A6, A9, A7, A1, A4, A3, A8]])
    if prediction ==1:
        st.write("Your child could be Autistic (with an accuracy of 96.2%) ")
    else:
        st.write("Analysis shows that your child is not autistic (with an accuracy of 96.2%)")
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Author: Dreamer
# Time: 2018.6.22
# Exception-handling demo: instead of crashing on an error, report it and
# keep executing the rest of the program.
# After `try`, either `except` or `finally` is mandatory; `else` is optional.
try:  # put code that may fail inside the try block
    path = input("请输入文件路径:")
    with open(path, "r") as f:
        # BUG FIX: the original assigned the bound method (``f.read``) instead
        # of calling it, so the method object was printed, not the file text.
        content = f.read()
        print(content)
    # print(a)
# except FileNotFoundError:  # intercept only "file not found", ignore the rest
#     print("文件未找到!")
# except NameError as error:
#     print("%s名字出错" % error)
except Exception as error:  # intercepts every error type; `as` exposes the details
    print("%s出错了!" % error)
else:
    print("如果try里面的代码正常执行,没有出错就会走else")
print("后续处理!")
|
def stream_kline_to_struct_kline(bar):
    """Flatten a stream kline dict into the [time, open, high, low, close,
    volume] float list used by the struct-kline consumers."""
    return [float(bar[field]) for field in ('t', 'o', 'h', 'l', 'c', 'v')]
|
#-*-coding:utf-8-*-
import time
import random
import sys
from multiprocessing import Process
#多线程
def run(name):
    """Worker body: announce start, sleep a random 2-5 seconds, announce end."""
    print('%s running' % name)
    duration = random.randrange(2, 6)
    time.sleep(duration)
    print('%s running end' % name)
# The trailing comma is required: `args` must be a tuple, even with one item.
p1 = Process(target=run, args=('anne',))
p2 = Process(target=run, args=('alice',))
p3 = Process(target=run, args=('bb',))
p4 = Process(target=run, args=('hh',))
def main():
    """Start all four worker processes, then force-kill p1 after ~1 second."""
    p1.start()
    p2.start()
    p3.start()
    p4.start()
    time.sleep(1)
    # p1 sleeps at least 2s, so it is (very likely) still running here.
    p1.terminate()
    print('主线程')
# Required with the spawn start method (Windows/macOS): only the parent runs main().
if __name__ == '__main__':
    main()
|
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
import asyncio
import urllib3
import traceback
from urllib.parse import urlparse
from ipaddress import ip_address
from typing import Dict, Tuple, Any
from jarm.scanner.scanner import Scanner
# Disable insecure warnings
urllib3.disable_warnings()  # pylint: disable=no-member
# Port scanned when neither the argument nor the hostname specifies one.
DEFAULT_PORT = 443
""" CLIENT CLASS """
class Client:
    """Thin stateless wrapper around the JARM active TLS scanner."""
    def jarm_fingerprint(self, host: str, port: int) -> Tuple[str, str, int]:
        # Runs the async scan to completion; result[0] is the JARM
        # fingerprint string (see jarm_fingerprint_command).
        return asyncio.run(Scanner.scan_async(host, port, suppress=True))
""" HELPER FUNCTIONS """
def parse_hostname(hostname: str, port: Optional[int], default_port: int = 443) -> Dict[str, Any]:
    """
    Parses a target hostname. Supports multiple ipv4/fqdn with and without port formats.

    :param hostname: IP, FQDN or URL, optionally carrying an explicit ':port'.
    :param port: explicit port override; takes precedence over any port
        embedded in *hostname*.
    :param default_port: fallback port when neither *port* nor *hostname*
        specifies one (generalizes the former hard-coded module constant).
    :return: dict with 'target_host', 'target_type' ('ip' or 'fqdn') and 'port'.
    """
    target: Dict[str, Any] = {}
    # urlparse only splits host/port reliably when a scheme is present.
    if not hostname.startswith('https://'):
        hostname = 'https://' + hostname
    parsed_url = urlparse(hostname)
    if port:
        target['port'] = port
    elif parsed_url.port:
        target['port'] = parsed_url.port
    else:
        target['port'] = default_port
    try:
        ip = ip_address(parsed_url.hostname)  # type: ignore[arg-type]
        target['target_host'] = str(ip)
        target['target_type'] = 'ip'
    except ValueError:
        # Not a literal IP address -> treat as a fully-qualified domain name.
        target['target_host'] = parsed_url.hostname
        target['target_type'] = 'fqdn'
    return target
""" COMMAND FUNCTIONS """
def test_module(client: Client) -> str:
return "ok"
def jarm_fingerprint_command(
    client: Client, args: Dict[str, Any]
) -> CommandResults:
    """Scan the requested host/port and return its JARM fingerprint.

    Required arg: 'host' (IP, FQDN or URL); optional: 'port'.
    Raises ValueError for a missing host or an unresolvable target.
    """
    class JARMDBotScore(Common.Indicator):
        # Minimal indicator wrapper attaching a neutral DBot score to the
        # fingerprint.
        def __init__(self, output: Dict[str, Any]):
            self._jarm = output.get('Fingerprint')
        def to_context(self) -> Dict[str, Any]:
            return {
                "DBotScore": {
                    "Indicator": self._jarm,
                    "Type": "jarm",
                    "Vendor": "JARM",
                    "Score": Common.DBotScore.NONE,
                }
            }
    host = args.get("host")
    if not host:
        raise ValueError("Host name (IP or domain) not specified")
    port = arg_to_number(args.get("port"))
    target = parse_hostname(host, port)
    target_type = target.get('target_type')
    if not target_type:
        raise ValueError('Cannot determine scan target')
    target_host = target.get('target_host')
    if not target_host:
        raise ValueError('Cannot determine scan target')
    port = target.get('port')
    if not port:
        raise ValueError('Invalid port provided')
    result = client.jarm_fingerprint(target_host, port)
    output = {}
    # result[0] is the JARM fingerprint string.
    output['Fingerprint'] = result[0]
    output['Target'] = f'{target_host}:{port}'
    output['Port'] = port
    # Record the host under the key matching its resolved type.
    if target_type == 'ip':
        output['IP'] = target_host
    elif target_type == 'fqdn':
        output['FQDN'] = target_host
    return CommandResults(
        outputs_prefix="JARM", outputs_key_field=['FQDN', 'IP', 'Port'], outputs=output,
        indicator=JARMDBotScore(output=output)
    )
""" MAIN FUNCTION """
def main() -> None:
    """Route the invoked integration command to its implementation."""
    command = demisto.command()
    demisto.debug(f"Command being called is {command}")
    try:
        handle_proxy()
        client = Client()
        if command == "test-module":
            # This is the call made when pressing the integration Test button.
            return_results(test_module(client))
        elif command == "jarm-fingerprint":
            return_results(jarm_fingerprint_command(client, demisto.args()))
        else:
            raise NotImplementedError(f'Command {command} is not implemented.')
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(
            f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}"
        )
""" ENTRY POINT """
# Demisto executes integrations under varying module names; cover them all.
if __name__ in ("__main__", "__builtin__", "builtins"):
    main()
|
from werkzeug.utils import redirect
from books_app.config.mysqlconnection import connectToMySQL
from books_app.models import books
class Author:
    """Model wrapper around the `authors` table in the books_schema database."""

    def __init__(self, data):
        """Hydrate an Author from a DB row dict."""
        self.id = data['id']
        self.name = data['name']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']
        # Filled in on demand when favourites are joined in.
        self.favorite_books = []

    @classmethod
    def make_author(cls, data):
        """Insert a new author row; returns whatever query_db yields for an
        INSERT (presumably the new row id — confirm against the connector)."""
        query = "INSERT INTO authors (name, created_at, updated_at) VALUES (%(name)s, NOW(), NOW());"
        return connectToMySQL('books_schema').query_db(query, data)

    @classmethod
    def get_all_authors(cls):
        """Return every author as a list of Author instances."""
        query = "SELECT * FROM authors;"
        rows = connectToMySQL('books_schema').query_db(query)
        return [cls(row) for row in rows]

    @classmethod
    def get_author_info_by_id(cls, data):
        """Return the raw row(s) for the author whose id is in *data*."""
        query = "SELECT * from authors WHERE id = %(id)s;"
        return connectToMySQL('books_schema').query_db(query, data)

    @classmethod
    def insert_data(cls, data):
        """Link a book and an author in the favorites join table."""
        query = "INSERT INTO favorites (book_id, author_id, created_at, updated_at) VALUES (%(book_id)s, %(author_id)s, NOW(), NOW());"
        return connectToMySQL('books_schema').query_db(query, data)

    @classmethod
    def get_favorite_authors(cls, data):
        """Return raw author rows favourited for the book id in *data*."""
        query = "SELECT * FROM authors JOIN favorites ON favorites.author_id = authors.id WHERE favorites.book_id = %(id)s"
        return connectToMySQL('books_schema').query_db(query, data)

    # @classmethod
    # def get_authors_favorites(cls, data):
    #     query = "SELECT * FROM authors LEFT JOIN favorites ON favorites.author_id = authors.id LEFT JOIN books ON favorites.book_id = books.id WHERE favorites.author_id = %(author_id)s"
    #     results = connectToMySQL('books_schema').query_db(query, data)
    #     favorite = cls(results[0])
    #     print("ppppprint:", favorite)
    #     for result in results:
    #         favorite_data = {
    #             "id": result['books.id'],
    #             "title": result['title'],
    #             "num_of_pages": result['num_of_pages'],
    #             "created_at": result['created_at'],
    #             "updated_at": result['updated_at']
    #         }
    #         favorite.favorite_books.append(books.Book(favorite_data))
    #     return favorite
|
"""
Its pretty imprortant if multiple models are being used for Ensembling, Stacking / Blending
For each of them its very important to have the same folds
"""
import pandas as pd
from sklearn import model_selection
if __name__ == '__main__':
df = pd.read_csv('../input/labeledTrainData.tsv', sep="\t")
df.loc[:, 'kfold'] = -1
df = df.sample(frac=1).reset_index(drop=True)
y = df.sentiment.values
skf = model_selection.StratifiedKFold(n_splits=5, random_state=42)
for f, (t_, v_) in enumerate(skf.split(X=df, y = y)):
df.loc[v_, "kfold"] = f
df.to_csv('../input/train_folds.csv', index=False) |
from django.contrib import admin
from blog.models import Article
from django.contrib.auth.models import User
# Register your models here.
class ArticleAdmin(admin.ModelAdmin):
    """Admin list view for Article showing key engagement columns."""
    # NOTE(review): 'decription' looks like a typo for 'description' — it must
    # match an actual Article field/attribute or the admin check will fail;
    # left as-is pending confirmation against blog.models.Article.
    list_display = ('title', 'decription', 'likes', 'views')

# BUG FIX: the ModelAdmin was defined but never used; pass it to register()
# so list_display actually takes effect in the admin.
admin.site.register(Article, ArticleAdmin)
|
# NFL team nickname -> three-letter franchise/city abbreviation.
# Note both 'Redskins' and 'Football_Team' map to WAS (same franchise,
# different season naming).
fullTeam_shortCity_map = {
    '49ers':'SFO',
    'Bears':'CHI',
    'Bengals':'CIN',
    'Bills':'BUF',
    'Broncos':'DEN',
    'Browns':'CLE',
    'Buccaneers': 'TAM',
    'Cardinals': 'ARI',
    'Chargers': 'LAC',
    'Chiefs': 'KAN',
    'Colts': 'IND',
    'Cowboys': 'DAL',
    'Dolphins': 'MIA',
    'Eagles': 'PHI',
    'Falcons': 'ATL',
    'Giants': 'NYG',
    'Jaguars': 'JAX',
    'Jets': 'NYJ',
    'Lions': 'DET',
    'Packers': 'GNB',
    'Panthers': 'CAR',
    'Patriots': 'NWE',
    'Raiders': 'LVR',
    'Rams': 'LAR',
    'Ravens': 'BAL',
    'Redskins': 'WAS',
    'Football_Team':'WAS',
    'Saints': 'NOR',
    'Seahawks': 'SEA',
    'Steelers': 'PIT',
    'Texans': 'HOU',
    'Titans': 'TEN',
    'Vikings': 'MIN'
}
# Full "City Nickname" team name -> bare nickname (keys of the map above).
fullTeam_shortTeam_map = {
    'San Francisco 49ers':'49ers',
    'Chicago Bears':'Bears',
    'Cincinnati Bengals':'Bengals',
    'Buffalo Bills':'Bills',
    'Denver Broncos':'Broncos',
    'Cleveland Browns':'Browns',
    'Tampa Bay Buccaneers':'Buccaneers',
    'Arizona Cardinals':'Cardinals',
    'Los Angeles Chargers':'Chargers',
    'Kansas City Chiefs':'Chiefs',
    'Indianapolis Colts':'Colts',
    'Dallas Cowboys':'Cowboys',
    'Miami Dolphins':'Dolphins',
    'Philadelphia Eagles':'Eagles',
    'Atlanta Falcons':'Falcons',
    'New York Giants':'Giants',
    'Jacksonville Jaguars':'Jaguars',
    'New York Jets':'Jets',
    'Detroit Lions':'Lions',
    'Green Bay Packers':'Packers',
    'Carolina Panthers':'Panthers',
    'New England Patriots':'Patriots',
    'Las Vegas Raiders':'Raiders',
    'Los Angeles Rams':'Rams',
    'Baltimore Ravens':'Ravens',
    'Washington Football Team':'Football_Team',
    'New Orleans Saints':'Saints',
    'Seattle Seahawks':'Seahawks',
    'Pittsburgh Steelers':'Steelers',
    'Houston Texans':'Texans',
    'Tennessee Titans':'Titans',
    'Minnesota Vikings':'Vikings'
}
import torch
import torch.nn as nn
import torch.nn.functional as F
class MaxBlurPool(torch.nn.Module):
    """
    Simplified implementation of MaxBlurPool based on Adobe's antialiased CNNs.

    Dense (stride-1) max pooling followed by a fixed 3x3 binomial blur applied
    with stride 2, so the downsampling step is antialiased.
    """

    def __init__(self, n):
        super().__init__()
        # stride-1 max pool: take the max densely, defer subsampling to the blur
        self.maxpool = nn.MaxPool2d(2, 1)
        self.padding = nn.ReflectionPad2d(1)
        # separable binomial [1, 2, 1] kernel, normalized to sum to 1,
        # replicated once per channel for a depthwise convolution
        base = torch.tensor([1, 2, 1])
        kernel = (base[None, :] * base[:, None]).float()
        kernel = kernel / kernel.sum()
        kernel = kernel[None, None].repeat((n, 1, 1, 1))
        self.register_buffer('f', kernel)

    def forward(self, x):
        pooled = self.maxpool(x)
        padded = self.padding(pooled)
        # depthwise (groups == channels) strided conv performs the subsampling
        return F.conv2d(padded, self.f, stride=2, groups=padded.shape[1])
|
from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QVBoxLayout, QLCDNumber
import sys
from PyQt5 import QtGui
import random
class Window(QWidget):
    """Main application window: an LCD readout plus a button that fills it
    with a random integer in [0, 100]."""
    def __init__(self):
        super().__init__()
        # Window chrome / geometry settings.
        # NOTE(review): assigning self.height/self.width shadows QWidget's
        # height()/width() methods on this instance — works, but fragile.
        self.title = "This is first thing"
        self.height = 700
        self.width = 1100
        self.top = 100
        self.left = 200
        self.iconName = "plioky.ico"
        self.lcd = QLCDNumber()
        self.vbox = QVBoxLayout()
        self.init_window()
    def init_window(self):
        """Build the widget tree, apply styles, and show the window."""
        self.setWindowIcon(QtGui.QIcon(self.iconName))
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.vbox.addWidget(self.lcd)
        self.lcd.setStyleSheet("background-color:orange")
        self.setStyleSheet("background-color:#212f3d")
        button = QPushButton("Generate random number")
        # clicking the button regenerates the displayed number
        button.clicked.connect(self.generate)
        button.setStyleSheet("background-color:white")
        self.vbox.addWidget(button)
        self.setLayout(self.vbox)
        self.show()
    def generate(self):
        """Display a fresh random integer in [0, 100] on the LCD."""
        rand = random.randint(0, 100)
        self.lcd.display(rand)
if __name__ == "__main__":
    # Create the Qt application, show the window, and enter the event loop.
    myapp = QApplication(sys.argv)
    window = Window()
    sys.exit(myapp.exec())
from django.contrib import admin
from .models import Article, Author
# NOTE(review): admin credentials were previously committed here as comments;
# removed. Rotate that account's password and keep secrets out of source control.
admin.site.register(Article)
admin.site.register(Author)
# Register your models here.
|
import numpy as np
from numpy import float32,int32
np.random.seed(42)  # seed NumPy before anything else, for reproducibility
import tensorflow as tf
from keras.layers import TimeDistributed
from keras.layers import Bidirectional
tf.set_random_seed(42)  # TF1-style global graph seed
# Single-threaded session so runs are deterministic across executions.
session_conf = tf.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1
)
from keras import backend as K
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense, Dropout,Flatten
from data import load_data
from utils import confusion_matrix
from keras.callbacks import TensorBoard, ModelCheckpoint
import os
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder
# Activity class labels used for the confusion-matrix axes below.
LABELS = ['WALKING','WALKING_UPSTAIRS','WALKING_DOWNSTAIRS','SITTING','STANDING','LAYING']
lables=np.array(LABELS)  # NOTE(review): 'lables' is a typo for 'labels' (also used in the plot code)
print(lables.shape)
print(lables)
CHECK_ROOT = 'checkpoint/'  # where ModelCheckpoint weight files are written
if not os.path.exists(CHECK_ROOT):
    os.makedirs(CHECK_ROOT)
# Training hyper-parameters.
epochs = 20 # 30
batch_size = 16
n_hidden = 32  # LSTM units per direction
def one_hot(y_):
    """
    Function to encode output labels from number indexes.
    E.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
    """
    flat = y_.reshape(len(y_))
    # depth is inferred from the largest index present in the input
    depth = int(np.max(flat)) + 1
    indices = np.array(flat, dtype=np.int32)
    return np.eye(depth)[indices]  # rows of an identity matrix (floats)
def _count_classes(y):
return len(set([tuple(category) for category in y]))
# Load train/test splits; labels arrive one-hot encoded.
X_train, X_test, Y_train, Y_test = load_data()
y_test=Y_test.argmax(1)  # integer class ids for sklearn's confusion matrix
timesteps = len(X_train[0])
input_dim = len(X_train[0][0])
n_classes = _count_classes(Y_train)
print("n_classes",n_classes)
# LSTM
#model = Sequential()
#model.add(LSTM(n_hidden, input_shape=(timesteps, input_dim)))
#model.add(LSTM(n_hidden, input_shape=(timesteps, input_dim)))
#model.add(Dropout(0.5))
#model.add(Dense(n_classes, activation='sigmoid'))
#model.compile(loss='categorical_crossentropy',
# optimizer='adam',
# metrics=['accuracy'])
# bidLSTM
model = Sequential()
model.add(Bidirectional(LSTM(n_hidden, return_sequences=True), input_shape=(timesteps, input_dim)))
model.add(Dropout(0.5))
model.add(Flatten())
# NOTE(review): 'sigmoid' with categorical_crossentropy is unusual for a
# single-label problem — 'softmax' is the conventional pairing; confirm intent.
model.add(Dense(n_classes, activation='sigmoid')) #n_classes
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
# callback: draw curve on TensorBoard
tensorboard = TensorBoard(log_dir='log', histogram_freq=0, write_graph=True, write_images=True)
# callback: save the weight with the highest validation accuracy
filepath=os.path.join(CHECK_ROOT, 'weights-improvement-{val_acc:.4f}-{epoch:04d}.hdf5')
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=2, save_best_only=True, mode='max')
model.fit(X_train,
          Y_train,
          batch_size=batch_size,
          validation_data=(X_test, Y_test),
          epochs=epochs,callbacks=[tensorboard, checkpoint])
# Evaluate
print(confusion_matrix(Y_test, model.predict(X_test)))
predict=model.predict(X_test)
print("predict#################",predict)
# Reduce each probability row to its argmax index (kept as 1-element arrays).
pred_index_total=[]
for pred in predict:
    pred_index = []
    pred_list=pred.tolist()
    index_max=pred_list.index(max(pred_list))
    pred_index.append(index_max)
    pred_index_total.append(np.array(pred_index))
print(pred_index_total)
# NOTE(review): one_hot sizes its output from max(prediction) + 1, so if the
# highest class id is never predicted the matrix below can be truncated.
one_hot_predictions=one_hot(np.array(pred_index_total))
print("one_hot_predictions%%%%%%%%%",one_hot_predictions)
prediction=one_hot_predictions.argmax(1)
# NOTE(review): this rebinds the name 'confusion_matrix', shadowing the helper
# imported from utils (that helper was already called above, so it still runs).
confusion_matrix = metrics.confusion_matrix(y_test, prediction)
print("%%%%%%%%%%%%%%%",confusion_matrix)
# Plot Results:
width = 12
height = 12
normalised_confusion_matrix = np.array(confusion_matrix, dtype=float32)/np.sum(confusion_matrix)*100
plt.figure(figsize=(width, height))
plt.imshow(
    normalised_confusion_matrix,
    interpolation='nearest',
    cmap=plt.cm.rainbow
)
plt.title("Confusion matrix \n(normalised to % of total test data)")
plt.colorbar()
tick_marks = np.arange(n_classes)
plt.xticks(tick_marks,lables,rotation=90)
plt.yticks(tick_marks,lables)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
def has_cycle(head):
    """Return True if the linked list starting at `head` contains a cycle.

    Floyd's tortoise-and-hare: advance one pointer by 1 and one by 2 each
    step; they can meet again only if the list loops. O(n) time, O(1) space.

    BUG FIX: the original used assignment in the comparison
    (`if slow = fast.next:` — a SyntaxError) and compared before advancing,
    so the detection logic never worked.
    """
    slow = fast = head
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            return True
    return False
def main():
    """Entry-point placeholder — demo/driver code for has_cycle goes here."""
    pass
if __name__ == "__main__":
    main()
|
from pymongo import MongoClient
import json
input_path = 'resources/mock_flights.json'
# Load the recorded flight snapshots from the mock-data fixture.
with open(input_path, 'r') as input_file:
    snapshots = json.loads(input_file.read())['snapshots']
# Insert each snapshot as one document into the local MongoDB
# 'recordings.mockFlights' collection.
client = MongoClient('localhost', 27017)
db = client['recordings']
collection = db['mockFlights']
for snapshot in snapshots:
    collection.insert_one(snapshot)
|
from __future__ import print_function
from model import *
# Command-line configuration via TF1's tf.app.flags; each flag carries its
# own help string. FLAGS is consumed by main() below and by Model.
flags = tf.app.flags
flags.DEFINE_integer('num_units', 24, 'Number of units in LSTM layer')
flags.DEFINE_integer('num_unrollings', 20, 'Input sequence length')
flags.DEFINE_integer('batch_size', 1000, 'The size of training batch')
flags.DEFINE_integer('train_size', 10000, 'The size of training dataset')
flags.DEFINE_integer('epochs', 3130, 'Epochs to train')
flags.DEFINE_integer('checkpoint_step', 50, 'Step on which checkpoint is created')
flags.DEFINE_boolean('train', True, 'True for training, False for testing')
flags.DEFINE_string('dataset_dir', 'data', 'Directory name to save the dataset')
flags.DEFINE_string('checkpoint_dir', 'checkpoint', 'Directory name to save the checkpoint')
flags.DEFINE_string('batch_dataset_type', 'train_dataset', 'Dataset used for generating training batches')
flags.DEFINE_string('accuracy_dataset_type', 'test_dataset', 'Dataset used for generating accuracy')
flags.DEFINE_string('model_name', 'rnn-lstm', 'Name of the model')
flags.DEFINE_integer('restore_model', 3110, 'Model to restore to calculate accuracy')
flags.DEFINE_boolean('random_prediction', True, 'Show random prediction')
FLAGS = flags.FLAGS
def main(_):
    """Validate the flag configuration, then either train or report accuracy.

    NOTE(review): configuration validation uses `assert`, which is stripped
    under `python -O`; raising ValueError would be more robust — left as-is.
    """
    # Validating flags
    assert FLAGS.num_units > 0, 'Number of units in LSTM layer should be greater than 0'
    assert FLAGS.num_unrollings > 0, 'Input Sequence length should be greater than 0'
    assert FLAGS.train_size < 2**FLAGS.num_unrollings, 'The size of training dataset should be less than %d' % 2**FLAGS.num_unrollings
    assert FLAGS.train_size % FLAGS.batch_size == 0, 'Train size should be divisible by batch size'
    assert FLAGS.epochs > 0, 'Epochs should be greater than 0'
    assert FLAGS.checkpoint_step % (FLAGS.train_size / FLAGS.batch_size) == 0, 'Checkpoint step should be an Epoch'
    model = Model(FLAGS)
    if FLAGS.train:
        model.train()
    else:
        # accuracy() returns at least the accuracy; with random_prediction it
        # also returns a sample string and the model's count prediction for it.
        vals = model.accuracy()
        if FLAGS.random_prediction:
            print('Model accuracy: %.2f' % vals[0])
            print('Random binary string: %s' % str(vals[1]))
            print('Count prediction: %d' % vals[2])
        else:
            print('Model accuracy: %.2f' % vals[0])
if __name__ == '__main__':
    tf.app.run()
|
from dipy.tracking.utils import length
from dipy.tracking.streamline import Streamlines, cluster_confidence
from FT.single_fascicle_vizualization import *
from dipy.viz import window, actor
from FT.all_subj import all_subj_folders, all_subj_names
import numpy as np
from FT.weighted_tracts import *
# Load one subject's whole-brain tractography, keep only the 'cc'-masked
# bundle, weight each node-pair edge by its median value, filter streamlines
# by cluster confidence, and render the survivors.
main_folder = r'C:\Users\Admin\my_scripts\Ax3D_Pack\V6\after_file_prep'
folder_name = main_folder + all_subj_folders[0]
n = all_subj_names[0]
nii_file = load_dwi_files(folder_name)[5]
tract_path = folder_name + r'\streamlines' + n + '_wholebrain_3d_plus_new.trk'
streamlines = load_ft(tract_path, nii_file)
lab_labels_index, affine = nodes_by_index_mega(folder_name)
masked_streamlines = choose_specific_bundle(streamlines, affine, folder_name, mask_type='cc')
streamline_dict = create_streamline_dict(masked_streamlines, lab_labels_index, affine)
# streamline_dict = clean_non_cc(streamline_dict) ##
mat_medians = load_mat_of_median_vals(mat_type='w_plus')
index_to_text_file = r'C:\Users\Admin\my_scripts\aal\megaatlas\megaatlas2nii.txt'
idx = nodes_labels_mega(index_to_text_file)[1]
# FIX (idiom): local variable was named 'id', shadowing the builtin.
sort_order = np.argsort(idx)
# Reorder the median matrix rows and columns into atlas-label order.
mat_medians = mat_medians[sort_order]
mat_medians = mat_medians[:, sort_order]
vec_vols = []
s_list = []
'''new func:'''
# For every node pair (i, j), collect its streamlines (in either direction)
# and tag each one with the pair's median value for later weighting.
for i in range(len(sort_order)):
    for j in range(i + 1):
        edge_s_list = []
        if (i + 1, j + 1) in streamline_dict and mat_medians[i, j] > 0:
            edge_s_list += streamline_dict[(i + 1, j + 1)]
        if (j + 1, i + 1) in streamline_dict and mat_medians[i, j] > 0:
            edge_s_list += streamline_dict[(j + 1, i + 1)]
        edge_vec_vols = [mat_medians[i, j]] * len(edge_s_list)
        s_list = s_list + edge_s_list
        vec_vols = vec_vols + edge_vec_vols
s = Streamlines(s_list)
cci = cluster_confidence(s)
# Keep only streamlines with cluster-confidence index >= 1.
keep_streamlines = Streamlines()
for i, sl in enumerate(s):
    if cci[i] >= 1:
        keep_streamlines.append(sl)
# Visualize the streamlines we kept
ren = window.Renderer()
keep_streamlines_actor = actor.line(keep_streamlines, linewidth=0.1)
ren.add(keep_streamlines_actor)
interactive = True
if interactive:
    window.show(ren)
|
'''
Given a binary matrix, find there exists any
rectangle or square in the given matrix whose all
four corners are equal to 1.
'''
def find_rect(m):
    """Search binary matrix `m` for a rectangle whose four corners are all 1.

    Prints 'Yes'/'No' and returns the bounding submatrix (or [[]] if none).
    """
    # Per row, record the set of columns holding a 1 — only rows with at
    # least two 1s can contribute a pair of corners.
    corner_cols = []
    for row in m:
        cols = {c for c, v in enumerate(row) if v == 1}
        corner_cols.append(cols if len(cols) > 1 else set())
    # Any two rows sharing >= 2 corner columns form a rectangle.
    for top in range(len(corner_cols)):
        for bottom in range(top + 1, len(corner_cols)):
            shared = corner_cols[top].intersection(corner_cols[bottom])
            if len(shared) > 1:
                print('\nYes')
                return [row[min(shared):max(shared) + 1] for row in m[top:bottom + 1]]
    print('\nNo')
    return [[]]
def print_rect(r):
    """Pretty-print matrix `r`, one row per line, bracketed like a list.

    BUG FIX: the original emitted r[0] as the header line and r[len(r)-1]
    as the footer line, so a single-row matrix was printed twice.
    """
    print('[' + ',\n '.join(str(row) for row in r) + ']')
mat1 = [[1, 0, 0, 1, 0],
[0, 0, 1, 0, 1],
[0, 0, 0, 1, 0],
[1, 0, 1, 0, 1]]
print_rect(find_rect(mat1))
mat2 = [[]]
print_rect(find_rect(mat2))
mat3 = [[1]]
print_rect(find_rect(mat3))
mat4 = [[1, 0, 0, 1, 0],
[0, 0, 1, 0, 1],
[0, 0, 0, 1, 0],
[1, 0, 1, 1, 0]]
print_rect(find_rect(mat4))
mat5 = [[1, 0, 0, 1, 0],
[0, 0, 1, 0, 1],
[0, 0, 0, 1, 0],
[1, 0, 1, 0, 0]]
print_rect(find_rect(mat5)) |
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import UInt8MultiArray
# Function that publishes a message on the topic on which the Arduino
# is listening.
def publish(msg):
    """Publish the servo angle list to /servo_actuator as a UInt8MultiArray.

    NOTE(review): a new Publisher is created on every call; in ROS this is an
    anti-pattern (messages sent immediately after creation can be dropped) —
    consider a module-level publisher. Left as-is.
    """
    pub = rospy.Publisher('servo_actuator', UInt8MultiArray, queue_size=10)
    pub.publish(data=msg)
# A new message has been published on the topic /move_group/fake_controller_joint_states.
# This message is a sensor_msgs/JointState message type;
# get the data and publish it to the connected Arduino as joint angles.
def callback(data):
    """Convert the first three joint angles (radians) to 0-180 servo degrees and publish."""
    anglesRad = data.position
    # conversion between radians and percentage
    # output of this is a number between 0 and 180
    # maps [-1.57075, +1.57075] rad onto [0, 180] deg; joint 2 is mirrored
    joint_1 = int(((anglesRad[0]+1.57075)*180)/3.1415)
    joint_2 = 180-int(((anglesRad[1]+1.57075)*180)/3.1415)
    joint_3 = int(((anglesRad[2]+1.57075)*180)/3.1415)
    anglesPerc = [joint_1, joint_2, joint_3]
    # Python 2 print; note the literal "%s" is printed (no % formatting applied)
    print "Angles %s ", anglesPerc
    publish(anglesPerc)
def listener():
    """Initialize the ROS node and subscribe to /joint_states with `callback`."""
    rospy.init_node('request_handler', anonymous=True)
    # init the subscriber
    rospy.Subscriber("/joint_states", JointState, callback)
if __name__ == '__main__':
    listener()
    # keep this going: block until the node is shut down
    rospy.spin()
from enum import IntEnum, unique
@unique
class HardestGameMove(IntEnum):
    """Discrete move actions for the game agent.

    IntEnum so moves can double as array indices; @unique guards against two
    names accidentally sharing a value.
    """
    up = 0
    down = 1
    left = 2
    right = 3
    stay = 4
|
import urllib
import urllib2
import re
import sys
reload(sys)
sys.setdefaultencoding('utf-8')  # Python 2 hack: default str<->unicode codec
# NOTE(review): 'file' shadows the Python 2 builtin of the same name.
file = open("print.txt",'w+');
page = 1
url = 'http://www.qiushibaike.com/hot/page/' + str(page)
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {"User-Agent" : user_agent}
try:
    request = urllib2.Request(url, headers = headers)
    response = urllib2.urlopen(request)
    #print response.read()
    content = response.read()
    # Scrape (author, content) pairs out of the page HTML.
    # NOTE(review): regex over HTML is brittle; breaks when site markup changes.
    pattern = re.compile('<div class="author.*?>.*?<a.*? alt="(.*?)".*?</a>.*?<div.*?content">(.*?)</div>', re.S)
    items = re.findall(pattern,content)
    for item in items:
        print item[0].decode()
        print item[1].decode()
        file.writelines(item[0] + '\n')
except urllib2.URLError, e:
    # URLError may or may not carry an HTTP status / reason; report whichever exists.
    if hasattr(e, "code"):
        print e.code
    if hasattr(e, "reason"):
        print e.reason
|
# encoding: utf-8
# Regular expressions and auxilary functions used in this script are taken from O'Connor et al.'s Tweetmotif
# see: https://github.com/brendano/tweetmotif
import re
def regex_or(*items):
    """Join the alternatives into a single grouped '(a|b|...)' regex fragment."""
    return '(%s)' % '|'.join(items)
def pos_lookahead(r):
    """Wrap fragment `r` in a positive lookahead."""
    return '(?={})'.format(r)
def neg_lookahead(r):
    """Wrap fragment `r` in a negative lookahead."""
    return '(?!{})'.format(r)
def optional(r):
    """Make fragment `r` optional: '(r)?'."""
    return '({})?'.format(r)
# Build URL
PunctChars = r'[\'“".?!,:;]'
html_entity = '&(amp|lt|gt|quot);'
UrlStart1 = regex_or('https?://', r'www\.')
CommonTLDs = regex_or('com','co\\.uk','org','net','info','ca')
# hostname-looking token followed by a boundary (lookahead, not consumed)
UrlStart2 = r'[a-z0-9\.-]+?' + r'\.' + CommonTLDs + pos_lookahead(r'[/ \W\b]')
UrlBody = r'[^ \t\r\n<>]*?'
# trailing punctuation/entities that should not count as part of the URL
UrlExtraCrapBeforeEnd = '%s+?' % regex_or(PunctChars, html_entity)
UrlEnd = regex_or( r'\.\.+', r'[<>]', r'\s', '$')
url = (r'\b' +
       regex_or(UrlStart1, UrlStart2) +
       UrlBody +
       pos_lookahead( optional(UrlExtraCrapBeforeEnd) + UrlEnd))
# Build emoticon
NormalEyes = r'[:=]'
Wink = r'[;]'
NoseArea = r'(|o|O|-)'
HappyMouths = r'[D\)\]]'
SadMouths = r'[\(\[]'
Tongue = r'[pP]'
OtherMouths = r'[doO/\\]'
# eyes + optional nose + mouth, e.g. :-) ;P =(
emoticon = (
    "("+NormalEyes+"|"+Wink+")" +
    NoseArea +
    "("+Tongue+"|"+OtherMouths+"|"+SadMouths+"|"+HappyMouths+")"
)
|
#!/usr/bin/python
import datetime
import lugar
import Persona
#*****************************************************************************
# Clase : Evento
#
# Descripcion : Clase que implementa cada evento en el CLEI
#
# Autores :
# David Lilue # carnet: 09-10444
# Veronica Linayo # carnet: 08-10615
# Audry Morillo # carnet: 07-41253
# Vanessa Rivas # carnet: 10-10608
# Michael Woo # carnet: 09-10912
#
# Grupo :1, 3, 4
# Seccion : 1
#
#*****************************************************************************
class Evento(object):
    """An event (session, talk, workshop) scheduled at CLEI.

    Holds a name, start/end dates, start/end times, and a venue.
    """

    def __init__(self, nombre, fecha_ini, fecha_fin, hora_ini, hora_fin,
                 lugar):
        # (earlier plan: build datetime.date / datetime.time values from raw
        # day/month/year/hour/minute components before construction, e.g.
        # self.fecha_fin = datetime.date(anio_fin, mes_fin, dia_fin))
        self.nombre = nombre
        self.fecha_ini = fecha_ini
        self.fecha_fin = fecha_fin
        self.hora_ini = hora_ini
        self.hora_fin = hora_fin
        self.lugar = lugar

    def __str__(self):
        # List every attribute as "name: value", sorted case-insensitively.
        datos = ""
        for attr in sorted(self.__dict__.keys(), key=str.lower):
            datos += "\n%s: %s" % (attr, str(self.__dict__[attr]))
        return datos + "\n"
#*****************************************************************************
# Clase : SesionDePonencias
#
# Descripcion : Clase que hereda de Evento e implementa las sesiones de
# ponencias que se daran en el CLEI
#
# Autores :
# David Lilue # carnet: 09-10444
# Veronica Linayo # carnet: 08-10615
# Audry Morillo # carnet: 07-41253
# Vanessa Rivas # carnet: 10-10608
# Michael Woo # carnet: 09-10912
#
# Grupo :1, 3, 4
# Seccion : 1
#
#*****************************************************************************
class SesionDePonencias(Evento):
    """A paper-presentation session: an Evento holding 2 to 4 ponencias."""

    def __init__(self, nombre, fecha_ini, fecha_fin, hora_ini, hora_fin,lugar,
                 ponencia1, ponencia2, ponencia3 = None, ponencia4 = None):
        super(SesionDePonencias, self).__init__(nombre, fecha_ini, fecha_fin,
                                                hora_ini, hora_fin, lugar)
        self.ponencias = []
        self.ponencias.append(ponencia1)
        self.ponencias.append(ponencia2)
        # FIX (idiom): compare against None with identity, not !=
        if ponencia3 is not None:
            self.ponencias.append(ponencia3)
        if ponencia4 is not None:
            self.ponencias.append(ponencia4)

    def agregar_ponencia(self, ponencia):
        """Append a ponencia if the session has room (max 4); return success."""
        if len(self.ponencias) < 4:
            self.ponencias.append(ponencia)
            return True
        return False
#*****************************************************************************
# Clase : CharlaInvitada
#
# Descripcion : Clase que hereda de Evento e implementa las charlas invitadas
# que se daran en el CLEI
#
# Autores :
# David Lilue # carnet: 09-10444
# Veronica Linayo # carnet: 08-10615
# Audry Morillo # carnet: 07-41253
# Vanessa Rivas # carnet: 10-10608
# Michael Woo # carnet: 09-10912
#
# Grupo :1, 3, 4
# Seccion : 1
#
#*****************************************************************************
class CharlaInvitada(Evento):
    """An invited talk: an Evento plus moderator, speaker, abstract, keywords and topic."""
    def __init__(self, nombre, fecha_ini, fecha_fin, hora_ini, hora_fin, lugar,
                 moderador, charlista, resumen, palabras_claves, topico):
        # (disabled check: the moderator's expertise was meant to cover the topic)
        #if not(topico in moderador.Experticia):
        # raise Exception
        super(CharlaInvitada, self).__init__(nombre, fecha_ini, fecha_fin,
                                              hora_ini, hora_fin, lugar)
        self.moderador = moderador
        self.charlista = charlista
        self.resumen = resumen
        self.palabras_claves = palabras_claves
        self.topico = topico
#*****************************************************************************
# Clase : Taller
#
# Descripcion : Clase que hereda de Evento e implementa los talleres a ser
# impartidos en CLEI
#
# Autores :
# David Lilue # carnet: 09-10444
# Veronica Linayo # carnet: 08-10615
# Audry Morillo # carnet: 07-41253
# Vanessa Rivas # carnet: 10-10608
# Michael Woo # carnet: 09-10912
#
# Grupo :1, 3, 4
# Seccion : 1
#
#*****************************************************************************
class Taller(Evento):
    """A workshop: an Evento built around a single articulo."""
    def __init__(self, nombre, fecha_ini, fecha_fin, hora_ini, hora_fin,
                 lugar, articulo):
        super(Taller, self).__init__(nombre, fecha_ini, fecha_fin, hora_ini,
                                     hora_fin, lugar)
        self.articulo = articulo
|
from pyphocorehelpers.DataStructure.dynamic_parameters import DynamicParameters # to replace simple PlacefieldComputationParameters
from pyphoplacecellanalysis.PhoPositionalData.analysis.interactive_placeCell_config import build_configs # TODO: should be replaced by a better and internal config
# ==================================================================================================================== #
# PIPELINE STAGE (MIXIN HERE?)
# ==================================================================================================================== #
class FilterablePipelineStage:
    """
    Adds the self.filtered_sessions, self.filtered_epochs, self.active_configs, self.computation_results properties:
    """
    def select_filters(self, active_session_filter_configurations, clear_filtered_results=True, progress_logger=None):
        """Apply each named filter-config function to self.sess and record the results.

        Args:
            active_session_filter_configurations: dict mapping filter name -> callable
                taking the session and returning (filtered_session, filtered_epoch,
                filter_context).
            clear_filtered_results: when True, reset all filtered_* containers first;
                otherwise new results merge into the existing ones (which must exist).
            progress_logger: optional logger for progress messages.
        """
        if clear_filtered_results:
            # if clear_filtered_results is True, initialize the filtered_* properties. Otherwise just continue with the extant values (they must exist)
            self.filtered_sessions = dict()
            self.filtered_epochs = dict()
            self.filtered_contexts = DynamicParameters()
            self.active_configs = dict() # active_config corresponding to each filtered session/epoch
            self.computation_results = dict()
        if progress_logger is not None:
            progress_logger.info(f'select_filters(...) with: {list(active_session_filter_configurations.values())}')
        for a_filter_config_name, a_select_config_filter_function in active_session_filter_configurations.items():
            print(f'Applying session filter named "{a_filter_config_name}"...')
            if progress_logger is not None:
                progress_logger.info(f'\tApplying session filter named "{a_filter_config_name}"...')
            self.filtered_sessions[a_filter_config_name], self.filtered_epochs[a_filter_config_name], self.filtered_contexts[a_filter_config_name] = a_select_config_filter_function(self.sess)
            ## Add the filter to the active context (IdentifyingContext)
            # self.filtered_contexts[a_filter_config_name] = active_identifying_session_ctx.adding_context('filter', filter_name=a_filter_config_name) # 'bapun_RatN_Day4_2019-10-15_11-30-06_maze'
            # build the active filter config from the session's config and the filtered epoch
            self.active_configs[a_filter_config_name] = build_configs(self.filtered_sessions[a_filter_config_name].config, self.filtered_epochs[a_filter_config_name])
            self.computation_results[a_filter_config_name] = None # Note that computation config is currently None because computation hasn't been performed yet at this stage.
            self.active_configs[a_filter_config_name].filter_config = {'filter_function': a_select_config_filter_function} # add the makeshift filter config (which is currently just a dictionary)
# ==================================================================================================================== #
# PIPELINE MIXIN #
# ==================================================================================================================== #
class FilteredPipelineMixin:
    """ To be added to the pipeline to enable conveninece access ot its pipeline stage post Filtered stage.

    All properties below simply forward to the equivalent attribute on self.stage.
    """
    ## Filtered Properties:
    @property
    def filtered_epochs(self):
        """The filtered_epochs property, accessed through the stage."""
        return self.stage.filtered_epochs
    @property
    def filtered_sessions(self):
        """The filtered_sessions property, accessed through the stage."""
        return self.stage.filtered_sessions
    @property
    def filtered_session_names(self):
        """The names that identify each filtered session in the self.stage.filtered_sessions dictionary. Should be the same as self.active_config_names I believe."""
        return list(self.stage.filtered_sessions.keys())
    @property
    def filtered_contexts(self):
        """ filtered_contexts holds the corresponding contexts for each filtered config."""
        return self.stage.filtered_contexts
    @filtered_contexts.setter
    def filtered_contexts(self, value):
        self.stage.filtered_contexts = value
    @property
    def active_config_names(self):
        """The names of the active configs that can be used to index into the other properties (which are dictionaries)."""
        return list(self.stage.active_configs.keys())
    @property
    def active_configs(self):
        """The active_configs property corresponding to the InteractivePlaceCellConfig obtained by filtering the session. Accessed through the stage."""
        return self.stage.active_configs
|
#!/usr/bin/python
import sys
import glob
# Collect one result line: "<label>\t<peak server memory>\t<client network readings>"
# appended to the output file named by sys.argv[2].
line = "%s\t" % (sys.argv[1])

# Peak server memory: the max of the per-sample readings in the first match.
# FIX: the Python-2-only `file()` builtin is replaced with open(), and every
# handle is closed via a context manager (the originals were never closed).
files = glob.glob("*-server-memory.txt")
with open(files[0]) as mem_fh:
    peak_mem = max(int(x.strip()) for x in mem_fh)
line = line + str(peak_mem) + "\t"

# Append every client-side network reading (skip the server's own file).
files = glob.glob("*-network.txt")
for f in files:
    if not f.endswith("-server-network.txt"):
        with open(f) as net_fh:
            line = line + net_fh.read().strip()

with open(sys.argv[2], "a") as fp:
    fp.write(line + "\n")
|
from Bateria import Bateria
from collections import defaultdict
import numpy as np
from matplotlib import pyplot as plt
# Replay wind-tunnel test logs for each incidence angle, average the
# aerodynamic coefficients per run, then plot mean +/- std per angle.
load = True  # selects the "C" file prefix below (False -> "S") — meaning TODO confirm
angles = [0,3,6,9,12,15,18]  # bench incidence angles (degrees) to sweep
folderpath = "Domingo (31-03-2019)"
# Per-angle lists of per-run mean coefficients.
CL_dict = defaultdict(list)
CD_dict = defaultdict(list)
CM_dict = defaultdict(list)
test_number = 1
has_more_tests = True
# For each angle, keep loading numbered log files until one does not exist.
for incidency in angles:
    has_more_tests = True
    while has_more_tests:
        if load:
            filename = "C{}-{}.txt".format(incidency, test_number)
        else:
            filename = "S{}-{}.txt".format(incidency, test_number)
        filepath = "Logs/{}/{}".format(folderpath, filename)
        bateria = Bateria(
            filepath = filepath,
            skipable_rows = [0,1,2,3,4,5,7],
            bancada_incidency = incidency,
            wing_area_ref = 0.45,
            wing_chord_ref = 0.25,
            toller_acc_horz=1.5,
            toller_acc_vert=5,
            bancada_susp_mass=5,
            ok_min_velocity=9,
        )
        print()
        if bateria.test_exists:
            print('Incidency: {} // Test: {}'.format(incidency, test_number))
            print(bateria.CL_mean)
            print(bateria.CD_mean)
            print(bateria.CM_mean)
            CL_dict[incidency].append(bateria.CL_mean)
            CD_dict[incidency].append(bateria.CD_mean)
            CM_dict[incidency].append(bateria.CM_mean)
            test_number += 1
        else:
            has_more_tests = False
            print('No more tests')
            test_number = 1
print()
print('---------------------------------------------------------------')
# Aggregate per-angle statistics (only for angles that produced data).
CL_list = {}
CD_list = {}
CM_list = {}
CL_mean = {}
CD_mean = {}
CM_mean = {}
CL_std = {}
CD_std = {}
CM_std = {}
for inc in angles:
    if CL_dict[inc]:
        CL_list[inc] = CL_dict[inc]
        CD_list[inc] = CD_dict[inc]
        CM_list[inc] = CM_dict[inc]
        CL_mean[inc] = np.mean(CL_dict[inc])
        CD_mean[inc] = np.mean(CD_dict[inc])
        CM_mean[inc] = np.mean(CM_dict[inc])
        CL_std[inc] = np.std(CL_dict[inc])
        CD_std[inc] = np.std(CD_dict[inc])
        CM_std[inc] = np.std(CM_dict[inc])
        print('Incidency angle: {}'.format(inc))
        print('CL list: {} \t CL mean: {} \t CL std deviation: {}'.format(CL_list[inc], CL_mean[inc], CL_std[inc]))
        print('CD list: {} \t CD mean: {} \t CD std deviation: {}'.format(CD_list[inc], CD_mean[inc], CD_std[inc]))
        print('CM list: {} \t CM mean: {} \t CM std deviation: {}'.format(CM_list[inc], CM_mean[inc], CM_std[inc]))
        print()
print('---------------------------------------------------------------')
# One errorbar panel per coefficient (mean with std as the error bar).
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
# NOTE(review): 'Coeeficient' is a typo in the displayed title (runtime
# string, left unchanged here).
plt.suptitle('Coeeficient\'s Mean and Std for all tests')
ax1.set_xlabel('Angle of Attack')
ax1.set_ylabel('CL')
ax2.set_xlabel('Angle of Attack')
ax2.set_ylabel('CD')
ax3.set_xlabel('Angle of Attack')
ax3.set_ylabel('CM')
# NOTE(review): dict views are passed straight to errorbar — some Matplotlib
# versions want explicit lists; confirm against the pinned version.
ax1.errorbar(CL_mean.keys(),CL_mean.values(), CL_std.values())
ax2.errorbar(CD_mean.keys(),CD_mean.values(), CD_std.values())
ax3.errorbar(CM_mean.keys(),CM_mean.values(), CM_std.values())
plt.show()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.BatchRoyaltyDetail import BatchRoyaltyDetail
class AlipayTradeBatchTransferQueryResponse(AlipayResponse):
    """Response model for the alipay.trade.batch.transfer.query API.

    Exposes out_request_no, royalty_detail (list of BatchRoyaltyDetail) and
    settle_no, populated from the raw response dict by parse_response_content.
    (Generated-style code: structure kept identical to sibling responses.)
    """
    def __init__(self):
        super(AlipayTradeBatchTransferQueryResponse, self).__init__()
        self._out_request_no = None
        self._royalty_detail = None
        self._settle_no = None
    @property
    def out_request_no(self):
        return self._out_request_no
    @out_request_no.setter
    def out_request_no(self, value):
        self._out_request_no = value
    @property
    def royalty_detail(self):
        return self._royalty_detail
    @royalty_detail.setter
    def royalty_detail(self, value):
        # Accepts a list of BatchRoyaltyDetail instances or raw dicts;
        # dicts are coerced via BatchRoyaltyDetail.from_alipay_dict.
        if isinstance(value, list):
            self._royalty_detail = list()
            for i in value:
                if isinstance(i, BatchRoyaltyDetail):
                    self._royalty_detail.append(i)
                else:
                    self._royalty_detail.append(BatchRoyaltyDetail.from_alipay_dict(i))
    @property
    def settle_no(self):
        return self._settle_no
    @settle_no.setter
    def settle_no(self, value):
        self._settle_no = value
    def parse_response_content(self, response_content):
        """Parse the raw payload via the base class, then copy known fields."""
        response = super(AlipayTradeBatchTransferQueryResponse, self).parse_response_content(response_content)
        if 'out_request_no' in response:
            self.out_request_no = response['out_request_no']
        if 'royalty_detail' in response:
            self.royalty_detail = response['royalty_detail']
        if 'settle_no' in response:
            self.settle_no = response['settle_no']
|
# -*- coding: utf-8 -*-
# Description:
# Created: 邵鲁玉 2019/10/07
from _datetime import datetime, timedelta
import json
from test.test_model import update_cargo_management, test_end_window, test_allocation
import os
from config import ExperimentalConfig
def test_main(start_day, start_day_str, days, start_time, times):
    '''
    Test: from July 2 to July 6, fetch inventory information every 20 minutes
    and run per-vehicle allocation for every real loading list (including
    marks on goods that were already allocated).

    :param start_day: day-of-month of the first fixture to replay (4 -> "0704.json")
    :param start_day_str: simulation start timestamp, format "%Y%m%d%H%M%S"
    :param days: number of consecutive days to replay
    :param start_time: index of the first 20-minute window within each day
    :param times: number of 20-minute windows to replay per day
    '''
    i = start_day
    basedir = os.path.abspath(os.path.dirname(__file__))
    current_time = datetime.strptime(start_day_str, "%Y%m%d%H%M%S")
    while i < start_day + days:
        file_name = "070" + str(i) + ".json"
        file_path = os.path.join(basedir, "", "json_data", file_name)
        j = start_time
        with open(file_path, 'r', encoding='utf-8') as load_f:
            all_cars = json.load(load_f)
        while j < start_time + times:
            cars = []
            end_time = current_time + timedelta(minutes=20)
            print(current_time)
            # print(end_time)
            # keep only the vehicles whose create_time falls in this 20-minute window
            for index, one_car in enumerate(all_cars):
                car_time = datetime.strptime(one_car['create_time'], "%Y%m%d%H%M%S")
                if current_time <= car_time < end_time:
                    cars.append(one_car)
            # print("cars length:"+str(len(cars)))
            test_one_time_bin(cars, current_time)
            test_end_window()
            current_time = end_time
            j += 1
        i += 1
def test_one_time_bin(cars, start_time):
    '''
    One 20-minute test window: refresh cargo state to `start_time`, then
    run allocation for the given vehicles.
    :param cars: the vehicles (JSON) to allocate within this window
    :param start_time: start time of the test case  type: datetime
    :return: None
    '''
    # end_time = datetime.strptime(start_time, ) + timedelta(minutes=20)
    # cargo_management.init_goods_list(start_time.strftime("%Y%m%d%H%M%S"))
    ExperimentalConfig.current_time_str = start_time
    update_cargo_management(start_time.strftime("%Y%m%d%H%M%S"))
    test_allocation(cars)
def test_demo(file_name, start_time):
    '''
    A single test within one 20-minute window, driven by a JSON fixture.
    :param file_name: fixture file name under json_data/
    :param start_time: window start time (datetime)
    :return: None
    '''
    basedir = os.path.abspath(os.path.dirname(__file__))
    file_path = os.path.join(basedir, "", "json_data", file_name)
    # , encoding = 'utf-8'
    with open(file_path, 'r') as load_f:
        cars = json.load(load_f)
    # NOTE(review): test_end_window() is invoked twice in a row here — looks
    # like an accidental duplicate; confirm before removing.
    test_end_window()
    test_end_window()
    test_one_time_bin(cars, start_time)
    test_end_window()
if __name__ == '__main__':
    # 0740-->18+2
    # Replay 3 days starting 2019-07-04, 72 twenty-minute windows per day (24h).
    test_main(4, "20190704000000", 3, 0, 3 * 24)
    # test_demo("debug.json", datetime.strptime("20190705050000", "%Y%m%d%H%M%S"))
    # test_main(5, "20190705000000", 1, 0, 1 * 24)
|
from django.shortcuts import render, redirect
from posts.models import Post
from bugs.models import Bug
from features.models import Feature
from django.views.generic import ListView
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.contrib import messages
# Create your views here.
def search(request):
    """Search bugs, features or posts for the ``q`` query string.

    The chosen search type (``type`` GET parameter) is remembered in the
    session so a follow-up search defaults to the previous choice.
    Results are paginated five per page and rendered with
    search_results.html.
    """
    type_session = request.session.get('type', None)
    selections = ["bugs", "features", "posts"]
    search_type = request.GET.get('type')
    if search_type is None:
        # Fall back to the type remembered from the previous search.
        search_type = type_session
    # .get() avoids a KeyError when the request carries no 'q' at all.
    query = request.GET.get('q', '')
    if len(query) < 2:
        messages.info(request, "Type an expression to search")
        return redirect('search')
    # Default to an empty queryset so an unexpected search_type can no
    # longer raise UnboundLocalError when paginating below.
    posts = Post.objects.none()
    if search_type == "bugs":
        posts = Bug.objects.filter(
            Q(tag__icontains=query) |
            Q(description__icontains=query) |
            Q(title__icontains=query)
        ).distinct().order_by('-id')
    elif search_type == "features":
        posts = Feature.objects.filter(
            Q(tag__icontains=query) |
            Q(description__icontains=query) |
            Q(title__icontains=query)
        ).distinct().order_by('-id')
    elif search_type == "posts":
        posts = Post.objects.filter(
            Q(tag__icontains=query) |
            Q(content__icontains=query) |
            Q(title__icontains=query)
        ).distinct().order_by('-id')
    page = request.GET.get('page', 1)
    paginator = Paginator(posts, 5)
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        posts = paginator.page(1)
    except EmptyPage:
        posts = paginator.page(paginator.num_pages)
    context = {
        "object_list": posts,
        "selections": selections,
        "type": search_type
    }
    request.session['type'] = search_type
    return render(request, "search_results.html", context)
def search_results(request):
    """Render the search form together with the available categories."""
    selections = [
        "features",
        "bugs",
        "posts",
    ]
    return render(request, "search.html", {"selections": selections})
|
from __future__ import division, absolute_import, print_function
from six.moves import range
__copyright__ = "Copyright (C) 2014 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import numpy.linalg as la
import pyopencl as cl
import pyopencl.array # noqa
import pyopencl.clmath # noqa
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl
as pytest_generate_tests)
import pytest
import logging
logger = logging.getLogger(__name__)
def test_circle_mesh(do_plot=False):
    """Mesh a circle CAD file with gmsh, scale it, optionally plot it."""
    from meshmode.mesh.io import generate_gmsh, FileSource
    print("BEGIN GEN")
    mesh = generate_gmsh(
            FileSource("circle.step"), 2, order=2,
            force_ambient_dim=2,
            other_options=[
                "-string", "Mesh.CharacteristicLengthMax = 0.05;"]
            )
    print("END GEN")
    print(mesh.nelements)
    from meshmode.mesh.processing import affine_map
    # Uniformly scale the mesh by a factor of 3.
    mesh = affine_map(mesh, A=3*np.eye(2))
    if do_plot:
        from meshmode.mesh.visualization import draw_2d_mesh
        draw_2d_mesh(mesh, fill=None, draw_connectivity=True,
                set_bounding_box=True)
        import matplotlib.pyplot as pt
        pt.show()
def test_boundary_interpolation(ctx_getter):
    """Convergence test for volume-to-boundary interpolation.

    For gmsh meshes of decreasing h, compare a function sampled directly
    on the boundary nodes against the same function interpolated from the
    volume via the boundary restriction, and check the estimated order
    of convergence against the discretization order.
    """
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)
    from meshmode.mesh.io import generate_gmsh, FileSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from meshmode.discretization.connection import make_boundary_restriction
    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()
    order = 4
    for h in [1e-1, 3e-2, 1e-2]:
        print("BEGIN GEN")
        mesh = generate_gmsh(
                FileSource("blob-2d.step"), 2, order=order,
                force_ambient_dim=2,
                other_options=[
                    "-string", "Mesh.CharacteristicLengthMax = %s;" % h]
                )
        print("END GEN")
        vol_discr = Discretization(cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(order))
        print("h=%s -> %d elements" % (
                h, sum(mgrp.nelements for mgrp in mesh.groups)))
        x = vol_discr.nodes()[0].with_queue(queue)
        f = 0.1*cl.clmath.sin(30*x)
        bdry_mesh, bdry_discr, bdry_connection = make_boundary_restriction(
                queue, vol_discr, InterpolatoryQuadratureSimplexGroupFactory(order))
        bdry_x = bdry_discr.nodes()[0].with_queue(queue)
        bdry_f = 0.1*cl.clmath.sin(30*bdry_x)
        # Interpolate the volume field onto the boundary and compare.
        bdry_f_2 = bdry_connection(queue, f)
        err = la.norm((bdry_f-bdry_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)
    print(eoc_rec)
    assert eoc_rec.order_estimate() >= order-0.5
def test_element_orientation():
    """Flipping random elements must flip exactly their orientations."""
    from meshmode.mesh.io import generate_gmsh, FileSource
    mesh_order = 3
    mesh = generate_gmsh(
            FileSource("blob-2d.step"), 2, order=mesh_order,
            force_ambient_dim=2,
            other_options=["-string", "Mesh.CharacteristicLengthMax = 0.02;"]
            )
    from meshmode.mesh.processing import (perform_flips,
            find_volume_mesh_element_orientations)
    mesh_orient = find_volume_mesh_element_orientations(mesh)
    # A freshly generated mesh must be positively oriented throughout.
    assert (mesh_orient > 0).all()
    from random import randrange
    # Flip ~30% of the elements (with possible repeats).
    flippy = np.zeros(mesh.nelements, np.int8)
    for i in range(int(0.3*mesh.nelements)):
        flippy[randrange(0, mesh.nelements)] = 1
    mesh = perform_flips(mesh, flippy, skip_tests=True)
    mesh_orient = find_volume_mesh_element_orientations(mesh)
    # Exactly the flipped elements must now be negatively oriented.
    assert ((mesh_orient < 0) == (flippy > 0)).all()
def test_merge_and_map(ctx_getter, visualize=False):
    """Merge a mesh with a translated copy; optionally write a VTK file."""
    from meshmode.mesh.io import generate_gmsh, FileSource
    mesh_order = 3
    mesh = generate_gmsh(
            FileSource("blob-2d.step"), 2, order=mesh_order,
            force_ambient_dim=2,
            other_options=["-string", "Mesh.CharacteristicLengthMax = 0.02;"]
            )
    # NOTE(review): "merge_dijsoint_meshes" looks misspelled — confirm it
    # matches the installed meshmode API (usually "merge_disjoint_meshes").
    from meshmode.mesh.processing import merge_dijsoint_meshes, affine_map
    # Translate the copy by (5, 0) so the two meshes do not overlap.
    mesh2 = affine_map(mesh, A=np.eye(2), b=np.array([5, 0]))
    mesh3 = merge_dijsoint_meshes((mesh2, mesh))
    if visualize:
        from meshmode.discretization import Discretization
        from meshmode.discretization.poly_element import \
                PolynomialWarpAndBlendGroupFactory
        cl_ctx = ctx_getter()
        queue = cl.CommandQueue(cl_ctx)
        discr = Discretization(cl_ctx, mesh3,
                PolynomialWarpAndBlendGroupFactory(3))
        from meshmode.discretization.visualization import make_visualizer
        vis = make_visualizer(queue, discr, 1)
        vis.write_vtk_file("merged.vtu", [])
@pytest.mark.parametrize("dim", [2, 3])
@pytest.mark.parametrize("order", [1, 3])
def test_sanity_single_element(ctx_getter, dim, order, visualize=False):
    """Sanity checks on a one-element simplex mesh: volume and normals."""
    pytest.importorskip("pytential")
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)
    from modepy.tools import UNIT_VERTICES
    vertices = UNIT_VERTICES[dim].T.copy()
    center = np.empty(dim, np.float64)
    center.fill(-0.5)
    import modepy as mp
    from meshmode.mesh import SimplexElementGroup, Mesh
    mg = SimplexElementGroup(
            order=order,
            vertex_indices=np.arange(dim+1, dtype=np.int32).reshape(1, -1),
            nodes=mp.warp_and_blend_nodes(dim, order).reshape(dim, 1, -1),
            dim=dim)
    mesh = Mesh(vertices, [mg])
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            PolynomialWarpAndBlendGroupFactory
    vol_discr = Discretization(cl_ctx, mesh,
            PolynomialWarpAndBlendGroupFactory(order+3))
    # {{{ volume calculation check
    vol_x = vol_discr.nodes().with_queue(queue)
    vol_one = vol_x[0].copy()
    vol_one.fill(1)
    from pytential import norm, integral  # noqa
    from pytools import factorial
    # Reference volume: unit simplex scaled by 2 in each dimension.
    true_vol = 1/factorial(dim) * 2**dim
    comp_vol = integral(vol_discr, queue, vol_one)
    rel_vol_err = abs(true_vol - comp_vol) / true_vol
    assert rel_vol_err < 1e-12
    # }}}
    # {{{ boundary discretization
    from meshmode.discretization.connection import make_boundary_restriction
    bdry_mesh, bdry_discr, bdry_connection = make_boundary_restriction(
            queue, vol_discr, PolynomialWarpAndBlendGroupFactory(order + 3))
    # }}}
    # {{{ visualizers
    from meshmode.discretization.visualization import make_visualizer
    #vol_vis = make_visualizer(queue, vol_discr, 4)
    bdry_vis = make_visualizer(queue, bdry_discr, 4)
    # }}}
    from pytential import bind, sym
    bdry_normals = bind(bdry_discr, sym.normal())(queue).as_vector(dtype=object)
    if visualize:
        bdry_vis.write_vtk_file("boundary.vtu", [
            ("bdry_normals", bdry_normals)
            ])
    from pytential import bind, sym
    # Normals must point away from the simplex centroid.
    normal_outward_check = bind(bdry_discr,
            sym.normal()
            |
            (sym.Nodes() + 0.5*sym.ones_vec(dim)),
            )(queue).as_scalar() > 0
    assert normal_outward_check.get().all(), normal_outward_check.get()
# python test_meshmode.py 'test_sanity_balls(cl._csc, "disk-radius-1.step", 2, 2, visualize=True)' # noqa
@pytest.mark.parametrize(("src_file", "dim"), [
    ("disk-radius-1.step", 2),
    ("ball-radius-1.step", 3),
    ])
@pytest.mark.parametrize("mesh_order", [1, 2])
def test_sanity_balls(ctx_getter, src_file, dim, mesh_order,
        visualize=False):
    """Check volume and surface integrals on meshed unit disks/balls.

    For a sequence of mesh resolutions, integrate the constant 1 over the
    volume and the boundary, compare against the exact values, and
    require the estimated convergence order to reach mesh_order.
    """
    pytest.importorskip("pytential")
    logging.basicConfig(level=logging.INFO)
    ctx = ctx_getter()
    queue = cl.CommandQueue(ctx)
    from pytools.convergence import EOCRecorder
    vol_eoc_rec = EOCRecorder()
    surf_eoc_rec = EOCRecorder()
    # overkill
    quad_order = mesh_order
    from pytential import bind, sym
    for h in [0.2, 0.14, 0.1]:
        from meshmode.mesh.io import generate_gmsh, FileSource
        mesh = generate_gmsh(
                FileSource(src_file), dim, order=mesh_order,
                other_options=["-string", "Mesh.CharacteristicLengthMax = %g;" % h],
                force_ambient_dim=dim)
        logger.info("%d elements" % mesh.nelements)
        # {{{ discretizations and connections
        from meshmode.discretization import Discretization
        from meshmode.discretization.poly_element import \
                InterpolatoryQuadratureSimplexGroupFactory
        vol_discr = Discretization(ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(quad_order))
        from meshmode.discretization.connection import make_boundary_restriction
        bdry_mesh, bdry_discr, bdry_connection = make_boundary_restriction(
                queue, vol_discr,
                InterpolatoryQuadratureSimplexGroupFactory(quad_order))
        # }}}
        # {{{ visualizers
        from meshmode.discretization.visualization import make_visualizer
        vol_vis = make_visualizer(queue, vol_discr, 20)
        bdry_vis = make_visualizer(queue, bdry_discr, 20)
        # }}}
        # Exact surface area and volume of the unit d-ball.
        from math import gamma
        true_surf = 2*np.pi**(dim/2)/gamma(dim/2)
        true_vol = true_surf/dim
        vol_x = vol_discr.nodes().with_queue(queue)
        vol_one = vol_x[0].copy()
        vol_one.fill(1)
        from pytential import norm, integral  # noqa
        comp_vol = integral(vol_discr, queue, vol_one)
        rel_vol_err = abs(true_vol - comp_vol) / true_vol
        vol_eoc_rec.add_data_point(h, rel_vol_err)
        print("VOL", true_vol, comp_vol)
        bdry_x = bdry_discr.nodes().with_queue(queue)
        bdry_one_exact = bdry_x[0].copy()
        bdry_one_exact.fill(1)
        # Interpolating the constant 1 must be (near-)exact.
        bdry_one = bdry_connection(queue, vol_one).with_queue(queue)
        intp_err = norm(bdry_discr, queue, bdry_one-bdry_one_exact)
        assert intp_err < 1e-14
        comp_surf = integral(bdry_discr, queue, bdry_one)
        rel_surf_err = abs(true_surf - comp_surf) / true_surf
        surf_eoc_rec.add_data_point(h, rel_surf_err)
        print("SURF", true_surf, comp_surf)
        if visualize:
            vol_vis.write_vtk_file("volume-h=%g.vtu" % h, [
                ("f", vol_one),
                ("area_el", bind(vol_discr, sym.area_element())(queue)),
                ])
            bdry_vis.write_vtk_file("boundary-h=%g.vtu" % h, [("f", bdry_one)])
        # {{{ check normals point outward
        normal_outward_check = bind(bdry_discr,
                sym.normal() | sym.Nodes(),
                )(queue).as_scalar() > 0
        assert normal_outward_check.get().all(), normal_outward_check.get()
        # }}}
    print("---------------------------------")
    print("VOLUME")
    print("---------------------------------")
    print(vol_eoc_rec)
    assert vol_eoc_rec.order_estimate() >= mesh_order
    print("---------------------------------")
    print("SURFACE")
    print("---------------------------------")
    print(surf_eoc_rec)
    assert surf_eoc_rec.order_estimate() >= mesh_order
def test_rect_mesh(do_plot=False):
    """Generate a regular rectangular mesh; optionally plot it."""
    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh()
    if do_plot:
        from meshmode.mesh.visualization import draw_2d_mesh
        draw_2d_mesh(mesh, fill=None, draw_connectivity=True)
        import matplotlib.pyplot as pt
        pt.show()
def test_box_mesh():
    """Smoke test: generate a 3D box mesh with 5 grid points per axis."""
    from meshmode.mesh.generation import generate_box_mesh
    generate_box_mesh(3*(np.linspace(0, 1, 5),))
def test_as_python():
    """Round-trip a mesh through as_python-generated source code."""
    from meshmode.mesh.generation import make_curve_mesh, cloverleaf
    mesh = make_curve_mesh(cloverleaf, np.linspace(0, 1, 100), order=3)
    # Force creation of the connectivity before serializing.
    mesh.element_connectivity
    from meshmode.mesh import as_python
    code = as_python(mesh)
    print(code)
    # Execute the generated module and rebuild the mesh from it.
    exec_dict = {}
    exec(compile(code, "gen_code.py", "exec"), exec_dict)
    mesh_2 = exec_dict["make_mesh"]()
    assert mesh == mesh_2
def test_lookup_tree(do_plot=False):
    """Spot-check the element lookup tree with random query points."""
    from meshmode.mesh.generation import make_curve_mesh, cloverleaf
    mesh = make_curve_mesh(cloverleaf, np.linspace(0, 1, 1000), order=3)
    from meshmode.mesh.tools import make_element_lookup_tree
    tree = make_element_lookup_tree(mesh)
    from meshmode.mesh.processing import find_bounding_box
    bbox_min, bbox_max = find_bounding_box(mesh)
    extent = bbox_max-bbox_min
    for i in range(20):
        # Random point inside the mesh bounding box.
        pt = bbox_min + np.random.rand(2) * extent
        print(pt)
        for igrp, iel in tree.generate_matches(pt):
            print(igrp, iel)
    if do_plot:
        with open("tree.dat", "w") as outf:
            tree.visualize(outf)
if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        # Allow running one test directly:
        #   python test_meshmode.py 'test_circle_mesh(do_plot=True)'
        exec(sys.argv[1])
    else:
        from py.test.cmdline import main
        main([__file__])
# vim: fdm=marker
|
import numpy
import json
import cv2
import numpy as np
import os
import scipy.misc as misc
def show(Im):
    """Display *Im* in an OpenCV window and block until a key is pressed."""
    frame = Im.astype(np.uint8)
    cv2.imshow("show", frame)
    cv2.waitKey()
    cv2.destroyAllWindows()
###############################################################################################
def FindIntersection(InDir,MatDir, VesselDir):
    """Interactively resolve overlaps between material and vessel masks.

    For every sample directory under *InDir*, each material mask in
    *MatDir* is compared against every vessel mask in *VesselDir*.  When
    the overlap fraction is ambiguous (between 1% and 80% of the material
    area) the user is shown both masks and classifies the material as
    inside ('i'), in front of ('f') or behind ('a') the vessel.  The
    material mask is rewritten as a multi-channel image and the mask
    directory is renamed with a "V" suffix when fully processed.

    NOTE(review): interactive (cv2 windows + keyboard) and destructive —
    removes empty mask files and rewrites mask files in place.
    """
    pp=0
    for DirName in os.listdir(InDir):
        pp+=1
        print(pp)
        DirName=InDir+"/"+DirName
        MSgDir = DirName + "/" + MatDir + "//"
        VSgDir = DirName + "/" + VesselDir + "//"
        if not os.path.isdir(MSgDir):
            print(MSgDir)
            continue
        k=0
        Im = cv2.imread(DirName+"/Image.png")
        for mfile in os.listdir(MSgDir):
            NVessels = 0
            path1=MSgDir+"/"+mfile
            if not os.path.exists(path1):continue
            msg = cv2.imread(path1,0)
            # Remove empty (all-zero) masks outright.
            if msg.sum()==0:
                os.remove(path1)
                print(path1+"File Removed!")
                continue
            emsg=np.expand_dims(msg,axis=2)
            for vfile in os.listdir(VSgDir):
                path2 = VSgDir + "/" + vfile
                if not os.path.exists(path2): continue
                vsg = cv2.imread(path2, 0)
                # Boolean overlap between the vessel and material masks.
                inter=((vsg*msg)>0)#.astype(np.uint8)
                print(path1)
                print(path2)
                if (inter).sum()/((msg>0).sum())<0.8:
                    if (inter).sum()/((msg>0).sum())>0.01:
                        # Ambiguous overlap: ask the user to classify it.
                        Txt=" i(in vessel) f(front of vessel) a(after vessel)"
                        Im1=Im.copy()
                        Im1[:,:,0] *= 1-vsg
                        Im1[:, :, 2] *= 1 - msg
                        cv2.imshow(Txt+"2", cv2.resize(Im1,(500,500)))
                        cv2.imshow(Txt, cv2.resize(np.concatenate([vsg, msg], axis=1) * 250,(1000,500)))
                        # Wait until one of the accepted keys is pressed.
                        while (True):
                            ch = chr(cv2.waitKey())
                            if ch=='i' or ch=='f' or ch=='a': break
                        cv2.destroyAllWindows()
                        if ch=='i':
                            # Material inside the vessel: add a vessel channel.
                            emsg = np.concatenate([emsg, np.expand_dims(vsg, axis=2)], axis=2)
                            NVessels+=1
                        if ch=='a':
                            # Material behind the vessel: mark overlap pixels with 5.
                            msg[inter > 0]=5
                            emsg[:,:,0]=msg
                else:
                    # >= 80% overlap: clearly inside this vessel.
                    emsg = np.concatenate([emsg, np.expand_dims(vsg,axis=2)],axis=2)
                    NVessels += 1
            # A material intersecting more than two vessels is treated
            # as an annotation error; stop for manual inspection.
            if NVessels>2:
                print("error")
                print(path1)
                print(path2)
                show(Im)
                show(msg*50)
                exit(0)
            # Pad to three channels so the image can be written out.
            # NOTE(review): 'vsg' is reused from the last loop iteration —
            # raises NameError if the vessel directory is empty; confirm.
            if emsg.shape[2]==2:
                emsg = np.concatenate([emsg, np.expand_dims(vsg*0,axis=2)],axis=2)
            cv2.imwrite(path1, emsg)
        # Mark the directory as processed.
        os.rename(MSgDir,MSgDir.replace(MatDir,MatDir+"V"))
# Input directory containing one sub-directory per annotated image.
InDir=r"C:\Users\Sagi\Desktop\NewChemistryDataSet\NewFormat\Temp\\"##C:\Users\Sagi\Desktop\NewChemistryDataSet\NewFormat\Instance\\"
MatDir=r"PartsVi"
VesselDir=r"VesselV"
# FindIntersection(InDir, SubDir)
FindIntersection(InDir,MatDir, VesselDir)
"""
Error tests
The tests in this file should test errors occurring in log files. These
should be genuine error messages from LaTeX log files, possibly including
BLANK lines in the lines iterable.
"""
import pytest
import texoutparse
@pytest.fixture
def parser():
    """Provide a fresh LatexLogParser instance for each test."""
    return texoutparse.LatexLogParser()
def test_package_not_found_error(parser):
    """A missing .sty file is reported as exactly one error."""
    log_lines = [
        "! LaTeX Error: File `foobar.sty' not found.",
        " BLANK",
        " BLANK",
    ]
    parser.process(log_lines)
    assert len(parser.badboxes) == 0
    assert len(parser.warnings) == 0
    assert len(parser.errors) == 1
    error = parser.errors[0]
    assert error.context_lines == log_lines
    assert error['message'] == "File `foobar.sty' not found."
def test_undefined_control_seq_tex_error(parser):
    """An undefined control sequence produces one error entry."""
    log_lines = [
        "! Undefined control sequence.",
        "l.6 \\dtae",
        "{December 2004}"
    ]
    parser.process(log_lines)
    assert len(parser.badboxes) == 0
    assert len(parser.warnings) == 0
    assert len(parser.errors) == 1
    error = parser.errors[0]
    assert error['message'] == "Undefined control sequence."
    assert error.context_lines == log_lines
def test_too_many_braces_tex_error(parser):
    """An unbalanced closing brace produces one error entry."""
    log_lines = [
        "! Too many }'s.",
        "l.6 \\date December 2004}"
    ]
    parser.process(log_lines)
    assert len(parser.badboxes) == 0
    assert len(parser.warnings) == 0
    assert len(parser.errors) == 1
    error = parser.errors[0]
    assert error['message'] == "Too many }'s."
    assert error.context_lines == log_lines
def test_missing_math_mode_tex_error(parser):
    """A missing math-mode dollar sign produces one error entry."""
    log_lines = [
        "! Missing $ inserted",
        " BLANK",
        " BLANK"
    ]
    parser.process(log_lines)
    assert len(parser.badboxes) == 0
    assert len(parser.warnings) == 0
    assert len(parser.errors) == 1
    error = parser.errors[0]
    assert error['message'] == "Missing $ inserted"
    assert error.context_lines == log_lines
def test_package_error(parser):
    """A package error records its type, package name and message."""
    log_lines = [
        "! Package babel Error: Unknown option `latin'. Either you misspelled it",
        "(babel) or the language definition file latin.ldf was not found.",
        " BLANK"
    ]
    parser.process(log_lines)
    assert len(parser.badboxes) == 0
    assert len(parser.warnings) == 0
    assert len(parser.errors) == 1
    error = parser.errors[0]
    assert error.context_lines == log_lines
    assert error['message'] == "Unknown option `latin'. Either you misspelled it"
    assert error['package'] == 'babel'
    assert error['type'] == 'Package'
def test_pdftex_error(parser):
    """A pdfTeX error records its type, the offending macro and message."""
    log_lines = [
        "! pdfTeX error (\\pdfsetmatrix): Unrecognized format..",
        " BLANK",
        " BLANK"
    ]
    parser.process(log_lines)
    assert len(parser.badboxes) == 0
    assert len(parser.warnings) == 0
    assert len(parser.errors) == 1
    error = parser.errors[0]
    assert error.context_lines == log_lines
    assert error['message'] == "Unrecognized format.."
    assert error['extra'] == '\\pdfsetmatrix'
    assert error['type'] == 'pdfTeX'
def test_class_error(parser):
    """A document-class error records its type, class name and message."""
    log_lines = [
        "! Class article Error: Unrecognized argument for \\macro.",
        " BLANK",
        " BLANK"
    ]
    parser.process(log_lines)
    assert len(parser.badboxes) == 0
    assert len(parser.warnings) == 0
    assert len(parser.errors) == 1
    error = parser.errors[0]
    assert error.context_lines == log_lines
    assert error['message'] == "Unrecognized argument for \\macro."
    assert error['class'] == 'article'
    assert error['type'] == 'Class'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" a command line interface to the python tool for leap year check
"""
import click # pylint: disable=import-error
from is_leap import is_leap
from logger import LOGGER
@click.group()
def cli() -> None:
    """Root click command group for the leap-year CLI."""
    pass
@cli.command("is_leap")
@click.option("--year", help="the year to check", nargs=1, required=True, type=(int))
def is_leap_cli(year: int) -> None:
    """Log whether *year* is a leap year, via the is_leap module."""
    LOGGER.info("is_leap_cli({}) is {}".format(year, is_leap(year)))
if __name__ == "__main__":
    # Dispatch to the click command group.
    cli()
|
#!/usr/bin/env python
from TTAtom import Atom
import csv
def get_xyz(filename):
    """Parse a Tinker XYZ coordinate file into a list of Atom objects.

    :param filename: path to the Tinker XYZ file; line 1 is the atom
        count, each following line is
        "index symbol x y z type connected-atom-indices..."
    :return: list of Atom objects (connectivity converted to 0-based)
    """
    atoms = []
    count = 0
    # 'with' guarantees the file handle is closed (the original opened
    # the file inline and leaked the handle).
    with open(filename) as xyz_file:
        for line in csv.reader(xyz_file, delimiter=" ",
                               skipinitialspace=True):
            if count > 0:  # skip the header line (atom count)
                atoms.append(Atom(
                    count,
                    float(line[2]),
                    float(line[3]),
                    float(line[4]),
                    [line[1], int(line[5])],
                    # file stores 1-based neighbor indices; keep 0-based
                    [a - 1 for a in map(int, line[6:])]
                ))
            count += 1
    return atoms
def write_xyz(atoms, filename):
    """Write a Tinker XYZ coordinate file named *filename*.

    One line per atom: index, element symbol, x/y/z with six decimal
    places, atom type number, then the 1-based indices of connected
    atoms — all in fixed-width columns.

    :param atoms: sequence of Atom-like objects exposing n, type,
        x, y, z and connectivity attributes
    :param filename: output path
    """
    # 'with' guarantees the handle is closed even if a write fails
    # (the original left the file open on error).
    with open(filename, "w") as outfile:
        outfile.write("%6s" % len(atoms) + "\n")
        for atom in atoms:
            fields = [
                "%6s" % str(atom.n),
                "%3s" % atom.type[0],
                "%14s" % str("{:.6f}".format(atom.x)),
                "%12s" % str("{:.6f}".format(atom.y)),
                "%12s" % str("{:.6f}".format(atom.z)),
                "%6s" % str(atom.type[1]),
            ]
            # connectivity is stored 0-based; the file format is 1-based.
            fields.extend("%6s" % str(i + 1) for i in atom.connectivity)
            outfile.write("".join(fields) + "\n")
def make_filename(torComb):
    """Append the torsion-angle combination to torComb.filename plus ".xyz".

    Each angle is rendered with no decimal places; negative angles get an
    "n" prefix instead of "-" so the name stays filesystem-friendly.
    """
    for angle in torComb.combination:
        prefix = "n" if angle < 0 else ""
        torComb.filename += "_" + prefix + ("%0.0f" % abs(angle))
    torComb.filename += ".xyz"
# Reads a digit string from stdin and prints a score: each digit is
# paired with its successor; two-digit values in 1..26 increment v,
# a pair ending the string... NOTE(review): the exact intent (it
# resembles a letter-decoding count) is undocumented — confirm before
# relying on the printed value.
pk=list(map(str,input()))
v=t=0
for i in range(0,len(pk)-1):
    q=pk[i]
    if int(q)!=0:
        # Append the next digit to form a two-digit candidate.
        for j in range(i+1,i+2):
            q=q+pk[j]
        if int(q)<27 and int(q)>0: v=v+1
        elif int(q)==0: v=v-1
    else: break
if v!=1: t=v%2
print(v+t+1)
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index2(request):
    """Render second_app/help.html with a small help-text context."""
    context = {'helpvar': 'Help Page fromhelp.html'}
    return render(request, 'second_app/help.html', context=context)
import dash_table
import data
# Dash DataTable built from the shared `data.data` DataFrame: one column
# per DataFrame column, first rows only (head()), 10 rows per page with
# client-side ("native") sorting and filtering.
table = dash_table.DataTable(
    id='table',
    columns=[{"name": i, "id": i} for i in data.data.columns],
    data = data.data.head().to_dict('records'),
    page_size=10,
    sort_action='native',
    filter_action='native'
)
# Children list for the table tab layout.
tab_table_children = [table]
|
import itertools
def create_new_lists(base_list):
    """Return every extension of *base_list* by the sum of one pair.

    For each (not necessarily distinct) pair of elements whose sum is
    greater than the current last element, emit a copy of the list with
    that sum appended.
    """
    extended = []
    n = len(base_list)
    for i in range(n):
        for j in range(i, n):
            candidate = base_list[i] + base_list[j]
            if candidate > base_list[-1]:
                extended.append(base_list + [candidate])
    return extended
def find_chain(length):
    """Grow addition chains from [1, 2] for *length* - 1 rounds.

    Returns the sorted, de-duplicated list of all chains produced.  Note
    that the accumulator keeps chains from every intermediate round, so
    the result may mix chain lengths (original behavior preserved).
    """
    chains = [[1, 2]]
    grown = []
    step = 1
    while step < length:
        for chain in chains:
            grown.extend(create_new_lists(chain))
        chains = list(grown)
        step += 1
    grown.sort()
    # The list is sorted, so groupby collapses duplicates.
    return [chain for chain, _ in itertools.groupby(grown)]
def check_if_solution(list_to_look):
    """Print every chain ending in 81 plus a summary count.

    Prints False when no chain in *list_to_look* ends with 81.
    """
    hits = 0
    for chain in list_to_look:
        if chain[-1] == 81:
            hits += 1
            print(chain)
    if hits:
        print("Number of solutions: " + str(hits))
    else:
        print(False)
# print(find_chain(7))
# Search all chains after 7 growth rounds for those ending in 81.
check_if_solution(find_chain(8))
|
"""
For every binding event, it reports the minimum analyte-gold inter-COM distance and the binding residence time.
"""
XTC = "NP22sp-53_PRO1-10_FIX.xtc"
TPR = "NP22sp-53_PRO1.tpr"
NAME = XTC[:-8]
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from MDAnalysis import *
U = Universe(TPR, XTC)
print(len(U.trajectory))
DT = U.trajectory[0].dt
sel = {
"all_gold" : U.select_atoms("name AU AUS AUL"),
"mono_H" : U.select_atoms("resname L22 and name H* and not name H18"),
"SER_H" : U.select_atoms("resname SER and name H* and not name H5 H6 H7 H9 H13"),
"PHE_H" : U.select_atoms("resname PHE and name H* and not name H9 H10 H11"),
"SOL" : U.select_atoms("resname SOL")
}
props_bind_time = {
'anchor' : 'all_gold', #the script reports the minimum distance respect to this group
'ref' : "mono_H",
'targets' : ["SER_H", "PHE_H"],
'solvent' : "SOL",
'start_ps' : 0,
'stop_ps' : 1000000,
'd_max' : 4, #A, threshold distance for magnetization transfer
}
flatten_list = lambda l: [item for sublist in l for item in sublist]
def flatten_group_list(l):
    """Union each sublist of groups into one group; one result per sublist."""
    merged = []
    for groups in l:
        combined = groups[0]
        for extra in groups[1:]:
            combined = combined.union(extra)
        merged.append(combined)
    return merged
class BindingEvent:
    """One contiguous binding event of a residue to the monolayer.

    Records the frame span, the duration, the median residue-to-anchor
    inter-COM distance over the event, and the rate at which unique
    waters appear near the residue's relevant hydrogens.
    """
    def __init__(self, frameini, framefin, Residue, target, props):
        g_sol = sel[props['solvent']]
        g_anchor = sel[props['anchor']]
        # Hydrogens of this residue belonging to the target selection.
        g_res_hs = Residue.atoms.intersection(sel[target])
        self.a = frameini
        self.b = framefin
        self.resid = Residue.resid+1  # +1 to match VMD's 1-based resids
        self.duration = (self.b - self.a)*DT
        meddist, self.waters = [], AtomGroup([], U)
        for ts in U.trajectory[self.a:self.b]:
            meddist.append(np.linalg.norm(Residue.atoms.center_of_mass() - g_anchor.center_of_mass()))
            # Waters currently within d_max of the residue hydrogens;
            # accumulate the union of all waters seen during the event.
            g_wat_now = U.select_atoms("(around {} group AN) and group SOL".format(props['d_max']), AN=g_res_hs, SOL=g_sol, updating=True)
            self.waters = self.waters.union(g_wat_now)
        self.meddist = np.median(meddist)
        # Unique waters per unit time during the event.
        self.v_wat = self.waters.n_residues/self.duration
def bind_time(props):
    """Detect binding events of each target residue to the reference group.

    A residue counts as "bound" in a frame when any reference-target H-H
    distance is <= d_max.  Rising/falling edges of that per-residue
    boolean series delimit the events.

    :param props: analysis settings (see props_bind_time)
    :return: dict mapping target selection name -> list of BindingEvent
    """
    g_anchor = sel[props['anchor']]
    g_ref = sel[props['ref']]
    all_events = {}
    for target in props['targets']:
        res_target = sel[target].residues
        g_residues = [res.atoms for res in sel[target].residues]
        res_Hs = [res.intersection(sel[target]) for res in g_residues]
        bound = np.zeros((len(U.trajectory)+2, len(res_Hs)), dtype='int') #+2 to start and finish with False
        for t, ts in enumerate(U.trajectory,1):
            if ts.time >= props['stop_ps']:
                break
            elif ts.time >= props['start_ps']:
                print(ts.time, end='\r')
                dists = [cdist(g_ref.positions, res.positions) for res in res_Hs]
                close = np.array([np.any(dist<= props['d_max']) for dist in dists])
                bound[t,close] = 1
        # Edge detection: +1 marks a binding onset, -1 marks a release.
        gates = bound[1:,:] - bound[:-1,:]
        on_switch, off_switch = [list(np.where(gate==1)[0]) for gate in gates.T], [list(np.where(gate==-1)[0]) for gate in gates.T]
        events = []
        n_events = len(flatten_list(on_switch))
        ev = 1
        print('')
        for r, res in enumerate(res_target):
            for on, off in zip(on_switch[r], off_switch[r]):
                print("{:d}/{:d}".format(ev, n_events), end="\r")
                event = BindingEvent(on, off, res, target=target, props=props)
                events.append(event)
                ev += 1
        all_events[target] = events
    return all_events
def write_bind_time(props, all_events):
    """Write one line per binding event to <NAME>_btimes.sfu.

    The header echoes the analysis settings; each data line holds the
    residence time, the median inter-COM distance, the unique-water rate
    and the VMD resid, with "***" appended when the event reached the
    end of the simulation.
    """
    f = open(NAME + "_btimes.sfu", 'w')
    values = []
    f.write("#Binding residence time (ps) from (nonpolar) H-H contacts\n")
    for key, val in props.items():
        f.write("#{:<10} {:<20}\n".format(key, str(val)))
    f.write("#*** means that the binding event reached the end of the simulation\n")
    f.write("#Binding residence time (ps) \t Median inter-COM distance between anchor and target during the binding event (nm) \t Number of unique waters around target per unit time (ps-1) \t VMD resid \n")
    for target in props['targets']:
        f.write("#TARGET GROUP: {}\n".format(target))
        events = all_events[target]
        for event in events:
            f.write("{:<10.1f} {:>10.3f} {:>10.3f} {:>7d}".format(event.duration, event.meddist, event.v_wat, event.resid))
            # Mark events that were still ongoing at the trajectory end.
            if event.b*DT == props['stop_ps']:
                f.write(" ***")
            f.write("\n")
    f.close()
# Run the analysis and persist the results.
all_events = bind_time(props_bind_time)
write_bind_time(props_bind_time, all_events)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 22 11:32:40 2018
@author: mihan
"""
import json
#função para testar se uma strig é numero:
def isnumber(valor):
    """Return True when *valor* can be parsed as a float, else False."""
    try:
        float(valor)
        return True
    except ValueError:
        return False
# Load the current stock from the JSON file.
with open ('estoque.json','r') as entrada:
    a = json.loads(entrada.read())
# Stock dict: {store: {product: {"quantidade": int, "valor unitario": float}}}
estoque=a
# Ask for the store name.
loja=input("digite o nome loja: ")
# Modify the store if it already exists, otherwise create it.
if loja in estoque:
    print ("modificando a loja {0}".format(loja))
elif loja not in estoque:
    estoque[loja]={}
    print ("criando loja {0}".format(loja))
# Menu selection variable (the loop below runs until "0" is chosen).
escolha=1
# Print the menu once up front.
print("0-Sair")
print("1-Adicionar produto")
print("2-Remover produto")
print("3-Modificar produto")
print("4-Mostrar estoque completo")
print("5-Produtos que estão em falta")
print("6-Valor monetário total do estoque")
print("7-ver menu de opções")
while escolha!="0":
escolha=input("faça sua escolha: ")
#opção de menu 1
if escolha=="1":
#pede o nome do novo produto:
print ("para cancelar digite: 0")
produto=input("nome do produto: ")
#teste se o produto é válido(está no estoque de loja):
while produto in estoque[loja] and produto!="0":
print ("este produto já existe")
produto=input("nome do produto: ")
#teste se a quantidade inicial é válida (numeral e positivo):
if produto !="0":
quantidade_inicial=input("quantidade de {0}: ".format(produto))
while not quantidade_inicial.isdigit():
print ("digite apenas numeros!")
quantidade_inicial=input("quantidade: ")
quantidade_inicial=int(quantidade_inicial)
while quantidade_inicial<0:
print ('digite um valor maior que 0')
quantidade_inicial=input("quantidade de {0}: ".format(produto))
#pede preço do produto:
preco=input("digite o preço unitário de {0}: ".format(produto))
#testa se prço inicial do produto é válido (numero positivo):
while not isnumber(preco):
print ("digite apenas numeros!")
preco=input("preço de {0}: ".format(produto))
preco=float(preco)
while preco<0:
print ('digite um valor maior que 0')
preco=input("preço de {0}: ".format(produto))
#cria dicionario caracteristica como valor de produto, com preço e quantidade como chaves:
caracteristica={"quantidade":quantidade_inicial,'valor unitario':preco}
estoque[loja][produto]=caracteristica
#printa informações do novo produto:
print ("{0} {1}s foram adicionadas, custando R${2} reais cada".format(quantidade_inicial, produto, "%.2f"%preco))
#cancelamento
else:
print("operação cancelada")
#salva modificações em JSON:
estoque_json = json.dumps(estoque, sort_keys=True, indent=4)
with open ('estoque.json','w') as saida:
saida.write(estoque_json)
#opção de menu 2
elif escolha == "2":
#pede um produto para remover:
print ("para cancelar digite: 0")
remover= input("Digite o nome do produto que deseja remover: ")
#testa se produto a ser removido é válido:
while remover not in estoque[loja] and remover!="0":
print ("Produto não encontrado")
remover = input ("Digite um produto válido: ")
#caso não seja cancelado deleta produto:
if remover !="0":
if remover in estoque[loja]:
del estoque [loja][remover]
print ("{0} foi removido".format(remover))
#cancelamento:
else:
print ("operação cancelada")
#salva em JSON:
estoque_json = json.dumps(estoque, sort_keys=True, indent=4)
with open ('estoque.json','w') as saida:
saida.write(estoque_json)
#opção de menu 3
elif escolha == "3":
#pede produto a ser modificado:
print ("para cancelar digite: 0")
produto=input('digite o nome do produto: ')
#testa se produto é válido (está no estoque da loja):
while produto not in estoque[loja] and produto!="0":
print ('elemento não encontrado')
produto= input ("digite o nome do produto: ")
if produto !="0":
#pede opção de mudança (preço ou quantidade):
opcao_mudanca= input("Para cancelar digite 0; Para mudar o preço digite 1; Para mudar a quantidade digite 2: ")
#testa se opção é válida:
while opcao_mudanca!="0" and opcao_mudanca!="1" and opcao_mudanca!="2":
print("comando inválido")
opcao_mudanca= input("Para mudar o preço digite 1; Para mudar a quantidade digite 2: ")
#opçao preço:
if opcao_mudanca=="1":
#pede mudança de preço:
alteracao_preco= input('novo preço unitário de {0}: '.format(produto))
#testa se prço inicial do produto é válido (numero positivo):
while not isnumber(alteracao_preco):
print ("digite apenas numeros!")
alteracao_preco=input("Novo preço unitário de {0}: ".format(produto))
alteracao_preco=float(alteracao_preco)
while alteracao_preco<0:
print ('digite um valor maior que 0')
alteracao_preco=input("preço de {0}: ".format(produto))
#preço do produto de uma loja do estoque muda para o novo preço:
estoque[loja][produto]['valor unitario'] = alteracao_preco
#opçao quantidade:
elif opcao_mudanca=="2":
#pede um valor adicional:
valor_adicional =input('quantidade adicional de {0}: '.format(produto))
#testa se valor adicional é numero:
while not isnumber(valor_adicional):
print ("digite apenas numeros!")
valor_adicional=input("quantidade adicional de {0}: ".format(produto))
valor_adicional=int(valor_adicional)
#quantidade de um produto de uma loja é alterado:
estoque[loja][produto]['quantidade'] += valor_adicional
#cancelamento:
else:
print ("operaçao cancelada")
#printa o novo estoque:
print ('novo estoque de {0} é {1}, e seu novo preço é de R${2}'.format(produto,estoque[loja][produto]['quantidade'],"%.2f"%estoque[loja][produto]['valor unitario']))
#cancelamento
else:
print("operação cancelada")
estoque_json = json.dumps(estoque, sort_keys=True, indent=4)
with open ('estoque.json','w') as saida:
saida.write(estoque_json)
#opção de menu 4
    elif escolha == "4":
        # Print the store name:
        print("Estoque da loja {0}:".format(loja))
        # Print every product of the store with its quantity and price:
        for chave, valor in estoque[loja].items():
            print ("{0} : {1} , R${2}".format(chave, valor["quantidade"], "%.2f"%valor['valor unitario']))
    # Menu option 5: list out-of-stock products.
    elif escolha == "5":
        # Header line:
        print("Produtos que estão em falta no estoque:")
        # Print items considered out of stock.  NOTE(review): this uses
        # quantidade < 0, so items with exactly 0 are NOT listed -- confirm
        # that is intended (option 6 below treats <= 0 as worthless stock).
        for chave,valor in estoque[loja].items():
            if valor["quantidade"] < 0 :
                print (chave)
    # Menu option 6: total monetary value of the stock.
    elif escolha == "6":
        # Accumulator list of per-product values:
        listinha=[]
        # Header line:
        print("valor monetário total do estoque:")
        # Non-positive quantities contribute zero; otherwise quantity * price:
        for valor in estoque[loja].values():
            if valor["quantidade"] <= 0:
                listinha.append(0)
            elif valor["quantidade"] > 0:
                v = valor["quantidade"] * valor["valor unitario"]
                listinha.append(v)
        # Print the sum, formatted to two decimal places:
        print('R${0}'.format("%.2f"%sum(listinha)))
    # Menu option 7: show the options menu again.
    elif escolha == "7":
        # Print the little menu:
        print("0-Sair")
        print("1-Adicionar produto")
        print("2-Remover produto")
        print("3-Modificar produto")
        print("4-Mostrar estoque completo")
        print("5-Produtos que estão em falta")
        print("6-Valor monetário total do estoque")
        print("7-ver menu de opções")
    # Anything other than 0-7 is an invalid command:
    elif escolha != "0":
        print ("Comando inválido")
    # Farewell message (reached after the loop exits on "0"):
    print ("Até a próxima, amigo!")
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
import tensorflow as tf
from cleverhans.devtools.checks import CleverHansTest
from runner import RunnerMultiGPU
class TestRunnerMultiGPU(CleverHansTest):
  """Exercises RunnerMultiGPU's pipelined feed/fetch behavior."""

  def setUp(self):
    super(TestRunnerMultiGPU, self).setUp()
    self.sess = tf.Session()
    self.niter = 10
    # Build `niter` independent (v, w) variable pairs; each pair serves as
    # both an input and an output of the runner, giving a simple
    # pass-through graph with one sub-graph per pipeline stage.
    feeds = []
    fetches = []
    with tf.variable_scope(None, 'runner'):
      for idx in range(self.niter):
        v_var = tf.get_variable('v%d' % idx, shape=(100, 10))
        w_var = tf.get_variable('w%d' % idx, shape=(100, 1))
        feeds.append({'v': v_var, 'w': w_var})
        fetches.append({'v': v_var, 'w': w_var})
    self.runner = RunnerMultiGPU(feeds, fetches, sess=self.sess)

  def help_test_runner(self, ninputs, niter):
    """
    Tests the MultiGPU runner by feeding in random Tensors for `ninputs`
    steps. Then validating the output after `niter-1` steps.
    """
    # Pre-generate the random inputs, then feed them one per step.
    v_val = [np.random.rand(100, 10) for _ in range(ninputs)]
    w_val = [np.random.rand(100, 1) for _ in range(ninputs)]
    for v_in, w_in in zip(v_val, w_val):
      fvals = self.runner.run({'v': v_in, 'w': w_in})
      self.assertEqual(len(fvals), 0)
      self.assertFalse(self.runner.is_finished())
    # Drain the pipeline without feeding; still no outputs expected.
    for _ in range(niter - ninputs - 1):
      self.assertFalse(self.runner.is_finished())
      fvals = self.runner.run()
      self.assertEqual(len(fvals), 0)
      self.assertFalse(self.runner.is_finished())
    # Outputs now emerge in feed order and must match what went in.
    for v_in, w_in in zip(v_val, w_val):
      self.assertFalse(self.runner.is_finished())
      fvals = self.runner.run()
      self.assertIn('v', fvals)
      self.assertIn('w', fvals)
      self.assertTrue(np.allclose(fvals['v'], v_in))
      self.assertTrue(np.allclose(fvals['w'], w_in))
    self.assertTrue(self.runner.is_finished())

  def test_queue_full(self):
    self.help_test_runner(self.niter-1, self.niter)

  def test_queue_half(self):
    self.help_test_runner(self.niter//2, self.niter)
# Allow running this test module directly; pytest/unittest discovery also works.
if __name__ == '__main__':
  unittest.main()
|
import logging
from tqdm import tqdm
from src.commons.pytorch.evaluation.RecognizeCommands import RecognizeCommands
logger = logging.getLogger(__name__)
class StreamingAccuracyStats(object):
    """Accumulates word-matching counters for streaming keyword spotting."""

    def __init__(self):
        # Counters all start at zero; get_accuracy_stats() increments them.
        self.how_many_ground_truth_words = 0
        self.how_many_ground_truth_matched = 0
        self.how_many_false_positives = 0
        self.how_many_correct_words = 0
        self.how_many_wrong_words = 0

    def get_percentages(self):
        """Return match rates as percentages of the ground-truth word count."""
        total = self.how_many_ground_truth_words
        return {
            'any_match_percentage': self.how_many_ground_truth_matched * 100 / total,
            'correct_match_percentage': self.how_many_correct_words * 100 / total,
            'wrong_match_percentage': self.how_many_wrong_words * 100 / total,
            'false_positive_percentage': self.how_many_false_positives * 100 / total,
        }
def get_accuracy_stats(ground_truth, found_words, time_tolerence_ms, up_to_time_ms=-1):
    """Score detected words against ground truth within a time tolerance.

    :param ground_truth: iterable of (word, time_ms) pairs; the early `break`s
        below assume it is sorted by ascending time -- TODO confirm at callers.
    :param found_words: iterable of (word, time_ms) detections to score.
    :param time_tolerence_ms: half-width of the matching window around each
        detection time (spelling kept from the existing API).
    :param up_to_time_ms: only score ground truth up to this time; -1 means
        score everything.
    :return: a populated StreamingAccuracyStats instance.
    """
    if up_to_time_ms == -1:
        latest_possible_time = float('inf')
    else:
        latest_possible_time = up_to_time_ms + time_tolerence_ms
    stats = StreamingAccuracyStats()
    # Count the ground-truth words that fall inside the scored time range.
    for _, truth_time in ground_truth:
        if truth_time > latest_possible_time:
            break
        stats.how_many_ground_truth_words += 1
    # Times of ground-truth entries already claimed by some detection.
    # NOTE(review): entries are identified by time only, so two ground-truth
    # words at the same timestamp would collide -- verify that cannot happen.
    has_ground_truth_been_matched = []
    for found_word, found_time in tqdm(found_words, desc='Compute stats'):
        earliest_time = found_time - time_tolerence_ms
        latest_time = found_time + time_tolerence_ms
        has_match_been_found = False
        for truth_word, truth_time in ground_truth:
            # Past the window (or past the scored range): stop searching.
            if truth_time > latest_time or truth_time > latest_possible_time:
                break
            # Before the window: keep scanning forward.
            if truth_time < earliest_time:
                continue
            # Inside the window: a matching, not-yet-claimed word counts as
            # correct; any other in-window entry counts as wrong.  Either way
            # the entry is claimed and the search stops for this detection.
            if truth_word == found_word and truth_time not in has_ground_truth_been_matched:
                stats.how_many_correct_words += 1
            else:
                stats.how_many_wrong_words += 1
            has_ground_truth_been_matched.append(truth_time)
            has_match_been_found = True
            break
        # No ground-truth entry in the window: the detection is spurious.
        if not has_match_been_found:
            stats.how_many_false_positives += 1
    stats.how_many_ground_truth_matched = len(has_ground_truth_been_matched)
    return stats
def print_accuracy_stats(stats):
    """Log the stats' percentages; requires at least one ground-truth word."""
    assert stats.how_many_ground_truth_words > 0
    template = ('{any_match_percentage}% matched, '
                '{correct_match_percentage}% correctly, '
                '{wrong_match_percentage}% wrongly, '
                '{false_positive_percentage}% false positives')
    logger.info(template.format(**stats.get_percentages()))
class StreamingAccuracy(object):
    """Scores streaming keyword-spotting predictions against ground truth."""

    def __init__(self, labels, clip_duration_ms=1000, clip_stride_ms=30, average_window_ms=500, time_tolerance_ms=750,
                 suppression_ms=1500, minimum_count=3, detection_threshold=0.7):
        self.labels = labels
        self.clip_duration_ms = clip_duration_ms
        self.clip_stride_ms = clip_stride_ms
        self.time_tolerance_ms = time_tolerance_ms
        # Smoothing/thresholding parameters are owned by the recognizer.
        self.command_recognizer = RecognizeCommands(labels, average_window_ms, detection_threshold, suppression_ms,
                                                    minimum_count)

    def compute_accuracy(self, predictions, ground_truth, up_to_time_ms):
        """Run the recognizer over *predictions* and score its detections.

        :param predictions: iterable of (scores, time_ms) pairs.
        :param ground_truth: iterable of (word, time_ms) pairs.
        :param up_to_time_ms: score ground truth only up to this time.
        :return: the StreamingAccuracyStats from get_accuracy_stats().
        """
        detections = []
        for scores, timestamp in tqdm(predictions, desc='Recognize commands'):
            command, _, is_new = self.command_recognizer.process_latest_results(latest_results=scores,
                                                                                current_time_ms=timestamp)
            # Keep only newly-triggered, non-silence commands.
            if is_new and command != '_silence_':
                detections.append((command, timestamp))
        stats = get_accuracy_stats(ground_truth, detections, self.time_tolerance_ms, up_to_time_ms)
        print_accuracy_stats(stats)
        return stats
|
# Generated by Django 3.1.5 on 2021-02-22 13:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds address/bank/branch/company fields to the employee model.

    NOTE(review): this migration appears to have been applied already
    (auto-generated by Django); do not edit field names here -- fix typos
    in the model and generate a new migration instead.
    """

    dependencies = [
        ('app', '0054_auto_20210222_1842'),
    ]

    operations = [
        migrations.AddField(
            model_name='employee',
            name='address',
            field=models.CharField(max_length=50, null=True, verbose_name='Address'),
        ),
        migrations.AddField(
            model_name='employee',
            name='bank_account_number',
            field=models.CharField(max_length=50, null=True, verbose_name='Bank Account Number'),
        ),
        # NOTE(review): 'branch_addresss' has a triple-s typo; presumably it
        # mirrors the model field, so renaming requires a follow-up migration.
        migrations.AddField(
            model_name='employee',
            name='branch_addresss',
            field=models.CharField(max_length=50, null=True, verbose_name='Branch Address'),
        ),
        migrations.AddField(
            model_name='employee',
            name='branch_name',
            field=models.CharField(max_length=50, null=True, verbose_name='Branch Name'),
        ),
        migrations.AddField(
            model_name='employee',
            name='city',
            field=models.CharField(max_length=50, null=True, verbose_name='City'),
        ),
        migrations.AddField(
            model_name='employee',
            name='company_name',
            field=models.CharField(max_length=50, null=True, verbose_name='Company Name'),
        ),
        # NOTE(review): verbose_name 'IFSC COde' has a casing typo ('Code');
        # fix in the model and let makemigrations emit the correction.
        migrations.AddField(
            model_name='employee',
            name='ifsc_code',
            field=models.CharField(max_length=50, null=True, verbose_name='IFSC COde'),
        ),
        migrations.AddField(
            model_name='employee',
            name='permission',
            field=models.CharField(max_length=50, null=True, verbose_name='Permission'),
        ),
        migrations.AddField(
            model_name='employee',
            name='pincode',
            field=models.CharField(max_length=50, null=True, verbose_name='Pincode'),
        ),
    ]
|
from setuptools import setup

# Packaging metadata for the `ilse` single-module distribution.
setup(
    name='ilse',
    version='0.6.0',
    py_modules=['ilse'],
    install_requires=[
        'click',
        'requests',
    ],
    # Expose `ilse` as a console command that invokes ilse.cli
    # (dict form is equivalent to the INI-style string setuptools also accepts).
    entry_points={
        'console_scripts': [
            'ilse=ilse:cli',
        ],
    },
)
|
# USAGE
# python index_images_parallel.py --images ..\..\datasets\caltech101 --output temp_output --hashes hashes.pickle
from pyimagesearch.parallel_hashing import process_images, chunk
from multiprocessing import Pool, cpu_count
from imutils import paths
import numpy as np
import argparse
import pickle
import os
# check to see if this is the main thread of execution
if __name__ == '__main__':
    # Construct the argument parser and parse the arguments.
    ap = argparse.ArgumentParser()
    ap.add_argument('-i', '--images', required=True, type=str, help='Path to input directory of images')
    ap.add_argument('-o', '--output', required=True, type=str, help='Path to output directory to store intermediate files')
    ap.add_argument('-a', '--hashes', required=True, type=str, help='Path to output hashes directory')
    ap.add_argument('-p', '--procs', type=int, default=-1, help='# of processes to spin up')
    args = vars(ap.parse_args())

    # Determine the number of worker processes (default: one per CPU core).
    procs = args['procs'] if args['procs'] > 0 else cpu_count()
    proc_ids = list(range(procs))

    # Grab the input image paths and decide how many images each process handles.
    print('[INFO] grabbing image paths...')
    all_image_paths = list(paths.list_images(args['images']))
    num_images_per_proc = int(np.ceil(len(all_image_paths) / float(procs)))

    # Chunk the image paths into N roughly-equal sets, one per process.
    chunked_paths = list(chunk(all_image_paths, num_images_per_proc))

    # Build one payload per chunk: a process id, its image paths, and the
    # intermediary pickle file that worker should write.
    payloads = []
    for i, image_paths in enumerate(chunked_paths):
        output_path = os.path.sep.join([args['output'], f'proc_{i}.pickle'])
        payloads.append({
            'id': i,
            'input_paths': image_paths,
            'output_path': output_path
        })

    # Construct and launch the processing pool, then wait for completion.
    print(f'[INFO] launching pool using {procs} processes...')
    pool = Pool(processes=procs)
    pool.map(process_images, payloads)
    print('[INFO] waiting for processes to finish...')
    pool.close()
    pool.join()
    print('[INFO] multiprocessing complete')

    # Merge the per-process hash dictionaries into one mapping of
    # hash -> list of image paths.
    print('[INFO] combining hashes...')
    hashes = {}
    # BUGFIX: validExts=('.pickle') was a plain string, not a tuple -- it only
    # worked because str.endswith accepts a string; use a real 1-tuple.
    for path in paths.list_files(args['output'], validExts=('.pickle',)):
        # BUGFIX: load via a context manager so the file handle is closed
        # (the original leaked the handle from open(path, 'rb').read()).
        with open(path, 'rb') as fp:
            data = pickle.load(fp)
        # Extend the accumulated path list for each hash in this chunk.
        for temp_h, temp_paths in data.items():
            image_paths = hashes.get(temp_h, [])
            image_paths.extend(temp_paths)
            hashes[temp_h] = image_paths

    # Serialize the combined hashes dictionary to disk.
    # BUGFIX: use a context manager instead of manual open/write/close.
    print('[INFO] serializing hashes...')
    with open(args['hashes'], 'wb') as fp:
        pickle.dump(hashes, fp)
|
#-----------------------------------------
# contructeur et accesseurs
#-----------------------------------------
def Matrice(nbLignes,nbColonnes,valeurParDefaut=0):
    """
    Create a matrix of nbLignes rows by nbColonnes columns, putting
    valeurParDefaut in every cell.
    parameters:
        nbLignes    strictly positive integer: number of rows
        nbColonnes  strictly positive integer: number of columns
        valeurParDefaut the default cell value
    result: the matrix dict with keys "nbLignes", "nbColonnes", "valeurs"
    """
    # Each row is its own list (the comprehension re-evaluates the literal),
    # so mutating one row never affects another.
    return {
        "nbLignes": nbLignes,
        "nbColonnes": nbColonnes,
        "valeurs": [[valeurParDefaut] * nbColonnes for _ in range(nbLignes)],
    }
def getNbLignes(matrice):
    """
    Return the number of rows of the matrix.
    parameter: matrice the matrix under consideration
    """
    return matrice["nbLignes"]
def getNbColonnes(matrice):
    """
    Return the number of columns of the matrix.
    parameter: matrice the matrix under consideration
    """
    return matrice["nbColonnes"]
def getVal(matrice,ligne,colonne):
    """
    Return the value stored at (ligne, colonne) in the matrix.
    parameters: matrice the matrix under consideration
                ligne   the row index (starting at 0)
                colonne the column index (starting at 0)
    """
    return matrice["valeurs"][ligne][colonne]
def setVal(matrice,ligne,colonne,valeur):
    """
    Store `valeur` in the cell at (ligne, colonne) of the matrix.
    parameters: matrice the matrix under consideration
                ligne   the row index (starting at 0)
                colonne the column index (starting at 0)
                valeur  the value to store
    This function returns nothing; it mutates the matrix in place.
    """
    matrice["valeurs"][ligne][colonne]=valeur
#------------------------------------------
# decalages
#------------------------------------------
def decalageLigneAGauche(matrice, numLig, nouvelleValeur=0):
    """
    Shift row numLig one cell to the left, inserting nouvelleValeur in the
    right-most cell of that row.
    parameters: matrice        the matrix under consideration
                numLig         the row index to shift
                nouvelleValeur the value to insert on the right
    result: the value ejected from the left end by the shift
    """
    # Mutate the row list in place: pop the left end, append on the right.
    ligne = matrice["valeurs"][numLig]
    ejecte = ligne.pop(0)
    ligne.append(nouvelleValeur)
    return ejecte
def decalageLigneADroite(matrice, numLig, nouvelleValeur=0):
    """
    Shift row numLig one cell to the right, inserting nouvelleValeur in the
    left-most cell of that row.
    parameters: matrice        the matrix under consideration
                numLig         the row index to shift
                nouvelleValeur the value to insert on the left
    result: the value ejected from the right end by the shift
    """
    # Mutate the row list in place: pop the right end, insert on the left.
    ligne = matrice["valeurs"][numLig]
    ejecte = ligne.pop()
    ligne.insert(0, nouvelleValeur)
    return ejecte
def decalageColonneEnHaut(matrice, numCol, nouvelleValeur=0):
    """
    Shift column numCol one cell upward, inserting nouvelleValeur in the
    bottom cell of that column.
    parameters: matrice        the matrix under consideration
                numCol         the column index to shift
                nouvelleValeur the value to insert at the bottom
    result: the value ejected from the top by the shift
    """
    valeurs = matrice["valeurs"]
    ejecte = valeurs[0][numCol]
    # Move each cell up by one row, top to bottom.
    for lig in range(1, len(valeurs)):
        valeurs[lig - 1][numCol] = valeurs[lig][numCol]
    valeurs[-1][numCol] = nouvelleValeur
    return ejecte
def decalageColonneEnBas(matrice, numCol, nouvelleValeur=0):
    """
    Shift column numCol one cell downward, inserting nouvelleValeur in the
    top cell of that column.
    parameters: matrice        the matrix under consideration
                numCol         the column index to shift
                nouvelleValeur the value to insert at the top
    result: the value ejected from the bottom by the shift
    """
    valeurs = matrice["valeurs"]
    ejecte = valeurs[-1][numCol]
    # Move each cell down by one row, bottom to top (so no value is clobbered
    # before it is copied).
    for lig in range(len(valeurs) - 1, 0, -1):
        valeurs[lig][numCol] = valeurs[lig - 1][numCol]
    valeurs[0][numCol] = nouvelleValeur
    return ejecte
"""Tests for service.GiphySnapBot."""
import unittest
from unittest.mock import (
MagicMock,
patch,
)
import pytest
from slackclient import SlackClient
from service import base
from service.base import GiphySnapBotBase
# Minimal config consumed by GiphySnapBotBase in these tests: the Slack bot
# token and the default channel that messages are posted to.
TEST_CONFIG = {
    "slack_bot_token": "baby shark",
    "default_channel": "foo_channel",
}
class GiphySnapBotBaseTestCase(unittest.TestCase):
    """Tests for GiphySnapBotBase."""

    # NOTE: with stacked @patch.object decorators, mocks are injected
    # bottom-up: the decorator closest to the function supplies the first
    # mock argument.  Tests that ignore the mocks absorb them via *args.
    @patch.object(base.SlackClient, "rtm_connect", return_value=True)
    @patch.object(GiphySnapBotBase, "_get_user_id")
    def test_connect_will_return_slack_client(self, *args):
        """If Slack connection is ok, _connect() will return the client."""
        client = GiphySnapBotBase(TEST_CONFIG)
        self.assertEqual(client._slack_client.__class__, SlackClient)

    @patch.object(base.SlackClient, "rtm_connect", return_value=False)
    def test_connection_error_will_raise_connection_error(self, *args):
        """If rtm_connect() fails, raise a ConnectionError."""
        with pytest.raises(ConnectionError):
            GiphySnapBotBase(TEST_CONFIG)

    @patch.object(GiphySnapBotBase, "_connect", return_value=MagicMock())
    def test_get_user_id_will_return_user_id(self, mock_client):
        """_get_user_id() will return the user of the bot."""
        # The client returned by _connect() answers the auth call with a
        # canned user id; the constructor is expected to store it as bot_id.
        mock_client.return_value.api_call = MagicMock(
            return_value={"ok": True, "user_id": "mort"})
        client = GiphySnapBotBase(TEST_CONFIG)
        self.assertEqual(client.bot_id, "mort")

    @patch.object(GiphySnapBotBase, "_connect", return_value=MagicMock())
    def test_get_user_id_will_raise_permission_error_if_response_not_ok(
            self, mock_client):
        """_get_user_id() will raise a PermissionError if response not ok."""
        mock_client.return_value.api_call = MagicMock(
            return_value={"ok": False, "error": "abc123"})
        with pytest.raises(PermissionError):
            GiphySnapBotBase(TEST_CONFIG)

    @patch.object(GiphySnapBotBase, "_connect", return_value=MagicMock())
    def test_send_messge_will_send_post_message_api_call(self, mock_client):
        """send_message() will send message using postMessage api call."""
        # Get the right mock for client
        mock_client = mock_client.return_value
        client = GiphySnapBotBase(TEST_CONFIG)
        # Drop the calls made during construction so the assertion below
        # only sees the send_message() call.
        mock_client.reset_mock()
        client.send_message("bar_message")
        mock_client.api_call.assert_called_once_with(
            "chat.postMessage", channel="foo_channel", text="bar_message")

    @patch.object(GiphySnapBotBase, "_connect", return_value=MagicMock())
    def test_send_image_will_send_post_message_api_call(self, mock_client):
        """send_image() will send attachment via postMessage api call."""
        # Get the right mock for client
        mock_client = mock_client.return_value
        client = GiphySnapBotBase(TEST_CONFIG)
        # Drop construction-time calls before asserting on send_image().
        mock_client.reset_mock()
        client.send_image("image_title", "image_url")
        expected_attachment = {
            "title": "image_title",
            "image_url": "image_url",
            "title_link": "image_url",
        }
        mock_client.api_call.assert_called_once_with(
            "chat.postMessage", channel="foo_channel",
            attachments=[expected_attachment])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.