text stringlengths 38 1.54M |
|---|
from os import listdir
from os import walk
from os import stat
from datetime import datetime, timezone
from os.path import isfile, join
from PIL import Image
import piexif
import os
import exifread
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("year")
args = parser.parse_args()

YEAR = ''
# A valid year argument is exactly four digits (e.g. "2019").
if len(args.year) != 4 or not args.year.isdigit():
    print('Invalid arg: {}. Exiting program'.format(args.year))
    # The original called sys.exit() without importing sys (NameError);
    # raise SystemExit is equivalent and needs no import.
    raise SystemExit(0)
else:
    print('Valid arg: {}'.format(args.year))
    YEAR = args.year

path = 'F:\\BILDER NY\\By year\\' + YEAR
# Fallback timestamp for photos with no usable EXIF date:
# noon on Jan 1 of YEAR.
DEFAULT_DATE = datetime.strptime('{}:01:01 12:00:00'.format(YEAR), '%Y:%m:%d %H:%M:%S')
DEFAULT_DATE_STR = DEFAULT_DATE.strftime('%Y:%m:%d %H:%M:%S')
DTO_KEY = piexif.ExifIFD.DateTimeOriginal
MISSING_DATE_PATTERN = '0000:00:00 00:00:00'
print(DEFAULT_DATE)
def getFiles(directory):
    """Return the full paths of every file under *directory*, recursively.

    os.walk already descends into subdirectories, so one pass over its
    output is sufficient.  The original additionally re-walked each bare
    dirname (a path relative to the CWD, not to *dirpath*), which yielded
    broken or duplicate paths.
    """
    files = []
    for dirpath, _dirnames, filenames in walk(directory):
        for filename in filenames:
            files.append(join(dirpath, filename))
    return files
def adjustDates(files):
    """Ensure every image in *files* carries an EXIF DateTimeOriginal.

    Images whose EXIF date is absent or the all-zero placeholder get
    DEFAULT_DATE_STR written into their EXIF block, and each file's
    filesystem modification time is set to the (possibly defaulted)
    date taken.  Returns the list of files that failed processing.
    """
    missing_date = []
    for filepath in files:
        try:
            print(' ')
            print(filepath)
            im = Image.open(filepath)
            exif_dict = piexif.load(im.info["exif"])
            date_taken = DEFAULT_DATE_STR
            if DTO_KEY in exif_dict["Exif"]:
                dto = exif_dict["Exif"][DTO_KEY].decode()
                if dto != MISSING_DATE_PATTERN:
                    date_taken = dto
            exif_dict["Exif"].update({DTO_KEY: date_taken.encode()})
            exif_bytes = piexif.dump(exif_dict)
            im.save(filepath, exif=exif_bytes)
            st = os.stat(filepath)
            # os.utime takes (atime, mtime).  The original passed the old
            # mtime as the *access* time; preserve the real atime instead.
            atime = st.st_atime
            new_timestamp = datetime.strptime(date_taken, '%Y:%m:%d %H:%M:%S').timestamp()
            os.utime(filepath, (atime, new_timestamp))
        except Exception:
            # Narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.
            print('Error when processing {}'.format(filepath))
            missing_date.append(filepath)
    return missing_date
# Collect every file under the year folder and normalise its dates;
# report anything that could not be processed.
all_files = np.array(getFiles(path))
files_error = adjustDates(all_files)

print(' ')
print(' ')
print('=== Files with error ===')
for failed in files_error:
    print(failed)
|
# script takes json returned by google search and stores links and meta description
import json
import re
class my_dictionary(dict):
    """dict subclass with an explicit add(key, value) helper."""
    def __init__(self):
        # The original rebound the local name `self` to a new dict, which
        # has no effect on the instance; initialise the base dict instead.
        super().__init__()
    def add(self, key, value):
        """Store *value* under *key* (plain item assignment)."""
        self[key] = value
def google_results(formatedJson):
    """Parse a Google Custom Search JSON response.

    Returns a my_dictionary mapping each result link to its meta
    description (htmlSnippet) with HTML tags stripped.

    The regex is now compiled once instead of per result, and the
    pointless `.key`/`.value` attribute writes on the dict are gone.
    """
    data = json.loads(formatedJson)
    searchResults = my_dictionary()
    clean = re.compile('<.*?>')  # matches any HTML tag, non-greedy
    for a in data['items']:
        link = a["link"]                  # store links
        snip = a["htmlSnippet"]           # store meta description
        searchResults.add(link, re.sub(clean, '', snip))
    return searchResults
|
from __future__ import print_function
from __future__ import division
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.cosmology import FlatLambdaCDM
buzzard_cosmo = FlatLambdaCDM(68.81,.295)
from scipy.stats import binned_statistic
import subprocess
import pandas as pd
import treecorr
import sys
import numpy as np
import yaml
import os
outdir = '/nfs/slac/des/fs1/g/sims/mbaumer/3pt_sims/new_triplet_counts/'
plotdir = '/nfs/slac/des/fs1/g/sims/mbaumer/3pt_sims/plots/'
def computeXvsAngle(ddd, var, stat='mean', scale=6, ratio=.5, tolerance=.1, nbins=15, **kwargs):
    """Bin *var* as a function of triangle opening angle.

    Triangles from the treecorr NNN grid *ddd* whose side lengths match
    ``scale`` and ``scale*ratio`` (within fractional ``tolerance``) are
    split into "collapsed" and "elongated" configurations, and *var* is
    reduced with scipy's binned_statistic over angle bins.

    Returns (binned_values, bin_centers) with the collapsed bins first.
    """
    # Angle at which an elongated configuration becomes collapsed.
    transition_angle = np.arccos(.25)/np.pi*180
    # np.linspace requires an integer `num`; np.floor returns a float,
    # which modern numpy rejects — cast explicitly.
    N_low_bins = int(np.floor(transition_angle/180*nbins))
    coll_bins = np.linspace(0, transition_angle, num=N_low_bins)
    elong_bins = np.linspace(transition_angle, 180, num=nbins - N_low_bins)
    collapsed_angles = computeAngularBins(np.exp(ddd.logr), ddd.u, ddd.v, collapsed=True)
    elongated_angles = computeAngularBins(np.exp(ddd.logr), ddd.u, ddd.v, collapsed=False)
    # d3 = u*r is the smallest side; select triangles where it matches
    # scale*ratio within tolerance.
    isRightSize = (np.exp(ddd.logr)*ddd.u > scale*ratio-scale*ratio*tolerance) & (np.exp(ddd.logr)*ddd.u < scale*ratio+scale*ratio*tolerance)
    # d1 = u*|v|*r + r is the largest side (collapsed selection on d1,
    # elongated selection on d2 = r).
    isCollapsed = (((ddd.u*np.abs(ddd.v))*np.exp(ddd.logr)+np.exp(ddd.logr) > scale-scale*tolerance) & ((ddd.u*np.abs(ddd.v))*np.exp(ddd.logr)+np.exp(ddd.logr) < scale+scale*tolerance))
    isElongated = ((np.exp(ddd.logr) > scale-scale*tolerance) & (np.exp(ddd.logr) < scale+scale*tolerance))
    out1, bins1, _ = binned_statistic(elongated_angles[np.where(isRightSize & isElongated)], var[np.where(isRightSize & isElongated)], bins=elong_bins, statistic=stat)
    out2, bins2, _ = binned_statistic(collapsed_angles[np.where(isRightSize & isCollapsed)], var[np.where(isRightSize & isCollapsed)], bins=coll_bins, statistic=stat)
    full_var = np.concatenate((out2, out1))
    bins1 += (bins1[1]-bins1[0])/2  # make edges centers
    bins2 += (bins2[1]-bins2[0])/2
    full_bins = np.concatenate((bins2[:-1], bins1[:-1]))
    return full_var, full_bins
def compute_x_vs_side_length(ddd, var, stat='mean', nbins=15, tolerance=.1, **kwargs):
    """Bin *var* over log side length for (near-)equilateral triangles.

    A triangle counts as equilateral when u ~ 1 and |v| ~ 0 within
    *tolerance*.  Returns (binned_values, bin_centers).
    """
    equilateral = (ddd.u > 1 - tolerance) & (np.abs(ddd.v) < tolerance)
    values, edges, _ = binned_statistic(ddd.logr[equilateral],
                                        var[equilateral],
                                        bins=nbins, statistic=stat)
    # Shift the bin edges by half a width to get centers, drop the last.
    centers = edges + (edges[1] - edges[0]) / 2
    return values, centers[:-1]
def computeAngularBins(r, u, v, collapsed=False):
    """Return the triangle opening angle in degrees from treecorr (r, u, v).

    Side lengths follow the treecorr convention: d2 = r, d3 = u*r and
    d1 = |v|*d3 + d2.  The law of cosines recovers the angle at the
    vertex between (d2, d3) — or (d1, d3) when *collapsed* — with a
    1e-9 term guarding against a zero denominator.
    """
    #if v < 0: collapsed = not collapsed
    v = np.abs(v)
    d2 = r
    d3 = u * r
    d1 = v * d3 + d2
    if collapsed:
        cosine = (d1 ** 2 + d3 ** 2 - d2 ** 2) / (2 * d1 * d3 + 1e-9)
    else:
        cosine = (d2 ** 2 + d3 ** 2 - d1 ** 2) / (2 * d2 * d3 + 1e-9)
    return np.arccos(cosine) / np.pi * 180
class NNNPlotter (object):
    """Loads precomputed three-point (NNN) triplet counts for one redshift
    slice and derives/plots three-point correlation statistics.

    The run's YAML config and the eight triplet-count .npy arrays are read
    from ``outdir``; CSV results and (optionally) figures are written to
    ``outdir``/``plotdir``.
    """
    def __init__(self,zvar,min_z,delta_z,metric):
        # zvar: name of the redshift-like column ('DISTANCE' or a z field).
        self.zvar = zvar
        self.min_z = min_z
        self.delta_z = delta_z
        self.max_z = self.min_z + self.delta_z
        self.metric = metric
        # Run name encodes slice and metric; must match the config file.
        self.runname = self.zvar+str(self.min_z)+'_deltaz'+str(self.delta_z)+'_'+self.metric
        with open(outdir+self.runname+'.yaml') as f:
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated/unsafe on modern PyYAML; the config is trusted
            # here, but yaml.safe_load would be preferable.
            self.config = yaml.load(f.read())
        self.data = np.load(self.config['data_path'])
        self.randoms = np.load(self.config['randoms_path'])
        assert self.runname == self.config['runname']
    def load_data_for_run(self):
        """Cut data/randoms to the configured slice and load the eight
        triplet-count arrays (ddd ... rrr) for this run."""
        if self.zvar == 'DISTANCE':
            # Convert the z limits to comoving distance with the Buzzard
            # cosmology (h-scaled) — units assumed from the catalog; verify.
            self.data = self.data[((self.data[self.zvar] > (buzzard_cosmo.h)*buzzard_cosmo.comoving_distance(self.min_z).value) & (self.data[self.zvar] < (buzzard_cosmo.h)*buzzard_cosmo.comoving_distance(self.max_z).value))]
            self.randoms = self.randoms[((self.randoms[self.zvar] > (buzzard_cosmo.h)*buzzard_cosmo.comoving_distance(self.min_z).value) & (self.randoms[self.zvar] < (buzzard_cosmo.h)*buzzard_cosmo.comoving_distance(self.max_z).value))]
        else:
            # Direct redshift cut.
            self.data = self.data[((self.data[self.zvar] > self.min_z) & (self.data[self.zvar] < self.max_z))]
            self.randoms = self.randoms[((self.randoms[self.zvar] > self.min_z) & (self.randoms[self.zvar] < self.max_z))]
        # The eight data/random triplet-count combinations.
        self.ddd = np.load(outdir+self.runname+'_'+'ddd.npy')
        self.ddr = np.load(outdir+self.runname+'_'+'ddr.npy')
        self.drd = np.load(outdir+self.runname+'_'+'drd.npy')
        self.rdd = np.load(outdir+self.runname+'_'+'rdd.npy')
        self.rrd = np.load(outdir+self.runname+'_'+'rrd.npy')
        self.drr = np.load(outdir+self.runname+'_'+'drr.npy')
        self.rdr = np.load(outdir+self.runname+'_'+'rdr.npy')
        self.rrr = np.load(outdir+self.runname+'_'+'rrr.npy')
    def analyze_single_run(self,mode,**kwargs):
        """Bin all counts by angle ('angle') or side length ('equi') and
        compute the Szapudi-Szalay zeta estimator and reduced Q.

        Returns (bins, binned) where binned maps each statistic name to
        its binned values.
        """
        # An empty NNNCorrelation carries the (logr, u, v) grid geometry.
        template = treecorr.NNNCorrelation(config=self.config)
        if mode == 'angle':
            get_binned_stat = computeXvsAngle
        if mode == 'equi':
            get_binned_stat = compute_x_vs_side_length
        # NOTE(review): any other mode leaves get_binned_stat unbound
        # and raises NameError below.
        binned = {}
        binned['ddd'], bins = get_binned_stat(template,self.ddd,stat='sum',**kwargs)
        binned['ddr'], bins = get_binned_stat(template,self.ddr,stat='sum',**kwargs)
        binned['drd'], bins = get_binned_stat(template,self.drd,stat='sum',**kwargs)
        binned['rdd'], bins = get_binned_stat(template,self.rdd,stat='sum',**kwargs)
        binned['rrd'], bins = get_binned_stat(template,self.rrd,stat='sum',**kwargs)
        binned['drr'], bins = get_binned_stat(template,self.drr,stat='sum',**kwargs)
        binned['rdr'], bins = get_binned_stat(template,self.rdr,stat='sum',**kwargs)
        binned['rrr'], bins = get_binned_stat(template,self.rrr,stat='sum',**kwargs)
        # Mean side lengths per bin (d1 >= d2 >= d3, treecorr convention).
        binned['d1'], bins = get_binned_stat(template,template.u*np.abs(template.v)*np.exp(template.logr)+np.exp(template.logr),**kwargs)
        binned['d2'], bins = get_binned_stat(template,np.exp(template.logr),**kwargs)
        binned['d3'], bins = get_binned_stat(template,template.u*np.exp(template.logr),**kwargs)
        # Normalisations: total number of ordered triples / 6.
        datatot = len(self.data)
        randtot = len(self.randoms)
        dddtot = float(datatot)**3/6
        drrtot = float(datatot)*float(randtot)**2/6
        rdrtot = float(datatot)*float(randtot)**2/6
        rrdtot = float(datatot)*float(randtot)**2/6
        ddrtot = float(datatot)**2*float(randtot)/6
        drdtot = float(datatot)**2*float(randtot)/6
        rddtot = float(datatot)**2*float(randtot)/6
        rrrtot = float(randtot)**3/6
        # Szapudi-Szalay-style estimator for the connected 3pt function.
        binned['zeta'] = (binned['ddd']+dddtot*(-binned['ddr']/ddrtot-binned['drd']/drdtot-binned['rdd']/rddtot+binned['rrd']/rrdtot+binned['rdr']/rdrtot+binned['drr']/drrtot-binned['rrr']/rrrtot))/(binned['rrr']*dddtot/rrrtot)
        # Hierarchical-ansatz denominator xi1*xi2 + xi2*xi3 + xi3*xi1.
        binned['denom'] = self.get_two_point_expectation(binned['d1'],binned['d2'],binned['d3'])
        binned['q'] = binned['zeta']/binned['denom']
        return bins, binned
    def get_two_point_expectation(self,d1bins,d2bins,d3bins):
        """Fit a power law to the measured 2pt function and evaluate the
        hierarchical denominator at the mean side lengths per bin."""
        if self.metric == 'Euclidean':
            cat = treecorr.Catalog(ra=self.data['RA'], dec=self.data['DEC'], ra_units='degrees', dec_units='degrees')
            random_cat = treecorr.Catalog(ra=self.randoms['RA'], dec=self.randoms['DEC'], ra_units='degrees', dec_units='degrees')
            dd = treecorr.NNCorrelation(min_sep=1,max_sep=30,nbins=30,bin_slop=0.1,sep_units='arcmin',metric=self.metric)
            dr = treecorr.NNCorrelation(min_sep=1,max_sep=30,nbins=30,bin_slop=0.1,sep_units='arcmin',metric=self.metric)
            rr = treecorr.NNCorrelation(min_sep=1,max_sep=30,nbins=30,bin_slop=0.1,sep_units='arcmin',metric=self.metric)
        else:
            raise ValueError('invalid metric specified')
        dd.process(cat)
        dr.process(cat,random_cat)
        rr.process(random_cat)
        # Landy-Szalay 2pt estimator, then a power-law fit in log-log.
        xi, varxi = dd.calculateXi(rr=rr,dr=dr)
        coeffs = np.polyfit(dd.logr,np.log(xi),deg=1)
        poly = np.poly1d(coeffs)
        yfit = lambda x: np.exp(poly(np.log(x)))
        xi1 = yfit(d1bins)
        xi2 = yfit(d2bins)
        xi3 = yfit(d3bins)
        denom_bins = (xi1*xi2+xi2*xi3+xi3*xi1)
        return denom_bins
    def plot_run(self):
        """Sweep the scale/ratio/tolerance/nbins grid, accumulate binned
        statistics into a DataFrame and write it as CSV."""
        results = pd.DataFrame()
        self.load_data_for_run()
        #make angular plots
        for scale in [10,15,20,25,30]:
            for ratio in [.5]:
                for tolerance in [.1,.2,.3]:
                    for nbins in [8,16,100]:
                        print (scale,ratio,tolerance,nbins)
                        sys.stdout.flush()
                        # ratio 1 means equilateral; never hit with the
                        # ratio list above, kept for flexibility.
                        if ratio == 1:
                            mode = 'equi'
                        else:
                            mode = 'angle'
                        bins, binned = self.analyze_single_run(mode,scale=scale,ratio=ratio,tolerance=tolerance,nbins=nbins)
                        this_res = pd.DataFrame.from_dict(binned)
                        this_res['bins'] = bins
                        this_res['scale'] = scale
                        this_res['ratio'] = ratio
                        this_res['tolerance'] = tolerance
                        this_res['nbins']= nbins
                        # NOTE(review): DataFrame.append was removed in
                        # pandas 2.0; pd.concat is the modern equivalent.
                        results = results.append(this_res)
                        # Disabled per-statistic plotting; note .iteritems()
                        # is Python 2 only — dead code as written.
                        if False:
                            for name,var in binned.iteritems():
                                fig = plt.figure()
                                plt.plot(bins,var)
                                if mode == 'angle':
                                    plt.xlabel('Angle (degrees)')
                                else:
                                    plt.xlabel('Scale (arcmin)')
                                plt.ylabel(name)
                                plt.title(str(self.min_z)+'<'+self.zvar+'<'+str(self.max_z)+' '+str(scale*ratio)+':'+str(scale)+' +/- '+str(100*tolerance)+'%')
                                fig.savefig(plotdir+name+'_'+mode+'_'+str(scale)+'_'+str(ratio)+'_'+str(tolerance)+'_'+str(nbins)+'.png')
        results.to_csv(outdir+self.runname+'.csv')
def runall(min_z, max_z, delta_z, zvar, metric, do3D):
    """Submit one batch (bsub) plotting job per redshift slice.

    *metric* and *do3D* are accepted for interface compatibility, but
    the submitted command always uses the 'Euclidean' metric, exactly
    as before.
    """
    for lower_z_lim in np.arange(min_z, max_z, delta_z):
        snippet = ("import autoplot; plotter = autoplot.NNNPlotter('" + zvar +
                   "'," + str(lower_z_lim) + "," + str(delta_z) +
                   ",'Euclidean'); plotter.plot_run()")
        cmd = ["bsub", "-W", "08:00", "python", "-c", snippet]
        print (*cmd)
        subprocess.call(cmd)
import webapp2
import json
import cgi
from utils.utilities import UtilityMixin, Organization, Driver
from utils.requirelogin import RequireLoginMixin
from google.appengine.api import users
from google.appengine.ext import ndb
class SaveDriverAjax(webapp2.RequestHandler, RequireLoginMixin, UtilityMixin):
    """AJAX handler that creates or updates the signed-in user's Driver
    record (location and seat count) for an organization, replying with
    a small JSON success payload."""
    def get(self, org):
        # Query-string parameters; float()/int() raise on malformed input.
        # cgi.escape only HTML-escapes — deprecated in Python 3 (html.escape).
        lat = float(cgi.escape(self.request.get('lat')))
        lng = float(cgi.escape(self.request.get('long')))
        seats = int(cgi.escape(self.request.get('seats')))
        user = users.get_current_user()
        user_id = user.user_id()
        email = user.email()
        # try to find the record to see if it should be created or updated
        driver = Driver.get_by_id(org, user_id)
        if driver is None:
            # New entity keyed by the user id under the organization key.
            driver = Driver(parent = Organization.organization_key(org), id = user_id)
        driver.email = email
        driver.lat = lat
        driver.lng = lng
        driver.seats = seats
        driver.put()
        result = {
            'success': True
        }
        result_json = json.dumps(result)
        self.response.headers['Content-Type'] = 'text/json'
        self.response.out.write(result_json)
|
import pygame as pg
# pygame version 2.0 or newer is required.
# Install from the terminal with
# --> pip install pygame (Windows)
# --> pip3 install pygame (Mac)
# --> sudo apt-get install python3-pygame (Linux Debian/Ubuntu/Mint)
pg.init()
# größe = size, breite = width, höhe = height
größe = breite, höhe = 1920,1080
# fenster = window
fenster = pg.display.set_mode(größe)
clock = pg.time.Clock()
FPS = 40
# Drawing loop running at FPS frames per second
while True:
    clock.tick(FPS)
    # ereignis = event; quit on window close or the Escape key
    for ereignis in pg.event.get():
        if ereignis.type == pg.QUIT or \
           ereignis.type == pg.KEYDOWN and ereignis.key == pg.K_ESCAPE:
            quit()
    fenster.fill('black')
    pg.display.flip()
|
import tensorflow as tf
from defines import WIDTH, HEIGHT
def cnn_model():
    """Build and compile the encoder/decoder CNN.

    Input: (HEIGHT, WIDTH, 3) images.  Three Conv2D+BatchNorm+ReLU stages
    (with one 2x2 max-pool) encode the image; three Conv2DTranspose stages
    decode it to a 2-channel map whose values the final ReLU clamps to
    [0, 200].  Returns the compiled model.
    """
    model = tf.keras.models.Sequential()
    model.add(tf.keras.Input(shape=(HEIGHT, WIDTH, 3)))
    model.add(tf.keras.layers.Conv2D(16, (4, 4), padding="valid"))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.ReLU())
    model.add(tf.keras.layers.Conv2D(32, (8, 8), padding="valid"))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.ReLU())
    model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(tf.keras.layers.Conv2D(128, (16, 16), padding="valid"))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.ReLU())
    model.add(tf.keras.layers.Conv2DTranspose(16, (16, 16), padding="valid", activation="relu"))
    model.add(tf.keras.layers.Conv2DTranspose(8, (9, 9), padding="valid", strides=(2, 2), activation="relu"))
    model.add(tf.keras.layers.Conv2DTranspose(2, (4, 4), padding="valid"))
    # Clamp outputs to [0, 200]; negative_slope=0 keeps it a plain ReLU.
    model.add(tf.keras.layers.ReLU(max_value=200, negative_slope=0))
    # NOTE(review): "accuracy" is not meaningful for an MSE regression
    # target — a regression metric (e.g. MAE) would be more informative.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss=tf.keras.losses.mean_squared_error,
                  metrics=["accuracy"])
    # summary() prints its table itself and returns None, so the original
    # print(model.summary()) emitted a spurious trailing "None" line.
    model.summary()
    return model
# This program computes the combined (samalog) score for four speed-skating
# races; the user enters the time and distance for each race.
def _les_tall(ledetekst):
    """Read one number from input(); int when possible, else float.

    Replaces eval(input(...)), which would execute arbitrary code typed
    by the user while accepting exactly the same numeric literals.
    """
    verdi = input(ledetekst)
    try:
        return int(verdi)
    except ValueError:
        return float(verdi)

def sammenlagt():
    """Prompt for four races and print each race plus the combined score.

    The score for one race is its time normalised to 500 m:
    (minutes*60 + seconds) / (distance/500); lower totals are better.
    All user-facing (Norwegian) prompts and messages are unchanged.
    """
    # User-supplied name for the skater.
    navn = input("Navn: ")
    titler = ("Første løp", "Andre løp", "Tredje løp", "Fjerde løp")
    distanser = []
    minutter = []
    sekunder = []
    for tittel in titler:
        print(tittel)
        distanser.append(_les_tall("Distanse: "))
        minutter.append(_les_tall("Minutter: "))
        sekunder.append(_les_tall("Sekunder med komma: "))
    # 500 m-equivalent time per race, summed left to right as before.
    P = 0
    poeng = []
    for d, m, s in zip(distanser, minutter, sekunder):
        poeng.append(((m * 60) + s) / (d / 500))
    for p in poeng:
        P = P + p
    # Print each race, then the combined score.
    print("Navn: ", navn)
    for i in range(4):
        print("Tid for ", navn, "på", distanser[i], "meter")
        print("Minutter:", minutter[i], "\nSekunder og hundredeler:", sekunder[i])
    print("Den samlede poengsummen til", navn, ":", (round(P, 3)))

# Calling the function at import time means running the file immediately
# asks for input, without having to invoke it from the shell first.
sammenlagt()
# note to self: high score = poor performance, low score = good performance
|
"""Checks for web services"""
from urllib import request
import urllib.error
from preflyt.base import BaseChecker
class WebServiceChecker(BaseChecker):
    """Verify that a webservice is reachable"""
    checker_name = "web"

    def __init__(self, url, statuses=None):
        """Initialize the checker

        :param url: The URL of the endpoint to check
        :param statuses: Acceptable HTTP statuses (other than 200 OK)
        """
        super().__init__()
        # Default to plain HTTP when no recognised scheme was supplied.
        if not url.lower().startswith(("http://", "https://", "ftp://")):
            url = "http://" + url
        self._url = url
        self._statuses = statuses or []

    def check(self):
        """Return (ok, message).

        ok is True when the URL answers successfully or with one of the
        explicitly accepted HTTP statuses.
        """
        try:
            # Use the response as a context manager so the socket is
            # closed promptly instead of leaking until GC.
            with request.urlopen(self._url):
                pass
        except urllib.error.HTTPError as httpe:
            if httpe.code in self._statuses:
                return True, "{} is available, but with status: [{}] {}".format(
                    self._url, httpe.code, httpe.reason)
            return False, "[{}] {}".format(httpe.code, httpe.reason)
        except urllib.error.URLError as urle:
            return False, urle.reason
        except Exception as exc:  # pylint: disable=broad-except
            return False, "Unhandled error: {}".format(exc)
        return True, "{} is available".format(self._url)
|
from __future__ import division
import os
import sys
import sfml as sf
DIRECT_DICT = {sf.Keyboard.LEFT : (-1, 0),
sf.Keyboard.RIGHT : ( 1, 0),
sf.Keyboard.UP : ( 0,-1),
sf.Keyboard.DOWN : ( 0, 1)}
SCREEN_SIZE = sf.Vector2(800, 600)
CAPTION = "Move me with the Arrow Keys."
class Player(object):
    """A coloured circle the user steers with the arrow keys, clamped to
    the window."""
    def __init__(self,position,radius,speed):
        # speed is in pixels per second; it is scaled by the frame delta
        # in update() for framerate-independent movement.
        self.speed = speed
        self.image = sf.CircleShape()
        self.image.outline_thickness = 10
        self.image.radius = radius
        # Origin at the circle centre so position/clamping refer to the
        # centre rather than the top-left of the bounding box.
        self.image.origin = (radius,radius)
        self.image.position = sf.Vector2(*position)
        self.image.outline_color = sf.Color.BLACK
        self.image.fill_color = sf.Color(255, 100, 200)
    def update(self,delta):
        """Move according to currently held arrow keys, then clamp.

        :param delta: seconds elapsed since the previous frame
        """
        movement = sf.Vector2(0,0)
        for key in DIRECT_DICT:
            if sf.Keyboard.is_key_pressed(key):
                # Accumulate each pressed direction scaled by speed*delta.
                movement[0] += DIRECT_DICT[key][0]*self.speed*delta
                movement[1] += DIRECT_DICT[key][1]*self.speed*delta
        self.image.move(movement)
        self.clamp(SCREEN_SIZE)
    def clamp(self,clamp_to):
        """Keep the circle (including its outline) inside *clamp_to*."""
        with_rad = self.image.radius+self.image.outline_thickness
        pos = [None,None]
        for i in (0,1):
            # Bound below by the radius, then above by the far edge.
            minny = max(self.image.position[i],with_rad)
            pos[i] = min(clamp_to[i]-with_rad,minny)
        self.image.position = pos
class Control(sf.RenderWindow):
    """Main window: owns the clock, the player and the game loop."""
    def __init__(self):
        sf.RenderWindow.__init__(self,sf.VideoMode(*SCREEN_SIZE), CAPTION)
##        self.vertical_synchronization = True
        self.framerate_limit = 60
        self.active = True
        self.clock = sf.Clock()
        # Player starts centred, radius 100 px, speed 300 px/s.
        self.player = Player(SCREEN_SIZE/2,100,300)
        self.done = False
    def event_loop(self):
        """Process pending events; a close event ends the main loop."""
        for event in self.events:
            if type(event) is sf.CloseEvent:
                self.close()
                self.done = True
    def main_loop(self):
        """Run events/update/draw until the window is closed."""
        while not self.done:
            # Frame time in seconds for framerate-independent movement.
            delta = self.clock.restart().seconds
            self.event_loop()
            self.player.update(delta)
            self.clear(sf.Color(255, 255, 255))
            self.draw(self.player.image)
            self.display()
if __name__ == "__main__":
    # Create the window, run the game loop, then exit the process.
    run_it = Control()
    run_it.main_loop()
    sys.exit()
|
import json
class AppendingDict(dict):
    """dict subclass that keeps a history of every value assigned to a key.

    Values are stored internally in per-key lists: item assignment appends,
    item access returns the most recent value, deletion pops it.
    NOTE(review): __getattribute__ deliberately returns None for every
    attribute except 'setdefault', the mangled data field and 'json' —
    presumably to hide the normal dict API; confirm this is intended.
    """
    def __init__(self):
        # Internal per-key value histories (name-mangled to
        # _AppendingDict__data).
        self.__data = {}
    def __getattribute__(self, name):
        print('Calling getattribute with %s' % name)
        # Only these attributes are exposed; everything else reads as None.
        if name in ['setdefault', '_AppendingDict__data', 'json']:
            return object.__getattribute__(self, name)
        return None
    def setdefault(self, name, value):
        # TODO: I guess this is what it does?
        # NOTE(review): unlike dict.setdefault, this unconditionally
        # replaces any existing history with a one-element list.
        self.__data[name] = [value]
    def __getitem__(self, name):
        print('Getting an item in the namespace %s' % name)
        # Return the most recently assigned value for the key.
        values = self.__data.get(name)
        if values:
            return values[-1]
        else:
            raise KeyError('No such key \'%s\'' % name)
    def __setitem__(self, name, value):
        print('Setting an item in the namespace %s => %s' % (name, value))
        # Append to the key's history rather than overwriting.
        if name not in self.__data:
            self.__data[name] = []
        self.__data[name].append(value)
    def __delitem__(self, name):
        # Pop the most recent value for the key.
        # NOTE(review): the fallback branches look inverted — `if not
        # self.__data` tests the whole dict (not the key), so the KeyError
        # path is effectively unreachable for a non-empty dict.
        if name in self.__data:
            return self.__data[name].pop()
        if not self.__data:
            del self.__data[name]
        else:
            raise KeyError('No such key \'%s\'' % name)
    def json(self):
        print('Converting to JSON')
        # Serialises the full histories, not just the latest values.
        return json.dumps(self.__data)
    @property
    def __dict__(self):
        print('Being called')
        return self
class CodeNamespace(AppendingDict):
    """AppendingDict specialisation for code bindings."""
    def __init__(self):
        # The original called super(AppendingDict, self).init(), which
        # both skipped AppendingDict.__init__ (so the internal data dict
        # was never created) and referenced a non-existent `init`
        # attribute on dict.  Delegate to the parent properly.
        super().__init__()
class ContextModule(object):
    '''
    A ContextModule instance acts like a normal Python module, but maintains the globally-
    distributed QDPy environment. Care must be taken to maintain consistency in managing basic
    attribute ACLs, so as not to impede upon other QDPy clients. For now, this purely involves
    enforcing that writes to the distributed context only overwrite locally-owned bindings. Any
    attempts to overwrite a global binding results in "masking" the global binding with a local
    one.
    '''
    def __init__(self, group=None):
        self.__group = group
        # Bindings owned by this client; these mask the globals.
        self.__locals = {}
    @property
    def __path__(self):
        # Present so the instance can masquerade as a package/module.
        return ''
    def __is_internal_attr(self, name):
        # Underscore-prefixed names are implementation details, not context.
        return name.startswith('_')
    def __get_internal_attr(self, name):
        # Bypass the __getattr__ machinery for real instance attributes.
        return object.__getattribute__(self, name)
    def __set_internal_attr(self, name, value):
        object.__setattr__(self, name, value)
    @property
    def __globals(self):
        # Placeholder for the distributed (shared) bindings.
        return {}
    @property
    def __merged_context(self):
        # Locals take precedence over (mask) the global bindings.
        merged = self.__globals.copy()
        merged.update(self.__locals.copy())
        return merged
    def __getattr__(self, name):
        print('__getattr__ being called with %s' % name)
        if self.__is_internal_attr(name):
            return self.__get_internal_attr(name)
        if name in self.__merged_context:
            return self.__merged_context[name]
        else:
            raise AttributeError('No such attributed \'%s\'' % name)
    def __setattr__(self, name, value):
        print('__setattr__ being called with %s => %s' % (name, value))
        if self.__is_internal_attr(name):
            self.__set_internal_attr(name, value)
        else:
            # Writes never overwrite a global binding; they mask it with
            # a local one (see class docstring).
            if name in self.__globals:
                print('WARNING: masking distributed variable [%s]' % name)
            # TODO: Update the distributed context
            self.__locals[name] = value
    def setdefault(self, name, value):
        # NOTE(review): unlike dict.setdefault this always assigns,
        # even when the name already exists.
        self.__setattr__(name, value)
    def __dir__(self):
        return self.__merged_context.keys()
    @property
    def __dict__(self):
        print('Using dict')
        return self.__locals
|
import random
print("The program is to simulate a cleaning robot.",end = "\n")
print("There will be m * n map when you type in.",end = "\n")
def init():
    """Ask the user for map dimensions and a garbage count, then return
    an m x n grid where 1 marks a garbage cell and 0 a clean one."""
    # Create an map m*n
    print("Please input the first number M:")
    m = int(input())
    print("Then, input the second number N:")
    n = int(input())
    Map = [[0 for i in range(n)] for j in range(m)]
    # Initial garbages
    print("Please input how many garbages are (-1 will be random)")
    garbage_count = int(input())
    if garbage_count == -1:
        garbage_count = random.randint(1,m*n-1)
    else:
        # Re-prompt until the count fits on the map; -1 re-randomizes.
        while garbage_count > m*n or garbage_count < 0:
            print("There is too many garbages, please re-input:")
            garbage_count = int(input())
            if garbage_count == -1:
                garbage_count = random.randint(1,m*n)
    # Fill in garbages
    while garbage_count > 0:
        x = random.randint(0,m-1)
        y = random.randint(0,n-1)
        # Skip cells that already hold garbage so the count stays exact.
        if Map[x][y] == 1:
            continue
        else:
            Map[x][y] = 1
            garbage_count -= 1
    return Map
def countGarbage(Map):
    """Return how many cells of *Map* still contain garbage (value 1)."""
    return sum(row.count(1) for row in Map)
def clean(x, y, Map, garbage):
    """Clean cell (x, y) if it holds garbage, printing progress.

    *garbage* is the current garbage count and is used only for the
    success message; when it is 0 nothing happens.
    """
    if garbage == 0:
        return
    if Map[x][y] != 1:
        print("There is no garbage in [",x,",",y,"]")
        return
    print("There is a garbage in [",x,",",y,"]\nCleaning...")
    Map[x][y] = 0
    print("Success!,there are",garbage-1,"left")
def menu():
    """Print the top-level menu options."""
    for line in ("If you want to initial the map, press 'i'",
                 "If you want to quit, press 'q'"):
        print(line)
    return
def cleanMenu():
    """Build the map interactively, show the cleaning-mode menu, and
    return the map."""
    Map = init()
    print("If you want to clean automatically, press 'a' (notice: every computer has their own recursive deep,press ctrl+c to terminate)")
    print("If you want to clean by yourself, press 'm'")
    return Map
def Auto(m,n,x,y,Map,Record):
    """Recursive flood-fill cleaner starting at cell (x, y).

    Each reachable cell is visited once (Record marks visits) and
    cleaned.  NOTE: depth-first recursion can exceed the recursion
    limit on large maps (hence the ctrl+c warning in cleanMenu).
    """
    garbage = int(countGarbage(Map))
    # Stop at the map edges.
    if x >= m or y >= n or x < 0 or y < 0:
        return
    # Never revisit a cell.
    if Record[x][y] == True:
        return
    Record[x][y] = True
    clean(x,y,Map,garbage)
    # Recurse into the four neighbours (right, down, left, up).
    Auto(m,n,x+1,y,Map,Record)
    Auto(m,n,x,y+1,Map,Record)
    Auto(m,n,x-1,y,Map,Record)
    Auto(m,n,x,y-1,Map,Record)
def Manual(Map,Record):
    """Interactively ask for coordinates until the map is clean.

    Out-of-range input is rejected and a cell may only be tried once
    (tracked in Record).
    """
    while countGarbage(Map) > 0:
        # User input
        print("input x (between 0 ~",len(Map)-1,") :")
        x = int(input())
        if x >= len(Map) or x < 0:
            print("Illegal")
            continue
        print("input y (between 0 ~",len(Map[0])-1,") :")
        y = int(input())
        if y >= len(Map[0]) or y < 0:
            print("Illegal")
            continue
        # Record it and not to go the same time
        if Record[x][y] == True:
            print("You have already clean it!")
            continue
        Record[x][y] = True
        # Record
        if Map[x][y] == 1:
            Map[x][y] = 0
            print("There is a garbage,and it's clear.",countGarbage(Map),"left.")
        else:
            print("There is no garbage,please input another position")
def action(c,Map):
    """Dispatch to automatic ('a') or manual ('m') cleaning of *Map*."""
    m = len(Map)
    n = len(Map[0])
    # Shared visited-cell tracker for either cleaning mode.
    Record = [[0 for i in range(n)] for j in range(m)]
    if c == 'a':
        # Automatic mode starts the flood fill from a random cell.
        a = random.randint(0,m-1)
        b = random.randint(0,n-1)
        print("Initial position:",a,b)
        Auto(m,n,a,b,Map,Record)
    elif c == 'm':
        Manual(Map,Record)
    print("The environment is clear!")
if __name__ == '__main__':
    # Main menu loop: 'i' builds a map and cleans it, 'q' quits.
    menu()
    counter = 0
    while True:
        # Re-show the menu on every pass after the first.
        if counter != 0:
            menu()
        c = input()[0].lower()
        if c == 'q':
            break
        if c != 'i':
            print("please input it again:")
            continue
        Map = cleanMenu()
        # Second prompt chooses automatic ('a') or manual ('m') cleaning.
        c = input()[0].lower()
        action(c,Map)
        counter += 1
|
import matplotlib.pyplot as plt
import PIL
import numpy
import scipy
import math
from PIL import Image
from matplotlib.pyplot import imread
from numpy import zeros
from numpy import r_
from scipy import fftpack
from numpy import pi
import sys
from huffman import *
# image = Image.open("input.jpg")
# witdh, height = image.size
# print(f" original width and hegiht is: {witdh}, {height}")
# #resize image. make image chia het cho 8
# if witdh % 8 !=0 or height % 8 !=0:
# image = image.resize((witdh - witdh%8,height - height%8))
# witdh, height = image.size
# rImage,gImage,bImage = image.convert('RGB').split()
# rMat = numpy.asarray(rImage).astype(int)
# gMat = numpy.asarray(gImage).astype(int)
# bMat = numpy.asarray(bImage).astype(int)
# #shift
# rMat = rMat - 128
# gMat = gMat - 128
# bMat = bMat - 128
# Quant_50 = [
# [16, 11, 10, 16, 24, 40, 51, 61],
# [12, 12, 14, 19, 26, 58, 60, 55],
# [14, 13, 16, 24, 40, 57, 69, 56],
# [14, 17, 22, 29, 51, 87, 80, 62],
# [18, 22, 37, 56, 68, 109, 103, 77],
# [24, 35, 55, 64, 81, 104, 113, 92],
# [49, 64, 78, 87, 103, 121, 120, 101],
# [72, 92, 95, 98, 112, 100, 103, 99]
# ]
# zigzagOrder = numpy.array([0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,
# 49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63])
# Cos_table = [
# [math.cos((2*i+1)*j * math.pi/16) for j in range(8)] for i in range(8)
# ]
# Range_list = [(i,j) for i in range(8) for j in range(8)]
# Root2_inv = 1 / math.sqrt(2)
# #RLE encoder - ma hoa nhung con sat nhau
# def rle(input):
# encodeRLE = ""
# p = 0
# while (p < 63):
# count = 1
# ch = input[p]
# q = p
# while (q < 63):
# if input[q] == input[q+1]:
# count += 1
# q += 1
# else:
# break
# encodeRLE = encodeRLE +" "+ str(count) + " " + str(ch)
# p = q + 1
# return encodeRLE
# #compute pixels
# pixels = int(witdh * height /64)
# rMat0 = rMat.flatten()
# gMat0 = gMat.flatten()
# bMat0 = bMat.flatten()
# #split array into 64-elements arrays
# rMat1 = numpy.array_split(rMat0, pixels)
# gMat1 = numpy.array_split(gMat0, pixels)
# bMat1 = numpy.array_split(bMat0, pixels)
# #reshape arrays to 8x8 blocks
# for m in range(pixels):
# rMat1[m] = rMat1[m].reshape(8,8)
# #compute DCT
# for m in range(pixels):
# for u in range (8):
# for v in range (8):
# r = 0
# for i,j in Range_list:
# r += rMat1[m][i][j] * Cos_table[i][u] * Cos_table[j][v]
# if u == 0: r *= Root2_inv
# if v == 0: r *= Root2_inv
# rMat1[m][u][v] = r*1/4
# for m in range(pixels):
# #Quantization
# rMat1[m] = numpy.rint(rMat1[m]/Quant_50)
# rMat1[m] = rMat1[m].reshape([64])[zigzagOrder].astype(int)
# for m in range (pixels):
# encodedStr = rle(rMat1[m])
# f = open("z.txt", "a")
# f.write(encodedStr)
# for m in range(pixels):
# gMat1[m] = gMat1[m].reshape(8,8)
# for m in range(pixels):
# for u in range (8):
# for v in range (8):
# r = 0
# for i,j in Range_list:
# r += gMat1[m][i][j] * Cos_table[i][u] * Cos_table[j][v]
# if u == 0: r *= Root2_inv
# if v == 0: r *= Root2_inv
# gMat1[m][u][v] = r*1/4
# for m in range(pixels):
# gMat1[m] = numpy.rint(gMat1[m]/Quant_50)
# gMat1[m] = gMat1[m].reshape([64])[zigzagOrder].astype(int)
# for m in range (pixels):
# encodedStr = rle(rMat1[m])
# f = open("z.txt", "a")
# f.write(encodedStr)
# for m in range(pixels):
# bMat1[m] = bMat1[m].reshape(8,8)
# for m in range(pixels):
# for u in range (8):
# for v in range (8):
# r = 0
# for i,j in Range_list:
# r += bMat1[m][i][j] * Cos_table[i][u] * Cos_table[j][v]
# if u == 0: r *= Root2_inv
# if v == 0: r *= Root2_inv
# bMat1[m][u][v] = r*1/4
# for m in range(pixels):
# bMat1[m] = numpy.rint(bMat1[m]/Quant_50)
# bMat1[m] = bMat1[m].reshape([64])[zigzagOrder].astype(int)
# for m in range (pixels):
# encodedStr = rle(rMat1[m])
# f = open("z.txt", "a")
# f.write(encodedStr)
def FindFrequency(input):
    """Count whitespace-separated token frequencies in file *input*.

    Returns (frequencies, tokens).  The original read the file twice —
    the second f.read() on an exhausted handle always returned '', so
    the token list was always empty — and never closed the handle.
    Read once, reuse the token list, and close via a context manager.
    """
    with open(input, 'r') as f:
        tokens = f.read().split()
    fl = dict()
    for x in tokens:
        fl[x] = fl.get(x, 0) + 1
    return fl, tokens
def createTree():
    """Build a Huffman code from the token frequencies in z.txt and
    print the encoded stream.

    Huffman is a project-local class imported from huffman.py.
    """
    frequency, inputs = FindFrequency('z.txt')
    # Most frequent symbols first.
    frequency = sorted(frequency.items(), key=lambda x: x[1], reverse=True)
    huffman = Huffman(frequency)
    nodes = huffman.sort()
    huffmanCode = huffman.huffman_code_tree(nodes[0][0])
    huffman.printCode(inputs, huffmanCode)
def main():
    """Entry point: build the Huffman tree from the RLE output file."""
    # NOTE(review): inputFile is unused here — the (commented-out)
    # pipeline above hard-codes "input.jpg" and createTree reads 'z.txt'.
    inputFile = "input.jpg"
    createTree()
if __name__ == "__main__":
    main()
import os
import argparse
from flask import request
from flask_api import FlaskAPI, status, exceptions
from werkzeug.utils import secure_filename
import io
import numpy as np
from PIL import Image
import cv2
from datetime import datetime
import re
import math
import apriltag
from flask_cors import CORS
from logzero import logger
import boto3
DB_CLUSTER = "database320"
DB_NAME = "db320"
ARN = "arn:aws:rds:us-east-2:007372221023:cluster:database320"
SECRET_ARN = "arn:aws:secretsmanager:us-east-2:007372221023:secret:rds-db-credentials/cluster-BZEL6PSDLGVBVJB6BIDZGZQ4MI/admin320-fsoCse"
REGION_NAME = "us-east-2"
IMG_FORMAT = ".jpg" # changing this is not handled very gracefully at the moment, probably
UPLOAD_FOLDER = "/temp/uploads"
ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg"}
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if "." not in filename:
        return False
    extension = filename.rsplit(".", 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def create_app(config=None):
    """Create and configure the Flask-API application.

    :param config: optional dict of Flask config overrides
    :return: a configured FlaskAPI app with the /tree, /cluster and /
             endpoints registered
    """
    app = FlaskAPI(__name__)
    app.config.update(dict(DEBUG=False))
    app.config.update(config or {})
    CORS(app)

    @app.route("/tree", methods=["POST"])
    def get_num_clusters():
        # Placeholder: counting clusters in a whole-tree image is not
        # implemented yet.
        logger.warning("POST /tree")
        image = request.files["image"]
        return "", status.HTTP_501_NOT_IMPLEMENTED

    @app.route("/cluster", methods=["POST"])
    @app.route("/cluster/<int:cluster_num>", methods=["POST"])
    def label_apples(cluster_num=None):
        """Upload a cluster photo.

        Without a cluster number, a new cluster id is allocated; with one,
        the image is alignment-checked against the most recent photo of
        that cluster. On success the image is stored in S3.
        """
        logger.info("POST /cluster/{}".format(cluster_num if cluster_num is not None else ""))
        # BUG FIX: remember now whether this request creates a new cluster.
        # cluster_num is reassigned below, so it is never None at return
        # time and the old `cluster_num is None` check always yielded 200.
        creating_new_cluster = cluster_num is None

        if "cluster_img" not in request.files:
            logger.error("missing_cluster_img")
            return ret(error_message="missing_cluster_img"), status.HTTP_400_BAD_REQUEST
        input_image = request.files["cluster_img"]
        if input_image and allowed_file(input_image.filename):
            filename = secure_filename(input_image.filename)
            os.makedirs(UPLOAD_FOLDER, exist_ok=True)
            filename = os.path.join(UPLOAD_FOLDER, filename)
            input_image.save(filename)
        else:
            logger.error("invalid_cluster_img")
            return ret(error_message="invalid_cluster_img"), status.HTTP_400_BAD_REQUEST

        # Decode the uploaded file into an OpenCV image, then drop the
        # temporary upload from disk.
        input_image = cv2.imread(filename)
        os.remove(filename)

        s3 = boto3.client("s3", region_name=REGION_NAME)

        if cluster_num is not None:
            # STEP 1: validate the supplied cluster number.
            if not is_valid_cluster_num(cluster_num):
                # BUG FIX: log message previously said "invalid_cluster_img".
                logger.error("invalid_cluster_num")
                return ret(error_message="invalid_cluster_num"), status.HTTP_400_BAD_REQUEST
            # STEP 2: alignment check against the most recent image.
            most_recent_image = get_last_picture(s3, cluster_num)
            if most_recent_image is not None:
                aligned = check_alignment(input_image, most_recent_image)
            else:
                # TODO: check for good tag positioning
                aligned = 1
            if aligned == -1:
                logger.error("error, tag not present in input img")
                return ret(error_message="no_tag"), status.HTTP_400_BAD_REQUEST
            elif aligned == 0:
                logger.error("input image not aligned")
                return ret(error_message="not_aligned"), status.HTTP_400_BAD_REQUEST
            else:
                logger.info("successfully aligned")
        else:
            tag = detect_tag(input_image)
            if not tag:  # just check if the tag is there
                logger.error("error, tag not present in input img")
                return ret(error_message="no_tag"), status.HTTP_400_BAD_REQUEST
            # TODO: allocate ids via the database instead of scanning all
            # of S3 to find the highest existing cluster id (slow, and not
            # safe against concurrent uploads).
            existing_clusters = list(
                get_matching_s3_objects(s3, "orchardwatchphotos", prefix="clusters")
            )
            if existing_clusters:
                get_cluster_id = lambda key: int(re.findall(r"clusters/(\d*)/", key)[0])
                highest_cluster = max(existing_clusters, key=lambda o: get_cluster_id(o["Key"]))
                highest_cluster_id = get_cluster_id(highest_cluster["Key"])
            else:
                highest_cluster_id = 0
            cluster_num = int(highest_cluster_id) + 1  # No race conditions here, no sir.

        # STEP 3: name the picture '<date>_<time>.jpg'.
        now = datetime.now()
        key = str(now.date()) + "_" + str(now.time()) + IMG_FORMAT
        # STEP 4: send to S3 to be stored in /clusters/<cluster_num>.
        store_in_s3(s3, input_image, cluster_num, key)

        # TODO: measure the apples (measure_image) and persist ClusterImage /
        # FruitDataPoint rows via the rds-data API.
        logger.info("Success!")
        return ret(cluster_num=cluster_num), (
            status.HTTP_201_CREATED if creating_new_cluster else status.HTTP_200_OK
        )

    # technically this can be consolidated into label_apples, but
    # I put it separately for readability
    @app.route("/cluster/<int:cluster_num>", methods=["GET"])
    def get_cluster_data(cluster_num):
        logger.info("GET /cluster/{}".format(cluster_num))
        # well, get the data.
        return "", status.HTTP_501_NOT_IMPLEMENTED

    @app.route("/", methods=["GET"])
    def hello():
        return "Hi from the server!", status.HTTP_200_OK

    return app
def ret(error_message=None, **kwargs):
    """
    Make return JSON object
    :param error_message: sets "error" field to given message string
    :param kwargs: fields to set on the return JSON
    """
    # kwargs are merged last, so an explicit error=... kwarg wins — same
    # precedence as the original implementation.
    body = {"error": error_message} if error_message is not None else {}
    body.update(kwargs)
    return body
def measure_image(input_image, most_recent_image):
    # Returns: list of doubles corresponding to relative growth rate per apple
    # NOTE(review): currently a stub — both image arguments are ignored and
    # canned values are returned via dummy_measurement().
    return dummy_measurement()
def dummy_measurement():
    """Placeholder measurement: three fixed per-apple growth values."""
    canned_values = [5.2, 3.1, 2.5]
    return canned_values
def is_valid_cluster_num(cluster_num):
    """A valid cluster number is an int between 1 and N_VALID_CLUSTERS."""
    N_VALID_CLUSTERS = 10000
    if not isinstance(cluster_num, int):
        return False
    return 1 <= cluster_num <= N_VALID_CLUSTERS
def make_s3_cluster_name(cluster_num):
    """Return (bucket, key-prefix) for a cluster's photo folder in S3."""
    return "orchardwatchphotos", "clusters/{}".format(cluster_num)
def make_s3_datapoint_name(cluster_num, subkey):
    """Return (bucket, key) for one photo inside a cluster's S3 folder."""
    bucket_name, prefix = make_s3_cluster_name(cluster_num)
    return bucket_name, prefix + "/" + str(subkey)
def get_last_picture(s3, cluster_num):
    """Fetch the most recent photo of *cluster_num* from S3.

    "Most recent" means the lexicographically greatest key, which matches
    the date_time naming scheme used on upload. Returns the image as a
    numpy array, or None when the cluster folder is empty.
    """
    bucket_name, folder_key = make_s3_cluster_name(cluster_num)
    cluster_photos = list(get_matching_s3_objects(s3, bucket_name, prefix=folder_key))
    if not cluster_photos:
        return None
    latest = max(cluster_photos, key=lambda obj: obj["Key"])
    body = boto3.resource("s3").Object(bucket_name, latest["Key"]).get()["Body"].read()
    return np.asarray(Image.open(io.BytesIO(body)))
def store_in_s3(s3, image, cluster_num, subkey):
    """Encode *image* as IMG_FORMAT and upload it to the cluster's S3 folder."""
    bucket_name, key = make_s3_datapoint_name(cluster_num, subkey)
    encoded = cv2.imencode(IMG_FORMAT, image)[1]
    s3.upload_fileobj(io.BytesIO(encoded.tobytes()), bucket_name, key)
def compute_homography_distance(m1, m2):
    """Frobenius-style distance between two equally-shaped matrices.

    :param m1: 2-D sequence (e.g. a 3x3 homography)
    :param m2: 2-D sequence of the same shape as m1
    :return: sqrt of the sum of squared element-wise differences
    """
    # Replaces two manual index loops plus an intermediate diff list with a
    # single generator over paired rows/elements.
    return math.sqrt(
        sum((a - b) ** 2 for row1, row2 in zip(m1, m2) for a, b in zip(row1, row2))
    )
def detect_tag(image):
    """Run AprilTag detection on an RGB image; return the detection list."""
    # The apriltag library wants a single-channel image, so convert to
    # grayscale and binarize with Otsu thresholding first.
    grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    _, binarized = cv2.threshold(grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    return apriltag.Detector().detect(binarized)
def check_alignment(l1, l2):
    """Compare the AprilTag homographies of two images.

    :param l1: color image matrix
    :param l2: color image matrix
    :return: 1 if aligned (distance within threshold), 0 if not aligned,
             -1 when a tag cannot be detected in either image
    """
    # Threshold for alignment.
    # VVV MAKE THIS NUMBER LARGER IF YOU NEED TO FAKE IT FOR THE DEMO
    sim_thresh = 1

    tags_a = detect_tag(l1)
    tags_b = detect_tag(l2)
    # Ensure an AprilTag can be detected in both images.
    if not tags_a or not tags_b:
        return -1

    # Similarity check on the first detected tag's homography.
    distance = compute_homography_distance(tags_a[0].homography, tags_b[0].homography)
    return 1 if distance <= sim_thresh else 0
def get_matching_s3_objects(
    s3, bucket, prefix="", suffix="", max_keys_per_request=100,
):
    """
    List objects in an S3 bucket.
    Yields one object-metadata dict per matching key, following S3's
    continuation tokens so arbitrarily large buckets are handled.
    :param s3: boto.client("s3") client
    :param bucket: Name of the S3 bucket.
    :param prefix: Only fetch objects whose key starts with
                        this prefix (optional).
    :param suffix: Only fetch objects whose keys end with
                   this suffix (optional).
    :param max_keys_per_request: number of objects to list down
    """
    kwargs = {"Bucket": bucket}

    # If the prefix is a single string (not a tuple of strings), we can
    # do the filtering directly in the S3 API.
    if isinstance(prefix, str):
        kwargs["Prefix"] = prefix
    else:
        # NOTE(review): str() of a tuple is unlikely to be a valid S3 key
        # prefix; the per-key startswith() filter below does the real work
        # in that case — confirm intent.
        kwargs["Prefix"] = str(prefix)
    kwargs["MaxKeys"] = max_keys_per_request

    while True:
        # The S3 API response is a large blob of metadata.
        # 'Contents' contains information about the listed objects.
        resp = s3.list_objects_v2(**kwargs)
        try:
            contents = resp["Contents"]
        except KeyError:
            # No objects at all under this prefix.
            return
        for obj in contents:
            key = obj["Key"]
            if key.startswith(prefix) and key.endswith(suffix):
                yield obj
        # The S3 API is paginated, returning up to 1000 keys at a time.
        # Pass the continuation token into the next response, until we
        # reach the final page (when this field is missing).
        try:
            kwargs["ContinuationToken"] = resp["NextContinuationToken"]
        except KeyError:
            break
# Run a development server when executed directly; the port is taken from
# the -p/--port CLI flag (default 8000).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--port", action="store", default="8000")
    args = parser.parse_args()
    port = int(args.port)
    app = create_app()
    # Bind to all interfaces so the API is reachable from other machines.
    app.run(host="0.0.0.0", port=port)
|
from typing import Dict
import mysql.connector
import json
from .goods import shop_list, Goods, shop_name_dict_getter
from typing import Type
from nonebot.adapters import Bot
from nonebot.adapters.cqhttp import GroupMessageEvent
# MySQL connection settings for the bot's database.
# NOTE(review): root user with an empty password on localhost — acceptable
# for local development only.
mysql_connect_config = {
    'user': 'root',
    'password': '',
    'host': '127.0.0.1',
    'database': 'calenderbot',
}
# Parameterized SQL statements for the backpack table (values are bound
# by the MySQL driver, not string-formatted).
create_user = "INSERT INTO backpack_table (qq_num, backpack) VALUES (%s, %s)"
query_user = "SELECT qq_num, backpack FROM backpack_table WHERE qq_num = %s"
update_user = "UPDATE backpack_table SET backpack = %s WHERE qq_num = %s"
# Maps a goods name back to its Goods class, used when deserializing.
shop_name_dict = shop_name_dict_getter()
class User:
    """A user's item backpack, serialized to JSON and stored in MySQL."""

    def __init__(self, qq_num: str, backpack: 'Dict[Type[Goods], int]' = None):
        # BUG FIX: the previous default was a mutable {} shared by every
        # instance constructed without an explicit backpack; use None and
        # create a fresh dict per instance instead.
        self.qq_num: str = qq_num
        self.backpack: 'Dict[Type[Goods], int]' = {} if backpack is None else backpack

    @classmethod
    def get_user(cls, qq_num: str):
        """Load a user from the database; return None when the row is absent."""
        cnx = mysql.connector.connect(**mysql_connect_config)
        cursor = cnx.cursor()
        cursor.execute(query_user, (qq_num,))
        res = cursor.fetchone()
        cursor.close()
        cnx.close()
        if not res:
            return None
        return cls(qq_num, cls.backpack_deserializer(res[1]))

    @classmethod
    def create_user(cls, qq_num: str):
        """Insert a new user row with an empty backpack."""
        cnx = mysql.connector.connect(**mysql_connect_config)
        cursor = cnx.cursor()
        # `create_user` inside execute() resolves to the module-level SQL
        # string, not this classmethod (class attributes are not in the
        # method's name-lookup chain).
        cursor.execute(create_user, (qq_num, cls(qq_num).backpack_serializer()))
        cnx.commit()
        cursor.close()
        cnx.close()

    @classmethod
    def get_or_create_user(cls, qq_num: str):
        """Return the existing user, creating the row first if needed."""
        if (res := cls.get_user(qq_num)) is None:
            cls.create_user(qq_num)
            return cls.get_user(qq_num)
        return res

    async def use_item(self, item: 'Type[Goods]', bot: 'Bot', event: 'GroupMessageEvent', param: str):
        """Consume one unit of *item* and run its effect.

        Returns False when the item is not in the backpack. If the item's
        use() reports failure, the unit is refunded. The backpack is
        persisted in either case.
        """
        if item not in self.backpack:
            return False
        if self.backpack[item] == 1:
            self.backpack.pop(item)
        else:
            self.backpack[item] -= 1
        if not await item().use(bot, event, param):
            # Use failed: give the item back (add_item persists on its own,
            # matching the original double-write behaviour).
            self.add_item(item)
        self._save_backpack()
        return True

    def add_item(self, item: 'Type[Goods]'):
        """Add one unit of *item* and persist the backpack."""
        self.backpack[item] = self.backpack.get(item, 0) + 1
        self._save_backpack()

    def _save_backpack(self):
        """Write the serialized backpack back to this user's row."""
        cnx = mysql.connector.connect(**mysql_connect_config)
        cursor = cnx.cursor()
        cursor.execute(update_user, (self.backpack_serializer(), self.qq_num))
        cnx.commit()
        cursor.close()
        cnx.close()

    def get_desc(self):
        """Return a human-readable summary of the backpack contents."""
        msg = f"这是用户{self.qq_num}的背包信息:\n"
        msg += '\n'.join(map(lambda x: f"{x[0].name}:{x[1]}件", self.backpack.items()))
        return msg

    def backpack_to_dict(self):
        """Map goods-class keys to their names for JSON serialization."""
        return {goods.name: count for goods, count in self.backpack.items()}

    def backpack_serializer(self):
        """Serialize the backpack to a JSON string for storage."""
        return json.dumps(self.backpack_to_dict())

    @classmethod
    def backpack_deserializer(cls, data: str):
        """Rebuild a backpack dict from JSON, resolving names via shop_name_dict."""
        obj = json.loads(data)
        return {shop_name_dict[name]: count for name, count in obj.items()}
# Generated by Django 2.2.6 on 2019-12-28 19:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.6, 2019-12-28).

    Updates verbose names on EmploymentDetail and redefines several
    nullable CharFields with choice sets on Borrower and EmploymentDetail.
    Generated code — edit only if you know Django migrations well.
    """

    dependencies = [
        ('users', '0006_userprofile_qq'),
    ]

    operations = [
        # Display-name change only; no database schema impact.
        migrations.AlterModelOptions(
            name='employmentdetail',
            options={'verbose_name': '工作详情', 'verbose_name_plural': '工作详情'},
        ),
        migrations.AlterField(
            model_name='borrower',
            name='dwelling_condition',
            field=models.CharField(blank=True, choices=[('1', '商品房'), ('2', '经济适用房'), ('3', '自建私有房'), ('4', '租赁房'), ('5', '单位福利分房'), ('6', '学生宿舍')], default='1', max_length=50, null=True, verbose_name='住宅状况'),
        ),
        migrations.AlterField(
            model_name='borrower',
            name='highest_qualification',
            field=models.CharField(blank=True, choices=[('NONE', '无'), ('college', '大专'), ('bachelor', '本科'), ('master', '硕士'), ('doctor', '博士')], default='NONE', max_length=50, null=True, verbose_name='教育层次'),
        ),
        migrations.AlterField(
            model_name='borrower',
            name='marriage_state',
            field=models.CharField(blank=True, choices=[('1', '未婚'), ('2', '已婚'), ('3', '丧偶'), ('4', '离婚')], default='1', max_length=50, null=True, verbose_name='婚姻状况'),
        ),
        migrations.AlterField(
            model_name='employmentdetail',
            name='company_address',
            field=models.CharField(blank=True, max_length=200, null=True, verbose_name='公司地址'),
        ),
        migrations.AlterField(
            model_name='employmentdetail',
            name='company_name',
            field=models.CharField(blank=True, max_length=40, null=True, verbose_name='单位名称'),
        ),
        migrations.AlterField(
            model_name='employmentdetail',
            name='company_type',
            field=models.CharField(blank=True, choices=[('1', '国家行政企业'), ('2', '公私合作企业'), ('3', '中外合资企业'), ('4', '社会组织机构'), ('5', '国际组织机构'), ('6', '外资企业'), ('7', '私营企业'), ('8', '集体企业'), ('9', '国防军事企业')], default='无', max_length=30, null=True, verbose_name='公司类型'),
        ),
        migrations.AlterField(
            model_name='employmentdetail',
            name='employment_state',
            field=models.CharField(blank=True, choices=[('employment', '已就业'), ('unemployment', '待业')], default='无', max_length=30, null=True, verbose_name='就业状态'),
        ),
        migrations.AlterField(
            model_name='employmentdetail',
            name='receive_wage',
            field=models.CharField(blank=True, choices=[('bank', '银行'), ('cash', '现金')], default='无', max_length=20, null=True, verbose_name='获得收入的方式'),
        ),
        migrations.AlterField(
            model_name='employmentdetail',
            name='working_department',
            field=models.CharField(blank=True, max_length=30, null=True, verbose_name='任职部门'),
        ),
        migrations.AlterField(
            model_name='employmentdetail',
            name='working_life',
            field=models.CharField(blank=True, max_length=10, null=True, verbose_name='工作年限'),
        ),
    ]
|
import os
from PIL import Image
import numpy as np
import cv2
import pickle
# Train an LBPH face recognizer from a directory tree of student photos.
# Layout assumption (TODO confirm): image_dir/<numeric-id>/<name>.<ext>
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = "/home/anup/Pictures/StudentFaces"
# Haar cascade for face detection; this absolute path is machine-specific.
face_cascade = cv2.CascadeClassifier(
    '/home/anup/PycharmProjects/Imagemodulator/venv/lib/python3.6/site-packages/cv2/data/haarcascade_frontalface_default.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()

current_id = 0
label_id = {}   # name -> numeric label
x_train = []    # grayscale face ROIs
y_labels = []   # numeric label per ROI

for root, dirs, files in os.walk(image_dir):
    for file_name in files:  # renamed: `file` shadowed the builtin
        if not file_name.endswith(("png", "jpg")):
            continue
        path = os.path.join(root, file_name)
        # Normalise the file name into a lowercase, dash-separated label.
        label = os.path.basename(file_name.replace(" ", "-").lower())
        # The containing directory name supplies the numeric id
        # (renamed: `id` shadowed the builtin).
        person_id = os.path.basename(root)
        # Keep only alphabetic characters before the first dot.
        dot = label.find('.')
        name = "".join(ch for ch in label[:dot] if ch.isalpha())
        if name not in label_id:
            label_id[name] = int(person_id)
            print(label_id)
        id_ = label_id[name]
        # Grayscale, resize to a fixed size, then detect faces.
        # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10; use
        # Image.LANCZOS when upgrading.
        pil_image = Image.open(path).convert("L")
        final_image = pil_image.resize((450, 450), Image.ANTIALIAS)
        image_array = np.array(final_image, "uint8")
        faces = face_cascade.detectMultiScale(image_array, 1.3, 5)
        for x, y, w, h in faces:
            x_train.append(image_array[y:y + h, x:x + w])
            y_labels.append(int(id_))

# Persist the name -> id mapping for the recognition script.
with open("labels.pickle", "wb") as f:
    pickle.dump(label_id, f)

recognizer.train(x_train, np.array(y_labels))
recognizer.save("trainer.yml")
|
class BelajarClass:
    """Minimal example class: one class attribute and one method."""

    i = 12345  # class attribute, shared by all instances

    def f(self):
        """Return a fixed greeting string."""
        return 'hello World'

# Syntax:
# class ClassName:
#     pass  # replace with statements, e.g. attributes or methods
import scrapy
import re
class EntertainmentSpider(scrapy.Spider):
    """Scrapy spider for onlinekhabar.com entertainment articles.

    Crawls the listing page(s), follows links that look like 2017 article
    URLs, and yields one item per paragraph of article text.
    NOTE(review): uses ``xrange``, so this module targets Python 2.
    """
    name = "ent"
    # Listing pages seeding the crawl; xrange(1, 2) covers page 1 only.
    start_urls = (
        'http://www.onlinekhabar.com/content/ent-news/page/%s' % page for page in xrange(1, 2)
    )
    def parse(self, response):
        """Follow article links found on a listing page."""
        for link in response.css('a::attr(href)').extract():
            self.log('Link_input %s' % link)
            # match_pattern=re.match("http:\/\/www.onlinekhabar.com\/2017\/\d", link)
            # Only URLs under /2017/ ending in a digit + slash are articles.
            match_pattern=re.match("http:\/\/www.onlinekhabar.com\/2017\/.*[0-9]/$", link)
            if match_pattern is not None:
                # content_div = link.css('div.ok_single_content')
                link = response.urljoin(link)
                self.log('Link_input %s' % link)
                yield scrapy.Request(link, callback=self.parse_page)
    def parse_page(self, response):
        """Yield one {'content': ...} item per paragraph of the article body."""
        # self.log('Herere %s' % response)
        for page in response.css('div.ok-single-content'):
            for paragraph in page.css('p::text').extract():
                yield {
                    'content': paragraph
                }
|
import math
# Shorthand for the natural logarithm.
l = math.log
# res[i - 2] will hold, for each base i, the smallest j with j! > i ** j.
# The comparison is done in log-space (log j! vs j * log i) to avoid huge
# integers.
res = []
fact = 0.0
pow = 0.0  # NOTE(review): shadows the builtin pow
j=1
for i in range(2,1000001):
    # `j` and `fact` deliberately carry over between bases: this relies on
    # the answer being strictly increasing in i, so the search for each
    # base resumes where the previous one stopped — TODO confirm the
    # strict monotonicity assumption.
    while 1:
        j += 1
        fact += l(j)          # running log(j!)
        pow = j*l(i)          # log(i ** j)
        if fact > pow:
            res.append(j)
            break
# Answer each of the t queries: the result for base a is stored at
# index a - 2 of the precomputed table.
for _ in range(int(input())):
    a = int(input())
    print(res[a - 2])
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
def build(bld):
    """waf build rule for the ns-3 'full' (full-duplex wifi) module.

    Declares the module sources, its test library, and the public headers.
    Fixes applied: the dependency list previously contained 'network'
    twice, and the header list contained 'model/full-wifi-phy.h' twice.
    """
    module = bld.create_ns3_module('full', ['network', 'propagation', 'internet', 'applications'])
    module.source = [
        'model/full-wifi-information-element.cc',
        'model/full-wifi-information-element-vector.cc',
        'model/full-wifi-channel.cc',
        'model/full-wifi-mode.cc',
        'model/full-ssid.cc',
        'model/full-wifi-phy.cc',
        'model/full-wifi-phy-state-helper.cc',
        'model/full-error-rate-model.cc',
        'model/full-yans-error-rate-model.cc',
        'model/full-nist-error-rate-model.cc',
        'model/full-dsss-error-rate-model.cc',
        'model/full-interference-helper.cc',
        'model/full-yans-wifi-phy.cc',
        'model/full-yans-wifi-channel.cc',
        'model/full-wifi-mac-header.cc',
        'model/full-wifi-mac-trailer.cc',
        'model/full-mac-low.cc',
        'model/full-wifi-mac-queue.cc',
        'model/full-mac-tx-middle.cc',
        'model/full-mac-rx-middle.cc',
        'model/full-dca-txop.cc',
        'model/full-supported-rates.cc',
        'model/full-capability-information.cc',
        'model/full-status-code.cc',
        'model/full-mgt-headers.cc',
        'model/full-random-stream.cc',
        'model/full-dcf-manager.cc',
        'model/full-wifi-mac.cc',
        'model/full-regular-wifi-mac.cc',
        'model/full-wifi-remote-station-manager.cc',
        'model/full-ap-wifi-mac.cc',
        'model/full-sta-wifi-mac.cc',
        'model/full-adhoc-wifi-mac.cc',
        'model/full-wifi-net-device.cc',
        'model/full-arf-wifi-manager.cc',
        'model/full-aarf-wifi-manager.cc',
        'model/full-ideal-wifi-manager.cc',
        'model/full-constant-rate-wifi-manager.cc',
        'model/full-amrr-wifi-manager.cc',
        'model/full-onoe-wifi-manager.cc',
        'model/full-rraa-wifi-manager.cc',
        'model/full-aarfcd-wifi-manager.cc',
        'model/full-cara-wifi-manager.cc',
        'model/full-minstrel-wifi-manager.cc',
        'model/full-qos-tag.cc',
        'model/full-qos-utils.cc',
        'model/full-edca-txop-n.cc',
        'model/full-msdu-aggregator.cc',
        'model/full-amsdu-subframe-header.cc',
        'model/full-msdu-standard-aggregator.cc',
        'model/full-originator-block-ack-agreement.cc',
        'model/full-dcf.cc',
        'model/full-ctrl-headers.cc',
        'model/full-qos-blocked-destinations.cc',
        'model/full-block-ack-agreement.cc',
        'model/full-block-ack-manager.cc',
        'model/full-block-ack-cache.cc',
        'helper/full-athstats-helper.cc',
        'helper/full-wifi-helper.cc',
        'helper/full-yans-wifi-helper.cc',
        'helper/full-nqos-wifi-mac-helper.cc',
        'helper/full-qos-wifi-mac-helper.cc',
        'helper/full-duplex-library.cc',
        ]
    module_test = bld.create_ns3_module_test_library('full')
    module_test.source = [
        'test/full-block-ack-test-suite.cc',
        'test/full-dcf-manager-test.cc',
        'test/full-tx-duration-test.cc',
        'test/full-wifi-test.cc',
        ]
    # headers = bld.new_task_gen(features=['ns3header'])
    headers = bld(features='ns3header')
    headers.module = 'full'
    headers.source = [
        'model/full-wifi-information-element.h',
        'model/full-wifi-information-element-vector.h',
        'model/full-wifi-net-device.h',
        'model/full-wifi-channel.h',
        'model/full-wifi-mode.h',
        'model/full-ssid.h',
        'model/full-wifi-preamble.h',
        'model/full-wifi-phy-standard.h',
        'model/full-yans-wifi-phy.h',
        'model/full-yans-wifi-channel.h',
        'model/full-wifi-phy.h',
        'model/full-interference-helper.h',
        'model/full-wifi-remote-station-manager.h',
        'model/full-ap-wifi-mac.h',
        'model/full-sta-wifi-mac.h',
        'model/full-adhoc-wifi-mac.h',
        'model/full-arf-wifi-manager.h',
        'model/full-aarf-wifi-manager.h',
        'model/full-ideal-wifi-manager.h',
        'model/full-constant-rate-wifi-manager.h',
        'model/full-amrr-wifi-manager.h',
        'model/full-onoe-wifi-manager.h',
        'model/full-rraa-wifi-manager.h',
        'model/full-aarfcd-wifi-manager.h',
        'model/full-cara-wifi-manager.h',
        'model/full-minstrel-wifi-manager.h',
        'model/full-wifi-mac.h',
        'model/full-regular-wifi-mac.h',
        'model/full-supported-rates.h',
        'model/full-error-rate-model.h',
        'model/full-yans-error-rate-model.h',
        'model/full-nist-error-rate-model.h',
        'model/full-dsss-error-rate-model.h',
        'model/full-wifi-mac-queue.h',
        'model/full-dca-txop.h',
        'model/full-wifi-mac-header.h',
        'model/full-qos-utils.h',
        'model/full-edca-txop-n.h',
        'model/full-msdu-aggregator.h',
        'model/full-amsdu-subframe-header.h',
        'model/full-qos-tag.h',
        'model/full-mgt-headers.h',
        'model/full-status-code.h',
        'model/full-capability-information.h',
        'model/full-dcf-manager.h',
        'model/full-mac-rx-middle.h',
        'model/full-mac-low.h',
        'model/full-originator-block-ack-agreement.h',
        'model/full-dcf.h',
        'model/full-ctrl-headers.h',
        'model/full-block-ack-agreement.h',
        'model/full-block-ack-manager.h',
        'model/full-block-ack-cache.h',
        'helper/full-athstats-helper.h',
        'helper/full-wifi-helper.h',
        'helper/full-yans-wifi-helper.h',
        'helper/full-nqos-wifi-mac-helper.h',
        'helper/full-qos-wifi-mac-helper.h',
        'helper/full-duplex-library.h',
        ]

    if bld.env['ENABLE_GSL']:
        module.use.extend(['GSL', 'GSLCBLAS', 'M'])
        module_test.use.extend(['GSL', 'GSLCBLAS', 'M'])

    # if (bld.env['ENABLE_EXAMPLES']):
    #    bld.add_subdirs('examples')
    if bld.env.ENABLE_EXAMPLES:bld.recurse('examples')
    # bld.ns3_python_bindings()
|
class Solution(object):
    """LeetCode 605 'Can Place Flowers'.

    NOTE: this class previously defined canPlaceFlowers TWICE; the second
    definition silently shadowed the first, which was therefore dead code
    and has been removed. The surviving greedy implementation keeps the
    public name and signature.
    """

    def canPlaceFlowers(self, A, n):
        """Return True if at least *n* flowers fit in bed *A* (list of
        0/1) with no two flowers adjacent.

        Greedy left-to-right scan: plant wherever both neighbours are
        empty. The bed is padded with 0 on both ends so boundary plots
        need no special-casing; the caller's list is never modified.
        """
        count = 0
        A = [0] + A + [0]  # padded working copy
        for i in range(1, len(A) - 1):
            if A[i] == 1:
                continue
            if A[i - 1] != 1 and A[i + 1] != 1:
                A[i] = 1  # plant here
                count += 1
        return count >= n
# Ad-hoc driver: run the solver on a sample bed and print the result
# (expected: True, since two flowers fit).
flowerbed = [1, 0, 0, 0, 1, 0, 0]
n = 2
s = Solution()
print(s.canPlaceFlowers(flowerbed, n))
#!/usr/bin/env python
"""
Network analysis script
Parameters:
path: str <path-to-folder>
Usage:
network_smkk.py --path <path-to-folder>
Example:
$ python network_smkk.py --path data/labelled_data
"""
# to call path from command line
import os
from pathlib import Path
import argparse
# System tools
import os
# Data analysis
import pandas as pd
from collections import Counter
from itertools import combinations
from tqdm import tqdm
# NLP
import spacy
nlp = spacy.load("en_core_web_sm")
# drawing
import networkx as nx
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (20,20)
# unzip the zipfile with the images
# define path to the zip file
zip_path = os.path.join("..", "assignment_4", "data")
# set working directory to the zip path
os.chdir(zip_path)
print(zip_path)
# BUG FIX: "!unzip ..." is an IPython shell escape and a syntax error in a
# plain Python script; use the stdlib zipfile module instead.
import zipfile
with zipfile.ZipFile("fake_or_real_news.zip") as zf:
    zf.extractall()
def main():
    """Build a PERSON co-occurrence network from REAL-labelled news
    articles, plot it, and save per-node centrality measures to CSV."""
    ### Initial stuff with paths ###
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--path", required=True, help="Path to data folder")
    ap.add_argument("-o", "--outfile", required=True, help="Output filename")
    args = vars(ap.parse_args())

    # BUG FIX: the output name is stored under the "outfile" key
    # (the old code read args["network"], which raised KeyError).
    out_file_name = args["outfile"]

    # BUG FIX: os.path.join("viz", name, ".png") built a directory path;
    # the extension has to be appended to the file name.
    if not os.path.exists("viz"):
        os.mkdir("viz")
    out_image = os.path.join("viz", out_file_name + ".png")
    if not os.path.exists("output"):
        os.mkdir("output")
    out_file = os.path.join("output", out_file_name + ".csv")

    # Load the CSV and keep the text of articles labelled REAL.
    filepath = Path(args["path"])
    input_file = os.path.join(filepath, "fake_or_real_news.csv")
    data = pd.read_csv(input_file)
    real_df = data[data["label"] == "REAL"]["text"]

    ### Network analysis ###
    # Collect PERSON entities per document.
    text_entities = []
    for text in tqdm(real_df):
        doc = nlp(text)
        text_entities.append(
            [entity.text for entity in doc.ents if entity.label_ == "PERSON"]
        )

    # Every within-document pair of people is one edge; sort each pair so
    # (A, B) and (B, A) count as the same edge.
    edgelist = []
    for entities in text_entities:
        for edge in combinations(entities, 2):
            edgelist.append(tuple(sorted(edge)))

    # Aggregate duplicate edges into weights.
    counted_edges = [
        (source, target, weight)
        for (source, target), weight in Counter(edgelist).items()
    ]
    edges_df = pd.DataFrame(counted_edges, columns=["nodeA", "nodeB", "weight"])
    # Keep only strongly connected pairs to make the plot readable.
    filtered = edges_df[edges_df["weight"] > 500]
    G = nx.from_pandas_edgelist(filtered, 'nodeA', 'nodeB', ["weight"])

    # Plot — fall back to a spring layout when pygraphviz is unavailable.
    try:
        pos = nx.nx_agraph.graphviz_layout(G, prog="neato")
    except ImportError:
        pos = nx.spring_layout(G)
    nx.draw(G, pos, with_labels=True, node_size=20, font_size=10)
    # BUG FIX: the old code saved to an undefined name `outfile`.
    plt.savefig(out_image, dpi=300, bbox_inches="tight")

    # Centrality measures, one CSV row per node.
    # BUG FIX: the old code sorted the centrality frames by a nonexistent
    # "weight" column and called to_csv on the DataFrame *class*.
    ev = nx.eigenvector_centrality(G)
    bc = nx.betweenness_centrality(G)
    degrees = dict(G.degree())
    metrics = pd.DataFrame(
        {
            "degree": pd.Series(degrees),
            "betweenness": pd.Series(bc),
            "eigenvector_centrality": pd.Series(ev),
        }
    )
    metrics.to_csv(out_file, index_label="node")
# Run the analysis only when executed as a script (not on import).
if __name__=="__main__":
    main()
|
#chapter01-02
#파이썬 중급
#객체 지향 프로그래밍(OOP) --> 코드의 재사용, 코드중복 방지
#클래스 변수 심화 (final static ...)
#클래스 선언
class Car(object):
    """
    Example class for demonstrating class variables vs. instance namespaces.

    author : taewon
    date : 2020.01.15
    comment : example
    """
    # Number of Car instances created so far (class-level counter).
    car_count = 0
    # Class variable with a Korean identifier, kept deliberately for the
    # namespace-lookup demo below.
    클래스변수=5

    def __init__(self, car_name, car_detail):
        self.car_name, self.car_detail = car_name, car_detail
        # Count every construction on the class itself, not the instance.
        Car.car_count += 1

    def __str__(self):
        return 'Str: {} - {} - {}'.format(id(self), self.car_name, self.car_detail)

    def __repr__(self):
        return 'Repr: {} - {} - {}'.format(id(self), self.car_name, self.car_detail)

    def price_info(self):
        """Return '<name> - <price>' using the detail dict's 'price' key."""
        return '{} - {}'.format(self.car_name, self.car_detail.get('price'))
car1 = Car('BMW', {'horsepower': 600, 'color':'red', 'price':4000})
car2 = Car('Ferrari', {'horsepower': 400, 'color':'black', 'price':1000})
car3 = Car('Audi', {'horsepower': 800, 'color':'white', 'price':7000})
# Compare id values
print(id(car1), car1)
# Method call through the instance (self is implicit)
print(car2.price_info())
# Method call through the class (instance passed explicitly)
print(Car.price_info(car2))
# Print the class variable
print(Car.car_count)
# Access the class variable through an instance
print(car1.car_count)
car1.car_count = 10
# Namespace inspection: the assignment above created an instance attribute
print(car1.__dict__)
car1.car_count = 1000
print(car1.car_count*10)
# The instance namespace is searched first; missing names fall back to the class
print(car1.클래스변수)
print(Car.클래스변수)
print(Car.__dict__)
import requests
import urllib.parse
import aiohttp
import asyncio
import json
def get_request(link='', params=None, header=None):
    """
    Blocking GET request to an api link, executed via aiohttp.

    Parameters:
        link   : api link
        params : additional query parameters appended to the url
        header : header to api (NOTE(review): currently unused — confirm)
    Returns:
        (data, status) : decoded JSON body and HTTP status code
    """
    if params:
        url = link + urllib.parse.urlencode(params)
    else:
        url = link
    # BUG FIX: the previous implementation installed a brand-new event loop
    # on every call and never closed it, leaking loops. asyncio.run creates
    # AND tears down a fresh loop around the coroutine.
    data, status_code = asyncio.run(get(url))
    print("Request for {} is {}".format(url, status_code))
    print(data)
    return data, status_code
async def get(url=''):
    """GET *url* and return (json_body, status_code).

    A fresh ClientSession per call is simple but adds connection overhead;
    fine for one-off requests.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            return await response.json(), response.status
|
# endcoding:utf-8
import sqlite3
import os
import json
import time
# Export configuration: which database table feeds which output folder.
source_config = {
    'type': 'design_pattern'
}
target_config = {
    'file_path': 'pattern',
}
# types : the database tables to export
# kinds : the corresponding output directory names
# types = ['java_basic', 'design_pattern', 'java_advance', 'database', 'arithmetic', 'framework', 'java_ee', 'java_web']
# kinds = ['basic', 'pattern', 'advance', 'database', 'arithmetic', 'framework', 'java_ee', 'java_web']
# program_language = 'java-lang'
# Android
# For the Android export, table names and directory names are identical.
kinds = types = ['android_advance',
                 'android_basic',
                 'android_component',
                 'android_datastorage',
                 'android_device',
                 'android_games',
                 'android_interview',
                 'android_multimedia',
                 'android_network',
                 'android_source',
                 'android_userinterface']
# kinds = ['advance', 'basic']
# Name of the sibling output directory (created next to the cwd).
program_language = 'android-tmp'
def get_lang_root_dir():
    """Return <parent-of-cwd>/<program_language>, the export root directory."""
    parent_dir = os.path.split(os.getcwd())[0]
    return os.path.join(parent_dir, program_language)
def remove_all_file():
    """
    Delete and recreate the output directory of every kind.
    NOTE(review): shells out to `rm -rf` on a path built from config —
    double-check `program_language`/`kinds` before running.
    """
    for kind in kinds:
        dirtory = os.path.join(os.path.split(os.getcwd())[0], program_language, kind)
        print "on dirtory -->", dirtory
        os.system('rm -rf %s' % dirtory)
        os.system('mkdir %s ' % dirtory)
    print "remove finish"
def get_num(num):
    """Left-pad *num* with zeros to at least four characters.

    1    -> '0001'
    12   -> '0012'
    123  -> '0123'
    1234 -> '1234'   (the old docstring wrongly claimed '01234')

    Uses str.rjust, which is exactly equivalent to the previous
    '0' * (4 - len) formula, including for inputs already 4+ chars long.
    """
    return str(num).rjust(4, '0')
def gerate_filename(index, title, kind, subkind):
    """Build the target .md path:
    ~[program_language]/[kind]/[subkind]/[00xx]-[title].md
    """
    root = os.path.split(os.getcwd())[0]
    basename = get_num(index) + '-' + title + '.md'
    return os.path.join(root, program_language, kind, subkind, basename)
def gerate_file(index, title, content, kind, subKind):
    """
    Generate the target markdown (.md) file for one article.
    """
    # '/' is not allowed in file names, so replace it.
    if '/' in title:
        title = title.replace('/', ',')
    filename = gerate_filename(index, title, kind, subKind)
    print 'prepare to gerate_file -->', filename
    with open(filename, mode='w') as f:
        f.write(str(content))
    print 'gerate_file ok -->', filename
def create_subkind(kind, subkind):
    """
    Create the sub-directory ~[program_language]/[kind]/[subkind].
    """
    root = os.path.split(os.getcwd())[0]
    print root, program_language, kind, subkind
    path = os.path.join(root, program_language, kind, subkind)
    # NOTE(review): raises OSError if the directory already exists — callers
    # rely on remove_all_file() having cleared it first.
    os.makedirs(path)
    print path, "|||| build"
def create_kind_info(kind, result):
    """
    Write <kind>/info.json containing:
      last_motify: a timestamp
      result: the list of sub-kind names
    """
    # NOTE(review): time.time() * 100 looks odd — milliseconds would be
    # * 1000; confirm which unit the consumer expects.
    json_string = json.dumps({'last_motify': time.time() * 100,
                              'result': result
                              })
    path_name = os.path.join(get_lang_root_dir(), kind)
    print "----------------------"
    print "create info data in -->", path_name
    with open(os.path.join(path_name, 'info.json'), mode='w') as f:
        f.write(json_string)
def create_md():
    """Walk every source table and emit one .md file per article.

    Rows with importance == 9 start a new sub-kind (directory); the rows
    that follow are written into that sub-kind.
    """
    con = sqlite3.connect('source.db')
    cursor = con.cursor()
    index = 0
    for t in types:
        subkinds = []
        print 'select title,content,importance from ' + t
        for row in cursor.execute('select title,content,importance from ' + t):
            # print row[0], '--importance===', row[2],
            if row[2] == 9:
                i = 0
                subkind = row[0]
                create_subkind(kinds[index], row[0])
                subkinds.append(row[0])
            else:
                # NOTE(review): `i` and `subkind` are unbound if a table's
                # first row is not an importance-9 header — confirm the data
                # always starts with one.
                print i, row[0], kinds[index], subkind
                gerate_file(i, row[0], row[1].encode('utf-8'), kinds[index], subkind)
                i += 1
        # Table finished.
        print subkinds
        # create_kind_info(kinds[index], subkinds)
        index += 1
    con.close()
    print "create_md finish"
def create_root_file():
    """Ensure the output root and one directory per kind exist."""
    root = os.path.join(os.path.split(os.getcwd())[0], program_language)
    if not os.path.exists(root):
        os.mkdir(root)
    for k in kinds:
        kind_dir = os.path.join(root, k)
        if not os.path.exists(kind_dir):
            os.mkdir(kind_dir)
if __name__ == '__main__':
    # Build the directory skeleton, clear previous output, then export.
    create_root_file()
    remove_all_file()
    create_md()
|
from atcoder.dsu import DSU

# Offline technique: read all operations first, apply every cut, then walk
# the operations in reverse, undoing cuts with a DSU and answering size
# queries; finally print the answers in the original order.
length, n_queries = (int(tok) for tok in input().split())
uf = DSU(length)
operations = []
cut_positions = set()
for _ in range(n_queries):
    kind, pos = (int(tok) for tok in input().split())
    pos -= 1
    operations.append((kind, pos))
    if kind == 1:
        cut_positions.add(pos)
for idx in range(length - 1):
    if idx not in cut_positions:
        uf.merge(idx, idx + 1)
answers = []
for kind, pos in reversed(operations):
    if kind == 1:
        uf.merge(pos, pos + 1)
    else:
        answers.append(uf.size(pos))
for value in reversed(answers):
    print(value)
|
# Write classes for the following class hierarchy:
#
# [Vehicle]->[FlightVehicle]->[Starship]
# | |
# v v
# [GroundVehicle] [Airplane]
# | |
# v v
# [Car] [Motorcycle]
#
# Each class can simply "pass" for its body. The exercise is about setting up
# the hierarchy.
#
# e.g.
#
# class Whatever:
# pass
#
# Put a comment noting which class is the base class
# base/parent class of all vehicles
class Vehicle:
    """Base/parent class of all vehicles in the hierarchy."""
    pass
# child of Vehicle class
class FlightVehicle(Vehicle):
    """A vehicle that flies; parent of Starship and Airplane."""
    pass
#child of FlightVehicle class
class Starship(FlightVehicle):
    """A flight vehicle for space travel."""
    pass
# child of FlightVehicle
class Airplane(FlightVehicle):
    """A flight vehicle for air travel."""
    pass
#child of Vehicle class
class GroundVehicle(Vehicle):
    """A vehicle that travels on the ground; parent of Car and Motorcycle."""
    pass
#child of Ground Vehicle
class Car(GroundVehicle):
    """A four-wheeled ground vehicle."""
    pass
#child of Ground Vehicle
class Motorcycle(GroundVehicle):
    """A two-wheeled ground vehicle."""
    pass
from .base_repository import BaseRepository
from web_app.models import UserPost
class PostRepo(BaseRepository[UserPost]):
    """Repository for UserPost rows; CRUD behaviour comes from BaseRepository."""
    model = UserPost
|
from dataclasses import dataclass
class Error(Exception):
    """Root of the application's exception hierarchy."""
    pass
@dataclass
class ConfigError(Error):
    """Configuration-file error (code 10000)."""
    # NOTE(review): without type annotations these are plain class attributes,
    # so @dataclass generates no fields here — confirm the decorator is
    # intentional (the same pattern repeats on every sibling class).
    code = 10000
    desc = "Config file error."
@dataclass
class InputError(Error):
    """Invalid user input (code 20000)."""
    code = 20000
    desc = "Input invalid"
@dataclass
class ParameterError(Error):
    """Parameter of the wrong type (code 20001)."""
    code = 20001
    desc = "Parameter type invalid."
@dataclass
class OverstepError(Error):
    """Input exceeds an allowed bound (code 40001)."""
    code = 40001
    desc = "Your input is overstepped."
@dataclass
class ServerError(Error):
    """Internal server error of the NLM layer (code 50000)."""
    code = 50000
    desc = "NLM Layer Server internal error."
@dataclass
class DatabaseError(Error):
    """Internal database error (code 50001)."""
    code = 50001
    desc = "Database internal error."
@dataclass
class QueryError(Error):
    """Query failure (code 50002)."""
    code = 50002
    # Fixed user-facing typo: "Qeury" -> "Query".
    desc = "Query error, please check your input."
|
# Generated by Django 3.2.5 on 2021-07-10 00:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: recreates Question.answers as a
    # ManyToManyField to profiles.Answers by removing the old field and
    # adding the new one (remove + add is how Django changes a field type).
    dependencies = [
        ('profiles', '0008_auto_20210708_1557'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='question',
            name='answers',
        ),
        migrations.AddField(
            model_name='question',
            name='answers',
            field=models.ManyToManyField(related_name='answers', to='profiles.Answers'),
        ),
    ]
|
import numpy as np
import math
from scipy.integrate import simps
from sfepy.linalg import norm_l2_along_axis
import scipy.interpolate as si
class RadialVector(object):
    """A vector of values sampled on a radial mesh."""

    @staticmethod
    def from_xy(x, y):
        """Build a RadialVector from raw coordinate and value arrays."""
        return RadialVector(ExplicitRadialMesh(x), y)

    @staticmethod
    def from_file(file):
        """Load column 0 as the mesh and every further column as one vector."""
        array = np.genfromtxt(file)
        mesh = ExplicitRadialMesh(array[:, 0])
        # BUG FIX: xrange is Python 2 only; range behaves the same here.
        return [RadialVector(mesh, array[:, r]) for r in range(1,
                array.shape[1])]

    def __init__(self, mesh, values):
        if isinstance(mesh, np.ndarray):
            mesh = ExplicitRadialMesh(mesh)
        self.mesh = mesh
        self.values = values
        # Cached interpolation / smoothing state.
        self.interpolated = None
        self.precision = None
        self.resultPrecision = None
        self.running = None

    def to_file(self, filename=None):
        """Write (coordinate, value) pairs; defaults to stdout."""
        if filename is None:
            import sys
            filename = sys.stdout
        vector = np.vstack([self.mesh.get_coors(), self.values])
        np.savetxt(filename, vector.T)

    def running_mean(self):
        """Weighted running mean of the values, padded at both ends (cached)."""
        if self.running is None:
            weights = np.array((1.0, 3, 4, 3, 1))
            weights = weights / weights.sum()
            wsize = int(weights.size - 1)
            # BUG FIX: use integer division — wsize / 2 is a float under
            # Python 3 and breaks np.ones() and the slice below.
            half = wsize // 2
            data = np.hstack([np.ones(wsize) * self.values[0],
                              self.values,
                              np.ones(half) * self.values[-1]])
            self.running = np.convolve(data, weights)[wsize:-half - wsize]
        return self.running

    def integrate(self, precision=0.0001):
        # NOTE(review): `precision` is accepted but ignored; kept for
        # interface compatibility.
        return self.mesh.integrate(self)

    def get_interpolated(self, precision=0.0001, grade=10):
        """Return a degree-5 spline of the values.

        precision=None gives exact interpolation; otherwise the smoothing
        factor is doubled until the derivative has at most `grade` sign
        changes. The result is cached.
        """
        if precision is None:
            return si.InterpolatedUnivariateSpline(self.mesh.get_coors(),
                                                   self.values, k=5)
        if self.interpolated is None or self.precision \
                == self.resultPrecision and precision < self.precision:
            self.precision = precision
            # BUG FIX: was self.runningMean(), a name that does not exist
            # (the method is running_mean).
            data = self.running_mean()
            while True:
                self.interpolated = \
                    si.UnivariateSpline(self.mesh.get_coors(),
                                        data, k=5, s=precision)
                der = self.interpolated(self.mesh.get_coors(), 1)
                sig = np.sign(der)
                if np.abs(sig[1:] - sig[:-1]).sum() <= grade:
                    break
                precision = precision * 2
            self.resultPrecision = precision
        return self.interpolated

    def interpolated_values(self, at=None, precision=0.0001, grade=10):
        """Spline values at `at` (defaults to the mesh coordinates)."""
        if at is None:
            at = self.mesh.get_coors()
        # BUG FIX: was self.getInterpolated(), which does not exist.
        return self.get_interpolated(precision, grade)(at)

    def interpolated_derivatives(self, at=None, precision=0.0001):
        """First spline derivative at `at` (defaults to mesh coordinates)."""
        if at is None:
            at = self.mesh.get_coors()
        return self.get_interpolated(precision)(at, 1)

    def radial_derivatives(self):
        """Finite-difference derivative, resampled on the parent mesh if any."""
        difference = np.convolve(self.values, [-1, 1])
        factor = np.convolve(self.mesh.coors ** 2 * math.pi, [-1, 1])
        # BUG FIX: was self.mesh.getParentMesh() — the method is named
        # get_parent_mesh.
        parent = self.mesh.get_parent_mesh()
        if parent is None:
            # BUG FIX: was RadialVector.ByXY(), which does not exist; the
            # midpoint mesh is already a mesh object, so construct directly.
            return RadialVector(self.mesh.get_midpoint_mesh(),
                                difference / factor)
        return RadialVector(parent,
                            self.interpolated_values(parent.get_coors(),
                            None))

    def slice(self, x, y):
        """Sub-vector between x and y (indices, or radii when given floats)."""
        # BUG FIX: get_index lives on the mesh, not on RadialVector.
        if isinstance(x, float):
            x = self.mesh.get_index(x)
        if isinstance(y, float):
            y = self.mesh.get_index(y)
        return RadialVector(self.mesh.slice(x, y), self.values[x:y])

    def extrapolate(self, x):
        """Values extrapolated to radii `x` via the mesh."""
        return self.mesh.extrapolate(self.values, x)

    def extrapolate_3d(self, coors, centre=(0, 0, 0)):
        """Values at 3D points, keyed by their distance from `centre`."""
        return self.mesh.extrapolate_3d(self.values, coors, centre)

    def output_vector(self, filename=None):
        """Write this vector through the mesh's column writer."""
        return self.mesh.output_vector(self, filename)

    @staticmethod
    def SparseMerge(vectors):
        """Merge the meshes of all vectors; resample each sparsely onto it."""
        mesh = RadialMesh.Merge([v.mesh for v in vectors])
        return [mesh.sparse_vector(v) for v in vectors]
class RadialMesh(object):
    """
    Radial mesh: base class with integration / extrapolation helpers that
    operate on self.coors (provided by subclasses).
    """

    def extrapolate_3d(self, potential, coors, centre=None):
        """Evaluate `potential` at 3D points via their distance from `centre`."""
        if centre is not None:
            coors = coors - centre
        r = norm_l2_along_axis(coors, axis=1)
        return self.extrapolate(potential, r)

    def integrate(self, vector):
        r"""
        .. math::
           \int f(r) r^2 dr
        """
        return simps(vector * self.coors ** 2, self.coors)

    def dot(self, vector_a, vector_b):
        r"""
        .. math::
           \int f(r) g(r) r^2 dr
        """
        return self.integrate(vector_a * vector_b)

    def norm(self, vector):
        """Norm induced by the r^2-weighted inner product."""
        # BUG FIX: was self.vdot(), which is not defined anywhere in this
        # hierarchy; the weighted inner product is implemented by dot().
        return np.sqrt(self.dot(vector, vector))

    def output_vector(self, vector, filename=None):
        """Write coors plus one or more vectors as columns; default stdout."""
        if filename is None:
            import sys
            filename = sys.stdout
        if isinstance(vector, RadialVector):
            vector = [vector]
        if isinstance(vector[0], RadialVector):
            vector = [v.values for v in vector]
        vector = np.vstack([self.coors, vector])
        np.savetxt(filename, vector.T)

    @staticmethod
    def Merge(meshes):
        """Union of the coordinates of several meshes as one sorted mesh."""
        merged = np.concatenate(tuple(m.get_coors() for m in meshes))
        return ExplicitRadialMesh(np.unique(merged))
class ExplicitRadialMesh(RadialMesh):
def __init__(self, coors):
self.coors = coors
self.midpointMesh = {}
self.parentMesh = None
@property
def shape(self):
return self.coors.shape
@property
def size(self):
return self.coors.size
def get_coors(self):
return self.coors
def last_point(self):
return self.coors[self.coors.size - 1]
def get_r(self, index):
return self.coors[index]
def get_index(self, r):
pos = self.coors.searchsorted(r)
return (pos if pos < self.coors.size else self.coors.size - 1)
def get_mixing(self, r):
pos = self.get_index(r)
if pos == self.coors.size - 1 and self.coors[pos] < r:
out = [(pos, 1.0)]
elif pos == 0 or self.coors[pos] == r:
out = [(pos, 1.0)]
else:
pos_c = (r - self.coors[pos - 1]) / (self.coors[pos]
- self.coors[pos - 1])
out = [(pos - 1, 1.0 - pos_c), (pos, pos_c)]
return out
def extrapolate(self, potential, r):
return np.interp(r, self.coors, potential, right=0)
def get_midpoint_mesh(self, to=None):
if to is None:
to = len(self.coors)
else:
if not isinstance(to, int):
to = self.get_r(to)
if self.midpointMesh.has_key(to):
return self.midpointMesh[to]
if to is None:
coors = self.coors
else:
coors = self.coors[0:to]
midpoints = np.convolve(coors, [0.5, 0.5], 'valid')
midmesh = ExplicitRadialMesh(midpoints)
self.midpointMesh[to] = midmesh
midmesh.parentMesh = self
return midmesh
def get_parent_mesh(self):
return self.parentMesh
def slice(self, x, y):
if isinstance(x, float):
x = self.get_index(x)
if isinstance(y, float):
y = self.get_index(y)
return ExplicitRadialMesh(self.coors[x:y])
def sparse_vector(self, vector):
values = np.tile(float('NAN'), self.size)
values[self.coors.searchsorted(vector.mesh.get_coors())] = \
vector.values
return RadialVector(self, values)
class RadialHyperbolicMesh(ExplicitRadialMesh):
    """Hyperbolically graded radial mesh: r_i = ap * i / (jm - i)."""
    # Shadows the read-only `size` property of ExplicitRadialMesh so that
    # __init__ can assign self.size as a plain attribute.
    size = None
    def __init__(self, jm, ap=None, size=None, from_zero=False):
        if size is None:
            # Two-argument form: (range, number of points).
            # NOTE(review): self.size / jm is integer division under Python 2
            # when both are ints — confirm which behaviour is intended.
            self.size = (ap if not ap is None else jm)
            self.ap = 1.0
            self.jm = self.size / jm + self.size
        else:
            # Classical form: jm, ap and size all given explicitly.
            self.size = size
            self.jm = jm
            self.ap = ap
        # Point indices start at 1 unless from_zero; mapped hyperbolically.
        coors = np.arange((0.0 if from_zero else 1.0), self.size + 1,
                          dtype=np.float64)
        coors = self.ap * coors / (self.jm - coors)
        super(RadialHyperbolicMesh,
              self).__init__(np.asfortranarray(coors))
|
# -*- coding: utf-8 -*-
import scrapy
from .myselector import Selector as S
import json
from user_agent import generate_user_agent
from Sac.items import SacItem
import time
import urllib.parse
from spiders.localConfigs import *
maxtry = 3
#构造页面检查方法,用于页面的重试
def trytime_(response):
    """Increment the per-request retry counter kept in response.meta."""
    counter = response.meta.get('maxtrys')
    if counter:
        response.meta['maxtrys'] = counter + 1
    else:
        response.meta['maxtrys'] = 1
def gettrytime(response, maxtry=10):
    """Record one more attempt and report whether retrying is still allowed.

    Returns True while the counter stays below `maxtry`, otherwise None
    (falsy), matching the original implicit-None contract.
    """
    trytime_(response)
    attempts = response.meta['maxtrys']
    if attempts < maxtry:
        return True
def checkTimeError(response, maxtry=3):
    """Detect the site's 'setTimeout' anti-bot stub page.

    If retries remain and the stub is present, return a re-scheduled copy
    of the request (dont_filter=True); otherwise return None.
    """
    can_retry = gettrytime(response, maxtry)
    if not can_retry:
        return None
    if 'setTimeout' in response.text:
        return response.request.replace(dont_filter=True)
class SacPersonSpider(scrapy.Spider):
    """Spider for the SAC (Securities Association of China) sites.

    Crawls person_info, com_info, inver_com_info and Securities_info.
    Translated scope notes: targeted crawl of the SAC regulator site;
    currently covers (1) securities-practitioner qualification records,
    plus basic information of (2) securities companies, (3) securities
    investment consulting agencies and (4) securities rating agencies.
    """
    name = "sac"
    allowed_domains = ["sac.net.cn"]
    # Entry points: practitioner register search and institution search.
    start_urls = [
        'http://person.sac.net.cn/pages/registration/train-line-register!orderSearch.action',
        'http://jg.sac.net.cn/pages/publicity/resource!search.action']
    custom_settings = {
        'CONCURRENT_REQUESTS':8,
        'DOWNLOAD_DELAY':0.2}
    def start_requests(self):
        """Kick off crawling from each entry URL (the institution entries
        are currently commented out)."""
        print('Start Crawl Object : %s'%self.__class__.__name__)
        print('the Object docment:%s'%self.__doc__)
        for url in self.start_urls:
            if url == 'http://person.sac.net.cn/pages/registration/train-line-register!orderSearch.action':
                # Entry: practitioner qualification certificate list page.
                '''从业资格证书列表页入口'''
                yield scrapy.FormRequest(url,
                                        formdata=data1,
                                        headers = {'User-Agent':generate_user_agent(os=('win',))},
                                        priority=1000)
            # if url == 'http://jg.sac.net.cn/pages/publicity/resource!search.action':
            #     '''证券公司信息列表页入口'''
            #     yield scrapy.FormRequest(url,
            #                     formdata=data2,
            #                     priority=True,
            #                     callback = self.orgListParse,
            #                     headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},)
            #     '''证券投资咨询公司信息列表页入口'''
            #     yield scrapy.FormRequest(url,
            #                     formdata=data3,
            #                     priority=True,
            #                     callback = self.EQS_sacListParse,
            #                     headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},)
            # #
            #     '''证券评级机构信息列表页入口'''
            #     yield scrapy.FormRequest(url,
            #                     formdata=data4,
            #                     priority=True,
            #                     callback = self.otcListParse,
            #                     headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},)
    def otcListParse(self, response):
        """Rating-agency list page: for each agency id request its basic
        info, its executives and its paginated practitioner list."""
        request = checkTimeError(response)
        if request:return request
        js = json.loads(response.text)
        for js_ in js:
            otcid = js_['AOI_ID']
            page = 1
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!search.action',
                        formdata = {
                            'filter_EQS_aoi_id':str(otcid),
                            'sqlkey':'info',
                            'sqlval':'GET_ORG_INFO_AOIID'},
                        callback = self.otcInfoParse1,
                        meta = {'otcid':otcid},
                        headers = {'User-Agent':generate_user_agent(os=('win','mac','linux')),
                                   'Connection':'keep-alive'},
                        )
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!search.action',
                                     formdata={
                                         'filter_EQS_aoi_id':str(otcid),
                                         'sqlkey':'publicity',
                                         'sqlval':'ZX_EXECUTIVE_LIST'},
                                     callback = self.otcInfoParse4,
                                     meta = {'otcid':otcid},
                                     headers = {'User-Agent':generate_user_agent(os=('win','mac','linux')),
                                                'Connection':'keep-alive'},
                                     )
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!list.action',
                                     formdata={
                                         'filter_EQS_aoi_id':str(otcid),
                                         'page.searchFileName':'publicity',
                                         'page.sqlKey':'PAG_PRACTITIONERS',
                                         'page.sqlCKey':'SIZE_PRACTITONERS',
                                         '_search':'false',
                                         'nd':str(int(time.time()*1000)),
                                         'page.pageSize':'15',
                                         'page.pageNo':str(page),
                                         'page.orderBy':'MATO_UPDATE_DATE',
                                         'page.order':'desc'},
                                     callback = self.otcInfoParse5,
                                     meta = {'otcid':otcid,'page':page},
                                     headers = {'User-Agent':generate_user_agent(os=('win','mac','linux')),
                                                'Connection':'keep-alive'},)
    def otcInfoParse5(self, response):
        """Rating agency -- paginated practitioner list; yields one item per
        person and requests the next page while page < totalPages."""
        request = checkTimeError(response)
        if request:return request
        '''证券评级机构--执业人员信息'''
        item = SacItem()
        page = response.meta['page']
        orgid = response.meta['otcid']
        js = json.loads(response.text)
        if page==1:
            totalPage = js['totalPages']
        else:
            totalPage = response.meta['totalPage']
        configs = otcInfoConfigs
        # NOTE(review): the single `item` object is re-used for every row, so
        # all yields reference the same SacItem — confirm the pipeline
        # consumes each item before the next loop iteration mutates it.
        for js_ in js['result']:
            result = dict()
            result['orgid'] = orgid
            for config in configs['data']:
                k = config['En']
                result[k] = S.select_content(js_, config)
                result[k] = S.replace_invalid_char(result[k])
            item['result'] = result
            item['keys'] = configs['list']['keys']
            item['db'] = configs['list']['db']
            yield item
        if page<totalPage:
            page+=1
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!list.action',
                                     formdata={
                                         'filter_EQS_aoi_id':str(orgid),
                                         'page.searchFileName':'publicity',
                                         'page.sqlKey':'PAG_PRACTITIONERS',
                                         'page.sqlCKey':'SIZE_PRACTITONERS',
                                         '_search':'false',
                                         'nd':str(int(time.time()*1000)),
                                         'page.pageSize':'15',
                                         'page.pageNo':str(page),
                                         'page.orderBy':'MATO_UPDATE_DATE',
                                         'page.order':'desc'},
                                     callback = self.otcInfoParse5,
                                     meta = {'otcid':orgid,'page':page,'totalPage':totalPage},
                                     headers = {'User-Agent':generate_user_agent(os=('win','mac','linux')),
                                                'Connection':'keep-alive'},)
    def otcInfoParse1(self, response):
        '''Rating agency -- basic info, part 1; chains to otcInfoParse2.'''
        request = checkTimeError(response)
        if request:return request
        js = json.loads(response.text)
        # otcid = response.meta['otcid']
        configs = otcInfoBaseconfigs
        for js_ in js:
            result=dict()
            for config in configs['data']:
                k = config['En']
                result[k] = S.select_content(js_, config)
                result[k] = S.replace_invalid_char(result[k])
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!search.action',
                        formdata = {
                            'filter_EQS_aoi_id':str(result['orgid']),
                            'sqlkey':'publicity',
                            'sqlval':'SELECT_ZX_REG_INFO',
                            'Connection':'keep-alive'},
                        headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},
                        callback = self.otcInfoParse2,
                        meta = {'result':result}
                        )
    def otcInfoParse2(self, response):
        """Rating agency -- basic info, part 2; yields the merged item and
        requests the licence images for the registration id."""
        request = checkTimeError(response)
        if request:return request
        '''证券评级机构--基本信息2'''
        item = SacItem()
        js = json.loads(response.text)
        configs = otcInfoBaseconfigs2
        # NOTE(review): `result` is referenced after the loop; if `js` is
        # empty this raises NameError — confirm the endpoint always returns
        # at least one row.
        for js_ in js:
            result = response.meta['result']
            for config in configs['data']:
                k = config['En']
                result[k] = S.select_content(js_, config)
                result[k] = S.replace_invalid_char(result[k])
            item['result'] = result
            item['keys'] = configs['list']['keys']
            item['db'] = configs['list']['db']
            yield item
        yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!search.action',
                    formdata = {
                        'filter_EQS_mri_reg_id':str(result['REG_ID']),
                        'sqlkey':'info',
                        'sqlval':'GET_FILES_BY_REG_ID'},
                    callback = self.otcInfoParse3,
                    meta = {'orgid':result['orgid']},
                    headers = {'User-Agent':generate_user_agent(os=('win','mac','linux')),
                               'Referer': 'http://jg.sac.net.cn/pages/publicity/credit_rating_reg.html?aoi_id={orgid}&is_org_search=no'.format(orgid=result['orgid']),
                               'Content-Type': 'application/x-www-form-urlencoded',
                               'Connection':'keep-alive'},
                    )
    def otcInfoParse3(self, response):
        """Rating agency -- licence image records; builds a download URL per
        certificate file and yields one item each."""
        request = checkTimeError(response)
        if request:return request
        '''证券评级机构--执照图片'''
        item = SacItem()
        orgid = response.meta['orgid']
        js = json.loads(response.text)
        configs = {'list':{'v':'','t':'','keys':['REG_ID','ZRNI_NAME'],'db':'dbo.SAC_otclicenseCopy'},
                   'data':[{'n':'REGID','En':'REG_ID','t':'json','v':'MRI_REG_ID','dt':''},
                           {'n':'证书ID','En':'ZRNI_ID','t':'json','v':'ZRNI_ID','dt':''},
                           {'n':'证书name','En':'ZRNI_NAME','t':'json','v':'ZRNI_NAME','dt':''},
                           {'n':'证书path','En':'ZRNI_PATH','t':'json','v':'ZRNI_PATH','dt':''},
                           {'n':'证书类型','En':'ZRNI_TYPE','t':'json','v':'ZRNI_TYPE','dt':''},
                           ]
                   }
        for js_ in js:
            result = dict()
            result['orgid'] = orgid
            for config in configs['data']:
                k = config['En']
                result[k] = S.select_content(js_, config)
                result[k] = S.replace_invalid_char(result[k])
            # NOTE(review): the template has no '{filename}' placeholder, so
            # the format() kwarg `filename` is silently ignored — confirm the
            # intended fileName parameter of the download URL.
            formtxt = 'http://jg.sac.net.cn/pages/publicity/train-line-register!writeFile.action?inputPath={path}&fileName=(unknown)'
            filename = urllib.parse.quote(urllib.parse.quote(result['ZRNI_NAME'].encode('utf-8')).encode('utf-8'))
            result['url'] = formtxt.format(path=result['ZRNI_PATH'],filename = filename)
            item['result'] = result
            item['keys'] = configs['list']['keys']
            item['db'] = configs['list']['db']
            yield item
    def otcInfoParse4(self, response):
        """Rating agency -- senior-executive records; yields one item each."""
        request = checkTimeError(response)
        if request:return request
        '''证券评级机构--高管人员信息'''
        item = SacItem()
        orgid = response.meta['otcid']
        js = json.loads(response.text)
        configs = {'list':{'v':'','t':'','keys':['NAME','orgid','PRACTITIONERS_START_DATE'],'db':'dbo.SAC_otcseniorExecutive'},
                   'data':[{'n':'中国注册会计师资格证书号码','En':'ACCOUNTANTS_NO','t':'json','v':'EI_ACCOUNTANTS_NO','dt':''},
                           {'n':'现任职务','En':'CURRENT_POSITION','t':'json','v':'EI_CURRENT_POSITION','dt':''},
                           {'n':'是否通过证券评级业务高级管理人员资质测试','En':'ISPASS_SENIOR_MANAGEMENT','t':'json','v':'EI_ISPASS_SENIOR_MANAGEMENT','dt':''},
                           {'n':'姓名','En':'NAME','t':'json','v':'EI_NAME','dt':''},
                           {'n':'任职起始时间','En':'PRACTITIONERS_START_DATE','t':'json','v':'EI_PRACTITIONERS_START_DATE','dt':''},
                           {'n':'证券从业人员证书号码','En':'SECURITIES_PROFESSIONALS','t':'json','v':'EI_SECURITIES_PROFESSIONALS','dt':''},
                           {'n':'性别','En':'Gender','t':'json','v':'GC_ID','dt':''}
                           ]
                   }
        for js_ in js:
            result = dict()
            result['orgid'] = orgid
            for config in configs['data']:
                k = config['En']
                result[k] = S.select_content(js_, config)
                result[k] = S.replace_invalid_char(result[k])
            item['result'] = result
            item['keys'] = configs['list']['keys']
            item['db'] = configs['list']['db']
            yield item
    def EQS_sacListParse(self, response):
        """Investment-consulting agency list page: request each agency's
        basic info."""
        request = checkTimeError(response)
        if request:return request
        '''证券投资咨询机构--列表页parse'''
        js = json.loads(response.text)
        for js_ in js:
            orgid = js_['AOI_ID']
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!search.action',
                         callback = self.EQS_sacInfoParse,
                         formdata = {
                             'filter_EQS_aoi_id':str(orgid),
                             'sqlkey':'info',
                             'sqlval':'GET_ORG_INFO_AOIID'
                             },
                         headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},
                         )
    def EQS_sacInfoParse(self, response):
        '''Investment-consulting agency -- basic info, part 1; chains to
        EQS_sacInfoParse2.'''
        # orgid = response.meta['orgid']
        js = json.loads(response.text)
        configs = {'list':{'v':'','t':'','keys':'','db':''},
                   'data':[{'n':'机构ID','En':'orgid','t':'json','v':'AOI_ID','dt':''},
                           {'n':'会员编号','En':'MEMBER_NO','t':'json','v':'AOI_MEMBER_NO','dt':''},
                           {'n':'会员代码','En':'menber_code','t':'json','v':'AOI_NO','dt':''},
                           {'n':'机构代码','En':'org_No','t':'json','v':'AOI_ORG_NO','dt':''},
                           {'n':'会员级别','En':'OPC_NAME','t':'json','v':'OPC_NAME','dt':''},
                           ]
                   }
        for js_ in js:
            result=dict()
            for config in configs['data']:
                k = config['En']
                result[k] = S.select_content(js_, config)
                result[k] = S.replace_invalid_char(result[k])
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!search.action',
                        formdata = {'filter_EQS_aoi_id':str(result['orgid']),
                                    'sqlkey':'publicity',
                                    'sqlval':'SELECT_TZ_REG_INFO'},
                        headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},
                        callback = self.EQS_sacInfoParse2,
                        meta = {'result':result})
    def EQS_sacInfoParse2(self, response):
        """Investment-consulting agency -- basic info, part 2; yields the
        merged item."""
        request = checkTimeError(response)
        if request:return request
        '''证券投资咨询机构--基本信息2'''
        js = json.loads(response.text)
        item = SacItem()
        configs = EQS_sacInfoParse2Configs
        for js_ in js:
            result = response.meta['result']
            for config in configs['data']:
                k = config['En']
                result[k] = S.select_content(js_, config)
                result[k] = S.replace_invalid_char(result[k])
            item['result'] = result
            item['keys'] = configs['list']['keys']
            item['db'] = configs['list']['db']
            yield item
    def orgListParse(self, response):
        """Securities-company list page: for each company id request basic
        info, branch offices, sales departments and executives."""
        request = checkTimeError(response)
        if request:return request
        '''证券公司--列表页parse'''
        js = json.loads(response.text)
        for orgid_ in js:
            orgid = orgid_['AOI_ID']
            page=1
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!search.action',
                        formdata = {'filter_EQS_aoi_id':str(orgid),
                                    'sqlkey':'publicity',
                                    'sqlval':'SELECT_ZQ_REG_INFO'},
                        headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},
                        callback = self.orgInfoParse1,
                        meta = {'orgid':orgid},
                        )
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!list.action',
                        formdata = {'filter_LIKES_mboi_branch_full_name':'',
                                    'filter_LIKES_mboi_off_address':'',
                                    'filter_EQS_aoi_id':str(orgid),
                                    'page.searchFileName':'publicity',
                                    'page.sqlKey':'PAG_BRANCH_ORG',
                                    'page.sqlCKey':'SIZE_BRANCH_ORG',
                                    '_search':'false',
                                    'nd':str(int(time.time()*1000)),
                                    'page.pageSize':'15',
                                    'page.pageNo':str(page),
                                    'page.orderBy':'MATO_UPDATE_DATE',
                                    'page.order':'desc'},
                        meta = {'orgid':orgid,'page':1},
                        callback = self.BRANCH_OrgParse,
                        headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},)
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!list.action',
                        formdata = {'filter_LIKES_msdi_name':'',
                                    'filter_LIKES_msdi_reg_address':'',
                                    'filter_EQS_aoi_id':str(orgid),
                                    'page.searchFileName':'publicity',
                                    'page.sqlKey':'PAG_SALES_DEPT',
                                    'page.sqlCKey':'SIZE_SALES_DEPT',
                                    '_search':'false',
                                    'nd':str(int(time.time()*1000)),
                                    'page.pageSize':'15',
                                    'page.pageNo':str(page),
                                    'page.orderBy':'MATO_UPDATE_DATE',
                                    'page.order':'desc'},
                        meta = {'orgid':orgid,'page':1},
                        callback = self.SALES_DEPTParse,
                        headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},)
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!search.action',
                        formdata = {'filter_EQS_aoi_id':str(orgid),
                                    'sqlkey':'publicity',
                                    'sqlval':'EXECUTIVE_LIST'},
                        meta = {'orgid':orgid},
                        callback = self.senior_executiveParse,
                        headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},)
    def senior_executiveParse(self, response):
        """Securities company -- senior-executive records; one item per row."""
        request = checkTimeError(response)
        if request:return request
        '''证券公司--高管信息'''
        item = SacItem()
        orgid = response.meta['orgid']
        js = json.loads(response.text)
        configs = {'list':{'v':'','t':'','keys':['orgid','name','OFFICE_DATE','OFFICE_DATE'],'db':'dbo.SAC_executive'},
                   'data':[{'n':'现任职务','En':'CURRENT_POSITION','t':'json','v':'EI_CURRENT_POSITION','dt':''},
                           {'n':'姓名','En':'name','t':'json','v':'EI_NAME','dt':''},
                           {'n':'任职起始时间','En':'OFFICE_DATE','t':'json','v':'EI_OFFICE_DATE','dt':''},
                           {'n':'性别','En':'gender','t':'json','v':'GC_ID','dt':''},
                           ]
                   }
        for js_ in js:
            result=dict()
            result['orgid'] = orgid
            for config in configs['data']:
                k = config['En']
                result[k] = S.select_content(js_, config)
                result[k] = S.replace_invalid_char(result[k])
            item['result'] = result
            item['keys'] = configs['list']['keys']
            item['db'] = configs['list']['db']
            yield item
    def SALES_DEPTParse(self, response):
        """Securities company -- paginated sales-department list; yields one
        item per department and follows the next page."""
        request = checkTimeError(response)
        if request:return request
        '''证券公司--营业部信息'''
        item = SacItem()
        orgid = response.meta['orgid']
        page = response.meta['page']
        js = json.loads(response.text)
        if page == 1:
            totalPage = js['totalPages']
        else:
            totalPage = response.meta['totalPage']
        configs = SALES_DEPTParseConfigs
        for js_ in js['result']:
            result = dict()
            result['orgid'] = orgid
            for config in configs['data']:
                k = config['En']
                result[k] = S.select_content(js_, config)
                result[k] = S.replace_invalid_char(result[k])
            item['result'] = result
            item['keys'] = configs['list']['keys']
            item['db'] = configs['list']['db']
            yield item
        if page< totalPage:
            page+=1
            yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!list.action',
                        formdata = {'filter_LIKES_msdi_name':'',
                                    'filter_LIKES_msdi_reg_address':'',
                                    'filter_EQS_aoi_id':str(orgid),
                                    'page.searchFileName':'publicity',
                                    'page.sqlKey':'PAG_SALES_DEPT',
                                    'page.sqlCKey':'SIZE_SALES_DEPT',
                                    '_search':'false',
                                    'nd':str(int(time.time()*1000)),
                                    'page.pageSize':'15',
                                    'page.pageNo':str(page),
                                    'page.orderBy':'MATO_UPDATE_DATE',
                                    'page.order':'desc'},
                        meta = {'orgid':orgid,'page':page,'totalPage':totalPage},
                        callback = self.SALES_DEPTParse,
                        headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},)
def BRANCH_OrgParse(self, response):
request = checkTimeError(response)
if request:return request
'''证券公司--分公司信息'''
item = SacItem()
orgid = response.meta['orgid']
page = response.meta['page']
js = json.loads(response.text)
if page == 1:
totalPage = js['totalPages']
else:
totalPage = response.meta['totalPage']
configs = BRANCH_OrgConfigs
for js_ in js['result']:
result=dict()
result['orgid'] = orgid
for config in configs['data']:
k = config['En']
result[k] = S.select_content(js_, config)
result[k] = S.replace_invalid_char(result[k])
item['result'] = result
item['keys'] = configs['list']['keys']
item['db'] = configs['list']['db']
yield item
if page<=totalPage:
page+=1
yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!list.action',
formdata = {'filter_LIKES_mboi_branch_full_name':'',
'filter_LIKES_mboi_off_address':'',
'filter_EQS_aoi_id':str(orgid),
'page.searchFileName':'publicity',
'page.sqlKey':'PAG_BRANCH_ORG',
'page.sqlCKey':'SIZE_BRANCH_ORG',
'_search':'false',
'nd':str(int(time.time()*1000)),
'page.pageSize':'15',
'page.pageNo':str(page),
'page.orderBy':'MATO_UPDATE_DATE',
'page.order':'desc'},
meta = {'orgid':orgid,'page':page,'totalPage':totalPage},
callback = self.BRANCH_OrgParse,
headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},)
    def orgInfoParse1(self, response):
        """Securities company -- basic info; the accumulated `result` dict is
        passed on to orgInfoParse2 via meta."""
        request = checkTimeError(response)
        if request:return request
        '''证券公司信息基本信息--result传入orgInfoParse2'''
        # NOTE(review): `item` is created but never used in this method.
        item = SacItem()
        orgid = response.meta['orgid']
        js = json.loads(response.text)
        configs = orgInfoparse1configs
        result = dict()
        for js_ in js:
            for config in configs['data']:
                k = config['En']
                result[k] = S.select_content(js_ , config,response)
                result[k] = S.replace_invalid_char(result[k])
        data = {'filter_EQS_aoi_id':str(orgid),
                'sqlkey':'publicity',
                'sqlval':'SEARCH_ZQGS_QUALIFATION'}
        yield scrapy.FormRequest('http://jg.sac.net.cn/pages/publicity/resource!search.action',
                    formdata = data,
                    headers = {'User-Agent':generate_user_agent(os=('win','mac','linux'))},
                    callback = self.orgInfoParse2,
                    meta = {'orgid':orgid,'result':result},
                    )
    def orgInfoParse2(self, response):
        """Securities company -- business-scope names, joined into one
        comma-separated field; yields the completed item."""
        request = checkTimeError(response)
        if request:return request
        '''证券公司信息获取经营范围'''
        item = SacItem()
        result = response.meta['result']
        result['orgid'] = response.meta['orgid']
        js = json.loads(response.text)
        PTSC_NAME = []
        for i in js:
            PTSC_NAME.append(i['PTSC_NAME'])
        result['ptsc'] = ','.join(PTSC_NAME)
        result['ptsc'] = S.replace_invalid_char(result['ptsc'])
        item['result'] = result
        item['keys'] = ['orgid']
        item['db'] = 'dbo.SAC_securitiesInfo'
        yield item
    def parse(self, response):
        """Qualification register -- company list; yields one item per
        company and two practitioner-search requests for its row id."""
        request = checkTimeError(response)
        if request:return request
        '''从业资格证书--公司基本信息'''
        item = SacItem()
        js = json.loads(response.text)
        configs = configs1
        for json_ in js:
            result = dict()
            for config in configs['data']:
                result[config['En']] = json_[config['v']]
                result[config['En']] = S.replace_invalid_char(result[config['En']])
            item['result'] = result
            item['keys'] = configs['list']['keys']
            item['db'] = configs['list']['db']
            CropRowID = result['CropRowID']
            datas = asc_data(CropRowID)
            headers = {'User-Agent':generate_user_agent()}
            yield scrapy.FormRequest("http://person.sac.net.cn/pages/registration/train-line-register!search.action",
                                    formdata=datas[0],
                                    headers = headers,
                                    meta = {'CropRowID':CropRowID},
                                    priority=0,
                                    callback = self.cctparse)
            yield scrapy.FormRequest("http://person.sac.net.cn/pages/registration/train-line-register!search.action",
                                    formdata=datas[1],
                                    headers = headers,
                                    meta = {'CropRowID':CropRowID},
                                    priority=0,
                                    callback = self.cctparse)
            yield item
    def cctparse(self, response):
        """Practitioner list page: resolve each person's EmpHashID into a
        detail request."""
        request = checkTimeError(response)
        if request:return request
        '''个人证券从业信息列表页PARSE'''
        js = json.loads(response.text)
        configs = cctconfigs
        for json_ in js:
            # print(json_)
            headers = {'User-Agent':generate_user_agent()}
            result = dict()
            for config in configs['data']:
                result[config['En']] = S.select_content(json_, config,response)
                result[config['En']] = S.replace_invalid_char(result[config['En']])
            EmpHashID = result['EmpHashID']
            data = {'filter_EQS_PPP_ID':EmpHashID,
                    'sqlkey':'registration',
                    'sqlval':'SD_A02Leiirkmuexe_b9ID'}
            yield scrapy.FormRequest('http://person.sac.net.cn/pages/registration/train-line-register!search.action',
                        formdata = data,
                        headers = headers,
                        callback = self.getEmpIDparse,
                        priority=2,
                        meta = {'result':result}
                        )
    def getEmpIDparse(self, response):
        """Resolve the person's internal EmpID, then request their change
        history and personal details."""
        request = checkTimeError(response)
        if request:return request
        '''证券从业资格-个人信息'''
        js = json.loads(response.text)
        headers = {'User-Agent':generate_user_agent()}
        result = response.meta['result']
        if js:
            result['EmpID'] = js[0]['RPI_ID']
            data = {
                'filter_EQS_RH#RPI_ID':result['EmpID'],
                'sqlkey':'registration',
                'sqlval':'SEARCH_LIST_BY_PERSON'}
            yield scrapy.FormRequest('http://person.sac.net.cn/pages/registration/train-line-register!search.action',
                        formdata = data,
                        headers = headers,
                        callback = self.Employee_Change,
                        priority=3,
                        meta={'EmpID':result['EmpID']}
                        )
            yield scrapy.Request('http://person.sac.net.cn/pages/registration/train-line-register!search.action?filter_EQS_RPI_ID={EMPID}&sqlkey=registration&sqlval=SELECT_PERSON_INFO'.format(EMPID=result['EmpID']),
                        headers = headers,
                        callback = self.Employee_InFo,
                        priority=3,
                        meta={'result':result}
                        )
    def Employee_InFo(self, response):
        """Practitioner detail: photo URL and institution fields; yields the
        completed item."""
        request = checkTimeError(response)
        if request:return request
        item = SacItem()
        try:
            js = json.loads(response.text)
            result = response.meta['result']
            for json_ in js:
                result['image'] = 'http://photo.sac.net.cn/sacmp/images/'+json_['RPI_PHOTO_PATH']
                result['ADI_NAME'] = json_['ADI_NAME']
                result['ADI_ID'] = json_['ADI_ID']
                item['result'] = result
                item['keys'] = cctconfigs['list']['keys']
                item['db'] = cctconfigs['list']['db']
                yield item
        except:
            # NOTE(review): bare except hides all failures, and scrapy.log
            # was removed in modern Scrapy, so this fallback would itself
            # raise — confirm the installed Scrapy version.
            msg = '%s%s'%(response.url,response.text)
            scrapy.log.msg(msg)
    def Employee_Change(self, response):
        """Practitioner registration-change history; yields one item per row."""
        request = checkTimeError(response)
        if request:return request
        '''证券从业资格-个人变更信息'''
        item = SacItem()
        js = json.loads(response.text)
        result = dict()
        configs = Employee_ChangeConfigs
        # NOTE(review): `result` is created once and re-used for every row, so
        # all yielded items share one mutating dict — confirm this is intended
        # (the sibling parsers create a fresh dict per row).
        for json_ in js:
            for config in configs['data']:
                result[config['En']] = S.select_content(json_, config,response)
                result[config['En']] = S.replace_invalid_char(result[config['En']])
            item['result'] = result
            item['keys'] = configs['list']['keys']
            item['db'] = configs['list']['db']
            yield item
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# In[3]:
#View To The Existing Raw Data
# NOTE(review): the CSV name really contains a space before ".csv" — confirm
# it matches the file on disk.
data = pd.read_csv('playstore-analysis .csv')
data
# In[4]:
data.columns
# In[5]:
data.info()
# In[6]:
data.isnull().sum()
# In[7]:
### here we are able to see null values in each column
# In[8]:
data = data.dropna(subset=['Rating'], how = 'all') # after looking a lot of value is null so we drops the null values record
# In[9]:
data["Rating"].isnull().sum()
# In[10]:
### i droped the record where rating is missing since rating is our target/study variable
# ### Check the null values for the Android Ver column
# In[11]:
data.loc[data["Android Ver"].isnull()]
# In[12]:
### here i got the three record here with NaN in the Android Ver column
# ### Are all 3 records having the same problem?
# In[13]:
### yes ,all the three column is having the same problem that is NaN values
# ### Drop the 3rd record i.e. record for “Life Made WIFI
# In[14]:
# NOTE(review): drops by hard-coded index label; raises KeyError if row 10472
# was already removed by the dropna above.
data.drop([10472] , inplace=True)
# In[15]:
data.loc[data["Android Ver"].isnull()]
# ### Replace remaining missing values with the mode
#
# In[16]:
data['Android Ver'].fillna(data['Android Ver'].mode()[0], inplace=True)
# In[17]:
data.loc[data["Android Ver"].isnull()]
# ### Current ver – replace with most common value
# In[18]:
data.loc[data["Current Ver"].isnull()]
# In[19]:
data['Current Ver'].fillna(data['Current Ver'].mode()[0], inplace=True)
# In[20]:
data.loc[data["Current Ver"].isnull()]
# ### TASK 2: Data clean up – correcting the data types
# ### Which all variables need to be brought to numeric types?
#
# In[21]:
### variables that need to be converted to numeric types:
#1. Size
#2. Install
#3. Category _and Content Rating
#4. price
# ### Price variable remove doller sign and convert to float
# In[22]:
data["Price"].unique()
# In[23]:
### the column is object dtype and carries a leading '$' that must be removed
### before any numeric operation.
# In[24]:
# BUG FIX: the original assigned the cleaned values to a NEW lowercase
# 'price' column, leaving the real 'Price' column dirty; write back to 'Price'.
data['Price'] = data.Price.replace('Everyone', np.nan)  # stray non-numeric value
# BUG FIX: with the (historical) regex default, '$' is an end-of-string
# anchor and removes nothing — regex=False makes it a literal replacement.
data['Price'] = data.Price.str.replace('$', "", regex=False)
data['Price'] = data['Price'].astype(float)
data['Price'].dtype
# ### Installs – remove ‘,’ and ‘+’ sign, convert to integer
# In[25]:
data["Installs"].unique()
# In[26]:
# Literal replacements: '+' is a regex metacharacter (a bare '+' pattern even
# raises re.error), so regex=False is required here as well.
data['Installs'] = data.Installs.str.replace(",", "", regex=False)
data['Installs'] = data.Installs.str.replace("+", "", regex=False)
data['Installs'] = data.Installs.replace("Free", np.nan)  # stray non-numeric value
data['Installs'] = data['Installs'].astype(float)
data['Installs'].dtype
# ### Convert all other identified columns to numeric
# In[27]:
data["Reviews"] = data["Reviews"].astype(float)
# In[28]:
data["Reviews"]
# ### Sanity checks – check for the following and handle accordingly
# #### Avg. rating should be between 1 and 5, as only these values are allowed on the play store.
# In[29]:
# BUG FIX: the original combined two already-filtered DataFrames with '&'
# (element-wise AND of frames, not a row filter).  The out-of-range check is
# a single boolean mask: Rating < 1 OR Rating > 5.
data.loc[(data.Rating < 1) | (data.Rating > 5)]
# ### Are there any such records? Drop if so.
# In[30]:
### no values below 1 or above 5 were found, so nothing needs to be dropped
# ### Reviews should not be more than installs as only those who installed can review the app.
# In[31]:
data.loc[data["Reviews"] > data["Installs"]]
# In[32]:
### a few rows have more reviews than installs, which is impossible
# ### Are there any such records? Drop if so.
# In[33]:
bad_rows = data[data['Reviews'] > data['Installs']].index
data.drop(labels=bad_rows, inplace=True)
# In[34]:
# Verify: the filter should now return an empty frame.
data.loc[data['Reviews'] > data['Installs']]
# ### 4: Identify and handle outliers
# ### Make suitable plot to identify outliers in price
# In[35]:
# NOTE(review): `data` is already a DataFrame; this wrap shares the same data.
df=pd.DataFrame(data)
# In[36]:
sns.boxplot(df["Price"])
# In[37]:
data["Price"].describe()
# In[38]:
### from the above visualization we are able to see that there is a outerlier laying in the Price column
# ### Do you expect apps on the play store to cost 200Doller? Check out these cases
# In[39]:
data1=data.loc[data['Price'] > 200]
data1
# In[40]:
data1.count()
# In[41]:
# yes there are 15 records in the data which cost more than 200 $ in the play store
# In[42]:
# Drop the >$200 apps as junk listings.
data.drop(data[data['Price'] >200].index, inplace = True)
# In[43]:
data1=data.loc[data['Price'] > 200]
data1
# In[44]:
sns.boxplot(data['Price'])
# ### Limit data to records with price < $30
#
# In[45]:
record_30 = data[data['Price'] > 30].index
data.drop(labels=record_30, inplace=True)
# In[46]:
plt.boxplot(data['Price'])
plt.show()
# ## b. Reviews column
# ### i. Make suitable plot
# In[47]:
box =sns.boxplot(data["Reviews"])
plt.show(box)
# ### ii) Limit data to apps with < 1 Million reviews
# In[48]:
record_1m = data[data['Reviews'] > 1000000 ].index
data.drop(labels = record_1m, inplace=True)
print(record_1m.value_counts().sum(),'cols dropped')  # (rows dropped, despite the label)
# ## Install
# ### i. What is the 95th percentile of the installs?
# In[49]:
percentile = data.Installs.quantile(0.95)
print(percentile,"is 95th percentile of Installs")
# ### Drop records having a value more than the 95th percentile
# In[50]:
temp=data[data["Installs"]>percentile].index
data.drop(labels=temp,inplace=True)
print(temp.value_counts().sum())
# # Data analysis to answer business questions
# ### What is the distribution of ratings like? (use Seaborn) More skewed towards higher/lowervalues?
# In[51]:
#how do you explain this
sns.distplot(data['Rating'])
plt.show()
print('The skewness of this distribution is',data['Rating'].skew())
print('The Median of this distribution {} is greater than mean {} of this distribution'.format(data.Rating.median(),data.Rating.mean()))
# In[52]:
#The skewness of this distribution is -1.7434270330647985
#The Median of this distribution 4.3 is greater than mean 4.170800237107298 of this distribution
# In[53]:
##What is the implication of this on your analysis?
'''
Since mode >= median > mean, the distribution of Rating is Negatively Skewed.
Thereforethe distribution of Rating is more Skewed towards lower values.
'''
data['Rating'].mode()
# ## What are the top Content Rating values?
# In[54]:
# Are there any values with very few records?
# In[55]:
data['Content Rating'].value_counts()
# In[ ]:
# # Effect of size on rating
# In[56]:
## Make a joinplot to understand the effect of size on rating
# In[57]:
sns.jointplot(y='Size',x='Rating',data=data,kind='hex')
plt.show()
# In[58]:
# b) Do you see any patterns?
'''Yes, Patterns can be observed between Size and Rating which proves their is correlation between Size and Rating.'''
# In[59]:
# c) How do you explain the pattern?
'''There is positive correlation between Size and Rating since usually on increased Rating, Size of App also increases, but this is not always the case ie.for higher Rating, their is constant Size maintained'''
# # Effect of price on rating
# In[64]:
# a) Make a jointplot (with regression line)
sns.jointplot(x='Price', y='Rating', data=data, kind='reg')
plt.show()
# In[65]:
sns.jointplot(y='Price',x='Rating',data=data,kind='hex')
plt.show()
#
# In[ ]:
# In[ ]:
# In[ ]:
### Which metric would you use? Mean? Median? Some other quantile?
# In[ ]:
# In[ ]:
# In[ ]:
# ### Look at all the numeric interactions together
# In[66]:
# a) Make a pairplort with the colulmns - 'Reviews', 'Size', 'Rating', 'Price'
sns.pairplot(data, vars=['Reviews', 'Size', 'Rating', 'Price'], kind='reg')
plt.show()
# ## 10. Rating vs. content rating
# In[ ]:
## Make a bar plot displaying the rating for each content rating
# In[ ]:
# NOTE(review): count() plots the number of apps per content rating, not a
# rating statistic; the median barh further down is the corrected metric.
data.groupby(['Content Rating'])['Rating'].count().plot.bar(color="lightblue")
plt.show()
# In[ ]:
data1=data.groupby(['Content Rating'])
print(data1)
# In[ ]:
# b) Which metric would you use? Mean? Median? Some other quantile?
'''We use Median in this case as we are having Outliers in Rating. Because in case of Outliers, median is the best measure of central tendency.'''
# In[ ]:
plt.boxplot(data['Rating'])
plt.show()
# In[ ]:
# c) Choose the right metric and plot
data.groupby(['Content Rating'])['Rating'].median().plot.barh(color="darkgreen")
plt.show()
# ## 11. Content rating vs. size vs. rating – 3 variables at a time
#
# In[ ]:
### a. Create 5 buckets (20% records in each) based on Size
# In[ ]:
#data[('Size')].count()
# In[ ]:
#bins=[0-1687,3374,5061,6748,8435]
# In[ ]:
#data["Size"]=pd.cut(data['Size'],bins)
# In[ ]:
#data["Size"]
# In[ ]:
## By Content Rating vs. Size buckets, get the rating (20th percentile) for each combination
# In[ ]:
# In[ ]:
# NOTE(review): the bucket-creation cells above are commented out, so no
# 'Bucket Size' column exists and this pivot raises KeyError as written —
# e.g. data['Bucket Size'] = pd.qcut(data['Size'], 5) would supply it.
temp3= pd.pivot_table(data, values='Rating', index='Bucket Size', columns='Content Rating',
                      aggfunc= lambda x:np.quantile(x,0.2))
temp3
# In[ ]:
# c) Make a Heatmap of this
# i) Annoted
f,ax = plt.subplots(figsize=(5, 5))
sns.heatmap(temp3, annot=True, linewidths=.5, fmt='.1f',ax=ax)
plt.show()
# In[ ]:
# ii) Greens color map
f,ax = plt.subplots(figsize=(5, 5))
sns.heatmap(temp3, annot=True, linewidths=.5, cmap='Greens',fmt='.1f',ax=ax)
plt.show()
# In[61]:
# d) What’s your inference? Are lighter apps preferred in all categories? Heavier? Some?
'''After The Analysis, it is visible that its not solely that lighter apps are preferred in all categories,
as apps with 60K-100K have got the Highest Rating in almost every category. Hence we can conclude as there is no such
preference of lighter weighing apps over heavier apps. '''
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 09:39:20 2018
@author: kad017
"""
# Python 2 script: explores how the variance of a 2-D dataset changes when
# its columns/rows are block-averaged with different block sizes i.
import numpy as np
#import matplotlib.pyplot as plt
m=16
data=np.arange(m)
# NOTE(review): np.sqrt(m) is a float; modern numpy requires integer reshape
# dimensions (int(np.sqrt(m))) — this relied on old numpy accepting floats.
data=data.reshape(-1,np.sqrt(m))
print data
var_orig=np.var(data)
print var_orig,'orig var'
rows,col=data.shape
print rows,"rows"
column_t=np.transpose(data)
var_row_i=[]     # variances after row-averaging
var_col_i=[]     # variances after column-averaging
var_colrow_i=[]  # declared but never filled
# --- average columns only in the original data, for each block size i ---
for i in range (2,rows):
    print i, "i"
    k=rows%i  # leftover rows that don't fill a complete block of size i
    print k,"k"
    # NOTE(review): col0/coln0 are re-defined on every pass and read the
    # CURRENT module-level i and k at call time (late binding); the loops
    # further down deliberately(?) reuse the last definitions — confirm.
    def col0(inp):
        # k == 0 case: every block is complete; reshape and average.
        column=inp.reshape(-1,i)
        print column,"transposed data, zero k"
        column=np.mean(column,axis=1)
        print column, "averaged col data"
        #column=column.reshape(rows/i,col)...don't need to reshape after taking the mean, since you calc variance
        #of flattened array
        var_col_i.append(np.var(column))
        print var_col_i,"variance for columns"
        return var_col_i
    def coln0(inp,nrow):
        # k != 0 case: drop the trailing partial block before averaging.
        print inp
        column1=np.delete(inp,np.s_[nrow-k:nrow],axis=1)
        print column1
        #column=column[0][0:-k] #have to include the [0] because the matrix is ([[..]]) and you want to eliminate
        #array element so first you must extract array, so ([[..]])[0]=([..])
        column1=column1.reshape(-1,i)
        print column1,"transposed data,non zero k"
        column1=np.mean(column1,axis=1)
        print column1, "averaged col data"
        #column=column.reshape(rows/i,col)...don't need to reshape after taking the mean, since you calc variance
        #of flattened array
        var_col_i.append(np.var(column1))
        print var_col_i,"variance for columns"
        return var_col_i
    if k==0:
        col0(column_t)
    else:
        coln0(column_t,rows)
"Using function to average columns only in orig data"
#For averaging rows only
for i in range(2,col):
    print i,"i"
    j=col%i  # leftover columns that don't fill a complete block
    print j,"j"
    def row0(inp):
        row=inp.reshape(-1,i)
        row=np.mean(row,axis=1)
        row_shaped=row.reshape(-1,col/i)  # Python 2 integer division
        rows0,col0=row_shaped.shape
        var_row_i.append(np.var(row))
        print var_row_i,i,"variance for rows"
        return var_row_i,row_shaped,rows0
    def rown0(inp):
        print inp, "input data"
        row1=np.delete(inp,np.s_[col-j:col],axis=1)
        row1=row1.reshape(-1,i)
        row1=np.mean(row1,axis=1)
        row1_shaped=row1.reshape(-1,(col-j)/i)
        rows1,col1=row1_shaped.shape
        var_row_i.append(np.var(row1))
        print var_row_i,i,"variance for rows"
        return var_row_i,row1_shaped,rows1
    if j==0:
        variance_row,shaped_row,rows0=row0(data)
        variance_row
        #Axis=1 means taking average of each row, which here means averaging
        #over time
    else:
        variance_row1,shaped_row1,rows1=rown0(data)
        variance_row1
# --- rows already averaged (complete-block branch): now average columns too ---
for i in range (2,rows):
    print "PENULT"
    # NOTE(review): shaped_row/rows0 only exist when j==0 above; otherwise
    # this loop raises NameError.
    print shaped_row,"row0"
    row_t=np.transpose(shaped_row)
    print i, "i"
    k=rows0%i
    print k,"k"
    if k==0:
        col0(row_t)
    else:
        coln0(row_t,rows0)
# --- same, for the partial-block row-averaging variant ---
for i in range (2,rows):
    print "LAST PART"
    variance_row1,shaped_row1,rows1=rown0(data)
    row1_t=np.transpose(shaped_row1)
    print i, "i"
    k=rows1%i
    print k,"k"
    if k==0:
        col0(row1_t)
    else:
        coln0(row1_t,rows1)
print var_row_i,'row'
print var_col_i,'col'
#mean = np.mean(data,axis=1)
#plt.plot(mean)
#plt.imshow(np.transpose(mean_col), aspect='auto', cmap='hot')
#plt.imshow(var, aspect='auto', cmap='hot')
#plt.plot(np.arange(len(mean)), mean)
#print mean[39000:40100].shape, np.arange(39000,40100).shape
#plt.plot(np.arange(39720,39745), mean[39720:39745])
#ax = plt.gca()
#plt.plot(np.arange(39720,39745), mean[39720:39745])
#plt.show()
import itertools
def loadFile(filename):
    """Read one whitespace-separated transaction per line from `filename`.

    Returns D, the list of transactions (each a list of item strings), and
    fills the module-level dict C1 with 1-item occurrence counts.
    Python 2 script (print statements).
    """
    D=[]
    f=open(filename,"r")
    # NOTE(review): this local counter shadows the module-level `transactions`
    # and is never returned; the caller sets transactions = len(D) instead.
    transactions=0
    for line in f:
        T = []
        transactions += 1
        for word in line.split():
            T.append(word)
            if word not in C1.keys():
                C1[word] = 1
            else:
                count = C1[word]
                C1[word] = count + 1
        D.append(T)
    print "\nDataset: "+filename+" \t Total Elements: "+str(len(D))
    for i in D:
        print " "+str(i)
    print C1
    return D
def computeInitialItemset():
    """Return L1: frequent 1-itemsets meeting the global `support` threshold.

    Uses module globals C1 (item counts) and transactions (set after
    loadFile returns).  NOTE(review): under Python 2,
    100 * count / transactions is floor division, so the comparison against
    float(support) is slightly conservative.
    """
    L1 = []
    for key in C1:
        if (100 * C1[key]/transactions) >= float(support):
            list = []  # NOTE(review): shadows the builtin `list`
            list.append(key)
            L1.append(list)
    print "\nFrequent Itemset: 1 \t Elements: "+str(len(L1))
    for i in L1:
        print " "+str(i)
    return L1
def apriori_gen(Lk_1, k):
    """Apriori candidate generation (join + prune).

    Joins pairs of frequent itemsets from Lk_1 that share their first k-1
    items (itemsets are kept sorted), then prunes candidates with an
    infrequent k-subset.  `k` is the length of the itemsets in Lk_1.
    """
    length = k
    Ck = []
    for list1 in Lk_1:
        for list2 in Lk_1:
            count = 0
            c = []
            if list1 != list2:
                # Compare the k-1 item prefix; the while/else runs the join
                # only when the loop finishes WITHOUT hitting `break`, i.e.
                # the prefixes are identical.
                while count < length-1:
                    if list1[count] != list2[count]:
                        break
                    else:
                        count += 1
                else:
                    # Join in lexicographic order so each pair joins once.
                    if list1[length-1] < list2[length-1]:
                        for item in list1:
                            c.append(item)
                        c.append(list2[length-1])
                        # Prune step: all k-subsets must be frequent.
                        if not has_infrequent_subset(c, Lk_1, k):
                            Ck.append(c)
                        c = []  # redundant reset; c is rebuilt each iteration
    return Ck
def findsubsets(S, m):
    """Return all size-m subsets of S as a set of tuples."""
    return {combo for combo in itertools.combinations(S, m)}
def has_infrequent_subset(c, Lk_1, k):
    """Apriori prune test.

    Return True when some size-k subset of candidate `c` is absent from the
    frequent itemsets Lk_1 (whose members are sorted lists).
    """
    for combo in itertools.combinations(c, k):
        if sorted(combo) not in Lk_1:
            return True
    return False
def frequent_itemsets():
    """Level-wise Apriori main loop.

    Returns L, the list of frequent k-itemset collections for k >= 2; the
    module-level L1 seeds the iteration.  NOTE(review): `transactions` is
    recomputed locally by scanning D for every candidate, and the support
    percentage uses Python 2 floor division (see computeInitialItemset).
    """
    k = 2
    Lk_1 = []
    Lk = []
    L = []
    count = 0
    transactions = 0
    for item in L1:
        Lk_1.append(item)
    while Lk_1 != []:
        Ck = []
        Lk = []
        # NOTE(review): called with k-1 while Lk_1 holds (k-1)-itemsets, so
        # the `k` parameter inside apriori_gen is the itemset length.
        Ck = apriori_gen(Lk_1, k-1)
        for c in Ck:
            count = 0
            transactions = 0
            s = set(c)
            # Count supporting transactions by a full scan of D.
            for T in D:
                transactions += 1
                t = set(T)
                if s.issubset(t) == True:
                    count += 1
            if (100 * count/transactions) >= float(support):
                c.sort()
                Lk.append(c)
        Lk_1 = []
        print "\nFrequent Itemset: "+str(k)+" \t Elements: "+str(len(Lk))
        for i in Lk:
            print " "+str(i)
        for l in Lk:
            Lk_1.append(l)
        k += 1
        if Lk != []:
            L.append(Lk)
    return L
def generateAssociationRules():
    """Print every rule s -> (l - s) over the frequent itemsets whose
    confidence meets the global `confidence` threshold.

    Support and confidence are computed by rescanning D for each candidate
    rule; Python 2 floor division applies to the percentages.
    NOTE(review): 100*inc2/inc1 divides by zero if the antecedent never
    occurs — cannot happen for subsets of frequent itemsets, but a guard
    would make that explicit.
    """
    s = []
    r = []
    length = 0
    count = 1
    inc1 = 0
    inc2 = 0
    num = 1
    m = []
    L= frequent_itemsets()
    print ("\nAssosication Rules:")
    for list in L:          # NOTE(review): shadows the builtin `list`
        for l in list:
            length = len(l)
            count = 1
            # Try every proper, non-empty antecedent size.
            while count < length:
                s = []
                r = findsubsets(l,count)
                count += 1
                for item in r:
                    inc1 = 0  # transactions containing the antecedent s
                    inc2 = 0  # transactions containing the full itemset l
                    s = []
                    m = []
                    for i in item:
                        s.append(i)
                    for T in D:
                        if set(s).issubset(set(T)) == True:
                            inc1 += 1
                        if set(l).issubset(set(T)) == True:
                            inc2 += 1
                    if (100*inc2/inc1 >= float(confidence)):
                        # Consequent m = items of l not in the antecedent.
                        for index in l:
                            if index not in s:
                                m.append(index)
                        print (" Rule: %d \t %s -> %s \t Support: %d \t Confidence: %d" %(num, s, m, 100*inc2/len(D), 100*inc2/inc1))
                        num += 1
# --- script entry: module-level state shared by the functions above ---
C1 = {}           # 1-item occurrence counts, filled by loadFile
transactions = 0  # total transaction count (len(D))
D = []            # list of transactions (each a list of item strings)
T = []
L1=[]
# NOTE(review): Python 2 input() eval()s whatever the user types;
# raw_input() would be the safe choice for these prompts.
support = input("Enter Support in percentage %: ")
confidence = input("Enter Confidence in percentage %: ")
D=loadFile("DataSet.txt")
transactions=len(D)
L1=computeInitialItemset()
generateAssociationRules()
|
#!/usr/bin/python
from __future__ import print_function
import time
import argparse
import ConfigParser
import pprint
from scrapers.agis import AGIS # EOL is near
from scrapers.rebus import REBUS # EOL is near
from scrapers.cric import CRIC
from scrapers.grafana import Grafana
from scrapers.elasticsearch import ElasticSearch
from maps import PQ_names_map as pq_map
import logging
from commonHelpers.logger import logger
from commonHelpers import notifications
# do some configurations
config = ConfigParser.ConfigParser()
config.read("config.cfg")  # credentials for Grafana/ES/notifications
logger = logger.getChild("mephisto")
parser = argparse.ArgumentParser(description="Run a set of JSON/web scrapers")
parser.add_argument("--debug", action="store_true", help="print debug messages")
parser.add_argument(
    "-interval", default="10m", help="Defines which scrapers are being run"
)
# NOTE(review): this rebinds the module name `argparse` to the parsed
# Namespace; run() relies on that, so renaming would require changing both.
argparse = parser.parse_args()
if argparse.debug:
    logger.setLevel(logging.DEBUG)
def run():
    """Execute one scraper pass.

    The scraper set is selected by the module-level parsed arguments
    (`argparse.interval`): "10m" refreshes only the fast-changing PQ CRIC
    snapshot; "1h" runs the slower-changing site/DDM/REBUS/Grafana scrapers;
    anything else is a no-op.
    """
    # Each time the scrapers are run, we update the PQ map
    pqs = pq_map.PQ_names_map(file="data/map_PQ_names.json")
    if not pqs.update(
        ifile="data/scraped_cric_pandaqueue.json",
        ofile="data/map_PQ_names.json",
        key="panda_resource",
    ):
        logger.warning("PQ map is not available")
    if argparse.interval == "10m":
        # Now run all the scrapers that should run in 10min intervals
        # First the PQ CRIC information
        cric = CRIC()
        raw_data = cric.download(
            url="https://atlas-cric.cern.ch/api/atlas/pandaqueue/query/?json"
        )
        json_data = cric.convert(data=raw_data, sort_field="panda_resource")
        if cric.save(file="data/scraped_cric_pandaqueue.json", data=json_data):
            logger.info("Scraped PQ CRIC")
        else:
            logger.error("Problem scraping PQ CRIC")
    elif argparse.interval == "1h":
        # Run all the scrapers that only need to be run once per hour (because they don't change too often)
        # Next the ATLAS sites CRIC information
        cric = CRIC()
        raw_data = cric.download(
            url="https://atlas-cric.cern.ch/api/atlas/site/query/?json"
        )
        json_data = cric.convert(data=raw_data, sort_field="name")
        if cric.save(file="data/scraped_cric_sites.json", data=json_data):
            logger.info("Scraped sites CRIC")
        else:
            logger.error("Problem scraping sites CRIC")
        # Now the DDM info from CRIC
        raw_data = cric.download(
            url="https://atlas-cric.cern.ch/api/atlas/ddmendpoint/query/?json"
        )
        json_data = cric.convert(data=raw_data, sort_field="site")
        if cric.save(file="data/scraped_cric_ddm.json", data=json_data):
            logger.info("Scraped DDM CRIC")
        else:
            logger.error("Problem scraping DDM CRIC")
        # Next up is REBUS, start with the actual federation map
        rebus = REBUS()
        raw_data = rebus.download(
            url="https://wlcg-cric.cern.ch/api/core/federation/query/?json"
        )
        json_data = rebus.convert(data=raw_data, sort_field="rcsites")
        if rebus.save(file="data/scraped_rebus_federations.json", data=json_data):
            logger.info("Scraped federations CRIC")
        else:
            logger.error("Problem scraping federations CRIC")
        # then the pledges
        # can actually use same JSON raw data as before
        json_data = rebus.convert(
            data=raw_data, sort_field="accounting_name", append_mode=True
        )
        if rebus.save(file="data/scraped_rebus_pledges.json", data=json_data):
            logger.info("Scraped pledges CRIC")
        else:
            logger.error("Problem scraping pledges CRIC")
        # we also get datadisk information from monit Grafana
        url = config.get("credentials_monit_grafana", "url")
        token = config.get("credentials_monit_grafana", "token")
        # Query window: the 24h slice ending 12h ago, in epoch milliseconds.
        now = int(round(time.time() * 1000))
        date_to = now - 12 * 60 * 60 * 1000
        date_from = date_to - 24 * 60 * 60 * 1000
        period = """"gte":{0},"lte":{1}""".format(date_from, date_to)
        # Raw Elasticsearch multi-search body sent through Grafana's proxy.
        data = (
            """{"search_type":"query_then_fetch","ignore_unavailable":true,"index":["monit_prod_rucioacc_enr_site*"]}\n{"size":0,"query":{"bool":{"filter":[{"range":{"metadata.timestamp":{"""
            + period
            + ""","format":"epoch_millis"}}},{"query_string":{"analyze_wildcard":true,"query":"data.account:* AND data.campaign:* AND data.country:* AND data.cloud:* AND data.datatype:* AND data.datatype_grouped:* AND data.prod_step:* AND data.provenance:* AND data.rse:* AND data.scope:* AND data.experiment_site:* AND data.stream_name:* AND data.tier:* AND data.token:(\\\"ATLASDATADISK\\\" OR \\\"ATLASSCRATCHDISK\\\") AND data.tombstone:(\\\"primary\\\" OR \\\"secondary\\\") AND NOT(data.tombstone:UNKNOWN) AND data.rse:/.*().*/ AND NOT data.rse:/.*(none).*/"}}]}},"aggs":{"4":{"terms":{"field":"data.rse","size":500,"order":{"_term":"desc"},"min_doc_count":1},"aggs":{"1":{"sum":{"field":"data.files"}},"3":{"sum":{"field":"data.bytes"}}}}}}\n"""
        )
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/json",
            "Authorization": "Bearer %s" % token,
        }
        grafana = Grafana(url=url, request=data, headers=headers)
        raw_data = grafana.download()
        pprint.pprint(raw_data)
        json_data = grafana.convert(data=raw_data.json())
        if grafana.save(file="data/scraped_grafana_datadisk.json", data=json_data):
            logger.info("Scraped datadisks from monit grafana")
        else:
            logger.error("Problem scraping datadisks from monit grafana")
        # TODO: not running ES scraper for now since the benchmark jobs are no longer being run
        # #get credentials
        # password = config.get("credentials_elasticsearch", "password")
        # username = config.get("credentials_elasticsearch", "username")
        # host = config.get("credentials_elasticsearch", "host")
        # arg = ([{'host': host, 'port': 9200}])
        # elasticsearch = ElasticSearch(arg,**{'http_auth':(username, password)})
        # kwargs = {
        #     'index' : "benchmarks-*",
        #     'body' : {
        #         "size" : 10000,"query" : {"match_all" : {},},
        #         "collapse": {"field": "metadata.PanDAQueue","inner_hits": {"name": "most_recent","size": 50,"sort": [{"timestamp": "desc"}]}
        #         }
        #     },
        #     'filter_path' : [""]
        # }
        # raw_data = elasticsearch.download(**kwargs)
        # json_data = elasticsearch.convert(data=raw_data)
        #
        # if elasticsearch.save(file='data/scraped_elasticsearch_benchmark.json', data=json_data):
        #     logger.info('Scraped benchmark results from ES')
        # else:
        #     logger.error('Problem scraping benchmark results from ES')
    else:
        # Nothing to do otherwise
        print("Dropping out")
if __name__ == "__main__":
    try:
        run()
    # Python 2 except syntax; any scraper failure is emailed to operations.
    except Exception, e:
        logger.error("Got error while running scrapers. " + str(e))
        msg = "QMonit failed to run a scraper job.\n\nError:\n" + str(e)
        subj = "[QMonit error] InfluxDB"
        notifications.send_email(
            message=msg,
            subject=subj,
            **{"password": config.get("credentials_adcmon", "password")}
        )
|
from MongoNodeService import TxMongoNodeService
from RawStorageService import TxRawStorageService
class TxCms(object):
    '''
    storageConfig format:
    storageConfig = {
        'RawStorageService':{
            'ssid':'rootdir',
        },
    }
    '''
    def __init__(self,mongodb,storagesConfig):
        # Map of ssid -> storage backend instance.
        self.__storages = {}
        # Python 2 idioms (has_key/iteritems); only the RawStorageService
        # backend type is currently wired up.
        if storagesConfig.has_key('RawStorageService'):
            for i,v in storagesConfig['RawStorageService'].iteritems():
                self.__storages[i] = TxRawStorageService(v)
        # Node service keeps node metadata in Mongo and delegates blob
        # storage to the backends above.
        self.__nodes = TxMongoNodeService(mongodb.nodes,self.__storages)
    def getNodes(self):
        # Accessor for the node service.
        return self.__nodes
    def getSsids(self):
        # Available storage ids (keys of the backend map).
        return self.__storages.keys()
|
import re
import urllib
import urllib.request

import requests
from bs4 import BeautifulSoup
#定义一个getHtml()函数
def getHtml(url):
    """Open `url` and return the raw response body as bytes."""
    response = urllib.request.urlopen(url)
    body = response.read()
    return body
def getImg(link, html):
    """Download every PNG referenced by src="..." in `html`.

    Parameters
    ----------
    link : str
        Base URL that the (relative) image paths are appended to.
    html : bytes
        Raw page bytes as returned by getHtml().
    """
    html = html.decode('utf-8')  # Python 3: bytes -> str before regex work
    reg = r'src="(.+?\.png)"'    # capture relative .png paths
    imgre = re.compile(reg)
    imglist = re.findall(imgre, html)
    # Fetch each matched image with urlretrieve, naming the local copy after it.
    for imgurl in imglist:
        # BUG FIX: take the LAST path component as the file name; the original
        # split('/')[1] only worked for single-directory paths like "images/x.png".
        imName = imgurl.split('/')[-1]
        urllib.request.urlretrieve(link+imgurl,r'C:\Users\Administrator\Documents\MATLAB\pic\%s' % imName)
        print(imgurl)
# Download one known image directly with requests; the getHtml/getImg
# scraping path below is currently disabled.
link = "http://www.cs.huji.ac.il/~raananf/projects/dehaze_cl/results/#forest/images/cityscape_input.png"
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36"}
img_url = requests.get(link, headers=headers)
# Save the response body to the working directory.
f = open('02.png', 'wb')
f.write(img_url.content)
f.close()
# html = getHtml(link)
# getImg(link,html)
import abc
class Controller:
    """Abstract gamepad interface (Python 2-style ABC via __metaclass__).

    Concrete subclasses poll a physical or virtual controller and expose
    button/axis state through the query methods below.
    """
    __metaclass__ = abc.ABCMeta
    # Button identifiers (XBox-style layout).
    K_UP = 'UP'
    K_DOWN = 'DOWN'
    K_LEFT = 'LEFT'
    K_RIGHT = 'RIGHT'
    K_A = 'A'
    K_B = 'B'
    K_X = 'X'
    K_Y = 'Y'
    K_START = 'START'
    K_BACK = 'BACK'
    K_GUIDE = 'GUIDE'
    # Axis identifiers: right/left stick, horizontal/vertical.
    RS_H = 'RS_H'
    RS_V = 'RS_V'
    LS_H = 'LS_H'
    LS_V = 'LS_V'
    @abc.abstractmethod
    def update(self):
        # Refresh cached controller state; presumably called once per frame
        # by the game loop — confirm in subclasses.
        return
    @abc.abstractmethod
    def is_button_down(self, button_name):
        # True while the named button is held — confirm in subclasses.
        return
    @abc.abstractmethod
    def is_button_pressed(self, button_name):
        # Presumably edge-triggered (true only on the press transition),
        # as opposed to is_button_down — confirm in subclasses.
        return
    @abc.abstractmethod
    def get_axis(self, axis_name):
        # Raw analogue axis value.
        return
    @abc.abstractmethod
    def get_axis_digital_value(self, axis_name):
        # Axis value thresholded to a digital reading.
        return
|
import sys
from os import listdir
from os.path import isfile, join

import tensorflow as tf
# Frozen TF1 GraphDef of the retrained Inception classifier.
graph_file_name = '/root/projects/dogvscat/model/classify_image_graph_def.pb'
input_dir = '/root/projects/dogvscat/test'
prediction_list = []
# Label order must match the graph's softmax output order.
labels=['cat', 'dog']
# Bare file names (no directory component) of everything in input_dir.
image_files = [f for f in listdir(input_dir) if isfile(join(input_dir, f))]
def predict_on_image(image, labels):
    """Run the frozen retrain graph on one image file.

    Parameters
    ----------
    image : str
        Path to a JPEG file readable by tf.gfile.
    labels : list of str
        Class names ordered to match the graph's softmax outputs.

    Returns
    -------
    list of float
        The raw softmax scores (same contract as before: the full score
        list is returned, not the argmax label).
    """
    # Unpersists graph from file (TF1 GraphDef API).
    with tf.gfile.FastGFile(graph_file_name, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        # Read in the image_data
        image_data = tf.gfile.FastGFile(image, 'rb').read()
        try:
            predictions = sess.run(softmax_tensor, \
                                   {'DecodeJpeg/contents:0': image_data})
            prediction = predictions[0]
        except Exception as e:
            # BUG FIX: the original used a bare except and called sys.exit()
            # with `sys` never imported, so a failed prediction raised
            # NameError instead of exiting; the bare except also hid the cause.
            print("Error making prediction:", e)
            sys.exit(1)
        # Top classification (computed for reference; callers consume the
        # full score list).
        prediction = prediction.tolist()
        max_value = max(prediction)
        max_index = prediction.index(max_value)
        predicted_label = labels[max_index]
        return prediction
# Classify up to the first 1000 test images.
# BUG FIX: listdir() yields bare file names; they must be joined with
# input_dir to form a readable path (the isfile() filter above already did).
# Using a slice also avoids an IndexError when fewer than 1000 files exist.
for name in image_files[:1000]:
    prediction_list.append(predict_on_image(join(input_dir, name), labels))
prediction_list
|
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# Load MNIST with one-hot labels (TF1 tutorial helper).
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
# Placeholders are unshaped; fed with (batch, 784) images and (batch, 10) labels.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# Three sigmoid layers: 784 -> 784 -> 784 -> 1.
W = tf.Variable(tf.random_normal([784, 784]), name='weight')
b = tf.Variable(tf.random_normal([784]), name = 'bais')
layer1 = tf.sigmoid(tf.matmul(X,W)+b)
W1 = tf.Variable(tf.random_normal([784,784]), name='weight1')
b1 = tf.Variable(tf.random_normal([784]), name = 'bias1')
layer2 = tf.sigmoid(tf.matmul(layer1,W1)+b1)
# NOTE(review): the output layer has a single unit while the labels are
# 10-dimensional one-hot vectors — hypothesis ([batch, 1]) broadcasts
# against Y ([batch, 10]) in the cost, and arg_max over a width-1 axis is
# always 0, so the reported accuracy is not meaningful.  Presumably this
# should be a [784, 10] softmax output — confirm intent before fixing.
W2 = tf.Variable(tf.random_normal([784,1], name = 'weight2'))
b2 = tf.Variable(tf.random_normal([1], name = 'bais2'))
hypothesis = tf.sigmoid(tf.matmul(layer2,W2)+b2)
# Cross-entropy, positive-class term only.
cost = tf.reduce_mean(-tf.reduce_sum(Y*tf.log(hypothesis), axis = 1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.1).minimize(cost)
is_correct = tf.equal(tf.arg_max(hypothesis, 1), tf.arg_max(Y, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
training_epochs = 15
batch_size = 100
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Minibatch SGD; avg_cost accumulates the epoch's mean batch cost.
    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(mnist.train.num_examples/batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            c, _ = sess.run([cost, optimizer], feed_dict={X: batch_xs, Y: batch_ys})
            avg_cost += c/total_batch
        print('Epoch:', '%04d' % (epoch+1), 'cost =', '{:.9f}'.format(avg_cost))
    print("Accuracy: ",accuracy.eval(session = sess, feed_dict = {X: mnist.test.images, Y: mnist.test.labels}))
import numpy
from generalised_least_squares import *
# Element-wise maximum for numpy arrays.
# Improvement: numpy.maximum is the built-in ufunc with the same semantics as
# the original vectorized lambda `(x, y)[x < y]`, without np.vectorize's
# per-element Python overhead and first-element dtype guessing.
max_ = numpy.maximum
class unit_fo(object):
    """Constant basis function: always evaluates to 1.0."""
    def __call__(self, _x):
        return 1.0
class linear_fo(object):
    """Basis function selecting one explanatory variable: f(x) = x[i]."""
    def __init__(self, i):
        self.__index = i
    def __call__(self, x):
        return x[self.__index]
class quadratic_fo(object):
    """Basis function for a second-order term: f(x) = x[i] * x[j]."""
    def __init__(self, i, j):
        self.__first = i
        self.__second = j
    def __call__(self, x):
        return x[self.__first] * x[self.__second]
class n_quadratic_fo(object):
    """Full quadratic basis over num_expl_vars explanatory variables.

    Basis order: constant, then for each i the linear term x[i] followed by
    the cross terms x[i]*x[j] for j >= i.
    """
    def __init__(self, num_expl_vars):
        basis = [unit_fo()]
        for i in range(num_expl_vars):
            basis.append(linear_fo(i))
            for j in range(i, num_expl_vars):
                basis.append(quadratic_fo(i, j))
        self.__basis = basis
        self.__size = len(basis)
    def __call__(self, alphas, x):
        total = 0.0
        for idx in range(self.__size):
            total += alphas[idx] * self.__basis[idx](x)
        return total
    def fit_fos(self):
        """Return the ordered list of basis functions for the fitter."""
        return self.__basis
class fitted_fo(object):
    """Binds fitted coefficients to a basis family: y = fo(alphas, x)."""
    def __init__(self, alphas, fo):
        self.__coeffs = alphas
        self.__basis = fo
    def __call__(self, x):
        return self.__basis(self.__coeffs, x)
def fit(x, y):
    """Least-squares fit of a full quadratic surface to observations.

    x : 2d array, shape (num_obs, num_expl_vars); y : 1d array (num_obs,).
    Returns a fitted_fo callable mapping an explanatory-variable vector to
    its fitted value.  (Python 2 source: `<>` and raise-statement syntax.)
    """
    if len(x.shape) <> 2:
        raise RuntimeError, "Expected 'x' to be 2d array"
    if len(y.shape) <> 1:
        raise RuntimeError, "Expected 'y' to be 1d array"
    num_obs = x.shape[0]
    num_expl_vars = x.shape[1]
    if num_obs <> y.shape[0]:
        raise RuntimeError, "'y' array has wrong size"
    fo = n_quadratic_fo(num_expl_vars)
    # Unit weights: plain (unweighted) least squares.
    sig = numpy.zeros(num_obs)
    sig.fill(1.0)
    alphas = generalised_least_squares_fit(y, x, sig, fo.fit_fos())
    return fitted_fo(alphas, fo)
def evaluate_regression(x, fo):
    """Evaluate fitted function `fo` on every row of the 2d array x,
    returning the 1d array of fitted values."""
    if len(x.shape) <> 2:
        raise RuntimeError, "Expected 'x' to be a 2d array"
    num_obs = x.shape[0]
    y = numpy.zeros(num_obs)
    for i in range(num_obs):
        y[i] = fo(x[i, :])
    return y
def pickup_value_regression(ies, ns, vs):
    """Backward-induction regression of exercise pickup values.

    ies : (num_times, num_obs) immediate exercise values (deflated)
    ns  : (num_times, num_obs) numeraires
    vs  : (num_times, num_obs, num_expl_vars) explanatory variables
    Returns the list of fitted regression functions, earliest time first.

    NOTE(review): appears to be a Longstaff-Schwartz-style continuation/
    pickup-value regression stepping backwards through exercise times —
    confirm against the calling model.
    """
    if len(ies.shape) <> 2:
        raise RuntimeError, "Expected 'immediate exercise values' to be a 2d array"
    if len(ns.shape) <> 2:
        raise RuntimeError, "Expected 'numeraires' to be a 2d array"
    if len(vs.shape) <> 3:
        raise RuntimeError, "Expected 'explanatory variables' to be a 3d array"
    num_times = ies.shape[0]
    num_obs = ies.shape[1]
    num_expl_vars = vs.shape[2]
    if ns.shape[0] <> num_times or ns.shape[1] <> num_obs:
        raise RuntimeError, "'numeraires' array has wrong size"
    if vs.shape[0] <> num_times or vs.shape[1] <> num_obs:
        raise RuntimeError, "'explanatory variables' array has wrong size"
    fitted_fos = []
    zero = numpy.zeros(num_obs)
    H = numpy.zeros(num_obs) # holding value
    # Step backwards through exercise times, regressing the reinflated
    # pickup value on the explanatory variables at each step.
    for i in range(num_times-1,-1,-1):
        x = vs[i, :, :]
        n = ns[i, :]
        pv = n*(ies[i, :]-H) # reinflate by numeraire
        fit_fo = fit(x, pv)
        temp = evaluate_regression(x, fit_fo) # pickup value regression
        fitted_fos.insert(0, fit_fo)  # keep chronological order
        H += max_(temp/n, zero) # deflate by numeraire
    return fitted_fos
|
# coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import unittest
from hamcrest import assert_that, greater_than_or_equal_to, raises
from hamcrest import equal_to
from storops_test.vnx.nas_mock import t_nas, patch_nas
from storops.vnx.enums import VNXShareType
from storops.exception import VNXBackendError, VNXInvalidMoverID, \
VNXMoverInterfaceNotAttachedError, VNXMoverInterfaceNotExistsError
from storops.vnx.resource.vdm import VNXVdm
__author__ = 'Jay Xu'
class VNXVdmTest(unittest.TestCase):
    """Unit tests for VNXVdm (virtual data mover) resources.

    @patch_nas replays canned NAS backend responses, so every test runs
    offline against recorded output.
    """
    @patch_nas
    def test_get_all(self):
        vdm_list = VNXVdm.get(t_nas())
        assert_that(len(vdm_list), greater_than_or_equal_to(1))
        dm = next(dm for dm in vdm_list if dm.vdm_id == 2)
        self.verify_vdm_2(dm)
    @patch_nas
    def test_get_by_id_invalid(self):
        dm = VNXVdm.get(vdm_id=1, cli=t_nas())
        assert_that(dm.existed, equal_to(False))
    @patch_nas
    def test_get_by_id_2(self):
        dm = VNXVdm(vdm_id=2, cli=t_nas())
        self.verify_vdm_2(dm)
    @patch_nas
    def test_get_by_name(self):
        dm = VNXVdm.get(name='VDM_ESA', cli=t_nas())
        self.verify_vdm_2(dm)
    @patch_nas
    def test_get_by_name_not_found(self):
        dm = VNXVdm(name='not_found', cli=t_nas())
        assert_that(dm.existed, equal_to(False))
    @staticmethod
    def verify_vdm_2(dm):
        # Shared assertions for the canned VDM with id 2.
        assert_that(dm.root_fs_id, equal_to(199))
        assert_that(dm.mover_id, equal_to(1))
        assert_that(dm.name, equal_to('VDM_ESA'))
        assert_that(dm.existed, equal_to(True))
        assert_that(dm.vdm_id, equal_to(2))
        assert_that(dm.state, equal_to('loaded'))
        assert_that(dm.status, equal_to('ok'))
        assert_that(dm.is_vdm, equal_to(True))
    @patch_nas
    def test_create_vdm_invalid_mover_id(self):
        def f():
            VNXVdm.create(t_nas(), 3, 'myVdm')
        assert_that(f, raises(VNXInvalidMoverID))
    @patch_nas
    def test_create_vdm(self):
        dm = VNXVdm.create(t_nas(), 2, 'myVdm')
        assert_that(dm.name, equal_to('myVdm'))
        assert_that(dm.vdm_id, equal_to(3))
        assert_that(dm.mover_id, equal_to(2))
        assert_that(dm.root_fs_id, equal_to(245))
    @patch_nas
    def test_delete_vdm(self):
        dm = VNXVdm(vdm_id=3, cli=t_nas())
        resp = dm.delete()
        assert_that(resp.is_ok(), equal_to(True))
    @patch_nas
    def test_delete_vdm_not_found(self):
        def f():
            dm = VNXVdm(vdm_id=5, cli=t_nas())
            dm.delete()
        assert_that(f, raises(VNXBackendError, 'not found'))
    @patch_nas
    def test_attach_interface_success(self):
        dm = VNXVdm(name='myvdm', cli=t_nas())
        dm.attach_nfs_interface('1.1.1.1-0')
    @patch_nas
    def test_attach_interface_not_found(self):
        def f():
            dm = VNXVdm(name='myvdm', cli=t_nas())
            dm.attach_nfs_interface('1.1.1.2-0')
        assert_that(f, raises(VNXMoverInterfaceNotExistsError, 'not exist'))
    @patch_nas
    def test_detach_interface_success(self):
        dm = VNXVdm(name='myvdm', cli=t_nas())
        dm.detach_nfs_interface('1.1.1.1-0')
    @patch_nas
    def test_detach_interface_not_found(self):
        def f():
            dm = VNXVdm(name='myvdm', cli=t_nas())
            dm.detach_nfs_interface('1.1.1.2-0')
        assert_that(f, raises(VNXMoverInterfaceNotExistsError, 'not exist'))
    @patch_nas
    def test_detach_interface_not_attached(self):
        def f():
            dm = VNXVdm(name='myvdm', cli=t_nas())
            dm.detach_nfs_interface('1.1.1.3-0')
        assert_that(f, raises(VNXMoverInterfaceNotAttachedError, 'attached'))
    @patch_nas
    def test_get_interfaces(self):
        dm = VNXVdm(name='VDM_ESA', cli=t_nas())
        ifs = dm.get_interfaces()
        assert_that(len(ifs), equal_to(1))
        interface = ifs[0]
        assert_that(interface.name, equal_to('10-110-24-195'))
        assert_that(interface.share_type, equal_to(VNXShareType.NFS))
|
# Author: Spencer Mae-Croft
# Date: 08/31/2020
from name_function import get_formatted_name
# Interactive loop: keep formatting names until the user enters 'q'.
print("Enter 'q' at any time to quit the application.")
while True:
    first = input("\nPlease enter your first name: ")
    if first.lower() == 'q':
        break
    # BUG FIX: "you last name" -> "your last name" (prompt typo).
    last = input("\nPlease enter your last name: ")
    if last.lower() == 'q':
        break
    # BUG FIX: input was missing its call parentheses, which was a
    # SyntaxError and prevented the script from running at all.
    middle = input("\nPlease enter a middle name (enter blank to void): ")
    if middle:
        formatted_name = get_formatted_name(first, last, middle)
    else:
        formatted_name = get_formatted_name(first, last)
    print("\nNeatly formatted name: " + formatted_name + ".")
|
# LeetCode Medium
# Product of Array Except Self Question
# Must be solved in O(n) time and CANNOT use division
class Solution:
    """Product of Array Except Self (LeetCode medium).

    Must run in O(n) time and may not use division.

    BUG FIX: the annotations used ``List`` from ``typing`` without
    importing it, which raises NameError when the class body executes;
    the builtin generic ``list[int]`` is used instead.
    """
    # O(n) time.
    # O(1) extra space, since the returned array is not counted.
    def productExceptSelf(self, nums: list[int]) -> list[int]:
        """Return out where out[i] is the product of all nums except nums[i]."""
        n = len(nums)
        ret = [None]*n
        # First pass (left to right): ret[i] = product of everything left of i.
        left_product = 1
        for i in range(n):
            if i == 0:
                ret[i] = 1
            else:
                left_product *= nums[i-1]
                ret[i] = left_product
        # Second pass (right to left): fold in the product of everything
        # right of i; nothing to fold for the last element.
        right_product = 1
        for i in reversed(range(n)):
            if i != n-1:
                right_product *= nums[i+1]
                ret[i] *= right_product
        return ret
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.MicroPayOrderDetail import MicroPayOrderDetail
class AlipayMicropayOrderGetResponse(AlipayResponse):
    """Response wrapper for the alipay.micropay.order.get API call,
    exposing the order detail as a MicroPayOrderDetail model."""
    def __init__(self):
        super(AlipayMicropayOrderGetResponse, self).__init__()
        self._micro_pay_order_detail = None
    @property
    def micro_pay_order_detail(self):
        """The parsed order detail (MicroPayOrderDetail or None)."""
        return self._micro_pay_order_detail
    @micro_pay_order_detail.setter
    def micro_pay_order_detail(self, value):
        # Accept either a ready-made model instance or a raw dict payload.
        if not isinstance(value, MicroPayOrderDetail):
            value = MicroPayOrderDetail.from_alipay_dict(value)
        self._micro_pay_order_detail = value
    def parse_response_content(self, response_content):
        """Parse the raw response and populate the order detail field."""
        payload = super(AlipayMicropayOrderGetResponse, self).parse_response_content(response_content)
        if 'micro_pay_order_detail' in payload:
            self.micro_pay_order_detail = payload['micro_pay_order_detail']
|
"""
生成随机测试数据
"""
import numpy as np
from config import *
def gen_data(n=config_dense.data_size,
             input_dim=config_dense.input_dim,
             attention_column=config_dense.attention_column):
    """Generate random test data for the dense attention model.

    Data property:
        x[attention_column] = y
    The network should learn y = x[attention_column]; the data is
    constructed this way specifically to exercise the attention layer.
    Returns:
        x: [n, input_dim]
        y: [n, 1]
    """
    x = np.random.standard_normal(size=(n, input_dim))
    # Binary labels in {0, 1}.
    y = np.random.randint(low=0, high=2, size=(n, 1))
    # Plant the label into the attention column so attention can find it.
    x[:, attention_column] = y[:, 0]
    return x, y
def gen_time_data(n=config_lstm.data_size,
                  time_steps=config_lstm.time_steps,
                  input_dim=config_lstm.input_dim,
                  attention_column=config_lstm.attention_column):
    """Generate random sequence test data for the LSTM attention model.

    The label is planted at time step ``attention_column`` across all
    features, so attention should learn to focus on that step.
    Returns:
        x: [n, time_steps, input_dim]
        y: [n, 1]
    """
    x = np.random.standard_normal(size=(n, time_steps, input_dim))
    y = np.random.randint(low=0, high=2, size=(n, 1))
    # Broadcast each label across the feature dimension at the attention step.
    x[:, attention_column, :] = np.tile(y[:], (1, input_dim))
    return x, y
|
# -*- coding: utf-8 -*-
from django.db import models
import datetime
from django.utils import timezone
from cms.models import CMSPlugin
class Poll(models.Model):
    """A poll question with its publication date."""
    question = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    def __unicode__(self):
        return self.question
    def was_published_recently(self):
        """Return True if the poll was published within the last day.

        BUG FIX: ``timezone.now`` was missing its call parentheses, so
        the comparison raised TypeError (datetime vs. function) at runtime.
        """
        return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
class Choice(models.Model):
    # One selectable answer for a poll, with its running vote count.
    poll = models.ForeignKey(Poll)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
    def __unicode__(self):
        return self.choice_text
class PollPlugin(CMSPlugin):
    # django-cms plugin instance that embeds one Poll in a page.
    poll = models.ForeignKey('polls.Poll', related_name='plugins')
    def __unicode__(self):
        return self.poll.question
|
# SCons build configuration for the 'bawt' target.
vars = Variables()
# 'boost' accepts yes/no or an explicit installation prefix.
vars.Add(PackageVariable('boost', 'boost installation directory (should contain boost/ and lib/)', 'yes'))
vars.Add('compiler', 'compiler command to use', 'g++')
env = Environment(variables = vars)
# PackageVariable yields True when enabled without an explicit path;
# substitute a default prefix in that case.
# NOTE(review): '$boost/include' is appended below, so this default ends up
# as '/usr/local/include/include' — presumably '/usr/local' was intended;
# confirm before changing.
if env['boost'] == True:
    dir = '/usr/local/include'
    env['boost'] = dir
if env['boost']:
    env.Append(CPPPATH='$boost/include')
    env.Append(LIBPATH='$boost/lib')
env.Replace(CXX = '$compiler')
Help(vars.GenerateHelpText(env))
target = 'bawt'
buildDirectory = '.build'
# Out-of-tree build into .build/ without duplicating source files.
SConscript('src/SConscript', exports='env target', variant_dir = buildDirectory , duplicate = 0 )
# 'scons -c' also removes editor backup files and the build directory.
Clean('.', Glob("*~") + Glob('*/*~') + [ buildDirectory ] )
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
import random
doc = """
The English registration form for Public Goods Game
"""
class Constants(BaseConstants):
    # oTree session constants for the English PGG registration form.
    name_in_url = 'PGGRegiEN'
    players_per_group = 4
    num_rounds = 1
class Subsession(BaseSubsession):
    # No subsession-level logic needed for this app.
    pass
class Group(BaseGroup):
    # No group-level logic needed for this app.
    pass
class Player(BasePlayer):
    def role(self):
        """Return this player's role: ids 1-2 are 'low', all others 'high'."""
        if self.id_in_group in [1, 2]:
            return 'low'
        else:
            return 'high'
    # Preferred payoff rule (single radio choice).
    rule = models.PositiveIntegerField(
        choices=[
            [1, 'Equal sharing of the bonus'],
            [2, 'Equal payoff'],
            [3, 'No preference'],
        ],
        widget=widgets.RadioSelect()
    )
    # Stated reason for the rule chosen above.
    rulestr = models.PositiveIntegerField(
        choices=[
            [1, 'Seems most fair'],
            [2, 'Easier to understand'],
            [3, 'Payoffs should not be differentiated'],
            [4, 'Uncertain about contributions of other players'],
            [5, 'No preference'],
        ],
        widget=widgets.RadioSelect()
    )
|
from django.db import models
class Comments(models.Model):
    """A user comment with its creation timestamp."""
    text = models.TextField("Комментарий")
    created = models.DateTimeField("Дата добавления", auto_now_add=True, null=True)
    class Meta:
        verbose_name = "Комментарий"
        # BUG FIX: the plural verbose name was misspelled ('Коментарии',
        # missing the double 'м' present in the singular form).
        verbose_name_plural = 'Комментарии'
    def __str__(self):
        return self.text
# -*- coding: utf-8 -*-
import scrapy
class TopSeriesWeekSpider(scrapy.Spider):
    """Scrape AdoroCinema's weekly top TV-series list, following each
    series page to collect description and episode details."""
    name = 'top_series_week'
    start_urls = ['http://www.adorocinema.com/series-tv/top/']
    def parse(self, response):
        """Yield one request per series on the top list, carrying the
        series title through the request meta."""
        series = response.xpath('//a[@class="meta-title-link"][contains(@href, "/series/serie")]')
        for serie in series:
            serie_title = serie.xpath('./text()').extract_first()
            serie_link = serie.xpath('./@href').extract_first()
            yield scrapy.Request(
                url=response.urljoin(serie_link),
                callback=self.parse_series,
                meta={
                    'serie_title': serie_title,
                }
            )
    def parse_series(self, response):
        """Build the output item for one series page; missing episode
        blocks yield 'N/A' placeholders."""
        last_episode = response.xpath('//div[contains(@class, "prev-episode")]')
        next_episode = response.xpath('//div[@class="card-entity card-episode row row-col-padded-10"]')
        if last_episode:
            # BUG FIX: a trailing comma after extract_first() made
            # last_episode_title a 1-tuple instead of a string.
            last_episode_title = last_episode.xpath('.//div[@class="meta-title"]//span/text()').extract_first()
            last_episode_date = last_episode.xpath('.//div[@class="meta-body"]//strong/following-sibling::text()').extract_first().replace(', ', '')
        else:
            last_episode_title = 'N/A'
            last_episode_date = 'N/A'
        if next_episode:
            next_episode_title = next_episode.xpath('.//div[@class="meta-title"]//span/text()').extract_first()
            next_episode_date = next_episode.xpath('.//div[@class="meta-body"]//strong/following-sibling::text()').extract_first().replace(', ', '')
        else:
            next_episode_title = 'N/A'
            next_episode_date = 'N/A'
        yield {
            'Title': response.meta.get('serie_title'),
            'Description': response.xpath('//div[contains(@class, "content-txt")]/text()').extract_first(),
            'Seasons': response.xpath('//div[@class="stats-info"][contains(text(), "Temporadas")]/preceding-sibling::div/text()').extract_first(),
            'Episodes': response.xpath('//div[@class="stats-info"][contains(text(), "Epis")]/preceding-sibling::div/text()').extract_first(),
            'Last EP': last_episode_title,
            'Last EP date': last_episode_date,
            'Next EP': next_episode_title,
            'Next EP date': next_episode_date,
            'Serie link': response.url
        }
|
from base.vector3 import Vector3
from scene.objects.transformablesceneobject import TransformableSceneObject
class Screen(TransformableSceneObject):
    """A textured rectangle (two triangles) defined by four corner points."""
    # Vertex layout: 3 position floats + 2 UV floats, no byte attributes.
    floats_per_vertex = 5
    chars_per_vertex = 0
    bytes_per_vertex = floats_per_vertex*4 + chars_per_vertex*1
    def __init__(self, a, b, c, d):
        """Store the four corner points (order: a, b, c, d)."""
        TransformableSceneObject.__init__(self)
        self.corners = [a, b, c, d]
    @property
    def num_verts(self):
        # Two triangles -> six vertices.
        return 6
    @property
    def shader_info(self):
        """Interleaved position+UV vertex data for the two triangles."""
        shader_data = [
            self.corners[0][0], self.corners[0][1], self.corners[0][2], 0.0, 1.0,
            self.corners[1][0], self.corners[1][1], self.corners[1][2], 1.0, 1.0,
            self.corners[2][0], self.corners[2][1], self.corners[2][2], 0.0, 0.0,
            self.corners[2][0], self.corners[2][1], self.corners[2][2], 0.0, 0.0,
            self.corners[1][0], self.corners[1][1], self.corners[1][2], 1.0, 1.0,
            self.corners[3][0], self.corners[3][1], self.corners[3][2], 1.0, 0.0,
        ]
        return shader_data
    @staticmethod
    def flat(location, width, height):
        """Build a flat (y=0) screen of the given size around *location*.

        BUG FIX: this was declared @staticmethod but still took ``self``,
        making every call fail or misbind arguments; also the computed
        half-extents ``w``/``h`` were ignored and the full width/height
        were used, producing a screen twice the requested size.
        NOTE(review): every coordinate derives from location[0]; the
        y/z components of *location* were presumably intended — confirm
        against callers before changing.
        """
        w = width/2
        h = height/2
        return Screen(
            Vector3(location[0] - w, 0.0, location[0] + h),
            Vector3(location[0] + w, 0.0, location[0] + h),
            Vector3(location[0] - w, 0.0, location[0] - h),
            Vector3(location[0] + w, 0.0, location[0] - h)
        )
|
# coding=utf-8
from flask import Flask
# Minimal Flask demo application with two static endpoints.
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
    # Landing page: static HTML heading.
    return '<h1>Index</h1>'
@app.route('/hello', methods=['GET'])
def hello():
    # Simple greeting endpoint.
    return '<h1>Hello</h1>'
if __name__ == '__main__':
    # Run Flask's built-in development server (not for production).
    app.run()
|
from flask import Flask, render_template
from flask_sockets import Sockets
from GDT import *
import json, yaml
# Flask app that serves a map page and streams stored tweets over a websocket.
app = Flask(__name__)
sockets = Sockets(app)
# DB connection info and bounding-box coordinates come from config.yml.
config = yaml.safe_load(open('config.yml', 'r'))
gdt = GDT(config['db']['connection'], config['db']['datatype'],
          config['coordinates']['sw'], config['coordinates']['ne'])
@app.route('/')
def root():
    # Serve the map page.
    return render_template('map.html')
@sockets.route('/tweets')
def send_tweets(ws):
    # Stream every stored item over the websocket as one JSON message each.
    # NOTE(review): reads GDT's private attribute _table — consider a
    # public accessor on GDT instead.
    result = gdt._table.all()
    for item in result:
        # Timestamps are not JSON-serializable; stringify first.
        item['timestamp'] = str(item['timestamp'])
        ws.send(json.dumps(item))
if __name__ == '__main__':
    app.debug = True
    app.run()
|
import configparser
from selenium import webdriver
import os.path
from framework.logger import Logger
import time
logger = Logger(logger="BrowserEngine").getlog()
class BrowserEngine(object):
    """Create and quit Selenium WebDriver instances based on the browser
    type and URL configured in config/config.ini."""
    # Driver executables, resolved relative to the project root.
    dir = os.path.dirname(os.path.abspath('.'))
    chrome_driver_path = dir + '/tools/chromedriver.exe'
    # BUG FIX: firefox_driver_path and ie_driver_path were referenced in
    # open_browser() but never defined, causing AttributeError.
    firefox_driver_path = dir + '/tools/geckodriver.exe'
    ie_driver_path = dir + '/tools/IEDriverServer.exe'
    def __init__(self, driver):
        self.driver = driver
    def open_browser(self, driver):
        """Read the browser type and test URL from config.ini, start the
        browser, open the URL, and return the configured driver.

        Raises ValueError for an unsupported browser type.
        """
        config = configparser.ConfigParser()
        file_path = os.path.dirname(os.path.abspath('.'))+'/config/config.ini'
        config.read(file_path)
        browser = config.get("browserType","browserName")
        logger.info("You had select %s borwser." %browser)
        url = config.get("testServer","URL")
        logger.info("The test server url is %s."% url)
        if browser =="Firefox":
            driver = webdriver.Firefox(self.firefox_driver_path)
            logger.info("Starting firefox browser")
        elif browser =="Chrome":
            driver=webdriver.Chrome(self.chrome_driver_path)
            logger.info("Starting chrome browser")
        elif browser =="IE":
            # BUG FIX: the IE branch previously launched Chrome with the
            # IE driver path; use the Internet Explorer driver.
            driver = webdriver.Ie(self.ie_driver_path)
            logger.info("Starting ie browser")
        else:
            # BUG FIX: an unknown browser type previously fell through and
            # crashed later with UnboundLocalError on `driver`.
            raise ValueError("Unsupported browser type: %s" % browser)
        driver.get(url)
        logger.info("Open url:%s"%url)
        driver.maximize_window()
        logger.info("Maximize the current window")
        driver.implicitly_wait(10)
        logger.info("Set implicitly wait 10 seconds")
        return driver
    def quit_browser(self,driver):
        """Close all windows and end the WebDriver session."""
        logger.info("Now Close and quit the borwser")
        driver.quit()
|
#!/usr/env python
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
import hashlib
import time
import re
from struct import *
import random
from datetime import datetime
from util import *
from hashdb import *
from getinfo import *
import Queue
class BittorrentProtocol(DatagramProtocol):
    """A BitTorrent DHT crawler (Python 2 / Twisted).

    Joins the DHT via *bootnodes*, walks the node space with find_node
    queries, and records info-hashes revealed by incoming get_peers and
    announce_peer queries. For each new hash a GetinfoProtocol task is
    started on a port from a small pool to fetch torrent metadata, which
    is then stored through HashDB.
    """
    max_tasks = 10   # max concurrent metadata-fetch tasks
    min_port = 6882  # inclusive start of the task port pool
    max_port = 6891  # inclusive end of the task port pool
    def __init__(self, bootnodes=()):
        # sessions: transaction id -> query type we sent (awaiting a reply)
        # nodes: node id -> (host, port) of known DHT nodes
        self.id = gen_id()
        self.sessions = {}
        self.nodes = {}
        self.bootnodes = bootnodes
        self.unvisitednodes = []
        for host, port in bootnodes:
            self.unvisitednodes.append((host, port))
        self.hashdb = HashDB()
        self.hashq = Queue.Queue()
        # tasks: info_hash -> (port, GetinfoProtocol)
        # portmap: port -> active UDP listener, or None when free
        self.tasks = {}
        self.portmap = {}
        for i in xrange(BittorrentProtocol.min_port, BittorrentProtocol.max_port+1):
            self.portmap[i] = None
    def startProtocol(self):
        # Drive crawling and task management from a 3-second periodic loop.
        self.lc = LoopingCall(self.loop)
        self.lc.start(3)
    def stopProtocol(self):
        self.lc.stop()
        self.hashdb.release()
    def write(self, ip, port, data):
        # Used as a deferred callback: reactor.resolve(host) supplies `ip`.
        self.transport.write(data, (ip, port))
    def datagramReceived(self, data, (host, port)):
        """Decode an incoming KRPC datagram and dispatch replies ('r')
        and queries ('q') to the appropriate handlers."""
        data = bytes(data)
        bd = bdecode(data)
        if bd == None:
            # self.taskmgr.receive(data, (host, port))
            return
        rmsg, rm = bd
        tid = rmsg['t']
        if rmsg['y'] == 'r':
            # Only handle replies to transactions we initiated ourselves.
            if (tid in self.sessions) == False:
                return
            mtype = self.sessions[tid]
            del self.sessions[tid]
            if mtype == 'ping':
                self.nodes[rmsg['r']['id']] = (host, port)
            elif mtype == 'find_node':
                self.handle_rfindnode(rmsg)
                # reactor.callLater(int(random.random()*10), self.handle_rfindnode, rmsg)
            elif mtype == 'get_peers':
                pass
            elif mtype == 'announce_peer':
                pass
        elif rmsg['y'] == 'q':
            # Answer peers' queries; get_peers/announce_peer reveal hashes.
            if rmsg['q'] == 'ping':
                self.nodes[rmsg['a']['id']] = (host, port)
                self.rping(tid, (host, port))
            elif rmsg['q'] == 'find_node':
                self.rfind_node(tid, rmsg['a']['target'], (host, port))
            elif rmsg['q'] == 'get_peers':
                self.rget_peers(tid, rmsg['a']['info_hash'], (host, port))
                self.found_hash(rmsg['a']['info_hash'])
            elif rmsg['q'] == 'announce_peer':
                self.rannounce_peer(tid, (host, port))
                self.found_hash(rmsg['a']['info_hash'])
    def find_node(self, target, (host, port)):
        """Send a find_node query for *target* to (host, port)."""
        tid = gentid()
        self.sessions[tid] = 'find_node'
        msg = {
            "t": tid,
            "y": "q",
            "q": "find_node",
            "a": {
                "id": self.id,
                "target": target,
            }
        }
        bmsg = bencode(msg)
        reactor.resolve(host).addCallback(self.write, port, bmsg)
    def rfind_node(self, tid, target, (host, port)):
        """Reply to a find_node query with up to 8 known nodes in the
        compact node-info format (20-byte id + 4-byte IP + 2-byte port)."""
        nodes = ''
        k = 8
        for i in self.nodes:
            if k == 0:
                break
            k -= 1
            h, p = self.nodes[i]
            nodes += i
            bytes = map(int, h.split('.'))
            for b in bytes:
                nodes += pack('B', b)
            nodes += pack('>H', p)
        msg = {
            "t": tid,
            "y": "r",
            "r": {
                "id": self.id,
                "nodes": nodes,
            }
        }
        bmsg = bencode(msg)
        reactor.resolve(host).addCallback(self.write, port, bmsg)
    def ping(self, (host, port)):
        """Send a ping query to (host, port)."""
        tid = gentid()
        self.sessions[tid] = 'ping'
        msg = {
            "t": tid,
            "y": "q",
            "q": "ping",
            "a": {
                "id": self.id
            }
        }
        bmsg = bencode(msg)
        reactor.resolve(host).addCallback(self.write, port, bmsg)
    def rping(self, tid, (host, port)):
        """Reply to a ping query with our node id."""
        msg = {
            "t": tid,
            "y": "r",
            "r": {
                "id": self.id
            }
        }
        bmsg = bencode(msg)
        reactor.resolve(host).addCallback(self.write, port, bmsg)
    def get_peers(self, info_hash, (host, port)):
        """Send a get_peers query for *info_hash* to (host, port)."""
        tid = gentid()
        self.sessions[tid] = 'get_peers'
        msg = {
            "t": tid,
            "y": "q",
            "q": "get_peers",
            "a": {
                "id": self.id,
                "info_hash": info_hash
            }
        }
        bmsg = bencode(msg)
        reactor.resolve(host).addCallback(self.write, port, bmsg)
    def rget_peers(self, tid, info_hash, (host, port)):
        """Reply to get_peers with up to 8 known nodes (we never return
        actual peer values) plus a freshly generated token."""
        nodes = ''
        k = 8
        for i in self.nodes:
            if k == 0:
                break
            k -= 1
            h, p = self.nodes[i]
            nodes += i
            bytes = map(int, h.split('.'))
            for b in bytes:
                nodes += pack('B', b)
            nodes += pack('>H', p)
        msg = {
            "t": tid,
            "y": "r",
            "r": {
                "id": self.id,
                "token": gen_id(),
                "nodes": nodes,
            }
        }
        bmsg = bencode(msg)
        reactor.resolve(host).addCallback(self.write, port, bmsg)
    def handle_rgetpeers(self, info_hash, rmsg):
        """Process a get_peers reply: decode compact 'nodes' and peer
        'values' entries (currently only printed, not acted upon)."""
        if 'nodes' in rmsg['r']:
            for i in xrange(0, len(rmsg['r']['nodes']), 26):
                nid, compact = unpack('>20s6s', rmsg['r']['nodes'][i:i+26])
                ip, port = decompact(compact)
        if 'values' in rmsg['r']:
            for compact in rmsg['r']['values']:
                if len(compact) != 6:
                    continue
                ip, port = decompact(compact)
                # self.taskmgr.new_task(info_hash, ip, port)
                print hexstr(info_hash), ip, port
    def announce_peer(self, info_hash, (host, port)):
        # TODO
        pass
    def rannounce_peer(self, tid, (host, port)):
        """Acknowledge an announce_peer query with our node id."""
        msg = {
            "t": tid,
            "y": "r",
            "r": {
                "id": self.id
            }
        }
        bmsg = bencode(msg)
        reactor.resolve(host).addCallback(self.write, port, bmsg)
    def handle_rfindnode(self, rmsg):
        """Process a find_node reply: unpack the compact node list and
        queue every previously unseen node for a future visit."""
        if ('nodes' in rmsg['r']) == False:
            return
        nodes = rmsg['r']['nodes']
        for i in xrange(0, len(nodes), 26):
            info = nodes[i:i+26]
            nid, compact = unpack('>20s6s', info)
            ip, port = decompact(compact)
            if (nid in self.nodes) == False:
                # self.find_node(gen_id(), (ip, port))
                # reactor.callLater(random.random()*60, self.find_node, gen_id(), (ip, port))
                self.unvisitednodes.append((ip, port))
                self.nodes[nid] = (ip, port)
    def found_hash(self, info_hash):
        # Queue a hash for metadata download unless already stored.
        if self.hashdb.exist(info_hash):
            return
        self.hashq.put(info_hash)
    def loop(self):
        """Periodic driver: visit up to 16 queued nodes, reap finished or
        timed-out metadata tasks (freeing their ports), and start new
        tasks from the hash queue while ports are available."""
        t = 16
        while t > 0:
            t -= 1
            if len(self.unvisitednodes) == 0:
                break
            host, port = self.unvisitednodes.pop()
            self.find_node(gen_id(), (host, port))
        for info_hash in self.tasks.keys():
            port, task = self.tasks[info_hash]
            if task.finish():
                self.hashdb.insert_hash(hexstr(info_hash), task.get_result())
            if task.timeout() or task.finish():
                # Release the UDP port so a new task can reuse it.
                self.portmap[port].stopListening()
                self.portmap[port] = None
                del self.tasks[info_hash]
        while len(self.tasks) < BittorrentProtocol.max_tasks and not self.hashq.empty():
            info_hash = self.hashq.get()
            if info_hash in self.tasks:
                continue
            task = GetinfoProtocol(info_hash, self.bootnodes)
            for p in self.portmap.keys():
                if self.portmap[p] == None:
                    self.portmap[p] = reactor.listenUDP(p, task)
                    self.tasks[info_hash] = (p, task)
                    break
def monitor(p):
    # Periodic status line: report how many DHT nodes we currently know.
    print "[%s] got %d nodes" % (datetime.now(), len(p.nodes))
def main():
    # Bootstrap from the public BitTorrent router node and start crawling.
    boots = (('router.bittorrent.com', 6881),)
    p = BittorrentProtocol(boots)
    # lc = LoopingCall(monitor, p)
    # lc.start(5)
    reactor.listenUDP(6881, p)
    reactor.run()
if __name__ == '__main__':
    main()
|
# Generated by Django 3.1.7 on 2021-04-12 21:26
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the notes app: adds the
    # 'urgent' flag and alters the 'date_complete' default.
    dependencies = [
        ('notes', '0004_auto_20210412_2355'),
    ]
    operations = [
        migrations.AddField(
            model_name='note',
            name='urgent',
            field=models.BooleanField(default=0, verbose_name='Важно или нет'),
        ),
        migrations.AlterField(
            model_name='note',
            name='date_complete',
            # NOTE(review): this default is a fixed timestamp captured when
            # the migration was generated — every new row gets this same
            # value, not "now". Confirm this is intentional.
            field=models.DateTimeField(default=datetime.datetime(2021, 4, 14, 0, 26, 6, 621642), verbose_name='Когда выполнить'),
        ),
    ]
|
#!/usr/bin/python
"""
Parse YNAB4's budget data to work out how much is left in the current month.
Designed for an Alfred 2 Workflow
Written by James Seward 2013-07; http://jamesoff.net; @jamesoff
Thanks to @ppiixx for pointing out/fixing the rollover problem :)
BSD licenced, have fun.
Uses the alp library from https://github.com/phyllisstein/alp; thanks Daniel!
"""
import json
import datetime
import os.path
import datetime
import locale
import alp
def handle_error(title, subtitle, icon = "icon-no.png", debug = ""):
    """Show an Alfred error item, log the details, and terminate.

    BUG FIX: ``sys`` was used here but never imported anywhere in the
    module, so every error path crashed with NameError instead of
    exiting cleanly.
    """
    import sys
    i = alp.Item(title = title, subtitle = subtitle, icon = icon)
    alp.feedback(i)
    alp.log("Handled error: %s, %s\n%s" % (title, subtitle, debug))
    sys.exit(0)
def find_budget(path):
# Look in the ymeta file to find our data directory
try:
fh = open(os.path.join(path, "Budget.ymeta"), "r")
info = json.load(fh)
fh.close()
except Exception, e:
if fp:
fp.close()
handle_error("Unable to find budget file :(", path, "icon-no.png", e)
folder_name = info["relativeDataFolderName"]
# Now look in the devices folder, and find a folder which has full knowledge
devices_path = os.path.join(path, folder_name, "devices")
devices = os.listdir(devices_path)
use_folder = ""
try:
for device in devices:
fh = open(os.path.join(devices_path, device))
device_info = json.load(fh)
if device_info["hasFullKnowledge"]:
use_folder = device_info["deviceGUID"]
break
except Exception, e:
handle_error("Unable to read budget data", "Parse error looking for full knowledge", "icon-no.png", e)
if use_folder == "":
handle_error("Unable to find usable budget data", "", "icon-no.png")
return os.path.join(path, folder_name, use_folder)
def load_budget(path):
try:
fp = open(os.path.join(path, "Budget.yfull"), "r")
data = json.load(fp)
fp.close()
except Exception, e:
if fp:
fp.close()
handle_error("Unable to find budget file :(", path, "icon-no.png", e)
return data
def get_currency_symbol(data):
    """Configure the process locale from the budget's currency locale.

    Despite the name, this returns nothing: it only calls
    locale.setlocale() so that locale.currency() formats amounts
    correctly later. Failures are deliberately ignored, falling back to
    the default locale.
    """
    try:
        currency_locale = data["budgetMetaData"]["currencyLocale"]
        locale.setlocale(locale.LC_ALL, locale.normalize(currency_locale))
    except Exception, e:
        pass
def all_categories(data):
all = []
try:
master_categories = data["masterCategories"]
for master_category in master_categories:
if master_category["name"] in ["Pre-YNAB Debt", "Hidden Categories"]:
continue
sub_categories = master_category["subCategories"]
if sub_categories != None:
for sub_category in master_category["subCategories"]:
if "isTombstone" in sub_category and sub_category["isTombstone"]:
continue
all.append({"entityId": sub_category["entityId"], "name": sub_category["name"]})
except Exception, e:
handle_error("Error reading budget categories", "", "icon-no.png", e)
return all
def find_category(data, category_name):
entityId = ""
try:
master_categories = data["masterCategories"]
for master_category in master_categories:
sub_categories = master_category["subCategories"]
if sub_categories != None:
for sub_category in master_category["subCategories"]:
if sub_category["name"] == category_name and not "isTombstone" in sub_category and not sub_category["isTombstone"]:
entityId = sub_category["entityId"]
break
if entityId != "":
break
if entityId == "":
pass
except Exception, e:
pass
if entityId == "":
handle_error("Error finding budget category", "", "icon-no.png", e)
return entityId
def find_budgeted(data, entityId):
    """Sum the amounts budgeted to category *entityId* over every month
    up to and including the current one (implements rollover).

    Exits via handle_error() on parse failure.
    """
    budgeted = 0
    try:
        monthly_budgets = data["monthlyBudgets"]
        # Sort chronologically so we can stop at the first future month.
        monthly_budgets = sorted(monthly_budgets, key=lambda k: k["month"])
        now = datetime.date.today()
        for budget in monthly_budgets:
            # "month" is formatted YYYY-MM-...; slice out year and month.
            year = int(budget["month"][0:4])
            month = int(budget["month"][5:7])
            budget_month = datetime.date(year, month, 1)
            if budget_month > now:
                # Now we've reached the future so time to stop
                break
            subcategory_budgets = budget["monthlySubCategoryBudgets"]
            for subcategory_budget in subcategory_budgets:
                if subcategory_budget["categoryId"] == entityId:
                    budgeted += subcategory_budget["budgeted"]
    except Exception, e:
        handle_error("Error finding budget value", "", "icon-no.png", e)
    return budgeted
def walk_transactions(data, categoryId, balance):
    """Replay every transaction for *categoryId* on top of *balance* and
    return the resulting balance; split transactions are handled via
    their subtransactions.

    NOTE(review): only the *presence* of "isTombstone" is tested here,
    not its value — elsewhere in this file the value is checked too.
    Confirm against the data format.
    """
    try:
        transactions = data["transactions"]
        for transaction in transactions:
            # Check for subtransactions
            if transaction["categoryId"] == "Category/__Split__":
                for sub_transaction in transaction["subTransactions"]:
                    if sub_transaction["categoryId"] == categoryId and not "isTombstone" in sub_transaction:
                        balance += sub_transaction["amount"]
            else:
                if transaction["categoryId"] == categoryId and not "isTombstone" in transaction:
                    balance += transaction["amount"]
    except Exception, e:
        handle_error("Error finding budget balance", "", "icon-no.png", e)
    return balance
def check_for_budget(path):
    """Probe *path* for a YNAB budget bundle.

    Ignores .DS_Store and Exports entries; when exactly one candidate
    folder remains, delegate to find_budget() and return its result.
    Returns "" when the path is missing or ambiguous.
    """
    if not os.path.exists(path):
        return ""
    entries = os.listdir(path)
    for ignored in (".DS_Store", "Exports"):
        if ignored in entries:
            entries.remove(ignored)
    if len(entries) != 1:
        return ""
    return find_budget(os.path.join(path, entries[0]))
if __name__ == "__main__":
    # If we have a setting for the location, use that
    s = alp.Settings()
    path = s.get("budget_path", "")
    if not path == "":
        path = find_budget(path)
    # Else, we guess...
    # First we look in Dropbox
    if path == "":
        path = check_for_budget(os.path.expanduser("~/Dropbox/YNAB"))
    # Then we look locally
    if path == "":
        path = check_for_budget(os.path.expanduser("~/Documents/YNAB"))
    # Then we give up
    if path == "":
        handle_error("Unable to guess budget location", "Use Alfred's File Action on your budget file to configure", "icon-no.png")
    # Load data
    data = load_budget(path)
    get_currency_symbol(data)
    all = all_categories(data)
    # Fuzzy-match the user's Alfred query against category names.
    query = alp.args()[0]
    results = alp.fuzzy_search(query, all, key = lambda x: '%s' % x["name"])
    items = []
    for r in results:
        # Find category ID matching our requirement
        entityId = r["entityId"]
        if entityId == "":
            pass
        else:
            # Find the starting balance of our category
            starting_balance = find_budgeted(data, entityId)
            # Replay the transactions
            ending_balance = walk_transactions(data, entityId, starting_balance)
            if ending_balance == None:
                ending_balance = 0
            # Pick the feedback text/icon based on the remaining balance.
            if ending_balance < 0:
                ending_text = "Overspent on %s this month!"
                icon = "icon-no.png"
            elif ending_balance == 0:
                ending_text = "No budget left for %s this month"
                icon = "icon-no.png"
            else:
                ending_text = "Remaining balance for %s this month"
                icon = "icon-yes.png"
            try:
                # Prefer locale-aware currency formatting; fall back to a
                # plain two-decimal float on any formatting failure.
                i = alp.Item(title=locale.currency(ending_balance, True, True).decode("latin1"), subtitle = ending_text % r["name"], uid = entityId, valid = False, icon = icon)
            except Exception, e:
                i = alp.Item(title="%0.2f" % ending_balance, subtitle = ending_text % r["name"], uid = entityId, valid = False, icon = icon)
            items.append(i)
    alp.feedback(items)
|
#!/usr/bin/env python
import modeltools.hycom
import modeltools.tools
import argparse
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
import abfile
import numpy
import netCDF4
import logging
import re
import cfunits
import os
import os.path
# Set up logger: stream handler with timestamped format at INFO level.
_loglevel=logging.INFO
logger = logging.getLogger(__name__)
logger.setLevel(_loglevel)
formatter = logging.Formatter("%(asctime)s - %(name)10s - %(levelname)7s: %(message)s")
ch = logging.StreamHandler()
ch.setLevel(_loglevel)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Avoid duplicate messages if the root logger also has handlers.
logger.propagate=False
def check_grids(plon,plon2,plat,plat2) :
    """Verify that two lon/lat grids coincide within 1e-4 degrees.

    Raises ValueError on mismatch (after logging the max differences).
    """
    # Check grids match
    maxdlon = numpy.amax(numpy.abs(plon -plon2 ))
    maxdlat = numpy.amax(numpy.abs(plat -plat2 ))
    if maxdlon > 1e-4 or maxdlat > 1e-4 :
        msg="grid file mismatch max lon diff =%g , max lat diff = %g"%(maxdlon,maxdlat)
        logger.error(msg)
        raise ValueError,msg
def check_depths(depth,depth2):
# Check depths match. NB: Since central region can be filled, we only check
# where depth > 0
tmp1=depth>.1
tmp2=depth2>.2
tmp1=numpy.logical_and(tmp1,tmp2)
maxddep = numpy.amax(numpy.abs(depth-depth2)[tmp1])
if maxddep > 1e-4 :
msg="depth file mismatch max diff =%g , max lat diff = %g"%(maxddep)
logger.error(msg)
raise ValueError,msg
def cf_time_to_datetime(times,time_units) :
    """Convert CF-convention time values (*times*, expressed in
    *time_units*) into a list of datetime objects.

    NOTE: resolution is whole seconds (values truncated via int()).
    """
    # Time processing
    tmp=cfunits.Units(time_units)
    refy, refm, refd=(1950,1,1) # Reference time for this routine
    tmp2=cfunits.Units("seconds since %d-%d-%d 00:00:00"%(refy,refm,refd)) # Units from CF convention
    tmp3=cfunits.Units.conform(times,tmp,tmp2) # Transform to new new unit (known to this routine)
    # Then calculate dt. Phew!
    mydt = [ datetime.datetime(refy,refm,refd,0,0,0) +
            datetime.timedelta(seconds=int(elem)) for elem in tmp3]
    return mydt
def diff_in_seconds(deltat) :
    """Return the timedelta *deltat* as whole seconds (float), ignoring
    the microseconds component."""
    whole_days_as_seconds = 86400. * deltat.days
    return whole_days_as_seconds + deltat.seconds
def main(tide_file,archv_files,include_uv=False):
# 1) If this routine is called without any archive files (empty list), then
# Files suitable for barotropic nesting only are created. The new archive files are then
# chosen to match times in tide file.
# 2) If routines are called with archive files, then times matching the archive file times are
# sought from the tide file. It they are found, srfhgt and montg1 are adjusted
# to match the new tidal data.
# Read plon,plat and depth from regional files. Mainly used to check that
# grid is ok ...
logger.info("Opening regional.grid.[ab]")
gfile=abfile.ABFileGrid("regional.grid","r")
plon=gfile.read_field("plon")
plat=gfile.read_field("plat")
pang=gfile.read_field("pang") # For rotation of tidal current
gfile.close()
logger.info("Opening regional.depth.[ab]")
bathyfile=abfile.ABFileBathy("regional.depth","r",idm=gfile.idm,jdm=gfile.jdm,mask=True)
depth=bathyfile.read_field("depth")
bathyfile.close()
depth = depth.filled(0.)
ip=depth>0.0
iu=numpy.copy(ip)
iu[:,1:] = numpy.logical_and(iu[:,1:],iu[:,0:-1])
iv=numpy.copy(ip)
iv[1:,:] = numpy.logical_and(iv[1:,:],iv[0:-1,:])
# Open netcdf file, get time variable and some basic stuff
print os.getcwd(),tide_file
logger.info("Opening %s"%tide_file)
nc_h = netCDF4.Dataset(tide_file,"r")
plon_h=nc_h.variables["longitude"][:]
plat_h=nc_h.variables["latitude"][:]
depth_h=nc_h.variables["depth"][:]
check_grids(plon,plon_h,plat,plat_h)
check_depths(depth,depth_h)
# Time processing for tidal elevations
time_h=nc_h.variables["time"][:]
tunit = nc_h.variables["time"].units
mydt_h = cf_time_to_datetime(time_h,tunit)
if include_uv :
m=re.match("^(.*)_h.nc$",tide_file)
if m :
tide_file_u = m.group(1)+"_u.nc"
else :
msg="Unable to guesstimate tidal u component from tidsl heights file %s "%tide_file_h
logger.error(msg)
raise ValueError,msg
m=re.match("^(.*)_h.nc$",tide_file)
if m :
tide_file_v = m.group(1)+"_v.nc"
else :
msg="Unable to guesstimate tidal u component from tidsl heights file %s "%tide_file_h
logger.error(msg)
raise ValueError,msg
logger.info("Opening %s"%tide_file_u)
nc_u = netCDF4.Dataset(tide_file_u,"r")
plon_u=nc_u.variables["longitude"][:]
plat_u=nc_u.variables["latitude"][:]
depth_u=nc_u.variables["depth"][:]
check_grids(plon,plon_u,plat,plat_u)
check_depths(depth,depth_u)
# Time processing for tidal elevations
time_u=nc_u.variables["time"][:]
tunit = nc_u.variables["time"].units
mydt_u = cf_time_to_datetime(time_u,tunit)
logger.info("Opening %s"%tide_file_v)
nc_v = netCDF4.Dataset(tide_file_v,"r")
plon_v=nc_v.variables["longitude"][:]
plat_v=nc_v.variables["latitude"][:]
depth_v=nc_v.variables["depth"][:]
check_grids(plon,plon_v,plat,plat_v)
check_depths(depth,depth_v)
# Time processing for tidal elevations
time_v=nc_v.variables["time"][:]
tunit = nc_v.variables["time"].units
mydt_v = cf_time_to_datetime(time_v,tunit)
# restriction for now, u and v must have same time steps as h
# TODO: Loosen restriction
try :
difftu=[abs(diff_in_seconds(elem[0]-elem[1])) for elem in zip(mydt_h,mydt_u)]
difftv=[abs(diff_in_seconds(elem[0]-elem[1])) for elem in zip(mydt_h,mydt_v)]
except:
# Probably due to size mismatch, but could be more descriptive.
# TODO: Add more descriptive error message
msg="Error when subtracting times from u/v from h. Check your data"
logger.error(msg)
raise ValueError,msg
#print difftu
#print difftv
if any([ elem > 10. for elem in difftu]) or any([ elem > 10. for elem in difftv]):
msg="Times in tidal u/v vs tidal h mismatch. Time series must be estimated at the same times"
logger.error(msg)
raise ValueError,msg
# Create output dir.
path0=os.path.join(".","archv_with_tide")
if os.path.exists(path0) and os.path.isdir(path0) :
pass
else :
os.mkdir(path0)
# Open blkdat files. Get some properties
bp=modeltools.hycom.BlkdatParser("blkdat.input")
idm = bp["idm"]
jdm = bp["jdm"]
kdm = bp["kdm"]
thflag = bp["thflag"]
thbase = bp["thbase"]
kapref = bp["kapref"]
iversn = bp["iversn"]
iexpt = bp["iexpt"]
yrflag = bp["yrflag"]
thref=1e-3
if kapref == -1 :
kapnum = 2
msg="Only kapref>=0 is implemented for now"
logger.error(msg)
raise ValueError,msg
else :
kapnum = 1
if kapnum > 1 :
msg="Only kapnum=1 is implemented for now"
logger.error(msg)
raise ValueError,msg
# hycom sigma and kappa, written in python. NB: sigver is not used here.
# Modify to use other equations of state. For now we assume sigver is:
# 1 (7-term eqs referenced to 0 bar)
# 2 (7-term eqs referenced to 2000 bar)
if thflag == 0 :
sigver=1
else :
sigver=2
sig = modeltools.hycom.Sigma(thflag)
if kapref > 0 : kappa = modeltools.hycom.Kappa(kapref,thflag*1000.0e4) #
# Now loop through tide_times
for rec,tide_time in enumerate(mydt_h) :
# Construct archive file name to create
iy = tide_time.year
id,ih,isec = modeltools.hycom.datetime_to_ordinal(tide_time,yrflag)
archv_file_in_string = "archv.%04d_%03d_%02d"%(iy,id,ih)
# Is there match for this file name in list of archive files?
I=[elem for elem in archv_files if os.path.basename(elem)[:17] == archv_file_in_string ]
state_from_archv=len(I)>0
if state_from_archv : archv_file_in =I[0]
# Output file name
fnameout = os.path.join(path0,os.path.basename(archv_file_in_string))
arcfile_out=abfile.ABFileArchv(fnameout,"w",
iversn=iversn,
yrflag=yrflag,
iexpt=iexpt,mask=False,
cline1="TIDAL data has been added")
tide_h=numpy.copy(nc_h.variables["h"][rec,:,:])
tide_h=numpy.where(tide_h==nc_h.variables["h"]._FillValue,0.,tide_h)
#print tide_h.min(),tide_h.max()
if include_uv :
tide_u=numpy.copy(nc_u.variables["u"][rec,:,:])
tide_v=numpy.copy(nc_v.variables["v"][rec,:,:])
#print tide_u.min(),tide_u.max()
#print tide_v.min(),tide_u.max()
tide_u=numpy.where(tide_u==nc_u.variables["u"]._FillValue,0.,tide_u)
tide_v=numpy.where(tide_v==nc_v.variables["v"]._FillValue,0.,tide_v)
# Rotate vectors to align with grid
tide_u= tide_u*numpy.cos(pang) + tide_v*numpy.sin(pang)
tide_v=-tide_u*numpy.sin(pang) + tide_v*numpy.cos(pang) #tide_v=tide_u*numpy.cos(pang+.5*numpy.pi) + tide_v*numpy.sin(pang+.5*numpy.pi)
# From P-point to u. 2nd dim in python = 1st dim in Fortran
tide_u[:,1:] =.5*(tide_u[:,1:] + tide_u[:,0:-1])
tide_u=numpy.where(iu,tide_u,0.)
# From P-point to v. 1st dim in python = 2nd dim in Fortran
tide_v[1:,:] =.5*(tide_v[1:,:] + tide_v[0:-1,:])
tide_v=numpy.where(iv,tide_v,0.)
if state_from_archv :
logger.info("Adding tidal values to existing state:%s"%arcfile_out.basename)
arcfile=abfile.ABFileArchv(archv_file_in,"r")
if arcfile.idm <> plon.shape[1] or arcfile.jdm <> plon.shape[0] :
msg="Grid size mismatch between %s and %s "%(tide_file,archv_file_in)
# Read all layers .. (TODO: If there are memory problems, read and estimate sequentially)
temp = numpy.ma.zeros((jdm,idm)) # Only needed when calculating density
saln = numpy.ma.zeros((jdm,idm)) # Only needed when calculating density
th3d =numpy.ma.zeros((kdm,jdm,idm))
thstar=numpy.ma.zeros((kdm,jdm,idm))
dp =numpy.ma.zeros((jdm,idm))
p =numpy.ma.zeros((kdm+1,jdm,idm))
logger.info("Reading layers to get thstar and p")
for k in range(kdm) :
logger.debug("Reading layer %d from %s"%(k,archv_file_in))
temp =arcfile.read_field("temp",k+1)
saln =arcfile.read_field("salin",k+1)
#dp [k ,:,:]=arcfile.read_field("thknss",k+1)
dp [:,:]=arcfile.read_field("thknss",k+1)
th3d [k ,:,:]=sig.sig(temp,saln) - thbase
p [k+1,:,:]= p[k,:,:] + dp[:,:]
thstar[k ,:,:]=numpy.ma.copy(th3d [k ,:,:])
if kapref > 0 :
thstar[k ,:,:]=thstar [k ,:,:] + kappa.kappaf(
temp[:,:], saln[:,:], th3d[k,:,:]+thbase, p[k,:,:])
elif kapref < 0 :
msg="Only kapref>=0 is implemented for now"
logger.error(msg)
raise ValueError,msg
# Read montg1 and srfhgt, and set new values
# ... we have ...
# montg1 = montgc + montgpb * pbavg
# srfhgt = montg1 + thref*pbavg
# ...
montg1 = arcfile.read_field("montg1",thflag)
srfhgt = arcfile.read_field("srfhgt",0)
# New surface height -
montg1pb=modeltools.hycom.montg1_pb(thstar,p)
montg1 = montg1 + montg1pb * modeltools.hycom.onem * tide_h
srfhgt = montg1 + thref*tide_h*modeltools.hycom.onem
# Barotrpic velocities
if include_uv :
ubavg = arcfile.read_field("u_btrop",0)
vbavg = arcfile.read_field("v_btrop",0)
ubavg = ubavg + tide_u
vbavg = vbavg + tide_v
# Loop through original fields and write
for key in sorted(arcfile.fields.keys()) :
fieldname = arcfile.fields[key]["field"]
time_step = arcfile.fields[key]["step"]
model_day = arcfile.fields[key]["day"]
k = arcfile.fields[key]["k"]
dens = arcfile.fields[key]["dens"]
fld =arcfile.read_field(fieldname,k)
if fieldname == "montg1" :
logger.info("Writing field %10s at level %3d to %s (modified)"%(fieldname,k,fnameout))
arcfile_out.write_field(montg1,None,fieldname,time_step,model_day,sigver,thbase)
elif fieldname == "srfhgt" :
logger.info("Writing field %10s at level %3d to %s (modified)"%(fieldname,k,fnameout))
arcfile_out.write_field(srfhgt,None,fieldname,time_step,model_day,sigver,thbase)
elif fieldname == "u_btrop" and include_uv :
logger.info("Writing field %10s at level %3d to %s (modified)"%(fieldname,k,fnameout))
arcfile_out.write_field(ubavg,None,fieldname,time_step,model_day,sigver,thbase)
elif fieldname == "v_btrop" and include_uv :
logger.info("Writing field %10s at level %3d to %s (modified)"%(fieldname,k,fnameout))
arcfile_out.write_field(vbavg,None,fieldname,time_step,model_day,sigver,thbase)
else :
arcfile_out.write_field(fld ,None,fieldname,time_step,model_day,k,dens)
#logger.info("Writing field %10s at level %3d to %s (copy from original)"%(fieldname,k,fnameout))
arcfile.close()
else :
logger.info("Crating archv file with tidal data :%s"%arcfile_out.basename)
montg1=numpy.zeros((jdm,idm,))
srfhgt=tide_h*modeltools.hycom.onem*thref
arcfile_out.write_field(montg1,None,"montg1",0,0.,sigver,thbase)
arcfile_out.write_field(srfhgt,None,"srfhgt",0,0.,0,0.0)
# Write 9 empty surface fields so that forfun.F can understand these files .... TODO: Fix in hycom
arcfile_out.write_field(montg1,None,"surflx",0,0.,0,0.0)
arcfile_out.write_field(montg1,None,"salflx",0,0.,0,0.0)
arcfile_out.write_field(montg1,None,"bl_dpth",0,0.,0,0.0)
arcfile_out.write_field(montg1,None,"mix_dpth",0,0.,0,0.0)
if include_uv :
ubavg = tide_u
vbavg = tide_v
arcfile_out.write_field(ubavg ,None,"u_btrop" ,0,0.,0,0.0)
arcfile_out.write_field(vbavg ,None,"v_btrop" ,0,0.,0,0.0)
logger.info("Finished writing to %s"%fnameout)
arcfile_out.close()
logger.info("Files containing tidal data in directory %s"%path0)
logger.warning("Sigver assumed to be those of 7 term eqs")
logger.warning(" 1 for sigma-0/thflag=0, 2 for sigma-2/thflag=2")
if __name__ == "__main__" :
    class PointParseAction(argparse.Action) :
        """argparse Action that parses a "lon,lat" string and appends the
        [float, float] pair to the destination list."""
        def __call__(self, parser, args, values, option_string=None):
            # keep only the first two comma-separated numbers
            tmp = values[0].split(",")
            tmp = [float(elem) for elem in tmp[0:2]]
            # NOTE(review): assumes the argument was declared with default=[]
            # — getattr returns None otherwise and .append would fail; confirm
            # against the argument definition (none visible in this parser).
            tmp1= getattr(args, self.dest)
            tmp1.append(tmp)
            setattr(args, self.dest, tmp1)
    parser = argparse.ArgumentParser(description="""This routine will add
previously generated tidal data to a archv file. The resulting file can be
used as input to a nested hycom simulation""")
    # optional flag: also add tidal u/v to the barotropic velocities
    parser.add_argument('--include-uv' , action="store_true", default=False,
        help="Also add tidal u and v components")
    parser.add_argument('tide_file', type=str)
    parser.add_argument('archv', type=str,nargs="*")
    args = parser.parse_args()
    main(args.tide_file,args.archv,include_uv=args.include_uv)
|
from rest_framework import viewsets
from .serializer import TaskSerializer
from task.models import Task
class TaskListViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoint exposing every Task through TaskSerializer."""
    queryset = Task.objects.all()
    serializer_class = TaskSerializer
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : dataset.py
@Contact : xxzhang16@fudan.edu.cn
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2021/8/9 19:52 zxx 1.0 None
'''
# import lib
from torch.utils.data import DataLoader, Dataset
import torch
import numpy as np
import os
import json
# Sentinel tag names for sequence boundaries; TagDataset also keeps its own
# copies as instance attributes.
START_TAG = "<START>"
STOP_TAG = "<STOP>"
class TagDataset(Dataset):
    """Dataset for sequence tagging on tab-separated data files.

    Each raw line is "<python-list-of-words>\t<python-list-of-labels>".
    The class builds (or loads) word/label vocabularies and converts each
    sentence and label sequence into index lists, caching both vocab and
    processed data as JSON under ``cache_dir``.
    """
    def __init__(self, path=None, f_name=None, cache_dir='./cache', from_cache=False, just4Vocab=False):
        """
        Args:
            path: directory containing the raw data file.
            f_name: raw data file name, e.g. ``train.txt``.
            cache_dir: directory holding the vocab / data JSON caches.
            from_cache: load processed data from cache instead of re-parsing.
            just4Vocab: only build and save the vocabulary, skip data processing.
        """
        super(TagDataset, self).__init__()
        self.path = path
        self.f_name = f_name
        self.cache_dir = cache_dir
        self.START_TAG = "<START>" # these two belong to the label (tag) vocabulary
        self.STOP_TAG = "<STOP>"
        self.UNK_TAG = "<UNK>"  # out-of-vocabulary placeholder for words
        self.word2idx = {}
        self.idx2word = {}
        self.label2idx = {}
        self.idx2label = {}
        if just4Vocab:
            self.createVocab(path, f_name)
        else:
            # vocabulary must already exist on disk (built via just4Vocab=True)
            with open(os.path.join(cache_dir, 'unique_vocab_cache.json'), 'r') as jr:
                dic = json.load(jr)
                self.word2idx = dic['word2idx']
                self.idx2word = dic['idx2word']
                self.label2idx = dic['label2idx']
                self.idx2label = dic['idx2label']
            if not from_cache:
                self.data = self._process(path, f_name)
                data_cache = json.dumps(self.data, indent=4)
                with open(os.path.join(cache_dir, f_name.split('.')[0] + '_cache.json'), 'w') as jw:
                    jw.write(data_cache)
            else:
                self._from_cache(cache_dir, f_name)

    def createVocab(self, path, f_name):
        """Scan the raw file and build word/label vocabularies, then save
        them to ``cache_dir/unique_vocab_cache.json``.

        Index 0 is reserved for '<PAD>' in both vocabularies; idx2* keys are
        stored as strings so they survive the JSON round-trip.
        """
        self.word2idx = {'<PAD>': 0}
        self.idx2word = {'0': '<PAD>'}
        word_cnt = 1
        self.label2idx = {'<PAD>': 0}
        self.idx2label = {'0': '<PAD>'}
        label_cnt = 1
        with open(os.path.join(path, f_name), 'r') as fr:
            for line in fr:
                temp = line.strip('\n').lower().split('\t')
                # NOTE(review): eval on file content is unsafe for untrusted
                # data — ast.literal_eval would be the safe equivalent.
                raw_sentence_lst = eval(temp[0])
                word_set = set(raw_sentence_lst)
                for word in word_set:
                    if self.word2idx.get(word, -1) == -1:
                        self.word2idx[word] = word_cnt
                        self.idx2word[str(word_cnt)] = word
                        word_cnt += 1
                raw_label_lst = eval(temp[1])
                label_set = set(raw_label_lst)
                for label in label_set:
                    if self.label2idx.get(label, -1) == -1:
                        self.label2idx[label] = label_cnt
                        self.idx2label[str(label_cnt)] = label
                        label_cnt += 1
        # START/STOP sentinels go into the label vocab; <UNK> into the words
        self.vocab_insert(self.START_TAG, self.label2idx, self.idx2label)
        self.vocab_insert(self.STOP_TAG, self.label2idx, self.idx2label)
        self.vocab_insert(self.UNK_TAG, self.word2idx, self.idx2word)
        vocab_cache = json.dumps({
            'word2idx': self.word2idx,
            'idx2word': self.idx2word,
            'label2idx': self.label2idx,
            'idx2label': self.idx2label
        }, indent=4)
        with open(os.path.join(self.cache_dir, 'unique_vocab_cache.json'), 'w') as jw:
            jw.write(vocab_cache)

    def _from_cache(self, cache_dir, f_name):
        """Load previously processed data from the JSON cache."""
        with open(os.path.join(cache_dir, f_name.split('.')[0] + '_cache.json'), 'r') as jr:
            self.data = json.load(jr)

    def encode(self, sentence, word2idx):
        """Map a token sequence to indices (raises KeyError on OOV tokens)."""
        return [word2idx[w] for w in sentence]

    def decode(self, idxs, idx2word):
        """Map indices back to tokens (idx2word keys are strings)."""
        return [idx2word[str(i)] for i in idxs]

    def vocab_insert(self, new_word, word2idx, idx2word):
        """Append new_word to a vocab pair, assigning the next free index.

        Raises:
            ValueError: if the word is already present.
        """
        if word2idx.get(new_word, -1) == -1:
            pos = len(word2idx)
            word2idx[new_word] = pos
            idx2word[str(pos)] = new_word
        else:
            raise ValueError("该词已存在")

    def _process(self, path, f_name):
        """Parse the raw file into index lists.

        Returns:
            dict with 'sentences' (word indices, unknown words mapped to
            <UNK>), 'labels' (label indices) and 'lengths'.
        """
        sentences = []
        labels = []
        lengths = []
        with open(os.path.join(path, f_name), 'r') as fr:
            for line in fr:
                temp = line.strip('\n').lower().split('\t')
                # NOTE(review): eval on file content is unsafe for untrusted
                # data — ast.literal_eval would be the safe equivalent.
                raw_sentence_lst = eval(temp[0])
                sentence_lst = []
                for word in raw_sentence_lst:
                    sentence_lst.append(self.word2idx.get(word, self.word2idx['<UNK>']))
                raw_label_lst = eval(temp[1])
                label_lst = []
                for label in raw_label_lst:
                    # labels are a closed set: unseen labels raise KeyError
                    label_lst.append(self.label2idx[label])
                sentences.append(sentence_lst)
                labels.append(label_lst)
                lengths.append(len(sentence_lst))
        return {'sentences': sentences, 'labels': labels, 'lengths': lengths}

    def __len__(self):
        """Number of samples (sanity-checks that all three lists agree)."""
        assert len(self.data['sentences']) == len(self.data['labels']) == len(self.data['lengths'])
        return len(self.data['labels'])

    def __getitem__(self, index):
        """Return one un-padded sample dict: sentence, label, length."""
        sample = {
            'sentence': self.data['sentences'][index],
            'label': self.data['labels'][index],
            'length': self.data['lengths'][index],
        }
        return sample
def collate_func(batch_dic):
    """Collate per-sample dicts into padded batch tensors.

    Args:
        batch_dic: list of sample dicts with 'sentence', 'label', 'length'.

    Returns:
        dict with batch-first padded 'sentence'/'label' long tensors, a
        byte 'mask' marking real (non-pad) positions, and a 'length' tensor.
    """
    from torch.nn.utils.rnn import pad_sequence
    n_samples = len(batch_dic)
    longest = max(sample['length'] for sample in batch_dic)
    mask = torch.zeros((n_samples, longest)).byte()
    sentences, labels, lengths = [], [], []
    for row, sample in enumerate(batch_dic):
        sentences.append(torch.tensor(sample['sentence'], dtype=torch.long))
        labels.append(torch.tensor(sample['label'], dtype=torch.long))
        # mark the first `length` positions of this row as valid
        mask[row, :sample['length']] = 1
        lengths.append(sample['length'])
    return {
        'sentence': pad_sequence(sentences, batch_first=True),
        'label': pad_sequence(labels, batch_first=True),
        'mask': mask,
        'length': torch.tensor(lengths, dtype=torch.long),
    }
if __name__ == '__main__':
    # Smoke test: load the CoNLL-2003 train split, batch it, and print a
    # few decoded batches.
    data = TagDataset('conll2003', 'train.txt')
    dataloader = DataLoader(data, batch_size=8, shuffle=True, collate_fn=collate_func)
    for i_batch, batch_data in enumerate(dataloader):
        print(i_batch)
        print(batch_data['sentence'])
        for word in batch_data['sentence'][0]:
            # NOTE(review): idx2word keys are strings (see createVocab's
            # str(word_cnt)), but word.item() is an int — this lookup likely
            # raises KeyError; str(word.item()) would match decode(). Confirm.
            print(data.idx2word[word.item()])
        print(batch_data['label'])
        # print(batch_data['mask'])
        if i_batch > 2:
            break
import sys
class BarkClient:
    """Minimal Bark client stub."""

    def authenticate(self, username, password):
        """Log that authenticate was called (stub — no real auth happens).

        Fixes the original Python-2 ``print >> sys.stdout '...'`` statement,
        which was a syntax error even on Python 2 (missing comma); the
        truncated dangling ``def`` that followed the method is dropped.
        """
        print('Bark Client Authenticate Called', file=sys.stdout)
|
# moving average smoothing as feature engineering
from pandas import Series
from pandas import DataFrame
from pandas import concat
from pandas import read_csv

# BUGFIX: Series.from_csv was deprecated in pandas 0.21 and removed in 1.0;
# read_csv with index_col/parse_dates + squeeze is the supported equivalent.
series = read_csv('daily-total-female-births.csv', header=0, index_col=0,
                  parse_dates=True).squeeze('columns')
df = DataFrame(series.values)
width = 3
# previous observation (t-1)
lag1 = df.shift(1)
# shift so the rolling window only sees values strictly before t+1
lag3 = df.shift(width - 1)
window = lag3.rolling(window=width)
means = window.mean()
# columns: rolling mean of past values, previous value, current value
dataframe = concat([means, lag1, df], axis=1)
dataframe.columns = ['mean', 't', 't+1']
print(dataframe.head(10))
#coding: utf-8
from __future__ import division, absolute_import, print_function, unicode_literals
from .version import version
# kasaya client calls
# NOTE(review): `async` became a reserved keyword in Python 3.7, so this
# import line is a SyntaxError on modern Python — this module is
# Python 2 / <=3.6 only as written.
from kasaya.core.client import sync, async, trans, control
# worker task decorator
from kasaya.core.worker.decorators import *
# worker class
from kasaya.core.worker.worker_daemon import WorkerDaemon
def balanced(input_string):
    """Return True iff the curly braces in input_string are balanced.

    Non-brace characters are ignored. The input is echoed to stdout
    (kept from the original implementation).
    """
    print(input_string)
    depth = 0
    for ch in input_string:
        if ch == '{':
            depth += 1
        elif ch == '}':
            # a closer with nothing open can never be balanced
            if depth == 0:
                return False
            depth -= 1
    return depth == 0
#testing |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-10 23:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change Company.logo's upload path to media/uploads/logo/."""

    dependencies = [
        ('buildboard_app', '0012_auto_20160810_2303'),
    ]

    operations = [
        migrations.AlterField(
            model_name='company',
            name='logo',
            field=models.ImageField(upload_to='media/uploads/logo/'),
        ),
    ]
|
import torch.nn as nn
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
kaiming_init)
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
from ..utils.activations import Swish
from ..utils.se_block import SE
class MBConv(nn.Module):
    """Mobile inverted Bottleneck block with Squeeze-and-Excitation (SE).

    Structure: optional 1x1 expansion -> depthwise conv -> SE ->
    1x1 linear projection, with an identity skip connection when the
    input/output shapes match.

    Args:
        input_width (int): Number of input filters.
        output_width (int): Number of output filters.
        stride (int): stride of the first block.
        exp_ratio (int): Expansion ratio..
        kernel (int): Kernel size of the dwise conv.
        se_ratio (float): Ratio of the Squeeze-and-Excitation (SE).
            Default: 0.25
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
    """
    def __init__(self,
                 input_width,
                 output_width,
                 stride,
                 exp_ratio,
                 kernel,
                 se_ratio=0.25,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super().__init__()
        # 1x1 pointwise expansion; stays None when exp_ratio == 1
        self.exp = None
        exp_width = int(input_width * exp_ratio)
        if exp_width != input_width:
            self.exp = build_conv_layer(
                conv_cfg,
                input_width,
                exp_width,
                1,
                stride=1,
                padding=0,
                bias=False)
            self.exp_bn_name, exp_bn = build_norm_layer(
                norm_cfg, exp_width, postfix='exp')
            self.add_module(self.exp_bn_name, exp_bn)
            self.exp_swish = Swish()
        # depthwise conv: groups == channels, 'same'-style padding
        dwise_args = {
            'groups': exp_width,
            'padding': (kernel - 1) // 2,
            'bias': False
        }
        self.dwise = build_conv_layer(
            conv_cfg,
            exp_width,
            exp_width,
            kernel,
            stride=stride,
            **dwise_args)
        self.dwise_bn_name, dwise_bn = build_norm_layer(
            norm_cfg, exp_width, postfix='dwise')
        self.add_module(self.dwise_bn_name, dwise_bn)
        self.dwise_swish = Swish()
        # SE width is derived from the block INPUT width, not exp_width
        self.se = SE(exp_width, int(input_width * se_ratio))
        # 1x1 linear projection back to output_width (no activation after)
        self.lin_proj = build_conv_layer(
            conv_cfg,
            exp_width,
            output_width,
            1,
            stride=1,
            padding=0,
            bias=False)
        self.lin_proj_bn_name, lin_proj_bn = build_norm_layer(
            norm_cfg, output_width, postfix='lin_proj')
        self.add_module(self.lin_proj_bn_name, lin_proj_bn)
        # Skip connection if in and out shapes are the same (MN-V2 style)
        self.has_skip = (stride == 1 and input_width == output_width)

    @property
    def dwise_bn(self):
        """BN layer following the depthwise conv (stored via add_module)."""
        return getattr(self, self.dwise_bn_name)

    @property
    def exp_bn(self):
        """BN layer following the expansion conv (only exists if self.exp)."""
        return getattr(self, self.exp_bn_name)

    @property
    def lin_proj_bn(self):
        """BN layer following the linear projection conv."""
        return getattr(self, self.lin_proj_bn_name)

    def forward(self, x):
        """Run expansion -> depthwise -> SE -> projection, plus identity
        skip when input/output shapes match."""
        f_x = x
        if self.exp:
            f_x = self.exp_swish(self.exp_bn(self.exp(f_x)))
        f_x = self.dwise_swish(self.dwise_bn(self.dwise(f_x)))
        f_x = self.se(f_x)
        # no activation after the linear projection (by design)
        f_x = self.lin_proj_bn(self.lin_proj(f_x))
        if self.has_skip:
            f_x = x + f_x
        return f_x
class EfficientLayer(nn.Sequential):
    """EfficientLayer to build EfficientNet style backbone.

    Stacks ``depth`` MBConv blocks. Only the first block applies ``stride``
    and maps ``input_width`` -> ``output_width``; the remaining blocks are
    stride-1, ``output_width`` -> ``output_width``.

    Args:
        input_width (int): Number of input filters.
        output_width (int): Number of output filters.
        depth (int): Number of Mobile inverted Bottleneck blocks.
        stride (int): stride of the first block.
        exp_ratio (int):
            Expansion ratios of the MBConv blocks.
        kernel (int):
            Kernel size of the dwise conv of the MBConv blocks.
        se_ratio (float): Ratio of the Squeeze-and-Excitation (SE) blocks.
            Default: 0.25
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
    """

    def __init__(self,
                 input_width,
                 output_width,
                 depth,
                 stride,
                 exp_ratio,
                 kernel,
                 se_ratio=0.25,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        layers = []
        for d in range(depth):
            block_stride = stride if d == 0 else 1
            block_width = input_width if d == 0 else output_width
            layers.append(
                MBConv(
                    input_width=block_width,
                    output_width=output_width,
                    stride=block_stride,
                    exp_ratio=exp_ratio,
                    kernel=kernel,
                    se_ratio=se_ratio,
                    # BUGFIX: conv_cfg/norm_cfg were accepted by this class
                    # but never forwarded, so custom conv/norm configs
                    # silently fell back to MBConv's defaults.
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg))
        super().__init__(*layers)
@BACKBONES.register_module()
class EfficientNet(nn.Module):
    """EfficientNet backbone.
    More details can be found in:
    `paper <https://arxiv.org/abs/1905.11946>`_ .
    Args:
        scale (int): Compund scale of EfficientNet.
            From {0, 1, 2, 3, 4, 5} (only these appear in arch_settings).
        in_channels (int): Number of input image channels.
            Default: 3.
        base_channels (int): Number of channels of the stem layer.
            Default: 32
        strides (Sequence[int]):
            Strides of the first block of each EfficientLayer.
            Default: (1, 2, 2, 2, 1, 2, 1)
        exp_ratios (Sequence[int]):
            Expansion ratios of the MBConv blocks.
            Default: (1, 6, 6, 6, 6, 6, 6)
        kernels (Sequence[int]):
            Kernel size for the dwise conv of the MBConv blocks.
            Default: (3, 3, 5, 3, 5, 5, 3)
        se_ratio (float): Ratio of the Squeeze-and-Excitation (SE) blocks.
            Default: 0.25
        out_indices (Sequence[int]): Output from which stages.
            Default: (2, 4, 6)
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
            Default: -1
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
            Default: True
    Example:
        >>> from mmdet.models import EfficientNet
        >>> import torch
        >>> self = EfficientNet(scale=0)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 40, 4, 4)
        (1, 112, 2, 2)
        (1, 320, 1, 1)
    """
    # per-scale (stage depths, stage widths); scales 6/7 are not defined
    arch_settings = {
        0: ([1, 2, 2, 3, 3, 4, 1], [16, 24, 40, 80, 112, 192, 320]),
        1: ([2, 3, 3, 4, 4, 5, 2], [16, 24, 40, 80, 112, 192, 320]),
        2: ([2, 3, 3, 4, 4, 5, 2], [16, 24, 48, 88, 120, 208, 352]),
        3: ([2, 3, 3, 5, 5, 6, 2], [24, 32, 48, 96, 136, 232, 384]),
        4: ([2, 4, 4, 6, 6, 8, 2], [24, 32, 56, 112, 160, 272, 448]),
        5: ([3, 5, 5, 7, 7, 9, 3], [24, 40, 64, 128, 176, 304, 512])
    }

    def __init__(self,
                 scale,
                 in_channels=3,
                 base_channels=32,
                 strides=(1, 2, 2, 2, 1, 2, 1),
                 exp_ratios=(1, 6, 6, 6, 6, 6, 6),
                 kernels=(3, 3, 5, 3, 5, 5, 3),
                 se_ratio=0.25,
                 out_indices=(2, 4, 6),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True):
        super().__init__()
        self.out_indices = out_indices
        # BUGFIX: frozen_stages and norm_eval were accepted but never stored,
        # so _freeze_stages() raised AttributeError and norm_eval was a no-op.
        self.frozen_stages = frozen_stages
        self.norm_eval = norm_eval
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.stage_depths, self.stage_widths = self.arch_settings[scale]
        # BUGFIX: honor the in_channels argument (stem was hard-coded to 3)
        self._make_stem_layer(in_channels, base_channels)
        self.efficient_layers = []
        previous_width = base_channels
        for i, (d, w) in enumerate(zip(self.stage_depths, self.stage_widths)):
            efficient_layer = self.make_efficient_layer(
                input_width=previous_width,
                output_width=w,
                depth=d,
                stride=strides[i],
                exp_ratio=exp_ratios[i],
                kernel=kernels[i],
                se_ratio=se_ratio,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, efficient_layer)
            self.efficient_layers.append(layer_name)
            previous_width = w
        # apply any freezing requested via frozen_stages (no-op when -1)
        self._freeze_stages()

    def _make_stem_layer(self, in_channels, out_channels):
        """Build the stride-2 3x3 stem conv + norm + Swish."""
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, out_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.swish = Swish()

    def make_efficient_layer(self, **kwargs):
        """Factory hook so subclasses can customize stage construction."""
        return EfficientLayer(**kwargs)

    @property
    def norm1(self):
        """The stem's norm layer (stored via add_module)."""
        return getattr(self, self.norm1_name)

    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` layers."""
        if self.frozen_stages >= 0:
            self.norm1.eval()
            for m in [self.conv1, self.norm1]:
                for param in m.parameters():
                    param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                m = getattr(self, f'layer{i}')
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Load a checkpoint if given, else Kaiming/constant init."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def train(self, mode=True):
        """Keep frozen stages frozen across train()/eval() switches and,
        when norm_eval is set, hold BatchNorm layers in eval mode (standard
        mmdetection backbone behavior; previously norm_eval did nothing)."""
        super().train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()

    def forward(self, x):
        """Run the stem then every stage; collect outputs at out_indices."""
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.swish(x)
        outs = []
        for i, layer_name in enumerate(self.efficient_layers):
            efficient_layer = getattr(self, layer_name)
            x = efficient_layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
from tkinter import *
from rumorFunctions import *
from random import randint
import tkinter.simpledialog as simpleDialog
from math import sin, cos
from tkinter.colorchooser import *
class NetworkFrame:
    """Tkinter canvas showing a rumor network as a ring of circular nodes,
    one oval per Person; nodes are tagged with the person's name."""
    def __init__(self, master):
        self.canvas = Canvas(master)
        self.canvas.pack(side = LEFT, fill = BOTH, expand = True)
        self.canvas.config(bg = "lightblue")
        self.radius = 20  # node circle radius in pixels
        self.people= []   # Person objects, in creation order
        self.network = [] # NOTE(review): never populated in this class — confirm use elsewhere
    def newNode(self, n = None):
        """Create an oval + Person; prompt for a name unless n is given
        (batch mode, which also skips the re-layout)."""
        x = randint(0, self.canvas.winfo_width() - self.radius)
        y = randint(0, self.canvas.winfo_height() - self.radius)
        self.canvas.create_oval(x,
                                y,
                                x + self.radius * 2,
                                y + self.radius * 2,
                                width = 3,
                                fill = "black",
                                disableddash = (5, 5),
                                state = DISABLED,
                                activeoutline = "red",
                                activewidth = 2,
                                tags = "cur")
        name = simpleDialog.askstring("New Node", "Name:") if not n else n
        if name == None:
            name = ""
        p = Person(name, len(self.people))
        p.setRumor(0)
        self.people.append(p)
        # re-tag the freshly drawn oval with the person's name, then enable it
        self.canvas.addtag_withtag(name, "cur")
        self.canvas.dtag(name, "cur")
        self.canvas.itemconfig(name, state = NORMAL)
        self.canvas.tag_bind(name, "<Double-Button-1>", self.deleteNode)
        self.canvas.tag_bind(name, "<Button-3>", self.setColor)
        self.canvas.delete("cur")
        if not n:
            self.update()
    def deleteNode(self, event):
        """Double-click handler: remove the clicked node and its Person."""
        name = self.canvas.gettags(CURRENT)[0]
        print(name)
        self.canvas.delete(CURRENT)
        person = next((p for p in self.people if p.name() == name))
        self.people.remove(person)
        self.update()
    def setColor(self, event):
        """Right-click handler: pick a color and store it as the rumor value
        (the #RRGGBB hex parsed as an int)."""
        name = self.canvas.gettags(CURRENT)[0]
        color = askcolor()[1]
        rumor = int(color[1:], 16)
        print(color)
        person = next((p for p in self.people if p.name() == name))
        person.setRumor(rumor)
        self.canvas.itemconfig(name, fill = color)
    def update(self):
        """Re-position all nodes on a circle of radius 150 around the
        canvas center.

        NOTE(review): incr is computed in degrees (360/n) but math.sin/cos
        take radians, so spacing is not actually uniform — confirm intent.
        """
        nbNodes = len(self.people)
        if nbNodes > 0:
            incr = 360 / nbNodes
            for i, p in enumerate(self.people):
                x = (self.canvas.winfo_width() / 2) + (cos(i*incr) * 150)
                y = (self.canvas.winfo_height() / 2) + (sin(i*incr) * 150)
                r = self.radius
                self.canvas.coords(p.name(), x-r, y+r, x+r, y-r)
    def updateColors(self):
        """Recolor every node from its rumor value as #RRGGBB."""
        for p in self.people:
            # NOTE(review): the next three assignments are dead code — the
            # format() call below overwrites `color` unconditionally.
            color = hex(p.rumor())
            color = color[2:]
            color = "#" + color
            color = format(p.rumor(), '06x')
            color = "#" + color
            self.canvas.itemconfig(p.name(), fill = color)
|
import matplotlib
matplotlib.use('Agg')
'''
author: Karel Klein Cardena
userID: kkc3
'''
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import StrategyLearner as sle
from marketsimcode import compute_portvals
def assess_portfolio(portfolio, sv):
    """Compute summary statistics for a normalized portfolio.

    Args:
        portfolio: pandas Series/DataFrame of portfolio values, normalized
            so the first entry is 1.0.
        sv: starting portfolio value in dollars.

    Returns:
        (cumulative_return, std_daily_return, avg_daily_return)
    """
    # de-normalize back to dollar values
    port_val = sv * portfolio
    # BUGFIX: .ix was deprecated in pandas 0.20 and removed in 1.0;
    # positional access is .iloc.
    cum_return = port_val.iloc[-1] / port_val.iloc[0] - 1
    daily_ret = port_val / port_val.shift(1) - 1
    std_daily_ret = daily_ret.std()
    avg_daily_ret = daily_ret.mean()
    return cum_return, std_daily_ret, avg_daily_ret
def plot_results(x, y, title, ylabel, filename):
    """Plot y against impact values x and save as <filename>.pdf.

    NOTE(review): the figure is never created/cleared here (no plt.figure()
    or plt.clf()), so repeated calls draw onto the same axes — confirm
    that accumulation is intended.
    """
    plt.plot(x, y)
    plt.title(title)
    plt.xlabel('Impact')
    plt.ylabel(ylabel)
    plt.grid(True)
    plt.savefig(filename + '.pdf')
# how would changing the value of impact affect
# in sample trading behavior and results (provide at least two metrics)
if __name__=='__main__':
    # in-sample period
    symbol = 'JPM'
    # NOTE(review): start/end strings are never used below (sd/ed datetimes
    # are used instead) — dead variables.
    start = '01-01-2008'
    end = '12-31-2009'
    start_value = 100000
    # impact values swept: 0.005, 0.015, ..., 0.095
    impacts = np.arange(0,10.)/100 + 0.005
    # arrays for gathering results
    num_trades_array = []
    cum_returns = []
    std_daily_rets = []
    avg_daily_rets = []
    for impact in impacts:
        # Strategy Learner
        sd=dt.datetime(2008,1,1)
        ed=dt.datetime(2009,12,31)
        sl = sle.StrategyLearner(impact=impact, flag=2)
        sl.addEvidence(symbol=symbol, sd=sd, ed=ed, sv=start_value)
        trades_sl = sl.testPolicy(symbol=symbol, sd=sd, ed=ed, sv=start_value)
        # count the number of trades that occured
        # NOTE(review): DataFrame.ix was removed in pandas 1.0 — this loop
        # (and the .ix[0] below) requires an older pandas; .iloc/.loc are
        # the modern equivalents.
        num_trades = 0
        for i in range(trades_sl.shape[0]):
            if trades_sl.ix[i,symbol] != 0:
                num_trades += 1
        num_trades_array.append(num_trades)
        # get strategy learner portfolio values
        port_values_sl = compute_portvals(trades_sl, start_val=start_value)
        normed_port_sl = port_values_sl / port_values_sl.ix[0]
        sl_cum_return, sl_std_daily_ret, sl_avg_daily_ret = assess_portfolio(normed_port_sl, start_value)
        cum_returns.append(sl_cum_return)
        std_daily_rets.append(sl_std_daily_ret)
        avg_daily_rets.append(sl_avg_daily_ret)
    # plot results
    #plot_results(impacts, num_trades_array, 'Impact vs Number of Trades', 'Number of Trades Executed' ,'exp2aa')
    plot_results(impacts, cum_returns, 'Impact vs Cumulative Return', 'Cumulative Return', 'exp2bb')
|
from django import forms
from .models import TaxP
class taxform(forms.ModelForm):
    """ModelForm for the five TaxP questionnaire fields, each rendered as a
    Bootstrap-styled text input with a "Question N" label."""
    class Meta:
        model=TaxP
        fields=[
            'q1',
            'q2',
            'q3',
            'q4',
            'q5',
        ]
        labels = {
            'q1':'Question 1',
            'q2': 'Question 2',
            'q3': 'Question 3',
            'q4': 'Question 4',
            'q5': 'Question 5',
        }
        widgets ={
            'q1' : forms.TextInput(attrs={'class' :'form-control'}),
            'q2': forms.TextInput(attrs={'class' :'form-control'}),
            'q3': forms.TextInput(attrs={'class': 'form-control'}),
            'q4': forms.TextInput(attrs={'class': 'form-control'}),
            'q5': forms.TextInput(attrs={'class': 'form-control'}),
        }
'''
Created on Mar 17, 2017
Client implementation of UDP echo
@author: Christopher Blake Matis
'''
#include Python's socket library
from socket import*
#set variables serverName and serverPort
serverName = '172.16.0.5'
serverPort = 12000
while 1:
#create UDP socket for server
clientSocket = socket(AF_INET, SOCK_DGRAM)
#get user keyboard input
message = ' '
message = raw_input('input lowercase sentence:')
#if message is quit close the connection on client-side
if message == "quit":
print("closed connection")
break
#check if message equals 'quit' and then close the connection
#attach server name, port to message, then send into socket
clientSocket.sendto(message,(serverName,serverPort))
#read reply characters from socket into string
modifiedMessage, serverAddress = clientSocket.recvfrom(2048)
#print out received string and close socket
print modifiedMessage
clientSocket.close();
|
import matplotlib.pyplot as plt
import gym
import numpy as np
import cv2
# 输入 N个3通道的图片array
# 输出:一个array 形状 (84 84 N)
# 步骤: 1. resize ==>(84 84 3)[uint 0-255]
# 2. gray ==> (84 84 1) [uint 0-255]
# 3. norm ==> (84 84 1) [float32 0.0-1.0]
# 4. concat ===>(84 84 N) [float32 0.0-1.0]
#resize a img
def imgbuffer_process(imgbuffer, out_shape = (84, 84)):
    """Convert a list of color frames into one stacked (H, W, N) array.

    Each frame is resized to out_shape, converted to grayscale, min-max
    normalized to [0.0, 1.0] as float32, then all frames are concatenated
    along a trailing channel axis.
    """
    processed = []
    for frame in imgbuffer:
        small = cv2.resize(src=frame, dsize=out_shape)
        gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
        # normalization also casts to 32-bit float, as required downstream
        norm = cv2.normalize(gray, gray, alpha=0.0, beta=1.0, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
        # append a trailing channel dimension so frames can be concatenated
        processed.append(np.expand_dims(norm, len(norm.shape)))
    return np.concatenate(tuple(processed), axis=2)
def test():
    """Smoke test: run random actions in Breakout and visualize the
    processed frame buffer."""
    env = gym.make('Breakout-v4')
    env.seed(1)  # reproducible
    # env = env.unwrapped
    N_F = env.observation_space.shape[0]  # state space dimension
    N_A = env.action_space.n  # action space dimension
    img_buffer = []
    img_buffer_size = 1
    s = env.reset()
    # NOTE(review): max_loop is unused (the loop below runs 2 iterations)
    max_loop = 100000
    for i in range(2):
        a = np.random.randint(0, N_A - 1)
        s_, r, done, info = env.step(a)
        env.render()
        # fill the buffer before processing; afterwards keep it as a FIFO
        if len(img_buffer) < img_buffer_size:
            img_buffer.append(s_)
            continue
        else:
            img_buffer.pop(0)
            img_buffer.append(s_)
        img_input = imgbuffer_process(img_buffer)
        print('img_input_shape = ' + str(img_input.shape))
        # plt.subplot(2, 2, 1)
        # convert the [0,1] float frame back to uint8 for display
        plt.imshow(np.uint8(img_input[:, :, 0] * 255))
        plt.savefig('a.png')
        print("aaaaaaaa")
        # plt.subplot(2, 2, 2)
        # plt.imshow(np.uint8(img_input[:, :, 1] * 255), cmap='gray')
        # plt.subplot(2, 2, 3)
        # plt.imshow(np.uint8(img_input[:, :, 2] * 255), cmap='gray')
        # plt.subplot(2, 2, 4)
        # plt.imshow(np.uint8(img_input[:, :, 3] * 255), cmap='gray')
        plt.show()
if __name__ == '__main__':
    test()
|
class Solution:
    def letterCasePermutation(self, S: str) -> "list[str]":
        """Return every string obtainable from S by toggling letter case.

        BUGFIX: the original annotation ``List[str]`` referenced
        ``typing.List`` without importing it, raising NameError when the
        class was defined; a string annotation avoids the evaluation.
        """
        result = [S]
        for i, c in enumerate(S):
            if c.isalpha():
                # for every variant built so far, add a copy with this
                # character's case swapped
                result.extend([s[:i] + c.swapcase() + s[i+1:] for s in result])
        return result
class Solution:
    def letterCasePermutation(self, S: str) -> "list[str]":
        """Return every case permutation of S via a cartesian product.

        BUGFIX: ``itertools`` was used without being imported, and the
        ``List[str]`` annotation referenced ``typing.List`` without an
        import (NameError at class definition); the annotation is now a
        string and itertools is imported locally.
        """
        import itertools
        # each letter contributes [lower, upper]; other characters are fixed
        parameters = [[c.lower(), c.upper()] if c.isalpha() else c for c in S]
        return [''.join(letters) for letters in itertools.product(*parameters)]
|
import numpy as np
import meep
# Conversion factor applied to energies given in eV.
# NOTE(review): 1/1.23984193 matches the standard eV<->um relation
# (E[eV] = 1.23984.../lambda[um]); the extra 1e6 factor suggests a length
# unit of meters rather than microns — confirm the intended unit system.
eV_um_scale = 1/1.23984193*1e6
def drude_lorentz_material(freq, gamma, sigma, eps_inf=1, multiplier=1):
    """Build a Drude-Lorentz meep Medium.

    The first pole (index 0) is the Drude term; the rest are Lorentzian.
    freq/gamma/sigma may be scalars or 1-D sequences of equal length, and
    sigma and eps_inf are both scaled by ``multiplier``.
    """
    freq, gamma, sigma = map(np.atleast_1d, [freq, gamma, sigma])
    poles = []
    for i in range(len(freq)):
        cls = meep.DrudeSusceptibility if i == 0 else meep.LorentzianSusceptibility
        poles.append(cls(frequency=freq[i], gamma=gamma[i], sigma=sigma[i]*multiplier))
    return meep.Medium(epsilon=eps_inf*multiplier, E_susceptibilities=poles)
def lorentz_material(freq, gamma, sigma, eps_inf=1, multiplier=1):
    """Build a meep Medium made of Lorentzian poles only.

    freq/gamma/sigma may be scalars or 1-D sequences of equal length;
    sigma and eps_inf are both scaled by ``multiplier``.
    """
    freq, gamma, sigma = map(np.atleast_1d, [freq, gamma, sigma])
    poles = []
    for i in range(len(freq)):
        poles.append(meep.LorentzianSusceptibility(
            frequency=freq[i], gamma=gamma[i], sigma=sigma[i]*multiplier))
    return meep.Medium(epsilon=eps_inf*multiplier, E_susceptibilities=poles)
def single_freq_material(eps, freq, multiplier=1):
    """fit a material model to complex permitivitty at a single given frequency (1/wavelength)"""
    # with positive eps, use simple material
    # loss enters through D conductivity ~ 2*pi*f*Im(eps)/Re(eps)
    # NOTE(review): the 1e18 factor implies a specific unit convention —
    # confirm against the project's length/frequency units.
    if eps.real > 0:
        return meep.Medium(epsilon=eps.real*multiplier, D_conductivity=2*np.pi*freq*eps.imag/eps.real*multiplier*1e18)
    # with negative eps, use Lorentz material
    else:
        # single Lorentz pole with fixed eps_inf=1, sigma=1; gamma and the
        # resonance frequency fn are chosen to reproduce eps at freq.
        eps_inf = 1
        sigma = 1
        # NOTE(review): the (eps.real-2)*(eps.real-1) and (2-eps.real)
        # factors presumably come from inverting the Lorentz model with
        # eps_inf=1, sigma=1 — verify the derivation before reuse.
        gamma = freq*eps.imag/(eps.imag**2 + (eps.real-2)*(eps.real-1))
        fn_sq = 1/(2-eps.real)*(freq*gamma*eps.imag - freq**2*(eps.real-1))
        fn = fn_sq**0.5
        return lorentz_material(fn, gamma, sigma, eps_inf=eps_inf, multiplier=multiplier)
def fit_drude_lorentz(eps, freq):
    """fit a drude-lorentz material model to complex permitivitty"""
    # TODO: not implemented — currently a stub that returns None.
    pass
def get_eps(material):
    """obtain the complex permitivitty eps(wavelength) function of a material"""
    # assume isotropic material
    def eps(wavelength):
        # meep frequency units: omega = 1/wavelength
        omega = 1/wavelength
        # start from eps_infinity (x-component; isotropy assumed above)
        eps_val = material.epsilon_diag[0]
        for pole in material.E_susceptibilities:
            freq = pole.frequency
            gamma = pole.gamma
            sigma = pole.sigma_diag[0]
            if isinstance(pole, meep.geom.DrudeSusceptibility):
                # Drude contribution: i*sigma*f^2 / (omega*(gamma - i*omega))
                eps_val += 1j*sigma*freq**2/(omega*(gamma - 1j*omega))
            elif isinstance(pole, meep.geom.LorentzianSusceptibility):
                # Lorentz contribution: sigma*f^2 / (f^2 - omega^2 - i*omega*gamma)
                eps_val += sigma*freq**2/(freq**2 - omega**2 - 1j*omega*gamma)
        # fold D-conductivity loss in as a multiplicative factor
        factor = 1 + 1j*material.D_conductivity_diag[0]*wavelength/(2*np.pi)
        return eps_val*factor
    return eps
def Au(multiplier=1):
    """Gold material"""
    # plasma frequency of gold (9.01 eV) converted with eV_um_scale
    wp = 9.01*eV_um_scale
    # pole frequencies: the near-zero first entry is the Drude term
    f = eV_um_scale*np.array([1e-20, 4.25692])
    gam = eV_um_scale*np.array([0.0196841, 4.15975])
    # oscillator strengths expressed as sigma = wp^2/f^2 * strength
    sig = wp**2/f**2*np.array([0.970928, 1.2306])
    eps_inf = 3.63869
    # NOTE(review): the numeric values appear to come from a fitted
    # Drude-Lorentz model for gold — confirm the literature source.
    return drude_lorentz_material(f, gam, sig, eps_inf, multiplier=multiplier)
|
from pyparsing import (
Empty as PpEmpty,
Forward as PpForward,
Keyword as PpKeyword,
Literal as PpLiteral,
Suppress as PpSuppress,
Word as PpWord,
QuotedString as PpQuotedString,
Regex as PpRegex,
Optional as PpOptional,
White as PpWhite,
oneOf,
infixNotation as PpInfixNotation,
opAssoc as OpAssoc, # noqa
MatchFirst as PpMatchFirst,
And as PpAnd,
pythonStyleComment,
pyparsing_common,
)
class Adapter:
    """Wrapper around a pyparsing expression that mirrors pyparsing's
    operator protocol (+, -, |, *, [], ==) with Adapter-typed results.

    Subclasses may define an ``action`` attribute/method (installed as the
    parse action) and may override ``__str__`` (used as the grammar name).
    """
    def __init__(self, grammar):
        self.grammar = grammar
        # install subclass-provided parse action, if any
        try:
            self._set_parse_action(self.action)
        except AttributeError:
            pass
        # name the grammar after this adapter's string form, if supported
        try:
            self.set_name(str(self))
        except AttributeError:
            pass
    def _set_parse_action(self, action):
        try:
            self.grammar.setParseAction(action)
        except AttributeError:
            pass
    def set_name(self, name):
        self.grammar.setName(name)
    def __add__(self, other):
        return And([self, other])
    def __radd__(self, other):
        # return Adapter(self.grammar.__radd__(other._grammar))
        # NOTE(review): delegates back to other.__add__; assumes `other`
        # can add an Adapter — may recurse or fail otherwise. Confirm.
        return other + self
    def __sub__(self, other):
        return Adapter(self.grammar.__sub__(other._grammar))
    def __rsub__(self, other):
        # NOTE(review): same delegation caveat as __radd__.
        return other - self
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 — confirm this is acceptable.
        return self.grammar.__eq__(other._grammar)
    def __req__(self, other):
        return self == other
    def __ne__(self, other):
        return not (self == other)
    def __rne__(self, other):
        return not (self == other)
    def __getitem__(self, key):
        # pyparsing-style repetition: adapter[n], adapter[m, n], ...
        return MultipleMatch(self, key)
    def __mul__(self, other):
        return Adapter(self.grammar.__mul__(other._grammar))
    def __rmul__(self, other):
        return self.__mul__(other)
    def __or__(self, other):
        return MatchFirst([self, other])
    def __ror__(self, other):
        # NOTE(review): same delegation caveat as __radd__.
        return other | self
    # def __repr__(self):
    #     return f'{self.__class__.__name__}({self.value})'
    def __str__(self):
        return str(self.grammar)
    @property
    def _grammar(self):
        """Return PyParsing grammar contained in this instance."""
        return self.grammar
    def get_adapter_grammar(self):
        """Return the adapter itself (hook point for wrapper subclasses)."""
        return self
    def parse(self, string, explode=True):
        """Parse the full string; single-item results are unwrapped when
        explode is True."""
        result = self.grammar.parseString(string, parseAll=True).asList()
        if explode and len(result) == 1:
            return result.pop()
        else:
            return result
    def ignore(self, expr):
        """Return a new Adapter that skips matches of expr (e.g. comments)."""
        return Adapter(self.grammar.ignore(expr._grammar))
class Keyword(Adapter):
    """Adapter around PpKeyword; str() is the bare keyword text."""
    def __init__(self, match_string):
        # Must be assigned before super().__init__, which calls str(self).
        self.match_string = match_string
        super().__init__(PpKeyword(match_string))
    def __str__(self):
        return self.match_string
class Word(Adapter):
    """Adapter around PpWord with identical positional defaults."""
    def __init__(
        self,
        initChars,
        bodyChars=None,
        min=1,
        max=0,
        exact=0,
        asKeyword=False,
        excludeChars=None,
    ):
        super().__init__(
            PpWord(initChars, bodyChars, min, max, exact, asKeyword, excludeChars)
        )
class Suppress(Adapter):
    """Match `expr` but drop its tokens; str() shows the suppressed expr."""
    def __init__(self, expr):
        # Must be assigned before super().__init__, which calls str(self).
        self.expr = expr
        super().__init__(PpSuppress(expr._grammar))
    def __str__(self):
        return str(self.expr)
class QuotedString(Adapter):
    """Adapter around PpQuotedString with identical positional defaults."""
    def __init__(
        self,
        quoteChar,
        escChar=None,
        escQuote=None,
        multiline=False,
        unquoteResults=True,
        endQuoteChar=None,
        convertWhitespaceEscapes=True,
    ):
        super().__init__(
            PpQuotedString(
                quoteChar,
                escChar,
                escQuote,
                multiline,
                unquoteResults,
                endQuoteChar,
                convertWhitespaceEscapes,
            )
        )
class Forward(Adapter):
    """Adapter around PpForward; supports deferred definition via << and <<=."""
    def __init__(self, other=None):
        super().__init__(PpForward(other))
    def __lshift__(self, other):
        return Adapter(self.grammar << other._grammar)
    def __ilshift__(self, other):
        return Adapter(self.grammar.__ilshift__(other._grammar))
class Regex(Adapter):
    """Adapter around PpRegex."""
    def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False):
        super().__init__(PpRegex(pattern, flags, asGroupList, asMatch))
class Empty(Adapter):
    """Adapter around PpEmpty (matches nothing, always succeeds)."""
    def __init__(self):
        super().__init__(PpEmpty())
class Literal(Adapter):
    """Adapter around PpLiteral; str() is the bare literal text."""
    def __init__(self, match_string):
        # Must be assigned before super().__init__, which calls str(self).
        self.match_string = match_string
        super().__init__(PpLiteral(match_string))
    def __str__(self):
        return self.match_string
class Optional(Adapter):
    """Adapter around PpOptional; renders as '[ expr ]' in grammar dumps."""
    def __init__(self, expr):
        # Must be assigned before super().__init__, which calls str(self).
        self.expr = expr
        super().__init__(PpOptional(expr._grammar))
    def __str__(self):
        return '[ {} ]'.format(self.expr._grammar)
class White(Adapter):
    """Adapter around PpWhite with identical defaults."""
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super().__init__(PpWhite(ws, min, max, exact))
class OneOf(Adapter):
    """Adapter around pyparsing's oneOf helper."""
    def __init__(self, literals, case_less=False, use_regex=True, as_keyword=False):
        super().__init__(oneOf(literals, case_less, use_regex, as_keyword))
class InfixExpression(Adapter):
    """Adapter around PpInfixNotation.

    `precedence_list` entries are (operator_adapter, arity, assoc, ...);
    only the operator itself is unwrapped before delegation.
    """
    def __init__(
        self,
        base_expr,
        precedence_list,
        lparen=Suppress(Literal('(')),
        rparen=Suppress(Literal(')')),
    ):
        levels = [(level[0]._grammar, *level[1:]) for level in precedence_list]
        super().__init__(
            PpInfixNotation(
                base_expr._grammar,
                levels,
                lparen._grammar,
                rparen._grammar,
            )
        )
class ParseExpression(Adapter):
    """Common marker base for composite expressions (MatchFirst, And)."""
class MatchFirst(ParseExpression):
    """First-match alternation over a list of adapters."""
    def __init__(self, exprs, savelist=False):
        # Must be assigned before super().__init__, which calls str(self).
        self.exprs = exprs
        super().__init__(PpMatchFirst([e._grammar for e in exprs], savelist))
    def _get_elements(self):
        """Flatten the nested MatchFirst objects and return as a list.
        { { A | B } | C } will become { A | B | C }.
        """
        flat = []
        for expr in self.exprs:
            if isinstance(expr, MatchFirst):
                flat.extend(expr._get_elements())
            else:
                flat.append(expr)
        return flat
    def __str__(self):
        alternatives = ' | '.join(str(e._grammar) for e in self._get_elements())
        return '{ ' + alternatives + ' }'
class And(ParseExpression):
    """Sequence of adapters; renders as the space-joined sub-grammars."""
    def __init__(self, exprs, savelist=True):
        # Must be assigned before super().__init__, which calls str(self).
        self.exprs = exprs
        super().__init__(PpAnd([e._grammar for e in exprs], savelist))
    def __str__(self):
        return ' '.join(str(e._grammar) for e in self.exprs)
class HashComment(Adapter):
    """Adapter around pyparsing's '#'-to-end-of-line comment grammar."""
    def __init__(self):
        super().__init__(pythonStyleComment)
class MultipleMatch(Adapter):
    """Unlike others, this class does not use any specific PyParsing class.
    self.grammar here can be pyparsing.OneOrMore or pyparsing.ZeroOrMore.
    This class is created to override str behaviour of those classes.
    """
    def __init__(self, expr, key):
        # Must be assigned before super().__init__, which calls str(self).
        self.expr = expr
        super().__init__(expr.grammar[key])
    def __str__(self):
        # Brace-wrap sequences so '{ a b }...' is unambiguous.
        inner = self.expr.get_adapter_grammar()
        if isinstance(inner, And):
            return '{{ {} }}...'.format(self.expr._grammar)
        return '{}...'.format(self.expr._grammar)
# Ready-made numeric terminals wrapping the pyparsing_common equivalents.
sci_real = Adapter(pyparsing_common.sci_real)
real = Adapter(pyparsing_common.real)
signed_integer = Adapter(pyparsing_common.signed_integer)
|
# String indexing
str0 = 'Tista loves chocolate'
print(len(str0))
print(str0[3])
# String slicing
print(str0[5:7])
print(str0[4:7])
# String mutation
# Strings are not 'mutable'; they are called immutable.
# Bug fix: item assignment raises TypeError and killed the script here,
# making every later example unreachable; demonstrate the error instead.
try:
    str0[3] = 'z'
except TypeError as exc:
    print('Cannot mutate a string:', exc)
print(str0)
s2 = 'New York'
zip_code = 10001
# The following is called string concatenation.
# Bug fix: str + int raises TypeError; show the error and keep running.
try:
    print(s2 + zip_code)
except TypeError as exc:
    print('Cannot concatenate str and int:', exc)
print(s2 + str(zip_code))
print(s2 + ' ' + str(zip_code))
s3 = 'New York '
print(s3 + str(zip_code))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
# Python 2.7
import re
import curses, sys, os, signal,argparse,time
from multiprocessing import Process
from scapy.all import *
from subprocess import call, PIPE
from datetime import date, time, datetime
import os, sys
from PyQt4 import QtCore, QtGui
# ICI LA CLASSE CAPTURE
class Capture:
    """Traffic capture helper (drives tcpdump through the shell)."""
    def DebutCaptureTraffic(self,fichier,interface,duree):
        """Capture `duree` seconds of traffic on `interface` into `fichier`.pcap."""
        cmd = "timeout ---- tcpdump -i **** -s 0 -w ++++.pcap"
        # Same placeholder substitution order as before: ****, ++++, ----.
        for placeholder, value in (("****", interface), ("++++", fichier), ("----", duree)):
            cmd = cmd.replace(placeholder, value)
        call(cmd,shell=True)
# ICI LA CLASSE EXAMEN
class Archive:
    """Create archive folders under /home/Archivage.

    NOTE(review): Python 2 only (octal literal 0777, print statement).
    """
    def CreationDossier(self,NomDossier):
        # Build /home/Archivage/<NomDossier> and create it world-accessible.
        path = "/home/Archivage/+++++"
        path=path.replace("+++++","%s"%NomDossier)
        os.mkdir( path, 0777 );
        print "Le dossier a été crée avec succès"
class Examen:
    """Static examination of a capture file: capinfos metadata and the
    BSSID/SSID inventory of visible access points.

    NOTE(review): Python 2 style; all helpers shell out and parse
    temporary text files.
    """
    # Metadata about the capture file (via capinfos).
    def Capinfos(self,f):
        """Run capinfos on capture `f` and return selected fields.

        Returns (encapsulation, file type, packet count, file size,
        start time, end time, duration, SHA1, RIPEMD160, MD5).
        NOTE(review): relies on fixed line numbers in capinfos output;
        breaks if the installed capinfos version changes its layout.
        """
        command=("capinfos ++++ > capinfos.txt")
        chaine=command.replace("++++","%s"%f)
        call(chaine, shell= True)
        r=open('capinfos.txt','r')
        ligne=r.readlines()
        EncapsulationFichier=ligne[2].split("File encapsulation:")
        EncapsulationFichier=EncapsulationFichier[1].strip()
        TypeFichier=ligne[1].split("File type:")
        TypeFichier=TypeFichier[1].strip()
        NombrePaquets=ligne[4].split("Number of packets:")
        NombrePaquets=NombrePaquets[1].strip()
        TailleFichier=ligne[5].split("File size:")
        TailleFichier=TailleFichier[1].strip()
        DebutCapture=ligne[8].split("Start time:")
        DebutCapture=DebutCapture[1].strip()
        FinCapture=ligne[9].split("End time:")
        FinCapture=FinCapture[1].strip()
        DureeCapture=ligne[7].split("Capture duration:")
        DureeCapture=DureeCapture[1].strip()
        SHA1=ligne[14].split("SHA1:")
        SHA1=SHA1[1].strip()
        RIPEMD160=ligne[15].split("RIPEMD160:")
        RIPEMD160=RIPEMD160[1].strip()
        MD5=ligne[16].split("MD5:")
        MD5=MD5[1].strip()
        r.close()
        os.remove('capinfos.txt')
        return EncapsulationFichier,TypeFichier,NombrePaquets,TailleFichier,DebutCapture,FinCapture,DureeCapture,SHA1,RIPEMD160,MD5
    # BSSID and SSID of each access point (beacon frames, wlan[0]=0x80).
    def BssidSsid(self,f):
        """Dump unique beacon-frame lines from capture `f` into tmp.txt
        (consumed later by search()); returns nothing.
        """
        command="tcpdump -nne -r ++++ wlan[0]=0x80 | awk '{print $0}'> filebssid.txt"
        chaine=command.replace("++++","%s"%f)
        subprocess.call(chaine, shell=True)
        command="cat filebssid.txt|sort -u > tmp.txt"
        resultats=command
        subprocess.call(command,shell=True)
        os.remove('filebssid.txt')
    def search(self):
        """Parse tmp.txt (written by BssidSsid) and return
        (ssids, bssids, channels), one entry per unique beacon line.

        NOTE(review): re.search(...).group() raises AttributeError on any
        line that lacks a CH:/BSSID:/(...) field; also `strip("BSSID:")`
        strips *characters*, not the prefix -- works here only because MAC
        hex digits are not in that character set.
        """
        f=open('tmp.txt','r')
        ligne=f.readlines()
        chaine1=[]
        chaine2=[]
        chaine3=[]
        for chaine in ligne:
            res2=re.search(r'CH: ([0-9]*)', chaine, re.I).group()
            res=re.search(r'BSSID:([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})', chaine, re.I).group()
            res1= re.search(r'([(].*[)])', chaine, re.I).group()
            chaine1.append(res)
            chaine2.append(res2)
            chaine3.append(res1)
        chaine4=[]
        for chaine in chaine1:
            chaine=chaine.strip("BSSID:")
            chaine4.append(chaine)
        os.remove("tmp.txt")
        return chaine3,chaine4,chaine2
#os.remove('tmp.txt')
#ICI LA CLASS ANALYSE
class Analyse:
    # Number of data frames sent/received through the access point.
    def NbrTramDataSendWAP(self,f,bssid):
        """Count data frames (subtype 0x20) for `bssid` in capture `f`.

        Returns the raw `wc -l` output (a string, trailing newline kept).
        """
        command="tshark -r fichier -R '((wlan.fc.type_subtype==0x20)&&(wlan.bssid==++++))'|wc -l > NbrTramDataSendWAP.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        subprocess.call(x,shell=True)
        f=open('NbrTramDataSendWAP.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('NbrTramDataSendWAP.txt')
        return ligne
def ssidAP(self,ssid,bssid1,bssid):
for ch in bssid1:
if ch==bssid:
ssid=ssid[bssid1.index(ch)]
return ssid
    # Number of encrypted (protected) data frames.
    def NbrTramDataCrypt(self,f,bssid):
        """Count protected data frames for `bssid` in capture `f`.

        Returns the raw `wc -l` output string.
        """
        command="tshark -r fichier -R '((wlan.fc.type_subtype==0x20)&&(wlan.fc.protected==1))&&(wlan.bssid==++++)'|wc -l > NbrTramDataCrypt.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        subprocess.call(x,shell=True)
        f=open('NbrTramDataCrypt.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('NbrTramDataCrypt.txt')
        return ligne
    # Encryption type heuristic.
    def TypeDuChiffrement(self,f,bssid):
        """Guess the cipher: "WEP" when every data frame is protected
        (total count == encrypted count), "WPA" otherwise.
        """
        a=Analyse()
        a.x=a.NbrTramDataSendWAP(f,bssid)
        a.y=a.NbrTramDataCrypt(f,bssid)
        if a.x==a.y:
            ligne="WEP"
        else:
            ligne="WPA"
        return ligne
    # Stations associated with the access point.
    def StationAssocie(self,f,bssid):
        """Scan association frames from `bssid` and return (as a string)
        the index of the awk column holding the DA: MAC address.

        NOTE(review): returns None when the while-loop finds no column
        match; `re.search(...).group()` raises AttributeError on lines
        without a DA: field -- TODO confirm intended.
        """
        command="tcpdump -nne -r fichier 'wlan[0]=0x10 and wlan[26:2]=0x0000 and wlan src ++++' |awk '{print $0}'|sort|uniq -c|sort -nr > StationAssocie.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        subprocess.call(x,shell=True)
        r=open('StationAssocie.txt','r')
        ligne=r.readlines()
        chaine=[]
        for ch in ligne:
            exp=re.search(r'DA:([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})', ch, re.I).group()
            chaine.append(exp)
        i=0
        while i in range(0, len(ligne)):
            z=ligne[i].strip()
            z=z.split(' ')
            if z[i]!=chaine[i]:
                i=i+1
            else:
                pos=i
                return str(pos)
#print chaine
    def StationAssocieResultat(self,f,bssid,position):
        """Re-run the association-frame scan printing awk column `position`
        and return (macs, counts) parsed from the 'count DA:mac' lines.
        """
        command="tcpdump -nne -r fichier 'wlan[0]=0x10 and wlan[26:2]=0x0000 and wlan src ++++' |awk '{print $position}'|sort|uniq -c|sort -nr > StationAssocie.txt"
        y=command.replace("++++","%s"%bssid)
        y=y.replace("fichier","%s"%f)
        y=y.replace("position","%s"%position)
        subprocess.call(y,shell=True)
        r=open('StationAssocie.txt','r')
        ligne=r.readlines()
        x1=[]
        x2=[]
        for i in range(0, len(ligne)):
            # Split '  <count> DA:<mac>' into count / mac parts.
            x=ligne[i].split(':',1)
            x[1]=x[1].replace("\n"," ")
            x[1]=x[1].strip()
            x[0]=x[0].strip("DA")
            x[0]=x[0].strip()
            x1.append(x[1])
            x2.append(x[0])
        r.close()
        return x1, x2
#os.remove('StationAssocie.txt')
#Nombre de trames de données envoyées par chaque station
def NbrDataFramSendFromStation(self,f,bssid):
command="tshark -r fichier -R '((wlan.fc.type_subtype==0x20)&&(wlan.fc.protected==1))&&(wlan.bssid==++++)' -T fields -e wlan.sa|sort|uniq -c |sort -nr > NbrDataFramSendFromStation.txt"
x=command.replace("++++","%s"%bssid)
x=x.replace("fichier","%s"%f)
subprocess.call(x,shell=True)
f=open('NbrDataFramSendFromStation.txt','r')
ligne=f.readlines()
chaine1 = []
chaine2 = []
for i in range(0, len(ligne)):
x=ligne[i].strip()
x=x.replace(" ",",")
x=x.split(",")
bssid=x[1]
nbr=x[0]
chaine1.append(nbr)
chaine2.append(bssid)
max=0
for i in chaine1:
if int(i) > max:
max=i
suspect=chaine2[chaine1.index(max)]
return suspect,max
#os.remove('DestDataFrameSendByStation.txt')
    def NbrDataFramSendFromStation2(self,f,bssid):
        """Return (counts, macs) of protected data frames per source MAC.

        Same scan as NbrDataFramSendFromStation but returns the whole
        parallel lists instead of only the top entry.
        """
        command="tshark -r fichier -R '((wlan.fc.type_subtype==0x20)&&(wlan.fc.protected==1))&&(wlan.bssid==++++)' -T fields -e wlan.sa|sort|uniq -c |sort -nr > NbrDataFramSendFromStation.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        subprocess.call(x,shell=True)
        f=open('NbrDataFramSendFromStation.txt','r')
        ligne=f.readlines()
        chaine1 = []
        chaine2 = []
        for i in range(0, len(ligne)):
            # '  <count> <mac>' -> ['<count>', '<mac>']
            x=ligne[i].strip()
            x=x.replace(" ",",")
            x=x.split(",")
            bssid=x[1]
            nbr=x[0]
            chaine1.append(nbr)
            chaine2.append(bssid)
        return chaine1, chaine2
    # Number of data frames received by each station.
    def DestDataFrameSendByStation(self,f,bssid):
        """Tally protected data frames per destination MAC.

        NOTE(review): appears unfinished -- the parsed values are discarded
        and the method returns None.
        """
        command="tshark -r fichier '((wlan.fc.type_subtype==0x20)&&(wlan.fc.protected==1))&&(wlan.bssid==++++)' -T fields -e wlan.da|sort|uniq -c |sort -nr > DestDataFrameSendByStation.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        subprocess.call(x,shell=True)
        f=open('DestDataFrameSendByStation.txt','r')
        ligne=f.readlines()
        for i in range(0, len(ligne)):
            x=ligne[i].strip()
            x=x.replace(" ",";")
            x=x.split(";")
        f.close()
    # Number of data frames per (source, destination) station pair.
    def SrcDestDataFramSendStation(self,f,bssid):
        """Return (sources, counts, destinations) of protected data frames
        grouped per source/destination pair.

        NOTE(review): f.close() sits *inside* the loop, so it runs on
        every iteration (harmless double-close, but probably misplaced).
        """
        command="tshark -r fichier '((wlan.fc.type_subtype==0x20)&&(wlan.fc.protected==1))&&(wlan.bssid==++++)' -T fields -e wlan.sa -e wlan.da|sort|uniq -c |sort -nr > SrcDestDataFramSendStation.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        subprocess.call(x,shell=True)
        f=open('SrcDestDataFramSendStation.txt','r')
        ligne=f.readlines()
        ch1=[]
        ch2=[]
        ch3=[]
        for i in range(0, len(ligne)):
            # '  <count> <sa>\t<da>' -> count, source, destination
            x=ligne[i].strip()
            x=x.replace(" ",";")
            x=x.split(";")
            z=x[1].split("\t")
            f.close()
            ch1.append(z[0])
            ch2.append(x[0])
            ch3.append(z[1])
        return ch1, ch2, ch3
#os.remove('SrcDestDataFramSendStation.txt')
    # Timestamp of the first data frame sent by the suspect station.
    def DebSendDataStation(self,f,bssid,station):
        """Return the frame.time of the first data frame `station` sent
        through `bssid` (raw string from the temp file).
        """
        command="tshark -r fichier '(wlan.bssid==++++)&&(wlan.sa==****)&&(wlan.fc.type_subtype==0x20)' -T fields -e frame.time|head -1 > DebSendDataStation.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('DebSendDataStation.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('DebSendDataStation.txt')
        return ligne
    # Timestamp of the last data frame sent by the suspect station.
    def FinSendDataStation(self,f,bssid,station):
        """Return the frame.time of the last data frame `station` sent
        through `bssid` (raw string from the temp file).
        """
        command="tshark -r fichier '(wlan.bssid==++++)&&(wlan.sa==****)&&(wlan.fc.type_subtype==0x20)' -T fields -e frame.time|tail -1 > FinSendDataStation.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('FinSendDataStation.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('FinSendDataStation.txt')
        return ligne
    # Number of deauthentication frames from the AP to the suspect station.
    def NbrDesauthWAP(self,f,bssid,station):
        """Count deauth frames (subtype 0x0c) from `bssid` to `station`;
        returns the raw `wc -l` output string.
        """
        command="tshark -r fichier -R '(wlan.fc.type_subtype==0x0c)&&(wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.da==****)' -T fields -e frame.time|wc -l > NbrDesauthWAP.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('NbrDesauthWAP.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('NbrDesauthWAP.txt')
        return ligne
    # Timestamp of the first deauth frame sent by the AP to the suspect station.
    def authWAP(self,f,bssid,station):
        """Return the frame.time of the first deauth frame (subtype 0x0c)
        from `bssid` to `station` (raw string).
        """
        command="tshark -r fichier -R '(wlan.fc.type_subtype==0x0c)&&(wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.da==****)' -T fields -e frame.time|awk '{print $0}'|head -1 > authWAP.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('authWAP.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('authWAP.txt')
        return ligne
    # Timestamp of the last deauth frame sent by the AP to the suspect station.
    def DateFinDesauthWAP(self,f,bssid,station):
        """Return the frame.time of the last deauth frame (subtype 0x0c)
        from `bssid` to `station` (raw string).
        """
        command="tshark -r fichier -R '(wlan.fc.type_subtype==0x0c)&&(wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.da==****)' -T fields -e frame.time|awk '{print $0}'|tail -1 > DateFinDesauthWAP.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('DateFinDesauthWAP.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('DateFinDesauthWAP.txt')
        return ligne
#Compter le nombre de trame d'authentification enovoyées par la station inconnue au WAP
def NbrAuthStationToWAP(self,f,bssid,station):
command="tshark -r fichier -R '((wlan.bssid==++++)&&(wlan.sa==****)&&(wlan.fc.type_subtype==0x0b))'|wc -l > NbrAuthStationToWAP.txt"
x=command.replace("++++","%s"%bssid)
x=x.replace("fichier","%s"%f)
x=x.replace("****","%s"%station)
subprocess.call(x,shell=True)
f=open('NbrAuthStationToWAP.txt','r')
ligne=f.read()
f.close()
#os.remove('NbrAuthStationToWAP.txt')
    # Timestamp of the first auth frame sent by the unknown station to the AP.
    def DateDebAuthStationToWAP(self,f,bssid,station):
        """Return the frame.time of the first auth frame (subtype 0x0b)
        from `station` to `bssid` (raw string).
        """
        command="tshark -r fichier -R '(wlan.bssid==++++)&&(wlan.sa==****)&&(wlan.fc.type_subtype==0x0b)' -T fields -e frame.time|head -1 > DateDebAuthStationToWAP.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('DateDebAuthStationToWAP.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('DateDebAuthStationToWAP.txt')
        return ligne
    # Timestamp of the last auth frame sent by the unknown station to the AP.
    def DateFinAuthStationToWAP(self,f,bssid,station):
        """Return the frame.time of the last auth frame (subtype 0x0b)
        from `station` to `bssid` (raw string).
        """
        command="tshark -r fichier -R '(wlan.bssid==++++)&&(wlan.sa==****)&&(wlan.fc.type_subtype==0x0b)' -T fields -e frame.time|tail -1 > DateFinAuthStationToWAP.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('DateFinAuthStationToWAP.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('DateFinAuthStationToWAP.txt')
        return ligne
#Comptrer le nombre des trames d'association envoyé par la station inconnu vers le AP
def NbrAssoStationToWAP(self,f,bssid,station):
command="tshark -r fichier -R '((wlan.bssid==++++)&&(wlan.sa==****)&&(wlan.fc.type_subtype==0x00))'|wc -l > NbrAssoStationToWAP.txt"
x=command.replace("++++","%s"%bssid)
x=x.replace("fichier","%s"%f)
x=x.replace("****","%s"%station)
subprocess.call(x,shell=True)
f=open('NbrAssoStationToWAP.txt','r')
ligne=f.read()
f.close()
#os.remove('NbrAssoStationToWAP.txt')
    # Timestamp of the first association frame from the unknown station to the AP.
    def DateDebAssoStationToWAP(self,f,bssid,station):
        """Return the frame.time of the first association frame
        (subtype 0x00) from `station` to `bssid` (raw string).
        """
        command18="tshark -r fichier -R '(wlan.bssid==++++)&&(wlan.sa==****)&&(wlan.fc.type_subtype==0x00)' -T fields -e frame.time|head -1 > DateDebAssoStationToWAP.txt"
        x=command18.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('DateDebAssoStationToWAP.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('DateDebAssoStationToWAP.txt')
        return ligne
    # Timestamp of the last association frame from the unknown station to the AP.
    def DateFinAssoStationToWAP(self,f,bssid,station):
        """Return the frame.time of the last association frame
        (subtype 0x00) from `station` to `bssid` (raw string).
        """
        command="tshark -r fichier -R '(wlan.bssid==++++)&&(wlan.sa==****)&&(wlan.fc.type_subtype==0x00)' -T fields -e frame.time|tail -1>DateFinAssoStationToWAP.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('DateFinAssoStationToWAP.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('DateFinAssoStationToWAP.txt')
        return ligne
#Compter le nombre de trame de des-association envoyé par le WAP vers la station inconnue:
def NbrDesassoWAPToStation(self,f,bssid,station):
command="tshark -r fichier -R '(wlan.fc.type_subtype==0x0a)&&(wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.da==****)' -T fields -e frame.time|wc -l > NbrDesassoWAPToStation.txt"
x=command.replace("++++","%s"%bssid)
x=x.replace("fichier","%s"%f)
x=x.replace("****","%s"%station)
subprocess.call(x,shell=True)
f=open('NbrAssoStationToWAP.txt','r')
ligne=f.read()
f.close()
#os.remove('NbrDesassoWAPToStation.txt')
    # Timestamp of the first disassociation frame from the AP to the unknown station.
    def assoWAPToStation(self,f,bssid,station):
        """Return the frame.time of the first disassociation frame
        (subtype 0x0a) from `bssid` to `station` (raw string).
        """
        command="tshark -r fichier -R '(wlan.fc.type_subtype==0x0a)&&(wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.da==****)' -T fields -e frame.time|awk '{print $0}'|head -1 > assoWAPToStation.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('assoWAPToStation.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('assoWAPToStation.txt')
        return ligne
    # Timestamp of the last disassociation frame from the AP to the unknown station.
    def DateFinDesassoWAPToStation(self,f,bssid,station):
        """Return the frame.time of the last disassociation frame
        (subtype 0x0a) from `bssid` to `station` (raw string).
        """
        command="tshark -r fichier -R '(wlan.fc.type_subtype==0x0a)&&(wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.da==****)' -T fields -e frame.time|awk '{print $0}'|tail -1 > DateFinDesassoWAPToStation.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%station)
        subprocess.call(x,shell=True)
        f=open('DateFinDesassoWAPToStation.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('DateFinDesassoWAPToStation.txt')
        return ligne
    # Crack the WEP key with aircrack-ng.
    def crackWEP(self,f,bssid):
        """Run aircrack-ng against `bssid` on capture `f` (interactive
        output goes to the console; nothing is returned).
        """
        command="aircrack-ng -b ++++ fichier"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        subprocess.call(x,shell=True)
    # Decrypt WEP traffic with airdecap-ng.
    def Decrypt(self,f,bssid,PW):
        """Decrypt capture `f` for `bssid` using WEP key `PW`
        (airdecap-ng writes a -dec.pcap file; nothing is returned).
        """
        command="airdecap-ng -l -b ++++ -w **** fichier"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("fichier","%s"%f)
        x=x.replace("****","%s"%PW)
        subprocess.call(x,shell=True)
    # Number of distinct WEP initialization vectors before the attack.
    def NbrVIsAvant(self,f,station,bssid,DatAvant):
        """Count unique WEP IVs not sent by `station` before `DatAvant`;
        returns the raw `wc -l` output string.
        """
        command="tshark -r fichier '(wlan.bssid==++++)&&(wlan.sa!=****)&&(frame.time < \"----\")' -T fields -e wlan.wep.iv|sort -u|wc -l > NbrVIsAvant.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("****","%s"%station)
        x=x.replace("fichier","%s"%f)
        x=x.replace("----","%s"%DatAvant)
        subprocess.call(x,shell=True)
        f=open('NbrVIsAvant.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('NbrVIsAvant.txt')
        return ligne
    # Number of distinct WEP initialization vectors during the attack.
    def NbrVIsDurant(self,f,station,bssid,DatApres,DatAvant):
        """Count unique WEP IVs not sent by `station` between `DatAvant`
        and `DatApres`; returns the raw `wc -l` output string.
        """
        command="tshark -r fichier -R '(wlan.bssid==++++)&&(wlan.sa!=****)&&(frame.time<= \"----\")&&(frame.time>= \"....\")' -T fields -e wlan.wep.iv|sort -u|wc -l > NbrVIsDurant.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("****","%s"%station)
        x=x.replace("fichier","%s"%f)
        x=x.replace("----","%s"%DatApres)
        x=x.replace("....","%s"%DatAvant)
        subprocess.call(x,shell=True)
        f=open('NbrVIsDurant.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('NbrVIsDurant.txt')
        return ligne
    # Number of distinct WEP initialization vectors after the attack.
    def NbrVIsApres(self,f,station,bssid,DatApres):
        """Count unique WEP IVs not sent by `station` after `DatApres`;
        returns the raw `wc -l` output string.
        """
        command="tshark -r fichier -R '(wlan.bssid==++++)&&(wlan.sa!=****)&&(frame.time> \"----\")' -T fields -e wlan.wep.iv|sort -u|wc -l > NbrVIsApres.txt"
        x=command.replace("++++","%s"%bssid)
        x=x.replace("****","%s"%station)
        x=x.replace("fichier","%s"%f)
        x=x.replace("----","%s"%DatApres)
        subprocess.call(x,shell=True)
        f=open('NbrVIsApres.txt','r')
        ligne=f.read()
        f.close()
        #os.remove('NbrVIsApres.txt')
        return ligne
    # Timeline of the attacker's activity.
    def ChronoAttaqueWEP(self,f,bssid,station):
        """Gather the key timestamps of a WEP attack and return them as a
        7-tuple (first auth, first deauth, first data, first disasso,
        last data, last deauth, last auth).

        NOTE(review): `e` is created but never used, and each
        `a.X = a.X(...)` call overwrites the bound method on instance `a`
        with its result -- works only because each is called exactly once.
        """
        e=Examen()
        a=Analyse()
        a.DateDebAuthStationToWAP=a.DateDebAuthStationToWAP(f,bssid,station)
        a.authWAP=a.authWAP(f,bssid,station)
        a.DebSendDataStation=a.DebSendDataStation(f,bssid,station)
        a.assoWAPToStation=a.assoWAPToStation(f,bssid,station)
        a.FinSendDataStation=a.FinSendDataStation(f,bssid,station)
        a.DateFinDesauthWAP=a.DateFinDesauthWAP(f,bssid,station)
        a.DateFinAuthStationToWAP=a.DateFinAuthStationToWAP(f,bssid,station)
        return a.DateDebAuthStationToWAP,a.authWAP,a.DebSendDataStation,a.assoWAPToStation,a.FinSendDataStation,a.DateFinDesauthWAP,a.DateFinAuthStationToWAP
#Connaitre le type d'attaque qui a été réalisé sur le point d'accès
def TypeAttaque(self,NbrVIsAvant,NbrVIsDurant,NbrVIsApres):
if (NbrVIsDurant > NbrVIsAvant and NbrVIsApres < NbrVIsDurant and NbrVIsDurant>1):
TypeAttaque="WEP attack"
else:
TypeAttaque="No WEP attack"
return TypeAttaque
    # Duration of the WEP attack.
    def DureeAttaque(self,DatAvant,DatApres):
        """Compute the attack duration in seconds from two frame.time
        strings, by extracting their HH:MM:SS parts.

        NOTE(review): the bare except swallows every parse error and
        returns the *string* '0', while the success path returns an int --
        callers must cope with both types.
        """
        try:
            # Isolate the HH:MM:SS token of the start timestamp.
            DatAvant=DatAvant.split(",")
            DatAvant=DatAvant[1].replace(" ",",")
            DatAvant=DatAvant.split(",")
            DatAvant=DatAvant[2]
            DatAvant=DatAvant.replace(":",",")
            DatAvant=DatAvant.replace(".",",")
            DatAvant=DatAvant.split(",")
            HD=DatAvant[0]
            MD=DatAvant[1]
            SD=DatAvant[2]
            # Same extraction for the end timestamp.
            DatApres=DatApres.split(",")
            DatApres=DatApres[1].replace(" ",",")
            DatApres=DatApres.split(",")
            DatApres=DatApres[2]
            DatApres=DatApres.replace(":",",")
            DatApres=DatApres.replace(".",",")
            DatApres=DatApres.split(",")
            HF=DatApres[0]
            MF=DatApres[1]
            SF=DatApres[2]
            # Anchor both times on an arbitrary fixed date to subtract them.
            Duree=datetime(year=2014,month=1,day=1,hour=int(HF),minute=int(MF),second=int(SF))-datetime(year=2014,month=1,day=1,hour=int(HD),minute=int(MD),second=int(SD))
            return Duree.seconds
        except:
            return '0'
    #--------------------------------- Evil Twin --------------------------------
    #----------------------------------------------------------------------------
    # Timestamp of the first deauth frame sent by the AP to broadcast.
    def DatDebDesauthWAPToBroad(self,f,bssid):
        """Return the frame.time of the first broadcast deauth frame from
        `bssid`, or 'Not available' when none is found.
        """
        command="tshark -r fichier -R '(wlan.fc.type_subtype==0x0c)&&(wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.da==ff:ff:ff:ff:ff:ff)' -T fields -e frame.time|head -1>DatDebDesauth.txt"
        x=command.replace("fichier","%s"%f)
        x=x.replace("++++","%s"%bssid)
        subprocess.call(x,shell=True)
        f=open('DatDebDesauth.txt','r')
        ligne=f.read()
        f.close()
        ligne=ligne.replace("\n"," ")
        ligne=ligne.strip()
        #os.remove('NbrVIsDurant.txt')
        if ligne!='':
            return ligne
        else:
            return 'Not available'
    # Number of deauth frames sent by the AP to the broadcast address.
    def NbrDesauthEnvoyParAPToBroad(self,f,bssid):
        """Count broadcast deauth frames from `bssid`; returns the raw
        `wc -l` output (stripped), or 'Not available' on any error.
        """
        try:
            command="tshark -r fichier -R '(wlan.fc.type_subtype==0x0c)&&(wlan.bssid==++++)&&(wlan.da==ff:ff:ff:ff:ff:ff)' |sort -u|wc -l >nbrDesauthAPtoBroad.txt"
            x=command.replace("fichier","%s"%f)
            x=x.replace("++++","%s"%bssid)
            subprocess.call(x,shell=True)
            f=open('nbrDesauthAPtoBroad.txt','r')
            ligne=f.read()
            f.close()
            ligne=ligne.replace("\n"," ")
            ligne=ligne.strip()
            #os.remove('NbrVIsDurant.txt')
            return ligne
        except:
            return 'Not available'
    # Timestamp of the last deauth frame sent by the AP to broadcast.
    def DatFinDesauthWAPToBroad(self,f,bssid):
        """Return the frame.time of the last broadcast deauth frame from
        `bssid`, or 'Not available' when none is found.
        """
        command="tshark -r fichier -R '(wlan.fc.type_subtype==0x0c)&&(wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.da==ff:ff:ff:ff:ff:ff)' -T fields -e frame.time|tail -1>DatFinDesauth.txt"
        x=command.replace("fichier","%s"%f)
        x=x.replace("++++","%s"%bssid)
        subprocess.call(x,shell=True)
        f=open('DatFinDesauth.txt','r')
        ligne=f.read()
        f.close()
        ligne=ligne.replace("\n"," ")
        ligne=ligne.strip()
        #os.remove('DatFinDesauth.txt')
        if ligne!='':
            return ligne
        else:
            return 'Not available'
#Numero de sequence pendant la des-authentification.
#def NSeqDurantDesauth(self,f,DatDebDesauth,DatFinDesauth):
# command="tshark -nn -r fichier -R '((wlan.fc.type_subtype==0x08||wlan.fc.type_subtype==0x05)&&(wlan_mgt.fixed.capabilities.ibss==0)&&(frame.time>\"++++\")&&(frame.time<\"****\"))' -T fields -e frame.time -e wlan.seq -e wlan.fc.subtype|sort -u>NSeqDuranDesauth.txt"
# x=command.replace("fichier","%s"%f)
# x=x.replace("++++","%s"%DatDebDesauth)
#x=x.replace("****","%s"%DatFinDesauth)
#subprocess.call(x,shell=True)
#f=open('NSeqDuranDesauth.txt','r')
#ligne=f.readlines()
#for chaine in ligne:
# chaine=chaine.replace("\t","?")
# chaine=chaine.replace(" ","?")
# chaine=chaine.split("?")
# print("Heure :%s ==> Numéro de séquence: %s ==> Sous-type de la trame de gestion envoyé par le point d'accès :%s")%(chaine[4],chaine[5],chaine[6])
#os.remove('NSeqDuranDesauth.txt')
#return chaine[4],chaine[5],chaine[6]
    # Sequence numbers observed after the deauthentication burst.
    def NSeqApreDesauth(self,f,bssid,DatFinDesauth):
        """Inspect the first 20 beacon frames after `DatFinDesauth` and
        return (times, sources, sequence numbers, subtypes) as four
        parallel lists.

        NOTE(review): assumes tshark separates fields so that the useful
        tokens land at indices 4-7 after '?'-splitting -- TODO confirm
        against the tshark version in use.
        """
        command="tshark -r fichier -R '((wlan.bssid==++++)&&(wlan.fc.type_subtype==0x08)&&(wlan_mgt.fixed.capabilities.ess==1)&&(wlan_mgt.fixed.capabilities.ibss==0)&&(frame.time>=\"----\"))' -T fields -e frame.time -e wlan.sa -e wlan.seq -e wlan.fc.subtype|sort -u|head -20>nseqapredesaut.txt"
        x=command.replace("fichier","%s"%f)
        x=x.replace("++++","%s"%bssid)
        x=x.replace("----","%s"%DatFinDesauth)
        subprocess.call(x,shell=True)
        f=open('nseqapredesaut.txt','r')
        ligne=f.readlines()
        chaine1=[]
        chaine2=[]
        chaine3=[]
        chaine4=[]
        for chaine in ligne:
            chaine=chaine.replace("\t","?")
            chaine=chaine.replace(" ","?")
            chaine=chaine.split("?")
            chaine1.append(chaine[4])
            chaine2.append(chaine[5])
            chaine3.append(chaine[6])
            chaine4.append(chaine[7])
        return chaine1,chaine2,chaine3,chaine4
    # Evil-twin creation evidence: beacon/probe frames with SN=0 before the deauth.
    def CreationEvil(self,f,bssid,DatDebDesauth):
        """Look for sequence-number-0 beacon/probe frames before
        `DatDebDesauth` and return two tokens from the first matching
        line, or ('Not available', 'Not available') on any error.

        NOTE(review): returns inside the loop's first iteration; the
        bare except hides index/IO errors.
        """
        try:
            command="tshark -r fichier -R '((wlan.bssid==++++)&&(wlan.fc.type_subtype==0x08)||(wlan.fc.type_subtype==0x05)&&(wlan_mgt.fixed.capabilities.ess==1)&&(wlan_mgt.fixed.capabilities.ibss==0)&&(wlan.seq==0)&&(frame.time<\"----\"))'>creationevil.txt"
            x=command.replace("fichier","%s"%f)
            x=x.replace("++++","%s"%bssid)
            x=x.replace("----","%s"%DatDebDesauth)
            subprocess.call(x,shell=True)
            f=open('creationevil.txt','r')
            ligne=f.readlines()
            for chaine in ligne:
                chaine=chaine.replace("\t","?")
                chaine=chaine.replace(" ","?")
                chaine=chaine.split("?")
                return chaine[13],chaine[17]
        except:
            return 'Not available','Not available'
#os.remove('creationevil.txt')
    # The moment the Evil Twin was created.
    def MomentCreationEvil(self,f,bssid,DatDebDesauth):
        """Return the timestamp (string) of the first SN=0 beacon/probe
        frame before `DatDebDesauth`, i.e. the presumed evil-twin
        creation time.
        """
        command="tshark -r fichier -R '((wlan.bssid==++++)&&(wlan.fc.type_subtype==0x08)||(wlan.fc.type_subtype==0x05)&&(wlan_mgt.fixed.capabilities.ess==1)&&(wlan_mgt.fixed.capabilities.ibss==0)&&(wlan.seq==0)&&(frame.time<\"----\"))' -T fields -e frame.time -e wlan.seq -e wlan.sa -e wlan.da|head>momentcreationevil.txt"
        x=command.replace("fichier","%s"%f)
        x=x.replace("++++","%s"%bssid)
        x=x.replace("----","%s"%DatDebDesauth)
        subprocess.call(x,shell=True)
        f=open('momentcreationevil.txt','r')
        ligne=f.readline()
        # Cut the line at the '\t0' that precedes the seq-number field.
        ligne=ligne.replace("\t0","?")
        ligne=ligne.split("?")
        DateCreationEvil=ligne[0]
        DateCreationEvil=DateCreationEvil.replace(" ","",1)
        DateCreationEvil=DateCreationEvil.replace("\n"," ")
        DateCreationEvil=DateCreationEvil.strip()
        #os.remove('momentcreationevil.txt')
        return DateCreationEvil
    # Transmission channel used by the stations before the evil twin appeared.
    def CanalAvantCreationEvil(self,f,bssid,DatCreationEvil):
        """Return (channels, sources, destinations) of beacon/probe frames
        seen before `DatCreationEvil`, as three parallel lists.
        """
        command="tshark -r fichier -R '((wlan.bssid==++++)&&(wlan.fc.type_subtype==0x08)||(wlan.fc.type_subtype==0x05)&&(frame.time<\"****\"))' -T fields -e wlan_mgt.ds.current_channel -e wlan.sa -e wlan.da|sort -u>canalavantcreationevil.txt"
        x=command.replace("fichier","%s"%f)
        x=x.replace("++++","%s"%bssid)
        x=x.replace("****","%s"%DatCreationEvil)
        subprocess.call(x,shell=True)
        f=open('canalavantcreationevil.txt','r')
        ligne=f.readlines()
        chaine1=[]
        chaine2=[]
        chaine3=[]
        for chaine in ligne:
            # Normalize tab/space separators, then clean the three fields.
            chaine=chaine.replace("\t","?")
            chaine=chaine.replace(" ","?")
            chaine=chaine.split("?")
            chaine[2]=chaine[2].replace("\n"," ")
            chaine[2]=chaine[2].strip()
            chaine[1]=chaine[1].replace("\n"," ")
            chaine[1]=chaine[1].strip()
            chaine[0]=chaine[0].replace("\n"," ")
            chaine[0]=chaine[0].strip()
            chaine1.append(chaine[0])
            chaine2.append(chaine[1])
            chaine3.append(chaine[2])
        return chaine1, chaine2, chaine3
    # Transmission channel of the stations after the evil twin was created.
    def CanalApreCreationEvil(self,f,bssid,DatCreationEvil):
        """Return (channels, sources, subtypes) of beacon/probe frames seen
        after `DatCreationEvil`, as three parallel lists.
        """
        command="tshark -r fichier -R '((wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.fc.type_subtype==0x08)||(wlan.fc.type_subtype==0x05)&&(frame.time>\"----\"))' -T fields -e wlan_mgt.ds.current_channel -e wlan.sa -e wlan.fc.subtype|sort -u > canalAprescreationevil.txt"
        x=command.replace("fichier","%s"%f)
        x=x.replace("++++","%s"%bssid)
        x=x.replace("----","%s"%DatCreationEvil)
        subprocess.call(x,shell=True)
        f=open('canalAprescreationevil.txt','r')
        ligne=f.readlines()
        chaine1=[]
        chaine2=[]
        chaine3=[]
        for chaine in ligne:
            # Normalize tab/space separators, then clean the three fields.
            chaine=chaine.replace("\t","?")
            chaine=chaine.replace(" ","?")
            chaine=chaine.split("?")
            chaine[2]=chaine[2].replace("\n"," ")
            chaine[2]=chaine[2].strip()
            chaine[1]=chaine[1].replace("\n"," ")
            chaine[1]=chaine[1].strip()
            chaine[0]=chaine[0].replace("\n"," ")
            chaine[0]=chaine[0].strip()
            chaine1.append(chaine[0])
            chaine2.append(chaine[1])
            chaine3.append(chaine[2])
        return chaine1,chaine2,chaine3
# Channel on which the first beacon frame was sent by the evil twin.
def NumCanalPremiereBalise(self,f,bssid,DatCreationEvil):
    """Return four parallel lists (channel, frame subtype, source, time) for
    the first beacon/probe-response frames sent after ``DatCreationEvil``.
    Fixes: removed the unused ``a=Analyse()`` local and closed the tshark
    output file deterministically.
    """
    command="tshark -r fichier -R '((wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.fc.type_subtype==0x08)||(wlan.fc.type_subtype==0x05)&&(frame.time>\"----\"))' -T fields -e wlan.fc.type_subtype -e wlan_mgt.ds.current_channel -e wlan.sa -e wlan.da -e frame.time|head > numcanal1balise.txt"
    x=command.replace("fichier","%s"%f)
    x=x.replace("++++","%s"%bssid)
    x=x.replace("----","%s"%DatCreationEvil)
    subprocess.call(x,shell=True)
    chaine1=[]
    chaine2=[]
    chaine3=[]
    chaine4=[]
    with open('numcanal1balise.txt','r') as sortie:
        for ligne in sortie.readlines():
            champs=ligne.replace("\t","?").split("?")
            # Field order in the file: subtype, channel, source, dest, time.
            chaine1.append(champs[1].replace("\n"," ").strip())
            chaine2.append(champs[0].replace("\n"," ").strip())
            chaine3.append(champs[2].replace("\n"," ").strip())
            chaine4.append(champs[4].replace("\n"," ").strip())
    return chaine1,chaine2, chaine3, chaine4
# Channel of the evil twin.
def CanalEvilTwin(self,chaine,liste):
    """Return the first channel in ``liste`` that differs from the legitimate
    AP's channel, or 'Non disponnible' when none differs.

    ``chaine`` looks like "CH: 6"; ``str.strip("CH: ")`` removes the
    characters 'C', 'H', ':' and ' ' from both ends, leaving the number.
    Fix: replaced the Python-2-only ``<>`` operator with ``!=``.
    """
    canal_legitime = chaine.strip("CH: ")
    for canal in liste:
        if canal != canal_legitime:
            return canal
    return 'Non disponnible'
# Start of the evil twin's activity.
def DebEvil(self,f,bssid,DatCreationEvil,n):
    """Return the timestamp of the first beacon/probe-response frame sent by
    the evil twin on channel ``n`` after ``DatCreationEvil`` (None when
    tshark produced no output).

    Fixes: the original discarded the results of ``replace('\\n',' ')`` and
    ``strip()`` (strings are immutable), returning a timestamp with a
    trailing newline; results are now assigned. The output file is closed.
    """
    command="tshark -r fichier -R '((wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.fc.type_subtype==0x08)||(wlan.fc.type_subtype==0x05)&&(frame.time>\"----\")&&(wlan_mgt.ds.current_channel==****))' -T fields -e wlan.fc.type_subtype -e wlan_mgt.ds.current_channel -e wlan.sa -e wlan.da -e frame.time|head -1 > debEvil.txt"
    x=command.replace("fichier","%s"%f)
    x=x.replace("++++","%s"%bssid)
    x=x.replace("----","%s"%DatCreationEvil)
    x=x.replace("****","%s"%n)
    subprocess.call(x,shell=True)
    with open('debEvil.txt','r') as sortie:
        for ligne in sortie.readlines():
            champs=ligne.replace("\t","?").split("?")
            horodatage=champs[4].replace("\n"," ")
            horodatage=horodatage.replace(" ","",1)
            return horodatage.strip()
#os.remove('momentcreationevil.txt')
# End of the evil twin's activity.
def FinEvil(self,f,bssid,DatCreationEvil,n):
    """Return the timestamp of the last beacon/probe-response frame sent by
    the evil twin on channel ``n`` after ``DatCreationEvil`` (None when
    tshark produced no output).

    Fixes: assigned the previously-discarded ``replace``/``strip`` results
    (the original returned a timestamp with a trailing newline) and closed
    the output file.
    """
    command="tshark -r fichier -R '((wlan.bssid==++++)&&(wlan.sa==++++)&&(wlan.fc.type_subtype==0x08)||(wlan.fc.type_subtype==0x05)&&(frame.time>\"----\")&&(wlan_mgt.ds.current_channel==****))' -T fields -e wlan.fc.type_subtype -e wlan_mgt.ds.current_channel -e wlan.sa -e wlan.da -e frame.time|tail -1>FinEvil.txt"
    x=command.replace("fichier","%s"%f)
    x=x.replace("++++","%s"%bssid)
    x=x.replace("----","%s"%DatCreationEvil)
    x=x.replace("****","%s"%n)
    subprocess.call(x,shell=True)
    with open('FinEvil.txt','r') as sortie:
        for ligne in sortie.readlines():
            champs=ligne.replace("\t","?").split("?")
            horodatage=champs[4].replace("\n"," ")
            horodatage=horodatage.replace(" ","",1)
            return horodatage.strip()
# Number of management frames sent per station (legitimate/WAP/suspect).
def NbrGestionEnvoyParStation(self,f,bssid,station):
    """Write per-destination counts of management frames sent by ``station``
    (most frequent first) to NbrGestion.txt. Produces a file only; returns
    nothing.
    """
    modele="tshark -r fichier -R '(wlan.fc.type==0)&&(wlan.bssid==++++)&&(wlan.sa==****)' -T fields -e wlan.da|sort|uniq -c|sort -nr>NbrGestion.txt"
    x=modele.replace("fichier","%s"%f).replace("++++","%s"%bssid).replace("****","%s"%station)
    subprocess.call(x,shell=True)
def TypeAttaqueEvil(self,legitime,evil):
    """Classify the capture as an evil-twin attack when the observed channel
    differs from the legitimate one and is actually available."""
    est_attaque = (legitime != evil) and (evil != 'Non disponnible')
    return "Attaque EVIL TWIN" if est_attaque else "Aucune Attaque Evil"
#----------------------------------------------- DoS attack ------------------------------------------------------------------------------------
# Station that sent the NULL data frames.
def StationNULL(self,f,bssid):
    """Return (count, source_address) for the station that sent the most
    NULL data frames, or ('0', 'Not available') when parsing fails.

    Fixes: narrowed the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) to ``except Exception`` and closed the
    tshark output file.
    """
    try:
        command="tshark -r fichier -R '((wlan.fc.type_subtype==0x24)&&(wlan.bssid==++++))' -T fields -e wlan.sa -e wlan.da|sort|uniq -c|sort -nr>StationNULL.txt"
        x=command.replace("fichier","%s"%f)
        x=x.replace("++++","%s"%bssid)
        subprocess.call(x,shell=True)
        with open('StationNULL.txt','r') as sortie:
            premiere=sortie.readlines()[0]
        # 'uniq -c' output: "  <count> <sa>\t<da>"; normalize to tabs.
        champs=premiere.strip().replace(" ","\t").split("\t")
        return champs[0],champs[1]
    except Exception:
        return '0','Not available'
# Whether the attack succeeded.
def TypeAttaqueDos(self,NbrDesauth):
    """Flag a DoS attack when more than 1000 deauthentication frames were
    captured; ``NbrDesauth`` may be a numeric string."""
    seuil = 1000
    if int(NbrDesauth) <= seuil:
        return "No DoS attack"
    return "DoS attack"
# Number of NULL frames sent by all stations.
def NbrNULL(self,f,bssid):
    """Return the total number of NULL data frames for ``bssid`` as the raw
    ``wc -l`` output line (newline included), or '0' on failure.

    Fixes: narrowed the bare ``except:`` to ``except Exception`` and closed
    the output file.
    """
    try:
        command="tshark -r fichier -R '((wlan.fc.type_subtype==0x24)&&(wlan.bssid==++++))'|wc -l > StationNULL.txt"
        x=command.replace("fichier","%s"%f)
        x=x.replace("++++","%s"%bssid)
        subprocess.call(x,shell=True)
        with open('StationNULL.txt','r') as sortie:
            return sortie.readlines()[0]
    except Exception:
        return '0'
# Number of NULL frames sent before the deauthentication.
def NbrNullAvanAttak(self,f,bssid,debDesauth):
    """Return the number of NULL data frames captured before ``debDesauth``
    as the raw ``wc -l`` output line. Fix: the output file is now closed.
    """
    command="tshark -r fichier -R '((wlan.fc.type_subtype==0x24)&&(wlan.bssid==++++)&&(frame.time<\"----\"))'|wc -l>nbrNULLavant.txt"
    x=command.replace("fichier","%s"%f)
    x=x.replace("++++","%s"%bssid)
    x=x.replace("----","%s"%debDesauth)
    subprocess.call(x,shell=True)
    with open('nbrNULLavant.txt','r') as sortie:
        return sortie.readlines()[0]
# Number of NULL frames sent during the deauthentication.
def NbrNULLduranAttak(self,f,bssid,debDesauth,finDesauth):
    """Return the number of NULL data frames captured between ``debDesauth``
    and ``finDesauth`` (inclusive) as the raw ``wc -l`` output line.
    Fix: the output file is now closed.
    """
    command="tshark -r fichier -R '((wlan.fc.type_subtype==0x24)&&(wlan.bssid==++++)&&(frame.time<=\"----\")&&(frame.time>=\"****\"))'|wc -l > NbrNULLduranAttak.txt"
    x=command.replace("fichier","%s"%f)
    x=x.replace("++++","%s"%bssid)
    x=x.replace("----","%s"%finDesauth)
    x=x.replace("****","%s"%debDesauth)
    subprocess.call(x,shell=True)
    with open('NbrNULLduranAttak.txt','r') as sortie:
        return sortie.readlines()[0]
# Number of NULL frames sent after the deauthentication.
def NbrNullApresAttak(self,f,bssid,finDesauth):
    """Return the number of NULL data frames captured after ``finDesauth``
    as the raw ``wc -l`` output line. Fix: the output file is now closed.
    """
    command="tshark -r fichier -R '((wlan.fc.type_subtype==0x24)&&(wlan.bssid==++++)&&(frame.time>\"----\"))'|wc -l >NbrNullApresAttak.txt"
    x=command.replace("fichier","%s"%f)
    x=x.replace("++++","%s"%bssid)
    x=x.replace("----","%s"%finDesauth)
    subprocess.call(x,shell=True)
    with open('NbrNullApresAttak.txt','r') as sortie:
        return sortie.readlines()[0]
# Time of the first NULL frame.
def DebEnvoiNULL(self,f,bssid):
    """Return the timestamp of the first NULL data frame (None when tshark
    produced no output).

    Fixes: the original discarded the results of ``replace('\\n',' ')`` and
    ``strip()`` (strings are immutable), returning a timestamp with a
    trailing newline; results are now assigned. The file is closed.
    """
    command="tshark -r fichier -R '((wlan.fc.type_subtype==0x24)&&(wlan.bssid==++++))' -T fields -e frame.time|head -1 > DebEnvoiNULL.txt"
    x=command.replace("fichier","%s"%f)
    x=x.replace("++++","%s"%bssid)
    subprocess.call(x,shell=True)
    with open('DebEnvoiNULL.txt','r') as sortie:
        for ligne in sortie.readlines():
            horodatage=ligne.replace("\n"," ").replace(" ","",1)
            return horodatage.strip()
# Time of the last NULL frame.
def FinEnvoiNULL(self,f,bssid):
    """Return the timestamp of the last NULL data frame (None when tshark
    produced no output). Same fixes as DebEnvoiNULL: previously-discarded
    string-method results are now assigned, and the file is closed.
    """
    command="tshark -r fichier -R '((wlan.fc.type_subtype==0x24)&&(wlan.bssid==++++))' -T fields -e frame.time|tail -1 > FinEnvoiNULL.txt"
    x=command.replace("fichier","%s"%f)
    x=x.replace("++++","%s"%bssid)
    subprocess.call(x,shell=True)
    with open('FinEnvoiNULL.txt','r') as sortie:
        for ligne in sortie.readlines():
            horodatage=ligne.replace("\n"," ").replace(" ","",1)
            return horodatage.strip()
|
import sys
from collections import Counter
# Counter: builds a dictionary counting the occurrences of each element.
def mean(num):
    """Return the arithmetic mean of ``num`` rounded to the nearest integer.

    Fix: uses ``len(num)`` instead of the module-level global ``n`` so the
    function is self-contained (identical result for the script's call).
    NOTE(review): Python's round() uses banker's rounding on .5 ties.
    """
    return round(sum(num) / len(num))
def median(num):
    """Return the middle element of the pre-sorted list ``num``.

    For even lengths this returns the upper of the two middle elements,
    exactly as the original did. Fix: uses ``len(num)`` instead of the
    global ``n``; the separate length-1 branch was redundant
    (``num[1 // 2]`` is ``num[0]``).
    """
    return num[len(num) // 2]
def most(num):
    """Return the mode of the pre-sorted list ``num``.

    When two values tie for most frequent, the second-smallest is returned:
    Counter.most_common preserves first-seen order for ties and the input
    is sorted ascending. Fixes: removed the unused ``b_list`` local and the
    dependency on the global ``n``.
    """
    if len(num) == 1:
        return num[0]
    top_two = Counter(num).most_common(2)
    if top_two[0][1] == top_two[1][1]:
        # Tie: report the second-smallest of the tied values.
        return top_two[1][0]
    return top_two[0][0]
def range_(num):
    """Return max - min of the pre-sorted list ``num``.

    Fix: the original ignored its ``num`` parameter and read the global
    ``a_list`` (and global ``n``); result is unchanged for the script's
    only call site, ``range_(a_list)``.
    """
    if len(num) == 1:
        return 0
    return num[-1] - num[0]
# Read n integers (one per line) from stdin, sort ascending, and print the
# four statistics. NOTE: ``n`` and ``a_list`` are module-level names that the
# helper functions above may read, so do not rename them.
n = int(sys.stdin.readline())
a_list = []
for _ in range(n):
    a_list.append(int(sys.stdin.readline()))
a_list.sort()
print(mean(a_list))
print(median(a_list))
print(most(a_list))
print(range_(a_list))
|
import json
from prepare_text import TextPreparation
class ComposeData:
    """Builds a labelled training corpus from raw text files.

    ``mapping`` maps source file names to labels (e.g. 'suicide', 'normal');
    ``file_write`` is the JSON output path. Records inside a file are
    separated by SEPARATOR.
    """

    # Record separator used both in the source files and when merging files.
    SEPARATOR = '-------------------------------'

    def __init__(self, mapping, file_write):
        self._mapping = mapping
        self._file_write = file_write

    def get_data_from_file(self):
        """Read every source file and return {label: raw_text}.

        Bug fix: the original assigned ``result[label] = file.read()``, so a
        second file with the same label (suicide_data2.txt) silently
        overwrote the first. Contents for the same label are now joined
        with the record separator. Files are read as UTF-8 to match the
        UTF-8 output side.
        """
        data_and_type_mapping = {}
        for file_name, label in self._mapping.items():
            with open(file_name, encoding='utf-8') as file:
                content = file.read()
            if label in data_and_type_mapping:
                data_and_type_mapping[label] += self.SEPARATOR + content
            else:
                data_and_type_mapping[label] = content
        return data_and_type_mapping

    def write_data_to_file(self, data):
        """Serialize ``data`` to the output file as UTF-8 JSON."""
        with open(self._file_write, 'w', encoding='utf-8') as outfile:
            json.dump(data, outfile, ensure_ascii=False)

    def get_data(self):
        """Split each label's text into records, normalize each with
        TextPreparation, and return {label: [{'text': prepared}, ...]}.
        Blank records between separators are skipped.
        """
        training_data = {}
        for label, data_text in self.get_data_from_file().items():
            data_list = data_text.split(self.SEPARATOR)
            training_data[label] = []
            for step, data in enumerate(data_list):
                if step % 1000 == 0:
                    print(f'STEP: {step}')  # progress indicator
                if not data.strip():
                    continue
                prepared_data = TextPreparation(data).prepare_text()
                training_data[label].append({'text': prepared_data})
        return training_data

    def fill_training_data(self):
        """Read, prepare and write the complete training data set."""
        self.write_data_to_file(self.get_data())
if __name__ == '__main__':
    # Keys are source file paths, values are labels; both suicide corpora
    # share the 'suicide' label.
    mapping = {
        'data/suicide_data.txt': 'suicide',
        'data/suicide_data2.txt': 'suicide',
        'data/normal_data.txt': 'normal',
    }
    file_write = 'data/training_data.json'
    cd = ComposeData(mapping, file_write)
    cd.fill_training_data()
|
import unittest
from unittest.mock import Mock
from time import sleep
from zmqmw.implementations.notifier.publisher.PublisherNotifierStrategy import PublisherNotifierStrategy
from zmqmw.implementations.proxy.BrokerProxyStrategy import BrokerProxyStrategy
from zmqmw.implementations.proxy.publisher.PublisherProxyStrategy import PublisherProxyStrategy
from zmqmw.implementations.proxy.subscriber.SubscriberProxyStrategy import SubscriberProxyStrategy
from zmqmw.middleware.BrokerInfo import BrokerInfo
from zmqmw.middleware.PublisherInfo import PublisherInfo
from zmqmw.middleware.adapter.BrokerClient import BrokerClient
from zmqmw.middleware.adapter.PublisherClient import PublisherClient
from multiprocessing import Process, Value
from zmqmw.middleware.adapter.SubscriberClient import SubscriberClient
from zmqmw.middleware.handler.MessageHandler import MessageHandler
class TestHandler(MessageHandler):
    """Handler that accumulates the numeric payload of each 'topic:value'
    message into a shared multiprocessing value."""

    def __init__(self, v):
        self.value = v

    def handle_message(self, value: str) -> None:
        payload = value.split(":")[1]
        self.value.value += int(payload)
def start_proxy():
    """Run a broker proxy on localhost (XPUB 6000 / XSUB 7000); blocks."""
    strategy = BrokerProxyStrategy(broker_address="127.0.0.1",
                                   broker_xpub_port=6000,
                                   broker_xsub_port=7000)
    BrokerClient(strategy).run()
def start_subscriber(th):
    """Subscribe handler ``th`` to the 'test' topic and listen until the
    blocking listen() is torn down, then close the client."""
    info = BrokerInfo(broker_address="127.0.0.1", broker_pub_port=6000)
    client = SubscriberClient(subscriber_strategy=SubscriberProxyStrategy(broker_info=info))
    client.subscribe(topic='test', handlers=[th])
    try:
        client.listen()
    except Exception:
        client.close()
def start_publisher():
    """Publish 26 'test' messages with value 1, one every 0.1 s."""
    info = BrokerInfo(broker_address="127.0.0.1", broker_sub_port=7000)
    client = PublisherClient(strategy=PublisherProxyStrategy(broker_info=info))
    client.register(topics=['test'])
    for _ in range(26):
        client.publish(topic='test', val=1)
        sleep(0.1)
class TestRun(unittest.TestCase):
    def test_proxy_run(self):
        """End-to-end smoke test wiring proxy, subscriber and publisher
        processes together for 3 seconds.

        NOTE(review): the publisher sends 26 messages of value 1 but 25.0 is
        expected — presumably the first message is lost while the PUB/SUB
        subscription propagates (slow-joiner) — TODO confirm.
        """
        v: Value = Value('d', 0)  # shared accumulator across processes
        th = TestHandler(v)
        proxy = Process(target=start_proxy, args=())
        proxy.start()
        subscriber = Process(target=start_subscriber, args=[th])
        subscriber.start()
        publisher = Process(target=start_publisher, args=())
        publisher.start()
        sleep(3)  # long enough for 26 publishes at 0.1 s intervals
        proxy.terminate()
        subscriber.terminate()
        publisher.terminate()
        self.assertEqual(25.0, v.value)
|
from datetime import datetime
class Group(object):
    """A named group in the client's hierarchy.

    ``created``/``updated`` are parsed from ISO-8601 strings with
    milliseconds and a literal 'Z' suffix (e.g. '2020-01-02T03:04:05.000Z');
    ids are coerced to int. ``created``, ``updated`` and ``parent_id`` are
    required keyword arguments.

    Raises:
        ValueError: when ``name`` is empty.
    """

    def __init__(self, client, id, name, **kwargs):
        self.client = client
        self.id = id
        if len(name) < 1:
            # Bug fix: the original did ``raise("...")``, which raises a
            # TypeError (strings are not exceptions) instead of a
            # meaningful error.
            raise ValueError("Group name cannot be < 1 chars")
        self.name = name
        self.display_name = name
        self.created = kwargs['created']
        self.updated = kwargs['updated']
        self.parent_id = kwargs['parent_id']

    def __repr__(self):
        items = ("%s = %r" % (k, v) for k, v in self.__dict__.items())
        return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items))

    def has_parent(self):
        """Return True when this group has a parent group."""
        return self.parent_id is not None

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, v):
        self._id = int(v)

    @property
    def created(self):
        return self._created

    @created.setter
    def created(self, v):
        self._created = datetime.strptime(v, "%Y-%m-%dT%H:%M:%S.%fZ")

    @property
    def updated(self):
        return self._updated

    @updated.setter
    def updated(self, v):
        self._updated = datetime.strptime(v, "%Y-%m-%dT%H:%M:%S.%fZ")

    @property
    def parent_id(self):
        return self._parent_id

    @parent_id.setter
    def parent_id(self, v):
        self._parent_id = int(v) if v is not None else None
|
import abc
class Cipher(metaclass=abc.ABCMeta):
    """Abstract base class for ciphers.

    Concrete subclasses must provide streaming contexts (encryptor /
    decryptor) and one-shot encrypt / decrypt operations.
    """
    @abc.abstractmethod
    def encryptor(self):
        """Return the encryptor context."""
    @abc.abstractmethod
    def decryptor(self):
        """Return the decryptor context."""
    @abc.abstractmethod
    def encrypt(self, data):
        """Encrypt data and return encrypted data."""
    @abc.abstractmethod
    def decrypt(self, data):
        """Decrypt data and return decrypted data."""
class StreamCipher(Cipher):
    """Abstract base class for stream ciphers (marker type; adds no API)."""
class BlockCipher(Cipher):
    """Abstract base class for block ciphers (marker type; adds no API)."""
class BlockCipherECB(BlockCipher):
    """Abstract base class for block ciphers in ECB mode (marker type)."""
class BlockCipherCBC(BlockCipher):
    """Abstract base class for block ciphers in CBC mode (marker type)."""
class BlockCipherCFB(BlockCipher):
    """Abstract base class for block ciphers in CFB mode (marker type)."""
class BlockCipherOFB(BlockCipher):
    """Abstract base class for block ciphers in OFB mode (marker type)."""
class BlockCipherCTR(BlockCipher):
    """Abstract base class for block ciphers in CTR mode (marker type)."""
|
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import random
from sklearn.datasets import load_breast_cancer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.model_selection import GridSearchCV
class BinaryClassifier():
    """Hand-rolled logistic regression (mini-batch GD, Adagrad, 3-fold CV
    grid search) compared against sklearn's SVM on the breast-cancer data.

    NOTE(review): several spots below look suspicious and are flagged
    inline (gradient collapsed to a scalar by np.sum, inconsistent gradient
    sign between the CV loop and the retrain loop, inverted prediction
    threshold, scaler re-fit on test data). Code is left byte-identical;
    comments only.
    """
    def __init__(self, train_data, train_target):
        #Data is loaded from sklearn.datasets.load_breast_cancer
        #train_data is training feature data and train_target is your train label data.
        #add new column of 1's to training data for w_0 as bias
        newCol = np.ones((train_data.shape[0], 1))
        train_data = np.append(train_data, newCol, 1)
        #normalize data
        # NOTE(review): scaling is applied after the bias column is appended,
        # so the constant column is standardized too (becomes all zeros) —
        # confirm this is intended.
        scaler = StandardScaler()
        train_data = scaler.fit_transform(train_data)
        # Hold out 10% as an internal test split; labels become n*1 columns.
        X_train, X_test, y_train, y_test=train_test_split(train_data,train_target,test_size=0.1)
        X_train = np.array(X_train)
        y_train = np.array([y_train]).T
        X_test = np.array(X_test)
        y_test = np.array([y_test]).T
        self.X_test = X_test
        self.y_test = y_test
        self.X = X_train
        self.y = y_train
        self.X_batch = 0
        self.y_batch = 0
        # Best hyper-parameters/weights found during CV grid search.
        self.bestLoss = float("inf")
        self.bestW = None
        self.bestLambda = None
        self.bestAlpha = None
        self.clf = None
    def iterate_minibatches(self, inputs, targets, batchsize):
        #helps generate mini-batches
        # Yields consecutive (inputs, targets) slices; the final batch may
        # be smaller than batchsize.
        assert inputs.shape[0] == targets.shape[0]
        for start_idx in range(0, inputs.shape[0], batchsize):
            excerpt = slice(start_idx, start_idx + batchsize)
            yield inputs[excerpt], targets[excerpt]
    def logistic_training(self, alpha, lam, nepoch, epsilon):
        """Training process of logistic regression will happen here. User will provide
        learning rate alpha, regularization term lam, specific number of training epoches,
        and a variable epsilon to specify pre-mature end condition,
        i.e., if error < epsilon, training stops.
        Implementation includes 3-fold validation and mini-batch GD"""
        ep = 10**-10  # numerical guard inside log()
        #3-fold cross validation
        data = np.append(self.X, self.y, 1)
        np.random.shuffle(data)
        split1, split2, split3, = np.array_split(data, 3)
        for cv in range(3):
            # NOTE(review): 'is' compares identity, not equality; it happens
            # to work for small ints in CPython but should be '=='.
            if cv is 0:
                validation = split1
                training = np.append(split2, split3, 0)
            elif cv is 1:
                validation = split2
                training = np.append(split1, split3, 0)
            elif cv is 2:
                validation = split3
                training = np.append(split1, split2, 0)
            X_train = training[:,:-1]
            y_train = np.array([training[:,-1]]).T
            X_val = validation[:,:-1]
            y_val = np.array([validation[:,-1]]).T
            mini_batch_size = 32
            curAlpha = alpha[0]
            curLambda = lam[0]
            # NOTE(review): curLambda is never reset inside the outer while,
            # so after the first alpha the inner lambda grid runs at most
            # once — confirm this is intended.
            while (curAlpha <= alpha[1]): #mult min alpha by some number until max alpha
                while (curLambda <= lam[1]): #mult min lam by some number until max lam
                    w = np.random.rand(1, X_train.shape[1])
                    for epoch in range(nepoch):
                        for batch in self.iterate_minibatches(X_train, y_train, mini_batch_size):
                            self.X_batch, self.y_batch = batch
                            #forward propagation
                            pred = 1/(1 + np.exp(-1 * np.dot(w,self.X_batch.T)))
                            #loss is sum of cross entropy (my loss is kinda more like cost)
                            loss = -(1/self.X_batch.shape[0]) * np.sum((self.y_batch * np.log(pred + ep) + (1-self.y_batch) * np.log(1-pred + ep))) + (1/2) * curLambda * np.sum(w**2)
                            #cost = (loss / mini_batch_size) + (1/2) * curLambda * np.sum(w**2)
                            #backward propagation
                            w_grad = w
                            #w_grad = -(1/mini_batch_size) * np.dot(self.X_batch.T, (pred - self.y_batch).T) + curLambda * w
                            # NOTE(review): np.sum collapses the data term to a
                            # scalar (same gradient for every weight), and the
                            # leading '-' used in the retrain loop below is
                            # missing here — confirm both.
                            w_grad = (1/self.X_batch.shape[0]) * np.sum(np.dot((self.y_batch - pred), self.X_batch)) + curLambda * w
                            #Adagrad
                            w = w - (curAlpha / np.sqrt(np.sum(w_grad**2))) * w_grad
                            #Vanilla Grad
                            #w = w - curAlpha * w_grad
                        #ceiling to prevent overflow
                        if (loss > 10000):
                            break
                        # Comparing loss to epsilon
                        if loss < epsilon:
                            break
                    cvPred = 1/(1 + np.exp(-1 * np.dot(w,X_val.T)))
                    # NOTE(review): unlike the training loss this one is not
                    # negated — confirm the intended sign.
                    cvLoss = (1/X_val.shape[0])*np.sum((y_val * np.log(cvPred + ep) + (1-y_val) * np.log(1-cvPred + ep))) + (1/2) * curLambda * np.sum(w**2)
                    if cvLoss < self.bestLoss:
                        self.bestLoss, self.bestW, self.bestAlpha, self.bestLambda = cvLoss, w, curAlpha, curLambda
                    curLambda *= 1.1
                curAlpha *= 1.1
        #train with all data
        # Retrain from the best CV weights using the best hyper-parameters.
        mini_batch_size = 32
        w = self.bestW
        curLambda = self.bestLambda
        curAlpha = self.bestAlpha
        for epoch in range(nepoch*3):
            for batch in self.iterate_minibatches(self.X, self.y, mini_batch_size):
                self.X_batch, self.y_batch = batch
                #forward propagation
                pred = 1/(1 + np.exp(-1 * np.dot(w,self.X_batch.T)))
                #loss is sum of cross entropy
                loss = (1 / self.X_batch.shape[0]) * np.sum((self.y_batch * np.log(pred + ep) + (1-self.y_batch) * np.log(1-pred + ep))) + (1/2) * curLambda * np.sum(w**2)
                #currently don't do anything with cost
                #cost = (loss / mini_batch_size) + (1/2) * curLambda * np.sum(w**2)
                #backward propagation
                w_grad = w
                #w_grad = -(1/mini_batch_size) * np.dot(self.X_batch.T, (pred - self.y_batch).T) + curLambda * w
                w_grad = -(1 / self.X_batch.shape[0]) * np.sum(np.dot((self.y_batch - pred), self.X_batch)) + curLambda * w
                #Adagrad
                w = w - (curAlpha / np.sqrt(np.sum(w_grad**2))) * w_grad
                #Vanilla Grad
                #w = w - curAlpha * w_grad
            #ceiling to prevent overflow
            if (loss > 10000):
                break
            # Comparing loss to epsilon
            if loss < epsilon:
                break
        self.bestW = w
    def logistic_testing(self, testX):
        """TestX should be a numpy array
        Uses trained weight and bias to compute the predicted y values,
        Predicted y values should be 0 or 1. returns the numpy array in shape n*1"""
        #add new column of 1's to training data for w_0 as bias
        newCol = np.ones((testX.shape[0], 1))
        testX = np.append(testX, newCol, 1)
        #normalize data
        # NOTE(review): fitting a fresh scaler on the test set leaks test
        # statistics; the training scaler should be reused — confirm.
        scaler = StandardScaler()
        testX = scaler.fit_transform(testX)
        y = np.ones(testX.shape[0])  # NOTE(review): overwritten immediately
        y = 1/(1 + np.exp(-1 * np.dot(self.bestW,testX.T)))
        # NOTE(review): '<' inverts the usual sigmoid convention
        # (pred > 0.5 => class 1); possibly compensating for the flipped
        # gradient sign above — confirm.
        y = (y < 0.5).astype(int)
        y = y.T
        return y
    def svm_training(self, gamma, C):
        """Uses sklearn's built-in GridSearchCV and SVM methods for comparison with logistic regression model"""
        parameters = {'gamma': gamma, 'C': C}
        #defaults RBF
        svc = svm.SVC()
        self.clf = GridSearchCV(svc, parameters)
        # NOTE(review): self.y is an n*1 column vector; sklearn expects a
        # 1-D label array — confirm.
        self.clf.fit(self.X, self.y)
    def svm_testing(self, testX):
        """TestX should be a numpy array
        Uses trained weight and bias to compute the predicted y values,
        Predicted y values should be 0 or 1. returns the numpy array in shape n*1"""
        #add new column of 1's to training data for w_0 as bias
        newCol = np.ones((testX.shape[0], 1))
        testX = np.append(testX, newCol, 1)
        #normalize data
        # NOTE(review): same test-set scaler re-fit issue as
        # logistic_testing — confirm.
        scaler = StandardScaler()
        testX = scaler.fit_transform(testX)
        y = self.clf.predict(testX)
        y = (y > 0.5).astype(int)
        y = np.array([y]).T
        return y
#main testing
dataset = load_breast_cancer(as_frame=True)
#Dataset is divided into 90% and 10%, 90% for you to perform k-fold validation and 10% for testing
train_data = dataset['data'].sample(frac=0.9, random_state=0) # random state is a seed value
train_target = dataset['target'].sample(frac=0.9, random_state=0) # random state is a seed value
# The remaining 10% (rows not sampled above) form the held-out test split.
test_data = dataset['data'].drop(train_data.index)
test_target = dataset['target'].drop(train_target.index)
model = BinaryClassifier(train_data, train_target)
# Compute the time to do grid search on training logistic
logistic_start = time.time()
# args: [alpha_min, alpha_max], [lambda_min, lambda_max], nepoch, epsilon
model.logistic_training([10**-10, 10], [10e-10, 1e10], 400, 10**-6)
logistic_end = time.time()
# Compute the time to do grid search on training SVM
svm_start = time.time()
# args: gamma grid, C grid for GridSearchCV
model.svm_training([1e-9, 1000], [0.01, 1e10])
svm_end = time.time()
"""
Comprehensive range of techniques :
1 . Using scaling on the KNN model to see the improvement in results
"""
from knn_model import *
from part_1_oop import BasicKnn
from part_2_a import WeightedKnn
class ScaledKnn:
    """KNN variants (basic / distance-weighted) run on feature-scaled data."""
    def __init__(self, train_file, test_file, _plotgraph=False):
        """
        :param train_file: The filename for the training instance
        :param test_file: The filename for the test instance
        :param _plotgraph: when True, accumulate (k, accuracy) pairs for plotting
        """
        self.knn_model = Knnmodel(train_file, test_file)
        self.knn_model.dataset(10)
        self.knn_model.dataset_scaling()
        # Distances from every scaled test row to the scaled training set.
        self.results = np.apply_along_axis(self.knn_model.calculateDistances, 1, self.knn_model.scaled_test_data,
                                           self.knn_model.scaled_train_data)
        self.accuracy_graph_values = []
        self.k_graph_values = []
        self.plot_graph = _plotgraph
    def prediction(self, k_value=1, n_value=1, type='basic'):
        """
        Calculates the euclidean distance between each query instance and the train dataset and returns accuracy
        prediction
        :param type: 'basic' or 'weighted' (shadows the builtin ``type``)
        :return: Accuracy of the prediction
        """
        try:
            if k_value < 1:
                raise InvalidKValue(k_value)
        except InvalidKValue as e:
            print(f'Invalid neighbour value: {e.message} ')
            return
        try:
            # NOTE(review): if ``type`` is neither 'basic' nor 'weighted',
            # ``percentage`` is never assigned and the append below raises a
            # NameError that is swallowed by the broad except — confirm.
            if type == 'basic':
                percentage = self.knn_model.basic_knn_percentage(self.results, k_value)
                print(f'The Scaled Basic KNN model with k = {k_value}, has and accuracy of {round(percentage, 2)} %')
            elif type == 'weighted':
                percentage = self.knn_model.weighted_knn_percentage(self.results, k_value, n_value)
                print(f'The Scaled Weighted KNN model with k = {k_value} and n = {n_value},'
                      f' has and accuracy of {round(percentage, 2)} %')
            if self.plot_graph:
                self.accuracy_graph_values.append(round(percentage, 2))
                self.k_graph_values.append(k_value)
        except Exception as e:
            print(f'Error finding accuracy for K = {k_value}, error {e}')
    def clean_graph_values(self):
        # Reset the accumulated plotting data between experiment runs.
        self.accuracy_graph_values = []
        self.k_graph_values = []
if __name__ == '__main__':
    # Compare four KNN variants for k = 1..LIMIT and draw all accuracy
    # curves on a single plot.
    PLOT_GRAPH = True
    LEGEND = []
    LIMIT = 20
    n = 2  # weighting exponent for the weighted variants
    scaled_knn = ScaledKnn('trainingData_classification.csv', 'testData_classification.csv', _plotgraph=PLOT_GRAPH)
    for k in range(1, LIMIT + 1):
        scaled_knn.prediction(k, type='basic')
    PlotGraph.plot_graph(scaled_knn.k_graph_values, scaled_knn.accuracy_graph_values)
    LEGEND.append('Basic Scaled KNN')
    scaled_knn.clean_graph_values()
    for k in range(1, LIMIT + 1):
        scaled_knn.prediction(k, n, type='weighted')
    PlotGraph.plot_graph(scaled_knn.k_graph_values, scaled_knn.accuracy_graph_values)
    LEGEND.append('Weighted Scaled KNN')
    scaled_knn.clean_graph_values()
    basic_knn = BasicKnn('trainingData_classification.csv', 'testData_classification.csv', _plotgraph=PLOT_GRAPH)
    for k in range(1, LIMIT + 1):
        basic_knn.prediction(k)
    PlotGraph.plot_graph(basic_knn.k_graph_values, basic_knn.accuracy_graph_values)
    LEGEND.append('Basic KNN')
    weighted_knn = WeightedKnn('trainingData_classification.csv', 'testData_classification.csv', _plotgraph=PLOT_GRAPH)
    for k in range(1, LIMIT + 1):
        weighted_knn.prediction(k, n)
    PlotGraph.plot_graph(weighted_knn.k_graph_values, weighted_knn.accuracy_graph_values)
    LEGEND.append('Weighted KNN')
    PlotGraph.show_graph(LEGEND)
|
# -*- coding: utf-8 -*-
# Package initializer: re-exports EventEmitter as the public API.
import logging

__author__ = '''hongjie Zheng'''
__email__ = 'hongjie0923@gmail.com'
__version__ = '0.0.1'
# NOTE(review): configuring the root logger at import time affects every
# consumer of this package; consider leaving configuration to the application.
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s')
from PyEventEmitter.EventEmitter import EventEmitter

__all__ = ['EventEmitter']
|
#!usr/bin/python
# NOTE(review): shebang is missing its leading '/' ("#!/usr/bin/python"),
# and os.system() with shell strings would be safer as subprocess.run([...]).
# Recon helper (requires root): install tools, scan the local /24 for hosts
# with SSH open, and store the matching IPs.
import os
import sys  # NOTE(review): imported but unused here

os.system("yum install sshpass -y")
os.system("yum install nmap -y")
# Greppable nmap output of the whole 192.168.0.0/24 subnet.
os.system("nmap 192.168.0.0/24 -oG /root/Desktop/project/ip1.txt")
# Keep only hosts with an open ssh port; field 2 of the grepable line is the IP.
os.system('cat /root/Desktop/project/ip1.txt|grep ssh|grep open|cut -f2 -d " ">/root/Desktop/project/ips.txt')
os.system("mkdir /root/Desktop/project/baba")
|
# Convert a multi-word name to its short form: initials of the leading words
# followed by the last word, e.g. "mohan das gandhi" -> "m . d . gandhi".
# Python 2 script (raw_input / print statements).
s1= raw_input("enter a string :")
s1=" "+s1  # prepend a space so the first word is detected like the others
c=0  # number of word starts (spaces) handled so far
k=0  # index just past the most recent space seen while c<=2
for i in range(0,len(s1),1):
    if(s1[i]==' ' and c<=2):
        if(c<=1):
            # Print the initial of this word followed by a dot.
            print s1[i+1],".",
        c=c+1
        k=i+1
# Print everything from the last tracked word boundary in full.
print s1[k:len(s1)]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 03 18:28:58 2015
@author: Jye Smith
NEMA NU 2-2007
Set 'PathDicom' to dir with dicom files. Can calculate FWHM of up to 3 points sources in a image.
"""
## https://pyscience.wordpress.com/2014/09/08/dicom-in-python-importing-medical-image-data-into-numpy-with-pydicom-and-vtk/
import dicom
import os
import numpy
from numpy import unravel_index
import matplotlib.pyplot as pyplot
import NEMA_Resolution_lib
PathDicom = "DICOM"
lstFilesDCM = [] # create an empty list
# Collect every file under PathDicom (assumed to all be DICOM slices).
for dirName, subdirList, fileList in os.walk(PathDicom):
    for filename in fileList:
        lstFilesDCM.append(os.path.join(dirName,filename))
# Read the first file to get header information
RefDs = dicom.read_file(lstFilesDCM[0])
# Load dimensions based on the number of rows, columns, and slices (along the Z axis)
ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))
print 'ConstPixelDims = ', ConstPixelDims[0], ConstPixelDims[1], ConstPixelDims[2]
# Load spacing values (in mm)
ConstPixelSpacing = (float(RefDs.PixelSpacing[0]), float(RefDs.PixelSpacing[1]), float(RefDs.SliceThickness))
print 'ConstPixelSpacing = ', ConstPixelSpacing[0], ConstPixelSpacing[1], ConstPixelSpacing[2]
print 'x FOV = ', round( ConstPixelSpacing[0] * ConstPixelDims[0], 2 ), ' mm'
print 'y FOV = ', round( ConstPixelSpacing[1] * ConstPixelDims[1], 2 ), ' mm'
print 'axial FOV = ', round( ConstPixelSpacing[2] * ConstPixelDims[2], 2 ), ' mm'
# The array is sized based on 'ConstPixelDims'
ArrayDicom = numpy.zeros(ConstPixelDims, dtype=float)
# loop through all the DICOM files and copy to numpy array
for filenameDCM in lstFilesDCM:
    ds = dicom.read_file(filenameDCM)
    RescaleIntercept = float( ds[0x28,0x1052].value ) ## (0028, 1052) Rescale Intercept DS: '0'
    RescaleSlope = float( ds[0x28,0x1053].value ) ## (0028, 1053) Rescale Slope DS: '2.97373'
    ArrayDicom[:, :, ds[0x20,0x13].value - 1] = ds.pixel_array * RescaleSlope + RescaleIntercept ## [0x20,0x13] is the 'Instance Number'. This will order the image correctly in the array.
fig = pyplot.figure()
ax = fig.add_subplot(2, 2, 1)
# Axial sum projection of the whole volume in the first subplot.
ax.imshow(numpy.sum(ArrayDicom, axis=2), interpolation = 'none')
## Loop through for up to 3 points
for points in range(3):
    print 'Point found number', points+1
    ## http://stackoverflow.com/questions/3584243/python-get-the-position-of-the-biggest-item-in-a-numpy-array
    MaxIndices = unravel_index(ArrayDicom.argmax(), ArrayDicom.shape)
    ## calc 30mm cube size in pixels around point
    pointx = int(round(30/ConstPixelSpacing[0]))
    pointy = int(round(30/ConstPixelSpacing[1]))
    pointz = int(round(30/ConstPixelSpacing[2]))
    ## extract cube around point
    PointArray = ArrayDicom[MaxIndices[0]-int(pointx/2): pointx+MaxIndices[0]-int(pointx/2), MaxIndices[1]-int(pointy/2): pointy+MaxIndices[1]-int(pointy/2), MaxIndices[2]-int(pointz/2): pointz+MaxIndices[2]-int(pointz/2)]
    print 'Line response counts = ', numpy.sum(PointArray), '. Must be atleast 100,000 counts.'
    if numpy.sum(PointArray) > 100000:
        ## Sum cube in to square
        FlatPointArray1 = numpy.sum(PointArray, axis=0)
        FlatPointArray2 = numpy.sum(PointArray, axis=1)
        ## Sum squares in to line response function
        xLineResponse = numpy.sum(FlatPointArray2, axis=1)
        yLineResponse = numpy.sum(FlatPointArray1, axis=1)
        zLineResponse = numpy.sum(FlatPointArray1, axis=0)
        ## Calculate the FWHM of the line response function
        x_info = NEMA_Resolution_lib.Calculate_x_Resolution(MaxIndices, pointx, xLineResponse, ConstPixelDims, ConstPixelSpacing)
        y_info = NEMA_Resolution_lib.Calculate_y_Resolution(MaxIndices, pointy, yLineResponse, ConstPixelDims, ConstPixelSpacing)
        z_info = NEMA_Resolution_lib.Calculate_z_Resolution(MaxIndices, pointz, zLineResponse, ConstPixelDims, ConstPixelSpacing)
        Title = 'Location (' + str(x_info[2]) + ',' + str(y_info[2]) + ',' + str(z_info[2]) + '%) \n ' + \
                'FWHM (' + str(x_info[0]) + ',' + str(y_info[0]) + ',' + str(z_info[0]) + ') mm ' + \
                '(' + str(x_info[1]) + ',' + str(y_info[1]) + ',' + str(z_info[1]) + ') pixels'
        # Plot the grid
        ax = fig.add_subplot(2, 2, points+2)
        ax.set_title(Title, fontsize=10)
        ax.set_ylabel('Line response cnts = \n'+str(numpy.sum(PointArray)), fontsize=10)
        ax.imshow(numpy.sum(PointArray, axis=2), interpolation = 'none')
    ## Set that point source to zero in the image so it wont be selected again in the analysis.
    ArrayDicom[MaxIndices[0]-int(pointx/2): pointx+MaxIndices[0]-int(pointx/2), MaxIndices[1]-int(pointy/2): pointy+MaxIndices[1]-int(pointy/2), MaxIndices[2]-int(pointz/2): pointz+MaxIndices[2]-int(pointz/2)] = 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import cantera as cant
## Constants ##
Do = .25 #in
N_tubes = 56
kr = .015
L_tot = 14 #in
L = 4 #in
Ao = np.pi*Do*N_tubes*L_tot
w_tube = .022
Di = .25 - 2*w_tube
Ai = np.pi*N_tubes*L_tot
Kp = 16.3
Cp_w_in_tube = 75.364
rho_w = 54240
Tsat25 = 403.57
Tsat35 = 411.266
E_rough = .045
h_fg25 = 39159
h_fg35 = 38746
|
import json
from django.utils.functional import cached_property
from django.core.exceptions import ObjectDoesNotExist
from django.views.generic import TemplateView
from .models import Article, Chapter, Definition
def get_definitions(article):
    """Return a JSON object mapping each term (in the article's language)
    to its definition text."""
    definitions = {}
    for definition_object in Definition.objects.filter(language_code=article.language_code):
        definitions[definition_object.term] = definition_object.definition.content
    return json.dumps(definitions)
def get_article_with_sections(article):
    """Attach ``sections_list`` (top-level sections, each carrying its
    ``subsections`` queryset) to ``article`` and return it."""
    top_level = article.sections.filter(parent_index__isnull=True)
    collected = []
    for section in top_level:
        section.subsections = article.sections.filter(parent_index=section.index)
        collected.append(section)
    article.sections_list = collected
    return article
class IndexView(TemplateView):
    """Landing page: renders the first article with chapters and definitions."""
    template_name = 'index.html'
    @cached_property
    def article(self):
        # First translated article with its sections attached; cached for
        # the lifetime of the view instance.
        return get_article_with_sections(
            Article.objects.language().prefetch_related('chapter', 'sections').first()
        )
    def get_context_data(self, **kwargs):
        try:
            next_article = Article.objects.language().get(index=self.article.index+1)
        except ObjectDoesNotExist:
            next_article = None  # current article is the last one
        return super().get_context_data(
            chapters=Chapter.objects.language().prefetch_related('articles').all(),
            article=self.article,
            next_article=next_article,
            definitions=get_definitions(self.article),
            **kwargs
        )
class ArticleView(TemplateView):
    """Single-article page; returns only the article fragment for AJAX."""
    def get_template_names(self):
        # NOTE(review): request.is_ajax() was deprecated in Django 3.1 and
        # removed in 4.0 — confirm the project's Django version.
        if self.request.is_ajax():
            return '_article.html'
        else:
            return 'index.html'
    @cached_property
    def article(self):
        # Article selected by the 'id' URL kwarg (which is its index).
        index = self.kwargs['id']
        article = Article.objects.language().prefetch_related(
            'chapter',
            'sections'
        ).get(index=index)
        return get_article_with_sections(article)
    def get_context_data(self, **kwargs):
        try:
            next_article = Article.objects.language().get(index=self.article.index+1)
        except ObjectDoesNotExist:
            next_article = None  # current article is the last one
        return super().get_context_data(
            chapters=Chapter.objects.language().all(),
            article=self.article,
            next_article=next_article,
            definitions=get_definitions(self.article),
            **kwargs
        )
|
"""Exercise each arithmetic helper exposed by the moduldemo module."""
import moduldemo

# Fixed typos in the output messages ("Additiom", "multipliction") and made
# their capitalisation consistent; dropped the un-Pythonic semicolons.
ret = moduldemo.add(10, 20)
print("Addition is", ret)
ret = moduldemo.sub(10, 20)
print("Subtraction is", ret)
ret = moduldemo.mult(10, 20)
print("Multiplication is", ret)
ret = moduldemo.div(10, 20)
print("Division is", ret)
|
import logging
from TestProject import TestProject
import Params
from FXpathSeacher import XpathSearch
from decimal import Decimal
class WTest_Rep_11_1_v2(TestProject):
    """Test 11_1: checks a report grid's row count, first-column sum and column count."""
    test_config = Params.params_1
    # Measured ("fact") grid properties, filled in by check_property().
    tproperty_page = {
        "row_count": 0,
        "col_sum": 0,
        "col_count": 0
    }

    def __init__(self):
        """Class constructor."""
        super(WTest_Rep_11_1_v2, self).__init__(self.test_config)

    def check_property(self):
        """Extract the report grid from the page and verify its properties.

        Parses ``//table/tbody/tr`` from ``self.page_content``, counts data
        rows and columns, sums the first column (locale commas and NBSP
        thousands separators normalised), then logs SUCCESS/FAIL against each
        expectation in ``self.test_custom_property``.
        """
        grid_search = XpathSearch(self.page_content)
        res = grid_search.search("//table/tbody/tr", False)
        res_cnt = len(res) - 1  # first <tr> is the header row
        self.tproperty_page["row_count"] = res_cnt
        # Flatten the <td> texts of every data row into a list-of-lists.
        matrix = []
        for trs in res[1:len(res)]:
            table_line = []
            for t in trs:
                if t.tag == 'td':
                    table_line.append(str(t.text))
            matrix.append(table_line)
        test_col_num = 0  # number of fields in the first row
        test_col_sum = Decimal(0.0)
        for i in range(len(matrix)):
            for j in range(len(matrix[i])):
                if i == 0:  # calculate fields in first row
                    test_col_num += 1
                if j == 0:  # calculate sum of cells in one column
                    try:
                        test_col_sum = test_col_sum + Decimal(
                            matrix[i][j].strip().replace(',', '.').replace(u'\xa0', ''))
                    except (ValueError, ArithmeticError):
                        # BUG FIX: Decimal() signals an unparseable cell with
                        # decimal.InvalidOperation — an ArithmeticError
                        # subclass, NOT a ValueError — so the original
                        # ValueError-only clause never fired and bad cells
                        # crashed the test instead of being reported.
                        print(" error on line [", i, "]", end=" ")
        self.tproperty_page["col_sum"] = float(test_col_sum)
        self.tproperty_page["col_count"] = test_col_num
        for key, value in self.test_custom_property.items():
            logging.info("Expected (" + str(key) + ") - " + str(value) + " real " + str(self.tproperty_page[key]))
            if value == self.tproperty_page[key]:
                logging.info(' SUCCESS')
            else:
                logging.info(' FAIL')
__author__ = 'B.Ankhbold'
from sqlalchemy import Column, String, Float, Date, ForeignKey, Integer, Table
from sqlalchemy.orm import relationship
from geoalchemy2 import Geometry
from ClLanduseType import *
class CaParcelConservation(Base):
    """ORM mapping for the ``parcel_conservation`` table.

    Represents a land parcel in a conservation area: its size, address,
    validity period and polygon geometry.
    """
    __tablename__ = 'parcel_conservation'
    gid = Column(Integer, primary_key=True)  # surrogate primary key
    area_m2 = Column(Float)  # parcel area in square metres
    polluted_area_m2 = Column(Float)  # polluted portion in square metres
    address_khashaa = Column(String)
    address_streetname = Column(String)
    address_neighbourhood = Column(String)
    valid_from = Column(Date)  # start of the record's validity period
    valid_till = Column(Date)  # end of the record's validity period
    geometry = Column(Geometry('POLYGON', 4326))  # WGS84 polygon (EPSG:4326)
    # foreign keys:
    conservation = Column(Integer, ForeignKey('cl_conservation_type.code'))
    conservation_ref = relationship("ClConservationType")
|
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
class SeedcollectionSpider(CrawlSpider):
    """Crawl theseedcollection.com.au listings and scrape product details."""
    name = "seedcollection"
    allowed_domains = ["theseedcollection.com.au"]

    def __init__(self, tag=None, *args, **kwargs):
        """Start from the category page for *tag*, or the site root when absent."""
        super().__init__(*args, **kwargs)
        self.start_urls = [
            F"https://www.theseedcollection.com.au/{tag or ''}"
        ]

    rules = (
        # Product thumbnails -> parse_item; pagination "next" arrow -> follow only.
        Rule(LinkExtractor(restrict_xpaths="//div[contains(@class, 'wrapper-thumbnail')]"), callback='parse_item'),
        Rule(LinkExtractor(restrict_xpaths="//ul[@class='pagination']/li/a/i[@class='fa fa-angle-right']/parent::a")),
    )

    def parse_item(self, response):
        """Yield title, price, fact table and image URLs for one product page."""
        images = response.xpath("//div[@class='thumb-image']/div/a/img")
        about = response.xpath("//div[@id='facts']//thead/tr")
        about_key = about.xpath("./th/text()").getall()
        about_value = about.xpath("./td/text()").getall()
        # dict(zip(...)) already builds the mapping; the original wrapped it
        # in a redundant identity dict-comprehension.
        about_items = dict(zip(about_key, about_value))
        yield {
            "title": response.xpath("normalize-space(//h1[@aria-label='Product Name']/text())").get(),
            "price": response.xpath("normalize-space(//div[@class='productprice productpricetext']/text())").get(),
            "about": about_items,
            "images": [
                response.urljoin(response.xpath("//div[@class='zoom']/img[@id='main-image']/@src").get()),
                *[response.urljoin(img.xpath("./@src").get()) for img in images]
            ],
        }
################ here we are checking the Second example ###############################
import datetime

# Weekday names indexed by datetime.date.weekday() (0 = Monday).
WEEKDAY_NAMES = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']


def weekday_totals(daily):
    """Sum the values of *daily* ('YYYY-MM-DD' -> number) per weekday name.

    Replaces the original copy-pasted 7-branch if/elif chain (duplicated in
    both examples) with a single indexed lookup. Insertion order of the
    result dict matches the input order, exactly as before.
    """
    totals = {}
    for iso_date, value in daily.items():
        year, month, day = (int(part) for part in iso_date.split('-'))
        name = WEEKDAY_NAMES[datetime.date(year, month, day).weekday()]
        totals[name] = totals.get(name, 0) + value
    return totals


d = {'2020-01-01': 4, '2020-01-02': 4, '2020-01-03': 6, '2020-01-04': 8,
     '2020-01-05': 2, '2020-01-06': -6, '2020-01-07': 2, '2020-01-08': -2}
D = weekday_totals(d)
print(D)

################### Here we are checking the third example ########################
d = {'2020-01-01': 6, '2020-01-04': 12, '2020-01-05': 14, '2020-01-06': 2,
     '2020-01-07': 4}
D = weekday_totals(d)
print(D)

# From here we fill in any weekday that had no data at all.
day = WEEKDAY_NAMES
for ele in day:
    if ele not in D:
        curr_index = day.index(ele)
        # NOTE(review): a missing 'Sun' would make curr_index + 1 run off the
        # end of the list (IndexError); kept as-is to preserve behaviour.
        if D.get(day[curr_index + 1], 'no') != 'no':
            # Next day known: average of the previous and next day.
            D[day[curr_index]] = (D[day[curr_index - 1]] + D[day[curr_index + 1]]) // 2
        else:
            # Next day unknown: extrapolate linearly from the two previous days.
            D[day[curr_index]] = 2 * D[day[curr_index - 1]] - D.get(day[curr_index - 2], 0)
print(D)
|
"""Demonstrate all business related API endpoints.
This module provides API endpoints to register business, view a single business, view all
businesses.
"""
from flask import Blueprint, abort, request
from flask_restful import (Resource, Api, reqparse)
from app import business
class BusinessRecord(Resource):
    """Illustrate API endpoints to register and view business.

    Attributes:
        reqparse (object): A request parsing interface designed to access
            simple and uniform variables on the flask.request object.
    """

    # Suffixes of the six required 'business_*' payload fields.
    _FIELDS = ('id', 'owner', 'name', 'category', 'location', 'summary')

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        # All six fields share identical parsing rules, so declare them in
        # one loop rather than six copy-pasted calls.
        for suffix in self._FIELDS:
            self.reqparse.add_argument(
                'business_' + suffix,
                required=True,
                help='Business {} is required'.format(suffix),
                location=['form', 'json'])

    def post(self):
        """Register a business.

        Returns:
            A success message to indicate successful registration.

        Raises:
            TypeError is raised when business id not a number.
            ValueError is raised when business id a negative number.
            KeyError is raised when business id already exist.
            Error message when no data supplied.
        """
        req_data = request.get_json()
        values = [req_data['business_' + suffix] for suffix in self._FIELDS]
        save_result = business.create_business(*values)
        return save_result["message"], 201

    def get(self):
        """View all registered businesses.

        Returns:
            A json format records of the registered businesses.
        """
        return business.view_all_businesses(), 200
class OneBusinessRecord(Resource):
    """Illustrate API endpoints to manipulate a single business record.

    Attributes:
        business (class): A class that implement business related methods.
    """

    def get(self, business_id):
        """View a registered business by id.

        Returns:
            A json record of the registered business; aborts with 404 when
            no business is registered.
        """
        response = business.view_business_by_id(business_id)
        if response.get('message') == 'There is no registered business!':
            # BUG FIX: the original `return 'Business does not exist', abort(404)`
            # evaluated abort(404) first, which raises — so the return (and its
            # message) was dead code. Raise the 404 explicitly instead.
            abort(404)
        return response, 200

    def put(self, business_id):
        """Update a registered business.

        Args:
            business_id (int): business id parameter should be unique to identify each business.

        Returns:
            A successful message when the business record is updated.
        """
        req_data = request.get_json()
        business_owner = req_data['business_owner']
        business_name = req_data['business_name']
        business_category = req_data['business_category']
        business_location = req_data['business_location']
        business_summary = req_data['business_summary']
        response = business.update_business(business_id, business_owner, business_name,
                                            business_category, business_location, business_summary)
        return response, 200

    def delete(self, business_id):
        """Delete a registered business.

        Args:
            business_id (int): business id parameter should be unique to identify each business.

        Returns:
            A successful message when the business record is deleted.
        """
        response = business.delete_business(business_id)
        return response, 200
# Blueprint wiring: expose the collection resource at /business and the
# single-record resource at /businesses/<id>.
business_api = Blueprint('resources.business', __name__)
api = Api(business_api)
api.add_resource(
    BusinessRecord,
    '/business',
    endpoint='business'
)
api.add_resource(
    OneBusinessRecord,
    '/businesses/<int:business_id>',
    endpoint='businesses'
)
|
import numpy as np
import matplotlib.pyplot as plt
# Learning curves saved as .npy arrays; indexing below implies shape (2, 30):
# row 0 = validation accuracy per epoch, row 1 = validation loss per epoch.
zare_data_AB_cnn_30_epochs = np.load('CNN_AB_zare_classifier_30_epochs.npy')
zare_data_AB_rnn_30_epochs = np.load('zare_data_AB_rnn_30_epochs.npy')
class_data_all_cnn_30_epochs = np.load('CNN_class_all_letters_classifier_30_epochs.npy')
class_data_all_rnn_30_epochs = np.load('NN_class_all_letters_classifier_30_epochs.npy')

EPOCHS = np.arange(1, 31)


def _compare(cnn_row, rnn_row, title, ylabel, cnn_color=None, rnn_color=None):
    """Plot one CNN-vs-standard-NN curve pair and show the figure.

    color=None leaves matplotlib on its default colour cycle, matching the
    original colour-less calls; deduplicates four copy-pasted plot sections.
    """
    plt.title(title)
    plt.xlabel('Epoch #')
    plt.ylabel(ylabel)
    plt.plot(EPOCHS, cnn_row, label='Convolutional NN', color=cnn_color)
    plt.plot(EPOCHS, rnn_row, label='Standard NN', color=rnn_color)
    plt.legend()
    plt.show()


_compare(zare_data_AB_cnn_30_epochs[0, :], zare_data_AB_rnn_30_epochs[0, :],
         'Validation Accuracy vs. # of Epoch Iterations', 'Accuracy %')
_compare(zare_data_AB_cnn_30_epochs[1, :], zare_data_AB_rnn_30_epochs[1, :],
         'Validation Loss vs. # of Epoch Iterations', 'Loss Value')
_compare(class_data_all_cnn_30_epochs[0, :], class_data_all_rnn_30_epochs[0, :],
         'Validation Accuracy vs. # of Epoch Iterations', 'Accuracy %', 'c', 'm')
_compare(class_data_all_cnn_30_epochs[1, :], class_data_all_rnn_30_epochs[1, :],
         'Validation Loss vs. # of Epoch Iterations', 'Loss Value', 'c', 'm')
|
import numpy as np
import pandas as pd
import pandas_datareader as pdr
import datetime
import logging
import math
from sklearn.preprocessing import StandardScaler
from action import Action
####################################
# TODO: remove this after API update
from pandas_datareader.google.daily import GoogleDailyReader
@property
def url(self):
    # Google moved the historical-quotes endpoint; override the reader's URL
    # until pandas-datareader ships a fix (see TODO above).
    return 'http://finance.google.com/finance/historical'
# Monkeypatch the class so every GoogleDailyReader instance uses the new URL.
GoogleDailyReader.url = url
# remove ends
####################################
LOGGER = logging.getLogger(__name__)
class Environment:
    """Trading environment over daily Google Finance quotes.

    Exposes a gym-like interface (reset / step / state) in which each action
    buys, sells or skips the main ticker for a fixed holding period.
    """

    # An episode ends once the deposit drops below this fraction of the
    # initial deposit.
    MIN_DEPOSIT_PCT = 0.7

    def __init__(self,
                 ticker_list,
                 initial_deposit=1000,
                 from_date=datetime.datetime(2007, 1, 1),
                 to_date=datetime.datetime(2017, 1, 1),
                 window=70,
                 min_days_to_hold=5,
                 max_days_to_hold=5,
                 days_step=10,
                 scaler=None):
        """Download quotes for *ticker_list* and build the action space.

        Args:
            ticker_list: tickers to fetch; the first one is the traded ticker.
            initial_deposit: starting cash amount.
            from_date: start of the historical data range.
            to_date: end of the historical data range.
            window: number of past days returned as one observation.
            min_days_to_hold: shortest holding period in the action grid.
            max_days_to_hold: longest holding period in the action grid.
            days_step: spacing of the holding-period grid.
            scaler: optional pre-fitted StandardScaler to reuse; a new one is
                fitted when None.
        """
        self.initial_deposit = initial_deposit
        self.window = window
        self.max_days_to_hold = max_days_to_hold
        def get(tickers, startdate, enddate):
            # Fetch each ticker and stack the frames under a
            # (Ticker, Date) MultiIndex.
            def data(ticker):
                return pdr.get_data_google(ticker, start=startdate, end=enddate)
            datas = map(data, tickers)
            return pd.concat(datas, keys=tickers, names=['Ticker', 'Date'])
        self.data = get(ticker_list, from_date, to_date)
        self.data.drop('Volume', inplace=True, axis=1)
        days_to_holds = np.arange(min_days_to_hold,
                                  max_days_to_hold + 1, days_step)
        self.main_ticker = ticker_list[0]
        # One action per (act, holding-period) pair, main ticker only.
        self.action_space = [Action(self.main_ticker, act, days, 10)
                             for act in Action.acts
                             for days in days_to_holds]  # for ticker in ticker_list
        self.minimal_deposit = self.initial_deposit * Environment.MIN_DEPOSIT_PCT
        self.scaler = scaler
        self.preprocess_data()
        self.reset()

    def preprocess_data(self):
        """Convert raw prices to standard-scaled daily percentage changes."""
        data_unstacked = self.data.unstack(level=0)
        data_unstacked = data_unstacked.pct_change().fillna(0)
        rows = data_unstacked.shape[0]
        LOGGER.info('Data size: %d' % rows)
        if self.scaler is None:
            LOGGER.info('Create new scaler')
            self.scaler = StandardScaler()
            data_unstacked_scaled = self.scaler.fit_transform(data_unstacked)
        else:
            # Reuse the scaler fitted elsewhere (e.g. on training data).
            LOGGER.info('Use existing scaler')
            data_unstacked_scaled = self.scaler.transform(data_unstacked)
        self.scaled_data = pd.DataFrame(data=data_unstacked_scaled, columns=data_unstacked.columns,
                                        index=data_unstacked.index)

    def reset(self):
        """Start a new episode and return the initial observation."""
        self.deposit = self.initial_deposit
        # Last index from which a full holding period still fits in the data.
        self.max_current_index = len(self.scaled_data) - self.max_days_to_hold
        self.current_index = self.window
        self.actions = {}
        return self.state()

    def step(self, action_idx: int):
        """Apply one action and advance the environment by a single day.

        Args:
            action_idx: index into ``action_space``; -1 skips the day.

        Returns:
            Tuple of (next_state, reward, done). Reward is the relative price
            change times 10000, or None for a skipped day.
        """
        if action_idx == -1:
            LOGGER.info('Skip action for {}'.format(self.data.loc[self.main_ticker].iloc[self.current_index - 1].name))
            self.current_index += 1
            next_state = self.state()
            return next_state, None, (self.max_current_index < self.current_index)
        action = self.action_space[action_idx]
        covered_df = self.future_data_for_action(action)
        on_date = covered_df.index[0]
        first_day_price = covered_df.iloc[0]['Open']
        last_day_price = covered_df.iloc[-1]['Close']
        # Relative gain for a long position; its negation for a short one.
        if action.act == Action.BUY:
            reward = (last_day_price - first_day_price) / first_day_price
        elif action.act == Action.SELL:
            reward = (first_day_price - last_day_price) / first_day_price
        else:
            reward = 0
        if math.isnan(reward):  # sometimes the first_day_price is NaN
            reward = 0
        self.current_index += 1  # action.days
        # store information for further inspectation
        invested_amount = self.deposit * action.percentage / 100
        deposit_reward = reward * invested_amount
        self.deposit += deposit_reward
        self.actions[on_date] = (action, reward, deposit_reward, first_day_price, last_day_price, invested_amount)
        next_state = self.state()
        # Episode ends on deposit exhaustion or running out of data.
        done = self.deposit < self.minimal_deposit or \
               self.max_current_index < self.current_index
        return next_state, reward * 10000, done

    def future_data_for_action(self, action: Action):
        """Price rows covered by *action*, starting at the current day."""
        trade_day_index = self.current_index
        return self.data.loc[action.ticker].iloc[trade_day_index: trade_day_index + action.days]

    def state(self):
        """Scaled observation: the last ``window`` days up to the current day."""
        return self.scaled_data.iloc[self.current_index - self.window: self.current_index]

    def state_size(self):
        """Shape of one observation."""
        return self.state().shape

    def action_size(self):
        """Number of discrete actions."""
        return len(self.action_space)

    @staticmethod
    def shrink_df_for_ticker(df, ticker):
        """Return *df* restricted to *ticker*'s columns, dropping that level."""
        idx = pd.IndexSlice
        df = df.loc[:, idx[:, ticker]]
        df.columns = df.columns.droplevel(1)
        return df
|
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
class testClass(unittest.TestCase):
    """Log an administrator into the TagOn support panel.

    NOTE(review): the three test methods form one sequential login flow and
    rely on unittest's alphabetical execution order — confirm this is
    intentional.
    """
    driver = None

    @classmethod
    def setUpClass(cls):
        """Open Chrome once and load the admin login page."""
        login_url = "http://tagonsupport.cubixsource.com/administrator/login"
        cls.driver = webdriver.Chrome("C:\\Users\\Bilal.Ikram\\PycharmProjects\\firstSeleniumTest\\venv\\selenium\\webdriver\\chromedriver.exe")
        cls.driver.maximize_window()
        cls.driver.get(login_url)

    def test_class(self):
        """Fill in the admin e-mail address."""
        email_field = self.driver.find_element(By.NAME, "email")
        self.assertTrue(email_field, "'a' is not True")
        email_field.send_keys("admin@tagonapp.com")

    def test_class2(self):
        """Fill in the admin password."""
        password_field = self.driver.find_element(By.NAME, "password")
        self.assertTrue(password_field, "'b' is not True")
        password_field.send_keys("admin123")

    def test_class3(self):
        """Submit the login form and give the page time to settle."""
        submit_button = self.driver.find_element(By.CSS_SELECTOR, ".btn-primary")
        self.assertTrue(submit_button, "'c' is not True")
        submit_button.click()
        time.sleep(3)

    @classmethod
    def tearDownClass(cls):
        """Close the browser after the whole flow has run."""
        cls.driver.quit()
        print("case ended")
if __name__ == '__main__':
    # verbosity=2 prints each test method's name as it runs.
    unittest.main(verbosity=2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.