text stringlengths 38 1.54M |
|---|
from django.db import models
class Instructor(models.Model):
    """Course instructor profile with optional social-media links."""

    # Display name, also used as the string representation below.
    name = models.CharField(max_length=100)
    # Falls back to a bundled placeholder when no photo is uploaded.
    image = models.ImageField(default='default.png', blank=True)
    # URL-safe identifier; uniqueness enforced at the database level.
    slug = models.SlugField(unique=True)
    facebook = models.URLField(max_length=1000, blank=True)
    linkedin = models.URLField(max_length=1000, blank=True)

    def __str__(self):
        # Human-readable representation (admin, shell, templates).
        return self.name
|
"""
Check if input string is valid palindrome
"""
from collections import Counter
import unittest
def is_palindrome(str_):
    """
    Check whether any permutation of the input can form a palindrome.

    A rearrangement into a palindrome exists exactly when at most one
    character occurs an odd number of times.
    """
    assert str_, \
        'String should not be empty'
    odd_counts = sum(1 for occurrences in Counter(str_).values()
                     if occurrences % 2)
    return odd_counts <= 1
class TestValidatePalindromeChecks(unittest.TestCase):
    """Unit tests for the is_palindrome permutation check."""

    def test_valid_palindrome__all_even(self):
        """Accept an input whose characters all have even counts."""
        result = is_palindrome('ccii')
        self.assertTrue(result)

    def test_valid_palindrome__one_uneven_character(self):
        """Accept an input with exactly one odd-count character."""
        result = is_palindrome('civvvic')
        self.assertTrue(result)

    def test_not_a_palindrome(self):
        """Reject an input with several odd-count characters."""
        result = is_palindrome('livci')
        self.assertFalse(result)

    def test_one_character_string(self):
        """A single character is trivially a palindrome."""
        result = is_palindrome('c')
        self.assertTrue(result)

    def test_empty_string(self):
        """Empty input violates the function's precondition."""
        with self.assertRaises(AssertionError):
            is_palindrome('')
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
|
def bfs(heads, tails):
    """
    Breadth-first search over (heads, tails) states.

    Moves available from a state (h, t):
      * t >= 1: (h, t + 1)
      * t >= 2: (h + 1, t - 2)
      * h >= 2: (h - 2, t)

    Returns d + 1 where d is the minimum number of moves to reach the
    state (2, 0), or -1 if the queue is ever exhausted.
    """
    # Visited states are marked at pop time; duplicates in the queue are
    # skipped.  (The original seeded this with set((heads, tails)), which
    # builds {heads, tails} -- a set of two ints that can never match an
    # (h, t) tuple, i.e. it was effectively empty; made that explicit.)
    visited = set()
    queue = [(heads, tails, 0)]
    qi = 0  # read index into the list-backed queue (O(1) pops)
    while qi < len(queue):
        h, t, d = queue[qi]
        qi += 1
        if (h, t) in visited:
            continue
        visited.add((h, t))
        if h == 2 and t == 0:
            return d + 1
        if t >= 1:
            queue.append((h, t + 1, d + 1))
        if t >= 2:
            queue.append((h + 1, t - 2, d + 1))
        if h >= 2:
            queue.append((h - 2, t, d + 1))
    return -1
def solve(heads, tails):
    """Print the BFS answer for a single input pair."""
    answer = bfs(heads, tails)
    print(answer)
if __name__ == "__main__":
    # Read pairs of ints per line until the "0 0" sentinel terminates input.
    while True:
        heads, tails = map(int, input().split())
        if heads == 0 and tails == 0:
            break
        solve(heads, tails)
|
import sys
sys.path.append("../../")
sys.path.append("../")
import os
import numpy as np
from hwr_utils import *
from hwr_utils.stroke_plotting import *
from hwr_utils.stroke_recovery import *
import json
def test():
    """Smoke-test stroke preparation and resampling on a coordinate JSON dump."""
    # NOTE(review): the first path is immediately overwritten by the second,
    # machine-specific one -- presumably a local debugging switch.
    json_path = "../../data/online_coordinate_data/8_stroke_vSmall_16/train_online_coords.json"
    json_path = "/media/data/GitHub/simple_hwr/data/online_coordinate_data/MAX_stroke_vlargeTrnSetFull/train_online_coords.json"
    parameter = "d"
    with open(json_path) as f:
        output_dict = json.load(f)
    for x in output_dict:
        output = prep_stroke_dict(x["raw"], time_interval=0, scale_time_distance=True) # list of dictionaries, 1 per file
        # The loop variable `x` is rebound here; the raw entry is no longer
        # needed after prep_stroke_dict.
        x = output["x"]
        y = output["y"]
        is_start_stroke = output["start_strokes"]
        # Ground truth as an (npoints, 3) array: x, y, stroke-start flag.
        gt = np.array([x,y,is_start_stroke]).transpose([1,0])
        #img = draw_from_gt(gt, show=True, use_stroke_number=False, plot_points=False, linewidth=1)
        # Resample
        x_func, y_func = stroke_recovery.create_functions_from_strokes(output, parameter=parameter) # can be d if the function should be a function of distance
        # NOTE(review): attribute access here vs. item access above -- `output`
        # is presumably an attribute-dict type; confirm against prep_stroke_dict.
        starts = output.start_times if parameter=="t" else output.start_distances
        x, y, is_start_stroke = stroke_recovery.sample(x_func, y_func, starts, 200, noise=None)
        gt = np.array([x,y,is_start_stroke]).transpose([1,0])
        #img = draw_from_gt(gt, show=True, use_stroke_number=False, plot_points=False, linewidth=1)
if __name__=='__main__':
test() |
class Solution(object):
    def multiply(self, num1, num2):
        """
        Multiply two non-negative integers given as decimal strings.

        :type num1: str
        :type num2: str
        :rtype: str

        Grade-school multiplication: the product of digits num1[i] and
        num2[j] contributes to result positions i + j (tens) and
        i + j + 1 (units).  Uses range and "//" instead of the original
        Py2-only xrange and true-division "/" so the method works under
        both Python 2 and Python 3.
        """
        if num1 == '0' or num2 == '0':
            return '0'
        m = len(num1)
        n = len(num2)
        res = [0] * (m + n)
        # Step 1: get all one-digit products
        # Time complexity: O(n1*n2)
        for i in range(m - 1, -1, -1):
            for j in range(n - 1, -1, -1):
                mul = (ord(num1[i]) - ord("0")) * (ord(num2[j]) - ord("0"))
                p1 = i + j      # tens position
                p2 = i + j + 1  # units position
                res[p1] += mul // 10
                res[p2] += mul % 10
        # Step 2: Sweep through the stored products with carry
        # Time complexity: O(n1+n2)
        carry = 0
        for i in range(m + n - 1, -1, -1):
            temp = carry + res[i]
            carry, res[i] = temp // 10, str(temp % 10)
        # Drop the single possible leading zero.
        return ''.join(res) if res[0] != '0' else ''.join(res[1:])
# https://discuss.leetcode.com/topic/30508/easiest-java-solution-with-graph-explanation
s = Solution()
# Call form of print works on both Python 2 and 3 (the original Py2
# print statement is a syntax error under Python 3).
print(s.multiply('1', '1'))
from __future__ import print_function
import functools
import logging
import pathlib
from collections import namedtuple
from itertools import chain
from multiprocessing import cpu_count
import click
import click_log
import flask
import gunicorn.app.base
from flask_iiif import IIIF
from flask_iiif.cache.simple import ImageSimpleCache
from flask_restful import Api
from iiif_prezi.factory import ManifestFactory
from index import DatabaseRepository, FilesystemRepository
# One search result: the matched text, its surrounding context, and the
# annotation ids that highlight it on the page.
SearchHit = namedtuple("SearchHit",
                       ("match", "before", "after", "annotations"))

# Serve the bundled Mirador viewer as the static assets.
app = flask.Flask('hocrviewer', static_folder='./vendor/mirador',
                  static_url_path='/static')
ext = IIIF(app=app)
api = Api(app=app)
ext.init_restful(api, prefix="/iiif/image/")

# Set by the CLI entry points (cli/serve/index_documents) before the app runs.
repository = None

logger = logging.getLogger(__name__)
class ApiException(Exception):
    """JSON-serializable API error carrying an HTTP status code."""

    # Default HTTP status when none is supplied.
    status_code = 500

    def __init__(self, message, status_code=None, payload=None):
        super(ApiException, self).__init__()
        self.message = message
        if status_code is not None:
            # Shadow the class-level default for this instance only.
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Return the payload (if any) merged with the error message."""
        body = dict(self.payload or ())
        body['message'] = self.message
        return body
@app.errorhandler(ApiException)
def handle_api_exception(error):
    """Render any ApiException as a JSON response with its status code."""
    response = flask.jsonify(error.to_dict())
    response.status_code = error.status_code
    return response
def cors(origin='*'):
    """This decorator adds CORS headers to the response"""
    def decorator(f):
        @functools.wraps(f)
        def decorated_function(*args, **kwargs):
            response = flask.make_response(f(*args, **kwargs))
            response.headers['Access-Control-Allow-Origin'] = origin
            return response
        return decorated_function
    return decorator
def locate_image(uid):
    """Resolve an image uid of the form '<book_id>:<page_id>' to a file path.

    Registered with flask-iiif as the uuid-to-image opener handler.
    """
    book_id, page_id = uid.split(':')
    return repository.get_image_path(book_id, page_id)
class HocrViewerApplication(gunicorn.app.base.BaseApplication):
    """Embed the Flask app in a gunicorn server bound to 0.0.0.0:5000."""

    def __init__(self, app):
        # Standard gunicorn worker heuristic: 2 * cores + 1.
        self.options = {'bind': '0.0.0.0:5000',
                        'workers': cpu_count()*2+1}
        self.application = app
        app.config['IIIF_CACHE_HANDLER'] = ImageSimpleCache()
        # Teach flask-iiif how to map image uids to files on disk.
        ext.uuid_to_image_opener_handler(locate_image)
        super(HocrViewerApplication, self).__init__()

    def load_config(self):
        # Copy only the options gunicorn actually knows about into its config.
        config = dict([(key, value) for key, value in self.options.items()
                       if key in self.cfg.settings and value is not None])
        for key, value in config.items():
            self.cfg.set(key.lower(), value)

    def load(self):
        return self.application
def build_manifest(book_id, book_path, metadata, pages):
    """Build an IIIF presentation manifest for one book.

    `pages` is an iterable of (page_id, img_path, width, height) tuples.
    `metadata` is not referenced in this body -- apparently kept for
    signature compatibility with the repository row unpacked by callers.
    Returns None (and logs an error) when the book has no images.
    """
    fac = ManifestFactory()
    # Strip the trailing '/' so url_for paths can be appended directly.
    base_url = flask.request.url_root[:-1]
    fac.set_base_prezi_uri(
        base_url + flask.url_for('get_book_manifest', book_id=book_id))
    fac.set_base_image_uri(base_url + '/iiif/image/v2')
    fac.set_iiif_image_info(2.0, 2)
    manifest = fac.manifest(label=book_id)
    manifest.set_description("Automatically generated from HOCR")
    seq = manifest.sequence(ident='0')
    for idx, (page_id, img_path, width, height) in enumerate(pages):
        canvas = seq.canvas(ident=page_id,
                            label='Page {}'.format(idx))
        anno = canvas.annotation(ident=page_id)
        img = anno.image('{}:{}'.format(book_id, page_id), iiif=True)
        img.set_hw(height, width)
        # Canvas dimensions must match the image for correct display.
        canvas.height = img.height
        canvas.width = img.width
        # Per-page transcription is linked as an external annotation list.
        canvas.annotationList(
            base_url + flask.url_for('get_page_lines', book_id=book_id,
                                     page_id=page_id),
            label="Transcribed Text")
    if not seq.canvases:
        logger.error("{} has no images!".format(book_path))
        return None
    else:
        return manifest
def get_canvas_id(book_id, page_id):
    """Return the canonical canvas URI for one page of a book."""
    manifest_url = (flask.request.url_root[:-1] +
                    flask.url_for('get_book_manifest', book_id=book_id))
    return manifest_url + '/canvas/' + page_id
@app.route("/iiif/<book_id>")
@cors('*')
def get_book_manifest(book_id):
    """Return the IIIF presentation manifest for a book as JSON (404 if unknown)."""
    doc = repository.get_document(book_id)
    if not doc:
        raise ApiException(
            "Could not find book with id '{}'".format(book_id), 404)
    pages = repository.get_pages(book_id)
    manifest = build_manifest(*doc, pages=pages)
    if manifest is None:
        raise ApiException(
            "Could not build manifest for book with id '{}'"
            .format(book_id), 404)
    # Advertise the IIIF search service only when a full-text index exists.
    if isinstance(repository, DatabaseRepository):
        manifest.add_service(
            ident=(flask.request.base_url +
                   flask.url_for('search_in_book', book_id=book_id)),
            context='http://iiif.io/api/search/1/context.json',
            profile='http://iiif.io/api/search/1/search')
    return flask.jsonify(manifest.toJSON(top=True))
@app.route("/iiif/<book_id>/list/<page_id>", methods=['GET'])
@app.route("/iiif/<book_id>/list/<page_id>.json", methods=['GET'])
@cors('*')
def get_page_lines(book_id, page_id):
    """Return the transcribed lines of a page as an IIIF annotation list."""
    lines = repository.get_lines(book_id, page_id)
    if lines is None:
        raise ApiException(
            "Could not find lines for page '{}' in book '{}'"
            .format(page_id, book_id), 404)
    fac = ManifestFactory()
    fac.set_base_prezi_uri(
        flask.request.url_root[:-1] + '/iiif/' + book_id + '/' + page_id)
    annotation_list = fac.annotationList(ident=book_id + '/' + page_id)
    # FIXME: Workaround for a really stupid bug in iiif-prezi:
    # The library sets .resources as a class-attribute, which is why:
    #   - it will not get serialized during toJSON
    #   - multiple instances share their resources
    annotation_list.resources = []
    for idx, (text, x, y, w, h) in enumerate(lines):
        anno = annotation_list.annotation(ident='line-{}'.format(idx))
        anno.text(text=text)
        # Anchor the text annotation to its bounding box on the canvas.
        anno.on = (get_canvas_id(book_id, page_id) +
                   "#xywh={},{},{},{}".format(x, y, w, h))
    out_data = annotation_list.toJSON(top=True)
    if not annotation_list.resources:
        # NOTE: iiif-prezi strips empty lists from the resulting JSON,
        # so we have to add the empty list ourselves...
        out_data['resources'] = []
    return flask.jsonify(out_data)
@app.route("/iiif/<book_id>/search", methods=['GET'])
@cors('*')
def search_in_book(book_id):
    """IIIF Content Search endpoint: find query `q` inside an indexed book.

    Emits one annotation per matched word and one hit per matched phrase.
    Requires the database-backed repository (501 otherwise).
    """
    if not isinstance(repository, DatabaseRepository):
        raise ApiException(
            "Searching is only supported if the content has been indexed. "
            "Please run `hocrviewer index` to do so.", 501)
    base_url = flask.request.url_root[:-1]
    query = flask.request.args.get('q')
    out = {
        '@context': [
            'http://iiif.io/api/presentation/2/context.json',
            'http://iiif.io/api/search/1/context.json'],
        '@id': (base_url + flask.url_for('search_in_book',
                                         book_id=book_id) + '?q=' + query),
        '@type': 'sc:AnnotationList',
        'within': {
            '@type': 'sc:Layer',
            'ignored': [k for k in flask.request.args.keys() if k != 'q']
        },
        'resources': [],
        'hits': []}
    for page_id, match_text, line_infos in repository.search(query, book_id):
        # The repository marks matches with <hi>...</hi> in the snippet text.
        match_text = match_text.split()
        start_idxs = [idx for idx, word in enumerate(match_text)
                      if "<hi>" in word]
        end_idxs = [idx for idx, word in enumerate(match_text)
                    if "</hi>" in word]
        for start_idx, end_idx in zip(start_idxs, end_idxs):
            match = " ".join(match_text[start_idx:end_idx+1])
            match = match.replace("<hi>", "").replace("</hi>", "")
            # BUG FIX: a stray trailing comma previously made `before` a
            # one-element tuple instead of a string (the IIIF hit
            # `before` field must be plain text).
            before = "..." + " ".join(
                match_text[max(0, start_idx-8):start_idx])
            after = " ".join(match_text[end_idx+1:end_idx+9]) + "..."
            hit = SearchHit(match=match, before=before, after=after,
                            annotations=[])
            # (word text, position, x, y, width, height) for every
            # highlighted word of this hit, across all of its lines.
            match_words = chain.from_iterable(
                ((match_text[w.sequence_pos], w.sequence_pos,
                  w.start_x, l.y_pos, w.end_x - w.start_x, l.height)
                 for w in winfos if start_idx <= w.sequence_pos <= end_idx)
                for l, winfos in line_infos)
            for chars, pos, x, y, w, h in match_words:
                anno = {
                    '@id': "/".join((get_canvas_id(book_id, page_id),
                                     'words', str(pos))),
                    '@type': 'oa:Annotation',
                    'motivation': 'sc:Painting',
                    'resource': {
                        '@type': 'cnt:ContentAsText',
                        'chars': (chars.replace('<hi>', '')
                                  .replace('</hi>', ''))},
                    'on': (get_canvas_id(book_id, page_id) +
                           "#xywh={},{},{},{}".format(x, y, w, h))}
                hit.annotations.append(anno['@id'])
                out['resources'].append(anno)
            out['hits'].append({
                '@type': 'sc:Hit',
                'annotations': hit.annotations,
                'match': hit.match,
                'before': hit.before,
                'after': hit.after})
    return flask.jsonify(out)
@app.route("/iiif/<book_id>/autocomplete", methods=['GET'])
@cors('*')
def autocomplete_in_book(book_id):
    """IIIF autocomplete endpoint: suggest indexed terms matching `q`.

    `min` (optional, default 1) filters out terms below that frequency.
    Requires the database-backed repository (501 otherwise).
    """
    if not isinstance(repository, DatabaseRepository):
        raise ApiException(
            "Autocompletion is only supported if the content has been "
            "indexed. Please run `hocrviewer index` to do so.", 501)
    base_url = flask.request.url_root[:-1]
    query = flask.request.args.get('q')
    min_cnt = int(flask.request.args.get('min', '1'))
    out = {
        "@context": "http://iiif.io/api/search/1/context.json",
        # BUG FIX: min_cnt is an int -- concatenating it directly to a
        # str raised TypeError whenever min > 1 was requested.
        "@id": (base_url +
                flask.url_for('autocomplete_in_book', book_id=book_id) +
                "?q=" + query +
                ('&min=' + str(min_cnt) if min_cnt > 1 else '')),
        "@type": "search:TermList",
        "ignored": [k for k in flask.request.args.keys()
                    if k not in ('q', 'min')],
        "terms": []}
    for term, cnt in repository.autocomplete(query, book_id, min_cnt):
        out['terms'].append({
            'match': term,
            'count': cnt,
            'url': (base_url +
                    flask.url_for('search_in_book', book_id=book_id) +
                    '?q=' + term)})
    return flask.jsonify(out)
@app.route('/')
def index():
    """Landing page listing all available books."""
    return flask.render_template(
        'index.html', book_ids=repository.document_ids())
@app.route('/view/<book_id>')
def view(book_id):
    """Open a single book in the embedded Mirador viewer."""
    return flask.render_template(
        'mirador.html',
        manifest_uri=flask.url_for('get_book_manifest', book_id=book_id))
@click.group()
@click_log.simple_verbosity_option()
@click.pass_context
@click.option('-db', '--db-path', help='Target path for application database',
              type=click.Path(dir_okay=False, readable=True, writable=True),
              default=click.get_app_dir('hocrviewer') + '/hocrviewer.db')
def cli(ctx, db_path):
    """CLI group: opens the database-backed repository when one exists."""
    db_path = pathlib.Path(db_path)
    ctx.obj['DB_PATH'] = db_path
    if db_path.exists():
        # Prefer the indexed repository whenever a database is present.
        global repository
        repository = DatabaseRepository(db_path)
@cli.command('serve')
@click.argument('base_directory', required=False,
                type=click.Path(file_okay=False, exists=True, readable=True))
def serve(base_directory):
    """Run the gunicorn server; without a DB, scan a directory of files."""
    global repository
    if repository is None:
        if base_directory is None:
            raise click.BadArgumentUsage("Please specify a base directory.")
        repository = FilesystemRepository(pathlib.Path(base_directory))
    HocrViewerApplication(app).run()
@cli.command('index')
@click.argument('hocr-files', nargs=-1,
                type=click.Path(dir_okay=False, exists=True, readable=True))
@click.option('--autocomplete-min-count', type=int, default=5,
              help="Only store terms with at least this frequency for "
                   "autocomplete (going from 5 to 1 doubles the database "
                   "size!)")
@click.pass_context
def index_documents(ctx, hocr_files, autocomplete_min_count):
    """Ingest HOCR files into the search database, with a progress bar."""
    def show_fn(hocr_path):
        # Progress-bar label; click passes None between items.
        if hocr_path is None:
            return ''
        else:
            return hocr_path.name
    global repository
    if repository is None:
        repository = DatabaseRepository(ctx.obj['DB_PATH'])
    hocr_files = tuple(pathlib.Path(p) for p in hocr_files)
    with click.progressbar(hocr_files, item_show_func=show_fn) as hocr_files:
        for hocr_path in hocr_files:
            try:
                repository.ingest_document(hocr_path, autocomplete_min_count)
            except Exception as e:
                # Keep indexing the remaining files; log the failure.
                logger.error("Could not ingest {}".format(hocr_path))
                logger.exception(e)
if __name__ == '__main__':
    # click requires an initial obj dict for the ctx.obj usage in `cli`.
    cli(obj={})
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 15:09:28 2020
@author: allen
"""
import cv2
import os
import numpy as np
from random import shuffle
from tqdm import tqdm
from tflearn.data_augmentation import ImageAugmentation
'''Setting up the env'''
TRAIN_DIR = './imgfilename/'
TEST_DIR = './test/'
IMG_SIZE = 256   # images are resized to IMG_SIZE x IMG_SIZE greyscale
LR = 1e-4        # learning rate
MODEL_NAME = 'image_classification-{}-{}.model'.format(LR, '6conv-basic')
'''Labelling the dataset'''
def label_img(img):
    """One-hot encode a class directory name.

    '0' -> [1, 0], '1' -> [0, 1]; any other name yields None.
    """
    encoding = {'0': [1, 0], '1': [0, 1]}
    return encoding.get(img)
'''Creating the training data'''
def create_train_data():
    """Load, label, resize and shuffle the training images.

    Walks TRAIN_DIR/<label>/<image>, loads each image as greyscale,
    resizes it to IMG_SIZE x IMG_SIZE, and caches the result to
    train_data.npy.  Returns the shuffled list of
    [image_array, label_array] pairs.
    """
    training_data = []
    for file in os.listdir(TRAIN_DIR):
        file_list = TRAIN_DIR + file + '/'
        # The directory name is the class label -- compute once per directory
        # (the original recomputed it for every image).
        label = label_img(file)
        # tqdm is only used for interactive loading
        for img in tqdm(os.listdir(file_list)):
            path = os.path.join(file_list, img)
            # Load as greyscale once; the original read the same file twice.
            img_data = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            try:
                img_data = cv2.resize(img_data, (IMG_SIZE, IMG_SIZE))
            except Exception:
                # Unreadable/corrupt image (imread returned None): skip it.
                continue
            training_data.append([np.array(img_data), np.array(label)])
    # Shuffle so classes are interleaved rather than grouped by directory.
    shuffle(training_data)
    # Save our processed data for further use if required.
    np.save('train_data.npy', training_data)
    return training_data
'''Processing the given test data'''
# Almost same as processing the training data but
# we dont have to label it.
def process_test_data():
    """Load and resize the test images; returns shuffled [image, id] pairs."""
    testing_data = []
    for file in os.listdir(TEST_DIR):
        file_list = TEST_DIR + file + '/'
        for img in tqdm(os.listdir(file_list)):
            path = os.path.join(file_list, img)
            # The containing directory name doubles as the image id.
            img_num = file
            # Load as greyscale once; the original read the same file twice.
            img_data = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            try:
                img_data = cv2.resize(img_data, (IMG_SIZE, IMG_SIZE))
            except Exception:
                # Unreadable/corrupt image: skip it.
                continue
            testing_data.append([np.array(img_data), img_num])
    shuffle(testing_data)
    np.save('test_data.npy', testing_data)
    return testing_data
'''Running the training and the testing in the dataset for our model'''
train_data = create_train_data()
#print(train_data)
test_data = process_test_data()
#print(test_data)
train_data_imgs = [item[0] for item in train_data]
train_data_lbls = [item[1] for item in train_data]
print(train_data_imgs)
print(train_data_lbls)
# train_data = np.load('train_data.npy')
# test_data = np.load('test_data.npy')
'''Creating the neural network using tensorflow'''
# Importing the required libraries
# NOTE(review): this augmentation object is never passed to input_data or
# the model below, so it currently has no effect -- confirm intent.
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.conv import conv_2d, max_pool_2d, residual_block, batch_normalization
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tensorflow as tf
# Clear any previous graph (tflearn relies on TF1-style global graphs).
tf.reset_default_graph()
# Input: greyscale IMG_SIZE x IMG_SIZE images, one channel.
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')
# conv layer 1 w/max pooling
conv1 = conv_2d(convnet, 32, 2, activation='relu')
conv1 = max_pool_2d(conv1, 2)
# conv layer 2 w/max pooling etc
conv2 = conv_2d(conv1, 64, 2, activation='relu')
conv2 = max_pool_2d(conv2, 2)
conv3 = conv_2d(conv2, 64, 2, activation='relu')
conv3 = max_pool_2d(conv3, 2)
conv4 = conv_2d(conv3, 128, 2, activation='relu')
conv4 = max_pool_2d(conv4, 2)
conv5 = conv_2d(conv4, 128, 2, activation='relu')
conv5 = max_pool_2d(conv5, 2)
conv6 = conv_2d(conv5, 256, 2, activation='relu')
conv6 = max_pool_2d(conv6, 2)
conv7 = conv_2d(conv6, 256, 2, activation='relu')
conv7 = max_pool_2d(conv7, 2)
conv8 = conv_2d(conv7, 512, 2, activation='relu')
conv8 = max_pool_2d(conv8, 2)
# fully connected layer
fc1 = fully_connected(conv8, 1024, activation='relu')
fc1 = dropout(fc1, 0.8)
# fc2
fc2 = fully_connected(fc1, 128, activation='relu')
fc2 = dropout(fc2, 0.8)
# output layer for classification (two classes, softmax)
output = fully_connected(fc2, 2, activation='softmax')
output = regression(output, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(output, tensorboard_dir='log') # logs to temp file for tensorboard analysis
# Splitting the testing data and training data
# NOTE(review): `train` is ALL of train_data while `test` is its last 500
# entries, so the validation set overlaps the training set -- confirm.
train = train_data[:]
test = train_data[-500:]
'''Setting up the features and lables'''
# X-Features & Y-Labels
X = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
Y = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
test_y = [i[1] for i in test]
'''Fitting the data into our model'''
model.fit(X, Y,n_epoch=50,validation_set=(test_x, test_y),snapshot_step=500,show_metric=True,run_id=MODEL_NAME)
model.save(MODEL_NAME)
'''Testing the data'''
import matplotlib.pyplot as plt
# if you need to create the data:
# test_data = process_test_data()
# if you already have some saved:
test_data = np.load('test_data.npy',allow_pickle=True)
fig = plt.figure()
ans = []
for num, data in enumerate(test_data[:100]):
    img_num = data[1]
    img_data = data[0]
    orig = img_data
    data = img_data.reshape(IMG_SIZE, IMG_SIZE, 1)
    # model_out = model.predict([data])[0]
    # NOTE(review): re-loading the model on every iteration is redundant;
    # one load before the loop would suffice.
    model.load('./'+str(MODEL_NAME))
    model_out = model.predict([data])[0]
    #print("model out:", model_out)
    ans.append(np.argmax(model_out))
print(ans)
|
#!/usr/bin/env python
"""
fitterMethods.py
@author: John Swoboda
Holds class that applies the fitter.
"""
#imported basic modules
import os, inspect, time
import pdb
# Imported scipy modules
import scipy as sp
import scipy.optimize
# My modules
from IonoContainer import IonoContainer
from utilFunctions import readconfigfile
from RadarDataSim.specfunctions import ISRSfitfunction
def defaultparamsfunc(curlag, sensdict, simparams):
    """Identity pass-through used as the default d_funcfunc in fitdata."""
    return (curlag, sensdict, simparams)
class Fitterionoconainer(object):
    """Fits ISR lag/spectrum data with nonlinear least squares."""

    def __init__(self,Ionocont,Ionosig,inifile):
        """ The init function for the fitter take the inputs for the fitter programs.

        Inputs:
            Ionocont: IonoContainer holding the measured lag data.
            Ionosig: IonoContainer holding the lag uncertainties (sigmas).
            inifile: Config file consumed by readconfigfile; supplies the
                sensor dictionary and the simulation parameter dictionary."""
        (self.sensdict,self.simparams) = readconfigfile(inifile)
        self.Iono = Ionocont
        self.sig = Ionosig

    def fitNE(self,Tratio = 1):
        """ This funtction will fit electron density assuming Te/Ti is constant
        thus only the zero lag will be needed.
        Inputs:
            Tratio: Optional a scaler for the Te/Ti.
        Outputs:
            Ne: A numpy array that is NtxNbxNrg, Nt is number of times, Nb is number
            of beams and Nrg is number of range gates.
            Nesig: matching array of uncertainties, scaled the same way."""
        # Zero-lag power scaled by (1 + Te/Ti) gives electron density.
        Ne = sp.absolute(self.Iono.Param_List[:,:,0]*(1.0+Tratio))
        Nesig = sp.absolute(self.sig.Param_List[:,:,0]*(1.0+Tratio))
        return (Ne,Nesig)

    def fitdata(self,fitfunc,startvalfunc,d_funcfunc=defaultparamsfunc,exinputs=[]):
        """This funcition is used to fit data given in terms of lags.

        Inputs:
            fitfunc: residual function handed to scipy.optimize.leastsq.
            startvalfunc: callable(Ne, coords, times, exinputs) -> start values.
            d_funcfunc: builds the extra args tuple for fitfunc per location.
            exinputs: extra inputs forwarded to startvalfunc.
                NOTE(review): mutable default argument -- safe only while
                callers never mutate it.
        Outputs:
            (fittedarray, fittederror): fitted parameters (plus Ne appended
            as the last parameter) and their error estimates."""
        # get intial guess for NE
        Ne_start,Ne_sig =self.fitNE()
        # Barker-coded pulses: only electron density can be recovered.
        if self.simparams['Pulsetype'].lower()=='barker':
            return(Ne_start[:,:,sp.newaxis],Ne_sig[:,:,sp.newaxis])
        # get the data and noise lags
        lagsData= self.Iono.Param_List.copy()
        (Nloc,Nt,Nlags) = lagsData.shape
        print('\nData Now being fit.')
        first_lag = True
        x_0all = startvalfunc(Ne_start,self.Iono.Cart_Coords,self.Iono.Time_Vector,exinputs)
        # +1: electron density is appended after the fitted parameters.
        nparams=x_0all.shape[-1]+1
        for itime in range(Nt):
            print('\tData for time {0:d} of {1:d} now being fit.'.format(itime,Nt))
            for iloc in range(Nloc):
                print('\t Time:{0:d} of {1:d} Location:{2:d} of {3:d} now being fit.'.format(itime,Nt,iloc,Nloc))
                curlag = lagsData[iloc,itime]
                d_func = d_funcfunc(curlag,self.sensdict,self.simparams)
                x_0 = x_0all[iloc,itime]
                #XXX Added random noise to start points
                # add some random noise so we don't just go to the desired value right away
                # NOTE(review): x_rand is computed but the line applying it
                # is commented out, so it currently has no effect.
                x_rand = sp.random.standard_normal(x_0.shape)*sp.sqrt(.1)*x_0
                # x_0 =x_0+x_rand
                if first_lag:
                    # Allocate the output arrays once the shapes are known.
                    first_lag = False
                    fittedarray = sp.zeros((Nloc,Nt,nparams))
                    fittederror = sp.zeros((Nloc,Nt,nparams,nparams))
                # get uncertianties
                if self.simparams['FitType'].lower()=='acf':
                    # this is calculated from a formula
                    d_func = d_func+(self.sig.Param_List[iloc,itime],)
                elif self.simparams['FitType'].lower()=='spectrum':
                    # these uncertianties are derived from the acf variances.
                    acfvar = self.sig.Param_List[iloc,itime]**2
                    Nspec = self.simparams['numpoints']
                    #XXX when creating these variences I'm assuming the lags are independent
                    # this isn't true and I should use the ambiguity function to fix this.
                    acfvarmat = sp.diag(acfvar)
                    # calculate uncertianties by applying the FFT to the columns and the
                    # ifft to the rows. Then multiply by the constant to deal with the different size ffts
                    specmat = sp.ifft(sp.fft(acfvarmat,n=Nspec,axis=0),n=Nspec,axis=1)*Nspec**2/Nlags
                    specsig = sp.sqrt(sp.diag(specmat.real))
                    d_func = d_func+(specsig,)
                (x,cov_x,infodict,mesg,ier) = scipy.optimize.leastsq(func=fitfunc,
                    x0=x_0,args=d_func,full_output=True)
                # Append the independently fitted Ne as the final parameter.
                fittedarray[iloc,itime] = sp.append(x,Ne_start[iloc,itime])
                if cov_x is None:
                    # leastsq failed to produce a covariance: mark as NaN.
                    fittederror[iloc,itime,:-1,:-1] = sp.ones((len(x_0),len(x_0)))*float('nan')
                else:
                    # Scale covariance by the residual variance to get sigmas.
                    fittederror[iloc,itime,:-1,:-1] = sp.sqrt(sp.absolute(cov_x*(infodict['fvec']**2).sum()/(len(infodict['fvec'])-len(x_0))))
                fittederror[iloc,itime,-1,-1] = Ne_sig[iloc,itime]
                print('\t\tData for Location {0:d} of {1:d} fitted.'.format(iloc,Nloc))
        return(fittedarray,fittederror)
#%% fit function stuff
def simpstart(Ne_init, loc, time, exinputs):
    """Build the (Nloc, Nt, 5) start-parameter array for the fitter.

    Columns follow the [Ni, Ti, Ne, Te, Vi] layout used elsewhere in this
    module: both densities start at Ne_init, both temperatures at 1000,
    and the velocity at zero.  `exinputs` is accepted but unused.
    """
    starts = sp.zeros((loc.shape[0], len(time), 5))
    starts[:, :, 0] = Ne_init   # ion density
    starts[:, :, 2] = Ne_init   # electron density
    starts[:, :, 1] = 1e3       # ion temperature
    starts[:, :, 3] = 1e3       # electron temperature
    starts[:, :, 4] = 0.0       # velocity
    return starts
#%% Testing
def main():
    """ Test function for the RadarData class."""
    t1 = time.time()
    curpath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    testpath = os.path.join(os.path.split(curpath)[0],'Testdata')
    Ionoin=IonoContainer.readh5(os.path.join(testpath,'lags.h5'))
    inifile = os.path.join(testpath,'PFISRExample.pickle')
    # NOTE(review): Fitterionoconainer.__init__ takes
    # (Ionocont, Ionosig, inifile) but only two arguments are passed here,
    # so `inifile` binds to Ionosig and this call raises TypeError --
    # a sigma container appears to be missing. Confirm before relying on
    # this test path.
    fitterone = Fitterionoconainer(Ionoin,inifile)
    (fitteddata,fittederror) = fitterone.fitdata(ISRSfitfunction,simpstart)
    (Nloc,Ntimes,nparams)=fitteddata.shape
    # Keep only the diagonal (per-parameter) error terms.
    fittederronly = fittederror[:,:,range(nparams),range(nparams)]
    paramlist = sp.concatenate((fitteddata,fittederronly),axis=2)
    paramnames = []
    species = fitterone.simparams['species']
    # Parameter layout: Ni/Ti per ion species, then Ne, Te, Vi.
    for isp in species[:-1]:
        paramnames.append('Ni_'+isp)
        paramnames.append('Ti_'+isp)
    paramnames = paramnames+['Ne','Te','Vi']
    # Error columns get an 'n' prefix (e.g. 'nNe').
    paramnamese = ['n'+ip for ip in paramnames]
    paranamsf = sp.array(paramnames+paramnamese)
    Ionoout=IonoContainer(Ionoin.Sphere_Coords,paramlist,Ionoin.Time_Vector,ver =1,
                          coordvecs = Ionoin.Coord_Vecs, paramnames=paranamsf,species=species)
    Ionoout.saveh5(os.path.join(testpath,'fittedtestdata.h5'))
    t2 = time.time()
    print(t2-t1)
if __name__== '__main__':
main() |
# This is just 40C20 -- the loop below brute-forces comb(40, 20) by testing
# every 40-bit value, about 1.1e12 iterations (infeasible to run to completion).
current = 0
count = 0
mod = 1000000   # progress-reporting threshold
def is_balanced(n):
    """Return True if the 40-bit binary form of n has exactly twenty 1-bits
    (i.e. as many ones as zeros).

    Uses str.count instead of the original manual digit loop.
    """
    return '{:040b}'.format(n).count('1') == 20
# Brute-force count of balanced 40-bit values (see header note above).
while current < (2**40):
    # Progress indicator: print every millionth value passed.
    if(current > mod):
        print(mod)
        mod = mod + 1000000
    if(is_balanced(current)):
        count = count + 1
    current = current + 1
print(count) |
# Program to perform Latent Semantic Analysis on a corpus of tweets in Arabic
# Tom Rishel
# University of Southern Mississippi
# April 2018
import os, glob, sys, glob2
from gensim import corpora, models, similarities
from pathlib import Path
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# function to read the tweets from text files into a list of strings
# also generates an csv file assigning an index value to each document
def files2Docs(path):
    """Read every **/*.txt under `path` into a list of document strings.

    Side effect: writes documentIndex.csv (in the cwd) mapping each list
    index to its source file path, and prints a running counter.
    Uses the stdlib glob with recursive=True and os.path.join instead of
    the original glob2 call with a hard-coded Windows '\\\\' pattern.
    """
    documents = []
    counter = 0
    pattern = os.path.join(str(path), '**', '*.txt')
    with open("documentIndex.csv", 'w') as o:
        for currentFile in glob.glob(pattern, recursive=True):
            # Context manager guarantees the handle is closed on errors too.
            with open(currentFile, 'r', encoding="utf-8") as i:
                documents.append(i.read())
            o.write(str(counter) + "," + str(currentFile) + "\n" )
            counter = counter + 1
            print("counter = " + str(counter))
    return documents
# function to read stop words from the supplied file and store them in a set
def getStopWords(file):
    """Return the set of whitespace-separated stop words found in `file`."""
    with open(file, 'r', encoding="utf-8") as handle:
        return set(handle.read().split())
# parent function to call sub functions
def ArabTweetBuildVectorCorpus(tweetFolder, stopFile):
    """Build and persist the gensim dictionary and bag-of-words corpus.

    tweetFolder: parent directory containing the tweet text files.
    stopFile: path of the stop-word list file.
    """
    documents = files2Docs(tweetFolder)
    stoplist = getStopWords(stopFile)
    #remove stop words from documents
    texts = [[word for word in document.split() if word not in stoplist]
             for document in documents]
    dictionary = corpora.Dictionary(texts)
    dictionary.save('.\\Arabic_tweet_LSI\\arab_tweet.dict') # store the dictionary, for future reference
    corpus = [dictionary.doc2bow(text) for text in texts]
    corpora.MmCorpus.serialize('.\\Arabic_tweet_LSI\\arab_tweet.mm', corpus) # store to disk, for later use
# function to generate the LSI model - consumes considerable computer resources and time
def ArabTweetTransformCorpus():
    """Train a 300-topic LSI model on the saved tfidf corpus and save it to disk."""
    #print(str(os.getcwd()))
    if (os.path.exists(".\\Arabic_tweet_LSI\\arab_tweet.dict")):
        # load the dictionary from disk
        dictionary = corpora.Dictionary.load('.\\Arabic_tweet_LSI\\arab_tweet.dict')
        # load the corpus in Matrix Market format from disk
        corpus = corpora.MmCorpus('.\\Arabic_tweet_LSI\\arab_tweet.mm')
        # message to let us know that we successfully loaded the dictionary and corpus
        print("Used files generated from first tutorial")
        # create the tfidf-weighted space
        tfidf = models.TfidfModel(corpus)
        tfidf_corpus = tfidf[corpus]
        lsi = models.LsiModel(tfidf_corpus, id2word=dictionary, num_topics=300)
        lsi.save(".\\Arabic_tweet_LSI\\lsi.model")
    else:
        print("Please run first tutorial to generate data set")
#function to load the lsi model from disk and run similarities on hard-coded queries
# queryDocPath is the path and filename of the query document as a text file
# category is the category of the query (e.g. terror, soccer, religion, etc.)
def ArabTweetSimilarities(queryDocPath, category):
    """Build and save an LSI similarity index, then query it.

    Writes the sorted (docId, similarity) pairs to <category>DocSims.csv.
    Unused locals from the original (`test`, `categoryList`) removed; file
    handles are now closed via context managers.
    """
    # load the dictionary from disk
    dictionary = corpora.Dictionary.load('.\\Arabic_tweet_LSI\\arab_tweet.dict')
    # load the corpus in Matrix Market format from disk
    corpus = corpora.MmCorpus('.\\Arabic_tweet_LSI\\arab_tweet.mm')
    # create the tfidf-weighted space
    tfidf = models.TfidfModel(corpus)
    tfidf_corpus = tfidf[corpus]
    lsi = models.LsiModel(tfidf_corpus, id2word=dictionary, num_topics=300)
    index = similarities.MatrixSimilarity(lsi[corpus]) # transform corpus to LSI space and index it
    index.save('.\\Arabic_tweet_LSI\\arab_tweet.index')
    # open and read the file storing the query document
    with open(queryDocPath, 'r', encoding="utf-8") as i:
        vec_bow = dictionary.doc2bow(i.read().split())
    vec_lsi = lsi[vec_bow] # convert the query to LSI space
    # similarities of the query to every document, best match first
    sims = sorted(enumerate(index[vec_lsi]), key=lambda item: -item[1])
    with open(category + "DocSims.csv", 'w') as o:
        for docId, docSim in sims:
            o.write(str(docId) + "," + str(docSim) + "\n")
# function to load the previously saved index, read the query document, calculate
# the similarities, and save them to a csv file
# queryDocPath is the path and filename of the query document as a text file
# category is the category of the query (e.g. terror, soccer, religion, etc.)
def runQuery(queryDocPath, category):
    """Query the previously saved similarity index.

    Writes the sorted (docId, similarity) pairs to <category>DocSims.csv.
    Unused locals from the original (`test`, `categoryList`) removed; file
    handles are now closed via context managers.
    """
    # load the dictionary from disk
    dictionary = corpora.Dictionary.load('.\\Arabic_tweet_LSI\\arab_tweet.dict')
    # load the corpus in Matrix Market format from disk
    corpus = corpora.MmCorpus('.\\Arabic_tweet_LSI\\arab_tweet.mm')
    # rebuild the tfidf-weighted LSI space (needed to project the query)
    tfidf = models.TfidfModel(corpus)
    tfidf_corpus = tfidf[corpus]
    lsi = models.LsiModel(tfidf_corpus, id2word=dictionary, num_topics=300)
    # load the previously saved index
    index = similarities.MatrixSimilarity.load('.\\Arabic_tweet_LSI\\arab_tweet.index')
    # open and read the file storing the query document
    with open(queryDocPath, 'r', encoding="utf-8") as i:
        vec_bow = dictionary.doc2bow(i.read().split())
    vec_lsi = lsi[vec_bow] # convert the query to LSI space
    # similarities of the query to every document, best match first
    sims = sorted(enumerate(index[vec_lsi]), key=lambda item: -item[1])
    with open(category + "DocSims.csv", 'w') as o:
        for docId, docSim in sims:
            o.write(str(docId) + "," + str(docSim) + "\n")
# run ArabTweetLSA <parent folder with Arabic tweets> <stop word file name> from the command line
if __name__ == '__main__':
    # to run similarities to a query using previously generated vector space use runQuery(<path to the query text document> <category of the query>)
    runQuery("C:\\Users\\Tom Rishel\\OneDrive - The University of Southern Mississippi\\Documents\\Research\\TKX\\query_terrorism.txt", "terror")
    # if running BuildVectorCorpus, pass the name of the parent directory containing
    # the corpus data and the name of the stop word file
    # syntax is ArabTweetLSA <parent folder with Arabic tweets> <stop word file name>
    #ArabTweetBuildVectorCorpus(sys.argv[1], sys.argv[2])
    #ArabTweetBuildVectorCorpus("C:\\Users\\Tom Rishel\\OneDrive - The University of Southern Mississippi\\Documents\\Research\\TKX\\twitter_screen_names_by_country",
    #    "C:\\Users\\Tom Rishel\\OneDrive - The University of Southern Mississippi\\Documents\\Research\\TKX\\arabic_stopword_list.txt")
    #ArabTweetTransformCorpus()
    #ArabTweetSimilarities()
    #files2Docs(sys.argv[1])
|
from load_dataset import load_dataset
from train import *  # brings in GAN, train, logger, plt, np, view_* helpers

# hyper params
real_size = (64, 64, 3)  # image shape -- presumably (height, width, channels); confirm against GAN
z_size = 64              # length of the generator's latent noise vector
learning_rate_d = 0.00006  # discriminator learning rate
learning_rate_g = 0.0001   # generator learning rate
batch_size = 512
epochs = 800
beta1 = 0.6  # NOTE(review): presumably the Adam beta1 used inside GAN -- confirm

if __name__ == '__main__':
    images = load_dataset()
    logger.logging.info('Training dataset: collection of {len} images'.format(len=len(images)))
    # preview the dataset and save the figure before training starts
    view_dataset(images)
    plt.savefig('plots/dataset.png')
    # record the full hyper-parameter set in the training log
    logger.logging.info('Train process with hyper params: \n'
                        'real_size: {real_size} \n'
                        'z_size: {z_size} \n'
                        'learning_rate_d: {learning_rate_d} \n'
                        'learning_rate_g: {learning_rate_g} \n'
                        'batch_size: {batch_size} \n'
                        'epochs: {epochs} \n'
                        'beta1: {beta1} \n'
                        .format(real_size=real_size, z_size=z_size, learning_rate_d=learning_rate_d,
                                learning_rate_g=learning_rate_g, batch_size=batch_size, epochs=epochs, beta1=beta1)
                        )
    # Create the network
    net = GAN(real_size, z_size, learning_rate_d, learning_rate_g, beta1=beta1)
    # Load the data and train the network here
    losses, samples = train(net, images, epochs, batch_size, z_size, print_every=10, show_every=25, figsize=(10, 5))
    # save the per-epoch sample grid and the loss curves produced by training
    _ = view_epoch_samples(samples)
    plt.savefig('plots/epoch-samples.png')
    _ = view_losses(np.array(losses))
    plt.savefig('plots/training-losses.png')
|
# -*- coding: utf-8 -*-
__author__ = 'Vit'
from bs4 import BeautifulSoup
from data_format.url import URL
from data_format.fl_data import FLData
from common.util import _iter, quotes
from interface.view_manager_interface import ViewManagerFromModelInterface
from model.site.parser import BaseSiteParser
class PornoxoSite(BaseSiteParser):
    """Site parser for pornoxo.com: thumbnails, tags, pagination and video URLs."""

    @staticmethod
    def can_accept_url(url: URL) -> bool:
        """Return True when this parser should handle the given URL."""
        return url.contain('pornoxo.com/')

    @staticmethod
    def create_start_button(view:ViewManagerFromModelInterface):
        """Register the site's start button and its category menu in the view."""
        menu_items=dict(Best_Recent=URL('http://www.pornoxo.com/'),
                        Most_popular=URL('http://www.pornoxo.com/most-viewed/page1.html?s*'),
                        Latest=URL('http://www.pornoxo.com/newest/page1.html?s*'),
                        Top_Rated=URL('http://www.pornoxo.com/top-rated/page1.html?s*'),
                        Longest=URL('http://www.pornoxo.com/longest/page1.html?s*'))
        view.add_start_button(picture_filename='model/site/resource/pornoxo.png',
                              menu_items=menu_items,
                              url=URL("http://www.pornoxo.com/", test_string='PornoXO'))

    def get_shrink_name(self):
        """Short identifier used by the framework for this site."""
        return 'PX'

    def parse_thumbs(self, soup: BeautifulSoup, url: URL):
        """Extract video thumbnails (link, preview image, duration, HD badge)."""
        for thumbnail in _iter(soup.find_all('li', {'class': 'thumb-item'})):
            href = URL(thumbnail.a.attrs['href'], base_url=url)
            thumb_url = URL(thumbnail.img.attrs['src'], base_url=url)
            label=thumbnail.img.attrs.get('alt','')
            # duration is the last text fragment of the data span, if present
            duration = thumbnail.find('span', {'class': 'fs11 viddata flr'})
            dur_time = '' if duration is None else str(duration.contents[-1])
            hd_span = thumbnail.find('span', {'class': 'text-active bold'})
            hd = '' if hd_span is None else str(hd_span.string)
            self.add_thumb(thumb_url=thumb_url, href=href, popup=label,
                           labels=[{'text':dur_time, 'align':'top right'},
                                   {'text':label, 'align':'bottom center'},
                                   {'text': hd, 'align': 'top left'}])

    def parse_thumbs_tags(self, soup: BeautifulSoup, url: URL):
        """Collect category links from the left menu of a listing page."""
        tags_container = soup.find('div', {'class': 'left-menu-box-wrapper'})
        if tags_container is not None:
            # NOTE(review): the href filter assumes every <a> carries an href;
            # bs4 passes None for tags without one, which would raise -- confirm
            for tag in _iter(tags_container.find_all('a',{'href':lambda x: '/videos/' in x})):
                self.add_tag(str(tag.string).strip(), URL(tag.attrs['href'], base_url=url))

    def parse_pagination(self, soup: BeautifulSoup, url: URL):
        """Add numbered pagination links (anchors whose text is a digit)."""
        pagination = soup.find('div', {'class': 'pagination'})
        if pagination is not None:
            for page in _iter(pagination.find_all('a',{'class': None})):
                if page.string.isdigit():
                    self.add_page(page.string, URL(page.attrs['href'], base_url=url))

    def parse_video(self, soup: BeautifulSoup, url: URL):
        """Extract playable stream URLs from the embedded jwplayer script."""
        video = soup.find('div', {'class': 'videoDetail'})
        if video is not None:
            script=video.find('script', text=lambda x: 'jwplayer(' in str(x))
            if script is not None:
                # strip all whitespace so quotes() can scan the raw JS config
                data = str(script.string).replace(' ', '').replace('\t', '').replace('\n', '')
                if 'sources:' in data:
                    # multiple quality variants: sources:[{file:...,label:...},...]
                    sources=quotes(data,'sources:[{','}]').split('},{')
                    for item in sources:
                        file = quotes(item, 'file:"', '"')
                        label=quotes(item,'label:"','"')
                        self.add_video(label, URL(file, base_url=url))
                elif "filefallback':" in data:
                    # single fallback stream only
                    file=quotes(data,'filefallback\':"','"')
                    self.add_video('DEFAULT', URL(file, base_url=url))
                self.set_default_video(-1)

    def parse_video_tags(self, soup: BeautifulSoup, url: URL):
        """Add the uploader (styled blue) and the video's content tags."""
        # adding "user" to video
        user = soup.find('div', {'class': 'user-card'})
        if user is not None:
            href = user.find('a').attrs['href']
            username = user.find('span', {'class': 'name'}).string
            self.add_tag(username, URL(href, base_url=url), style=dict(color='blue'))
        # adding tags to video
        for item in _iter(soup.find_all('div', {'class': 'content-tags'})):
            for href in _iter(item.find_all('a')):
                if href.string is not None:
                    self.add_tag(str(href.string), URL(href.attrs['href'], base_url=url))
if __name__ == "__main__":
    pass  # module is only ever imported by the parser framework; no CLI entry point
|
import pygame
from random import *
#小型敌方飞机
class SmallPlane(pygame.sprite.Sprite):
    """Small enemy plane: fastest type, destroyed by a single hit."""

    energy = 1  # full hit points for this plane type

    def __init__(self, bg_size):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load('image/smallPlane.png').convert_alpha()
        self.destroy_image = pygame.image.load('image/smallPlaneDestroy.png').convert_alpha()
        self.rect = self.image.get_rect()
        self.bg_width, self.bg_height = bg_size[0], bg_size[1]
        self.speed = 2
        # mask enables pixel-perfect collision detection
        self.mask = pygame.mask.from_surface(self.image)
        # spawn off-screen above the play field, as if flying in from afar
        self.reset()

    def move(self):
        """Fly straight down; respawn once past the bottom of the screen."""
        if self.rect.top < self.bg_height:
            self.rect.top += self.speed
        else:
            self.reset()

    def reset(self):
        """Respawn at a random position above the screen with full energy."""
        self.rect.left = randint(0, self.bg_width - self.rect.width)
        self.rect.top = randint(-5 * self.bg_height, 0)
        self.is_alive = True
        self.energy = SmallPlane.energy
#中型敌方飞机
class MiddlePlane(pygame.sprite.Sprite):
    """Medium enemy plane: slower than the small one, takes eight hits."""

    energy = 8  # full hit points for this plane type

    def __init__(self, bg_size):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load('image/middlePlane.png').convert_alpha()
        self.destroy_image = pygame.image.load('image/middlePlaneDestroy.png').convert_alpha()
        self.rect = self.image.get_rect()
        self.bg_width, self.bg_height = bg_size[0], bg_size[1]
        self.speed = 1
        # mask enables pixel-perfect collision detection
        self.mask = pygame.mask.from_surface(self.image)
        # spawn well above the screen so it arrives later than small planes
        self.reset()

    def move(self):
        """Fly straight down; respawn once past the bottom of the screen."""
        if self.rect.top < self.bg_height:
            self.rect.top += self.speed
        else:
            self.reset()

    def reset(self):
        """Respawn at a random position above the screen with full energy."""
        self.rect.left = randint(0, self.bg_width - self.rect.width)
        self.rect.top = randint(-10 * self.bg_height, -self.bg_height)
        self.is_alive = True
        self.energy = MiddlePlane.energy
#大型敌方飞机
class BigPlane(pygame.sprite.Sprite):
    """Large boss-style enemy plane: slow, takes twenty hits."""

    energy = 20  # full hit points for this plane type

    def __init__(self, bg_size):
        pygame.sprite.Sprite.__init__(self)
        # two frames so the caller can animate the boss by alternating them
        self.image1 = pygame.image.load('image/bigPlane1.png').convert_alpha()
        self.image2 = pygame.image.load('image/bigPlane2.png').convert_alpha()
        self.destroy_image = pygame.image.load('image/bigPlaneDestroy.png').convert_alpha()
        self.rect = self.image1.get_rect()
        self.bg_width, self.bg_height = bg_size[0], bg_size[1]
        self.speed = 1
        # mask (from frame 1) enables pixel-perfect collision detection
        self.mask = pygame.mask.from_surface(self.image1)
        # spawn far above the screen so it arrives last
        self.reset()

    def move(self):
        """Fly straight down; respawn once past the bottom of the screen."""
        if self.rect.top < self.bg_height:
            self.rect.top += self.speed
        else:
            self.reset()

    def reset(self):
        """Respawn at a random position above the screen with full energy."""
        self.rect.left = randint(0, self.bg_width - self.rect.width)
        self.rect.top = randint(-15 * self.bg_height, -5 * self.bg_height)
        self.is_alive = True
        self.energy = BigPlane.energy
|
from .default import env

# NOTE(review): not a Django built-in -- presumably a project-level flag for
# SQL query logging; confirm where it is read.
DATABASE_DEBUG=False
DATABASES = {
    # connection settings parsed from the DATABASE_URL environment variable
    'default': env.db()
}
# wrap every HTTP request in a database transaction
DATABASES['default']['ATOMIC_REQUESTS'] = True
# name the test database after the main one: "<name>_test"
DATABASES['default']['TEST'] = { "NAME": "{}_test".format(DATABASES['default']['NAME']) }
# enable timezone awareness by default
USE_TZ = True
|
# Read a dot-separated version string with single-digit segments
# (e.g. "1.3.9"), treat the digits as one integer, add 1, and print the
# digits re-joined with dots ("1.3.9" -> "1.4.0").
digits = [int(part) for part in input().split(".")]
incremented = int("".join(str(d) for d in digits)) + 1
print(".".join(str(incremented)))
|
# Classify a single typed character as a letter, a digit, or neither.
# Fixes: the prompt's colon sat outside the string literal (SyntaxError),
# the digit branch compared an undefined name 'ch', the digit message
# printed an undefined name 'character', and 'albhabet' was a typo.
charac = input('please enter your own character:')
if (charac >= 'a' and charac <= 'z') or (charac >= 'A' and charac <= 'Z'):
    print('the given character', charac, 'is an alphabet')
elif charac >= '0' and charac <= '9':
    print('the given character', charac, 'is a digit')
else:
    print('the given character', charac, 'is not an alphabet or a digit')
|
class Solution(object):
    def allPathsSourceTarget(self, graph):
        """
        Return every path from node 0 to node len(graph)-1 in a DAG,
        discovered breadth-first (shorter paths first).

        :type graph: List[List[int]]  (graph[i] lists nodes reachable from i)
        :rtype: List[List[int]]
        """
        # deque gives O(1) pops from the left; list.pop(0) was O(n) per pop
        from collections import deque
        target = len(graph) - 1
        res = []
        queue = deque([[0]])
        while queue:
            path = queue.popleft()
            lastNode = path[-1]
            if lastNode == target:
                res.append(path)
            else:
                for adj in graph[lastNode]:
                    # extend a copy so sibling branches stay independent
                    queue.append(path + [adj])
        return res
import copy
from fractions import Fraction
import music21 as m21
import numpy as np
import torch
import glob
import os
import pickle
from jazz_rnn.utils.music.vectorXmlConverter import create_note, tie_idx_2_value, tie_2_value
from jazz_rnn.utilspy.meters import AverageMeter
# Column layout of a "note" row: pitch, duration, tie flag, measure index,
# and the model's log-probability for the note.
PITCH_IDX_IN_NOTE, DURATION_IDX_IN_NOTE, TIE_IDX_IN_NOTE, \
    MEASURE_IDX_IN_NOTE, LOG_PROB_IDX_IN_NOTE = 0, 1, 2, 3, 4
def notes_to_stream(notes, stream, chords, head_len, remove_head=False, head_early_start=False):
    """Append generated notes to a music21 stream, grouped into 4-beat measures.

    notes: array of note rows (see *_IDX_IN_NOTE column constants).
    stream: existing music21 stream (the "head"); deep-copied unless removed.
    chords: per-measure chord pairs, cycled with modulo indexing.
    head_len: number of head measures, used to continue measure numbering.
    remove_head: keep only the head's tempo mark and start a fresh stream.
    head_early_start: start measure numbering one earlier (pickup measure).
    Returns the resulting stream.
    """
    m = m21.stream.Measure()
    if remove_head:
        # keep only the tempo marking from the original stream
        m.append(stream.flat.getElementsByClass(m21.tempo.MetronomeMark)[0])
        stream = m21.stream.Stream()
    else:
        stream = copy.deepcopy(stream)
    if head_early_start:
        measure_idx = -1
    else:
        measure_idx = 0
    measure_dur = 0  # beats accumulated in the current measure
    note_num = 0
    for n in notes:
        note_num += 1
        if n[DURATION_IDX_IN_NOTE] == 0:
            continue  # skip zero-duration placeholder notes
        m.append(
            create_note(n[PITCH_IDX_IN_NOTE], n[DURATION_IDX_IN_NOTE], tie_idx_2_value(n[TIE_IDX_IN_NOTE])))
        measure_dur += n[DURATION_IDX_IN_NOTE]
        # close the measure at 4 beats, or when the last note has been used
        if measure_dur >= 4 or note_num == notes.shape[0]:
            measure_dur = 0
            # Insert chords to measure: one at beat 0 and one at beat 2
            m.insert(0, copy.deepcopy(chords[measure_idx % len(chords)][0]))
            m.insert(2, copy.deepcopy(chords[measure_idx % len(chords)][1]))
            m.number = measure_idx + head_len + 1
            # a pickup measure (idx -1) is dropped when the head was removed
            if not (remove_head and measure_idx == -1):
                stream.append(m)
            measure_idx += 1
            m = m21.stream.Measure()
    return stream
def notes_to_swing_notes(notes):
    """Humanize a note sequence toward a swing feel; returns the new array.

    notes: object-dtype array of rows [pitch, duration, tie, ...] so that
    durations can hold Fractions.  On-beat pairs of eighth notes are
    stretched toward a long-short (~14/24 + 10/24) split with random noise,
    and a note following a long note may be delayed/anticipated by splitting
    off a short tied "delay" note.  (Fixes vs. original: removed a stray
    debug print(); Fraction(num / 48) used float division, producing huge
    power-of-two denominators instead of x/48; the np.insert results in the
    delay > 0 branch were discarded, since np.insert returns a new array.)
    """
    # drop zero-duration placeholder notes
    notes = notes[notes[:, 1] != 0]
    durations = notes[:, 1]
    # beat offset of each note within its 4-beat measure
    offsets = (np.cumsum(durations) - durations) % 4
    ind = 0
    while ind < len(durations) - 1:
        if offsets[ind] % 1 == 0:  # note starts exactly on a beat
            if durations[ind] == 0.5 and durations[ind + 1] == 0.5:
                # swing the eighth-note pair; jitter keeps the sum at 1 beat
                num = np.random.randint(-2, 4)
                noise = Fraction(num, 48)  # integer args keep denominators musical
                durations[ind] = Fraction(14, 24) - noise
                durations[ind + 1] = Fraction(10, 24) + noise
            no_tie = notes[ind - 1, 2] == 0 and notes[ind, 2] == 0
            if ind > 0 and durations[ind] > Fraction(1, 3) and durations[ind - 1] > Fraction(1, 3) and no_tie:
                # NOTE(review): randint(-3, 1) yields only -3..0, so the
                # delay > 0 branch below never fires -- confirm the range.
                delay = Fraction(np.random.randint(-3, 1), 64)
                if delay > 0:
                    # start this note late: shorten it and tie a short
                    # extension of the previous pitch in front of it
                    durations[ind] = durations[ind] - delay
                    delay_note = copy.deepcopy(notes[ind])
                    delay_note[0] = notes[ind - 1, 0]
                    delay_note[1] = delay
                    delay_note[2] = tie_2_value['stop']
                    notes[ind - 1, 2] = tie_2_value['start']
                    # np.insert is out-of-place; assign the results back
                    notes = np.insert(notes, ind, delay_note, axis=0)
                    durations = np.insert(durations, ind, np.array([delay]))
                    offsets = np.insert(offsets, ind, offsets[ind])
                    ind += 1
                elif delay < 0:
                    # start the next note early: steal time from this note
                    delay = 0 - delay
                    durations[ind - 1] = durations[ind - 1] - delay
                    delay_note = copy.deepcopy(notes[ind - 1])
                    delay_note[0] = notes[ind, 0]
                    delay_note[1] = delay
                    delay_note[2] = tie_2_value['start']
                    notes[ind, 2] = tie_2_value['stop']
                    notes = np.concatenate((notes[:ind], delay_note[np.newaxis, :], notes[ind:]))
                    durations = np.concatenate((durations[:ind], np.array([delay]), durations[ind:]))
                    offsets = np.concatenate((offsets[:ind], np.array([offsets[ind]]), offsets[ind:]))
                    ind += 1
        ind = ind + 1
    # write the (possibly re-allocated) durations back into the notes array
    notes[:, 1] = durations
    return notes
def get_topk_batch_indices_from_notes(notes, beam_width):
    """Select up to beam_width batch indices by mean per-note log-likelihood.

    notes: array of shape (time, batch, features); the log-probability column
    is averaged over the non-zero entries of each batch column.  Measures
    with identical (pitch, duration) content are de-duplicated first; if
    fewer than beam_width distinct measures exist, the remainder is padded
    with the next best indices.  Returns (indices, best_likelihood).
    """
    log_probs = notes[:, :, LOG_PROB_IDX_IN_NOTE]
    # per-batch count of notes that actually carry a log-probability
    counts = np.count_nonzero(log_probs, axis=0)[np.newaxis, :]
    counts = counts.repeat(notes.shape[0], 0)
    measure_log_likelihood = (log_probs / counts).sum(axis=0)
    top_likelihood = measure_log_likelihood.max()
    ranked = np.flip(measure_log_likelihood.argsort(), axis=0).copy()
    seen = set()
    topk_indices = []
    # first pass: best-scoring measures with previously unseen note content
    for idx in ranked:
        fingerprint = tuple(notes[:, idx, :2].flatten())
        if fingerprint not in seen:
            seen.add(fingerprint)
            topk_indices.append(idx)
            if len(topk_indices) == beam_width:
                break
    # second pass: pad with the best remaining indices, duplicates allowed
    if len(topk_indices) < beam_width:
        for idx in ranked:
            if idx not in topk_indices:
                topk_indices.append(idx)
                if len(topk_indices) == beam_width:
                    break
    return np.array(topk_indices), top_likelihood
class ScoreInference:
    """Scores generated measures with a trained reward network during beam search.

    Keeps a running AverageMeter per batch column; get_topk_batch_indices_from_notes
    prunes the batch to the beam_width best-scoring, content-distinct columns.
    """

    def __init__(self, model_path, converter, beam_width, threshold, batch_size, ensemble=False):
        """Load the reward model(s) and the duration converter saved next to them.

        model_path: path to a trained model file; its directory must also hold
            'converter_and_duration.pkl' and, for ensembles, '*f?.pt' folds.
        converter: the generator's pitch/duration converter.
        threshold: scores with |score| below this are zeroed in get_scores.
        ensemble: load every '*f?.pt' model in the directory instead of one.
        """
        self.beam_width = beam_width
        self.mean_score_meters = [AverageMeter() for _ in range(batch_size)]
        self.ensemble = ensemble
        self.threshold = threshold
        self.converter = converter
        # the reward model was trained with its own converter; load it to map
        # between the two duration vocabularies
        reward_converter_path = '/'.join(model_path.split('/')[:-1]) + '/converter_and_duration.pkl'
        with open(reward_converter_path, 'rb') as input_file:
            self.reward_converter = pickle.load(input_file)
        self.reward_supported_durs = list(self.reward_converter.bidict.keys())
        self.reward_durations = self.converter.dur_2_ind_vec(self.reward_supported_durs)
        self.reward_unsupported_durs = [i for i in range(len(self.converter.bidict)) if i not in self.reward_durations]
        if ensemble:
            self.model = []
            model_dir = '/'.join(model_path.split('/')[:-1])
            # sort fold files by the digit before '.pt' (e.g. '...f3.pt')
            model_list = sorted(glob.glob(os.path.join(model_dir, "*f?.pt")), key=lambda x: int(x.split('/')[-1][-4]))
            for model_path in model_list:
                with open(model_path, 'rb') as f:
                    model = torch.load(f)
                self.model.append(model)
        else:
            with open(model_path, 'rb') as f:
                model = torch.load(f)
            self.model = model
        self.top_score = None

    def update(self, notes, update_mask):
        """Score the latest notes and fold them into the per-column meters.

        update_mask: 1 for batch columns whose meter should be updated.
        """
        # change converters for score: re-index durations into the reward
        # model's duration vocabulary before scoring
        notes_to_update = copy.deepcopy(notes)
        notes_to_update[:, :, 1] = torch.Tensor(
            self.reward_converter.dur_2_ind_vec(self.converter.ind_2_dur_vec(notes[:, :, 1].view(-1)))).reshape(
            notes[:, :, 1].shape)
        scores = self.get_scores(notes_to_update)
        for ind, score in enumerate(scores):
            if update_mask[ind] == 1:
                score = score.item()
                # UNCOMMENT TO PUNISH LONG DURATIONS
                # if self.converter.ind_2_dur(notes[-1, ind, 1].item()) > 4:
                #     score = -100000
                self.mean_score_meters[ind].update(score)

    def get_scores(self, notes):
        """Return per-column scores in {-1, 0, +1}: ensemble tanh output,
        zeroed below the threshold, then reduced to its sign."""
        scores = self.get_ensemble_score(notes)
        scores[torch.abs(scores) < self.threshold] = 0
        scores = torch.sign(scores)
        return scores

    def get_topk_batch_indices_from_notes(self, notes):
        """Pick beam_width batch indices by mean score, preferring distinct
        (pitch, duration) content; reorder the meters to match the survivors.
        Returns (indices, top_score)."""
        measure_scores = np.array([m.avg for m in self.mean_score_meters])
        top_score = measure_scores.max()
        self.top_score = top_score
        sorted_indices = np.flip(measure_scores.argsort(), axis=0).copy()
        notes_set = set()
        topk_indices = []
        # first pass: best scores with unseen note content
        for ind in sorted_indices:
            set_size = len(notes_set)
            notes_set.add(tuple(notes[:, ind, :2].flatten()))
            if len(notes_set) == set_size + 1:
                topk_indices.append(ind)
                if len(topk_indices) == self.beam_width:
                    break
        # second pass: pad with the next best indices if needed
        if len(topk_indices) < self.beam_width:
            for ind in sorted_indices:
                if ind not in topk_indices:
                    topk_indices.append(ind)
                    if len(topk_indices) == self.beam_width:
                        break
        # replicate the surviving meters across the whole batch width
        new_meters = []
        while len(new_meters) != len(self.mean_score_meters):
            for k in topk_indices:
                new_meters.append(copy.deepcopy(self.mean_score_meters[k]))
        self.mean_score_meters = new_meters
        return np.array(topk_indices), top_score

    def get_ensemble_score(self, notes):
        """Average the tanh-squashed reward of every model in the ensemble.

        NOTE(review): iterates self.model, so the non-ensemble path (a single
        model object) presumably never calls this -- confirm.
        """
        scores_models = []
        for model in self.model:
            h = model.init_hidden(batch_size=notes.shape[1])
            scores, _ = model.forward_reward(notes, h)
            scores = torch.tanh(scores)
            scores_models.append(scores.squeeze())
        return torch.mean(torch.stack(scores_models), 0)
class HarmonyScoreInference:
    """Scores generated notes by agreement with the current chord, no model needed."""

    def __init__(self, converter, beam_width, beam_depth, batch_size):
        self.beam_width = beam_width
        self.beam_depth = beam_depth
        self.converter = converter
        self.mean_score_meters = [AverageMeter() for _ in range(batch_size)]
        self.top_score = None

    def update(self, notes, update_mask):
        """Fold harmony scores into the meters; rests (pitch 128) are skipped."""
        scores = self.get_sequence_score(notes)
        note_pitches = notes[1:, :, 0].squeeze()
        for ind, score in enumerate(scores):
            if update_mask[ind] == 1 and note_pitches[ind].item() != 128:
                self.mean_score_meters[ind].update(score.item())

    def get_sequence_score(self, notes):
        """Score each batch column: chord-tone membership divided by duration.

        Pitches are folded to one octave and compared against the 4 chord
        tones encoded in feature columns 17:29 of the first time step.
        NOTE(review): uses .cuda(), so this path requires a GPU -- confirm.
        """
        bptt, batch_size, _ = notes.shape
        notes_1_octave = notes[1:, :, 0] % 12  # pitch class, octave-independent
        durations = notes[1:, :, 1]
        durations_flat = durations.view(-1)
        durations_m21 = self.converter.ind_2_dur_vec(durations_flat)
        durations_m21 = durations_m21.reshape(1, batch_size)
        durations_float = torch.as_tensor(durations_m21.astype(np.float32)).cuda()
        # indices of the 4 active chord tones from the one-hot block 17:29
        chord_notes = torch.nonzero(notes.view(-1, 31)[:, 17:29])[:, 1].reshape(bptt, batch_size, 4)[:1]
        note_in_scale = torch.eq(notes_1_octave.unsqueeze(-1), chord_notes).sum(dim=-1)
        # shorter notes score higher per unit of chord agreement
        scores = (note_in_scale.float() / durations_float).squeeze()
        return scores

    def get_topk_batch_indices_from_notes(self, notes):
        """Pick beam_width batch indices by mean harmony score, preferring
        distinct (pitch, duration) content; reorder the meters to match.
        Returns (indices, top_score)."""
        measure_scores = np.array([m.avg for m in self.mean_score_meters])
        top_score = measure_scores.max()
        self.top_score = top_score
        sorted_indices = np.flip(measure_scores.argsort(), axis=0).copy()
        notes_set = set()
        topk_indices = []
        # first pass: best scores with unseen note content
        for ind in sorted_indices:
            set_size = len(notes_set)
            notes_set.add(tuple(notes[:, ind, :2].flatten()))
            if len(notes_set) == set_size + 1:
                topk_indices.append(ind)
                if len(topk_indices) == self.beam_width:
                    break
        # second pass: pad with the next best indices if needed
        if len(topk_indices) < self.beam_width:
            for ind in sorted_indices:
                if ind not in topk_indices:
                    topk_indices.append(ind)
                    if len(topk_indices) == self.beam_width:
                        break
        # replicate the surviving meters across the whole batch width
        new_meters = []
        while len(new_meters) != len(self.mean_score_meters):
            for k in topk_indices:
                new_meters.append(copy.deepcopy(self.mean_score_meters[k]))
        self.mean_score_meters = new_meters
        return np.array(topk_indices), top_score
|
def createArray(size):
    """Return a list of `size` empty-cell markers ("_ ")."""
    return ["_ " for _ in range(size)]
def createBoard(rows, cols):
    """Return a rows x cols grid of independent rows, all cells "_ "."""
    return [["_ "] * cols for _ in range(rows)]
def displayBoard(b):
    """Print the board, one row per line, cells concatenated as stored."""
    for row in b:
        for cell in row:
            print(cell, end="")
        print()
def move(b, s, player):
    """Place `player`'s mark on space `s` (1-9, left to right, top to bottom).

    Prints "Taken." and leaves the board unchanged when the space is
    occupied; prints "Invalid input." for s outside 1-9.  (The original
    only guarded spaces 1 and 2, and space 2 was overwritten even after
    printing "Taken." because its guard did not return.)
    """
    if not 1 <= s <= 9:
        print("Invalid input.")
        return
    # map 1..9 onto (row, col) of the 3x3 board
    row, col = divmod(s - 1, 3)
    if b[row][col] != "_ ":
        print("Taken.")
        return
    b[row][col] = player
def main():
    """Run a two-player tic-tac-toe input loop on a 3x3 board.

    NOTE(review): winFlag is never set to True, so the loop only ends via
    KeyboardInterrupt or a non-numeric entry (ValueError) -- confirm
    whether win detection was intended here.
    """
    board = createBoard(3, 3)
    displayBoard(board)
    winFlag = False
    whoseMove = True  # True -> "X" to move, False -> "O"
    while not winFlag:
        # input from user for one space
        space = int(input("Which space?"))
        move(board, space, "X" if whoseMove else "O")
        displayBoard(board)
        whoseMove = not whoseMove


# guard so importing this module no longer starts the game immediately
if __name__ == "__main__":
    main()
|
# create a function that takes a list and returns a new list with all the elements doubled
def doubleList(myList=None):
    """Print and return a new list with every element of myList doubled.

    Generalized to accept any list (as the comment above promises) and to
    return the result; calling with no argument keeps the original demo
    behavior with the [10, 20, 30, 40] sample list.
    """
    if myList is None:
        myList = [10, 20, 30, 40]
    doubledList = []
    print("The original list: ", myList)
    for item in myList:
        doubledList.append(item * 2)
    print("The doubled list: ", doubledList)
    return doubledList


doubleList()
|
import requests

# One-off demo: POST a small form payload to httpbin.org and echo the
# response body (httpbin returns the submitted form inside a JSON document).
with requests.Session() as s:
    dic = {
        'hello': 'world'
    }
    # data= sends the dict form-encoded in the request body
    res = s.post('http://httpbin.org/post', data=dic)
    print(res.text)
|
# -*- coding: utf-8 -*-
from flask import render_template
from app import app
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page."""
    return render_template('index.html', title='index')


@app.route('/info')
def info():
    """Render the static data-description page."""
    return render_template('data.html')


# dataset location shared by every prediction route
_DATA_PATH = '/home/WadsonGarbes/wmlapp/app/static/data/data.csv'


def _dropout_probabilities():
    """Fit the logistic model and return predict_proba over the whole dataset.

    Consolidates the load/split/fit/predict pipeline that was duplicated
    verbatim in four routes.  Column 1 of the result is the probability of
    dropping out ('evadir'), column 0 of staying ('permanecer').
    NOTE(review): the model is re-trained on every request -- consider
    caching the fitted model if the dataset is static.
    """
    arquivo = pd.read_csv(_DATA_PATH)
    X = arquivo.drop('drop', axis=1)
    Y = arquivo['drop']
    x_train, x_test, y_train, y_test = train_test_split(X, Y,
                                                        test_size=0.30,
                                                        random_state=1)
    logmodel = LogisticRegression()
    logmodel.fit(x_train, y_train)
    return logmodel.predict_proba(X)


@app.route('/evadir')
def evadir():
    """Return all drop-out probabilities as a comma-separated string."""
    prob = list(_dropout_probabilities()[:, 1])
    return str(prob)[1:-1]


@app.route('/permanecer')
def permanecer():
    """Return all staying probabilities as a comma-separated string."""
    prob = list(_dropout_probabilities()[:, 0])
    return str(prob)[1:-1]


@app.route('/evadir/<int:num>')
def evadir_num(num):
    """Return the drop-out probability of student `num` (0-400)."""
    try:
        prob = list(_dropout_probabilities()[:, 1])
        return str(prob[num]) + "\n"
    except IndexError:
        return """list index out of range - from 0 to 400 only!\n"""


@app.route('/permanecer/<int:num>')
def permanecer_num(num):
    """Return the staying probability of student `num` (0-400)."""
    try:
        prob = list(_dropout_probabilities()[:, 0])
        return str(prob[num]) + "\n"
    except IndexError:
        return """list index out of range - from 0 to 400 only!\n"""
|
"""
Recursion: a function that calls itself from within its own body.
(Compare with an ordinary loop.)
"""
num = 5


def func():
    """Print "12345" once per remaining count in the global `num`,
    counting it down to 0 through self-calls.  Returns 0 only when the
    countdown is already finished, otherwise None (the recursive result
    is deliberately not propagated, matching the original)."""
    global num
    if num == 0:
        return 0
    print("12345")
    num -= 1
    func()


func()
# review exercise: traverse a directory tree recursively
|
# Define physical constants (the literals are already floats; the redundant
# float() wrappers were removed).

# Boltzmann constant [ eV / K ]
k_Bol = 8.6173303e-5
# Planck constant [ eV * sec ]
# NOTE(review): 4.135667662e-15 eV*s is Planck's constant h, not h-bar
# (h/2pi ~ 6.58e-16 eV*s); the name suggests the latter -- confirm which
# one downstream code expects before renaming.
h_bar = 4.135667662e-15
# conversion factor: 1 meV = 0.001 eV
meV_to_eV = 0.001
|
import sys
from PyQt5 import QtWidgets
from pythonlab.views.plotter import UserInterface

# Application entry point: build the Qt application, show the plotter's
# main window, and hand control to the Qt event loop; the loop's exit
# status becomes the process exit code.
app = QtWidgets.QApplication(sys.argv)
ui = UserInterface()
ui.show()
sys.exit(app.exec())
#!/usr/bin/python
import os
import sys
import shutil
import re
import optparse
import os.path
import time
import glob
import commands
import subprocess
import getpass
from bcv import git_version
# =============================================================================
#
# PREAMBLE
#
# =============================================================================
def parse_commandline():
    """
    Parse the options given on the command-line.

    Returns the optparse options object.  Note that values supplied on the
    command line arrive as strings even when the default below is numeric;
    main() casts every option to its working type.
    """
    parser = optparse.OptionParser(usage = __doc__,version=git_version.verbose_msg)
    # input locations and sampling
    parser.add_option("-G", "--source",help="Path to source directory.",default='source')
    parser.add_option("-A", "--sourceA",help="Path to source A directory.",default='0')
    parser.add_option("-B", "--sourceB",help="Path to source B directory.",default='0')
    parser.add_option("-r", "--fs",help="Expected sampling rate of (primary) input time series.",default = 16384)
    # thresholds
    parser.add_option("-T", "--threshMan",help="Manual threshold, on or off.",default = 1)
    parser.add_option("-c", "--HbicohThresh",help="Manual threshold for function 1 or 2.",default = 0.996)
    # updating HbicohThresh default from 0.9606 to 0.996 (3-19-18)
    parser.add_option("-H", "--HbicohBspcThresh",help="Manual threshold for function 3.",default = 0.7)
    # analysis selection and channels
    parser.add_option("-F", "--fnct",help="Function selection.")
    parser.add_option("-m", "--mode",help="Mode selection.",default = 0)
    parser.add_option("-X", "--ChannelA",help="A channel name.",default = 'A')
    parser.add_option("-Y", "--ChannelB",help="B channel name.",default = 'B')
    # segmenting and windowing
    parser.add_option("-x", "--filt",help="Filter, on/off.",default = 0)
    parser.add_option("-s", "--segslice",help="Slice time series, on/off.",default = 0)
    parser.add_option("-S", "--segStart",help="Segment slice start time (s).",default = 0.0)
    parser.add_option("-E", "--segEnd",help="Segment slice end time (s).",default = 1.0)
    parser.add_option("-N", "--full_SNR",help="Process SNR on full time segment before segment slice, on/off.",default = 1)
    parser.add_option("-a", "--autom",help="Automatic overlap parameter calculating, on/off .",default = 1)
    parser.add_option("-t", "--time_window",help="Desired length of time for analysis (s).",default = 4.0)
    parser.add_option("-o", "--offset_multiplier",help="(See manual. Normally this should be calculated automatically by setting autom to 1.)",default = 1.0)
    parser.add_option("-O", "--ol",help="Desired segment overlap (%).",default = 98.0)
    parser.add_option("-g", "--seg",help="Desired number of experiments.",default = 30)
    parser.add_option("-l", "--tile",help="Tiled option, on/off.",default = 0)
    parser.add_option("-C", "--GPS_central",help="Expected GPS central time.",default = 0.0)
    parser.add_option("-k", "--check",help="Check parameters, on/off.",default = 1)
    # resampling and output
    parser.add_option("-D", "--dnsmple",help="Downsample, on/off.",default = 0)
    parser.add_option("-d", "--decm",help="Decimate, on/off.",default = 1)
    parser.add_option("-p", "--plot_img",help="Plot, on/off.",default = 0)
    parser.add_option("-R", "--randomize",help="Use phase randomization, on/off.",default = 1)
    parser.add_option("-q", "--preSeq",help="Use user-provided random number sequence, on/off.",default = 0)
    parser.add_option("-w", "--sequenceN",help="User-provided random number list (typically a text file), full path (string).",default = '/home/bernard.hall/bispectral_suite/random_seq/seq_552015_2.txt')
    parser.add_option("-f", "--fsd",help="Target downsampling or decimation sampling rate (Hz).",default = 4096)
    parser.add_option("-u", "--uSnrV",help="User-input SNR value.",default = 1.0)
    parser.add_option("-U", "--upr",help="Upper bispectral value for bispectrogram calculation.",default = 1.0)
    parser.add_option("-P", "--path",help="Output path.",default = '')
    parser.add_option("-i", "--ifoChannel",help="IFO and channel information.",default = '')
    parser.add_option("-M", "--Memory",help="Amount of memory to request.",default = 8192)
    parser.add_option("-K", "--checkpow2",help="Ensure cross bicoherence channels have a power of 2 sampling rate calculated.",default = 1)
    opts, args = parser.parse_args()
    return opts
# =============================================================================
#
# Main
#
# =============================================================================
def main():
#######################################################################
## parse the command line
opts = parse_commandline()
#sourceDir = 'gaussian_1000_source'
#sourceDir = 'gaussian_source_full'
sourceDir = opts.source
sourceDirA = opts.sourceA
sourceDirB = opts.sourceB
#funct = 2
funct = int(opts.fnct)
filt = int(opts.filt)
#outDir = 'gaussian_output'
#outDir = 'gaussian_T_0_8_50p'
outDir = opts.path
#outDir = 'gaussian_1000_output'
#overlap = 50.0 #98.0 (default)
overlap = float(opts.ol)
#decimate = 0
decimate = int(opts.decm)
fs = float(opts.fs)
threshMan = int(opts.threshMan)
HbicohThresh = float(opts.HbicohThresh)
HbicohBspcThresh = float(opts.HbicohBspcThresh)
mode = int(opts.mode)
ChannelA = opts.ChannelA
ChannelB = opts.ChannelB
segslice = int(opts.segslice)
segStart = float(opts.segStart)
segEnd = float(opts.segEnd)
full_SNR = int(opts.full_SNR)
autom = int(opts.autom)
time_window = float(opts.time_window)
offset_multiplier = float(opts.offset_multiplier)
seg = int(opts.seg)
tile = int(opts.tile)
GPS_central = float(opts.GPS_central)
checkM = int(opts.check)
dnsmple = int(opts.dnsmple)
plot_img = int(opts.plot_img)
randomize = int(opts.randomize)
preSeq = int(opts.preSeq)
sequenceN = opts.sequenceN
fsd = float(opts.fsd)
uSnrV = float(opts.uSnrV)
upr = float(opts.upr)
ifoChannel = opts.ifoChannel
mem = str(opts.Memory)
checkpow2 = int(opts.checkpow2)
if ((funct == 0) or (funct== 1)) and ((sourceDirA == '0') or (sourceDirB == '0')):
print 'Error: cross-bispectrum/cross-bicoherence selected, but no channel A or channel B information provided! Exiting...'
sys.exit()
###################################################################
accGroup = 'ligo.dev.o2.detchar.nonlin_coup.twochanveto'
#accGroup = 'ligo.dev.o1.detchar.nonlin_coup.bcv'
check = 1 # check whether a job has already been submitted to Condor
ext = 'txt' # string against which to compare the "tag" variable to verify type
#option = 2
#wd = os.getcwd()
# Get full path of current directory --------------------------------
wd = commands.getstatusoutput('pwd')
wd = wd[1]
#--------------------------------------------------------------------
#print wd
lim = commands.getstatusoutput('ulimit -Hn')
#currProcessCount = commands.getstatusoutput('lsof -u %s 2>/dev/null | wc -l'%getpass.getuser())
lim = int(lim[1])
spaceleft = commands.getstatusoutput('df -hT /home/%s/'%getpass.getuser())
spaceleft = spaceleft[1]
print spaceleft
#currProcessCount = int(currProcessCount[1])
#print lim
#print currProcessCount
###################################################################
#runList = os.listdir(sourceDir)
runList = glob.glob(sourceDir + '/*.' + ext)
runList.sort()
if sourceDirA != '0':
runListA = glob.glob(sourceDirA + '/*.' + ext)
if len(runList) != len(runListA): # check if the number of time-series files for channel A is the same as for GW channel; they should be equal
print 'Error: Channel A file list is not the same length as GW file list!'
sys.exit()
runListA.sort()
if sourceDirB != '0':
runListB = glob.glob(sourceDirB + '/*.' + ext)
if len(runList) != len(runListB): # see above
print 'Error: Channel B file list is not the same length as GW file list!'
sys.exit()
runListB.sort()
#print runList
if check:
doneList = os.listdir(outDir)
#limit = 100
limit = 2
k = 1
j = 1
#flag = 1
if not os.path.isfile('batch.ini'):
num = open('batch.ini', 'w')
num.write('1')
num.close()
f = open('list_'+ str(k) + '.txt', 'w')
flag = 1
for line in runList: # GW channel lists
if not (j % limit):
f.write(line + '\n')
f.close()
k = k + 1
f = open('list_'+ str(k) +'.txt', 'w')
flag = 0
if flag:
f.write(line + '\n')
else:
flag = 1
j = j + 1
f.close()
if sourceDirA != '0': # Channel A lists
k = 1
j = 1
f = open('list_A_'+ str(k) + '.txt', 'w')
flag = 1
for line in runListA:
if not (j % limit):
f.write(line + '\n')
f.close()
k = k + 1
f = open('list_A_'+ str(k) +'.txt', 'w')
flag = 0
if flag:
f.write(line + '\n')
else:
flag = 1
j = j + 1
f.close()
if sourceDirB != '0': # Channel B lists
k = 1
j = 1
f = open('list_B_'+ str(k) + '.txt', 'w')
flag = 1
for line in runListB:
if not (j % limit):
f.write(line + '\n')
f.close()
k = k + 1
f = open('list_B_'+ str(k) +'.txt', 'w')
flag = 0
if flag:
f.write(line + '\n')
else:
flag = 1
j = j + 1
f.close()
#print runList
num = open('batch.ini', 'r')
number = num.readline()
number = number.strip('\n')
num.close()
#print number
if os.path.isfile('list_'+ number + '.txt'):
print 'Opening ' + '"list_'+ number + '.txt"'
lt = open('list_'+ number + '.txt', 'r')
listThr = list(lt)
lt.close()
if sourceDirA != '0':
if os.path.isfile('list_A_'+ number + '.txt'):
print 'Opening ' + '"list_A_'+ number + '.txt"'
ltA = open('list_A_'+ number + '.txt', 'r')
listAThr = list(ltA)
ltA.close()
else:
print 'Missing channel A file(s)! (or incorrect list A file name)...exiting...'
sys.exit()
if sourceDirB != '0':
if os.path.isfile('list_B_'+ number + '.txt'):
print 'Opening ' + '"list_B_'+ number + '.txt"'
ltB = open('list_B_'+ number + '.txt', 'r')
listBThr = list(ltB)
ltB.close()
else:
print 'Missing channel B file(s)! (or incorrect list A file name)...exiting...'
sys.exit()
else:
print 'Reached end of list (or incorrect list file name)...exiting...'
sys.exit()
#print listThr
#num = open('batch.ini', 'w')
#num.write(str(int(number) + 1))
#num.close()
#print doneList
#print ('bicoh_out_300.txt' in doneList)
#sys.exit()
i = 1 + ((int(number) - 1) * 100)
if not outDir == '.':
if not os.path.isfile(outDir + '/links.txt'): # create a text file to record file linkage between source and output directories
match = open(outDir + '/links.txt', 'w')
else:
match = open(outDir + '/links.txt', 'a')
else:
if not os.path.isfile('links.txt'):
match = open('links.txt', 'w')
else:
match = open('links.txt', 'a')
#setcmd = 'export CONDOR_REQUIREMENTS="(Memory * 1024) >= ImageSize"'
#print "Setting environment variables..."
#os.system(setcmd)
if outDir == '.':
outDirC = ''
else:
outDirC = outDir + '/'
print len(listThr)
currProcessCount = 0
success = 1
#cntA = 0
#cntB = 0
print "Submitting jobs..."
#for line in listThr:
for cntAB, line in enumerate(listThr):
fl = 1
cPC = currProcessCount
#print cPC
try:
currProcessCount = commands.getstatusoutput('lsof -u %s 2>/dev/null | wc -l'%getpass.getuser())
currProcessCount = int(currProcessCount[1])
if (cPC > currProcessCount) and (currProcessCount < 5):
fl = 0
else:
print 'Current process count ' + str(currProcessCount) + ' of ' + str(lim) + ' limit...'
except:
fl = 0
success = 0
print 'Current process count ' + str(currProcessCount)
if (currProcessCount < lim) and fl:
line = line.split('/')
line = line[len(line) - 1]
#print line
prefix = line.split('.')
tag = prefix[len(prefix)-1] # get extension for filename to verify type
tag = tag.strip('\n')
tag = tag.strip('\r')
line = line.strip('\n')
line = line.strip('\r')
prefix = prefix[0:len(prefix)-1]
pjoin = '.'
prefix = pjoin.join(prefix)
chG = line.split('+')
chG = float(chG[0])
#print prefix
#print tag
#prefix = prefix[0]
if sourceDirA != '0': # check if source A directory parameter has been entered, or is blank/zero
lineA = listAThr[cntAB]
lineA = lineA.split('/')
lineA = lineA[len(lineA) - 1]
#print line
prefixA = lineA.split('.')
tagA = prefixA[len(prefixA)-1]
tagA = tagA.strip('\n')
tagA = tagA.strip('\r')
lineA = lineA.strip('\n')
lineA = lineA.strip('\r')
prefixA = prefixA[0:len(prefixA)-1]
pjoinA = '.'
prefixA = pjoinA.join(prefixA)
#cntA = cntA + 1
chA = lineA.split('+')
chA = float(chA[0])
if chA != chG: # if channels are not aligned in time, then there is a problem -> abort
print 'Error: GW/Channel A central time mismatch! Exiting...'
sys.exit()
if sourceDirB != '0': # check if source B directory parameter has been entered, or is blank/zero
lineB = listBThr[cntAB]
lineB = lineB.split('/')
lineB = lineB[len(lineB) - 1]
#print line
prefixB = lineB.split('.')
tagB = prefixB[len(prefixB)-1]
tagB = tagB.strip('\n')
tagB = tagB.strip('\r')
lineB = lineB.strip('\n')
lineB = lineB.strip('\r')
prefixB = prefixB[0:len(prefixB)-1]
pjoinB = '.'
prefixB = pjoinB.join(prefixB)
#cntB = cntB + 1
chB = lineB.split('+')
chB = float(chB[0])
if chB != chG:
print 'Error: GW/Channel B central time mismatch! Exiting...'
sys.exit()
if tag == ext: # may not need this now that this is accounted for with glob
if (funct == 0) or (funct== 1):
#shRun = '"bicoherence_analysis -G ' + sourceDir + '/' + line + ' -A ' + sourceDirA + '/' + lineA + ' -B ' + sourceDirB + '/' + lineB + ' -F ' + str(funct) +' -m ' + str(mode) + ' -u ' + str(uSnrV) + ' -P ' + wd + '/' + outDirC + ' -f ' + str(fsd) + ' -q ' + str(preSeq) + ' -w ' + sequenceN + ' -T ' + str(threshMan) + ' -d ' + str(decimate) + ' -i ' + prefix + ' -O ' + str(overlap) + ' -x ' + str(filt) + '"'
shRun = '"bicoherence_analysis -G ' + sourceDir + '/' + line + ' -A ' + sourceDirA + '/' + lineA + ' -B ' + sourceDirB + '/' + lineB + ' -F ' + str(funct) +' -m ' + str(mode) + ' -u ' + str(uSnrV) + ' -P ' + wd + '/' + outDirC + ' -f ' + str(fsd) + ' -q ' + str(preSeq) + ' -w ' + sequenceN + ' -T ' + str(threshMan) + ' -d ' + str(decimate) + ' -i ' + prefix + ' -O ' + str(overlap) + ' -x ' + str(filt) + ' -r ' + str(fs) + ' -c ' + str(HbicohThresh) + ' -s ' + str(segslice) + ' -S ' + str(segStart) + ' -E ' + str(segEnd) + ' -N ' + str(full_SNR) + ' -a ' + str(autom) + ' -t ' + str(time_window) + ' -o ' + str(offset_multiplier) + ' -g ' + str(seg) + ' -l ' + str(tile) + ' -C ' + str(GPS_central) + ' -k ' + str(checkM) + ' -D ' + str(dnsmple) + ' -R ' + str(randomize) + ' -U ' + str(upr) + ' -p ' + str(plot_img) + ' -X ' + str(checkpow2) + '"'
cmd = 'nohup condor_run -a RequestMemory=' + mem + ' -a accounting_group=' + accGroup + ' ' + shRun + ' > ' + outDirC + 'bicoh_out_'+ str(i) +'.txt &'
#i = i + 1
if check == 1:
if not ('bicoh_out_'+ str(i) +'.txt' in doneList):
print cmd + '\n'
match.write(sourceDir + ' ' + line + ' ' + outDir + ' bicoh_out_' + str(i) + '\n')
os.system(cmd)
if not (i % 10):
time.sleep(5)
else:
print 'Job already processed...skipping...(bicoh_out_'+ str(i) +'.txt)'
else:
print cmd + '\n'
match.write(sourceDir + ' ' + line + ' ' + outDir + ' bicoh_out_' + str(i) + '\n')
os.system(cmd)
if not (i % 10):
time.sleep(5)
i = i + 1
else:
#if option == 1:
#shRun = '"bicoherence_analysis -G source/' + line + ' -F 3 -w random_seq/seq_552015_2.txt -m 1 -u 14.026610259768070 -P /home/bernard.hall/bispectral_suite/output/ -f 4096 -q 1 -w /home/bernard.hall/bispectral_suite/random_seq/seq_552015_2.txt -l 1 -a 0 -i ' + prefix + '"'
# shRun = '"bicoherence_analysis -G ' + sourceDir + '/' + line + ' -F ' + str(funct) +' -m 1 -u 14.026610259768070 -P /home/bernard.hall/bispectral_suite/' + outDirC + ' -f 4096 -q 1 -w /home/bernard.hall/bispectral_suite/random_seq/seq_552015_2.txt -T 1 -c 0.8 -d 1 -i ' + prefix + '"'
#elif option == 2:
# shRun = '"bicoherence_analysis -G ' + sourceDir + '/' + line + ' -F ' + str(funct) +' -m 1 -u 14.026610259768070 -P ' + wd + '/' + outDirC + ' -f 4096 -q 1 -w /home/bernard.hall/bispectral_suite/random_seq/seq_552015_2.txt -T 1 -d ' + str(decimate) + ' -i ' + prefix + ' -O ' + str(overlap) + '"'
shRun = '"bicoherence_analysis -G ' + sourceDir + '/' + line + ' -F ' + str(funct) +' -m ' + str(mode) + ' -u ' + str(uSnrV) + ' -P ' + wd + '/' + outDirC + ' -f ' + str(fsd) + ' -q ' + str(preSeq) + ' -w ' + sequenceN + ' -T ' + str(threshMan) + ' -d ' + str(decimate) + ' -i ' + prefix + ' -O ' + str(overlap) + ' -x ' + str(filt) + ' -r ' + str(fs) + ' -c ' + str(HbicohThresh) + ' -s ' + str(segslice) + ' -S ' + str(segStart) + ' -E ' + str(segEnd) + ' -N ' + str(full_SNR) + ' -a ' + str(autom) + ' -t ' + str(time_window) + ' -o ' + str(offset_multiplier) + ' -g ' + str(seg) + ' -l ' + str(tile) + ' -C ' + str(GPS_central) + ' -k ' + str(checkM) + ' -D ' + str(dnsmple) + ' -R ' + str(randomize) + ' -U ' + str(upr) + ' -p ' + str(plot_img) + '"'
cmd = 'nohup condor_run -a RequestMemory=' + mem + ' -a accounting_group=' + accGroup + ' ' + shRun + ' > ' + outDirC + 'bicoh_out_'+ str(i) +'.txt &'
#i = i + 1
if check == 1:
if not ('bicoh_out_'+ str(i) +'.txt' in doneList):
print cmd + '\n'
match.write(sourceDir + ' ' + line + ' ' + outDir + ' bicoh_out_' + str(i) + '\n')
os.system(cmd)
if not (i % 10):
time.sleep(5)
else:
print 'Job already processed...skipping...(bicoh_out_'+ str(i) +'.txt)'
else:
print cmd + '\n'
match.write(sourceDir + ' ' + line + ' ' + outDir + ' bicoh_out_' + str(i) + '\n')
os.system(cmd)
if not (i % 10):
time.sleep(5)
i = i + 1
else: # process limit has been reached; no further submissions possible -> exit
print 'Process limit reached. Exiting...'
print '(Current process count ' + str(currProcessCount) + ')'
#print fl
match.close()
sys.exit()
match.close()
#success = 0
if success:
num = open('batch.ini', 'w')
num.write(str(int(number) + 1))
num.close()
spaceleft = commands.getstatusoutput('df -hT /home/%s/'%getpass.getuser()) # check and report space left on user's account
spaceleft = spaceleft[1]
print spaceleft
# Script entry point: run the batch-submission driver defined above.
if __name__ == "__main__":
    main()
|
import pygame
from Player import *
from Enemy import *
class Start_menu:
    """Start-menu screen: draws the title and start button, handles
    clicks to launch a game and loops the menu music."""

    def __init__(self, game):
        """Build the menu surfaces and start the intro music.

        game -- main game object; must expose width, height, is_playing,
        new_game() and the SONG_END event id.
        """
        self.game = game
        self.title = pygame.image.load('assets/title/title.png').convert_alpha()
        self.title_rect = self.title.get_rect()
        # Center the title horizontally, a quarter of the way down.
        self.title_rect.x = game.width / 2 - self.title_rect.width / 2
        self.title_rect.y = game.height / 4
        self.start = pygame.image.load('assets/buttons/button.png').convert_alpha()
        self.start = pygame.transform.scale(self.start, (400, 400))
        self.start_rect = self.start.get_rect()
        self.start_rect.x = game.width / 2 - self.start_rect.width / 2
        self.start_rect.y = game.height / 2
        self.menu_repeat_music()

    def start_menu(self, screen):
        """Draw the menu onto *screen* and process pending events."""
        screen.blit(self.start, self.start_rect)
        screen.blit(self.title, self.title_rect)
        for event in pygame.event.get():
            # Window close button.
            if event.type == pygame.QUIT:
                pygame.quit()
                # BUG FIX: 'sys' is not imported in this module, so
                # sys.exit() raised NameError here. raise SystemExit is
                # the built-in equivalent and needs no import.
                raise SystemExit
            # Mouse click: launch the game if the start button was hit.
            elif event.type == pygame.MOUSEBUTTONDOWN:
                if self.start_rect.collidepoint(event.pos):
                    self.game.is_playing = True
                    pygame.mixer.music.stop()
                    self.game.new_game()
            # Intro track finished: switch to the looping track.
            elif event.type == self.game.SONG_END:
                pygame.mixer.music.load('assets/music/menumusicrepeat.ogg')
                pygame.mixer.music.play(-1)

    def menu_repeat_music(self):
        """Play the intro music once; SONG_END fires when it ends."""
        pygame.mixer.music.set_endevent(self.game.SONG_END)
        pygame.mixer.music.load('assets/music/menumusicstart.ogg')
        pygame.mixer.music.play(0)
|
from django.contrib import admin

from .models import CartItem, Order, Product

# Expose the shop models in the Django admin site.
for model in (Product, CartItem, Order):
    admin.site.register(model)
|
#!/usr/bin/env python
from __future__ import print_function
import josepy
from cryptography.hazmat.backends import default_backend
from acme import client as acme_client
from cryptography.hazmat.primitives import serialization
from acme import errors, messages
import json
import sys
def main():
    """Register an ACME account for the e-mail address in argv[2].

    Reads a PEM-encoded RSA private key from stdin, registers it against
    the Let's Encrypt directory chosen by argv[1] ('staging' selects the
    staging endpoint, anything else production v01) and prints a JSON
    blob containing the account URI.

    NOTE(review): sys.stdin.read() returns text on Python 3 while
    load_pem_private_key expects bytes -- this script appears to target
    Python 2 (where str is bytes); confirm before running under 3.
    """
    if sys.argv[1] == 'staging':
        directory = 'https://acme-staging.api.letsencrypt.org/directory'
    else:
        directory = 'https://acme-v01.api.letsencrypt.org/directory'
    key = josepy.JWKRSA(key=serialization.load_pem_private_key(
        sys.stdin.read(),
        password=None,
        backend=default_backend())
    )
    net = acme_client.ClientNetwork(key)
    client = acme_client.Client(
        directory=directory,
        key=key,
        net=net
    )
    new_reg = messages.NewRegistration.from_data(
        email=sys.argv[2]
    )
    try:
        regr = client.register(new_reg)
        # BUG FIX: on a fresh (non-conflicting) registration 'acct' was
        # left as None and the printed URI was null; use the URI of the
        # newly created registration resource.
        acct = regr.uri
    except errors.ConflictError as e:
        # Account already exists; its location header is the URI.
        acct = e.location
    print(json.dumps({'body': {}, 'uri': acct}))
|
class Solution:
    """LeetCode 91 'Decode Ways': count the decodings of a digit string
    where 'A'..'Z' map to "1".."26"."""

    def numDecodings(self, s: str) -> int:
        """Return the number of ways *s* can be decoded."""
        # BUG FIX: the cache used 0 as its "not computed" sentinel, but 0
        # is a legitimate answer (e.g. "06"), so zero-valued subproblems
        # were recomputed every time. Use None as the sentinel instead.
        cache = [None] * len(s)
        return self.ways(s, 0, len(s), cache)

    def ways(self, string, index, length, cache):
        """Count decodings of string[index:], memoized in *cache*."""
        if index >= length:
            # Consumed the whole string: one complete decoding found.
            return 1
        if cache[index] is not None:
            return cache[index]
        indexChar = int(string[index])
        if indexChar == 0:
            # No letter encoding starts with '0'.
            return 0
        # Decode a single digit...
        result = self.ways(string, index + 1, length, cache)
        # ...or a two-digit pair if it falls in 10..26.
        if index + 1 < length and (indexChar == 1 or (indexChar == 2 and int(string[index + 1]) <= 6)):
            result += self.ways(string, index + 2, length, cache)
        cache[index] = result
        return result
print(Solution().numDecodings("226")) |
# Collect every number in [20, 240] divisible by 20 or by 21.
result = [n for n in range(20, 241) if n % 20 == 0 or n % 21 == 0]
print(result)
|
# model parameters:
import copy
import numpy as np
import transport_graph as tg
import oracles
import dual_func_calculator as dfc
import universal_similar_triangles_method as ustm
import universal_gradient_descent_method as ugd
#from numba import jit
import math
class Model:
    """Traffic-assignment model: holds the re-indexed road graph and the
    OD correspondences, and runs a solver to find equilibrium times."""

    def __init__(self, graph_data, graph_correspondences, total_od_flow, mu = 0.25, rho = 0.15, gamma = 1.):
        """
        graph_data            -- dict with 'graph_table' (pandas table) and 'links number'
        graph_correspondences -- {origin: {'targets': [...], 'corrs': [...]}}
        total_od_flow         -- total origin-destination flow (scales L_init)
        mu, rho, gamma        -- cost-function / smoothing parameters
        """
        self.total_od_flow = total_od_flow
        self.mu = mu
        self.rho = rho
        self.gamma = gamma
        self.inds_to_nodes, self.graph_correspondences, graph_table = self._index_nodes(graph_data['graph_table'],
                                                                                        graph_correspondences)
        self.graph = tg.TransportGraph(graph_table, len(self.inds_to_nodes), graph_data['links number'])

    def _index_nodes(self, graph_table, graph_correspondences):
        """Relabel node ids with dense integer indices.

        Nodes are ordered [origin-only, through, destination-only], so
        origins occupy a prefix of the index range and destinations a
        suffix (through nodes belong to both maps).  Returns
        (inds_to_nodes, re-indexed correspondences, re-indexed table).
        """
        table = graph_table.copy()
        inits = np.unique(table['init_node'][table['init_node_thru'] == False])
        terms = np.unique(table['term_node'][table['term_node_thru'] == False])
        through_nodes = np.unique([table['init_node'][table['init_node_thru'] == True],
                                   table['term_node'][table['term_node_thru'] == True]])
        nodes = np.concatenate((inits, through_nodes, terms))
        nodes_inds = list(zip(nodes, np.arange(len(nodes))))
        # Valid origins: origin-only + through; valid terms: through + term-only.
        init_to_ind = dict(nodes_inds[ : len(inits) + len(through_nodes)])
        term_to_ind = dict(nodes_inds[len(inits) : ])
        table['init_node'] = table['init_node'].map(init_to_ind)
        table['term_node'] = table['term_node'].map(term_to_ind)
        correspondences = {}
        for origin, dests in graph_correspondences.items():
            dests = copy.deepcopy(dests)
            correspondences[init_to_ind[origin]] = {'targets' : list(map(term_to_ind.get , dests['targets'])),
                                                    'corrs' : dests['corrs']}
        inds_to_nodes = dict(zip(range(len(nodes)), nodes))
        return inds_to_nodes, correspondences, table

    def find_equilibrium(self, solver_name = 'ustm', composite = True, solver_kwargs = None):
        """Run the chosen solver and return its result dict.

        solver_name   -- 'ustm' (similar triangles) or 'ugd' (gradient
                         descent); anything else raises NotImplementedError.
        composite     -- use composite optimization (prox of H) instead of
                         optimizing phi + H directly.
        solver_kwargs -- extra keyword arguments for the solver; 'L_init'
                         is filled in with a problem-scaled default.
        """
        # BUG FIX: the default used to be a shared mutable dict ({}) that
        # this method mutates ('L_init' below), leaking state between
        # calls. Copy whatever the caller passed instead.
        solver_kwargs = dict(solver_kwargs) if solver_kwargs else {}
        if solver_name == 'ustm':
            solver_func = ustm.universal_similar_triangles_method
            starting_msg = 'Universal similar triangles method...'
        elif solver_name == 'ugd':
            solver_func = ugd.universal_gradient_descent_method
            starting_msg = 'Universal gradient descent method...'
        else:
            raise NotImplementedError('Unknown solver!')
        if 'L_init' not in solver_kwargs:
            # Rough initial Lipschitz estimate scaled by problem size
            # (identical for both solvers; previously duplicated).
            solver_kwargs['L_init'] = 0.1 * self.graph.max_path_length * self.total_od_flow / self.gamma
        phi_big_oracle = oracles.PhiBigOracle(self.graph, self.graph_correspondences, gamma = self.gamma)
        h_oracle = oracles.HOracle(self.graph.free_flow_times, self.graph.capacities,
                                   rho = self.rho, mu = self.mu)
        primal_dual_calculator = dfc.PrimalDualCalculator(phi_big_oracle, h_oracle,
                                                          self.graph.free_flow_times, self.graph.capacities,
                                                          rho = self.rho, mu = self.mu)
        if composite:
            print('Composite optimization...')
            oracle = phi_big_oracle
            prox = h_oracle.prox
        else:
            print('Non-composite optimization...')
            oracle = phi_big_oracle + h_oracle
            def prox_func(grad, point, A):
                """
                Computes argmin_{t: t \in Q} <g, t> + A / 2 * ||t - p||^2
                where Q - the feasible set {t: t >= free_flow_times},
                A - constant, g - (sub)gradient vector, p - point at which prox is calculated
                """
                return np.maximum(point - grad / A, self.graph.free_flow_times)
            prox = prox_func
        print('Oracles created...')
        print(starting_msg)
        result = solver_func(oracle, prox,
                             primal_dual_calculator,
                             t_start = self.graph.free_flow_times,
                             **solver_kwargs)
        #TODO: add equilibrium travel times between zones
        return result
|
import sys
from src import configurator, shared, steps
from src.steps import files, covers, publish
from src import data as _data
def help():
    """Print a short primer on how the configuration prompts work.

    NOTE: intentionally shadows the built-in help(); main() calls it by
    this name, so the name is kept for compatibility.
    """
    message = """You'll be asked about several things.
The format used to ask stuff is the following:
property[=default]: (enter a value here)
(The [=default] may be omitted)
If a default value exists, simply press enter without typing anything
to use the default value.
Off you go!
"""
    print(message)
def main(args):
    """Run the full pipeline: configure, gather data, rename, tag."""
    # Build (and persist) the configuration via the interactive wizard.
    config = configurator.wizard(
        configfile=args.config, write=True, no_auto_json=args.no_auto_json)

    # Optionally explain the prompt format to the user.
    if config.get('options/show-help'):
        help()

    # Shared helpers, user data and naming schemes.
    utils = shared.Utils(config)
    data = _data.get(config, utils)
    schemes = shared.Schemer(config, data)

    # Renaming is the obligatory step; bail out if it did not happen.
    renamed = files.rename(config, data, args)
    if not renamed:
        sys.exit()

    # Write metadata tags.
    files.metadata(config, data, args)
|
#coding:utf8
import os
import json
import sys
import re
reload(sys)
sys.setdefaultencoding('utf8')
import utils
import traceback
import pdb
# Bulk-load recommendation e-mail logs into the mail_profile table
# (Python 2 script; see reload(sys)/print statements above).
try:
    conn = utils.persist.connection()
    cur = conn.cursor()
    # Force UTF-8 at every layer of the MySQL session.
    cur.execute('set character_set_client=utf8')
    cur.execute('set character_set_connection=utf8')
    cur.execute('set character_set_database=utf8')
    cur.execute('set character_set_results=utf8')
    cur.execute('set character_set_server=utf8')
    conn.commit()
    mode = re.compile(r'\d+')  # NOTE(review): compiled but never used below
    path = 'd:/naren/recommend/email_log1/'
    for fle in os.listdir(path):
        with open(path + fle, 'r') as file:
            # NOTE(review): readline() reads only the FIRST line of each
            # file; the plural name 'lines' hints readlines() may have
            # been intended -- confirm against the log format.
            lines = file.readline()
            # Rewrite the JSON-ish text into Python-literal syntax so it
            # can be eval()'d: nulls become empty strings and each JSON
            # double-quote delimiter becomes a triple-quoted string.
            com_lines = ''.join(lines).decode('unicode_escape').replace('null', "''").replace('\n', '')
            com_lines = com_lines.replace('": "', "''':'''")
            com_lines = com_lines.replace('"], "', "'''],'''")
            com_lines = com_lines.replace('", "', "''','''")
            com_lines = com_lines.replace(', "', ",'''")
            com_lines = com_lines.replace('": {"', "''':{'''")
            com_lines = com_lines.replace('": [{"', "''':[{'''")
            com_lines = com_lines.replace('"}], "', "'''}],'''")
            com_lines = com_lines.replace('": ["', "''':['''")
            com_lines = com_lines.replace('"}', "'''}")
            com_lines = com_lines.replace('":', "''':")
            com_lines = com_lines.replace('{"', "{'''")
            # Any double quote still left is data; map it to a full-width
            # quote so it cannot terminate a literal.
            com_lines = com_lines.replace('"', '“')
            # SECURITY NOTE(review): eval of file-derived text -- only
            # safe if the log directory is fully trusted.
            com_pro_inf = eval(com_lines)
            resume = com_pro_inf['resume']
            position = com_pro_inf['position']
            try:
                # NOTE(review): values are interpolated, not SQL-escaped;
                # free-text fields (e.g. selfappraise) containing quotes
                # will break or inject into the statement -- a
                # parameterized query would be safer.
                sql = '''insert into mail_profile(gender, latesttitle, latestcollege, selfappraise, latestcompany,\
                latestdegree, curemploystatus, dessalary, age, desworkproperty, destitle, \
                desindustry, desworklocation, position_id, resume_id) values("%s", "%s", "%s", "%s", "%s", "%s", "%s",\
                "%s", %d, "%s", "%s", "%s", "%s", %d, %d)''' % (resume['sex'], resume['latesttitle'], \
                resume['latestcollege'], resume['selfappraise'], resume['latestcompany'], resume['latestdegree'],\
                resume['curemploystatus'], resume['dessalary'], resume.get('age', 100), resume['desworkproperty'], \
                resume['destitle'], resume['desindustry'], resume['desworklocation'], \
                int(position['position_distinct_id']), int(resume['resume_id']))
                cur.execute(sql)
            except:
                # Drop into the debugger on a bad record, then log it.
                pdb.set_trace()
                traceback.print_exc()
    conn.commit()
    conn.close()
except Exception as e:
    traceback.print_exc()
    # NOTE(review): if connection() itself failed, 'conn' is unbound here
    # and this close() raises a NameError that masks the original error.
    conn.close()
print e |
# -*- coding=utf8 -*-
'''
@Filename : main.py
@Author : Gaozong
@Date : 2020-02-04
@Contact : zong209@163.com
@Describe : Entry function
'''
import os
import torch
import numpy as np
import time
import torch.optim as optim
from torch import nn
from dataset import AnimalDataset
from torchvision import transforms
CUDA_AVALIABLE = torch.cuda.is_available()
def init_folder():
    """Create the working directories (data/, models/, logs/) if absent."""
    for folder in ('data', 'models', 'logs'):
        # exist_ok avoids the race between the existence check and mkdir,
        # and replaces the former if-not-exists/mkdir pair. (Also stops
        # shadowing the builtin 'dir'.)
        os.makedirs(folder, exist_ok=True)
    # BUG FIX: log message typo "Initialed" -> "Initialized".
    print("[INFO] Initialized folders")
class AverageMeter(object):
    """Tracks the latest value and the running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero every statistic."""
        # val: most recent value; avg: running mean;
        # sum: weighted sum; count: total weight seen.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, lr, lr_steps, weight_decay):
    """Step-decay schedule: divide the base LR (and weight decay) by 10
    for every milestone in *lr_steps* that *epoch* has reached.

    BUG FIX: the old code multiplied the *current* param-group values by
    the decay on every call, so after a milestone the LR kept shrinking
    by 10x each epoch; the ``lr`` and ``weight_decay`` arguments were
    ignored entirely. The decay is now applied to those base values,
    making the function idempotent for a given epoch.
    """
    decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
    for param_group in optimizer.param_groups:
        param_group['lr'] = decay * lr
        param_group['weight_decay'] = decay * weight_decay
    return optimizer
def train(pertrained=False, resume_file=None):
    """Train the classifier on train_dataloader, validating each epoch.

    pertrained  -- True: torchvision-style pretrained alexnet wrapper;
                   False: the locally defined AlexNet. (Parameter name,
                   typo included, kept for caller compatibility.)
    resume_file -- optional checkpoint path to resume training from.

    Relies on the module-level globals NUMBER_CLASSES, LR, MOMENTUM,
    WEIGHT_DECAY, LR_steps, EPOCHES, BATCHSIZE, criterion,
    train_dataloader and CUDA_AVALIABLE; writes logs under logs/ and
    checkpoints under models/.
    """
    if pertrained:
        from model import alexnet
        net = alexnet(pretrained=True, num_classes=NUMBER_CLASSES)
    else:
        from model import AlexNet
        net = AlexNet(num_classes=NUMBER_CLASSES)
    valid_precision = 0
    policies = net.parameters()
    optimizer = optim.SGD(policies,
                          lr=LR,
                          momentum=MOMENTUM,
                          weight_decay=WEIGHT_DECAY)
    train_log = open(
        "logs/train_logs_{}.log".format(
            time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())), "w")
    valid_log = open(
        "logs/valid_logs_{}.log".format(
            time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())), "w")
    train_log.write("{}\t{}\t{}\n".format("epoch", "losses ", "correct"))
    valid_log.write("{}\t{}\t{}\n".format("epoch", "losses ", "correct"))
    # Resume from a checkpoint if one was given.
    # BUG FIX: start_epoch used to be assigned only inside the resume
    # branches, so calling train() with resume_file=None raised a
    # NameError at the epoch loop below.
    start_epoch = 0
    if resume_file:
        if os.path.isfile(resume_file):
            print(("=> loading checkpoint '{}'".format(resume_file)))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            net.load_state_dict(checkpoint['model_state_dict'])
            print(("=> loaded checkpoint '{}' (epoch {})".format(
                resume_file, checkpoint['epoch'])))
        else:
            print(("=> no checkpoint found at '{}'".format(resume_file)))
    for epoch in range(start_epoch, EPOCHES):
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        correct = AverageMeter()
        end = time.time()
        optimizer = adjust_learning_rate(optimizer, epoch, LR, LR_steps,
                                         WEIGHT_DECAY)
        for i_batch, sample_batched in enumerate(train_dataloader):
            # Measure data loading time.
            data_time.update(time.time() - end)
            inputs, labels = sample_batched
            if CUDA_AVALIABLE:
                outputs = net.forward(inputs.cuda())
                labels = labels.long().flatten().cuda()
            else:
                outputs = net.forward(inputs)
                labels = labels.long().flatten()
            outputs = outputs.reshape([-1, NUMBER_CLASSES])
            loss = criterion(outputs, labels)
            # Update running statistics.
            losses.update(loss.item(), inputs.size(0))
            _, predicted = torch.max(outputs.data, 1)
            # Batch accuracy.
            correct.update(
                (predicted == labels.long()).sum().item() / len(labels),
                inputs.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Measure elapsed time.
            batch_time.update(time.time() - end)
            end = time.time()
            if i_batch % 10 == 0:
                print(('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\t'
                       'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                       'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                       'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                       'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'.format(
                           epoch,
                           i_batch,
                           len(train_dataloader),
                           batch_time=batch_time,
                           data_time=data_time,
                           loss=losses,
                           top1=correct,
                           lr=optimizer.param_groups[-1]['lr'])))
        train_log.write("{:5d}\t{:.5f}\t{:.5f}\n".format(
            epoch, losses.avg, correct.avg))
        train_log.flush()
        if epoch % 1 == 0:
            valid_precision = valid(net, epoch, valid_log)
        # Periodically checkpoint the network.
        if (epoch > 0 and epoch % 10 == 0) or epoch == EPOCHES - 1:
            save_path = os.path.join(
                "models",
                "{:d}_{}_{:d}_{:d}_{:.5f}.pt".format(int(time.time()),
                                                     "alexnet", epoch,
                                                     BATCHSIZE,
                                                     valid_precision))
            print("[INFO] Save weights to " + save_path)
            torch.save(
                {
                    'epoch': epoch,
                    'model_state_dict': net.state_dict(),
                    # BUG FIX: state_dict was stored without calling it
                    # (a bound method was pickled) and the key was
                    # misspelled 'optimizer_state_dir'.
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss
                }, save_path)
    train_log.close()
    valid_log.close()
def valid(net, epoch=None, valid_log=None):
    """Evaluate *net* on valid_dataloader and return the mean accuracy.

    epoch / valid_log -- when given, a summary line is appended to the
    log file. Relies on the module-level globals valid_dataloader,
    criterion, NUMBER_CLASSES and CUDA_AVALIABLE.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    correct = AverageMeter()
    # BUG FIX: eval() was switched on here but never switched back, so
    # after the first validation pass the network kept training with
    # dropout disabled. Remember the caller's mode and restore it.
    was_training = net.training
    net.eval()
    end = time.time()
    with torch.no_grad():
        for i_batch, sample_batched in enumerate(valid_dataloader):
            inputs, labels = sample_batched
            if CUDA_AVALIABLE:
                outputs = net.forward(inputs.cuda())
                labels = labels.long().flatten().cuda()
            else:
                outputs = net.forward(inputs)
                labels = labels.long().flatten()
            outputs = outputs.reshape([-1, NUMBER_CLASSES])
            loss = criterion(outputs, labels)
            # Update running statistics.
            losses.update(loss.item(), inputs.size(0))
            _, predicted = torch.max(outputs.data, 1)
            # Batch accuracy.
            correct.update(
                (predicted == labels.long()).sum().item() / len(labels),
                inputs.size(0))
            # Measure elapsed time.
            batch_time.update(time.time() - end)
            end = time.time()
            if i_batch % 10 == 0 or i_batch == len(valid_dataloader) - 1:
                print(('Test: [{0}/{1}]\t'
                       'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                       'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                       'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'.format(
                           i_batch + 1,
                           len(valid_dataloader),
                           batch_time=batch_time,
                           loss=losses,
                           top1=correct)))
    if valid_log:
        valid_log.write("{:5d}\t{:.5f}\t{:.5f}\n".format(
            epoch, losses.avg, correct.avg))
        valid_log.flush()
    net.train(was_training)
    return correct.avg
if __name__ == "__main__":
    init_folder()
    # Data locations and training hyper-parameters.
    TRAIN_DIR = "data/train"
    VALID_DIR = "data/val"
    NUMBER_CLASSES = 2
    BATCHSIZE = 64
    EPOCHES = 200
    LR = 0.01
    LR_steps = [60, 120]  # epochs at which the LR is divided by 10
    MOMENTUM = 0.9
    WEIGHT_DECAY = 0.005
    criterion = nn.CrossEntropyLoss()
    train_transforms = transforms.Compose([
        # Random crop with scale jitter, resized to 224x224.
        transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
        # Horizontal flip with probability 0.5.
        transforms.RandomHorizontalFlip(),
        # PIL image / ndarray -> float tensor scaled to [0, 1].
        transforms.ToTensor(),
        # Per-channel standardisation with the ImageNet statistics.
        # BUG FIX: the blue-channel std was 0.255; the published
        # ImageNet value is 0.225 (std = [0.229, 0.224, 0.225]).
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    valid_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    train_datasets = AnimalDataset(TRAIN_DIR,
                                   "jpg",
                                   transform=train_transforms)
    valid_datasets = AnimalDataset(VALID_DIR,
                                   "jpg",
                                   transform=valid_transforms)
    train_dataloader = torch.utils.data.DataLoader(train_datasets,
                                                   batch_size=BATCHSIZE,
                                                   shuffle=True)
    valid_dataloader = torch.utils.data.DataLoader(valid_datasets,
                                                   batch_size=BATCHSIZE,
                                                   shuffle=True)
    train(pertrained=False,
          resume_file="models/1580961759_alexnet_10_64_0.51562.pt")
|
# Generated by Django 2.2.1 on 2019-06-16 18:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Colaborador and Departamento tables (auto-generated).

    NOTE(review): historical migrations should not be edited after being
    applied; comments only below.
    """

    dependencies = [
        ('clientes', '0005_auto_20190602_1643'),
    ]

    operations = [
        migrations.CreateModel(
            name='Colaborador',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Departamento',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=100)),
                # NOTE(review): null=True has no effect on a
                # ManyToManyField (Django warning fields.W340); harmless,
                # kept as-is in this historical record.
                ('empregados', models.ManyToManyField(null=True, to='clientes.Colaborador')),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from general.models import Event, UserAccount
from general.views import getAllOpenEvent
from django.shortcuts import render, redirect
from wallet.account_standing import account_standing
from django.utils import timezone
from wallet.models import Bank, Betpayments
from wallet.views import generate_purchaseRef, purchase_ref
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from gameplay.models import Gameplay, DailyJackPot, DailyJackpotEntries
from django.core.urlresolvers import reverse
from general.custom_functions import *
from general.models import VendorClient, WalletBalances
import random, datetime
# Create your views here.
def strip_phone_number(value):
    """Normalize a Nigerian phone number.

    Removes spaces, '+' and '-', then replaces a single leading local
    '0' with the '234' country code. Returns the cleaned string.

    BUG FIX: the old code indexed value[0] unconditionally, raising
    IndexError when the cleaned string was empty; startswith() is safe.
    (Also replaces the list()/join() round-trip with slicing.)
    """
    value = value.replace(" ", "").replace("+", "").replace("-", "")
    if value.startswith("0"):
        value = "234" + value[1:]
    return value
@login_required
def betting(request):
balance = account_standing(request, request.user)
try:
useraccount = UserAccount.objects.get(user=request.user)
if not useraccount.profile_updated:
messages.warning(request, "Please update your profile before playing")
return redirect(reverse('general:profile'))
except Exception as e:
print "e", e
messages.warning(request, "Please update your profile before playing")
return redirect(reverse('general:profile'))
if request.method == "POST":
if request.POST.get('bot_catcher') != "":
return redirect(reverse('general:homepage'))
# print request.POST
try:
amount = float(request.POST.get('amount'))
except:
messages.warning(request, "Invalid amount supplied")
return redirect(request.META['HTTP_REFERER'])
choice = str(request.POST.get('user-choice'))
if amount == "" or amount < 200:
messages.error(request, "Amount added is below minimum amount")
return redirect(request.META['HTTP_REFERER'])
if choice == "":
messages.error(request, "You need to make a choice")
return redirect(request.META['HTTP_REFERER'])
if amount > balance:
messages.error(request, "You do not have sufficient money in your wallet to play this game")
return redirect(request.META['HTTP_REFERER'])
else:
# choice = str(request.POST.get('user-choice'))
# print 'choice', choice
event = Event.objects.get(id=request.POST.get('event'))
if event.end_date <= timezone.now().date():
if event.end_date == timezone.now().date():
today = datetime.datetime.now() # To get current date and time
current_time = today.strftime("%I:%M %p") # to get current time in string format
print "current_time", current_time
dt = datetime.datetime.strptime(current_time,"%I:%M %p").time() # to convert datetime.datetime object to datetime.time object
# print "dt", dt
# endz_time = datetime.datetime.strptime(event.end_time, "%I:%M %p").time()
# print endz_time
if event.end_time <= dt:
messages.error(request, "This event is closed")
return redirect(request.META['HTTP_REFERER'])
else:
mesaages.error(request, "This event is closed")
return redirect(request.META['HTTP_REFERER'])
user = UserAccount.objects.get(user=request.user)
try:
gameplay = Gameplay.objects.get(event=event, user=user)
# print gameplay
messages.warning(request, "You have already Participated in this Event")
return redirect(request.META['HTTP_REFERER'])
except Exception as e:
print "e", e
random_ref = purchase_ref()
gameplay = Gameplay.objects.create(user=user, event=event, amount=amount, choice=choice,
date=timezone.now(), status="OPEN", ref_number=random_ref)
# gameplay = Gameplay.objects.create(user=user,event=event,amount=amount,choice=choice,date=timezone.now(),status="OPEN")
gameplay.current_ac_bal = balance
gameplay.date_played = timezone.now().date()
gameplay.save()
event.total_amt += amount
event.counter += 1
event.save()
user.djp_wjp_cat = True
user.total_cat_games_played += 1
user.wallet_funded = True
user.game_played = True
user.save()
random_ref = purchase_ref()
bank_record = Bank.objects.create(user=request.user, txn_type="Remove", amount=amount, ref_no=random_ref,
created_at=timezone.now(), status="Successful", bank="Gameplay",
message="Gameplay for event " + event.event_id)
bank_record.date_created = timezone.now().date()
bank_record.save()
payment = Betpayments.objects.create(user=request.user, amount=amount, game=gameplay, date=timezone.now())
payment.date_created = timezone.now().date()
payment.save()
title = "Prediction Confirmation"
text = "general/game_confirmation.html"
mail_user (request, user.user, title, text, pkg=gameplay)
messages.success(request, 'Congratulations!!!! You have successfully made your prediction!!!.')
WalletBalances.objects.create(balance_bf=balance,description="Category Games",current_bal=float(balance - amount),amount=amount,user=request.user.useraccount)
user.wallet_balance = float(balance - amount)
user.save()
return redirect(request.META['HTTP_REFERER'])
return redirect(reverse('general:homepage'))
def weeklyjackpot(request):
    """Render the weekly-jackpot page with the user's wallet balance."""
    balance = account_standing(request, request.user)
    return render(request, 'general/weeklyjackpot.html', {'balance': balance})
def weeklyjackpot2(request):
    """Alternate route rendering the same weekly-jackpot page."""
    ctx = {}
    ctx['balance'] = account_standing(request, request.user)
    return render(request, 'general/weeklyjackpot.html', ctx)
def dailyjackpot(request):
    """Daily Jackpot view.

    POST: validates the submitted answer and, for each requested ticket,
    creates a DailyJackpotEntries row plus Bank and Betpayments records,
    then debits the user's wallet once for the whole purchase.
    GET: renders the page with today's jackpot, the past week's jackpots
    and yesterday's game.
    """
    # from datetime import datetime
    balance = account_standing(request, request.user)
    if request.method == "POST":
        if not request.user.is_authenticated():
            return redirect(reverse('general:login'))
        try:
            useraccount = UserAccount.objects.get(user=request.user)
            if not useraccount.profile_updated:
                messages.warning(request, "Please update your profile before Answering the Question")
                return redirect(reverse('general:profile'))
        except Exception as e:
            print "e", e
            messages.warning(request, "Please update your profile before you can play")
            return redirect(reverse('general:profile'))
        question = request.POST.get('event')
        choice = request.POST.get('user-choice')
        try:
            no_of_entry = int(request.POST.get('no_of_entries'))
        except:
            messages.error(request, "Amount added is invalid")
            return redirect(request.META['HTTP_REFERER'])
        # print no_of_entry
        entry_amt = float(request.POST.get('entry_amt')) * no_of_entry
        # print "amt", entry_amt
        # if request.method == "POST":
        # Honeypot field: real users submit it empty, bots fill it in.
        if request.POST.get('bot_catcher') != "":
            return redirect(reverse('general:homepage'))
        # choice = str(request.POST.get('user-choice'))
        if entry_amt == "" or entry_amt < 25:
            messages.error(request, "Amount added is below minimum amount")
            return redirect(request.META['HTTP_REFERER'])
        if choice == "":
            messages.error(request, "You need to make a choice")
            return redirect(request.META['HTTP_REFERER'])
        if entry_amt > balance:
            messages.error(request, "You do not have sufficient money in your wallet to play the game")
            return redirect(request.META['HTTP_REFERER'])
        # ref = purchase_ref()
        event = DailyJackPot.objects.get(id=question)
        today = datetime.datetime.now()  # To get current date and time
        current_time = today.strftime("%I:%M %p")  # to get current time in string format
        print "current_time", current_time
        dt = datetime.datetime.strptime(current_time,"%I:%M %p").time()  # to convert datetime.datetime object to datetime.time object
        print "dt", dt
        # Reject answers submitted after the jackpot's daily cut-off time.
        if event.stop_time < dt:
            messages.error(request, "Daily Jackpot for today has ended")
            return redirect(request.META['HTTP_REFERER'])
        user = UserAccount.objects.get(user=request.user)
        counter = 0
        # One entry (ticket) per requested unit; each gets its own ref number
        # plus its own Bank and Betpayments rows.
        while (counter < no_of_entry):
            ref = purchase_ref()
            entry = DailyJackpotEntries.objects.create(dailyjackpot=event, user_obj=user, choice=choice,
                                                       ticket_no=ref)
            entry.date = timezone.now().date()
            entry.user_email = user.user.email
            entry.user_first_name = user.user.first_name
            entry.user_last_name = user.user.last_name
            entry.telephone_no = user.phone_number
            entry.save()
            user.djp_wjp_cat = True
            user.total_djp_played += 1
            user.wallet_funded = True
            user.game_played = True
            user.save()
            event.total_entries += 1
            event.save()
            bank_record = Bank.objects.create(user=request.user, txn_type="Remove", amount=event.entry_amount,
                                              ref_no=ref,
                                              created_at=timezone.now(), status="Successful", bank="DailyJackPot",
                                              message="DailyJackPot for the day")
            bank_record.date_created = timezone.now().date()
            bank_record.save()
            payment = Betpayments.objects.create(user=request.user, amount=event.entry_amount, djp=event, date=timezone.now())
            payment.date_created = timezone.now().date()
            payment.save()
            counter += 1
        # print "counter", counter
        # NOTE(review): the WalletBalances amount uses event.entry_amount * counter
        # while the wallet itself is debited by entry_amt (POST 'entry_amt' *
        # entries); these disagree whenever the posted amount differs from the
        # model's entry_amount — confirm which is authoritative.
        WalletBalances.objects.create(balance_bf=balance, description="Daily Jackpot", current_bal=float(balance - (event.entry_amount * counter)), amount=float(event.entry_amount * counter), user=request.user.useraccount)
        user.wallet_balance = float(balance - entry_amt)
        user.save()
        messages.success(request, 'Your answer has been successfully submitted!!!.')
        return redirect(request.META['HTTP_REFERER'])
    else:
        context = {}
        now = timezone.now()
        today = timezone.now().date()
        events_all = getAllOpenEvent(request)
        categories = []
        # Distinct category list, preserving first-seen order.
        for event in events_all:
            if event.category in categories:
                pass
            else:
                categories.append(event.category)
        context['categories'] = categories
        # balance = account_standing(request,request.user)
        context['balance'] = balance
        try:
            event = DailyJackPot.objects.get(created_on_date=today)
            print event
            todays_date = timezone.now().date()
            past_dates = timezone.now() - timezone.timedelta(days=7)
            yesterday = timezone.now() - timezone.timedelta(days=1)
            # print yesterday
            jackpots = DailyJackPot.objects.filter(created_on_date__range=[past_dates, todays_date])
            # print jackpots
            yesterdays_game = DailyJackPot.objects.get(created_on_date=yesterday)
            # print yesterdays_game
            context['event'] = event
            context['jackpots'] = jackpots
            context['yesterday_event'] = yesterdays_game
            print event.stop_time
            # print now
            # djp flags whether today's jackpot is still open for entries.
            djp = ""
            if event.end_time >= now:
                djp = True
            else:
                djp = False
            # print djp
            context['djp'] = djp
        except:
            # No jackpot created today (or a lookup failed): fall back to an
            # empty event and the historical lists only.
            event = []
            todays_date = timezone.now().date()
            past_dates = timezone.now() - timezone.timedelta(days=7)
            yesterday = timezone.now() - timezone.timedelta(days=1)
            # print yesterday
            jackpots = DailyJackPot.objects.filter(created_on_date__range=[past_dates, todays_date])
            # print jackpots
            yesterdays_game = DailyJackPot.objects.get(created_on_date=yesterday)
            # print yesterdays_game
            context['event'] = event
            context['jackpots'] = jackpots
            context['yesterday_event'] = yesterdays_game
            # print event.end_time
            # print now
            djp = ""
            context['djp'] = djp
        return render(request, 'general/dailyjackpot.html', context)
def dailyjackpot2(request):
    """Daily Jackpot view (alternate route).

    Verbatim duplicate of dailyjackpot — see that view for details.
    POST: creates one entry per requested ticket plus Bank/Betpayments
    records and debits the wallet; GET: renders today's jackpot and the
    past week's jackpots.
    """
    # from datetime import datetime
    balance = account_standing(request, request.user)
    if request.method == "POST":
        if not request.user.is_authenticated():
            return redirect(reverse('general:login'))
        try:
            useraccount = UserAccount.objects.get(user=request.user)
            if not useraccount.profile_updated:
                messages.warning(request, "Please update your profile before Answering the Question")
                return redirect(reverse('general:profile'))
        except Exception as e:
            print "e", e
            messages.warning(request, "Please update your profile before you can play")
            return redirect(reverse('general:profile'))
        question = request.POST.get('event')
        choice = request.POST.get('user-choice')
        try:
            no_of_entry = int(request.POST.get('no_of_entries'))
        except:
            messages.error(request, "Amount added is invalid")
            return redirect(request.META['HTTP_REFERER'])
        # print no_of_entry
        entry_amt = float(request.POST.get('entry_amt')) * no_of_entry
        # print "amt", entry_amt
        # if request.method == "POST":
        # Honeypot field: real users submit it empty, bots fill it in.
        if request.POST.get('bot_catcher') != "":
            return redirect(reverse('general:homepage'))
        # choice = str(request.POST.get('user-choice'))
        if entry_amt == "" or entry_amt < 25:
            messages.error(request, "Amount added is below minimum amount")
            return redirect(request.META['HTTP_REFERER'])
        if choice == "":
            messages.error(request, "You need to make a choice")
            return redirect(request.META['HTTP_REFERER'])
        if entry_amt > balance:
            messages.error(request, "You do not have sufficient money in your wallet to play the game")
            return redirect(request.META['HTTP_REFERER'])
        # ref = purchase_ref()
        event = DailyJackPot.objects.get(id=question)
        today = datetime.datetime.now()  # To get current date and time
        current_time = today.strftime("%I:%M %p")  # to get current time in string format
        print "current_time", current_time
        dt = datetime.datetime.strptime(current_time,"%I:%M %p").time()  # to convert datetime.datetime object to datetime.time object
        print "dt", dt
        # Reject answers submitted after the jackpot's daily cut-off time.
        if event.stop_time < dt:
            messages.error(request, "Daily Jackpot for today has ended")
            return redirect(request.META['HTTP_REFERER'])
        user = UserAccount.objects.get(user=request.user)
        counter = 0
        # One entry (ticket) per requested unit; each gets its own ref number
        # plus its own Bank and Betpayments rows.
        while (counter < no_of_entry):
            ref = purchase_ref()
            entry = DailyJackpotEntries.objects.create(dailyjackpot=event, user_obj=user, choice=choice,
                                                       ticket_no=ref)
            entry.date = timezone.now().date()
            entry.user_email = user.user.email
            entry.user_first_name = user.user.first_name
            entry.user_last_name = user.user.last_name
            entry.telephone_no = user.phone_number
            entry.save()
            user.djp_wjp_cat = True
            user.total_djp_played += 1
            user.wallet_funded = True
            user.game_played = True
            user.save()
            event.total_entries += 1
            event.save()
            bank_record = Bank.objects.create(user=request.user, txn_type="Remove", amount=event.entry_amount,
                                              ref_no=ref,
                                              created_at=timezone.now(), status="Successful", bank="DailyJackPot",
                                              message="DailyJackPot for the day")
            bank_record.date_created = timezone.now().date()
            bank_record.save()
            payment = Betpayments.objects.create(user=request.user, amount=event.entry_amount, djp=event, date=timezone.now())
            payment.date_created = timezone.now().date()
            payment.save()
            counter += 1
        # print "counter", counter
        # NOTE(review): as in dailyjackpot, the WalletBalances amount is based
        # on event.entry_amount while the wallet debit uses the posted
        # entry_amt — confirm which is authoritative.
        WalletBalances.objects.create(balance_bf=balance, description="Daily Jackpot", current_bal=float(balance - (event.entry_amount * counter)), amount=float(event.entry_amount * counter), user=request.user.useraccount)
        user.wallet_balance = float(balance - entry_amt)
        user.save()
        messages.success(request, 'Your answer has been successfully submitted!!!.')
        return redirect(request.META['HTTP_REFERER'])
    else:
        context = {}
        now = timezone.now()
        today = timezone.now().date()
        events_all = getAllOpenEvent(request)
        categories = []
        # Distinct category list, preserving first-seen order.
        for event in events_all:
            if event.category in categories:
                pass
            else:
                categories.append(event.category)
        context['categories'] = categories
        # balance = account_standing(request,request.user)
        context['balance'] = balance
        try:
            event = DailyJackPot.objects.get(created_on_date=today)
            print event
            todays_date = timezone.now().date()
            past_dates = timezone.now() - timezone.timedelta(days=7)
            yesterday = timezone.now() - timezone.timedelta(days=1)
            # print yesterday
            jackpots = DailyJackPot.objects.filter(created_on_date__range=[past_dates, todays_date])
            # print jackpots
            yesterdays_game = DailyJackPot.objects.get(created_on_date=yesterday)
            # print yesterdays_game
            context['event'] = event
            context['jackpots'] = jackpots
            context['yesterday_event'] = yesterdays_game
            print event.stop_time
            # print now
            # djp flags whether today's jackpot is still open for entries.
            djp = ""
            if event.end_time >= now:
                djp = True
            else:
                djp = False
            # print djp
            context['djp'] = djp
        except:
            # No jackpot created today (or a lookup failed): fall back to an
            # empty event and the historical lists only.
            event = []
            todays_date = timezone.now().date()
            past_dates = timezone.now() - timezone.timedelta(days=7)
            yesterday = timezone.now() - timezone.timedelta(days=1)
            # print yesterday
            jackpots = DailyJackPot.objects.filter(created_on_date__range=[past_dates, todays_date])
            # print jackpots
            yesterdays_game = DailyJackPot.objects.get(created_on_date=yesterday)
            # print yesterdays_game
            context['event'] = event
            context['jackpots'] = jackpots
            context['yesterday_event'] = yesterdays_game
            # print event.end_time
            # print now
            djp = ""
            context['djp'] = djp
        return render(request, 'general/dailyjackpot.html', context)
@login_required
def vendor_game_play(request):
    """Vendor-assisted category game play.

    A vendor submits a prediction for a client identified by the posted
    phone number.  A VendorClient record is created (or reused) for that
    number; a second play for the same client/event is rejected via the
    Gameplay lookup in the try block below.
    """
    # print 'rp',request.POST
    balance = account_standing(request, request.user)
    random_ref = purchase_ref()
    try:
        useraccount = UserAccount.objects.get(user=request.user)
        if not useraccount.profile_updated:
            messages.warning(request, "Please update your profile before playing the game")
            return redirect(reverse('general:profile'))
    except Exception as e:
        print "e", e
        messages.warning(request, "Please update your profile before playing the game")
        return redirect(reverse('general:profile'))
    if request.method == "POST":
        # Honeypot field: real users submit it empty, bots fill it in.
        if request.POST.get('bot_catcher') != "":
            return redirect(reverse('general:homepage'))
        # print request.POST
        amount = float(request.POST.get('amount'))
        choice = str(request.POST.get('user-choice'))
        if amount == "" or amount < 200:
            messages.error(request, "Amount added is below minimum amount")
            return redirect(request.META['HTTP_REFERER'])
        if choice == "":
            messages.error(request, "You need to make a choice")
            return redirect(request.META['HTTP_REFERER'])
        if amount > balance:
            messages.error(request, "You do not have sufficient money in your wallet to play the game")
            return redirect(request.META['HTTP_REFERER'])
        else:
            # choice = str(request.POST.get('user-choice'))
            print 'choice', choice
            event = Event.objects.get(pk=request.POST.get('event_id'))
            user = UserAccount.objects.get(user=request.user)
            vendorClient, created = VendorClient.objects.get_or_create(useraccount=user,
                                                                       phone_number=request.POST.get('phone'))
            if created:
                # First time this vendor plays for this phone number.
                vendorClient.client_code = purchase_ref()
                vendorClient.save()
            else:
                pass
            try:
                # If a vendor gameplay already exists for this client/event,
                # refuse the duplicate.
                gameplay = Gameplay.objects.get(user=user, event=event, vendorClientCode=vendorClient.client_code,
                                                tel_no=request.POST.get('phone'), nvp=True)
                # print gameplay
                print 'saw this'
                messages.warning(request, "You have already played a game for this client in this Event")
                return redirect(request.META['HTTP_REFERER'])
            except Exception as e:
                # No existing vendor gameplay for this client/event: create one.
                print "e", e
                gameplay = Gameplay.objects.create(user=user, event=event, amount=amount, choice=choice,
                                                   date=timezone.now(), status="OPEN",tel_no=request.POST.get('phone'),
                                                   vendorClientCode=vendorClient.client_code, nvp=True,
                                                   ref_number=random_ref)
                gameplay.date_played = timezone.now().date()
                gameplay.save()
                event.total_amt += amount
                event.counter += 1
                event.save()
                user.djp_wjp_cat = True
                user.total_cat_games_played += 1
                user.wallet_funded = True
                user.game_played = True
                user.save()
                bank_record = Bank.objects.create(user=request.user, txn_type="Remove", amount=amount, ref_no=random_ref,
                                                  created_at=timezone.now(), status="Successful", bank="Gameplay",
                                                  message="Gameplay for event " + event.event_id)
                bank_record.date_created = timezone.now().date()
                bank_record.save()
                payment = Betpayments.objects.create(user=request.user, amount=amount, game=gameplay, date=timezone.now())
                payment.date_created = timezone.now().date()
                payment.save()
                title = "Prediction Confirmation"
                text = "general/game_confirmation.html"
                mail_user(request, user.user, title, text, pkg=gameplay)
                # Debit the vendor's own wallet for the play.
                WalletBalances.objects.create(balance_bf=balance,description="Category Games",current_bal=float(balance - amount),amount=amount,user=request.user.useraccount)
                user.wallet_balance = float(balance - amount)
                user.save()
                messages.success(request, 'Congratulations!!!! You have successfully made your prediction!!!.')
                return redirect(request.META['HTTP_REFERER'])
    # Non-POST fall-through.
    messages.warning(request, "You have already Participated in this Event")
    return redirect(request.META['HTTP_REFERER'])
@login_required
def vendor_dailyjackpot(request):
    """Vendor-assisted Daily Jackpot entry.

    A vendor buys jackpot tickets on behalf of a client identified by the
    posted phone number; the vendor's own wallet is debited (25 per
    ticket) and a VendorClient record is created for new phone numbers.
    """
    # from datetime import datetime
    balance = account_standing(request, request.user)
    if request.method == "POST":
        if not request.user.is_authenticated():
            return redirect(reverse('general:login'))
        try:
            useraccount = UserAccount.objects.get(user=request.user)
            if not useraccount.profile_updated:
                messages.warning(request, "Please update your profile before Playing Daily Jackpot")
                return redirect(reverse('general:profile'))
        except Exception as e:
            print "e", e
            messages.warning(request, "Please update your profile before you can play")
            return redirect(reverse('general:profile'))
        question = request.POST.get('event_id')
        choice = request.POST.get('user-choice')
        no_of_entry = int(request.POST.get('no_of_entries'))
        # print no_of_entry
        phone_number = request.POST.get("phone")
        # 02345678
        # +345 890 6559
        # 2345377973597
        # Vendor entries are charged a fixed 25 per ticket.
        entry_amt = 25 * no_of_entry
        print "amt", entry_amt
        # if request.method == "POST":
        # Honeypot field: real users submit it empty, bots fill it in.
        if request.POST.get('bot_catcher') != "":
            messages.error(request, "Invalid input detected")
            return redirect(request.META['HTTP_REFERER'])
        # choice = str(request.POST.get('user-choice'))
        if entry_amt == "" or entry_amt < 25:
            messages.error(request, "Amount added is below minimum amount")
            return redirect(request.META['HTTP_REFERER'])
        if choice == "":
            messages.error(request, "You need to make a choice")
            return redirect(request.META['HTTP_REFERER'])
        if entry_amt > balance:
            messages.error(request, "You do not have sufficient money in your wallet to play this game")
            return redirect(request.META['HTTP_REFERER'])
        # ref = purchase_ref()
        event = DailyJackPot.objects.get(id=question)
        today = datetime.datetime.now()  # To get current date and time
        current_time = today.strftime("%I:%M %p")  # to get current time in string format
        print "current_time", current_time
        dt = datetime.datetime.strptime(current_time,"%I:%M %p").time()  # to convert datetime.datetime object to datetime.time object
        print "dt", dt
        # Reject entries submitted after the jackpot's daily cut-off time.
        if event.stop_time < dt:
            messages.error(request, "Daily Jackpot for today has ended")
            return redirect(request.META['HTTP_REFERER'])
        user = UserAccount.objects.get(user=request.user)
        counter = 0
        tickets_number_list = []
        # One entry (ticket) per requested unit.
        while (counter < no_of_entry):
            ref = purchase_ref()
            entry = DailyJackpotEntries.objects.create(
                telephone_no = phone_number,
                dailyjackpot=event,
                user_obj=user,
                choice=choice,
                ticket_no=ref)
            entry.date = timezone.now().date()
            entry.user_email = user.user.email
            entry.user_first_name = user.user.first_name
            entry.user_last_name = user.user.last_name
            # NOTE(review): this overwrites the client's phone number (set in
            # the create() call above) with the vendor's own — confirm intended.
            entry.telephone_no = user.phone_number
            entry.save()
            user.djp_wjp_cat = True
            user.total_djp_played += 1
            user.wallet_funded = True
            user.game_played = True
            user.save()
            event.total_entries += 1
            event.save()
            bank_record = Bank.objects.create(user=request.user,
                                              txn_type="Remove",
                                              amount=event.entry_amount,
                                              ref_no=ref,
                                              created_at=timezone.now(),
                                              status="Successful",
                                              bank="DailyJackPot",
                                              message="DailyJackPot for the day")
            bank_record.date_created = timezone.now().date()
            bank_record.save()
            payment = Betpayments.objects.create(user=request.user,amount=event.entry_amount, djp=event, date=timezone.now())
            payment.date_created = timezone.now().date()
            payment.save()
            counter += 1
            tickets_number_list.append(entry)
            # Built for the (currently disabled) SMS confirmation below.
            message = "Tk No: " + entry.ticket_no + ", N25" + ", Date: " + str(entry.date) + ", Choice: " + entry.choice
            # send_sms(msg=message, num=strip_phone_number(phone_number))
        # print "counter", counter
        vendorClient, created = VendorClient.objects.get_or_create(useraccount=user,phone_number=request.POST.get('phone'))
        if created:
            # First time this vendor plays for this phone number.
            vendorClient.client_code = purchase_ref()
            vendorClient.save()
        else:
            pass
        WalletBalances.objects.create(balance_bf=balance, description="Daily Jackpot", current_bal=float(balance - (event.entry_amount * counter)), amount=float(event.entry_amount * counter), user=request.user.useraccount)
        user.wallet_balance = float(balance - entry_amt)
        user.save()
        messages.success(request, 'Your answer has been successfully submitted!!!.')
        # title = "Daily Jackpot"
        # text = "general/djp_confirmation.html"
        # mail_user_djp(request, user.user, title, text, djp=event, pkg=tickets_number_list)
        return redirect(request.META['HTTP_REFERER'])
    # Non-POST fall-through; relies on HTTP_REFERER being present.
    return redirect(request.META['HTTP_REFERER'])
@login_required
def user_entries(request):
    """Show the logged-in user's Daily Jackpot entries.

    Adds today's jackpot to the context when one exists; the template is
    rendered either way.

    Fixes: the bare ``except:`` (which also swallowed SystemExit and
    KeyboardInterrupt) is narrowed to ``Exception``, and the duplicated
    entries query in both branches is hoisted out of the try block.
    """
    context = {}
    try:
        today = timezone.now().date()
        # DailyJackPot.objects.get raises DoesNotExist (or
        # MultipleObjectsReturned) when today's jackpot is absent/ambiguous.
        context['event'] = DailyJackPot.objects.get(created_on_date=today)
    except Exception:
        # No jackpot for today: render the page without an 'event' key.
        pass
    context['dailyjackpot'] = DailyJackpotEntries.objects.filter(user_obj=request.user.useraccount)
    return render(request, 'general/user_daily_jackpot_entries.html', context)
|
# import pandas, numpy, and matplotlib
import pandas as pd
from feature_engine.encoding import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
# Display options for interactive inspection of the results.
pd.set_option('display.width', 75)
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_rows', 100)
pd.options.display.float_format = '{:,.0f}'.format
# NLS data: SAT/GPA scores and family background, with the
# 'completedba' column as the classification target.
nls97compba = pd.read_csv("data/nls97compba.csv")
feature_cols = ['satverbal','satmath','gpascience',
  'gpaenglish','gpamath','gpaoverall','gender','motherhighgrade',
  'fatherhighgrade','parentincome']
# separate NLS data into train and test datasets
X_train, X_test, y_train, y_test =  \
  train_test_split(nls97compba[feature_cols],\
  nls97compba[['completedba']], test_size=0.3, random_state=0)
# do one hot encoding and scaling
ohe = OneHotEncoder(drop_last=True, variables=['gender'])
ohe.fit(X_train)
X_train_enc, X_test_enc = \
  ohe.transform(X_train), ohe.transform(X_test)
scaler = StandardScaler()
# Scale every encoded column except the trailing gender dummy,
# then re-join the dummy so it stays 0/1.
standcols = X_train_enc.iloc[:,:-1].columns
scaler.fit(X_train_enc[standcols])
X_train_enc = \
  pd.DataFrame(scaler.transform(X_train_enc[standcols]),
  columns=standcols, index=X_train_enc.index).\
  join(X_train_enc[['gender_Female']])
X_test_enc = \
  pd.DataFrame(scaler.transform(X_test_enc[standcols]),
  columns=standcols, index=X_test_enc.index).\
  join(X_test_enc[['gender_Female']])
# logistic regression for feature importance
# L1 (lasso) penalty zeroes out weak coefficients; keep at most 5 features.
lr = LogisticRegression(C=1, penalty="l1", solver='liblinear')
regsel = SelectFromModel(lr, max_features=5)
regsel.fit(X_train_enc, y_train.values.ravel())
selcols = X_train_enc.columns[regsel.get_support()]
# Bare expressions below display values when run in a REPL/notebook.
selcols
lr.fit(regsel.transform(X_train_enc), y_train.values.ravel())
y_pred = lr.predict(regsel.transform(X_test_enc))
accuracy_score(y_test, y_pred)
# random forest classification for feature importance
rfc = RandomForestClassifier(n_estimators=100, max_depth=2,
  n_jobs=-1, random_state=0)
rfcsel = SelectFromModel(rfc, max_features=5)
rfcsel.fit(X_train_enc, y_train.values.ravel())
selcols = X_train_enc.columns[rfcsel.get_support()]
selcols
rfc.fit(rfcsel.transform(X_train_enc), y_train.values.ravel())
y_pred = rfc.predict(rfcsel.transform(X_test_enc))
accuracy_score(y_test, y_pred)
|
from typing import Tuple
from PIL import Image
def resize(input: str, heigth: int, width: int, output: str) -> None:
    """Resize the image at *input* to (width, heigth) and save it as JPEG at *output*.

    Note: the misspelled parameter name 'heigth' is kept for backward
    compatibility with existing keyword callers.

    Fix: the source image is opened in a context manager so the underlying
    file handle is released (the original leaked it).
    """
    with Image.open(input) as img:
        # Convert after resizing so RGBA/P-mode sources can be written as JPEG.
        img.resize((width, heigth)).convert('RGB').save(output, "JPEG")
|
#!/usr/bin/env python
import argparse
import csv
import datetime
import sys
from collections import defaultdict
import settings
detailed_report_date_format = "%Y-%m-%dT%H:%M:%S"
def week(date_str):
    """Return the Monday of the week containing *date_str*, formatted
    according to settings.report_date_format."""
    parsed = datetime.datetime.strptime(date_str, detailed_report_date_format)
    monday = parsed - datetime.timedelta(days=parsed.weekday())
    return monday.strftime(settings.report_date_format)
if __name__ == '__main__':
    # Fix: "stadard" -> "standard" in the user-facing help text.
    parser = argparse.ArgumentParser(
        description="Generate individual report CSV from detailed report CSV. "
                    "Detailed report CSV accepted from standard input, "
                    "individual report printed to standard output.\n Detailed "
                    "report entries also validated, validation notes printed to"
                    " stderr.\n"
                    "Typical usage:\n"
                    " ./detailed_report.py | tee detailed_report.csv | "
                    "./individual_report.py > individual_report.csv 2> "
                    "reporting_violations.csv")
    parser.add_argument('-i', '--input', default="-", nargs="?",
                        type=argparse.FileType('r'),
                        help='File to use as input, empty or "-" for stdin')
    parser.add_argument('-o', '--output', default="-",
                        type=argparse.FileType('w'),
                        help='Output filename, "-" or skip for stdout')
    parser.add_argument('-n', '--threshold', type=int, default=10,
                        help='time record threshold in hours')
    args = parser.parse_args()
    # helper variables
    # last_records[user] -> most recently ending record, used to detect overlaps
    last_records = {}
    week_names = []
    # individual_report[user][team][project][week_name] = hours
    individual_report = defaultdict(
        lambda: defaultdict(
            lambda: defaultdict(
                lambda: defaultdict(lambda: 0))))
    # record.keys() = ['user', 'team', 'project', 'start', 'duration']
    reader = csv.DictReader(args.input)
    err_writer = csv.DictWriter(
        sys.stderr, ['user', 'team', 'duration', 'project', 'date', 'rule'])
    err_writer.writeheader()
    for record in reader:
        week_name = week(record['start'])
        # NOTE(review): assumes the input is sorted chronologically; weeks
        # arriving out of order would be appended as duplicate columns.
        if not week_names or week_names[-1] != week_name:
            week_names.append(week_name)
        hours = float(record['duration'])
        # example of record['start'] = 2015-05-29T16:07:20
        record_date = record['start'][:10]
        # -1 minute is to compensate for round error in conversion to hours
        # this duration is only used to check for overlapping entries and
        # should not affect overall statistics
        duration = datetime.timedelta(hours=hours, minutes=-1)
        end = datetime.datetime.strptime(
            record['start'], detailed_report_date_format) + duration
        record['end'] = end.strftime(detailed_report_date_format)
        user = record['user']
        project = record['project']
        team = record['team']
        # TIME LOGGING SANITY CHECK
        # long records, missing project, overlapping
        # missing project
        if not project:
            record['project'] = project = '(no project)'
            err_writer.writerow({
                'user': user,
                'team': team,
                'rule': 'record without project',
                'duration': hours,
                'project': project,
                'date': record_date,
            })
        # check for overlapping entry
        # (string comparison works because timestamps are ISO-8601 formatted)
        if user in last_records and \
                last_records[user]['end'] > record['start']:
            err_writer.writerow({
                'user': user,
                'team': team,
                'rule': 'overlaps: %(start)s %(project)s' % record,
                'duration': hours,
                'project': project,
                'date': record_date,
            })
            # Keep whichever record ends latest as the overlap reference.
            if record['end'] > last_records[user]['end']:
                last_records[user] = record
        else:
            last_records[user] = record
        # long records
        if hours > args.threshold:
            err_writer.writerow({
                'user': user,
                'team': team,
                'rule': 'record > %s hours' % args.threshold,
                'duration': hours,
                'project': project,
                'date': record_date,
            })
        individual_report[user][team][project][week_name] += hours
    # Now we'll aggregate stats, calculate average etc
    report_writer = csv.DictWriter(
        args.output, ['user', 'team', 'project', 'average'] + week_names)
    report_writer.writeheader()
    for user, user_records in individual_report.items():
        for team, user_team_records in user_records.items():
            for project, user_team_project_records in user_team_records.items():
                # One row per (user, team, project): hours per week + average.
                records = {
                    week: round(user_team_project_records[week], 2)
                    for week in week_names
                }
                average = sum(records.values()) / len(records)
                records.update({
                    'user': user,
                    'team': team,
                    'project': project,
                    'average': round(average, 2),
                })
                report_writer.writerow(records)
|
#!/usr/bin/env python3
"""Python for Secret Agents
Chapter 4 example 5
Norfolk health inspection HTML reading.
This requires Beautiful Soup 4
"""
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup
import json
from types import SimpleNamespace
# A place we're going to work with.
# Demo 1: the place modelled as a plain dict (name/address/lat/lng keys).
some_place = { 'name': 'The Name', 'address': '333 Waterside Drive' }
some_place['lat']= 36.844305
some_place['lng']= -76.29111999999999
# A class definition for a Restaurant.
class Restaurant:
    """Simple record for a restaurant: a name and a street address."""
    def __init__(self, name, address):
        self.name = name
        self.address = address
# Example of an object of class Restaurant.
# Demo 2: rebinds some_place to a Restaurant instance.
some_place= Restaurant( 'The Name', '333 Waterside Drive' )
some_place.lat= 36.844305
some_place.lng= -76.29111999999999
# Demo 3: the same record as a SimpleNamespace (rebinds some_place again).
some_place= SimpleNamespace( name='The Name', address='333 Waterside Drive' )
# A place to get healthcode data.
scheme_host= "http://healthspace.com"
def get_food_list_by_name():
    """Fetch the Norfolk food-facility list page and return it parsed
    as a BeautifulSoup document.  Performs network I/O.
    """
    # NOTE: "Norolk" is the spelling used by the live site's URL; do not "fix" it.
    path= "/Clients/VDH/Norfolk/Norolk_Website.nsf/Food-List-ByName"
    form = {
        "OpenView": "",
        "RestrictToCategory": "FAA4E68B1BBBB48F008D02BF09DD656F",
        "count": "400",
        "start": "1",
    }
    query= urllib.parse.urlencode( form )
    with urllib.request.urlopen(scheme_host + path + "?" + query) as data:
        soup= BeautifulSoup( data.read() )
    return soup
# Explore the BeautifulSoup object to gather data.
def food_table_iter( soup ):
    """Get the food table from a parsed Soup.

    Columns are 'Name', '', 'Facility Location', 'Last Inspection', 'Details URL'

    Yields one list per table row: the stripped text of each cell, plus the
    RestrictToCategory key extracted from any link found in the row.
    """
    table= soup.html.body.table
    for row in table.find_all("tr"):
        columns = [ td.text.strip() for td in row.find_all("td") ]
        for td in row.find_all("td"):
            if td.a:
                # The link's query string carries the facility's category key.
                url= urllib.parse.urlparse( td.a["href"] )
                form= urllib.parse.parse_qs( url.query )
                columns.append( form['RestrictToCategory'][0] )
        yield columns
def food_row_iter(table_iter):
    """Convert raw food-table rows into namespaces.

    The first row must be the heading
    ['Name', '', 'Facility Location', 'Last Inspection']; every following
    row yields a SimpleNamespace with name, address, last_inspection and
    category attributes.
    """
    header = next(table_iter)
    assert ['Name', '', 'Facility Location', 'Last Inspection'] == header
    for columns in table_iter:
        yield SimpleNamespace(
            name=columns[0],
            address=columns[2],
            last_inspection=columns[3],
            category=columns[4],
        )
# Demonstrate that we're finding the food table in the healthcode data.
# NOTE: runs at import time and performs network I/O against the live site.
for row in food_table_iter(get_food_list_by_name()):
    print(row)
# Get facility history
def get_food_facility_history( category_key ):
    """Fetch and parse the facility-history page for *category_key*
    (a RestrictToCategory value extracted from the list-page links).
    Performs network I/O.
    """
    url_detail= "/Clients/VDH/Norfolk/Norolk_Website.nsf/Food-FacilityHistory"
    form = {
        "OpenView": "",
        "RestrictToCategory": category_key
    }
    query= urllib.parse.urlencode( form )
    with urllib.request.urlopen(scheme_host + url_detail + "?" + query) as data:
        soup= BeautifulSoup( data.read() )
    return soup
# Translate HTML column titles to Python attribute names.
# Used by inspection_detail(); a site label missing from this mapping
# raises KeyError there.
vdh_detail_translate = {
    'Phone Number:': 'phone_number',
    'Facility Type:': 'facility_type',
    '# of Priority Foundation Items on Last Inspection:':
        'priority_foundation_items',
    '# of Priority Items on Last Inspection:': 'priority_items',
    '# of Core Items on Last Inspection:': 'core_items',
    '# of Critical Violations on Last Inspection:': 'critical_items',
    '# of Non-Critical Violations on Last Inspection:': 'non_critical_items',
}
# Get inspection details, creating details for a given Business object.
def inspection_detail( business ):
    """Attach inspection details from the facility-history page to *business*.

    Rows in table are
    'Phone Number:',
    'Facility Type:',
    '# of Priority Foundation Items on Last Inspection:',
    '# of Priority Items on Last Inspection:',
    '# of Core Items on Last Inspection:',
    '# of Critical Violations on Last Inspection:'
    '# of Non-Critical Violations on Last Inspection:',
    Translate to more useful names
    phone_number, facility_type, priority_foundation_items,
    priority_items, core_items, critical_items, non_critical_items

    Mutates and returns *business*.  Performs network I/O via
    get_food_facility_history().
    """
    soup= get_food_facility_history( business.category )
    business.name2= soup.body.h2.text.strip()
    table= soup.body.table
    #print( table )
    for row in table.find_all("tr"):
        column = list( row.find_all( "td" ) )
        #print( column )
        name= column[0].text.strip()
        value= column[1].text.strip()
        # Map the site's label to an attribute name and attach the value.
        setattr( business, vdh_detail_translate[name], value )
    return business
# Get geocode details for a business.
def geocode_detail( business ):
    """Look up *business.address* with the Google geocoding API and attach
    latitude/longitude attributes.  Mutates and returns *business*.
    Performs network I/O.
    """
    scheme_netloc_path = "https://maps.googleapis.com/maps/api/geocode/json"
    form = {
        "address": business.address + ", Norfolk, VA",
        "sensor": "false",
        #"key": Your API Key,
    }
    # safe="," keeps the address commas unescaped in the query string.
    query = urllib.parse.urlencode( form, safe="," )
    with urllib.request.urlopen( scheme_netloc_path+"?"+query ) as geocode:
        response= json.loads( geocode.read().decode("UTF-8") )
    # Take the first (best) match the API returned.
    lat_lon = response['results'][0]['geometry']['location']
    business.latitude= lat_lon['lat']
    business.longitude= lat_lon['lng']
    return business
# Demonstrate that we have the basic tools in place.
# NOTE: import-time demo; fetches the live list and a detail page per row.
soup= get_food_list_by_name()
for row in food_row_iter( food_table_iter( soup ) ):
    inspection_detail( row )
# Add our haversine() function to compute distances.
from ch_4_ex_3 import haversine
# Iterate through restaurants, providing distance from our HQ.
def choice_iter():
    """Yield each restaurant, annotated with inspection details and its
    haversine distance from the Waterside Drive base location.

    Performs one geocoding request per restaurant plus a detail fetch, so
    it can be slow on a typical connection.
    """
    base= SimpleNamespace( address= '333 Waterside Drive' )
    geocode_detail( base )
    print( base ) # latitude= 36.844305, longitude= -76.29111999999999 )
    soup= get_food_list_by_name()
    for row in food_row_iter( food_table_iter( soup ) ):
        geocode_detail( row )
        inspection_detail( row )
        row.distance= haversine(
            (row.latitude, row.longitude),
            (base.latitude, base.longitude) )
        yield row
# This may be slow; depends on your internet connection.
#for c in choice_iter():
# print( c )
|
import os
import django
# Configure Django for standalone use: point at the project settings and
# initialise the app registry before importing any model.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "board.settings")
django.setup()
# Models may only be imported after django.setup() has run.
from movie_back.movies.models import Movie
print(Movie.objects.all())
# From now onwards start your script..
# Interactive demo: repeatedly show a menu, read a mode character and
# three integers, then run either a sign test ('a') or equality
# comparisons ('b').  'q' quits.
while True:
    for banner_line in (
        "#####################################################",
        "# This is loop for comparison between three numbers #",
        "# type a for positivity test and b for comparison   #",
        "# also, type q for quit                             #",
        "#####################################################",
    ):
        print(banner_line)
    switch = input("Type a character: ")
    if switch == 'q':
        break
    # All three numbers are read as text before any conversion, even when
    # the mode character is invalid.
    x = input("Now, type the first number, x, you want to compare.: ")
    y = input("Second number, y.: ")
    z = input("Last number, z.: ")
    x, y, z = int(x), int(y), int(z)
    if switch == 'a':
        if x > 0:
            print("x is positive")
        if y > 0:
            print("y is positive")
        else:
            print("y is not positive")
        if z > 0:
            print("z is positive")
        elif z < 0:
            print("z is negative")
        else:
            print("z must be 0")
    elif switch == 'b':
        print("x is equal to y: ", x == y)
        print("x is not equal to y: ", x != y)
        print("x is equal to z: ", x == z)
        print("x is not equal to z: ", x != z)
    else:
        # Unknown mode: just show the menu again.
        continue
|
def turn(instruction, facing):
    """Return the new compass heading after turning from *facing*.

    'R' turns clockwise; any other instruction turns counter-clockwise.
    An unrecognised heading yields None (implicitly), as before.
    """
    clockwise = {'n': 'e', 'e': 's', 's': 'w', 'w': 'n'}
    counter_clockwise = {'n': 'w', 'w': 's', 's': 'e', 'e': 'n'}
    if facing in clockwise:
        if instruction == 'R':
            return clockwise[facing]
        return counter_clockwise[facing]
def move(instruction, facing, location):
    """Advance `instruction` units along `facing`, mutating `location` in place."""
    location[facing] += instruction
# Net displacement accumulated per compass direction, plus current heading.
location = {'n': 0, 's': 0, 'w': 0, 'e': 0}
facing = 'n'
directions = []
# Instructions come as a comma-separated list, e.g. "R5, L3, R2".
with open("input.dat") as f:
    directions = f.read().split(", ")
for direction in directions:
    # First char is the turn ('R'/'L'), the rest is the step count.
    facing = turn(direction[0], facing)
    move(int(direction[1:]), facing, location)
# Manhattan (taxicab) distance from the starting point.
vertical = abs(location['n'] - location['s'])
horizontal = abs(location['w'] - location['e'])
print(vertical + horizontal)
|
from django.conf.urls.static import static
from django.urls import include, path
from . import views
# URL routes for this app: the empty path serves the index view as "main".
urlpatterns = [
    path("", views.index, name="main"),
]
|
# Copyright (C) 2015-2022 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
from ctypes import *
from ._c_bridge import VscfRsa
from ._c_bridge._vscf_error import vscf_error_t
from ._c_bridge import VscfImplTag
from ._c_bridge import VscfStatus
from .raw_public_key import RawPublicKey
from .raw_private_key import RawPrivateKey
from virgil_crypto_lib.common._c_bridge import Data
from virgil_crypto_lib.common._c_bridge import Buffer
from .key_alg import KeyAlg
from .key_cipher import KeyCipher
from .key_signer import KeySigner
class Rsa(KeyAlg, KeyCipher, KeySigner):
    """RSA implementation.

    Thin wrapper over the native ``vscf_rsa`` C context: every method
    delegates to the C bridge and converts error statuses into exceptions
    via ``VscfStatus.handle_status``.
    """

    # Defines whether a public key can be imported or not.
    CAN_IMPORT_PUBLIC_KEY = True
    # Defines whether a public key can be exported or not.
    CAN_EXPORT_PUBLIC_KEY = True
    # Defines whether a private key can be imported or not.
    CAN_IMPORT_PRIVATE_KEY = True
    # Defines whether a private key can be exported or not.
    CAN_EXPORT_PRIVATE_KEY = True

    def __init__(self):
        """Create underlying C context."""
        self._lib_vscf_rsa = VscfRsa()
        self._c_impl = None
        self._ctx = None
        self.ctx = self._lib_vscf_rsa.vscf_rsa_new()

    def __del__(self):
        """Destroy underlying C context.

        Fix: this was previously declared as the descriptor hook
        ``__delete__(self, instance)``, which Python never invokes on an
        ordinary instance, so the native context was leaked. ``__del__`` is
        the instance finalizer.
        """
        self._lib_vscf_rsa.vscf_rsa_delete(self.ctx)

    def set_random(self, random):
        """Inject the random-generator dependency into the C context."""
        self._lib_vscf_rsa.vscf_rsa_use_random(self.ctx, random.c_impl)

    def generate_ephemeral_key(self, key):
        """Generate ephemeral private key of the same type.
        Note, this operation might be slow."""
        error = vscf_error_t()
        result = self._lib_vscf_rsa.vscf_rsa_generate_ephemeral_key(self.ctx, key.c_impl, error)
        VscfStatus.handle_status(error.status)
        instance = VscfImplTag.get_type(result)[0].take_c_ctx(cast(result, POINTER(VscfImplTag.get_type(result)[1])))
        return instance

    def import_public_key(self, raw_key):
        """Import public key from the raw binary format.

        Return public key that is adopted and optimized to be used
        with this particular algorithm.
        Binary format must be defined in the key specification.
        For instance, RSA public key must be imported from the format defined in
        RFC 3447 Appendix A.1.1."""
        error = vscf_error_t()
        result = self._lib_vscf_rsa.vscf_rsa_import_public_key(self.ctx, raw_key.ctx, error)
        VscfStatus.handle_status(error.status)
        instance = VscfImplTag.get_type(result)[0].take_c_ctx(cast(result, POINTER(VscfImplTag.get_type(result)[1])))
        return instance

    def export_public_key(self, public_key):
        """Export public key to the raw binary format.

        Binary format must be defined in the key specification.
        For instance, RSA public key must be exported in format defined in
        RFC 3447 Appendix A.1.1."""
        error = vscf_error_t()
        result = self._lib_vscf_rsa.vscf_rsa_export_public_key(self.ctx, public_key.c_impl, error)
        VscfStatus.handle_status(error.status)
        instance = RawPublicKey.take_c_ctx(result)
        return instance

    def import_private_key(self, raw_key):
        """Import private key from the raw binary format.

        Return private key that is adopted and optimized to be used
        with this particular algorithm.
        Binary format must be defined in the key specification.
        For instance, RSA private key must be imported from the format defined in
        RFC 3447 Appendix A.1.2."""
        error = vscf_error_t()
        result = self._lib_vscf_rsa.vscf_rsa_import_private_key(self.ctx, raw_key.ctx, error)
        VscfStatus.handle_status(error.status)
        instance = VscfImplTag.get_type(result)[0].take_c_ctx(cast(result, POINTER(VscfImplTag.get_type(result)[1])))
        return instance

    def export_private_key(self, private_key):
        """Export private key in the raw binary format.

        Binary format must be defined in the key specification.
        For instance, RSA private key must be exported in format defined in
        RFC 3447 Appendix A.1.2."""
        error = vscf_error_t()
        result = self._lib_vscf_rsa.vscf_rsa_export_private_key(self.ctx, private_key.c_impl, error)
        VscfStatus.handle_status(error.status)
        instance = RawPrivateKey.take_c_ctx(result)
        return instance

    def can_encrypt(self, public_key, data_len):
        """Check if algorithm can encrypt data with a given key."""
        result = self._lib_vscf_rsa.vscf_rsa_can_encrypt(self.ctx, public_key.c_impl, data_len)
        return result

    def encrypted_len(self, public_key, data_len):
        """Calculate required buffer length to hold the encrypted data."""
        result = self._lib_vscf_rsa.vscf_rsa_encrypted_len(self.ctx, public_key.c_impl, data_len)
        return result

    def encrypt(self, public_key, data):
        """Encrypt data with a given public key."""
        d_data = Data(data)
        out = Buffer(self.encrypted_len(public_key=public_key, data_len=len(data)))
        status = self._lib_vscf_rsa.vscf_rsa_encrypt(self.ctx, public_key.c_impl, d_data.data, out.c_buffer)
        VscfStatus.handle_status(status)
        return out.get_bytes()

    def can_decrypt(self, private_key, data_len):
        """Check if algorithm can decrypt data with a given key.

        However, success result of decryption is not guaranteed."""
        result = self._lib_vscf_rsa.vscf_rsa_can_decrypt(self.ctx, private_key.c_impl, data_len)
        return result

    def decrypted_len(self, private_key, data_len):
        """Calculate required buffer length to hold the decrypted data."""
        result = self._lib_vscf_rsa.vscf_rsa_decrypted_len(self.ctx, private_key.c_impl, data_len)
        return result

    def decrypt(self, private_key, data):
        """Decrypt given data."""
        d_data = Data(data)
        out = Buffer(self.decrypted_len(private_key=private_key, data_len=len(data)))
        status = self._lib_vscf_rsa.vscf_rsa_decrypt(self.ctx, private_key.c_impl, d_data.data, out.c_buffer)
        VscfStatus.handle_status(status)
        return out.get_bytes()

    def can_sign(self, private_key):
        """Check if algorithm can sign data digest with a given key."""
        result = self._lib_vscf_rsa.vscf_rsa_can_sign(self.ctx, private_key.c_impl)
        return result

    def signature_len(self, private_key):
        """Return length in bytes required to hold signature.

        Return zero if a given private key can not produce signatures."""
        result = self._lib_vscf_rsa.vscf_rsa_signature_len(self.ctx, private_key.c_impl)
        return result

    def sign_hash(self, private_key, hash_id, digest):
        """Sign data digest with a given private key."""
        d_digest = Data(digest)
        signature = Buffer(self.signature_len(private_key=private_key))
        status = self._lib_vscf_rsa.vscf_rsa_sign_hash(self.ctx, private_key.c_impl, hash_id, d_digest.data, signature.c_buffer)
        VscfStatus.handle_status(status)
        return signature.get_bytes()

    def can_verify(self, public_key):
        """Check if algorithm can verify data digest with a given key."""
        result = self._lib_vscf_rsa.vscf_rsa_can_verify(self.ctx, public_key.c_impl)
        return result

    def verify_hash(self, public_key, hash_id, digest, signature):
        """Verify data digest with a given public key and signature."""
        d_digest = Data(digest)
        d_signature = Data(signature)
        result = self._lib_vscf_rsa.vscf_rsa_verify_hash(self.ctx, public_key.c_impl, hash_id, d_digest.data, d_signature.data)
        return result

    def setup_defaults(self):
        """Setup predefined values to the uninitialized class dependencies."""
        status = self._lib_vscf_rsa.vscf_rsa_setup_defaults(self.ctx)
        VscfStatus.handle_status(status)

    def generate_key(self, bitlen):
        """Generate new private key.
        Note, this operation might be slow."""
        error = vscf_error_t()
        result = self._lib_vscf_rsa.vscf_rsa_generate_key(self.ctx, bitlen, error)
        VscfStatus.handle_status(error.status)
        instance = VscfImplTag.get_type(result)[0].take_c_ctx(cast(result, POINTER(VscfImplTag.get_type(result)[1])))
        return instance

    @classmethod
    def take_c_ctx(cls, c_ctx):
        """Create an instance wrapping the given C context (bypasses __init__)."""
        inst = cls.__new__(cls)
        inst._lib_vscf_rsa = VscfRsa()
        inst.ctx = c_ctx
        return inst

    @classmethod
    def use_c_ctx(cls, c_ctx):
        """Create an instance sharing the given C context via shallow copy."""
        inst = cls.__new__(cls)
        inst._lib_vscf_rsa = VscfRsa()
        inst.ctx = inst._lib_vscf_rsa.vscf_rsa_shallow_copy(c_ctx)
        return inst

    @property
    def c_impl(self):
        """Implementation handle derived from the context by the ctx setter."""
        return self._c_impl

    @property
    def ctx(self):
        """Raw C context pointer."""
        return self._ctx

    @ctx.setter
    def ctx(self, value):
        # Shallow copy keeps the native refcount consistent with __del__.
        self._ctx = self._lib_vscf_rsa.vscf_rsa_shallow_copy(value)
        self._c_impl = self._lib_vscf_rsa.vscf_rsa_impl(self.ctx)
|
import re
from lib.task.base import Task, MultipleChoiceTask
from typing import Dict, List
class Task09(MultipleChoiceTask):
    """Task 9: classify the vowel-spelling rule asked for in the statement."""

    RULE_PG = 'ПГ'
    RULE_NG = 'НГ'
    RULE_CG = 'ЧГ'

    def __init__(self, data: Dict):
        super().__init__(data)
        self.type = Task.TYPE_09
        # Extract every gapped word ("под..бный"-style) from each choice.
        gap_word = re.compile(r'([а-яё]+\.\.[а-яё]+)')
        self.lines = [
            gap_word.findall(choice['text'].lower())
            for choice in self.question['choices']
        ]
        text = self.text.lower()
        self.rule = None
        # Order matters: 'непроверяемая' contains 'проверяемая' as a
        # substring, so it must be tested first.
        if 'непроверяемая' in text:
            self.rule = self.RULE_NG
        elif 'проверяемая' in text:
            self.rule = self.RULE_PG
        elif 'чередующаяся' in text:
            self.rule = self.RULE_CG
|
# file: $pyIBM/src/case.py
# author: Olivier Mesnard (mesnardo@gwu.edu)
# BarbaGroup (lorenabarba.com)
import os
class Case:
    """Sets up information about the case.

    NOTE: attributes are stored on the class itself (Case.path, Case.name,
    Case.images), so the last constructed case wins — preserved behavior.
    """
    def __init__(self, path):
        """Gets the path and name of the case.

        Arguments
        ---------
        path -- location of the case folder (root is the path of pyIBM.py).
        """
        Case.path = os.path.join(os.getcwd(), path)
        Case.name = os.path.basename(os.path.normpath(Case.path))
        Case.images = os.path.join(Case.path, 'images')
        # Create the images sub-folder. Fix: the previous
        # os.system('mkdir ...') failed when the parent folder did not
        # exist or the path contained spaces; os.makedirs creates missing
        # parents and is portable.
        if not os.path.isdir(Case.images):
            os.makedirs(Case.images)
|
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

# Download/load MNIST with one-hot labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
train = mnist.train
test = mnist.test
validation = mnist.validation

nexamples = 9
# One training batch, reshaped from flat 784 vectors to 28x28 images.
examples = train.next_batch(nexamples)[0].reshape((nexamples, 28, 28))

# Show the nine digits as grayscale images.
# (Loop replaces nine copy-pasted imshow calls; axes.flat iterates
# row-major, matching the original explicit [row, col] order.)
plt.gray()
fig, axes = plt.subplots(3, 3)
for idx, ax in enumerate(axes.flat):
    ax.imshow(examples[idx])
plt.show()

# Show the same digits as flattened 784-sample signals.
fig, axes = plt.subplots(3, 3)
for idx, ax in enumerate(axes.flat):
    ax.plot(examples[idx].reshape((28 * 28)))
plt.show()
|
from cloudshell.layer_one.core.response.resource_info.entities.attributes import \
NumericAttribute
from cloudshell.layer_one.core.response.resource_info.entities.base import ResourceInfo
from cloudshell.layer_one.core.response.resource_info.entities.port import Port
class VLEPort(Port):
    """Layer-1 VLE port resource."""

    # Port speed (lowercase) -> numeric protocol-type code.
    PROTOCOL_TYPE_VALUES = {
        "1g": "81",
        "10g": "82",
        "25g": "83",
        "40g": "84",
        "100g": "85"
    }

    def __init__(self, logical_id):
        # Zero-pad the logical id to three digits for the display name.
        padded_id = str(logical_id).zfill(3)
        ResourceInfo.__init__(self, logical_id,
                              self.NAME_TEMPLATE.format(padded_id),
                              self.FAMILY_NAME, self.MODEL_NAME, "NA")

    def set_protocol_type_by_speed(self, value):
        """ Set protocol type. """
        code = self.PROTOCOL_TYPE_VALUES.get(value.lower())
        if code:
            self.attributes.append(NumericAttribute("Protocol Type", code))

    def set_protocol(self, value):
        """ Set protocol. """
        if value:
            self.attributes.append(NumericAttribute("Protocol", value))
|
# Binary-search the largest cut height for which the harvested total
# (sum of the parts above the cut) still reaches b.
a, b = map(int, input().split())
heights = list(map(int, input().split()))
low, high = 0, 1000000000
while low <= high:
    mid = (low + high) // 2
    harvested = sum(h - mid for h in heights if h > mid)
    if harvested >= b:
        low = mid + 1
    else:
        high = mid - 1
print(high)
|
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
import dash_leaflet as dl
from dash_leaflet import express as dlx
import json
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
import plotly.express as px
import os
import glob
import flask
import pandas as pd
#import seaborn as sns
# Input data locations (relative to the app's working directory).
data_path = '../data/matrix/matrix_consol_v2.zip'
basins_map_path = '../data/shapes/Macro_Cuencas.json'
################# DEFINE THE DASH APP ####################
app = dash.Dash(external_stylesheets=[dbc.themes.MATERIA])
# image_directory = os.getcwd() + '/img/'
# list_of_images = [os.path.basename(x) for x in glob.glob('{}*.*'.format(image_directory))]
# static_image_route = '/static/'
# Add a static image route that serves images from local directory
# @app.server.route('{}<image_path>'.format(static_image_route))
# def serve_image(image_path):
# image_name = '{}'.format(image_path)
# if image_name not in list_of_images:
# raise Exception('"{}" is excluded from the allowed static files'.format(image_path))
# return flask.send_from_directory(image_directory, image_name)
############################################################
########################## LAYOUT ##########################
############################################################
########## HEADER ###########
# Top banner: logos on both sides, project title in the middle.
header = html.Div([
    dbc.Row([
        dbc.Col(html.Img(src=f"assets/img/DS4A.svg", className="vertical-center"), md = 2),
        dbc.Col(html.H3(u"IMPACT OF FOREST COVER LOSS ON RIVER FLOW REGIME IN COLOMBIA", id="titulo",
                style={"color":"purple", "text-align": "center"}),
                md = 8),
        dbc.Col(html.Img(src=f"assets/img/col-gov-logo.png", width="200px"), md = 2),
    ], className="vertical-center")
],
className="container_ds4a container")
########### CARDS #############
# Scenario selector (1-4); hidden by default and revealed by the
# predictive/descriptive switch callback (see on_switch).
scenarios = {1:'1', 2:'2', 3:'3', 4:'4'}
scn_slider = html.Div([
    dcc.Slider(
        min=1,
        max=4,
        step=None,
        marks=scenarios,
        value=2,
    )
],
style= {'display': 'none'},
id = "scenarios"
)
# Four summary indicator cards; the displayed figures are hard-coded
# placeholders.
cards = html.Div([
    dbc.Row([
        dbc.Col(
            dbc.Card([
                html.Img(src="assets/img/cover_loss.png", className="card-img"),
                html.Div([
                    html.H5("Cover Loss", className="card-title"),
                    html.H1(["30", html.Small("%")], className="display-4")
                ], className = "card-img-overlay card_ds4a"),
            ], className = "text-right"),
            md = 3
        ),
        dbc.Col(
            dbc.Card([
                html.Img(src="assets/img/flow.png", className="card-img"),
                html.Div([
                    html.H5("Flow", className="card-title"),
                    html.H1(["17", html.Small("mm")], className="display-4")
                ], className = "card-img-overlay card_ds4a"),
            ], className = "text-left"),
            md = 3
        ),
        dbc.Col(
            dbc.Card([
                html.Img(src="assets/img/precipitation.png", className="card-img"),
                html.Div([
                    html.H5("Precipitation", className="card-title"),
                    html.H1(["25", html.Small("mm")], className="display-4")
                ], className = "card-img-overlay card_ds4a"),
            ], className = "text-left"),
            md = 3
        ),
        dbc.Col(
            [
                dbc.Card([
                    html.Img(src="assets/img/temperature.png", className="card-img"),
                    html.Div([
                        html.H5("Temperature", className="card-title"),
                        html.H1(["28", html.Small("°"), "C"], className="display-4")
                    ], className = "card-img-overlay card_ds4a"),
                ], className = "text-right"
                ),
                scn_slider,
            ],
            md = 3
        ),
    ], className="vertical-center")
],
className="container_ds4a container")
########### BODY #############
# Predictive/descriptive mode toggle (a Bootstrap switch-style checklist).
switch = html.Div([
    html.Div([
        dbc.Label("Predictive", style={"margin-bottom": "5px",}),
        html.Br(),
        dbc.Label("Descriptive"),
    ],
    className="switch-container",
    ),
    html.Div(
        [
            dbc.Checklist(
                options=[
                    {"value": 1},
                ],
                value=[],
                id="predictive-descriptive-switch",
                inline=True,
                switch=True,
            )
        ],
        className = "custom-control custom-switch"
    )
])
year_slider = html.Div([
    dcc.Slider(
        min=2000,
        max=2019,
        step=None,
        marks={value: str(value) for value in range(2000, 2020)},
        value=2010,
        className="slider-ds4a",
        id='year-slider',
    )
])
# NOTE(review): month 8 is labeled 'AGO' (Spanish); English would be 'AUG'.
months = {1:'JAN', 2:'FEB', 3:'MAR', 4:'APR', 5:'MAY', 6:'JUN',
          7:'JUL', 8:'AGO', 9:'SEP', 10:'OCT', 11:'NOV', 12:'DEC'}
month_slider = html.Div([
    dcc.Slider(
        min=1,
        max=12,
        step=None,
        marks=months,
        value=10,
        id='month-slider',
    )
])
#https://dash-leaflet.herokuapp.com/#us_states
# Macrobasin polygons: loaded once as JSON for the leaflet layer.
basins_map_js = pd.read_json(basins_map_path)
basins_map_data = None
with open(basins_map_path) as f:
    basins_map_data = json.load(f)
# Choropleth thresholds and their matching fill colors.
marks = [0, 7, 14, 21, 28, 35, 42, 48]
colorscale = ['#FFEDA0', '#FED976', '#FEB24C', '#FD8D3C', '#FC4E2A', '#E31A1C', '#BD0026', '#800026']

def get_style(feature):
    """Return the leaflet path style for a macrobasin polygon.

    The fill color corresponds to the highest mark the feature value
    exceeds (raises IndexError when the value exceeds no mark, matching
    the original behavior).
    """
    value = feature["properties"]["Macrocuenca"]
    exceeded = [colorscale[idx] for idx, mark in enumerate(marks) if value > mark]
    return dict(fillColor=exceeded[-1], weight=2, opacity=1, color='white',
                dashArray='3', fillOpacity=0.7)
def get_info(feature=None):
    """Build the hover-info panel children for the basins map."""
    title = [html.H4("Macrobasin")]
    if not feature:
        # Nothing hovered yet: show the prompt only.
        return title + ["Hoover over a macrobasin"]
    props = feature["properties"]
    return title + [html.B(props["Macrocuenca"]), html.Br(),
                    "{:.1f} ha".format(props["Area"])]
def get_macrobasin_id(feature=None):
    """Extract the macrobasin identifier from a clicked GeoJSON feature."""
    properties = feature["properties"]
    return properties["Macrocuenca"]
# Legend labels for the colorbar.
# NOTE(review): the second argument to format() is unused, so every label
# renders as "N+" rather than "N-M".
ctg = ["{}+".format(mark, marks[i + 1]) for i, mark in enumerate(marks[:-1])] + ["{}+".format(marks[-1])]
colorbar = dlx.categorical_colorbar(categories=ctg, colorscale=colorscale, width=300, height=30, position="bottomleft")
options = dict(hoverStyle=dict(weight=5, color='#666', dashArray=''), zoomToBoundsOnClick=True)
basins_map_json = dlx.geojson(basins_map_data, id="basins_map", defaultOptions=options, style=get_style)
# Floating info panel, pinned to the map's top-right corner.
info = html.Div(children=get_info(), id="info", className="info",
                style={"position": "absolute", "top": "10px", "right": "10px", "z-index": "1000"})
# Leaflet map centered on Colombia (Bogotá coordinates).
map_graph = [dl.Map(children=[dl.TileLayer(), basins_map_json, colorbar, info], center=[4.60971, -74.08175], zoom=5)]
# Refresh the info panel whenever the hovered map feature changes.
@app.callback(Output("info", "children"), [Input("basins_map", "featureHover")])
def info_hover(feature):
    """Return the info-panel children for the currently hovered feature."""
    return get_info(feature)
# Plots: time-series matrix, indexed by macrobasin id ('mc').
data = pd.read_csv(data_path, parse_dates = ['date'])
data.set_index(['mc'], inplace = True)
def plot_data(macrobasin, variables, year, month=1):
    """Build a line figure of the first variable for one macrobasin.

    The x-axis runs from January 1st of `year` to the last day of `month`.
    Only `variables[0]` is plotted (the original multi-variable loop was
    left commented out).
    """
    import calendar
    dfc = data.loc[macrobasin].copy()
    var = variables[0]
    # Fix: the previous hard-coded 28/30/31 table used 28 for February
    # even in leap years; monthrange handles that correctly.
    # (Also removed a leftover debug print of the arguments.)
    last_day = calendar.monthrange(int(year), int(month))[1]
    return px.line(
        dfc, x='date', y=var,
        range_x=[f'{year}-01-01', f'{year}-{month}-{last_day}'],
        height=250)
#flow_graph = dcc.Graph(id="flow-graph")
#flow_graph.figure = plot_data(1, ['v_rainfall_total'], '2010', '1')
#TODO: Capture the change and filter the data; the graphs watch this data so updates propagate.
@app.callback(Output('scenarios', 'style'),[Input("predictive-descriptive-switch", 'value')])
def on_switch(value):
    """Show the scenarios slider only while predictive mode is selected."""
    if value:
        return {"display": "block"}
    return {"display": "none"}
@app.callback(Output('flow-graph', 'figure'),
              [Input('year-slider', 'value'), Input('month-slider', 'value'), Input("basins_map", "featureClick")])
def update_flow_graph(y_value, m_value, feature=None):
    """Redraw the mean-flow series for the clicked basin (default basin 1)."""
    macrobasin_id = 1 if feature is None else get_macrobasin_id(feature)
    return plot_data(macrobasin_id, ['v_flow_mean'], y_value, m_value)
@app.callback(Output('precipitation-graph', 'figure'),
              [Input('year-slider', 'value'), Input('month-slider', 'value'), Input("basins_map", "featureClick")])
def update_precipitation_graph(y_value, m_value, feature=None):
    """Redraw the total-rainfall series for the clicked basin (default basin 1)."""
    macrobasin_id = 1 if feature is None else get_macrobasin_id(feature)
    return plot_data(macrobasin_id, ['v_rainfall_total'], y_value, m_value)
@app.callback(Output('temperature-graph', 'figure'),
              [Input('year-slider', 'value'), Input('month-slider', 'value'), Input("basins_map", "featureClick")])
def update_temperature_graph(y_value, m_value, feature=None):
    """Redraw the mean-temperature series for the clicked basin (default basin 1)."""
    macrobasin_id = 1 if feature is None else get_macrobasin_id(feature)
    return plot_data(macrobasin_id, ['v_temperature_mean'], y_value, m_value)
# Main dashboard card: mode switch + year slider on top, then the basin
# map (with month slider) on the left and the three time-series graphs
# on the right.
main_card = html.Div(
    dbc.Card([
        dbc.Row([ #switch and timeline
            dbc.Col(#md2 predictive/descriptive switch
                switch,
                md=2
            ),
            dbc.Col(#md10 years slider
                year_slider,
                md=10,
            ),
        ]),
        dbc.Row([#map and graphs
            dbc.Col([#map and month_slider
                dbc.Row([
                    dbc.Col(
                        month_slider,
                    )
                ]),
                dbc.Row([
                    dbc.Col(
                        html.Div(map_graph,
                            style={'width': '100%', 'height': '50vh', 'margin': "auto", "display": "block"}, id="map"
                        )
                    ),
                ],
                ),
            ],
            md=6
            ),
            dbc.Col([#graphs
                dbc.Row([
                    dbc.Col(
                        dcc.Graph(id="flow-graph"),
                        #style={"margin-left":" 10px"}
                    ),
                ],no_gutters=True,
                ),
                dbc.Row([
                    dbc.Col(
                        dcc.Graph(id="precipitation-graph"),
                        #style={"margin-left":" 10px"}
                    ),
                ],no_gutters=True,
                ),
                dbc.Row([
                    dbc.Col(
                        dcc.Graph(id="temperature-graph"),
                        #style={"margin-left":" 10px"} figure=figs[2],
                    ),
                ],no_gutters=True,
                ),
            ],
            md=6
            )
        ])
    ],
    className='main-card'
    ),
    className="container"
)
# Assemble the full page: header, indicator cards, then the main card.
app.layout = dbc.Container(
    [
        header,
        cards,
        main_card
    ],
    fluid=True,
)
# Start the dev server when executed directly.
if __name__ in ["__main__"]:
    app.run_server(debug=True)
import pytest
from .env import H2Conf, H2TestEnv
@pytest.mark.skipif(condition=H2TestEnv.is_unsupported, reason="mod_http2 not supported here")
class TestInvalidHeaders:
    """mod_http2 behaviour for malformed header names/values and the
    LimitRequest* directives (request line, field size, field count)."""
    @pytest.fixture(autouse=True, scope='class')
    def _class_scope(self, env):
        # Every test in this class runs against a CGI-enabled vhost.
        H2Conf(env).add_vhost_cgi().install()
        assert env.apache_restart() == 0
    # let the hecho.py CGI echo chars < 0x20 in field name
    # for almost all such characters, the stream returns a 500
    # or in httpd >= 2.5.0 gets aborted with a h2 error
    # cr is handled special
    def test_h2_200_01(self, env):
        """Control chars 0x01-0x1f echoed in a response header *name*."""
        url = env.mkurl("https", "cgi", "/hecho.py")
        for x in range(1, 32):
            data = f'name=x%{x:02x}x&value=yz'
            r = env.curl_post_data(url, data)
            if x in [13]:
                assert 0 == r.exit_code, f'unexpected exit code for char 0x{x:02}'
                assert 200 == r.response["status"], f'unexpected status for char 0x{x:02}'
            elif x in [10] or env.httpd_is_at_least('2.5.0'):
                assert 0 == r.exit_code, f'unexpected exit code for char 0x{x:02}'
                assert 500 == r.response["status"], f'unexpected status for char 0x{x:02}'
            else:
                assert 0 != r.exit_code, f'unexpected exit code for char 0x{x:02}'
        #
        env.httpd_error_log.ignore_recent(
            lognos = [
                "AH02429"  # Response header name contains invalid characters
            ],
            matches = [
                r'.*malformed header from script \'hecho.py\': Bad header: x.*'
            ]
        )
    # let the hecho.py CGI echo chars < 0x20 in field value
    # for almost all such characters, the stream returns a 500
    # or in httpd >= 2.5.0 gets aborted with a h2 error
    # cr and lf are handled special
    def test_h2_200_02(self, env):
        """Control chars 0x01-0x1f echoed in a response header *value*."""
        url = env.mkurl("https", "cgi", "/hecho.py")
        for x in range(1, 32):
            if 9 != x:
                r = env.curl_post_data(url, "name=x&value=y%%%02x" % x)
                if x in [10, 13]:
                    assert 0 == r.exit_code, "unexpected exit code for char 0x%02x" % x
                    assert 200 == r.response["status"], "unexpected status for char 0x%02x" % x
                elif env.httpd_is_at_least('2.5.0'):
                    assert 0 == r.exit_code, f'unexpected exit code for char 0x{x:02}'
                    assert 500 == r.response["status"], f'unexpected status for char 0x{x:02}'
                else:
                    assert 0 != r.exit_code, "unexpected exit code for char 0x%02x" % x
        #
        env.httpd_error_log.ignore_recent(
            lognos = [
                "AH02430"  # Response header value contains invalid characters
            ]
        )
    # let the hecho.py CGI echo 0x10 and 0x7f in field name and value
    def test_h2_200_03(self, env):
        """0x10 and 0x7f bytes in both header name and value positions."""
        url = env.mkurl("https", "cgi", "/hecho.py")
        for h in ["10", "7f"]:
            r = env.curl_post_data(url, "name=x%%%s&value=yz" % h)
            if env.httpd_is_at_least('2.5.0'):
                assert 0 == r.exit_code, f"unexpected exit code for char 0x{h:02}"
                assert 500 == r.response["status"], f"unexpected exit code for char 0x{h:02}"
            else:
                assert 0 != r.exit_code
            r = env.curl_post_data(url, "name=x&value=y%%%sz" % h)
            if env.httpd_is_at_least('2.5.0'):
                assert 0 == r.exit_code, f"unexpected exit code for char 0x{h:02}"
                assert 500 == r.response["status"], f"unexpected exit code for char 0x{h:02}"
            else:
                assert 0 != r.exit_code
        #
        env.httpd_error_log.ignore_recent(
            lognos = [
                "AH02429",  # Response header name contains invalid characters
                "AH02430"  # Response header value contains invalid characters
            ]
        )
    # test header field lengths check, LimitRequestLine
    def test_h2_200_10(self, env):
        """Query string just under/over LimitRequestLine -> 200 vs 414."""
        conf = H2Conf(env)
        conf.add("""
            LimitRequestLine 1024
            """)
        conf.add_vhost_cgi()
        conf.install()
        assert env.apache_restart() == 0
        val = 200*"1234567890"
        url = env.mkurl("https", "cgi", f'/?{val[:1022]}')
        r = env.curl_get(url)
        assert r.response["status"] == 200
        url = env.mkurl("https", "cgi", f'/?{val[:1023]}')
        r = env.curl_get(url)
        # URI too long
        assert 414 == r.response["status"]
    # test header field lengths check, LimitRequestFieldSize (default 8190)
    def test_h2_200_11(self, env):
        """Header sizes around LimitRequestFieldSize -> 200 / 400 / 431."""
        conf = H2Conf(env)
        conf.add("""
            LimitRequestFieldSize 1024
            """)
        conf.add_vhost_cgi()
        conf.install()
        assert env.apache_restart() == 0
        url = env.mkurl("https", "cgi", "/")
        val = 200*"1234567890"
        # two fields, concatenated with ', '
        # LimitRequestFieldSize, one more char -> 400 in HTTP/1.1
        r = env.curl_get(url, options=[
            '-H', f'x: {val[:500]}', '-H', f'x: {val[:519]}'
        ])
        assert r.exit_code == 0, f'{r}'
        assert r.response["status"] == 200, f'{r}'
        r = env.curl_get(url, options=[
            '--http1.1', '-H', f'x: {val[:500]}', '-H', f'x: {val[:523]}'
        ])
        assert 400 == r.response["status"]
        r = env.curl_get(url, options=[
            '-H', f'x: {val[:500]}', '-H', f'x: {val[:520]}'
        ])
        assert 431 == r.response["status"]
    # test header field count, LimitRequestFields (default 100)
    # see #201: several headers with same name are merged and count only once
    def test_h2_200_12(self, env):
        """Repeated same-name headers merge and count as one field."""
        url = env.mkurl("https", "cgi", "/")
        opt = []
        # curl sends 3 headers itself (user-agent, accept, and our AP-Test-Name)
        for i in range(97):
            opt += ["-H", "x: 1"]
        r = env.curl_get(url, options=opt)
        assert r.response["status"] == 200
        r = env.curl_get(url, options=(opt + ["-H", "y: 2"]))
        assert r.response["status"] == 200
    # test header field count, LimitRequestFields (default 100)
    # different header names count each
    def test_h2_200_13(self, env):
        """Distinct header names each count toward LimitRequestFields."""
        url = env.mkurl("https", "cgi", "/")
        opt = []
        # curl sends 3 headers itself (user-agent, accept, and our AP-Test-Name)
        for i in range(97):
            opt += ["-H", f"x{i}: 1"]
        r = env.curl_get(url, options=opt)
        assert r.response["status"] == 200
        r = env.curl_get(url, options=(opt + ["-H", "y: 2"]))
        assert 431 == r.response["status"]
    # test "LimitRequestFields 0" setting, see #200
    def test_h2_200_14(self, env):
        """LimitRequestFields 0 disables the field-count limit entirely."""
        conf = H2Conf(env)
        conf.add("""
            LimitRequestFields 20
            """)
        conf.add_vhost_cgi()
        conf.install()
        assert env.apache_restart() == 0
        url = env.mkurl("https", "cgi", "/")
        opt = []
        for i in range(21):
            opt += ["-H", "x{0}: 1".format(i)]
        r = env.curl_get(url, options=opt)
        assert 431 == r.response["status"]
        conf = H2Conf(env)
        conf.add("""
            LimitRequestFields 0
            """)
        conf.add_vhost_cgi()
        conf.install()
        assert env.apache_restart() == 0
        url = env.mkurl("https", "cgi", "/")
        opt = []
        for i in range(100):
            opt += ["-H", "x{0}: 1".format(i)]
        r = env.curl_get(url, options=opt)
        assert r.response["status"] == 200
    # the uri limits
    def test_h2_200_15(self, env):
        """URI length limits; an overlong :method pseudo-header RSTs the stream."""
        conf = H2Conf(env)
        conf.add("""
            LimitRequestLine 48
            """)
        conf.add_vhost_cgi()
        conf.install()
        assert env.apache_restart() == 0
        url = env.mkurl("https", "cgi", "/")
        r = env.curl_get(url)
        assert r.response["status"] == 200
        url = env.mkurl("https", "cgi", "/" + (48*"x"))
        r = env.curl_get(url)
        assert 414 == r.response["status"]
        # nghttp sends the :method: header first (so far)
        # trigger a too long request line on it
        # the stream will RST and we get no response
        url = env.mkurl("https", "cgi", "/")
        opt = ["-H:method: {0}".format(100*"x")]
        r = env.nghttp().get(url, options=opt)
        assert r.exit_code == 0, r
        assert not r.response
    # invalid chars in method
    def test_h2_200_16(self, env):
        """A :method containing spaces/path chars is rejected (no response)."""
        if not env.h2load_is_at_least('1.45.0'):
            pytest.skip(f'nhttp2 version too old')
        conf = H2Conf(env)
        conf.add_vhost_cgi()
        conf.install()
        assert env.apache_restart() == 0
        url = env.mkurl("https", "cgi", "/hello.py")
        opt = ["-H:method: GET /hello.py"]
        r = env.nghttp().get(url, options=opt)
        assert r.exit_code == 0, r
        assert r.response is None
        url = env.mkurl("https", "cgi", "/proxy/hello.py")
        r = env.nghttp().get(url, options=opt)
        assert r.exit_code == 0, r
        assert r.response is None
|
from django.shortcuts import render,redirect
from .models import Appointment,Service
from .utils import random_string_generator
from accounts.models import Employee,Store
from .models import Complain
from django.contrib import messages
from django.views.generic.edit import CreateView, UpdateView, DeleteView
import random
# Create your views here.
def book(request):
    """Render the booking page with all employee names and service names."""
    employees = Employee.objects.values_list('name', flat=True)
    # Materialize the queryset once (replaces the manual copy loop).
    services = list(Service.objects.values_list('name', flat=True))
    context = {
        'employees': employees,
        'services': services,
    }
    return render(request, 'book.html', context=context)
def home(request):
    """Render the landing page listing every service."""
    context = {'services': Service.objects.all()}
    return render(request, 'home.html', context)
def contact(request):
    """Show the contact page; on POST, store the complaint and confirm."""
    if request.method == 'POST':
        name = request.POST.get('name')
        message = request.POST.get('message')
        email = request.POST.get('email')
        subject = request.POST.get('subject')
        # The created Complain row is not needed afterwards (was bound to
        # an unused variable).
        Complain.objects.create(name=name, email=email, subject=subject, message=message)
        # Fix: user-facing message was missing a space after the period.
        messages.info(request, "Thanks for reaching out. We will get in touch soon!!!")
        return redirect('contact')
    return render(request, 'contact.html')
def services(request):
    """Render the static services overview page."""
    return render(request,'service.html')
def appointment(request):
    """Create an Appointment from the posted booking form, then go home."""
    if request.method == 'POST':
        fields = {
            'name': request.POST.get('name'),
            'email': request.POST.get('email'),
            'service': request.POST.get('service'),
            'contact': request.POST.get('contact'),
            'date': request.POST.get('date'),
            'time': request.POST.get('time'),
            'note': request.POST.get('note'),
        }
        Appointment.objects.create(**fields)
    # Both the POST and non-POST paths end up on the home page.
    return redirect('home')
def success(request):
    """Render the booking confirmation page from the posted form data."""
    order_id = random_string_generator()
    email = request.POST.get('email')
    service = request.POST.get('service')
    date = request.POST.get('date')
    time = request.POST.get('time')
    name = request.POST.get('name')
    # BUG FIX: the context previously used the bare name `contact`, which
    # resolved to the module-level contact() view function instead of the
    # posted phone number.
    contact_number = request.POST.get('contact')
    stylist = request.POST.get('employee')
    if stylist == 'Random Stylist':
        stylists = Employee.objects.values_list('name', flat=True)
        stylist = random.choice(stylists)
    service_obj = Service.objects.filter(name=service).first()
    # The service may be missing from the catalog -> no price available.
    # (Replaces a bare except that swallowed every error.)
    amount = service_obj.price if service_obj is not None else "N/A"
    context = {
        'email': email,
        'name': name,
        'service': service,
        'stylist': stylist,
        'date': date,
        'time': time,
        'order_id': order_id,
        'contact': contact_number,
        'amount': amount,
    }
    return render(request, 'success.html', context=context)
def search(request):
    """Store search page: on POST, look up a store by its exact name."""
    store = None
    stores = Store.objects.all()
    if request.method == 'POST':
        wanted = request.POST.get('name')
        for candidate in stores:
            if wanted == candidate.get_name():
                store = candidate
                break
    store_list = [s.get_name() for s in stores]
    context = {
        'stores': store_list,
        'store': store,
    }
    return render(request, 'search.html', context=context)
|
from __future__ import annotations
import errno
import os
import shutil
from pathlib import Path
from typing import Union
from .string_utilities import is_blank
def file_checksum(file_path: Union[str, Path], hashlib_callable):
    """Return the hex digest of *file_path* computed with *hashlib_callable*.

    Returns None when the path is not a regular file or the hash factory is
    not callable. The file is consumed in 4 KiB chunks to bound memory use.
    """
    if not (os.path.isfile(file_path) and callable(hashlib_callable)):
        return None
    digest = hashlib_callable()
    with open(file_path, "rb") as stream:
        for chunk in iter(lambda: stream.read(4096), b""):
            digest.update(chunk)
    return digest.hexdigest()
def silent_create_dirs(dir_path: Union[str, Path]):
    """Create *dir_path* (and parents), silently skipping if it already exists.

    Still raises OSError when the path exists but is not a directory, or on
    any other failure (permissions, read-only filesystem, ...).
    """
    # makedirs(..., exist_ok=True) reproduces the old hand-rolled
    # errno.EEXIST check exactly: an existing directory is accepted,
    # an existing non-directory still raises (FileExistsError/OSError).
    os.makedirs(os.path.abspath(dir_path), exist_ok=True)
def abspath_if_relative(relative_path: Union[str, Path], relative_to: Union[str, Path]):
    """Resolve *relative_path* against *relative_to* when it is relative.

    Blank inputs are passed through untouched; an already-absolute path is
    simply normalised to its absolute form.

    Example:
    >>> abspath_if_relative('foo/bar/baz', relative_to='/tmp')
    '/tmp/foo/bar/baz'
    """
    if is_blank(relative_path) or is_blank(relative_to):
        return relative_path
    if os.path.isabs(relative_path):
        return os.path.abspath(relative_path)
    return os.path.abspath(os.path.join(relative_to, relative_path))
def move_and_create_dest(src_path: Union[str, Path], dst_dir: Union[str, Path]):
    """Move *src_path* into the directory *dst_dir*, creating it if needed.

    Returns the path of the moved file inside *dst_dir*.
    """
    silent_create_dirs(dst_dir)
    shutil.move(src_path, dst_dir)
    moved_name = os.path.basename(src_path)
    return os.path.join(dst_dir, moved_name)
def silent_remove(file_or_dir_path: Union[str, Path]):
    """Delete a file or a directory tree; a missing path is not an error.

    Blank paths are ignored. Other OS-level failures still propagate.
    """
    if is_blank(file_or_dir_path):
        return None
    remover = shutil.rmtree if os.path.isdir(file_or_dir_path) else os.remove
    try:
        remover(file_or_dir_path)
    except OSError as err:
        # ENOENT (already gone) is fine; anything else is a real failure.
        if err.errno != errno.ENOENT:
            raise
def switch_extension(file_path: str, new_extension: str):
    """Return *file_path* with its extension replaced by *new_extension*.

    A blank extension leaves the path untouched; a missing leading dot is
    added automatically.
    """
    if is_blank(new_extension):
        return file_path
    suffix = new_extension if new_extension.startswith(".") else "." + new_extension
    directory = os.path.dirname(file_path)
    stem = os.path.splitext(os.path.basename(file_path))[0]
    return os.path.join(directory, stem + suffix)
|
# -*- coding: utf-8 -*-
from functools import wraps
from urwid import AttrMap
from pyquery import PyQuery
from ..ui import IfWidget
def convert_string_to_node(string):
    """Parse an HTML/XML fragment string into a PyQuery node tree."""
    node = PyQuery(string)
    return node
def detect_class(f):
    """Decorator: wrap the widget built by *f* in an AttrMap for every CSS
    class found in the source node's ``class`` attribute."""
    @wraps(f)
    def _detect_class(*args, **kwargs):
        pq = kwargs.get('node', args[0])
        raw_classes = (pq.attr['class'] or '').split(' ')
        widget = f(*args, **kwargs)
        for css_class in raw_classes:
            if not css_class:
                continue
            widget = AttrMap(widget, css_class)
        return widget
    return _detect_class
def detect_if(f):
    """Decorator: wrap the widget built by *f* in an IfWidget whenever the
    source node carries an ``if`` attribute; state is True only for the
    literal string 'True'."""
    @wraps(f)
    def _detect_if(*args, **kwargs):
        pq = kwargs.get('node', args[0])
        condition = pq.attr['if'] or ''
        widget = f(*args, **kwargs)
        if condition:
            widget = IfWidget(widget, state=(condition == 'True'))
        return widget
    return _detect_if
|
def prompt_for_city():
    """
    Asks user to specify a city to analyze.
    Returns:
        (str) city - name of the city to analyze
    """
    valid_cities = ("chicago", "new york city", "washington")
    while True:
        choice = input("Enter a city to analyze (Chicago, New York City, or Washington): ").lower()
        if choice in valid_cities:
            return choice
        print("Invalid city entered.\n")
def prompt_for_month(valid_months):
    """
    Asks user to specify a month to filter on.
    Returns:
        (str) month - month to filter on
    """
    while True:
        choice = input("Enter a month to filter on (All, January, February, March, April, May or June): ").lower()
        if choice in valid_months:
            return choice
        print("Invalid month entered.\n")
def prompt_for_day():
    """
    Asks user to specify a day to filter on.
    Returns:
        (str) day - day to filter on
    """
    valid_days = ("all", "monday", "tuesday", "wednesday", "thursday",
                  "friday", "saturday", "sunday")
    while True:
        choice = input("Enter a day to filter on (All, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or Sunday): ").lower()
        if choice in valid_days:
            return choice
        print("Invalid day entered.\n")
def prompt_for_raw_data(is_continue=False):
    """
    Prompts the user for whether to display raw data.
    Args:
        (bool) is_continue - Whether this is a continuation prompt
    """
    print()
    if is_continue:
        question = "Would you like to continue? Enter yes or no: "
    else:
        question = "Would you like to view the raw data? Enter yes or no: "
    while True:
        answer = input(question).lower()
        if answer in ("yes", "no"):
            return answer
        print("Invalid entry. Enter yes or no")
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 05:37:45 2020
@author: $Hamtchi $
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 05:31:13 2020
@author: $Hamtchi $
"""
import os
import re
import xlsxwriter
# Create a workbook and add a worksheet.
workbook = xlsxwriter.Workbook('excelFile.xlsx')
worksheet = workbook.add_worksheet()
# Add a bold format to use to highlight cells.
# NOTE(review): this format is created but never applied below — confirm
# whether the header row was meant to be written in bold.
bold = workbook.add_format({'bold': 1})
def liste_fichier_repertoire(folder):
    """Walk *folder* recursively and return (files, directories).

    Paths are assembled with a literal backslash because the rest of the
    script splits on "\\" (Windows-style paths).
    """
    files, directories = [], []
    for root, dirs, names in os.walk(folder):
        directories.extend(root + "\\" + d for d in dirs)
        files.extend(root + "\\" + n for n in names)
    return files, directories
folder = r"."
file, fold = liste_fichier_repertoire(folder)
# BUG FIX: the original ran fichiers.append(file) / folds.append(fold),
# appending the *entire list* once per iteration; append each entry instead.
fichiers = []
folds = []
for f in file:
    fichiers.append(f)
    print("fichier ", f)
for f in fold:
    folds.append(f)
    print("répertoire ", f)
# One row per matching file: name, folder, and the @series/@author/TestCase
# annotations found in it.
tab = [["fileNAme", "folderName", "series", "author", "testCase"]]
for path in file:
    series = ""
    author = ""
    testCase = ""
    fileName = ""
    folderName = ""
    lignes = []
    # `with` guarantees the handle is closed even if a read fails
    # (the original only closed it at the end of the loop body).
    with open(path, "r") as filin:
        chaine = filin.readlines()
    if chaine.count("@series = TC_BCP43547\n") >= 1:
        listeString = path.split("\\")
        fileName = listeString[-1]
        folderName = listeString[-2]
    for ligne in chaine:
        if re.search('^@series', ligne):
            series = series + " - " + ligne[:-1]
        if re.search('^@author', ligne):
            author = author + " - " + ligne[:-1]
        if re.search('^TestCase ', ligne):
            testCase = testCase + " - " + ligne[:-1]
    lignes.append(fileName)
    lignes.append(folderName)
    lignes.append(series)
    lignes.append(author)
    lignes.append(testCase)
    if lignes != ["", "", "", "", ""]:
        tab.append(lignes)
for i in tab:
    print(i)
# Start from the first cell below the headers.
row = 1
col = 0
for fileNAme, folderName, series, author, testCase in (tab):
    worksheet.write_string(row, col, fileNAme)
    worksheet.write_string(row, col + 1, folderName)
    worksheet.write_string(row, col + 2, series)
    worksheet.write_string(row, col + 3, author)
    worksheet.write_string(row, col + 4, testCase)
    row += 1
workbook.close()
|
# Collect a small user profile interactively and echo it back.
profile = {
    'age': input("Input your age: "),
    # BUG FIX: prompt read "Input yor first name" — typo corrected.
    'first_name': input("Input your first name: "),
    'last name': input("Input your last name: "),
    'email': input("Input your email: ")
}
for key, value in profile.items():
    print(f'{key}: {value}')
|
# -*- coding: utf-8 -*-
import argparse
import json
import os
import pandas as pd
import csv
import re
from collections import OrderedDict
import sys
sys.path.append(os.path.join(os.path.expanduser('~'), 'mc', 'utils'))
from tokenizer import Tokenizer
from tqdm import tqdm
def main():
    """Entry point: parse CLI arguments, then run preprocessing."""
    prepro(get_args())
def get_args():
    """Build and parse the command-line arguments for preprocessing."""
    home = os.path.expanduser("~")
    base_dir = os.path.join(home, 'tqa')
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', "--base_dir", default=base_dir)
    parser.add_argument('-s', "--source_dir", default=os.path.join(base_dir, "data"))
    parser.add_argument('-t', "--target_dir", default=os.path.join(base_dir, 'prepro', 'data'))
    parser.add_argument('-d', "--debug", action='store_true')
    parser.add_argument('-top', "--single-topic", action='store_true')  # store_true -> False!!
    parser.add_argument('-p', "--if-pair", action='store_true')
    parser.add_argument('-dq', "--diagram_questions", action='store_true')
    parser.add_argument('-to', "--tokenizer", default='spacy')
    # TODO : put more args here
    return parser.parse_args()
def remove_delim(word):
    """Strip every literal '.' character from *word*."""
    return word.replace(".", "")
def prepro(args):
    """Preprocess every dataset split in order."""
    for split in ('train', 'test', 'val'):
        prepro_each(args, split)
def prepro_each(args, data_type):
    """Preprocess one TQA split ('train'/'test'/'val') into tabular rows.

    Loads tqa_v1_<split>.json, gathers per-lesson topic texts, flattens the
    non-diagram questions into rows and records max-size statistics used to
    dimension model inputs; results are written via save().
    """
    # open
    source_path = os.path.join(args.source_dir, "{}/tqa_v1_{}.json".format(data_type, data_type))
    source_data = json.load(open(source_path, 'r'))
    # tabularize
    tabular = []
    pair = []
    # Running maxima over the whole split (token counts and list lengths).
    stat = {}
    stat['max_topic_words'] = 0
    stat['max_question_words'] = 0
    stat['max_answer_words'] = 0
    stat['max_answers'] = 0
    stat['max_topics'] = 0
    tokenizer = Tokenizer(args.tokenizer)
    tokenize = tokenizer.tokenize
    for index_l, lesson in enumerate(tqdm(source_data)):
        topics = []
        stat['max_topics'] = max(stat['max_topics'], len(lesson['topics']))
        for key_t, topic in lesson['topics'].items():
            # NOTE(review): `topic` is the raw topic dict here; this was
            # probably meant to tokenize topic['content']['text'] — confirm
            # what Tokenizer does with a dict before changing it.
            stat['max_topic_words'] = max(stat['max_topic_words'], len(tokenize(topic)))
            topics.append(topic['content']['text'])
        def getqs(q_raw_data, with_diagram=False, if_pair=False, stat=None):
            # Flatten one lesson's questions; closes over `topics` above.
            q_tabular = []
            for key_q, q in q_raw_data.items():
                question = q['beingAsked']['processedText']
                stat['max_question_words'] = max(stat['max_question_words'], len(tokenize(question)))
                # handling weird exception of questions without any correct answer
                # WARNING: obviously this rules out the entire test set effectively
                if 'correctAnswer' not in q:
                    #print(q['globalID'])
                    continue
                correct_answer = q['correctAnswer']['processedText']
                ans_sorted = OrderedDict(sorted(q['answerChoices'].items()))
                answers = []
                correct_index = -1
                # BUG FIX: `.iteritems()` is Python-2-only and raises
                # AttributeError under Python 3; `.items()` works on both.
                for index_a, (key_a, answer) in enumerate(ans_sorted.items()):
                    answer_str = answer['processedText']
                    idstruct = answer['idStructural']
                    answers.append(answer_str)
                    stat['max_answer_words'] = max(stat['max_answer_words'], len(tokenize(answer_str)))
                    if answer_str == correct_answer or tokenize(correct_answer) == tokenize(remove_delim(idstruct)):
                        correct_index = index_a
                stat['max_answers'] = max(stat['max_answers'], len(answers))
                ltos = listToString()
                relev_topic = ltos.run(topics)
                # TODO: process diagrams to CNN features
                # ignore data if the question does not have a valid answer
                if correct_index >= 0:
                    correct_answer = answers[correct_index]
                    answer_string = ltos.run(answers)
                    if if_pair:
                        for ans in answers:
                            q_tabular.append({'question':question, 'correct_answer':correct_answer, 'wrong_answer':ans, 'topic':relev_topic, 'id': q['globalID']})
                    else:
                        q_tabular.append({'question':question, 'correct_answer':correct_index, 'answers':answer_string, 'topic':relev_topic, 'id': q['globalID']})
            return q_tabular
        nondq_raw = lesson['questions']['nonDiagramQuestions']
        nondq_tab = getqs(nondq_raw, False, False, stat)
        if args.if_pair:
            nondq_pair = getqs(nondq_raw, False, True, stat)
            pair += nondq_pair
        tabular += nondq_tab
        '''if args.diagram_questions:
            dq_tab = getqs(nondq_raw, False, stat)
            dq_raw = lesson['questions']['diagramQuestions']
            tabular += dq_tab'''
    # save
    stats = {u'topic_size': stat['max_topic_words'], u'topic_num': stat['max_topics'], u'question_size': stat['max_question_words'], u'answer_size': stat['max_answers'], u'answer_num': stat['max_answer_words']}
    save(args, tabular, data_type, stats)
    if args.if_pair:
        save(args, pair, '{}_{}'.format(data_type, 'pair'), stats)
class listToString():
    """Render a list of strings as a single bracketed, quoted string.

    Single and double quotes inside the items are stripped so the output
    can be embedded in a TSV cell without breaking quoting.
    """
    def __init__(self):
        self.regex_quote = re.compile(r'\'*')
        self.regex_doublequote = re.compile(r'\"*')
    def run(self, list_data):
        """Return e.g. ["a", "b"] for ['a', 'b'] (quotes stripped)."""
        cleaned = [
            self.regex_quote.sub(r'', self.regex_doublequote.sub(r'', datum))
            for datum in list_data
        ]
        body = ''.join('"{}", '.format(item) for item in cleaned)
        return '[' + body[:-2] + ']'
def save(args, data, data_type, stats):
    """Write rows to a TSV and statistics to a JSON file in args.target_dir.

    The '_full' suffix marks outputs built from all topics rather than a
    single one.
    """
    suffix = '' if args.single_topic else '_full'
    data_path = os.path.join(args.target_dir, "data_{}{}.tsv".format(data_type, suffix))
    pd.DataFrame(data).to_csv(data_path, index=False, sep='\t', encoding='utf-8', quoting=csv.QUOTE_NONE)
    stat_path = os.path.join(args.target_dir, "stat_{}{}.json".format(data_type, suffix))
    with open(stat_path, 'w') as handle:
        json.dump(stats, handle)
if __name__ == "__main__":
main()
|
from sanic import Blueprint
from sanic.response import json
zh = Blueprint('zh', url_prefix='/zh')
@zh.post("/hownet/<target>")
async def extract(request, target):
    """
    $ curl -d '{"word":"大学生"}' \
      -H "Content-Type: application/json" -X POST \
      localhost:1700/zh/hownet/sememes | json
    $ curl -d '{"word":"大学生"}' \
      -H "Content-Type: application/json" -X POST \
      localhost:1700/zh/hownet/sense | json
    :param request:
    :return:
    """
    from sagas.zh.hownet_procs import hownet_procs
    rd = request.json
    merge = rd.get('merge', True)
    pos = rd.get('pos', '*')
    dispatch = {
        'sememes': lambda: hownet_procs.build_sememe_trees(rd['word'], merge=merge, pos=pos),
        'sense': lambda: hownet_procs.get_sense(rd['word'], pos),
        'by_sense_id': lambda: hownet_procs.get_sense_by_id(rd['id']),
    }
    handler = dispatch.get(target)
    return json(handler() if handler else [])
|
# coding: utf-8
# In[41]:
def pairs(arr, finds):
    """Return the set of (x, y) pairs from *arr* with x + y == finds.

    x is the element encountered earlier in the list; elements that close a
    pair are not re-added to the candidate pool.
    """
    seen = set()
    found = set()
    for current in arr:
        complement = finds - current
        if complement in seen:
            found.add((complement, current))
        else:
            seen.add(current)
    return found
# In[42]:
# Notebook smoke-check: expect {(1, 3), (2, 2)} for target sum 4
# (result is discarded; only visible as a notebook cell output).
pairs([1,3,2,2],4)
# In[ ]:
# In[ ]:
|
"""
Schrijf een recursieve functie grootste_getal die
het grootste getal in een lijst zoekt.
"""
def grootste_getal(lijst):
    """Recursively find the largest number in *lijst*.

    Returns None for an empty list. BUG FIX: the original deleted elements
    from the caller's list (`del lijst[0]`), destroying the input; this
    version recurses on copies and leaves the argument untouched.
    """
    if len(lijst) == 0:
        return None
    if len(lijst) == 1:
        return lijst[0]
    # Drop the smaller of the first two elements and recurse on a copy.
    if lijst[0] < lijst[1]:
        return grootste_getal(lijst[1:])
    return grootste_getal(lijst[:1] + lijst[2:])
# Tests to check whether the implementation is correct
assert grootste_getal([2, 15, 5, 16, 20, 9, 3, 15, 6]) == 20
assert grootste_getal([5, 11, 5, 14, 12, 2, 5, 14, 11]) == 14
assert grootste_getal([10, 7, 19, 12, 5, 3, 13, 9, 4]) == 19
assert grootste_getal([17, 5, 17, 5, 6, 2, 3, 16, 6]) == 17
import time, psutil
import matplotlib.pyplot as plt
import sklearn
print ('sklearn version: ', sklearn.__version__) # 0.24
from sklearn.datasets import load_wine
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.compose import ColumnTransformer
########################
# Load the wine dataset as pandas objects.
X, y = load_wine(as_frame=True, return_X_y=True) # available from version >=0.23
########################
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
                                                    random_state=0)
X_train.head(3)
### standardizing transformation that leads to numpy array (dataframe in newer release)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler() #.set_output(transform='pandas') #change here, doesn't exist in version below 1.2
# AttributeError: 'StandardScaler' object has no attribute 'set_output'
scaler.fit(X_train)
X_test_scaled = scaler.transform(X_test)
print ('type after scaling: ', type(X_test_scaled))
# X_test_scaled.head(3) # throws error
###fetch openml (new addition in ver 1.2 is parser='pandas', memory efficient)
start_t = time.time()
X, y = fetch_openml("titanic", version=1, as_frame=True, return_X_y=True,) # parser="pandas") # parser as a keyword in the 1.2 version
X = X.select_dtypes(["number", "category"]).drop(columns=["body"])
print ('check types: ', type(X), '\n', X.head(3))
print ('check shapes: ', X.shape)
end_t = time.time()
print ('time taken: ', end_t-start_t)
process_names = [proc.name() for proc in psutil.process_iter()]
print (process_names)
print('cpu percent: ', psutil.cpu_percent())
#########################################
###### for simplicity ignore the nans
##########################################
print ('check for nans in columns: ', '\n', X.isna().sum())
# drop all the nans for making it suitable for GradientBoostingRegressor
X_NotNan = X.dropna(how='any', inplace=False)
print ('check shapes after dropping nans: ', X_NotNan.shape)
nonan_indices = X_NotNan.index.to_list()
y_NotNan = y[y.index.isin(nonan_indices)]
print ('check shape y: ', y_NotNan.shape)
# BUG FIX: the original referenced the undefined name `X_nonan`
# here (NameError); the dataframe is called X_NotNan.
print ('check for indices: ', X_NotNan.index.to_list())
#### pipeline for encoder + gradient boosting
from sklearn.preprocessing import OrdinalEncoder
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import GradientBoostingRegressor #HistGradientBoostingRegressor
from sklearn.compose import ColumnTransformer
categorical_features = ["pclass", "sex", "embarked"]
# Ordinal-encode the categorical columns, pass the rest through unchanged,
# then fit a gradient-boosted regressor on the NaN-free rows.
model = make_pipeline(ColumnTransformer(transformers=[("cat", OrdinalEncoder(), categorical_features)],
                      remainder="passthrough",),
                      GradientBoostingRegressor(random_state=0),).fit(X_NotNan, y_NotNan)
# gradientboosting doesn't work with nan entries
##########################
# partial dependence
##########################
from sklearn.inspection import PartialDependenceDisplay
from sklearn.inspection import plot_partial_dependence
#fig, ax = plt.subplots(figsize=(14, 4), constrained_layout=True)
#disp = PartialDependenceDisplay.from_estimator(model,
#          X_NotNan, features=["age", "sex", ("pclass", "sex")],
#          categorical_features=categorical_features, ax=ax,)
# from_estimator method is non existent in older versions, but what about categorical features
#fig.savefig('./part_disp_old.png', dpi=200)
fig, ax = plt.subplots(figsize=(12, 6))
ax.set_title('GradientBoostingRegressor')
# plot_partial_dependence is the pre-1.2 API; newer sklearn replaces it
# with PartialDependenceDisplay.from_estimator (see commented block above).
GBR_disp = plot_partial_dependence(model, X_NotNan, ['age', 'fare', ('age', 'fare')], ax=ax)
fig.savefig('./part_disp_old_NotCat.png', dpi=200)
#fig, ax = plt.subplots(figsize=(12, 6))
#ax.set_title('GradientBoostingRegressor')
#GBR_disp = plot_partial_dependence(model, X_NotNan, ['age', 'sex', ('age', 'sex')], ax=ax)
#fig.savefig('./part_disp_old_wCat.png', dpi=200)
# valueerror
|
from datetime import datetime
from .field import Field
class DateField(Field):
    """A Field holding a calendar date; defaults to today's date."""
    def __init__(self, name):
        # Default is a factory so "today" is evaluated lazily, at use time.
        super(DateField, self).__init__(name, lambda: datetime.now().date())
    def fromWeb(self, value):
        """Parse an ISO 'YYYY-MM-DD' form value into a datetime.date."""
        return datetime.strptime(value, "%Y-%m-%d").date()
|
# Generated by Django 2.1.4 on 2019-06-14 20:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated migration: switches bid timestamps to auto_now and pins
# CASCADE deletion with explicit related_names on the bid foreign keys.
class Migration(migrations.Migration):
    # Must be applied after create_bids 0015.
    dependencies = [
        ('create_bids', '0015_auto_20190614_2347'),
    ]
    operations = [
        migrations.AlterField(
            model_name='constructionbid',
            name='date_created',
            # auto_now=True: refreshed on every save, not only at creation.
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='constructionbid',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='constbidder', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='desktopbid',
            name='Tender_ID',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='deskbidder', to='create_tender.Desktop_Tender'),
        ),
        migrations.AlterField(
            model_name='desktopbid',
            name='date_created',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
from flask import Flask, render_template, request, url_for, redirect, session
import sqlite3
app = Flask(__name__)
# BUG FIX: the secret key was previously assigned on the Flask *class*
# (Flask.secret_key = ...), which affects every Flask app in the process;
# configure this instance instead.
app.secret_key = 'notasecretkey'
# check_same_thread=False lets the single shared connection be used from
# Flask worker threads. NOTE(review): one shared cursor across requests is
# race-prone — confirm whether per-request cursors are needed.
conn = sqlite3.connect('faceback.db', check_same_thread=False)
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS users (email TEXT, password TEXT, username TEXT);")
conn.commit()
cursor.execute("CREATE TABLE IF NOT EXISTS posts (author TEXT, title TEXT, content TEXT, date TEXT);")
conn.commit()
@app.route('/')
@app.route('/home', methods = ["GET", "POST"])
def home():
    """Feed page: POST publishes a post, GET renders the cached feed."""
    if request.method == 'POST':
        new_post = (
            session.get('username', None),
            request.form['title'],
            request.form['content'],
        )
        cursor.execute("INSERT INTO posts VALUES (?, ?, ?, datetime('now'));", new_post)
        conn.commit()
        cursor.execute("SELECT * FROM posts ORDER BY 4 DESC LIMIT 15;")
        session['posts'] = cursor.fetchall()
        return redirect(url_for('home'))
    return render_template('home.html', user=session.get('username', None), posts_html=session.get('posts', None))
@app.route('/login/', methods = ["GET", "POST"])
def login():
    """Log a user in by matching email+password against the users table."""
    # NOTE(review): passwords are stored and compared in plain text — use a
    # salted hash (e.g. werkzeug.security) before production.
    error = None
    if request.method == 'POST':
        email = request.form['emaili']
        password = request.form['password']
        cursor.execute("SELECT username FROM users WHERE email = ? AND password = ?", (email, password))
        username = cursor.fetchone()
        if username is not None:
            cursor.execute("SELECT * FROM posts ORDER BY 4 DESC LIMIT 15;")
            posts = cursor.fetchall()
            session['username'] = username[0]
            session['posts'] = posts
            print(posts)
            return redirect(url_for('home'))
        else:
            cursor.execute("SELECT email FROM users WHERE email = ?", (email,))
            if cursor.fetchone() is not None:
                # BUG FIX: message was misspelled ("inccorect") and blamed
                # the username; this branch means the password was wrong.
                error = "The password is incorrect"
            else:
                error = "The email does not exist"
            return render_template('login.html' , error_html=error)
    return render_template('login.html', error_html=error)
@app.route('/registration', methods = ["POST", "GET"])
def registration():
    """Register a new user; rejects short passwords and duplicate usernames."""
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        username = request.form['username']
        if len(password) < 5:
            # BUG FIX: the original only set the error message and then
            # registered the account anyway; reject short passwords here.
            # (Message was also truncated: "...at least 5 c".)
            error = "The password must contain at least 5 characters"
            return render_template('registration.html', error_html=error)
        cursor.execute("SELECT username FROM users WHERE username = ?;", (username,))
        if len(cursor.fetchall()) == 0:
            cursor.execute("INSERT INTO users VALUES (?, ?, ?);", (email, password, username))
            conn.commit()
            session['username'] = username
            return redirect(url_for('home'))
        else:
            # BUG FIX: typo "exsits" -> "exists".
            error = "This username already exists"
            return render_template('registration.html', error_html=error)
    return render_template('registration.html', error_html=None)
@app.route('/logout')
def logout():
    """Clear the logged-in user and show the logout page."""
    session['username'] = None
    return render_template('logout.html')
if __name__ == '__main__':
app.run(debug=True) |
class Product:
    """A simple store item with a name, price, and several size variants."""
    __vendor_message = " "  # private vendor note, shown via get_vendor_message
    name = " "              # class-level defaults, shadowed per instance
    price = " "
    size = " "
    unit = " "
    def __init__(self, name):
        """Initialise the product with the fixed demo sizes and units."""
        self.name = name
        self.unit = "gr,"
        self.unit1 = "gr,"
        self.unit2 = "gr,"
        self.unit3 = "gr,"
        self.unit4 = "gr,"
        self.size = 60
        self.size1 = 75
        self.size2 = 370
        self.size3 = 600
        self.size4 = 220
    def get_vendor_message(self):
        """Print the (name-mangled) vendor message."""
        print(self.__vendor_message)
    def set_price(self, price):
        """Record the selling price."""
        self.price = price
p = Product("Indomie goreng,")
p.set_price(3000)
p1 = Product("Pepsodent,")
p1.set_price(12000)
p2 = Product("susu kaleng frisien flag,")
p2.set_price(23000)
p3 = Product("wafer roll coklat,")
p3.set_price(14000)
p4 = Product("coco drink,")
p4.set_price(2000)
# print p.__vendor_message
p.get_vendor_message()
print("Detail Barang")
print("----------------------------------------------------------------------")
# NOTE(review): the size/unit attributes below are all read from `p`, not
# from each product's own instance (p.size1 vs p1.size1). Output is the
# same today only because every instance gets identical demo values.
print ("%s | Dengan Berat : %s %s Harganya : Rp.%d" % (p.name, p.size, p.unit, p.price))
print ("%s | Dengan Berat : %s %s Harganya : Rp.%d" % (p1.name, p.size1, p.unit1, p1.price))
print ("%s| Dengan Berat : %s %s Harganya : Rp.%d" % (p2.name, p.size2, p.unit2, p2.price))
print ("%s | Dengan Berat : %s %s Harganya : Rp.%d" % (p3.name, p.size3, p.unit3, p3.price))
print ("%s | Dengan Berat : %s %s Harganya : Rp.%d" % (p4.name, p.size4, p.unit4, p4.price))
|
def read_conll(fname):
    """Read a CoNLL file into a list of sentences.

    Each sentence is a list of tuples (one tuple per token line); blank
    lines separate sentences. BUG FIX: a file that did not end with a blank
    line used to silently drop its final sentence — it is now flushed after
    the loop.
    """
    sentences = []
    current = []
    with open(fname) as f:
        for raw_line in f:
            fields = raw_line.strip().split()
            if len(fields) > 0:
                current.append(tuple(fields))
            else:
                sentences.append(current)
                current = []
    if current:
        sentences.append(current)
    return sentences
# Parse the training split and serialise it for faster reloading.
lst = read_conll('data/conll2003.en.train.txt')
import pickle
# NOTE(review): the file handle passed to pickle.dump is never closed —
# consider wrapping in a `with` block.
pickle.dump(lst, open("conll2003_train.pkl", "wb"))
#print(lst)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import retrain as train
import os
import threading
def initModelOutputFolders(model_name, image_dir):
    """Point every retrain output path at ../Outputs/<model_name>/."""
    out_root = '../Outputs/' + model_name + '/'
    train.image_dir = image_dir
    train.output_graph = out_root + 'output_graph.pb'
    train.intermediate_output_graphs_dir = out_root + 'intermediate/'
    train.output_labels = out_root + 'output_labels.txt'
    train.summaries_dir = out_root + 'summaries/'
    train.bottleneck_dir = out_root + 'bottleneck_values/'
    train.saved_model_dir = out_root + 'Exported_graph/'
    train.model_name = model_name
    return
def logDetailsOfModel():
    """Write the current hyperparameters to ../Outputs/<model_name>/hyperparameters.txt.

    NOTE(review): reads the *global* `model_name`, which must be assigned
    before calling — consider passing it as a parameter.
    """
    out_root = '../Outputs/' + model_name + '/'
    if not os.path.exists(out_root):
        os.makedirs(out_root)
    # BUG FIX: `with` guarantees the log file is closed even if a write
    # fails (the original leaked the handle on error) and stops shadowing
    # the `file` builtin.
    with open(out_root + 'hyperparameters.txt', 'w') as log:
        log.write('----------------------------------------------------' + '\n')
        log.write('---------------- HYPERPARAMETERS -----------------' + '\n')
        log.write('----------------------------------------------------' + '\n\n')
        log.write('image_dir = ' + str(train.image_dir) + '\n')
        log.write('output_graph = ' + str(train.output_graph) + '\n')
        log.write('intermediate_output_graphs_dir = ' + str(train.intermediate_output_graphs_dir) + '\n')
        log.write('output_labels = ' + str(train.output_labels) + '\n')
        log.write('summaries_dir = ' + str(train.summaries_dir) + '\n')
        log.write('bottleneck_dir = ' + str(train.bottleneck_dir) + '\n')
        log.write('how_many_training_steps = ' + str(train.how_many_training_steps) + '\n')
        log.write('learning_rate = ' + str(train.learning_rate) + '\n')
        log.write('train_batch_size = ' + str(train.train_batch_size) + '\n')
        log.write('random_crop = ' + str(train.random_crop) + '\n')
        log.write('random_scale = ' + str(train.random_scale) + '\n')
        log.write('random_brightness = ' + str(train.random_brightness) + '\n')
    return
# ------------------------------------------------------------------
# Training runs. Each entry: (model_name, steps, learning_rate, batch_size).
# random_crop/random_scale/random_brightness are identical across runs.
# (The commented-out '../Test/' smoke runs from the original were removed;
# the four copy-pasted configuration blocks are deduplicated into a loop.)
# ------------------------------------------------------------------
RUNS = [
    ('melanoma_nevus_m1', 400, 0.01, 1000),
    ('melanoma_nevus_m2', 200, 0.01, 1500),
    ('melanoma_nevus_m3', 200, 0.02, 1500),
    ('melanoma_nevus_m4', 200, 0.01, 2500),
]
for model_name, steps, lr, batch in RUNS:
    train.how_many_training_steps = steps
    train.learning_rate = lr
    train.train_batch_size = batch
    train.random_crop = 0
    train.random_scale = 10
    train.random_brightness = 15
    initModelOutputFolders(model_name, '../TrainingData/')
    logDetailsOfModel()  # reads the global model_name set by this loop
    train.train_model()
|
import cv2
import numpy as np
from pathlib import Path
def spectral_saliency(filename):
    """Show the spectral-residual saliency map for Images/<filename>."""
    image = cv2.imread(str(Path('Images/' + filename)))
    detector = cv2.saliency.StaticSaliencySpectralResidual_create()
    success, sal_map = detector.computeSaliency(image)
    cv2.imshow('Image', image)
    cv2.imshow('Output', sal_map)
    cv2.waitKey()
# haven't been able to get this function to work yet
def fine_grained_saliency(filename):
    """Show fine-grained saliency plus an Otsu-thresholded binary mask."""
    image = cv2.imread(str(Path('Images/' + filename)))
    detector = cv2.saliency.StaticSaliencyFineGrained_create()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    success, sal_map = detector.computeSaliency(gray)
    sal_map = np.array(sal_map * 255, dtype=np.uint8)
    thresh_map = cv2.threshold(sal_map, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    cv2.imshow('Original Image', image)
    cv2.imshow('Saliency Detection', sal_map)
    cv2.imshow('Thresholded', thresh_map)
    cv2.waitKey()
def object_saliency(filename, detections):
    """Draw up to *detections* BING objectness proposals on the image.

    Requires the pretrained BING model files under Models/.
    """
    img_path = Path('Images/' + filename)
    model_path = Path('Models/')
    img = cv2.imread(str(img_path))
    saliency = cv2.saliency.ObjectnessBING_create()
    saliency.setTrainingPath(str(model_path))
    success, sal_map = saliency.computeSaliency(img)
    num_detections = sal_map.shape[0]
    print(num_detections)
    output = img.copy()
    # BUG FIX: clamp to the number of proposals actually returned; the
    # original indexed past the end when detections > num_detections.
    for i in range(min(detections, num_detections)):
        xstart, ystart, xend, yend = sal_map[i].flatten()
        rand_color = np.random.randint(0, 255, size=(3,))
        color = [int(c) for c in rand_color]
        cv2.rectangle(output, (xstart, ystart), (xend, yend), color, 2)
    cv2.imshow('Original image', output)
    cv2.waitKey()
|
# -*- coding: utf-8 -*-
from datetime import time
import pytest
from django.utils.timezone import now
from apps.agendas.models import Appointment
from apps.agendas.response_codes import UNAVAILABLE_DATETIME
from apps.agendas.services.appointment import AppointmentService
from apps.agendas.tests.factories.doctor_profile import DoctorProfileFactory
from apps.contrib.api.exceptions import SimpleValidationError
@pytest.mark.django_db
class AppointmentServiceTests:
    """Service-level tests for AppointmentService.create."""
    def shared_data(self, test_user):
        """Baseline valid appointment payload for a freshly created doctor."""
        doctor = DoctorProfileFactory()
        return {
            'doctor_uuid': str(doctor.uuid),
            'visitor': test_user,
            'date': now().date(),
            'time': time(8, 0),
        }
    def test_create(self, test_user):
        """A valid payload yields an Appointment instance."""
        created = AppointmentService.create(**self.shared_data(test_user))
        assert isinstance(created, Appointment)
    def test_create_unavailable_datetime(self, test_user):
        """Booking the same slot twice raises UNAVAILABLE_DATETIME."""
        payload = self.shared_data(test_user)
        first = AppointmentService.create(**payload)
        assert isinstance(first, Appointment)
        with pytest.raises(SimpleValidationError) as exec_info:
            AppointmentService.create(**payload)
        assert exec_info.value.detail.code == UNAVAILABLE_DATETIME['code']
|
from django.db import connection
from django.db import models as dbmodels
from django.db.models import F, Q, Prefetch
from django.conf import settings
from django.utils import timezone
import datetime
import numbers
import uuid
import logging
import re
import string
from multiprocessing import Lock
from functools import reduce
from custom_user.models import AuthUser
class Transaction(dbmodels.Model):
    """Audit record: which user performed a change, when, and their TZ offset."""
    # UUID primary key so transaction ids are globally unique.
    id = dbmodels.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    user = dbmodels.ForeignKey('custom_user.AuthUser', db_index=True, editable=False)
    creation_time = dbmodels.DateTimeField(db_column='creation_time', db_index=True, auto_now_add=True)
    # Client's offset from UTC (presumably minutes) — TODO confirm unit.
    time_zone_offset = dbmodels.SmallIntegerField(editable=False)
    def __str__(self):
        return str(self.creation_time)
    # Called as Transaction.createTransaction(user, tz): a plain function
    # attribute (no self/@staticmethod). Rejects anonymous/inactive users.
    # NOTE(review): user.is_authenticated() is the pre-Django-1.10 method
    # form; on newer Django it is a property — confirm the target version.
    def createTransaction(user, timeZoneOffset):
        if not user.is_authenticated():
            raise RuntimeError('current user is not authenticated')
        if not user.is_active:
            raise RuntimeError('current user is not active')
        return Transaction.objects.create(user=user, time_zone_offset=timeZoneOffset)
class TransactionState:
    """Lazily creates (at most once) the Transaction for the current request."""
    # Class-level lock shared by all instances.
    # NOTE(review): never acquired in the code visible here — confirm users.
    mutex = Lock()
    def __init__(self, user, timeZoneOffset):
        self.currentTransaction = None
        self.user = user
        self.timeZoneOffset = timeZoneOffset
    @property
    def transaction(self):
        """Create the backing Transaction on first access, then reuse it."""
        # FIX: identity comparison with None (`is None`), not `== None`.
        if self.currentTransaction is None:
            self.currentTransaction = Transaction.createTransaction(self.user, self.timeZoneOffset)
        return self.currentTransaction
class _deferred():
def __init__(self, f):
self._value = None
self._isCached = False
self._f = f
@property
def value(self):
if not self._isCached:
self._value = self._f()
self._isCached = True
return self._value
class Instance(dbmodels.Model):
    """A typed node in a self-describing object graph.

    Each Instance points at another Instance (`typeID`) that acts as its
    type; its data lives in related Value rows.  Deletion is soft,
    recorded via `deleteTransaction`.
    """
    id = dbmodels.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Self-reference: the Instance that serves as this instance's type.
    typeID = dbmodels.ForeignKey('consentrecords.Instance', related_name='typeInstances', db_column='typeid', db_index=True, editable=False)
    # Containment hierarchy; NULL for root instances.
    parent = dbmodels.ForeignKey('consentrecords.Instance', related_name='children', db_column='parentid', db_index=True, null=True, editable=False)
    # The Value on the parent that references this instance, if any.
    parentValue = dbmodels.OneToOneField('consentrecords.Value', related_name='valueChild', db_index=True, null=True)
    transaction = dbmodels.ForeignKey('consentrecords.Transaction', db_index=True, editable=False)
    # Soft delete: non-NULL marks this instance as deleted.
    deleteTransaction = dbmodels.ForeignKey('consentrecords.Transaction', related_name='deletedInstance', db_index=True, null=True, editable=True)
    def __str__(self):
        # Fall back to "Deleted" when the cached Description row is
        # missing or empty.
        try:
            d = self.description
            if d:
                return d.text
            else:
                return "Deleted"
        except Description.DoesNotExist:
            return "Deleted"
@property
def _parentDescription(self):
return self.parent and str(self.parent)
    # Returns a new instance of an object of this kind.
    def createEmptyInstance(self, parent, transactionState):
        """Create a new Instance typed by self under `parent` (may be None).

        Also maintains the Containment closure table: the new instance
        descends from all of parent's ancestors and from itself.
        """
        id = uuid.uuid4().hex
        i = Instance.objects.create(id=id, typeID=self, parent=parent,
                                    transaction=transactionState.transaction)
        if parent:
            Containment.objects.bulk_create([Containment(ancestor=j.ancestor, descendent=i) for j in parent.ancestors.all()])
        Containment.objects.create(ancestor=i, descendent=i)
        return i
    def getDataType(self, field):
        """Return the dataType term declared for `field` by this type's
        configuration.

        Walks: type -> configuration child -> field child whose name
        matches -> that field's dataType value.
        """
        configuration = self.typeID.children.filter(typeID=terms.configuration,deleteTransaction__isnull=True)[0]
        fields = configuration.children.filter(typeID=terms.field,deleteTransaction__isnull=True)
        f = fields.get(value__field=terms.name,
                       value__referenceValue=field,
                       value__deleteTransaction__isnull=True)
        v = f.value_set.filter(field=terms.dataType,deleteTransaction__isnull=True)[0]
        return v.referenceValue
# addValue ensures that the value can be found for object values.
# addValue does not validate that self is writable.
def addValue(self, field, value, position, transactionState):
if value == None:
raise ValueError("value is not specified")
dt = self.getDataType(field)
if dt==terms.objectEnum:
if isinstance(value, Instance):
if value._canFind(transactionState.user):
return self.addReferenceValue(field, value, position, transactionState)
else:
raise Instance.DoesNotExist()
elif isinstance(value, dict) and "instanceID" in value:
f = list(UserInfo(transactionState.user).findFilter(Instance.objects.filter(pk=value["instanceID"])))
if len(f) == 0:
raise Value.DoesNotExist("specified primary key for instance does not exist")
value = f[0]
return self.addReferenceValue(field, value, position, transactionState)
else:
raise RuntimeError("specified value is not an Instance or a dictionary with an instanceID")
elif dt==terms.translationEnum:
return self.addTranslationValue(field, value, position, transactionState)
else:
return self.addStringValue(field, value["text"], position, transactionState)
def addStringValue(self, field, value, position, transactionState):
if position < 0:
raise ValueError("the position %s is not valid", position)
return Value.objects.create(id=uuid.uuid4().hex, instance=self, field=field, stringValue = value, position=position, transaction=transactionState.transaction)
def addTranslationValue(self, field, value, position, transactionState):
if position < 0:
raise ValueError("the position %s is not valid", position)
if not isinstance(value, dict):
raise ValueError("the value(%s) is not a dictionary" % str(value))
return Value.objects.create(id=uuid.uuid4().hex, instance=self, field=field,
stringValue = value["text"], languageCode = value["languageCode"],
position=position, transaction=transactionState.transaction)
def _descendents(self):
d = [self]
i = 0
while i < len(d):
d.extend(d[i].children.filter(deleteTransaction__isnull=True))
i += 1
return d
    def addReferenceValue(self, field, value, position, transactionState):
        """Create a reference Value of `field` at `position` pointing at `value`.

        Raises ValueError on a negative position or a null value.
        """
        if position < 0:
            raise ValueError("the position %s is not valid" % position)
        if not value:
            raise ValueError("the value is null")
        # If the field is special access, then make this and all of its children sourced to self.
        if field == terms.specialAccess and value == terms.customAccessEnum:
            descendents = self._descendents()
            n = AccessRecord.objects.filter(id__in=descendents).delete()
            AccessRecord.objects.bulk_create(map(lambda i: AccessRecord(id=i,source=self), descendents))
        return Value.objects.create(id=uuid.uuid4().hex, instance=self, field=field, referenceValue=value, position=position, transaction=transactionState.transaction)
def createMissingSubValue(self, field, value, position, transactionState):
if position < 0:
raise ValueError("the position %s is not valid", position)
dt = self.getDataType(field)
if dt==terms.objectEnum:
if not Value.objects.filter(instance=self,field=field,referenceValue=value,
deleteTransaction__isnull=True).exists():
logger = logging.getLogger(__name__)
logger.error("%s: adding object %s(%s)" % (str(self), str(field), str(value)))
self.addReferenceValue(field, value, position, transactionState)
elif dt==terms.translationEnum:
if not Value.objects.filter(instance=self,field=field,stringValue=value["text"],
languageCode=value["languageCode"],
deleteTransaction__isnull=True).exists():
logger = logging.getLogger(__name__)
logger.error("%s: adding translation %s(%s)" % (str(self), str(field), str(value)))
self.addTranslationValue(field, value, position, transactionState)
else:
if not Value.objects.filter(instance=self,field=field,stringValue=value,
deleteTransaction__isnull=True).exists():
logger = logging.getLogger(__name__)
logger.error("%s: adding string %s(%s)" % (str(self), str(field), str(value)))
self.addStringValue(field, value, position, transactionState)
def _getSubValues(self, field):
return self.value_set.filter(field=field, deleteTransaction__isnull=True).order_by('position');
def _groupValuesByField(self, vs, userInfo):
values = {}
# Do not allow a user to get security field data unless they can administer this instance.
cache = _deferred(lambda: self._canAdminister(userInfo.authUser, userInfo.instance))
for v in vs:
if v.field not in terms.securityFields or cache.value:
fieldID = v.field.id
if fieldID not in values:
values[fieldID] = [v]
else:
values[fieldID].append(v)
return values
def _getSubInstances(self, field): # Previously _getSubValueObjects
return [v.referenceValue for v in self._getSubValues(field)]
# Returns a unique value of the specified id.
def getSubValue(self, field):
if not field:
raise ValueError("field is not specified")
try:
f = self.value_set.filter(deleteTransaction__isnull=True, field=field).select_related('referenceValue')
return f[0] if f.count() else None
except Value.DoesNotExist:
return None
def getSubInstance(self, field):
if not field:
raise ValueError("field is not specified")
v = self.getSubValue(field)
return v and v.referenceValue
    # Returns an iterable of the values within self associated with the specified field.
    def findValues(self, field, value):
        """Live values of `field` matching `value` by string or by reference id."""
        return self.value_set.filter(Q(stringValue=value)|Q(referenceValue_id=value),deleteTransaction__isnull=True, field=field)
    # Returns a list of pairs of text that are used to generate the description of objects
    # of this kind.
    # The first of the pair is the hex UUID of the name, the second is the hex UUID of the dataType
    @property
    def _descriptors(self):
        # Each result is [name reference, dataType instance, descriptorType value].
        configuration = self.getSubInstance(terms.configuration)
        results = []
        if configuration:
            elementIDs = [terms.name, terms.dataType]
            for fieldObject in configuration._getSubInstances(terms.field):
                # Only fields with a descriptorType participate in descriptions.
                r = fieldObject.getSubInstance(terms.descriptorType)
                if r:
                    n = [fieldObject.getSubValue(x) for x in elementIDs]
                    dataTypeInstance = n[1] and n[1].referenceValue
                    if n[0] and dataTypeInstance:
                        results.append([n[0].referenceValue, dataTypeInstance, r])
        return results
    # Returns a description of this object with these verbs.
    # verbs is an array of pairs where the first of the pair is the field name and
    # the second is the field dataType.
    # The string is directly attached to the verb (v1).
    def cacheDescription(self, nameLists):
        """Recompute and store this instance's Description text.

        Per descriptor field, appends: all values (textEnum), only the
        first value (firstTextEnum), or the value count (countEnum).
        Raises ValueError on an unrecognized descriptorType.
        """
        verbs = nameLists.getNameUUIDs(self.typeID)
        r = []
        for field, dataType, descriptorType in verbs:
            if descriptorType == terms.textEnum:
                vs = self.value_set.filter(field=field, deleteTransaction__isnull=True).order_by('position')
                if dataType == terms.objectEnum:
                    for v in vs:
                        try:
                            if not v.referenceValue:
                                raise ValueError("no reference value for %s in %s: %s(%s)" % (str(v.instance), str(self), str(v.field), v.stringValue))
                            r.append(v.referenceValue.description.text)
                        except Description.DoesNotExist:
                            # Referenced instance has no cached Description
                            # row yet; fall back to its computed text.
                            r.append(v.referenceValue._description)
                else:
                    r.extend([v.stringValue for v in filter(lambda v: v.stringValue, vs)])
            elif descriptorType == terms.firstTextEnum:
                vs = self.value_set.filter(field=field, deleteTransaction__isnull=True).order_by('position')
                if vs.count() > 0:
                    v = vs[0]
                    if dataType == terms.objectEnum:
                        try:
                            if not v.referenceValue:
                                raise ValueError("no reference value for %s in %s: %s(%s)" % (str(v.instance), str(self), str(v.field), v.stringValue))
                            r.append(v.referenceValue.description.text)
                        except Description.DoesNotExist:
                            r.append(v.referenceValue._description)
                    else:
                        if v.stringValue:
                            r.append(v.stringValue)
            elif descriptorType == terms.countEnum:
                vs = self.value_set.filter(field=field, deleteTransaction__isnull=True)
                r.append(str(vs.count()))
            else:
                raise ValueError("unrecognized descriptorType: %s ('%s' or '%s')" % (str(descriptorType), str(terms.textEnum), str(terms.countEnum)))
        s = " ".join(r)
        # update_or_create keeps exactly one Description row per instance.
        Description.objects.update_or_create(instance = self,
                                             defaults={'text': s})
        return s
# Return a list of the instances for which this instance contributes
# to the description.
@property
def _descriptionReferences(self):
values = self.referenceValues.filter(deleteTransaction__isnull=True)
return [v.instance for v in filter(lambda v: v.isDescriptor, values)]
def getDescription(self, language=None):
return self._description
def updateDescriptions(queue, nameLists):
queue = list(queue) # Make a local copy of the list.
calculated = set()
while len(queue) > 0:
i = queue[0]
queue = queue[1:]
if i not in calculated:
i.cacheDescription(nameLists)
queue.extend(i._descriptionReferences)
calculated.add(i)
    @property
    def _description(self):
        """Cached description text, or "Deleted" when none exists."""
        d = self.description
        return d.text if d else "Deleted"
    # Return enough data for a reference to this object and its human readable form.
    # This method is called only for root instances that don't have containers.
    def getReferenceData(self, userInfo, language=None):
        # 'privilege' is only included when the user holds some privilege
        # on this instance.
        d = {'id': None,
             'instanceID': self.id,
             'description': self.getDescription(language),
             'parentID': self.parent and self.parent.id}
        privilege = self.getPrivilege(userInfo)
        if privilege:
            d["privilege"] = privilege.getDescription()
        return d
# This code presumes that all fields have unique values.
def _sortValueDataByField(values):
d = {}
for v in values:
# If there is a reference value, put in a duple with the referenceValue name and id.
# Otherwise, put in the string value.
if v.referenceValue:
d[v.field] = (v.referenceValue.name_values[0].stringValue, v.referenceValue.id)
else:
d[v.field] = v.stringValue
return d
    # Returns a dictionary by field where each value is
    # a duple containing the value containing the name and
    # the instance referenced by self from the key field.
    # Self is an instance of type field.
    def _getSubValueReferences(self):
        # Prefetch each referenced instance's name values into
        # `name_values` so _sortValueDataByField needs no per-row queries.
        vs2 = Value.objects.filter(field=terms.name,
                                   deleteTransaction__isnull=True)
        vs1 = self.value_set.filter(deleteTransaction__isnull=True)\
                            .select_related('referenceValue')\
                            .prefetch_related(Prefetch('referenceValue__value_set',
                                                       queryset=vs2,
                                                       to_attr='name_values'))
        return Instance._sortValueDataByField(vs1)
# For a parent field when getting data, construct this special field record
# that can be used to display this field data.
def getParentReferenceFieldData(self):
name = self.description.text
fieldData = {"name" : name,
"nameID" : self.id,
"dataType" : TermNames.objectEnum,
"dataTypeID" : terms.objectEnum.id,
"capacity" : TermNames.uniqueValueEnum,
"ofKind" : name,
"ofKindID" : self.id}
return fieldData
    # Returns a dictionary of information about a field with this configuration.
    def getFieldDataByName(self, name, language=None):
        """Resolve `name` (a UUID or a term name) to field metadata.

        Raises RuntimeError when the named field instance belongs to a
        different configuration than self.
        """
        if terms.isUUID(name):
            # The key may be the key of a field object or the key of a term that is
            # the name of a field object in the configuration.
            fieldObject = Instance.objects.get(pk=name)
            if fieldObject.typeID != terms.field:
                fieldObject = self.getFieldByReferenceValue(name)
            elif fieldObject.parent != self:
                raise RuntimeError("the specified field is not contained within the configuration of this type")
        else:
            fieldObject = self._getValueByName(name)
        return fieldObject.getFieldData(language)
    # Returns a dictionary of information about a field instance.
    def getFieldData(self, language=None):
        return self._getFieldDataFromValues(self._getSubValueReferences(), language)
    def _getFieldDataFromValues(self, values, language):
        """Build the field-metadata dictionary from pre-grouped `values`.

        Raises ValueError unless both name and dataType are present.
        """
        fieldData = None
        if terms.name in values and terms.dataType in values:
            nameReference = values[terms.name]
            dataTypeReference = values[terms.dataType]
            fieldData = {"id" : self.id,
                         "name" : nameReference[0],
                         "nameID" : nameReference[1],
                         "dataType" : dataTypeReference[0],
                         "dataTypeID" : dataTypeReference[1]}
            # Capacity defaults to multiple values when unspecified.
            if terms.maxCapacity in values:
                fieldData["capacity"] = values[terms.maxCapacity][0]
            else:
                fieldData["capacity"] = TermNames.multipleValuesEnum
            if terms.descriptorType in values:
                fieldData["descriptorType"] = values[terms.descriptorType][0]
            if terms.addObjectRule in values:
                fieldData["objectAddRule"] = values[terms.addObjectRule][0]
            # Object fields additionally carry the kind they refer to.
            if fieldData["dataTypeID"] == terms.objectEnum.id:
                if terms.ofKind in values:
                    ofKindReference = values[terms.ofKind]
                    fieldData["ofKind"] = ofKindReference[0]
                    fieldData["ofKindID"] = ofKindReference[1]
                if terms.pickObjectPath in values:
                    fieldData["pickObjectPath"] = values[terms.pickObjectPath]
        else:
            raise ValueError("values does not contain name or dataType: %s" % values)
        return fieldData
    # Returns the fieldsData from the database for self, which is a term.
    def _getFieldsData(self, language=None):
        # Prefetch the name values of referenced instances so each field's
        # metadata can be assembled without further queries.
        vs2 = Value.objects.filter(field=terms.name,
                                   deleteTransaction__isnull=True)
        vs1 = Value.objects.filter(deleteTransaction__isnull=True)\
                           .select_related('field')\
                           .select_related('referenceValue')\
                           .prefetch_related(Prefetch('referenceValue__value_set',
                                                      queryset=vs2,
                                                      to_attr='name_values'))
        # Fields live two levels below the term: term -> configuration -> field.
        fields = Instance.objects.filter(typeID=terms.field, deleteTransaction__isnull=True)\
                                 .filter(parent__parent=self)\
                                 .prefetch_related(Prefetch('value_set', queryset=vs1, to_attr='values'))\
                                 .order_by('parentValue__position')
        return [field._getFieldDataFromValues(Instance._sortValueDataByField(field.values), language) for field in fields]
# Returns the fieldsData from the cache or database for self, which is a term.
def getFieldsData(self, fieldsDataDictionary, language=None):
if self in fieldsDataDictionary:
return fieldsDataDictionary[self]
else:
fieldsData = self._getFieldsData(language)
if not len(fieldsData):
raise RuntimeError("the specified item is not configured")
fieldsDataDictionary[self] = fieldsData
return fieldsData
    # Serializes the values for one cell.  NOTE: no `self` — invoked
    # unbound as Instance._getCellValues(...).
    def _getCellValues(dataTypeID, values, userInfo, language=None):
        if dataTypeID == terms.objectEnum.id:
            return [{ "id": v.id,
                      "instanceID" : v.referenceValue.id,
                      "description": v.referenceValue._description,
                      'privilege': v.referenceValue.getPrivilege(userInfo).getDescription(),
                      "position": v.position } for v in values]
        elif dataTypeID == terms.translationEnum.id:
            return [{"id": v.id, "text": v.stringValue, "languageCode": v.languageCode} for v in values]
        else:
            # Default case is that each datum in this cell contains a unique value.
            return [{"id": v.id, "text": v.stringValue} for v in values]
    def getReadableSubValues(self, field, userInfo):
        """Values of `field` on self that `userInfo` may read, in position order."""
        return userInfo.readValueFilter(self.value_set.filter(field=field, deleteTransaction__isnull=True)) \
            .order_by('position')\
            .select_related('referenceValue')\
            .select_related('referenceValue__typeID')\
            .select_related('referenceValue__typeID__description__text')
def _getCellData(self, fieldData, values, userInfo, language=None):
if not fieldData:
raise ValueError("fieldData is null")
cell = {"field": fieldData}
fieldID = fieldData["nameID"]
if fieldID not in values:
cell["data"] = []
else:
cell["data"] = Instance._getCellValues(fieldData["dataTypeID"], values[fieldID], userInfo, language)
return cell
    # Returns an array of arrays.
    def getData(self, vs, fieldsData, userInfo, language=None):
        """Serialize values `vs` into one cell per entry of `fieldsData`."""
        values = self._groupValuesByField(vs, userInfo)
        return [self._getCellData(fieldData, values, userInfo, language) for fieldData in fieldsData]
    # self should be a configuration object with fields.
    def getConfiguration(self):
        """Field metadata for every field of this configuration."""
        return [{"field": fieldObject.getFieldData()} for fieldObject in self._getSubInstances(terms.field)]
def getNextElementIndex(self, field):
maxElementIndex = reduce(lambda x,y: max(x, y),
[e.position for e in self._getSubValues(field)],
-1)
if maxElementIndex < 0:
return 0
else:
return maxElementIndex + 1
def updateElementIndexes(self, field, newIndex, transactionState):
ids = {}
for e in self._getSubValues(field):
ids[e.position] = e
if len(ids) == 0:
return 0
else:
sortedIndexes = sorted(ids)
if len(sortedIndexes) <= newIndex:
return sortedIndexes[-1]+1
elif newIndex == 0 and sortedIndexes[0] > 0:
return 0
elif sortedIndexes[newIndex] > sortedIndexes[newIndex-1] + 1:
return sortedIndexes[newIndex-1] + 1
else:
movingIndexes = sortedIndexes[newIndex:]
ids[movingIndexes[0]].updateIndex(movingIndexes[0] + 1, transactionState)
lastIndex = movingIndexes[0]
for i in movingIndexes[1:]:
if lastIndex + 1 < i:
break
ids[i].updateIndex(i + 1, transactionState)
lastIndex = movingIndexes[i]
return movingIndexes[0]
    def markAsDeleted(self, transactionState):
        """Soft-delete self by stamping the current transaction."""
        self.deleteTransaction = transactionState.transaction
        self.save()
    def deepDelete(self, transactionState):
        """Soft-delete self and every live descendant instance and value.

        Breadth-first over children; access records are removed before
        the rows are marked deleted.
        """
        queue = [self]
        AccessRecord.objects.filter(pk=self).delete()
        self.deleteTransaction = transactionState.transaction
        self.save()
        while len(queue) > 0:
            next = queue[0]
            queue = queue[1:]
            instances = next.children.filter(deleteTransaction__isnull=True).only('id')
            values = next.value_set.filter(deleteTransaction__isnull=True).only('id')
            queue.extend(instances)
            # Delete associated access records before marking the instances as deleted.
            AccessRecord.objects.filter(id__in=instances).delete()
            instances.update(deleteTransaction=transactionState.transaction)
            values.update(deleteTransaction=transactionState.transaction)
def deleteOriginalReference(self, transactionState):
if self.parent:
for v in self.referenceValues.filter(instance=self.parent):
v.markAsDeleted(transactionState)
    # Return a filter of all of the instances of this type that exactly match the specified name.
    def getInstanceByName(self, nameField, name, userInfo):
        """First findable instance of this type named `name`, or None."""
        f = userInfo.findFilter(self.typeInstances.filter(deleteTransaction__isnull=True,
                                                          value__deleteTransaction__isnull=True,
                                                          value__field=nameField,
                                                          value__stringValue__iexact=name))
        return f[0] if len(f) else None
    # Return the Value for the specified configuration. If it doesn't exist, raise a Value.DoesNotExist.
    # Self is of type configuration.
    def _getValueByName(self, name):
        # Follows: configuration -> field value -> field's name value ->
        # name term -> the term's own name string.
        try:
            return self.value_set.select_related('referenceValue')\
                .get(deleteTransaction__isnull=True,
                     field=terms.field,
                     referenceValue__value__deleteTransaction__isnull=True,
                     referenceValue__value__field=terms.name,
                     referenceValue__value__referenceValue__typeID=terms.term,
                     referenceValue__value__referenceValue__value__deleteTransaction__isnull=True,
                     referenceValue__value__referenceValue__value__field=terms.name,
                     referenceValue__value__referenceValue__value__stringValue=name)\
                .referenceValue
        except Value.DoesNotExist:
            # Re-raise with a message naming the unrecognized field.
            raise Value.DoesNotExist('the field name "%s" is not recognized for "%s" configuration' % (name, self))
    # Return the Value for the specified configuration. If it doesn't exist, raise a Value.DoesNotExist.
    # Self is of type configuration.
    def getFieldByReferenceValue(self, key):
        # Like _getValueByName, but keyed by the name term's id.
        return self.value_set.select_related('referenceValue')\
            .get(deleteTransaction__isnull=True,
                 field=terms.field,
                 referenceValue__value__deleteTransaction__isnull=True,
                 referenceValue__value__field=terms.name,
                 referenceValue__value__referenceValue__id=key)\
            .referenceValue
    @property
    def inheritsSecurity(self):
        """Instances inherit access control from their containers."""
        return True
    # Returns the higher-ranked of two privilege terms.  NOTE: no `self` —
    # invoked unbound via reduce in getPrivilege.  None ranks lowest.
    def comparePrivileges(a, b):
        if a == b:
            return a
        elif not a:
            return b
        # Ranking order, lowest to highest.
        privileges = [terms.findPrivilegeEnum, terms.readPrivilegeEnum, terms.registerPrivilegeEnum,
                      terms.writePrivilegeEnum, terms.administerPrivilegeEnum]
        aIndex = privileges.index(a)
        return b if b in privileges[(aIndex+1):] else a
# returns the privilege level that the specified user instance has for this instance.
def getPrivilege(self, userInfo):
if userInfo.is_administrator:
return terms.administerPrivilegeEnum
try:
source = self.accessrecord.source
except AccessRecord.DoesNotExist:
return terms.readPrivilegeEnum
minPrivilege = None
minPrivilegeFilter = source.value_set.filter(field=terms.publicAccess, deleteTransaction__isnull=True)\
.select_related('referenceValue__description__text')
if minPrivilegeFilter.exists():
minPrivilege=minPrivilegeFilter[0].referenceValue
if not userInfo.instance:
return minPrivilege
if source.value_set.filter(field=terms.primaryAdministrator, deleteTransaction__isnull=True).count():
if source.value_set.filter(field=terms.primaryAdministrator, deleteTransaction__isnull=True)[0].referenceValue == userInfo.instance:
return terms.administerPrivilegeEnum
f = source.children.filter(typeID=terms.accessRecord, deleteTransaction__isnull=True)\
.filter(Q(value__referenceValue=userInfo.instance,
value__deleteTransaction__isnull=True)|
(Q(value__deleteTransaction__isnull=True,
value__referenceValue__value__referenceValue=userInfo.instance,
value__referenceValue__value__deleteTransaction__isnull=True)))
p = map(lambda i: i.value_set.filter(field=terms.privilege, deleteTransaction__isnull=True)\
.select_related('referenceValue__description__text')[0].referenceValue, f)
return reduce(Instance.comparePrivileges, p, minPrivilege)
    ### For the specified self user, return a filter of values indicating which access records are accessible to this user.
    def _getPrivilegeValues(self, privilegeIDs):
        # Values on access records that grant self (directly, or via a
        # group whose members include self) one of `privilegeIDs`.
        return Value.objects.filter(Q(referenceValue=self)|\
                                    (Q(referenceValue__value__referenceValue=self)\
                                     &Q(referenceValue__value__deleteTransaction__isnull=True)\
                                    ),\
                                    instance__typeID=terms.accessRecord,
                                    deleteTransaction__isnull=True
                                   ) \
            .annotate(pField=F('instance__value__field'),privilege=F('instance__value__referenceValue'),
                      pDeleted=F('instance__value__deleteTransaction')
                     ) \
            .filter(pField=terms.privilege.id, privilege__in=privilegeIDs,pDeleted=None)
    ### Returns True if this user (self) is the primary administrator of the specified instance
    def isPrimaryAdministrator(self, instance):
        try:
            return instance.accessrecord.source.value_set.filter(field=terms.primaryAdministrator,
                                                                 referenceValue=self,
                                                                 deleteTransaction__isnull=True).exists()
        except AccessRecord.DoesNotExist:
            # No access record means no primary administrator.
            return False
    def _securityFilter(self, f, privilegeIDs, accessRecordOptional=True):
        # Restrict queryset `f` to instances whose access source grants
        # self one of `privilegeIDs` via an access record, public access,
        # or primary-administrator status.  When accessRecordOptional,
        # instances with no access record at all pass through.
        sourceValues = self._getPrivilegeValues(privilegeIDs)
        sources=Instance.objects.filter(\
            (Q(children__typeID=terms.accessRecord)&
             Q(children__value__in=sourceValues))
            |
            (((Q(value__field=terms.publicAccess.id)\
               &Q(value__referenceValue__in=privilegeIDs)\
               &Q(value__deleteTransaction__isnull=True)\
              )\
              |
              (Q(value__field=terms.primaryAdministrator.id)\
               &Q(value__referenceValue=self)\
               &Q(value__deleteTransaction__isnull=True)\
              )\
             )\
            )\
            )
        if accessRecordOptional:
            return f.filter(Q(accessrecord__isnull=True)|
                            Q(accessrecord__source__in=sources))
        else:
            return f.filter(Q(accessrecord__source__in=sources))
    ### For the specified instance filter, filter only those instances that can be found by self.
    def findFilter(self, f):
        privilegeIDs = [terms.findPrivilegeEnum.id, terms.readPrivilegeEnum.id, terms.registerPrivilegeEnum.id,
                        terms.writePrivilegeEnum.id, terms.administerPrivilegeEnum.id]
        return self._securityFilter(f, privilegeIDs)
    ### For the specified instance filter, filter only those instances that can be read by self.
    def readFilter(self, f):
        privilegeIDs = [terms.readPrivilegeEnum.id,
                        terms.writePrivilegeEnum.id, terms.administerPrivilegeEnum.id]
        return self._securityFilter(f, privilegeIDs)
    ### For the specified instance filter, filter only those instances that can be administered by self.
    def administerFilter(self, f):
        privilegeIDs = [terms.administerPrivilegeEnum.id]
        # An access record is required: nothing is administrable by default.
        return self._securityFilter(f, privilegeIDs, accessRecordOptional=False)
    def _canUse(self, user, publicAccessPrivileges, accessRecordPrivilegeIDs):
        """Shared access test backing the _can* methods.

        True when `user` is staff, is the primary administrator, the
        instance's public access is in `publicAccessPrivileges`, or an
        access record grants one of `accessRecordPrivilegeIDs`.  An
        instance with no access record denies (except staff/primary admin).
        """
        if user.is_staff:
            return True
        userInstance = Instance.getUserInstance(user)
        if user.is_authenticated():
            if userInstance and userInstance.isPrimaryAdministrator(self):
                return True
        try:
            if self.accessrecord.source.value_set.filter(field=terms.publicAccess,
                                                         referenceValue__in=publicAccessPrivileges,
                                                         deleteTransaction__isnull=True).exists():
                return True
            return userInstance and \
                self.accessrecord.source.children.filter(typeID=terms.accessRecord,
                                                         value__in=userInstance._getPrivilegeValues(accessRecordPrivilegeIDs))\
                .exists()
        except AccessRecord.DoesNotExist:
            return False
    ## Instances can be found if the specified user is a super user or there is no accessRecord
    ## associated with this instance.
    ## Otherwise, the user must have a permission, public access set to find or be the primary administrator.
    def _canFind(self, user):
        publicAccessPrivileges = [terms.findPrivilegeEnum, terms.registerPrivilegeEnum,
                                  terms.readPrivilegeEnum,
                                  terms.writePrivilegeEnum]
        accessRecordPrivilegeIDs = [terms.findPrivilegeEnum.id,
                                    terms.registerPrivilegeEnum.id,
                                    terms.readPrivilegeEnum.id,
                                    terms.writePrivilegeEnum.id,
                                    terms.administerPrivilegeEnum.id]
        return self._canUse(user, publicAccessPrivileges, accessRecordPrivilegeIDs)
    ## Like _canFind, but requires at least read-level access.
    def _canRead(self, user):
        publicAccessPrivileges = [terms.readPrivilegeEnum,
                                  terms.writePrivilegeEnum]
        accessRecordPrivilegeIDs = [terms.readPrivilegeEnum.id,
                                    terms.writePrivilegeEnum.id,
                                    terms.administerPrivilegeEnum.id]
        return self._canUse(user, publicAccessPrivileges, accessRecordPrivilegeIDs)
    ## Like _canFind, but requires at least register-level access.
    def _canRegister(self, user):
        publicAccessPrivileges = [terms.registerPrivilegeEnum,
                                  terms.writePrivilegeEnum]
        accessRecordPrivilegeIDs = [terms.registerPrivilegeEnum.id,
                                    terms.writePrivilegeEnum.id,
                                    terms.administerPrivilegeEnum.id]
        return self._canUse(user, publicAccessPrivileges, accessRecordPrivilegeIDs)
    ## Instances can be written if the specified user is a super user or the user is authenticated, the
    ## current instance has an access record and either the user is the primary administrator of the instance
    ## or the user has either write or administer privilege on the instance.
    def _canWrite(self, user):
        publicAccessPrivileges = [terms.writePrivilegeEnum]
        accessRecordPrivilegeIDs = [terms.writePrivilegeEnum.id,
                                    terms.administerPrivilegeEnum.id]
        return self._canUse(user, publicAccessPrivileges, accessRecordPrivilegeIDs)
    ## Instances can be administered if the specified user is a super user or the user is authenticated, the
    ## current instance has an access record and either the user is the primary administrator of the instance
    ## or the user has administer privilege on the instance.
    def _canAdminister(self, user, userInstance=None):
        # `userInstance` is accepted for caller compatibility but unused:
        # _canUse re-resolves the user's instance itself.
        publicAccessPrivileges = []
        accessRecordPrivilegeIDs = [terms.administerPrivilegeEnum.id]
        return self._canUse(user, publicAccessPrivileges, accessRecordPrivilegeIDs)
def checkWriteAccess(self, user, field=None):
if self.typeID==terms.accessRecord:
if not self._canAdminister(user):
raise RuntimeError("administer permission failed")
elif field in terms.securityFields:
if not self._canAdminister(user):
raise RuntimeError("administer permission failed")
else:
if not self._canWrite(user):
raise RuntimeError("write permission failed")
    # Raises an error unless the specified user can write the specified value to the specified field of self.
    # This handles the special case of register permission if the value is a user.
    # This also handles the special case of submitting an access request to another user.
    def checkWriteValueAccess(self, user, field, value):
        if value:
            if isinstance(value, str) and terms.isUUID(value):
                # Resolve a UUID string to the live instance it names.
                value = Instance.objects.get(pk=value, deleteTransaction__isnull=True)
            # Registering a user that the writer administers into a
            # non-security field only needs register permission.
            if isinstance(value, Instance) and \
               value.typeID == terms.user and \
               value._canAdminister(user) and \
               field not in terms.securityFields and \
               self._canRegister(user):
                return
            # A user may always submit an access request to another user.
            if isinstance(value, Instance) and \
               value.typeID == terms.user and \
               field == terms.accessRequest and \
               self.typeID == terms.user:
                return
        self.checkWriteAccess(user, field)
    # Q filter for unauthenticated users: instances with no access record,
    # or whose source grants public find/read access.
    # NOTE: no `self` — invoked unbound as Instance.anonymousFindFilter().
    def anonymousFindFilter():
        sources=Instance.objects.filter(\
            Q(value__field=terms.publicAccess.id)&
            Q(value__referenceValue__in=[terms.findPrivilegeEnum, terms.readPrivilegeEnum])&\
            Q(value__deleteTransaction__isnull=True)\
            )
        return (Q(accessrecord__isnull=True)|
                Q(accessrecord__source__in=sources))
    def securityValueFilter(self, privilegeIDs):
        # Q filter over Values: passes values with no referenceValue, a
        # reference with no access record, or one whose access source
        # grants self a privilege in `privilegeIDs`.
        sourceValues = self._getPrivilegeValues(privilegeIDs)
        sources=Instance.objects.filter(\
            (Q(children__typeID=terms.accessRecord)&
             Q(children__value__in=sourceValues))
            |
            (((Q(value__field=terms.publicAccess.id)\
               &Q(value__referenceValue__in=privilegeIDs)\
               &Q(value__deleteTransaction__isnull=True)\
              )\
              |
              (Q(value__field=terms.primaryAdministrator.id)\
               &Q(value__referenceValue=self)\
               &Q(value__deleteTransaction__isnull=True)\
              )\
             )\
            )\
            )
        return (Q(referenceValue__isnull=True)|
                Q(referenceValue__accessrecord__isnull=True)|
                (Q(referenceValue__accessrecord__source__in=sources)))
    ### For the specified instance filter, filter only those instances that can be found by self.
    @property
    def findValueFilter(self):
        privilegeIDs = [terms.findPrivilegeEnum.id, terms.readPrivilegeEnum.id, terms.registerPrivilegeEnum.id,
                        terms.writePrivilegeEnum.id, terms.administerPrivilegeEnum.id]
        return self.securityValueFilter(privilegeIDs)
    ### For the specified instance filter, filter only those instances that can be read by self.
    @property
    def readValueFilter(self):
        privilegeIDs = [terms.readPrivilegeEnum.id, terms.registerPrivilegeEnum.id,
                        terms.writePrivilegeEnum.id, terms.administerPrivilegeEnum.id]
        return self.securityValueFilter(privilegeIDs)
    @property
    def defaultCustomAccess(self):
        """True when self declares a defaultAccess value (custom access)."""
        return self.value_set.filter(field=terms.defaultAccess, deleteTransaction__isnull=True).exists()
def getUserInstance(user):
field = terms[TermNames.userID]
userID = user.id
if isinstance(userID, uuid.UUID):
userID = userID.hex
qs = Value.objects.filter(field=field, stringValue=userID,
deleteTransaction__isnull=True)
return qs[0].instance if len(qs) else None
    @property
    def user(self):
        """The AuthUser whose id is stored in self's live userID value."""
        field = terms[TermNames.userID]
        id = self.value_set.get(field=field, deleteTransaction__isnull=True).stringValue
        return AuthUser.objects.get(pk=id)
# The following functions are used for loading scraped data into the system.
def getOrCreateTextValue(self, field, value, fieldData, transactionState):
children = self.value_set.filter(field=field,
stringValue=value['text'],
deleteTransaction__isnull=True)
if len(children):
return children[0]
else:
if 'capacity' in fieldData and fieldData['capacity'] == TermNames.uniqueValueEnum:
children = self.value_set.filter(field=field,
deleteTransaction__isnull=True)
if len(children):
return children[0].updateValue(value, transactionState)
return self.addValue(field, value, self.getNextElementIndex(field), transactionState)
def getOrCreateTranslationValue(self, field, text, languageCode, fieldData, transactionState):
    """Return an existing translation value matching text/languageCode, or create one.

    Mirrors getOrCreateTextValue: unique-capacity fields supersede an existing
    value via updateValue; otherwise a new value is appended at the next index.
    """
    existing = self.value_set.filter(field=field,
                                     stringValue=text,
                                     languageCode=languageCode,
                                     deleteTransaction__isnull=True).first()
    if existing:
        return existing
    if 'capacity' in fieldData and fieldData['capacity'] == TermNames.uniqueValueEnum:
        current = self.value_set.filter(field=field,
                                        deleteTransaction__isnull=True).first()
        if current:
            return current.updateValue({'text': text, 'languageCode': languageCode}, transactionState)
    return self.addValue(field, {'text': text, 'languageCode': languageCode}, self.getNextElementIndex(field), transactionState)
def getOrCreateReferenceValue(self, field, referenceValue, fieldData, transactionState):
    """Return an existing value of `field` referencing referenceValue, or create one.

    Mirrors the other getOrCreate* helpers (the original mixed count() and
    len() for the same emptiness test); unique-capacity fields supersede an
    existing value via updateValue, otherwise a new reference is appended.
    """
    existing = self.value_set.filter(field=field,
                                     referenceValue=referenceValue,
                                     deleteTransaction__isnull=True).first()
    if existing:
        return existing
    if 'capacity' in fieldData and fieldData['capacity'] == TermNames.uniqueValueEnum:
        current = self.value_set.filter(field=field,
                                        deleteTransaction__isnull=True).first()
        if current:
            return current.updateValue(referenceValue, transactionState)
    return self.addReferenceValue(field, referenceValue, self.getNextElementIndex(field), transactionState)
# returns the querySet of values within self that are in the specified object field and named using
# a string within the referenceValue of the value.
def getChildrenByName(self, field, nameField, name):
    """Non-deleted values of `field` whose referenced instance has a
    non-deleted `nameField` string value equal to `name` (case-insensitive)."""
    criteria = {
        'deleteTransaction__isnull': True,
        'field': field,
        'referenceValue__value__deleteTransaction__isnull': True,
        'referenceValue__value__field': nameField,
        'referenceValue__value__stringValue__iexact': name,
    }
    return self.value_set.filter(**criteria)
# returns the querySet of values within self that are in the specified object field and named using
# a referenceValue within the referenceValue of the value.
def getChildrenByReferenceName(self, field, nameField, name):
    """Non-deleted values of `field` whose referenced instance has a
    non-deleted `nameField` value referencing `name`."""
    criteria = {
        'deleteTransaction__isnull': True,
        'field': field,
        'referenceValue__value__deleteTransaction__isnull': True,
        'referenceValue__value__field': nameField,
        'referenceValue__value__referenceValue': name,
    }
    return self.value_set.filter(**criteria)
def getValueByReference(self, field, r):
    """Non-deleted values of `field` whose referenceValue is exactly `r`."""
    criteria = {
        'deleteTransaction__isnull': True,
        'field': field,
        'referenceValue': r,
    }
    return self.value_set.filter(**criteria)
# The previous functions are used for loading scraped data into the system.
class NameList():
    """Per-type memo of descriptor field UUIDs.

    The first lookup for a type reads typeID._descriptors; later lookups for
    the same type return the cached list.
    """
    def __init__(self):
        # Cache keyed by the type instance itself.
        self.items = {}

    def getNameUUIDs(self, typeID):
        """Return (and cache) typeID._descriptors."""
        if typeID not in self.items:
            self.items[typeID] = typeID._descriptors
        return self.items[typeID]
class Value(dbmodels.Model):
    """One (instance, field) fact: a string/translation value or a reference
    to another Instance.

    Facts are never edited in place; superseding writes a replacement row and
    stamps the old row's deleteTransaction.
    """
    id = dbmodels.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    instance = dbmodels.ForeignKey('consentrecords.Instance', db_index=True, editable=False)
    field = dbmodels.ForeignKey('consentrecords.Instance', related_name='fieldValues', db_column='fieldid', db_index=True, editable=False)
    stringValue = dbmodels.CharField(max_length=255, db_index=True, null=True, editable=False)
    languageCode = dbmodels.CharField(max_length=10, db_index=True, null=True, editable=False)
    referenceValue = dbmodels.ForeignKey('consentrecords.Instance', related_name='referenceValues', db_index=True, null=True, editable=False)
    position = dbmodels.IntegerField(editable=False)
    transaction = dbmodels.ForeignKey('consentrecords.Transaction', db_index=True, editable=False)
    deleteTransaction = dbmodels.ForeignKey('consentrecords.Transaction', related_name='deletedValue', db_index=True, null=True, editable=True)

    def __str__(self):
        d = str(self.referenceValue) if self.referenceValue else self.stringValue
        return "%s[%s:%s]@%s" % (str(self.instance),
                                 str(self.field),
                                 d,
                                 str(self.position))

    @property
    def objectValue(self):
        """The referenced instance's string form, or the plain string value."""
        # BUG FIX: the original omitted `return`, so this always yielded None.
        return str(self.referenceValue) if self.referenceValue else self.stringValue

    @property
    def isDescriptor(self):
        # True when a non-deleted _field configuration of this value's type
        # names self.field and carries a descriptor type.
        return Instance.objects.filter(parent__parent=self.instance.typeID, typeID=terms.field) \
            .filter(deleteTransaction__isnull=True)\
            .filter(value__field=terms.name,\
                    value__referenceValue=self.field,
                    value__deleteTransaction__isnull=True)\
            .filter(value__field=terms.descriptorType,
                    value__deleteTransaction__isnull=True)\
            .exists()

    @property
    def isOriginalReference(self):
        """True if the referenced instance is a child of this value's instance."""
        # If it is not an id, then return false.
        if not self.referenceValue:
            return False
        return self.referenceValue.parent == self.instance

    def getReferenceData(self, userInfo, language=None):
        """Serialize this reference value for the client, including the
        viewer's privilege on the referenced instance when one applies."""
        d = { "id": self.id,
              "instanceID" : self.referenceValue.id,
              "description": self.referenceValue.getDescription(language),
              "position": self.position }
        privilege = self.referenceValue.getPrivilege(userInfo)
        if privilege:
            d["privilege"] = privilege.getDescription()
        return d

    # Updates the value of the specified object
    # All existing facts that identify the value are marked as deleted.
    def updateValue(self, newValue, transactionState):
        self.markAsDeleted(transactionState)
        return self.instance.addValue(self.field, newValue, self.position, transactionState)

    # Updates the position of the specified object
    # All existing facts that identify the value are marked as deleted.
    def updateIndex(self, newIndex, transactionState):
        self.markAsDeleted(transactionState)
        # NOTE(review): languageCode is not copied to the replacement row, so
        # reindexing a translation appears to drop its language — confirm.
        return Value.objects.create(id=uuid.uuid4().hex, instance=self.instance,
                                    field=self.field,
                                    stringValue = self.stringValue,
                                    referenceValue = self.referenceValue,
                                    position=newIndex,
                                    transaction=transactionState.transaction)

    def markAsDeleted(self, transactionState):
        """Stamp this fact with the deleting transaction."""
        self.deleteTransaction = transactionState.transaction
        self.save()

    def deepDelete(self, transactionState):
        """Delete this value; if it is a special-access value, re-source the
        instance's descendents; if it owns its referenced instance, delete
        that subtree as well."""
        # If the field is special access, then make this and all of its children
        # sourced to the same source as the parent of self.
        if self.field == terms.specialAccess:
            descendents = self.instance._descendents()
            n = AccessRecord.objects.filter(id__in=descendents).delete()
            if self.instance.parent and self.instance.parent.accessrecord:
                AccessRecord.objects.bulk_create(\
                    map(lambda i: AccessRecord(id=i,source=self.instance.parent.accessrecord.source), descendents))
        if self.isOriginalReference:
            self.referenceValue.deepDelete(transactionState)
        self.markAsDeleted(transactionState)

    @property
    def dataType(self):
        """The data-type enumerator configured for this value's field on the
        containing instance's type."""
        f = Instance.objects.get(typeID=terms.field,
                                 value__field=terms.name,
                                 value__referenceValue=self.field,
                                 value__deleteTransaction__isnull=True,
                                 parent__parent=self.instance.typeID)
        v = f.value_set.filter(field=terms.dataType,deleteTransaction__isnull=True)[0]
        return v.referenceValue

    # returns whether or not c has data to update self.
    # The analysis of c varies based on the data type of self's field.
    def hasNewValue(self, c):
        if c is None:
            raise ValueError("c is not specified")
        dt = self.dataType
        if dt==terms.objectEnum:
            return "instanceID" in c
        elif dt==terms.translationEnum:
            return 'text' in c and 'languageCode' in c
        else:
            return 'text' in c

    def checkWriteAccess(self, user):
        """Delegate to the owning instance's write-value access check."""
        self.instance.checkWriteValueAccess(user, self.field, self.referenceValue)

    # No self: call as Value.anonymousFindFilter().
    def anonymousFindFilter():
        """Q filter for values an unauthenticated viewer may find: public
        access of find or read on the referenced instance's access source."""
        sources=Instance.objects.filter(\
            Q(value__field=terms.publicAccess.id)&
            Q(value__referenceValue__in=[terms.findPrivilegeEnum, terms.readPrivilegeEnum])&\
            Q(value__deleteTransaction__isnull=True)\
            )
        return (Q(referenceValue__isnull=True)|
                Q(referenceValue__accessrecord__isnull=True)|
                Q(referenceValue__accessrecord__source__in=sources))

    # No self: call as Value.anonymousReadFilter().
    def anonymousReadFilter():
        """Q filter for values an unauthenticated viewer may read: public
        access of read on the referenced instance's access source."""
        sources=Instance.objects.filter(\
            Q(value__field=terms.publicAccess.id)&
            Q(value__referenceValue__in=[terms.readPrivilegeEnum])&\
            Q(value__deleteTransaction__isnull=True)\
            )
        return Q(referenceValue__isnull=True)|\
               Q(referenceValue__accessrecord__isnull=True)|\
               Q(referenceValue__accessrecord__source__in=sources)
class Description(dbmodels.Model):
    """Caches the computed display text for an Instance (one row per instance)."""
    id = dbmodels.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    instance = dbmodels.OneToOneField('consentrecords.Instance', db_index=True, editable=False)
    text = dbmodels.CharField(max_length=255, db_index=True, editable=True)

    def __str__(self):
        # str() directly; the original's "%s" % (self.text) was a redundant
        # format round-trip with the same result.
        return str(self.text)
# Security Sources are used on targets to determine which record contains the security rules for the target.
class AccessRecord(dbmodels.Model):
    # The target instance whose access is governed (doubles as the primary key).
    id = dbmodels.OneToOneField('consentrecords.Instance', primary_key=True, db_column='id', db_index=True, editable=False)
    # The instance holding the access rules that apply to `id`.
    source = dbmodels.ForeignKey('consentrecords.Instance', related_name='sources', db_index=True, editable=True)

    def __str__(self):
        return str(self.id)
class Containment(dbmodels.Model):
    """Ancestor/descendent edge between instances.

    NOTE(review): the related_names suggest this is a (possibly transitive)
    containment closure used by Instance._descendents — confirm whether edges
    are direct-only or closed over the parent chain.
    """
    id = dbmodels.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    ancestor = dbmodels.ForeignKey('consentrecords.Instance', related_name='descendents', db_index=True, editable=False)
    descendent = dbmodels.ForeignKey('consentrecords.Instance', related_name='ancestors', db_index=True, editable=False)

    def __str__(self):
        return "%s -> %s" % (self.ancestor, self.descendent)
class TermNames():
    """String names of the bootstrap ontology terms.

    Each constant is the stringValue used to look up (or create) the
    corresponding term Instance via Terms; names beginning with '_' are
    system terms.  initialKinds lists the terms seeded at bootstrap.
    """
    # These verbs are associated with field IDs of values.
    term = '_term'
    configuration = '_configuration'
    field = '_field'
    boolean = '_boolean'
    name = '_name'
    dataType = '_data type'
    stringEnum = '_string'
    number = '_number'
    datestamp = '_datestamp'
    datestampDayOptional = '_datestamp (day optional)'
    translationEnum = '_translation'
    objectEnum = '_object'
    ofKind = '_of kind'
    pickObjectPath = '_pick object path'
    enumerator = 'enumerator'
    maxCapacity = '_max capacity'
    uniqueValueEnum = '_unique value'
    multipleValuesEnum = '_multiple values'
    addObjectRule = '_object add rule'
    pickObjectRuleEnum = '_pick one'
    createObjectRuleEnum = '_create one'
    descriptorType = '_descriptor type'
    yes = '_yes'
    no = '_no'
    user = '_user'
    userID = '_userID'
    email = '_email'
    firstName = '_first name'
    lastName = '_last name'
    text = '_text'
    textEnum = '_by text'
    firstTextEnum = '_by first text'
    countEnum = '_by count'
    accessRecord = '_access record'
    accessRequest = '_access request'
    systemAccess = '_system access'
    privilege = '_privilege'
    findPrivilegeEnum = '_find'
    readPrivilegeEnum = '_read'
    writePrivilegeEnum = '_write'
    administerPrivilegeEnum = '_administer'
    registerPrivilegeEnum = '_register'
    group = '_group'
    defaultAccess = '_default access'
    specialAccess = '_special access'
    custom = '_custom'
    publicAccess='_public access'
    primaryAdministrator='_primary administrator'

    initialKinds = [
        configuration,      # identifies a configuration instance (contained by a uuName)
        field,              # identifies a field instance (contained by a configuration)
        boolean,            # identifies an instance of type Boolean
        name,               # Defines the proper name of an object.
        ofKind,             # identifies the type of object for a field of "object" data type.
        pickObjectPath,     # identifies the path to objects that are to be picked.
        enumerator,         # identifies an enumerator
        dataType,           # defines the data type of a property
        maxCapacity,        # defines the quantity relationship of a field within its container.
        addObjectRule,      # defines the rule for adding objects to a field that supports multiple objects
        descriptorType,     # defines how the data of this field is used to describe its instance.
        user,               # identifies an instance of a user.
        userID,             # identifies the user identifier for the user.
        email,              # identifies an email address.
        firstName,          # identifies the first name.
        lastName,           # identifies the last name.
        accessRecord,       # identifies an access record for an instance
        accessRequest,      # identifies an access request for an instance
        privilege,          # identifies a privilege associated with an access record
        group,              # identifies a group associated with an access record
        defaultAccess,
        specialAccess,
        publicAccess,
        primaryAdministrator,
    ]
class Terms():
    """Lazy registry of the bootstrap term Instances.

    Attributes (term, name, configuration, field, boolean, ofKind,
    pickObjectPath, enumerator, dataType, maxCapacity, addObjectRule,
    descriptorType, user, userID, email, firstName, lastName, translation,
    accessRecord, accessRequest, systemAccess, privilege, group,
    defaultAccess, specialAccess, publicAccess, primaryAdministrator,
    securityFields) and the enumerator attributes (textEnum, firstTextEnum,
    countEnum, objectEnum, stringEnum, translationEnum, uniqueValueEnum,
    multipleValuesEnum, pickObjectRuleEnum, createObjectRuleEnum,
    findPrivilegeEnum, readPrivilegeEnum, writePrivilegeEnum,
    administerPrivilegeEnum, registerPrivilegeEnum, defaultCustomEnum,
    customAccessEnum) are resolved on demand in __getattr__ and cached on the
    instance; initialize() pre-resolves them in bulk.
    """
    def initialize(self, transactionState=None):
        """Eagerly resolve (or create) all bootstrap terms and enumerators.

        Lookup failures are deliberately swallowed so this can run while the
        schema is only partially seeded; unresolved names fall back to the
        lazy __getattr__ path later.
        """
        try:
            self.term = Terms.getUUName()
            self.name = Terms.getName()
            self.configuration = Terms.getOrCreateTerm(TermNames.configuration, transactionState)
            self.field = Terms.getOrCreateTerm(TermNames.field, transactionState)
            self.boolean = Terms.getOrCreateTerm(TermNames.boolean, transactionState)
            self.ofKind = Terms.getOrCreateTerm(TermNames.ofKind, transactionState)
            self.pickObjectPath = Terms.getOrCreateTerm(TermNames.pickObjectPath, transactionState)
            self.enumerator = Terms.getOrCreateTerm(TermNames.enumerator, transactionState)
            self.dataType = Terms.getOrCreateTerm(TermNames.dataType, transactionState)
            self.maxCapacity = Terms.getOrCreateTerm(TermNames.maxCapacity, transactionState)
            self.addObjectRule = Terms.getOrCreateTerm(TermNames.addObjectRule, transactionState)
            self.descriptorType = Terms.getOrCreateTerm(TermNames.descriptorType, transactionState)
            self.user = Terms.getOrCreateTerm(TermNames.user, transactionState)
            self.userID = Terms.getOrCreateTerm(TermNames.userID, transactionState)
            self.email = Terms.getOrCreateTerm(TermNames.email, transactionState)
            self.firstName = Terms.getOrCreateTerm(TermNames.firstName, transactionState)
            self.lastName = Terms.getOrCreateTerm(TermNames.lastName, transactionState)
            self.translation = Terms.getOrCreateTerm(TermNames.translationEnum, transactionState)
            self.accessRecord = Terms.getOrCreateTerm(TermNames.accessRecord, transactionState)
            self.accessRequest = Terms.getOrCreateTerm(TermNames.accessRequest, transactionState)
            self.systemAccess = Terms.getOrCreateTerm(TermNames.systemAccess, transactionState)
            self.privilege = Terms.getOrCreateTerm(TermNames.privilege, transactionState)
            self.group = Terms.getOrCreateTerm(TermNames.group, transactionState)
            self.defaultAccess = Terms.getOrCreateTerm(TermNames.defaultAccess, transactionState)
            self.specialAccess = Terms.getOrCreateTerm(TermNames.specialAccess, transactionState)
            self.publicAccess = Terms.getOrCreateTerm(TermNames.publicAccess, transactionState)
            self.primaryAdministrator = Terms.getOrCreateTerm(TermNames.primaryAdministrator, transactionState)

            self.securityFields = [self.accessRecord, self.systemAccess, self.defaultAccess, self.specialAccess, self.publicAccess, self.primaryAdministrator, self.accessRequest]
        except Instance.DoesNotExist: pass
        except Value.DoesNotExist: pass

        try: self.textEnum = Terms.getNamedEnumerator(self.descriptorType, TermNames.textEnum)
        except Value.DoesNotExist: pass
        try: self.firstTextEnum = Terms.getNamedEnumerator(self.descriptorType, TermNames.firstTextEnum)
        except Value.DoesNotExist: pass
        try: self.countEnum = Terms.getNamedEnumerator(self.descriptorType, TermNames.countEnum);
        except Value.DoesNotExist: pass
        try: self.objectEnum = Terms.getNamedEnumerator(self.dataType, TermNames.objectEnum);
        except Value.DoesNotExist: pass
        try: self.stringEnum = Terms.getNamedEnumerator(self.dataType, TermNames.stringEnum);
        except Value.DoesNotExist: pass
        try: self.translationEnum = Terms.getNamedEnumerator(self.dataType, TermNames.translationEnum);
        except Value.DoesNotExist: pass
        try: self.uniqueValueEnum = Terms.getNamedEnumerator(self.maxCapacity, TermNames.uniqueValueEnum);
        except Value.DoesNotExist: pass
        try: self.multipleValuesEnum = Terms.getNamedEnumerator(self.maxCapacity, TermNames.multipleValuesEnum);
        except Value.DoesNotExist: pass
        try: self.pickObjectRuleEnum = Terms.getNamedEnumerator(self.addObjectRule, TermNames.pickObjectRuleEnum);
        except Value.DoesNotExist: pass
        try: self.createObjectRuleEnum = Terms.getNamedEnumerator(self.addObjectRule, TermNames.createObjectRuleEnum);
        except Value.DoesNotExist: pass
        try: self.findPrivilegeEnum = Terms.getNamedEnumerator(self.privilege, TermNames.findPrivilegeEnum);
        except Value.DoesNotExist: pass
        try: self.readPrivilegeEnum = Terms.getNamedEnumerator(self.privilege, TermNames.readPrivilegeEnum);
        except Value.DoesNotExist: pass
        try: self.writePrivilegeEnum = Terms.getNamedEnumerator(self.privilege, TermNames.writePrivilegeEnum);
        except Value.DoesNotExist: pass
        try: self.administerPrivilegeEnum = Terms.getNamedEnumerator(self.privilege, TermNames.administerPrivilegeEnum);
        except Value.DoesNotExist: pass
        try: self.registerPrivilegeEnum = Terms.getNamedEnumerator(self.privilege, TermNames.registerPrivilegeEnum);
        except Value.DoesNotExist: pass
        try: self.defaultCustomEnum = Terms.getNamedEnumerator(self.defaultAccess, TermNames.custom);
        except Value.DoesNotExist: pass
        try: self.customAccessEnum = Terms.getNamedEnumerator(self.specialAccess, TermNames.custom);
        except Value.DoesNotExist: pass

    # No self: call as Terms.getUUName().  The root term names itself
    # (typeID == its own id).
    def getUUName():
        return Instance.objects.get(typeID=F('id'),
                                    value__deleteTransaction__isnull=True,
                                    value__stringValue=TermNames.term)

    # No self: call as Terms.getName().  The _name term's name value uses
    # itself as its field.
    def getName():
        return Instance.objects.get(typeID=terms.term,
                                    value__deleteTransaction__isnull=True,
                                    value__field=F('id'),
                                    value__stringValue=TermNames.name)

    # If name is a 32 character hex string, then it is considered that ID. Otherwise,
    # it is looked up by name.
    def __getitem__(self, name):
        try:
            if terms.isUUID(name):
                return Instance.objects.get(pk=name, deleteTransaction__isnull=True);
            else:
                return Instance.objects.get(typeID=terms.term,
                                            value__deleteTransaction__isnull=True,
                                            value__field = terms.name,
                                            value__stringValue=name)
        except Instance.DoesNotExist:
            raise Instance.DoesNotExist('the term "%s" is not recognized' % name)

    def __getattr__(self, name):
        """Resolve a term attribute on first access and cache it on self so
        subsequent accesses bypass __getattr__."""
        if name == 'term':
            x = Terms.getUUName()
        elif name == 'name':
            x = Terms.getName()
        elif name == 'securityFields':
            x = [self.accessRecord, self.systemAccess, self.defaultAccess, self.specialAccess, self.publicAccess, self.primaryAdministrator, self.accessRequest]
        elif name in ['textEnum', 'firstTextEnum', 'countEnum']:
            x = Terms.getNamedEnumerator(self.descriptorType, type.__getattribute__(TermNames, name))
        elif name in ['objectEnum', 'stringEnum', 'translationEnum']:
            x = Terms.getNamedEnumerator(self.dataType, type.__getattribute__(TermNames, name))
        elif name in ['uniqueValueEnum', 'multipleValuesEnum']:
            x = Terms.getNamedEnumerator(self.maxCapacity, type.__getattribute__(TermNames, name))
        elif name in ['pickObjectRuleEnum', 'createObjectRuleEnum']:
            x = Terms.getNamedEnumerator(self.addObjectRule, type.__getattribute__(TermNames, name))
        elif name in ['findPrivilegeEnum', 'readPrivilegeEnum', 'writePrivilegeEnum', 'administerPrivilegeEnum', 'registerPrivilegeEnum']:
            x = Terms.getNamedEnumerator(self.privilege, type.__getattribute__(TermNames, name))
        elif name == 'defaultCustomEnum':
            x = Terms.getNamedEnumerator(self.defaultAccess, TermNames.custom);
        elif name == 'customAccessEnum':
            x = Terms.getNamedEnumerator(self.specialAccess, TermNames.custom);
        else:
            x = self[type.__getattribute__(TermNames, name)]
        self.__setattr__(name, x)
        return x

    # No self: call as Terms.getOrCreateTerm(...).  Looks a term up by name,
    # creating it (with a name value) when absent.
    def getOrCreateTerm(name, transactionState):
        try:
            return terms[name]
        except Instance.DoesNotExist:
            print('new term: %s' % name)
            i = terms.term.createEmptyInstance(None, transactionState)
            i.addStringValue(terms.name, name, 0, transactionState)
            return i

    # Return the UUID for the specified Ontology object. If it doesn't exist, raise a Value.DoesNotExist.
    # No self: call as Terms.getNamedEnumerator(term, stringValue).
    def getNamedEnumerator(term, stringValue):
        if not term:
            raise ValueError("term is null")
        v = Value.objects.get(instance=term, field=terms.enumerator,
                              deleteTransaction__isnull=True,
                              referenceValue__value__field=terms.name,
                              referenceValue__value__deleteTransaction__isnull=True,
                              referenceValue__value__stringValue=stringValue)
        return v.referenceValue

    # Return the UUID for the specified Ontology object. If it doesn't exist, raise a Value.DoesNotExist.
    # NOTE(review): unlike getNamedEnumerator, this does not filter
    # deleteTransaction__isnull on the enumerator Value itself — confirm
    # whether deleted enumerator links should be excluded here too.
    def getTranslationNamedEnumerator(term, stringValue, languageCode):
        v = Value.objects.get(instance=term, field = terms.enumerator,
                              referenceValue__value__deleteTransaction__isnull=True,
                              referenceValue__value__field=terms.translation,
                              referenceValue__value__stringValue=stringValue,
                              referenceValue__value__languageCode=languageCode)
        return v.referenceValue

    def isUUID(self, s):
        # Anchored pattern: exactly 32 hex characters.
        return re.search('^[a-fA-F0-9]{32}$', s)
# Module-level singleton used throughout this file for term lookups.
terms = Terms()
class UserInfo:
    """Wraps an auth user with its Instance and caches its security filters.

    The find*/read* methods narrow a queryset to what the user may see;
    administrators see everything, anonymous viewers get the public filters.
    """
    def __init__(self, authUser):
        self.authUser = authUser
        if authUser.is_authenticated():
            self.instance = Instance.getUserInstance(authUser)
        else:
            self.instance = None
        # Lazily-built Q filters, cached after first use.
        self._findValueFilter = None
        self._readValueFilter = None

    @property
    def is_administrator(self):
        """True for staff accounts, which bypass all filtering."""
        return self.authUser.is_staff

    @property
    def is_authenticated(self):
        return self.authUser.is_authenticated()

    def findFilter(self, resultSet):
        """Restrict resultSet to instances this user may find."""
        if not self.is_authenticated:
            return resultSet.filter(Instance.anonymousFindFilter())
        if self.is_administrator:
            return resultSet
        if self.instance:
            return self.instance.findFilter(resultSet)
        # Occurs while a user is still being set up.
        return resultSet.filter(Instance.anonymousFindFilter())

    def findValueFilter(self, resultSet):
        """Restrict resultSet to values this user may find (filter is cached)."""
        if self._findValueFilter:
            return resultSet.filter(self._findValueFilter)
        if self.is_administrator:
            return resultSet
        if self.is_authenticated:
            self._findValueFilter = self.instance.findValueFilter
        else:
            self._findValueFilter = Value.anonymousFindFilter()
        return resultSet.filter(self._findValueFilter)

    def readValueFilter(self, resultSet):
        """Restrict resultSet to values this user may read (filter is cached)."""
        if self._readValueFilter:
            return resultSet.filter(self._readValueFilter)
        if self.is_administrator:
            return resultSet
        if self.is_authenticated:
            self._readValueFilter = self.instance.readValueFilter
        else:
            self._readValueFilter = Value.anonymousReadFilter()
        return resultSet.filter(self._readValueFilter)

    def readFilter(self, resultSet):
        """Restrict resultSet to instances this user may read."""
        if not self.is_authenticated:
            return resultSet.filter(Instance.anonymousFindFilter())
        if self.is_administrator:
            return resultSet
        return self.instance.readFilter(resultSet)

    def administerFilter(self, resultSet):
        """Restrict resultSet to instances this user may administer."""
        if not self.is_authenticated:
            return []   # If not authenticated, then return an empty iterable.
        if self.is_administrator:
            return resultSet
        return self.instance.administerFilter(resultSet)
|
import csv
import DBN
import matplotlib.pyplot as plt
def getData(inp="../ABP_data_11traces_1min/dataset7.txt"):
    """Read whitespace-separated observation rows from *inp*.

    Each line yields the first three space-separated tokens as floats
    (presumably mean/sys/dia blood pressure — see main(); confirm column
    order against the dataset).  Returns a list of 3-element float lists.
    """
    # open() + context manager replaces the Python-2-only file() builtin and
    # guarantees the handle is closed (the original leaked it).
    with open(inp) as f:
        lines = f.readlines()
    return [[float(tok) for tok in line.split(" ")[:3]] for line in lines]
def main():
    """Run the DBN over the observation trace and plot observed vs. estimated
    diastolic / mean / systolic pressures with +/- one-error shaded bands."""
    data = list(getData())
    bayesNet = DBN.DBN()
    dataOut = []
    for count, each in enumerate(data):
        print("timestep: " + str(count) + " Observation: " + str(each))
        bayesNet.observe(each)
        bayesNet.elapseTime()
        dataOut.append(bayesNet.getStats())

    DiaObserved = [d["dia_bp"][0] for d in dataOut]
    MeanObserved = [d["mean_bp"][0] for d in dataOut]
    SysObserved = [d["sys_bp"][0] for d in dataOut]
    DiaObservedErr = [d["dia_bp"][1] for d in dataOut]
    MeanObservedErr = [d["mean_bp"][1] for d in dataOut]
    SysObservedErr = [d["sys_bp"][1] for d in dataOut]
    # List comprehensions instead of map(): under Python 3 a lazy map object
    # would be exhausted/mishandled by pyplot.
    DiaData = [row[2] for row in data]
    MeanData = [row[0] for row in data]
    SysData = [row[1] for row in data]

    # BUG FIX: the x axis was hard-coded to 31 samples; derive it from the
    # actual trace length so other datasets plot correctly.
    l = list(range(len(dataOut)))

    def _plot_band(observed, err):
        # Plot an estimate line with its +/- err shaded band.
        plt.plot(l, observed)
        plt.fill_between(l,
                         [o - e for o, e in zip(observed, err)],
                         [o + e for o, e in zip(observed, err)],
                         interpolate=True)

    plt.plot(l, DiaData)
    _plot_band(DiaObserved, DiaObservedErr)
    plt.plot(l, MeanData)
    _plot_band(MeanObserved, MeanObservedErr)
    plt.plot(l, SysData)
    _plot_band(SysObserved, SysObservedErr)
    # bag_pressure stats are available in dataOut but intentionally not plotted.
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
|
# https://helloacm.com/teaching-kids-programming-recursive-depth-first-search-algorithm-to-count-the-surrounded-islands/
# https://binarysearch.com/problems/Surrounded-Islands
# MEDIUM, DFS
# https://helloacm.com/teaching-kids-programming-recursive-depth-first-search-algorithm-to-count-the-surrounded-islands/
# https://binarysearch.com/problems/Surrounded-Islands
# MEDIUM, DFS
class Solution:
    """Count islands of 1s that are fully surrounded (touch no border cell).

    The matrix is consumed in place: every visited island is zeroed out.
    """
    def solve(self, matrix):
        if not matrix:
            return 0
        rows, cols = len(matrix), len(matrix[0])

        def sink(sr, sc):
            # Iterative flood fill: zero the whole island containing (sr, sc).
            stack = [(sr, sc)]
            while stack:
                r, c = stack.pop()
                if 0 <= r < rows and 0 <= c < cols and matrix[r][c]:
                    matrix[r][c] = 0
                    stack.extend(((r, c + 1), (r, c - 1),
                                  (r + 1, c), (r - 1, c)))

        # First pass: erase every island that touches the border.
        for r in range(rows):
            sink(r, 0)
            sink(r, cols - 1)
        for c in range(cols):
            sink(0, c)
            sink(rows - 1, c)

        # Second pass: whatever remains is fully surrounded; count each one.
        surrounded = 0
        for r in range(1, rows - 1):
            for c in range(1, cols - 1):
                if matrix[r][c]:
                    sink(r, c)
                    surrounded += 1
        return surrounded
|
import random
import re
from flask import session, jsonify, current_app, make_response, request
from ihome.utils.response_code import RET
from . import api
from ihome.utils.captcha.captcha import captcha
from ihome import redis_cli, constants, db
from ihome.models import User
from ihome.utils.sms import CCP
@api.route("/imagecode/<image_uuid>", methods=['GET'])
def generate_image_code(image_uuid):
    """
    Generate an image captcha.
    1. Use the captcha toolkit to produce the text and the image.
    2. Store the text in redis under the client-supplied uuid.
    3. Return the image with an image Content-Type.
    :param image_uuid: client-generated identifier for this captcha
    :return: image response, or a JSON error body
    """
    text, image = captcha.generate_captcha()
    # Persist the captcha text so the SMS endpoint can verify it later.
    try:
        redis_cli.setex('ImageCode_' + image_uuid, constants.IMAGE_CODE_REDIS_EXPIRES, text)
    except Exception as e:
        # Log via the application-context logger.
        current_app.logger.error(e)
        # Front/back ends exchange errors as JSON.
        return jsonify(errno=RET.DBERR, errmsg='数据保存失败')
    resp = make_response(image)
    # NOTE(review): 'image/jpg' is non-standard ('image/jpeg' is the
    # registered type) — confirm clients before changing.
    resp.headers['Content-Type'] = 'image/jpg'
    return resp
@api.route("/smscode/<mobile>", methods=['GET'])
def send_sms_code(mobile):
    """
    Send an SMS verification code.
    1. Get query-string params: the typed image-captcha text and its uuid.
    2. Check parameter completeness.
    3. Validate the mobile number format with a regex.
    4. Fetch the real image captcha from redis.
    5. Check the redis lookup result.
    6. Delete the captcha from redis first (single use).
    7. Compare the submitted captcha against the stored one.
    8. Generate a random SMS code and store it in redis.
    9. Send the SMS via the cloud messaging provider.
    10. Return the send result.
    :param mobile: phone number from the URL path
    :return: JSON result body
    """
    # Query-string parameters.
    image_code = request.args.get('text')
    uuid = request.args.get('id')
    # All three inputs are required.
    if not all([mobile, image_code, uuid]):
        return jsonify(errno=RET.PARAMERR, errmsg='缺少必传参数')
    # Validate the mobile number format.
    if not re.match(r'1[3-9]\d{9}$', mobile):
        return jsonify(errno=RET.PARAMERR, errmsg='手机格式错误')
    # Fetch the stored captcha text.
    try:
        server_image_code = redis_cli.get('ImageCode_' + uuid)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg='查询数据失败')
    # A missing key means the captcha expired or the id is bogus.
    if not server_image_code:
        return jsonify(errno=RET.DATAERR, errmsg='无效的id')
    # Captchas are single-use: delete before comparing.
    try:
        redis_cli.delete('ImageCode_' + uuid)
    except Exception as e:
        current_app.logger.error(e)
    # Case-insensitive comparison.
    if image_code.lower() != server_image_code.lower():
        return jsonify(errno=RET.DATAERR, errmsg='验证码错误')
    # Refuse numbers that are already registered.
    try:
        user = User.query.filter_by(mobile=mobile).first()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg='数据库异常')
    if user:
        return jsonify(errno=RET.DATAEXIST, errmsg='手机号已注册')
    # Six-digit, zero-padded SMS code.
    sms_code = '%06d' % random.randint(1, 999999)
    current_app.logger.info(sms_code)
    # Store it with an expiry so the registration endpoint can verify it.
    try:
        pl = redis_cli.pipeline()
        pl.setex('SMSCode_' + mobile, constants.SMS_CODE_REDIS_EXPIRES, sms_code)
        pl.execute()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg='保存数据失败')
    # Send via the cloud SMS provider (template 1, 5-minute validity).
    # NOTE(review): the provider's return value is ignored, so a non-raising
    # delivery failure is reported as success — confirm intent.
    try:
        sms = CCP()
        sms.send_template_sms(mobile, [sms_code, 5], 1)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.THIRDERR, errmsg='发送短信异常')
    return jsonify(errno=RET.OK, errmsg='发送成功')
@api.route("/users", methods=['POST'])
def register():
    """
    Register a new user from a JSON body of mobile / sms_code / password.
    :return: JSON result; on success `data` carries the new user's dict
    """
    # request.json.get() would fetch single values; take the whole JSON body.
    json_data = request.get_json()
    if not json_data:
        return jsonify(errno=RET.PARAMERR, errmsg='参数错误')
    # Pull the individual parameters out of the payload.
    mobile = json_data.get('mobile')
    sms_code = json_data.get('sms_code')
    password = json_data.get('password')
    if not all([mobile, sms_code, password]):
        return jsonify(errno=RET.PARAMERR, errmsg='缺少必传参数')
    # Validate the mobile number format.
    if not re.match(r'1[3-9]\d{9}$', mobile):
        return jsonify(errno=RET.PARAMERR, errmsg='手机格式错误')
    # Fetch the real SMS code from redis.
    try:
        server_sms_code = redis_cli.get('SMSCode_' + mobile)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg='查询数据失败')
    # A missing key means the code expired.
    if not server_sms_code:
        return jsonify(errno=RET.DATAERR, errmsg='数据失效')
    # Compare first, then delete (single use).
    if server_sms_code != str(sms_code):
        return jsonify(errno=RET.DATAERR, errmsg='短信验证码错误')
    try:
        redis_cli.delete('SMSCode_' + mobile)
    except Exception as e:
        current_app.logger.error(e)
    # Build the user; name defaults to the mobile number.  The password
    # attribute is presumably a hashing setter on User — confirm the model.
    user = User()
    user.mobile = mobile
    user.name = mobile
    user.password = password
    try:
        db.session.add(user)
        db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        # Roll back so the session stays usable after the failed insert.
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg='用户注册失败')
    # Cache login state in the (redis-backed) session.
    session['user_id'] = user.id
    session['name'] = mobile
    session['mobile'] = mobile
    # `data` carries registration-result details back to the front end.
    return jsonify(errno=RET.OK, errmsg='注册成功', data=user.to_dict())
@api.route("/sessions", methods=['POST'])
def login():
    """
    Log a user in from a JSON body of mobile / password.
    :return: JSON result; on success `data` carries the user_id
    """
    # JSON body required.
    json_data = request.get_json()
    if not json_data:
        return jsonify(errno=RET.PARAMERR, errmsg='参数错误')
    # Extract the credentials.
    mobile = json_data.get('mobile')
    password = json_data.get('password')
    if not all([mobile, password]):
        return jsonify(errno=RET.PARAMERR, errmsg='缺少必传参数')
    # Validate the mobile number format.
    if not re.match(r'1[3-9]\d{9}$', mobile):
        return jsonify(errno=RET.PARAMERR, errmsg='手机格式错误')
    # Look the user up by mobile.
    try:
        user = User.query.filter(User.mobile==mobile).first()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg='查询失败')
    # One generic message for both unknown user and wrong password.
    if not user or not user.check_password(password):
        return jsonify(errno=RET.DATAERR, errmsg='用户名或密码错误')
    # Cache login state in the (redis-backed) session.
    session['user_id'] = user.id
    session['mobile'] = mobile
    session['name'] = user.name  # the user may have renamed; default is the mobile number
    # Respond.
    return jsonify(errno=RET.OK, errmsg='OK', data={'user_id': user.id})
|
import tensorflow as tf
from sklearn import datasets
import numpy as np
# Fix the RNG so batch sampling is reproducible across runs.
np.random.seed(0)
def get_batch(X, size):
    """Return `size` rows of X sampled uniformly without replacement."""
    chosen = np.random.choice(len(X), size, replace=False)
    return X[chosen]
class AutoEncoder():
    """4-2-4 autoencoder over iris features (TensorFlow 1.x graph API).

    encode compresses the 4 inputs to a 2-unit code; decode reconstructs the
    4 features from that code; loss is the mean squared reconstruction error.
    """
    def __init__(self):
        self.input = tf.placeholder(tf.float32, [None, 4])
        self.encode = self._encode()
        self.decode = self._decode()
        # Mean squared reconstruction error.
        self.loss = tf.reduce_mean(tf.square(tf.subtract(self.input, self.decode)))
        self.train_op = tf.train.AdamOptimizer(1e-4).minimize(self.loss)
        self.saver = tf.train.Saver(max_to_keep=3)

    def _encode(self):
        # 4 -> 2 bottleneck.
        return tf.layers.dense(self.input, 2)

    def _decode(self):
        # BUG FIX: reconstruct from the bottleneck code, not from the raw
        # input.  The original used self.input here, which bypasses the
        # encoder entirely and reduces the model to a single 4->4 layer.
        return tf.layers.dense(self.encode, 4)

    def train(self, data):
        """Train for 20000 steps on random 100-row batches, logging and
        checkpointing every 500 steps."""
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(1, 20001):
                sess.run(self.train_op, feed_dict={self.input: get_batch(data, 100)})
                if i % 500 == 0:
                    loss_val = sess.run(self.loss, feed_dict={self.input: get_batch(data, 100)})
                    print("step = {},loss = {:.5f}".format(i, loss_val))
                    # One save with the meta graph suffices; the original also
                    # saved once with write_meta_graph=False to the same path,
                    # which was redundant.
                    self.saver.save(sess, 'model/enocder', global_step=i)

    def test(self, data):
        """Restore the latest checkpoint and print input / code / output."""
        with tf.Session() as sess:
            # NOTE(review): train() saves to 'model/enocder' (sic) but this
            # imports 'model/model.ckpt-20000.meta' — the checkpoint names
            # disagree; confirm which is intended.
            saver = tf.train.import_meta_graph('model/model.ckpt-20000.meta')
            saver.restore(sess, tf.train.latest_checkpoint("model/"))
            hidden, reconstructed = sess.run([self.encode, self.decode], feed_dict={self.input: data})
            print('input', data)
            print('compressed', hidden)
            print('reconstructed', reconstructed)
# Demo driver: load the 4-feature iris data (150 x 4) and run the autoencoder.
data = datasets.load_iris().data
encoder = AutoEncoder()
# encoder.train(data)
encoder.test(data[0:5, ])
|
from keras.datasets import cifar10
from sklearn.preprocessing import LabelBinarizer
import numpy as np
from keras.utils import np_utils
#from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Input
from keras.models import Model
from keras.optimizers import SGD,RMSprop,adam
import os
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
class ShallowCNet():
    """Shallow CNN: two 3x3 conv blocks, one pooling stage, one 512-unit FC head."""
    @staticmethod
    def build(width, height, depth, classes, weights_path=None):
        """Build the network (channels-last input of shape (width, height, depth)).

        If `weights_path` is given, pre-trained weights are loaded into the model.
        """
        shape = (width, height, depth)
        inputs = Input(shape)
        # CONV => RELU
        x = Conv2D(32, (3, 3), padding="same", input_shape=shape)(inputs)
        x = Activation("relu")(x)
        # CONV => RELU => POOL
        x = Conv2D(32, (3, 3), padding="same")(x)
        x = Activation("relu")(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        # FC => RELU => Dropout
        x = Flatten()(x)
        x = Dense(512)(x)
        x = Activation("relu")(x)
        x = Dropout(0.5)(x)
        # Softmax classifier head
        outputs = Dense(classes, activation='softmax')(x)
        model = Model(inputs=inputs, outputs=outputs)
        if weights_path:
            model.load_weights(weights_path)
        return model
print("[INFO] loading CIFAR10 data")
((trainX,trainY),(testX,testY))=cifar10.load_data()
# Scale pixel intensities from [0, 255] to [0, 1]
trainX=trainX.astype("float")/255.0
testX=testX.astype("float")/255.0
print(trainX.shape)
print(testX.shape)
np.random.seed(123)
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(trainY, 10)
Y_test = np_utils.to_categorical(testY, 10)
print(Y_test[0:5])
print(Y_test.argmax(axis=1))
# Human-readable names in CIFAR-10 label order (index == class id)
labelNames=["airplane","automobile","bird","cat","deer","dog",
"frog","horse","ship","truck"]
# # evaluate the network
# print("[INFO] evaluating network...")
# Rebuild the architecture, load the pre-trained weights, and score the test set
cifar_model=ShallowCNet.build(width=32, height=32, depth=3, classes=10,weights_path=r'cifar\cifar_weights.h5')
predictions = cifar_model.predict(testX, batch_size=64)
print(classification_report(Y_test.argmax(axis=1),predictions.argmax(axis=1), target_names=labelNames))
|
import jsonpickle
import json
import xlwings as xw
DEBUG = False
# Laplace-smoothing constant used by the naive-Bayes probabilities in doTest()
SMOOTH_E = 0.1
# Ground-truth class label for every test instance, one label per line
testdata_results_file = open("data/intrusion.testlabels.categorized", "r")
testdata_results_str = testdata_results_file.read()
testdata_results = testdata_results_str.split("\n")
testdata_results = testdata_results[:-1]  # drop empty entry after the final newline
print("Reading attacktypes from file...")
# Each line is "<name> <category>"; attacktypes_set keeps the unique categories in order
attacktypes_file = open("data/attacktypes.list", "r")
attacktypes_str = attacktypes_file.read()
attacktypes = attacktypes_str.split("\n")
attacktypes = attacktypes[:-1]
attacktypes_set = []
for idx, item in enumerate(attacktypes):
    item = item.split(" ")
    if item[1] not in attacktypes_set:
        attacktypes_set.append(item[1])
    attacktypes[idx] = item
attacktypes_file.close()
print("Loading processed test data")
# Pre-processed (k-means discretised) test attribute vectors, JSON-encoded
testdata_processed_file = open("processed_data/intrusion.kmeanstestdataprocessed", "r")
testdata = json.loads(testdata_processed_file.read())
testdata_processed_file.close()
print("TESTDATA loaded...")
def loadClassifier(classifier_str):
    """Load and return a jsonpickle-serialised classifier from path `classifier_str`."""
    print("About to load dtree")
    # `with` guarantees the handle is closed even if decoding raises
    with open(classifier_str, "r") as traindata_processed_file:
        trained_dtree = jsonpickle.decode(traindata_processed_file.read())
    print("trained dtree loaded")
    return trained_dtree
def calc_and_save(tree_str, confusion_matrix, offset):
    """Compute accuracy from `confusion_matrix` and write the matrix + score to Excel.

    `offset` is the first worksheet row of this result block; the offset for
    the next block is returned.
    """
    # Accuracy = sum of the diagonal / number of test instances
    correct = 0
    for e in attacktypes_set:
        correct = correct + confusion_matrix[e][e]
    print(((correct * 1.0) / len(testdata)) * 100)
    score = (((correct * 1.0) / len(testdata)) * 100)
    wb = xw.Book('part2_results.xlsx')
    sht = wb.sheets['Sheet1']
    sht.range('A' + str(1 + offset)).value = "Name: "
    sht.range('B' + str(1 + offset)).value = tree_str
    # Row labels (actual class) down column A, starting 4 rows into the block
    for i in range(offset + 4, offset + 4 + len(attacktypes_set)):
        num = 'A' + str(i)
        sht.range(num).value = attacktypes_set[i - 4 - offset]
    # Column labels (predicted class) across the header row
    # NOTE(review): `alphas` caps the layout at 9 classes + the label column
    alphas = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']
    for i in range(1, 1 + len(attacktypes_set)):
        num = alphas[i] + str(3 + offset)
        sht.range(num).value = attacktypes_set[i - 1]
    # Matrix body: cell = count of (actual = row, predicted = column)
    for i in range(offset + 4, offset + 4 + len(attacktypes_set)):
        for j in range(1, 1 + len(attacktypes_set)):
            num = alphas[j] + str(i)
            sht.range(num).value = confusion_matrix[attacktypes_set[i-4-offset]][attacktypes_set[j-1]]
    sht.range('A' + str(10 + offset)).value = "Accuracy:"
    sht.range('B' + str(10 + offset)).value = score
    # Each result block occupies 13 worksheet rows
    offset += 13
    return offset
def doTest(classifier):
    """Classify every test instance with smoothed naive Bayes; return the confusion matrix.

    Assumed `classifier` layout (as produced by training — confirm against trainer):
      classifier[0][class][attr] -> {attr_value: count}
      classifier[1][class][attr] -> total count for that attribute
      classifier[2][class]       -> class count, classifier[3] -> total instances
    """
    # confusion_matrix[actual][predicted] = count
    confusion_matrix = {}
    for i in attacktypes_set:
        confusion_matrix[i] = {}
        for j in attacktypes_set:
            confusion_matrix[i][j] = 0
    for i in range(len(testdata)):
        prob_class = {}
        for e in attacktypes_set:
            prob = 1.0
            attrs = testdata[i]
            for j in range(len(attrs)):
                # if(i > 1117000):
                # print("~~~~~~~~~~~~~")
                # print ("e", e)
                # print ("j", j)
                # print ("attrs", attrs)
                # print("classifier[0][e][j]", classifier[0][str(e)][j])
                # Laplace-smoothed conditional probability P(attr_value | class)
                if(str(attrs[j]) in classifier[0][e][j].keys()):
                    # print("~~~~~~~~~~~~~~~~~~~~~~~~")
                    # print((classifier[0][e][j][str(attrs[j])] * 1.0))
                    # print((classifier[1][e][j] * 1.0))
                    prob = prob * (((classifier[0][e][j][str(attrs[j])] * 1.0) + SMOOTH_E) / ((classifier[1][e][j] * 1.0) + (SMOOTH_E * len(classifier[0][e][j].keys()))))
                    #print(prob)
                else:
                    # Unseen attribute value: only the smoothing mass remains
                    prob = prob * ((SMOOTH_E * 1.0) / (classifier[1][e][j] + (SMOOTH_E * len(classifier[0][e][j].keys()))))
            # Multiply in the class prior P(class)
            prob = prob * ((classifier[2][str(e)] * 1.0) / classifier[3])
            prob_class[str(e)] = prob
        # MAP decision: pick the class with the highest posterior
        cur_max = -1.0
        max_class = None
        for k in prob_class.keys():
            if(prob_class[k] > cur_max):
                cur_max = prob_class[k]
                max_class = k
        result = max_class
        confusion_matrix[testdata_results[i]][result] = confusion_matrix[testdata_results[i]][result] + 1
        #print(prob_class)
        if(i % 1000 == 0):
            print("Done testing " + str(i) + " instances...")
    print(confusion_matrix)
    return confusion_matrix
# Entry point: evaluate the trained classifier and write results to the workbook.
offset = 0
# Part 1 Testing
classifier_str = "part_2/processed_classifier/intrusion.trainedclassifier"
c_matrix = doTest(loadClassifier(classifier_str))
offset = calc_and_save(classifier_str, c_matrix, offset)
|
def properNounCorrection(noun):
    """Return `noun` with its first character upper-cased and the rest lower-cased.

    BUG FIX: the original indexed noun[0] unconditionally and crashed with
    IndexError on an empty string; an empty string is now returned unchanged.
    """
    if not noun:
        return noun
    return noun[0].upper() + noun[1:].lower()
|
from django.urls import path
from . import views
app_name = "main"

# Route table for the "main" app; each `name` is used for reverse() lookups.
urlpatterns = [
    path('', views.HomeView.as_view(), name="home"),
    path('lists/', views.MyListView.as_view(), name="mylist"),
    path('createlist/', views.CreateList.as_view(), name="create"),
    path('<int:pk>/deletelist/', views.DeleteList.as_view(), name="delete"),
    path('updatelist/<int:pk>', views.UpdateList.as_view(), name="update"),
    path('list_detail/<int:pk>', views.DetailList.as_view(), name="detail"),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'petro-ew'
"""
52-1. ================================
добавить в класс car бензобак
максимальный объем бензобака 50 литров
добавить метод для заправки бензобака
аргументом которого будет число с плавуещей точкой
которое обознаяает количество заливаемого топлива
или строку "FUll" если бензобак надо заправить полный
Добавить проверку наличия бензина в метод мув форвард
При создании экземпляра - бензобак пустой )
добавить заправочную станцию )
"""
class Vehicle: # Base class
    """Base vehicle: prints trace messages for construction and movement."""
    def __init__(self):
        print("konstruktor bazovogo klassa Vehicle")
    def move_forvard(self):
        # (sic) "forvard" is kept as-is — subclasses override this exact name
        print("look forward")
        print ("if none == MOVE FORVARD!")
        #print("Move FORWARD!")
    def move_back(self):
        print("Move BACKWARD")
class Car(Vehicle):
    """Car with a 50 L fuel tank; the tank is empty on construction."""
    def __init__(self):
        print("konstructor proizvodnogo klassa Car ot Vehicle")
        self.benz = 0  # current fuel level, litres
        super().__init__()
    def move_forvard(self):
        """Drive forward only if there is fuel in the tank."""
        print("metod proverit benzin")
        # BUG FIX: was `car.benz` (the module-level instance) — every Car
        # object must check its OWN tank via self.benz.
        if self.benz > 0:
            print("zevesti esli ne zavedena") # Vehicle.move_forvard(self)
            super().move_forvard()
        else:
            print("zapravte mashinu!")
    def zapravka(self, benzto):
        """Refuel with `benzto` litres, or fill the tank when benzto == "full"."""
        print("metod zapravki")
        if benzto == "full":
            print("ya edu na Yamaika!")
            benzto = 50 - self.benz  # top up exactly to capacity
            print("zaprivilis na ", benzto)
        if self.benz == 0:
            print("Nado zapravitsua!")
            #exit()
        if self.benz < 50:
            #print("zapravili do 50")
            self.benz = self.benz + benzto
            print("zapravilis na ", self.benz)
        else:
            print("Polniy bak zapravka ne treba")
        if self.benz > 50:
            # Overfilled: cap at the 50 L tank capacity
            print("Nelsya zalit bolshe 50 L!")
            self.benz = 50
    def youbenz(self):
        """Report the current fuel level."""
        print("benzina v bake: ", self.benz)
# Demo: create a car with an empty tank, try to overfill it, then drive.
car = Car()
car.youbenz()
car.zapravka(60)
car.youbenz()
#car.zapravka("full")
car.youbenz()
car.move_forvard()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'petro-ew'
"""
52-1. ================================
добавить в класс car бензобак
максимальный объем бензобака 50 литров
добавить метод для заправки бензобака
аргументом которого будет число с плавуещей точкой
которое обознаяает количество заливаемого топлива
или строку "FUll" если бензобак надо заправить полный
Добавить проверку наличия бензина в метод мув форвард
При создании экземпляра - бензобак пустой )
добавить заправочную станцию )
"""
class Vehicle: # Base class
    """Base vehicle: prints trace messages for construction and movement."""
    def __init__(self):
        print("konstruktor bazovogo klassa Vehicle")
    def move_forvard(self):
        # (sic) "forvard" is kept as-is — subclasses override this exact name
        print("look forward")
        print ("if none == MOVE FORVARD!")
        #print("Move FORWARD!")
    def move_back(self):
        print("Move BACKWARD")
class Car(Vehicle):
    """Car with a 50 L fuel tank; the tank is empty on construction."""
    def __init__(self):
        print("konstructor proizvodnogo klassa Car ot Vehicle")
        self.benz = 0  # current fuel level, litres
        super().__init__()
    def move_forvard(self):
        """Drive forward only if there is fuel in the tank."""
        print("metod proverit benzin")
        # BUG FIX: was `car.benz` (the module-level instance) — every Car
        # object must check its OWN tank via self.benz.
        if self.benz > 0:
            print("zevesti esli ne zavedena") # Vehicle.move_forvard(self)
            super().move_forvard()
        else:
            print("zapravte mashinu!")
    def zapravka(self, benzto):
        """Refuel with `benzto` litres, or fill the tank when benzto == "full"."""
        print("metod zapravki")
        if benzto == "full":
            print("ya edu na Yamaika!")
            benzto = 50 - self.benz  # top up exactly to capacity
            print("zaprivilis na ", benzto)
        if self.benz == 0:
            print("Nado zapravitsua!")
            #exit()
        if self.benz < 50:
            #print("zapravili do 50")
            self.benz = self.benz + benzto
            print("zapravilis na ", self.benz)
        else:
            print("Polniy bak zapravka ne treba")
        if self.benz > 50:
            # Overfilled: cap at the 50 L tank capacity
            print("Nelsya zalit bolshe 50 L!")
            self.benz = 50
    def youbenz(self):
        """Report the current fuel level."""
        print("benzina v bake: ", self.benz)
# Demo: create a car with an empty tank, try to overfill it, then drive.
car = Car()
car.youbenz()
car.zapravka(60)
car.youbenz()
#car.zapravka("full")
car.youbenz()
car.move_forvard()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import Parameter
import torchvision
from models.losses import FocalLoss, TripletLoss
from models.losses import RegL1Loss, RegLoss, NormRegL1Loss, RegWeightedL1Loss
from models.decode import mot_decode
from models.utils import _sigmoid, _tranpose_and_gather_feat
from utils.post_process import ctdet_post_process
from .base_trainer import BaseTrainer
def myphi(x, m):
    """Taylor-series approximation of cos(m * x).

    BUG FIX: the original's final term was -x**9/9!. The cosine series has
    only even powers ((-1)^k * x^(2k) / (2k)!), so the term after +x**8/8!
    must be -x**10/10!.
    """
    x = x * m
    return (1 - x**2/math.factorial(2) + x**4/math.factorial(4)
            - x**6/math.factorial(6) + x**8/math.factorial(8)
            - x**10/math.factorial(10))
class AngleLinear(nn.Module):
    """SphereFace (A-Softmax) angular linear layer.

    Maps a (B, in_features) batch to a pair (cos_theta, phi_theta), each of
    shape (B, out_features), where phi_theta is the margin-adjusted cosine
    consumed by AngleLoss. `m` is the integer angular-margin multiplier.
    """
    def __init__(self, in_features, out_features, m = 4, phiflag=True):
        super(AngleLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(in_features,out_features))
        # Uniform init, then renorm(2,1,1e-5).mul(1e5) — presumably to make the
        # class-weight columns approximately unit-norm; TODO confirm intent
        self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
        self.phiflag = phiflag
        self.m = m
        # Closed-form polynomials for cos(m*theta) in terms of cos(theta), m = 0..5
        self.mlambda = [
            lambda x: x**0,
            lambda x: x**1,
            lambda x: 2*x**2-1,
            lambda x: 4*x**3-3*x,
            lambda x: 8*x**4-8*x**2+1,
            lambda x: 16*x**5-20*x**3+5*x
        ]
    def forward(self, input):
        x = input # size=(B,F) F is feature len
        w = self.weight # size=(F,Classnum) F=in_features Classnum=out_features
        ww = w.renorm(2,1,1e-5).mul(1e5)  # normalised copy of the weights
        xlen = x.pow(2).sum(1).pow(0.5) # size=B
        wlen = ww.pow(2).sum(0).pow(0.5) # size=Classnum
        # Cosine of the angle between each feature vector and each class weight
        cos_theta = x.mm(ww) # size=(B,Classnum)
        cos_theta = cos_theta / xlen.view(-1,1) / wlen.view(1,-1)
        cos_theta = cos_theta.clamp(-1,1)
        if self.phiflag:
            # Exact cos(m*theta) via the closed-form polynomial
            cos_m_theta = self.mlambda[self.m](cos_theta)
            theta = Variable(cos_theta.data.acos())
            # k indexes the monotonic segment of cos(m*theta); the (-1)^k and
            # -2k terms make phi monotonically decreasing in theta
            k = (self.m*theta/3.14159265).floor()
            n_one = k*0.0 - 1
            phi_theta = (n_one**k) * cos_m_theta - 2*k
        else:
            # Approximate cos(m*theta) with the Taylor series in myphi()
            theta = cos_theta.acos()
            phi_theta = myphi(theta,self.m)
            phi_theta = phi_theta.clamp(-1*self.m,1)
        # Rescale by the feature norms
        cos_theta = cos_theta * xlen.view(-1,1)
        phi_theta = phi_theta * xlen.view(-1,1)
        output = (cos_theta,phi_theta)
        return output # size=(B,Classnum,2)
class AngleLoss(nn.Module):
    """A-Softmax (SphereFace) loss over the (cos_theta, phi_theta) pair.

    Anneals from plain softmax towards the angular-margin target: `lamb`
    decays from LambdaMax towards LambdaMin as iterations accumulate, so the
    target-class logit is gradually pulled from cos_theta to phi_theta.
    `gamma` adds optional focal-loss down-weighting (gamma=0 => cross-entropy).
    """
    def __init__(self, gamma=0):
        super(AngleLoss, self).__init__()
        self.gamma = gamma
        self.it = 0  # iteration counter that drives the annealing schedule
        self.LambdaMin = 5.0
        self.LambdaMax = 1500.0
        self.lamb = 1500.0
    def forward(self, input, target):
        """input: (cos_theta, phi_theta), each (B, Classnum); target: (B,) class ids."""
        self.it += 1
        cos_theta, phi_theta = input
        target = target.view(-1, 1)  # size=(B,1)
        # Boolean mask selecting each sample's target-class column
        index = cos_theta.data * 0.0  # size=(B,Classnum)
        idx = index.scatter(1, target.data.view(-1, 1), 1)
        idx = idx.bool()
        idx = Variable(idx)
        # Large lamb => output ~ cos_theta (plain softmax); small lamb => full margin
        self.lamb = max(self.LambdaMin, self.LambdaMax / (1 + 0.1 * self.it))
        output = cos_theta * 1.0  # size=(B,Classnum)
        output[idx] = cos_theta[idx] - cos_theta[idx] * (1.0 + 0) / (1 + self.lamb) + phi_theta[idx] * (1.0 + 0) / (1 + self.lamb)
        # BUG FIX: log_softmax without an explicit dim is deprecated and
        # ambiguous; class scores live along dim=1 for a (B, Classnum) input.
        logpt = F.log_softmax(output, dim=1)
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        pt = Variable(logpt.data.exp())
        # Focal weighting of the per-sample negative log-likelihood
        loss = -1 * (1 - pt) ** self.gamma * logpt
        loss = loss.mean()
        return loss
class MotLoss(torch.nn.Module):
    """FairMOT multi-task loss: detection (heatmap + box size + offset) plus re-ID.

    The detection and identity losses are combined with learned log-variance
    weights s_det / s_id (uncertainty-based multi-task weighting).
    """
    def __init__(self, opt):
        super(MotLoss, self).__init__()
        # Center-heatmap criterion: MSE or (default) focal loss
        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        # Offset/size regression criterion selected by opt.reg_loss
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
            RegLoss() if opt.reg_loss == 'sl1' else None
        # Width/height criterion; falls back to the generic regression criterion
        self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
            NormRegL1Loss() if opt.norm_wh else \
            RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
        self.opt = opt
        self.emb_dim = opt.reid_dim
        self.nID = opt.nID
        # Identity classifier used only to supervise the embedding during training
        self.classifier = nn.Linear(self.emb_dim, self.nID)
        #self.classifier = AngleLinear(self.emb_dim,self.nID)
        self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
        #self.IDLoss = AngleLoss()
        #self.TriLoss = TripletLoss()
        # Scale applied to the L2-normalised embeddings before classification
        self.emb_scale = math.sqrt(2) * math.log(self.nID - 1)
        # Learned task-balance parameters (log-variances)
        self.s_det = nn.Parameter(-1.85 * torch.ones(1))
        self.s_id = nn.Parameter(-1.05 * torch.ones(1))
    def forward(self, outputs, batch):
        """Accumulate losses over all output stacks; returns (loss, loss_stats)."""
        opt = self.opt
        hm_loss, wh_loss, off_loss, id_loss = 0, 0, 0, 0
        for s in range(opt.num_stacks):
            output = outputs[s]
            if not opt.mse_loss:
                # Focal loss expects sigmoid-activated heatmaps
                output['hm'] = _sigmoid(output['hm'])
            hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
            if opt.wh_weight > 0:
                wh_loss += self.crit_reg(
                    output['wh'], batch['reg_mask'],
                    batch['ind'], batch['wh']) / opt.num_stacks
            if opt.reg_offset and opt.off_weight > 0:
                off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                          batch['ind'], batch['reg']) / opt.num_stacks
            '''if opt.id_weight > 0:
                #id_head = _tranpose_and_gather_feat(output['id'], batch['ind'])
                id_head = output['id_feature']
                index = output['index']
                reg_mask = batch['reg_mask'].view(-1)[index]
                gt_id = batch['ids'].view(-1)[index]
                #print(id_head.size())
                id_head = id_head[reg_mask> 0].contiguous()
                #print(id_head.size())
                id_head = self.emb_scale * F.normalize(id_head)
                #id_head_t = id_head.transpose(1,0)
                #length = id_head * id_head_t
                id_target = gt_id[reg_mask > 0]
                #print(id_target.size(),id_target)
                id_output = self.classifier(id_head)
                id_loss += self.IDLoss(id_output, id_target)'''
            if opt.id_weight > 0:
                # id_head = _tranpose_and_gather_feat(output['id'], batch['ind'])
                # Embeddings only at locations that contain a ground-truth object
                id_head = output['id_feature']
                # print(id_head.size())
                id_head = id_head[batch['reg_mask'] > 0].contiguous()
                # print(id_head.size())
                id_head = self.emb_scale * F.normalize(id_head)
                # id_head_t = id_head.transpose(1,0)
                # length = id_head * id_head_t
                id_target = batch['ids'][batch['reg_mask'] > 0]
                # print(id_target.size(),id_target)
                id_output = self.classifier(id_head).contiguous()
                id_loss += self.IDLoss(id_output, id_target)
        det_loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + opt.off_weight * off_loss
        # Uncertainty weighting: exp(-s) * task_loss + s, averaged over the two tasks
        loss = torch.exp(-self.s_det) * det_loss + torch.exp(-self.s_id) * id_loss + (self.s_det + self.s_id)
        loss *= 0.5
        #loss = det_loss
        #print(loss, hm_loss, wh_loss, off_loss, id_loss)
        loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                      'wh_loss': wh_loss, 'off_loss': off_loss, 'id_loss': id_loss}
        return loss, loss_stats
class MotTrainer(BaseTrainer):
    """Trainer that plugs MotLoss into the generic BaseTrainer training loop."""
    def __init__(self, opt, model, optimizer=None):
        super(MotTrainer, self).__init__(opt, model, optimizer=optimizer)
    def _get_losses(self, opt):
        # Names must match the keys of the loss_stats dict returned by MotLoss.forward
        loss_states = ['loss', 'hm_loss', 'wh_loss', 'off_loss', 'id_loss']
        loss = MotLoss(opt)
        return loss_states, loss
    def save_result(self, output, batch, results):
        """Decode one image's detections and store them in `results` keyed by image id."""
        reg = output['reg'] if self.opt.reg_offset else None
        dets = mot_decode(
            output['hm'], output['wh'], reg=reg,
            cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
        dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
        # Map detections from heatmap coordinates back to the original image frame
        dets_out = ctdet_post_process(
            dets.copy(), batch['meta']['c'].cpu().numpy(),
            batch['meta']['s'].cpu().numpy(),
            output['hm'].shape[2], output['hm'].shape[3], output['hm'].shape[1])
        results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0]
|
import argparse
import att_event_engine.att as att
import csv
import datetime
import dateutil.parser
def valid_date(s):
    """Argparse type-checker: accept `s` if dateutil can parse it as a date.

    Returns the original string unchanged (callers pass it through to the API);
    raises argparse.ArgumentTypeError for unparseable input.
    """
    try:
        # Parse purely for validation; the raw string is what gets used
        dateutil.parser.parse(s)
        return s
    except (ValueError, OverflowError):
        # BUG FIX: was a bare `except:` that also swallowed KeyboardInterrupt;
        # dateutil signals bad input via ValueError (ParserError) / OverflowError
        msg = "Not a valid date: '{0}'.".format(s)
        raise argparse.ArgumentTypeError(msg)
# CLI: asset id, credentials, optional output file and date range.
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--asset", help="The id of the asset who's data needs to be exported", required=True)
parser.add_argument("-u", "--user", help="The username to connect with", required=True)
parser.add_argument("-p", "--password", help="The password for the user", required=True)
parser.add_argument("-o", "--output", help="The name of the output file, default = export.csv", default='export.csv')
parser.add_argument("-s", "--start", help="start date of the query (default = 1900-1-1", default="1900-1-1", type=valid_date)
parser.add_argument("-e", "--end", help="end date of the query (default = now()", default=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), type=valid_date)
args = parser.parse_args()
iot = att.HttpClient()
iot.connect_api(args.user, args.password, "api.allthingstalk.io")
done = False
page = 0
# NOTE(review): 'wb' is the Python 2 csv idiom; on Python 3 this must be
# open(args.output, 'w', newline='') or csv.writer will fail writing str to a
# binary file — confirm which interpreter this script targets.
with open(args.output, 'wb') as fp:
    writer = csv.writer(fp) # , delimiter=';'
    # Page through the asset's history until the API stops returning a 'next' link
    while not done:
        res = iot.get_history(args.asset, args.start, args.end, page)
        writer.writerows([[rec["at"], rec["data"]] for rec in res['data'] if rec])
        page += 1
        done = 'next' not in res['links']
print ("done")
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
# Public API of this generated enums module.
__all__ = [
    'ResourceIdentityType',
    'SelectorType',
    'TargetReferenceType',
]
class ResourceIdentityType(str, Enum):
    """
    String of the resource identity type.
    """
    # Values are wire-format strings from the generated API spec; do not change.
    NONE = "None"
    SYSTEM_ASSIGNED = "SystemAssigned"
class SelectorType(str, Enum):
    """
    Enum of the selector type.
    """
    # Values are wire-format strings from the generated API spec; do not change.
    PERCENT = "Percent"
    RANDOM = "Random"
    TAG = "Tag"
    LIST = "List"
class TargetReferenceType(str, Enum):
    """
    Enum of the Target reference type.
    """
    # Value is a wire-format string from the generated API spec; do not change.
    CHAOS_TARGET = "ChaosTarget"
|
import json
import unittest
import os
import requests
import sqlite3
import re
# FetchFlights.py
# This peice of code calls two API's to gather data and store it in thee different tables: flights, locals, and COVID under the Data.db database
# The purpose of this peice of code is to fetch the arrivals for the day from the Detroit Airport (400 Arrivals Daily), find where they came from, and then get data about coronavirus positivity from those origins
''' FUNCTIONS '''
# PURPOSE: Figure out if the data.db database has any tables in it and if so how much data is in
# those tables to know how many flights from the arrivals board to offset
# INPUT: The cursor to the open database
# OUTPUT: Record Quantity
def databaseValidation(cur):
    """Return the number of rows already stored in the Flights table.

    Returns 0 when the table does not exist yet (fresh database), which
    main() uses as the signal to create the schema from scratch.
    """
    try:
        # Let SQLite count in C instead of iterating every row in Python
        cur.execute("SELECT COUNT(*) FROM Flights")
        return cur.fetchone()[0]
    # BUG FIX: was a bare `except:`; only database errors (missing table)
    # should mean "empty database"
    except sqlite3.Error:
        return 0
# PURPOSE: Get connected to database
# INPUT: The database name
# OUTPUT: A cursor and connection
def setUpDatabase(db_name):
    """Open (creating if needed) the SQLite db next to this script.

    Returns a (cursor, connection) pair.
    """
    path = os.path.dirname(os.path.abspath(__file__))
    # BUG FIX: os.path.join instead of hard-coded '/' keeps the path portable
    conn = sqlite3.connect(os.path.join(path, db_name))
    cur = conn.cursor()
    return cur, conn
# PURPOSE: Retrive additional context for the origin airport of any given flight
# INPUT: The airport code of any given airport
# OUTPUT: A database ready tuple of the airport code, airport longitude, airport latitude,
# airport state, and airport country to be used for visulizations and to get COVID-19 data
def getAirportData(code):
    """Fetch airport metadata from FlightAware for airport `code`.

    Returns a DB-ready tuple (code, lng, lat, state, city, country_code);
    placeholder values are returned when the response cannot be parsed.
    """
    print("Getting Airport Info from FlightAware about " + code)
    # NOTE(review): API credentials are hard-coded in the URL — move to config/env.
    data = requests.get("http://traveltimeiosapp:b34cb08f23579a0812280da25b76aee4e47bac16@flightxml.flightaware.com/json/FlightXML3/AirportInfo?airport_code=" + code)
    try:
        jsonData = json.loads(data.text)["AirportInfoResult"]
        return (str(jsonData.get("airport_code", code)), float(jsonData.get("longitude", 0.0)), float(jsonData.get("latitude", 0.0)), str(jsonData.get("state", "--")), str(jsonData.get("city", "--")), str(jsonData.get("country_code", "--")))
    # BUG FIX: was a bare `except:`; only JSON/shape errors mean "bad response"
    except (ValueError, KeyError, TypeError):
        print("Uh-Oh... we are having some trouble connecting to FlightAware")
        return (code, 0.0, 0.0, "--", "--", "--")
# PURPOSE: Get 25 Arrivals from FlightAware Airport Arrivals Boards API
# INPUT: This function takes in how many recent arrivals to offset from the most recent touchdown
# (this is rapidly changing with every landing)
# OUTPUT: returns the arrivals board for the DTW airport 25 flights at a time
def flightBoardDTW(offset):
    """Fetch 25 DTW arrivals from the FlightAware AirportBoards API.

    `offset` skips that many most-recent arrivals; returns the list of flight
    dicts, or None when the response cannot be parsed.
    """
    print("Getting 25 Flights from FlightAware Arrivals Board API")
    # NOTE(review): API credentials are hard-coded in the URL — move to config/env.
    data = requests.get("http://traveltimeiosapp:b34cb08f23579a0812280da25b76aee4e47bac16@flightxml.flightaware.com/json/FlightXML3/AirportBoards?howMany=25&offset=" + str(offset) + "&airport_code=KDTW&include_ex_data=true&type=arrivals&filter=airline")
    try:
        return json.loads(data.text)["AirportBoardsResult"]["arrivals"]["flights"]
    # BUG FIX: was a bare `except:`; only JSON/shape errors mean "bad response"
    except (ValueError, KeyError, TypeError):
        print("Uh-Oh... we are having some trouble connecting to FlightAware")
        return None
# PURPOSE: Get coronavirus statistics for a particular state
# INPUT: a particular state
# OUTPUT: basic coronavirus quantities from yesterday or last Friday if weekend
def getCoronaData(state):
    """Fetch yesterday's COVID-19 counts for a US `state`.

    Returns a DB-ready tuple (state, new_positive, new_negative, deaths);
    zeros are returned when the response cannot be parsed.
    """
    print("Getting COVID data for " + state)
    data = requests.get("https://localcoviddata.com/covid19/v1/cases/covidTracking?state=" + state + "&daysInPast=1")
    try:
        dataPack = json.loads(data.text)["historicData"][0]
        return (state, int(dataPack.get("peoplePositiveNewCasesCt", 0)), int(dataPack.get("peopleNegativeNewCt", 0)), int(dataPack.get("peopleDeathCt", 0)))
    # BUG FIX: was a bare `except:`; only JSON/shape errors mean "bad response"
    except (ValueError, KeyError, TypeError, IndexError):
        print("Uh-Oh... we are having some trouble connecting to Muelsoft COVID-19 Data")
        return (state, 0, 0, 0)
# Runner of program that gets flights, loops through them, and calls remaining functions to complete file's mission
def main():
    """Fetch 25 DTW arrivals, enrich them with airport + COVID data, persist to SQLite."""
    manifest = [] # list of airports in db
    cur, conn = setUpDatabase("data.db")
    validateDatabaseCount = databaseValidation(cur)
    print("Starting at " + str(validateDatabaseCount) + "ith position in Database\n")
    if(validateDatabaseCount == 0):
        # Fresh database: create the three tables
        cur.execute("CREATE TABLE IF NOT EXISTS Flights (flightNumber TEXT PRIMARY KEY, origin TEXT, PAXCount INTEGER)")
        cur.execute("CREATE TABLE IF NOT EXISTS Locals (code TEXT PRIMARY KEY, lng DOUBLE, lat DOUBLE, state TEXT, cityName TEXT, countryCode TEXT)")
        cur.execute("CREATE TABLE IF NOT EXISTS Corona (state TEXT PRIMARY KEY, peoplePositiveNewCasesCt INTEGER, peopleNegativeNewCt INTEGER, peopleDeathCt INTEGER)")
    else:
        #Find which airports we already have in DB
        cur.execute("SELECT code FROM Locals")
        for code in cur.fetchall():
            manifest.append(code[0])
    # Offset past rows already stored (+2 allows for arrivals landing mid-run)
    grabFlights = flightBoardDTW(validateDatabaseCount+2)
    for flight in grabFlights:
        # Convience Print to Acknoledge Processing of the Flight
        print("\nProcessing: " + flight["ident"])
        # Inject flight into database accounting for any arrivals that may land while this is being run and push the departures list back
        cur.execute("INSERT OR IGNORE INTO Flights (flightNumber, origin, PAXCount) VALUES (?, ?, ?) ", (str(flight["ident"]), str(flight["origin"]["code"]), (int(flight.get("seats_cabin_business", "0")) + int(flight.get("seats_cabin_coach", "0")))))
        #if airport code is novel, call api for more data and then
        if (str(flight["origin"]["code"]) not in manifest):
            #Get & Store Data about the Airport from the flight
            airportData = getAirportData(str(flight["origin"]["code"]))
            cur.execute("INSERT OR IGNORE INTO Locals (code, lng, lat, state, cityName, countryCode) VALUES (?, ?, ?, ?, ?, ?) ", airportData)
            #Get Corona Data for all US States that the flight originiated from. International Origins are not supported by the API and are injected into database
            if(airportData[5] == "US"):
                cur.execute("INSERT OR IGNORE INTO Corona (state, peoplePositiveNewCasesCt, peopleNegativeNewCt, peopleDeathCt) VALUES (?, ?, ?, ?) ", getCoronaData(airportData[3]))
        else:
            print("Found Data for " + str(flight["origin"]["code"]) + " in DB. Bypassing Call to save API Limit")
    conn.commit()
    conn.close()
# Script entry point (runs on import; no __main__ guard in the original)
main()
"""
index()
The index() method returns the index of a substring inside the string (if found).
If the substring is not found, it raises an exception.
"""
# Example 1
sentence = 'Python programming is fun.'
result = sentence.index('is fun')
print("Substring 'is fun':", result)
# index() raises ValueError when the substring is not found:
# result = sentence.index('Java')
# print("Substring 'Java':", result)
# Example 2
sentence = 'Python programming is fun.'
print(sentence.index('ing', 10))
print(sentence.index('g is', 10, -4))
# BUG FIX: the original used end=18, but 'fun' starts at index 22 and the whole
# match must lie inside [start, end), so index('fun', 7, 18) raised ValueError.
print(sentence.index('fun', 7, 25))
from relationalai import LocalConnection
import unittest
class TestListSource(unittest.TestCase):
    """Integration test for list_source() against a freshly-created database."""
    def testListSource(self):
        # Requires a reachable RAI server; recreates the "python-sdk" database.
        conn = LocalConnection(dbname="python-sdk")
        conn.debug_level = 1
        self.assertTrue(conn.create_database(overwrite=True))
        self.assertTrue(conn.list_source())
if __name__ == '__main__':
unittest.main()
|
# Unsent messages (consumed destructively below) and an archive of sent ones.
messages = ["Hello", "How are you?", "What're you doing later?"]
sent_messages = list()
def send_messages(messages, sent_messages):
    """Pop every message off `messages` (back to front), print it, and archive it.

    Mutates both lists in place: `messages` ends up empty and `sent_messages`
    receives the texts in reverse order.
    """
    while messages:
        msg = messages.pop()
        print(f"\nSending message, {msg}")
        sent_messages.append(msg)
# After this call `messages` is empty; texts were popped from the end,
# so sent_messages holds them in reverse order.
send_messages(messages, sent_messages)
print(messages)
print(sent_messages)
import os
import time
import random
import pyshark
import itertools
from pyshark.packet.layer import JsonLayer
import logging
import ipaddress
import ciphersuite_parser
class ZeroPacketError(Exception):
    """Raised when a parsed pcap file yields zero packets.

    Exception.__init__ already stores the message, so the original's custom
    __init__ that only forwarded to super() was redundant and is removed.
    """
def searchEnums(rootdir, limit):
    """
    Given a root directory containing all the pcap files, it will search for all possible enums
    and return a dict of enum lists (compression methods, supported groups,
    client/cert signature-hash algorithms).
    Set a hard limit on the number of files iterated through to save time
    """
    compressionmethods = []
    supportedgroups = []
    sighashalgorithms_client = []
    sighashalgorithms_cert = []
    success = 0
    failed = 0
    logging.info("Traversing through directory to find all enums...")
    files = os.listdir(rootdir)
    # Fixed seed => reproducible sample of at most `limit` files
    random.seed(2019)
    sampled_files = random.sample(files, min(len(files), limit))
    pkt_limit = 500  # per-file cap on packets inspected
    for f in sampled_files:
        if f.endswith(".pcap"):
            pcapfile_capture = pyshark.FileCapture(os.path.join(rootdir,f))
            packets = []
            try:
                # pyshark raises KeyError when indexing past the last packet
                for i in range(pkt_limit):
                    packets.append(pcapfile_capture[i])
            except KeyError:
                pass
            pcapfile_capture.close()
            try:
                logging.info("Processing {}".format(f))
                starttime_traffic = time.time()
                found_clienthello = False # Variable for ending the packet loop if ClientHello is found
                found_certificate = False # Variable for ending the packet loop if Certificate is found
                for packet in packets:
                    starttime_packet = time.time()
                    # Compression Methods
                    traffic_compressionmethods = extractClienthelloCompressionmethod(packet)
                    compressionmethods.extend(traffic_compressionmethods)
                    # Supported Groups
                    traffic_supportedgroups = extractClienthelloSupportedgroup(packet)
                    supportedgroups.extend(traffic_supportedgroups)
                    # Clienthello - Signature Hash Algorithm
                    traffic_sighashalgorithms_client = extractClienthelloSignaturehash(packet)
                    sighashalgorithms_client.extend(traffic_sighashalgorithms_client)
                    if traffic_compressionmethods or traffic_sighashalgorithms_client:
                        found_clienthello = True
                    # Certificate - Signature Hash Algorithm
                    traffic_sighashalgorithms_cert = extractCertificate(packet)
                    sighashalgorithms_cert.extend(traffic_sighashalgorithms_cert)
                    if traffic_sighashalgorithms_cert:
                        found_certificate = True
                    logging.debug("Time spent on packet: {}s".format(time.time()-starttime_packet))
                    # Break the loop once both ClientHello and Certificate are found
                    if found_clienthello and found_certificate:
                        break
                logging.debug("Time spent on traffic: {}s".format(time.time()-starttime_traffic))
                # If ClientHello cannot be found in the traffic
                if not found_clienthello:
                    logging.warning("No ClientHello found for file {}".format(os.path.join(rootdir,f)))
                if not found_certificate:
                    logging.warning("No Certificate found for file {}".format(os.path.join(rootdir,f)))
                # De-duplicate after every file so the lists stay small
                compressionmethods = list(set(compressionmethods))
                supportedgroups = list(set(supportedgroups))
                sighashalgorithms_client = list(set(sighashalgorithms_client))
                sighashalgorithms_cert = list(set(sighashalgorithms_cert))
                success += 1
            # Skip this pcap file
            except (KeyError, AttributeError, TypeError):
                logging.exception('Known error in file {}. Traffic is skipped'.format(f))
                failed+=1
            except Exception:
                # NOTE(review): missing .format(f) here — "{}" is logged literally
                logging.exception('Unknown error in file {}. Traffic is skipped')
                failed += 1
    logging.info("Done processing enum")
    print("Processed enums in directory {}: {} success, {} failure".format(rootdir,success, failed))
    enum = {}
    enum['compressionmethods'] = compressionmethods
    enum['supportedgroups'] = supportedgroups
    enum['sighashalgorithms_client'] = sighashalgorithms_client
    enum['sighashalgorithms_cert'] = sighashalgorithms_cert
    return enum
def extract_tcp_features(pcapfile, limit):
    """Extract one TCP-level feature vector per packet from `pcapfile`.

    At most `limit` packets are read. Each vector is
    [come/leave, protocol one-hot (6), length, interval, flags (9), window size].
    Raises ZeroPacketError if the capture yields no packets.
    """
    # Traffic features for storing features of packets
    traffic_features = []
    traffic_appdata_segments_data = []
    pcapfile_capture = pyshark.FileCapture(pcapfile)
    packets = []
    try:
        # pyshark raises KeyError when indexing past the last packet
        for i in range(limit):
            packets.append(pcapfile_capture[i])
    except KeyError:
        pass
    pcapfile_capture.close()
    for packet in packets:
        packet_features = []
        # 1: COME/LEAVE
        comeLeaveFeature = extractComeLeaveFromPacket(packet)
        packet_features.extend(comeLeaveFeature)
        # 2: PROTOCOL
        protocolFeature = extractProtocolFromPacket(packet)
        packet_features.extend(protocolFeature)
        # 3: PACKET LENGTH
        lengthFeature = extractLengthFromPacket(packet)
        packet_features.extend(lengthFeature)
        # 4: INTERVAL
        intervalFeature = extractIntervalFromPacket(packet)
        packet_features.extend(intervalFeature)
        # 5: FLAG
        flagFeature = extractFlagFromPacket(packet)
        packet_features.extend(flagFeature)
        # 6: WINDOW SIZE
        windowSizeFeature = extractWindowSizeFromPacket(packet)
        packet_features.extend(windowSizeFeature)
        traffic_features.append(packet_features)
        # Remember reassembled app-data segments so their protocol one-hot can
        # be overwritten below with this record's protocol feature
        packet_appdata_segments_idx = findIdxOfAppDataSegments(packet)
        if packet_appdata_segments_idx:
            traffic_appdata_segments_data.append((packet_appdata_segments_idx, protocolFeature))
    # Protocol one-hot occupies columns [1, 7) of each feature vector
    prot_start_idx = 1
    num_prot = 6
    for idx_list, flag in traffic_appdata_segments_data:
        for idx in idx_list:
            traffic_features[idx][prot_start_idx:prot_start_idx + num_prot] = flag
    if len(traffic_features) == 0:
        raise ZeroPacketError('Pcap file contains no packet')
    return traffic_features
def extractComeLeaveFromPacket(packet):
    """Return a 1-element feature: 1 when the destination IP is private
    (traffic coming into the local network), else 0 (leaving)."""
    dst_addr = ipaddress.ip_address(str(packet.ip.dst))
    return [1 if dst_addr.is_private else 0]
def extractProtocolFromPacket(packet):
    """One-hot encode the record protocol version of a packet.

    Slots: ['TCP' (-), 'SSL2.0' (0x0200), 'SSL3.0' (0x0300),
            'TLS1.0' (0x0301), 'TLS1.1' (0x0302), 'TLS1.2' (0x0303)].
    Unknown SSL versions (warned) and non-TCP packets yield all zeros.
    """
    protocol_ver = [0, 512, 768, 769, 770, 771]
    feature = [0] * len(protocol_ver)
    # Bug fix: keep `protocol` bound even when int() raises ValueError,
    # otherwise the except-handler below would itself raise NameError.
    protocol = None
    try:
        if hasattr(packet, 'ssl'):
            protocol = int(packet.ssl.record_version.show, 16)
            feature[protocol_ver.index(protocol)] = 1
        elif hasattr(packet, 'tcp'):
            feature[0] = 1
    except ValueError:
        # From int() on a malformed version string, or .index() on an
        # unseen version value
        logging.warning('Found SSL packet with unknown SSL type {}'.format(protocol))
    except AttributeError:
        pass
    return feature
def extractLengthFromPacket(packet):
    """Return the frame length of *packet* as a 1-element int feature."""
    frame_len = int(packet.length)
    return [frame_len]
def extractIntervalFromPacket(packet):
    """Return the inter-arrival delta of *packet* in milliseconds."""
    delta_ms = float(packet.frame_info.time_delta) * 1000
    return [delta_ms]
def extractFlagFromPacket(packet):
    """Return the 9 TCP flag bits of *packet* as a list of 0/1 ints.

    Packets without a tcp layer yield nine zeros.
    """
    num_of_flags = 9
    try:
        flags_value = int(packet.tcp.flags, 16)
    except AttributeError:
        return [0] * num_of_flags
    # bin() -> '0b...'; strip the prefix, left-pad to the 9 flag positions
    bits = bin(flags_value)[2:].zfill(num_of_flags)
    return [int(bit) for bit in bits]
def extractWindowSizeFromPacket(packet):
    """Return the scaled TCP window size; [0] for non-TCP packets."""
    try:
        # window_size is already scaled (value * scaling factor) by tshark
        return [int(packet.tcp.window_size)]
    except AttributeError:
        return [0]
def extract_tslssl_features(pcapfile, enums, limit):
    """Extract per-packet TLS/SSL handshake features from a pcap file.

    Reads up to ``limit`` packets and builds one float feature row per
    packet by concatenating, in a fixed order the downstream model depends
    on, the numbered features below (ClientHello/ServerHello/Certificate/
    key-exchange/ChangeCipherSpec/application-data fields). ``enums`` maps
    the reference enum lists used for many-hot encodings.

    After the pass, packets that were TCP segments of a reassembled
    application-data PDU get their app-data length (last column) replaced
    by their own tcp.len. Raises ZeroPacketError on an empty capture.
    """
    enumCompressionMethods = enums['compressionmethods']
    enumSupportedGroups = enums['supportedgroups']
    enumSignatureHashClient = enums['sighashalgorithms_client']
    enumSignatureHashCert = enums['sighashalgorithms_cert']
    # Traffic features for storing features of packets
    traffic_features = []
    traffic_appdata_segments_idx = []
    pcapfile_capture = pyshark.FileCapture(pcapfile)
    packets = []
    try:
        # pyshark raises KeyError past the last packet; that ends the read
        for i in range(limit):
            packets.append(pcapfile_capture[i])
    except KeyError:
        pass
    pcapfile_capture.close()
    for packet in packets:
        packet_features = []
        # HANDSHAKE PROTOCOL
        ##################################################################
        # 1: ClientHello - LENGTH
        clienthelloLengthFeature = extractClienthelloLength(packet)
        packet_features.extend(clienthelloLengthFeature)
        # 2: ClientHello - CIPHER SUITE
        clienthelloCiphersuiteFeature = extractClienthelloCiphersuite(packet)
        packet_features.extend(clienthelloCiphersuiteFeature)
        # 3: ClientHello - CIPHER SUITE LENGTH
        clienthelloCiphersuiteLengthFeature = extractClienthelloCiphersuiteLength(packet)
        packet_features.extend(clienthelloCiphersuiteLengthFeature)
        # 4: ClientHello - COMPRESSION METHOD
        clienthelloCompressionMethodFeature = extractClienthelloCompressionmethodAndEncode(packet,
                                                                                           enumCompressionMethods)
        packet_features.extend(clienthelloCompressionMethodFeature)
        # 5: ClientHello - SUPPORTED GROUP LENGTH
        clienthelloSupportedgroupLengthFeature = extractClienthelloSupportedgroupLength(packet)
        packet_features.extend(clienthelloSupportedgroupLengthFeature)
        # 6: ClientHello - SUPPORTED GROUPS
        clienthelloSupportedgroupFeature = extractClienthelloSupportedgroupAndEncode(packet, enumSupportedGroups)
        packet_features.extend(clienthelloSupportedgroupFeature)
        # 7: ClientHello - ENCRYPT THEN MAC LENGTH
        clienthelloEncryptthenmacLengthFeature = extractClienthelloEncryptthenmacLength(packet)
        packet_features.extend(clienthelloEncryptthenmacLengthFeature)
        # 8: ClientHello - EXTENDED MASTER SECRET
        clienthelloExtendedmastersecretLengthFeature = extractClienthelloExtendedmastersecretLength(packet)
        packet_features.extend(clienthelloExtendedmastersecretLengthFeature)
        # 9: ClientHello - SIGNATURE HASH ALGORITHM
        clienthelloSignaturehashFeature = extractClienthelloSignaturehashAndEncode(packet, enumSignatureHashClient)
        packet_features.extend(clienthelloSignaturehashFeature)
        # 10: ServerHello - LENGTH
        serverhelloLengthFeature = extractServerhelloLength(packet)
        packet_features.extend(serverhelloLengthFeature)
        # 11: ServerHello - EXTENDED MASTER SECRET
        # Feature cannot be found in the packet
        # 12: ServerHello - RENEGOTIATION INFO LENGTH
        serverhelloRenegoLengthFeature = extractServerhelloRenegoLength(packet)
        packet_features.extend(serverhelloRenegoLengthFeature)
        # 13,14,15,16: Certificate - NUM_CERT, AVERAGE, MIN, MAX CERTIFICATE LENGTH
        certificateLengthInfoFeature = extractCertificateLengthInfo(packet)
        packet_features.extend(certificateLengthInfoFeature)
        # 17: Certificate - SIGNATURE ALGORITHM
        certificateFeature = extractCertificateAndEncode(packet, enumSignatureHashCert)
        packet_features.extend(certificateFeature)
        # 18: ServerHelloDone - LENGTH
        serverhellodoneLengthFeature = extractServerhellodoneLength(packet)
        packet_features.extend(serverhellodoneLengthFeature)
        # 19: ClientKeyExchange - LENGTH
        clientkeyexchangeLengthFeature = extractClientkeyexchangeLength(packet)
        packet_features.extend(clientkeyexchangeLengthFeature)
        # 20: ClientKeyExchange - PUBKEY LENGTH
        clientkeyexchangePubkeyLengthFeature = extractClientkeyexchangePubkeyLength(packet)
        packet_features.extend(clientkeyexchangePubkeyLengthFeature)
        # 21: EncryptedHandshakeMessage - LENGTH
        encryptedhandshakemsgLengthFeature = extractEncryptedhandshakemsgLength(packet)
        packet_features.extend(encryptedhandshakemsgLengthFeature)
        # CHANGE CIPHER PROTOCOL
        ##################################################################
        # 22: ChangeCipherSpec - LENGTH
        changecipherspecLengthFeature = extractChangeCipherSpecLength(packet)
        packet_features.extend(changecipherspecLengthFeature)
        # APPLICATION DATA PROTOCOL
        ##################################################################
        # 23: ApplicationDataProtocol - LENGTH
        # Set app data length for pure app data packets first, modify app data length for TCP segments
        # for reassembled PDU later
        appdataLengthFeature = extractAppDataLength(packet)
        packet_features.extend(appdataLengthFeature)
        # Finding all index of TCP segments for reassembed PDU
        packet_appdata_segments_idx = findIdxOfAppDataSegments(packet)
        traffic_appdata_segments_idx.extend(packet_appdata_segments_idx)
        # Convert to float for standardization
        packet_features = [float(i) for i in packet_features]
        traffic_features.append(packet_features)
    traffic_appdata_segments_idx = list(set(traffic_appdata_segments_idx))  # Remove duplicates from the list first
    for idx in traffic_appdata_segments_idx:
        try:
            # Use tcp.len as the application data length
            traffic_features[idx][-1] = float(packets[idx].tcp.len)
        except AttributeError:
            pass
    if len(traffic_features) == 0:
        raise ZeroPacketError('Pcap file contains no packet')
    return traffic_features
def extractClienthelloLength(packet):
    """Return the ClientHello handshake length, or [0] when absent."""
    CLIENTHELLO_TYPE = '1'
    try:
        hs_types = [f.show for f in packet.ssl.handshake_type.all_fields]
        hs_lens = [f.show for f in packet.ssl.handshake_length.all_fields]
        position = hs_types.index(CLIENTHELLO_TYPE)
        return [int(hs_lens[position])]
    except (AttributeError, ValueError):
        # no ssl layer / fields, or no ClientHello in this packet
        return [0]
def extractClienthelloCiphersuite(packet):
    """Return the aggregated, normalized ciphersuite vector of a ClientHello.

    Falls back to an all-zero vector of the parser's total component width
    when the packet is not a ClientHello or parsing fails.
    """
    component_sizes = [len(getattr(ciphersuite_parser, name))
                       for name in ciphersuite_parser.components]
    feature = [0] * sum(component_sizes)
    try:
        if int(packet.ssl.handshake_type) == 1:
            dec_ciphersuites = [int(f.show) for f in packet.ssl.handshake_ciphersuite.all_fields]
            feature = ciphersuite_parser.getVecAndAggregateAndNormalize(dec_ciphersuites)
    except (AttributeError, ZeroDivisionError):
        pass
    return feature
def extractClienthelloCiphersuiteLength(packet):
    """Return the ClientHello cipher-suites length field, or [0] if absent."""
    try:
        return [int(packet.ssl.handshake_cipher_suites_length)]
    except AttributeError:
        return [0]
def extractClienthelloCompressionmethodAndEncode(packet, enum):
    """Extract ClientHello compression methods and many-hot encode them."""
    methods = extractClienthelloCompressionmethod(packet)
    encoded = encodeEnumIntoManyHotVec(methods, enum)
    # last slot flags values missing from the reference enum list
    if encoded[-1] == 1:
        logging.warning('Compression methods contain unseen enums. Refer to above')
    return encoded
def extractClienthelloCompressionmethod(packet):
    """Return the list of compression-method ids from a ClientHello, else []."""
    try:
        if int(packet.ssl.handshake_type) == 1:
            return [int(f.show, 16) for f in packet.ssl.handshake_comp_method.all_fields]
    except AttributeError:
        pass
    return []
def extractClienthelloSupportedgroupLength(packet):
    """Return the supported-groups extension length, or [0] if absent."""
    try:
        return [int(packet.ssl.handshake_extensions_supported_groups_length)]
    except AttributeError:
        return [0]
def extractClienthelloSupportedgroupAndEncode(packet, enum):
    """Extract ClientHello supported groups and many-hot encode them."""
    groups = extractClienthelloSupportedgroup(packet)
    encoded = encodeEnumIntoManyHotVec(groups, enum)
    # last slot flags values missing from the reference enum list
    if encoded[-1] == 1:
        logging.warning('Supported groups contain unseen enums. Refer to above')
    return encoded
def extractClienthelloSupportedgroup(packet):
    """Return the supported-group ids advertised by the client, else []."""
    try:
        fields = packet.ssl.handshake_extensions_supported_group.all_fields
        return [int(f.show, 16) for f in fields]
    except AttributeError:
        return []
def extractClienthelloEncryptthenmacLength(packet):
    """Return the encrypt-then-MAC extension length, or [0] when missing."""
    ENCRYPT_THEN_MAC_TYPE = '22'
    try:
        ext_types = [f.show for f in packet.ssl.handshake_extension_type.all_fields]
        ext_lens = [f.show for f in packet.ssl.handshake_extension_len.all_fields]
        position = ext_types.index(ENCRYPT_THEN_MAC_TYPE)
        return [int(ext_lens[position])]
    except (AttributeError, ValueError):
        return [0]
def extractClienthelloExtendedmastersecretLength(packet):
    """Return the extended-master-secret extension length, or [0] when missing."""
    EXTENDED_MASTER_SECRET_TYPE = '23'
    try:
        ext_types = [f.show for f in packet.ssl.handshake_extension_type.all_fields]
        ext_lens = [f.show for f in packet.ssl.handshake_extension_len.all_fields]
        position = ext_types.index(EXTENDED_MASTER_SECRET_TYPE)
        return [int(ext_lens[position])]
    except (AttributeError, ValueError):
        return [0]
def extractClienthelloSignaturehashAndEncode(packet, enum):
    """Extract ClientHello signature-hash algorithms and many-hot encode them."""
    algorithms = extractClienthelloSignaturehash(packet)
    encoded = encodeEnumIntoManyHotVec(algorithms, enum)
    # last slot flags values missing from the reference enum list
    if encoded[-1] == 1:
        logging.warning('Signature hash contains unseen enums. Refer to above')
    return encoded
def extractClienthelloSignaturehash(packet):
    """Return signature-hash algorithm ids from a ClientHello, else []."""
    try:
        if int(packet.ssl.handshake_type) == 1:
            fields = packet.ssl.handshake_sig_hash_alg.all_fields
            return [int(f.show, 16) for f in fields]
    except AttributeError:
        pass
    return []
def extractServerhelloLength(packet):
    """Return the ServerHello handshake length, or [0] when absent."""
    SERVERHELLO_TYPE = '2'
    try:
        hs_types = [f.show for f in packet.ssl.handshake_type.all_fields]
        hs_lens = [f.show for f in packet.ssl.handshake_length.all_fields]
        position = hs_types.index(SERVERHELLO_TYPE)
        return [int(hs_lens[position])]
    except (AttributeError, ValueError):
        return [0]
def extractServerhelloRenegoLength(packet):
    """Return the renegotiation-info extension length, or [0] if absent."""
    try:
        return [int(packet.ssl.handshake_extensions_reneg_info_len)]
    except AttributeError:
        return [0]
def extractCertificateLengthInfo(packet):
    """Return [count, mean, max, min] over certificate lengths, else zeros.

    NOTE(review): the caller's comment advertises NUM_CERT, AVERAGE, MIN,
    MAX, but the produced order is count/mean/max/min — kept as-is because
    downstream feature columns depend on it; confirm which is intended.
    """
    try:
        cert_lens = [int(f.show) for f in packet.ssl.handshake_certificate_length.all_fields]
    except AttributeError:
        return [0, 0, 0, 0]
    count = len(cert_lens)
    mean_len = sum(cert_lens) / float(count)
    return [count, mean_len, max(cert_lens), min(cert_lens)]
def extractCertificateAndEncode(packet, enum):
    """Extract certificate signature algorithms and many-hot encode them."""
    algorithms = extractCertificate(packet)
    encoded = encodeEnumIntoManyHotVec(algorithms, enum)
    # last slot flags values missing from the reference enum list
    if encoded[-1] == 1:
        logging.warning('Certificates contains unseen enums. Refer to above')
    return encoded
def extractCertificate(packet):
    """Return the distinct certificate signature-algorithm ids (as strings).

    Scans packet.layers rather than packet.ssl because pyshark cannot
    expose inner attributes when a packet carries duplicate ssl layers.
    """
    collected = []
    try:
        for layer in packet.layers:
            if layer.layer_name == 'ssl' and hasattr(layer, 'x509af_algorithm_id'):
                collected.extend(f.show for f in layer.x509af_algorithm_id.all_fields)
    except AttributeError:
        return []
    # NOTE(review): set() deduplicates but yields no stable ordering
    return [str(f) for f in set(collected)]
def extractServerhellodoneLength(packet):
    """Return the ServerHelloDone handshake length, or [0] when missing."""
    SERVERHELLODONE_TYPE = '14'
    hs_types = []
    hs_lens = []
    try:
        # Scan packet.layers: pyshark cannot reach the inner attributes of
        # duplicate ssl layers through packet.ssl directly
        for layer in packet.layers:
            if layer.layer_name == 'ssl' and hasattr(layer, 'handshake_type') and hasattr(layer, 'handshake_length'):
                hs_types.extend(f.show for f in layer.handshake_type.all_fields)
                hs_lens.extend(f.show for f in layer.handshake_length.all_fields)
        position = hs_types.index(SERVERHELLODONE_TYPE)
        return [int(hs_lens[position])]
    except (AttributeError, ValueError):
        return [0]
def extractClientkeyexchangeLength(packet):
    """Return the ClientKeyExchange handshake length, or [0] when absent."""
    CLIENTKEYEXCHANGE_TYPE = '16'
    try:
        hs_types = [f.show for f in packet.ssl.handshake_type.all_fields]
        hs_lens = [f.show for f in packet.ssl.handshake_length.all_fields]
        position = hs_types.index(CLIENTKEYEXCHANGE_TYPE)
        return [int(hs_lens[position])]
    except (AttributeError, ValueError):
        return [0]
def extractClientkeyexchangePubkeyLength(packet):
    """Return the client EC point (pubkey) length, or [0] if absent."""
    try:
        return [int(packet.ssl.handshake_client_point_len)]
    except AttributeError:
        return [0]
def extractEncryptedhandshakemsgLength(packet):
    """Return the summed record length of all Encrypted Handshake Message
    records in this packet, or [0] when there are none."""
    TARGET = 'Encrypted Handshake Message'
    try:
        record_names = [f.showname for f in packet.ssl.record.all_fields]
        record_lengths = [f.show for f in packet.ssl.record_length.all_fields]
        matched = [int(record_lengths[i]) for i in range(len(record_lengths))
                   if TARGET in record_names[i]]
        if matched:
            return [sum(matched)]
    except AttributeError:
        pass
    return [0]
def extractChangeCipherSpecLength(packet):
    """Return the ChangeCipherSpec record length, or [0] when absent."""
    CHANGECIPHERSPEC_TYPE = '20'
    try:
        content_types = [f.show for f in packet.ssl.record_content_type.all_fields]
        lengths = [f.show for f in packet.ssl.record_length.all_fields]
        position = content_types.index(CHANGECIPHERSPEC_TYPE)
        return [int(lengths[position])]
    except (AttributeError, ValueError):
        return [0]
# If there are more than 1 app data record layer in the same packet, it will extract the latest app data record layer
def extractAppDataLength(packet):
    """Return the TCP payload length for an application-data packet, else [0].

    When several record layers exist, any app-data record (type 23) makes
    the whole tcp payload length count.
    """
    APPDATA_TYPE = '23'
    try:
        content_types = [f.show for f in packet.ssl.record_content_type.all_fields]
        if APPDATA_TYPE in content_types:
            return [int(packet.tcp.len)]
    except AttributeError:
        pass
    return [0]
def findIdxOfAppDataSegments(packet):
    """Return 0-based packet indices of the TCP segments that were
    reassembled into this packet's application data, or [] otherwise."""
    try:
        # Must be an app-data ssl packet carrying a data layer with
        # reassembly info
        if not (hasattr(packet, 'ssl') and hasattr(packet.ssl, 'app_data') and hasattr(packet, 'data')):
            return []
        # tcp_segment fields are 1-based frame numbers; convert to 0-based
        return [int(str(f.show)) - 1 for f in packet.data.tcp_segment.all_fields]
    except (AttributeError, KeyError):
        return []
def find_handshake(obj, target_type):
    """Recursively search a pyshark JSON tree for a handshake message.

    target_type uses the numeric TLS handshake types seen elsewhere in this
    module ('1' ClientHello, '2' ServerHello, '14' ServerHelloDone,
    '16' ClientKeyExchange); the sentinel 99 selects a record whose
    handshake content is an Encrypted Handshake Message (the only handshake
    without a type field). Returns the last match found, or None.
    """
    if type(obj) == list:
        final = None
        for a_obj in obj:
            temp = find_handshake(a_obj, target_type)
            if temp:
                final = temp
        # keep the last truthy match from the list
        return final
    elif type(obj) == JsonLayer:
        if obj.layer_name=='ssl' and hasattr(obj, 'record'):
            return find_handshake(obj.record, target_type)
        # elif obj.layer_name=='ssl' and hasattr(obj, 'handshake'):
        #     return find_handshake(obj.handshake, target_type)
        elif obj.layer_name=='record' and hasattr(obj, 'handshake') and target_type!=99:
            return find_handshake(obj.handshake, target_type)
        # If correct handshake is identified
        elif obj.layer_name=='handshake' and int(obj.type)==target_type:
            return obj
        # Return record containing Encrypted Handshake Message (only handshake msg without a type)
        elif obj.layer_name=='record' and hasattr(obj, 'handshake') and not(type(obj.handshake)==JsonLayer) and target_type==99:
            return obj
    elif type(obj) == dict:
        if 'ssl.record' in obj:
            return find_handshake(obj['ssl.record'], target_type)
        elif 'ssl.handshake' in obj:
            return find_handshake(obj['ssl.handshake'], target_type)
        elif 'ssl.handshake.type' in obj and int(obj['ssl.handshake.type'])==target_type:
            return obj
def find_changecipher(obj):
    """Recursively locate a ChangeCipherSpec record in a pyshark JSON tree.

    Returns the last matching record found, or None.
    """
    if type(obj) == list:
        found = None
        for item in obj:
            candidate = find_changecipher(item)
            if candidate:
                found = candidate
        return found
    elif type(obj) == JsonLayer:
        if obj.layer_name == 'ssl' and hasattr(obj, 'record'):
            return find_changecipher(obj.record)
        elif obj.layer_name == 'record' and hasattr(obj, 'change_cipher_spec'):
            return obj
# For identifying pure Application Data and Application Data [TCP segment of a reassembled PDU]
def find_appdata(obj, appdata):
    """Recursively collect Application Data records from a pyshark JSON
    tree into the *appdata* accumulator list (mutated in place).

    Handles both pure app-data records and reassembled-PDU dict entries.
    """
    if type(obj) == list:
        for item in obj:
            find_appdata(item, appdata)
    elif type(obj) == JsonLayer:
        if obj.layer_name == 'ssl' and hasattr(obj, 'record'):
            find_appdata(obj.record, appdata)
        elif obj.layer_name == 'record' and hasattr(obj, 'app_data'):
            appdata.append(obj)
    elif type(obj) == dict:
        if 'ssl.record' in obj:
            find_appdata(obj['ssl.record'], appdata)
        elif 'ssl.app_data' in obj:
            appdata.append(obj)
def encodeEnumIntoManyHotVec(listOfEnum, refEnum):
    """Many-hot encode *listOfEnum* against the reference list *refEnum*.

    The vector has len(refEnum) + 1 slots; the final slot is set (and a
    warning logged) for every enum not present in the reference.
    """
    encoded = [0] * (len(refEnum) + 1)  # trailing slot marks unseen enums
    for enum in (listOfEnum or []):
        if enum in refEnum:
            encoded[refEnum.index(enum)] = 1
        else:
            encoded[-1] = 1
            logging.warning('Unseen enum {}'.format(enum))
    return encoded
if __name__ == '__main__':
    # Ad-hoc exploration helper: dump pyshark field attributes of a sample
    # ClientHello so new feature extractors can be written against them.
    enums = {'ciphersuites': [], 'compressionmethods': [], 'supportedgroups': [], 'sighashalgorithms_client': [],
             'sighashalgorithms_cert': []}
    # Find attributes of a packet in pyshark.FileCapture
    pcapfile = 'sample-pcap/tls/www.stripes.com_2018-12-21_16-20-12.pcap'
    packets = pyshark.FileCapture(pcapfile)
    packets_list = [packet for packet in packets]
    # Packet 3 of this capture is assumed to be the ClientHello — TODO confirm
    clienthello = packets_list[3]
    supportedgroups = clienthello.ssl.handshake_extensions_supported_group
    for i in supportedgroups.all_fields:
        print(dir(i))
        print(i.show)
        print(i.showname)
        print(i.showname_key)
        print(i.showname_value)
        print(i.hex_value)
|
from .cameras import XSyncCamera, YSyncCamera
from .picker import Picker, ROI_time_series
from .probe_view import probe_view
from .color_scheme import palette
from .widgets import param_widget
from .line_view import line_view
# from .raster_view import raster_view
from .spike_view import spike_view
from .scatter_3d_view import scatter_3d_view
from .wave_view import wave_view
from .correlogram_view import correlogram_view
# from .cluster_view import cluster_view
from .scatter_2d_view import scatter_2d_view
from .raster_view import raster_view
from .amplitude_view import amplitude_view
from .firing_rate_view import firing_rate_view
from .trace_view import trace_view
from .ctree_view import ctree_view
from .volume_view import volume_view
from .pf_view import pf_view
from .image_view import image_view
from .cluster_view import cluster_view
from .grid_scatter3d import grid_scatter3d |
import threading
from pyee import EventEmitter
from pythonosc import dispatcher, osc_server
from tomomibot.const import OSC_ADDRESS, OSC_PORT
class Server:
    """OSC-over-UDP server that forwards incoming '/tomomibot/*' messages
    to subscribers through an EventEmitter."""

    def __init__(self, ctx, **kwargs):
        self.ctx = ctx
        self.is_running = False
        # Provide an interface for event subscribers
        self.emitter = EventEmitter()
        # Prepare OSC message dispatcher and UDP server
        self.address = kwargs.get('osc_address', OSC_ADDRESS)
        self.port = kwargs.get('osc_port', OSC_PORT)
        disp = dispatcher.Dispatcher()
        disp.map('/tomomibot/*', self._on_param)
        self._server = osc_server.ThreadingOSCUDPServer(
            (self.address, self.port), disp)

    def start(self):
        """Serve OSC messages on a background daemon thread."""
        worker = threading.Thread(target=self._start_server)
        worker.daemon = True
        worker.start()
        self.is_running = True

    def stop(self):
        """Shut down the UDP serve loop."""
        self._server.shutdown()
        self.is_running = False

    def _start_server(self):
        # Runs on the worker thread until stop() is called
        self.ctx.log('OSC server @ {}:{}'.format(self.address,
                                                 self.port))
        self._server.serve_forever()

    def _on_param(self, address, *args):
        """Validate an incoming OSC message and emit the matching event."""
        param = address.replace('/tomomibot/', '')
        # Commands with no arguments
        if param == 'reset':
            self.emitter.emit('reset')
            return
        # We expect one float argument from now on
        if not len(args) == 1 or type(args[0]) is not float:
            return
        # Range-checked parameters; volume is unconstrained
        if param in ['temperature'] and 0 <= args[0] <= 1:
            self.emitter.emit('param', param, args[0])
        if param in ['interval'] and 0 <= args[0] <= 5:
            self.emitter.emit('param', param, args[0])
        if param in ['volume']:
            self.emitter.emit('param', param, args[0])
|
import functools
import os
import sys
from taichi._lib import core as _ti_core
from taichi._logging import info
# Decide whether Taichi's native-side prints must go through a Python
# buffer. TI_ENABLE_PYBUF defaults to "1"; an empty value or any non-zero
# integer enables the check, "0" disables it.
pybuf_enabled = False
_env_enable_pybuf = os.environ.get("TI_ENABLE_PYBUF", "1")
if not _env_enable_pybuf or int(_env_enable_pybuf):
    # When using in Jupyter / IDLE, the sys.stdout will be their wrapped ones.
    # While sys.__stdout__ should always be the raw console stdout.
    pybuf_enabled = sys.stdout is not sys.__stdout__
_ti_core.toggle_python_print_buffer(pybuf_enabled)
def _shell_pop_print(old_call):
    """Wrap *old_call* so the native print buffer is flushed after each call.

    Returns *old_call* untouched when no graphical shell wrapping is active.
    """
    if not pybuf_enabled:
        # zero-overhead!
        return old_call
    info("Graphical python shell detected, using wrapped sys.stdout")

    @functools.wraps(old_call)
    def flushing_call(*args, **kwargs):
        result = old_call(*args, **kwargs)
        # print's in kernel won't take effect until ti.sync(), discussion:
        # https://github.com/taichi-dev/taichi/pull/1303#discussion_r444897102
        print(_ti_core.pop_python_print_buffer(), end="")
        return result

    return flushing_call
|
import pathlib
from pkg_resources import resource_filename
def get_root_path():
    """Return the installed water_vhf_analysis package directory as a Path."""
    import water_vhf_analysis

    return pathlib.Path(water_vhf_analysis.__file__).parent
def get_txt_file(model_path, name):
    """Get path for txt file

    Parameters
    ----------
    model_path : str
        Simulation model directory path
    name : str
        Name of txt file to load

    Returns
    -------
    path : Path object
        Path of txt file
    """
    # Join with pathlib operators instead of manual string concatenation
    return get_root_path() / "data" / model_path / name
def get_csv_file(name):
    """Get path for csv table files

    Parameters
    ----------
    name : str
        Name of csv file to load

    Returns
    -------
    path : Path object
        Path of csv file
    """
    # Join with pathlib operators instead of manual string concatenation
    return get_root_path() / "analysis" / "tables" / name
|
# Generated by Django 3.2.2 on 2021-10-07 20:29
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated migration: creates the PersonOffice model (table
    openstates_personoffice) with a CASCADE FK to data.Person."""

    dependencies = [
        ("data", "0040_auto_20210920_2347"),
    ]
    operations = [
        migrations.CreateModel(
            name="PersonOffice",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                (
                    "classification",
                    models.CharField(
                        choices=[
                            ("district", "District Office"),
                            ("capitol", "Capitol Office"),
                            ("primary", "Primary Office"),
                        ],
                        max_length=20,
                    ),
                ),
                ("address", models.CharField(blank=True, default="", max_length=300)),
                ("voice", models.CharField(blank=True, default="", max_length=30)),
                ("fax", models.CharField(blank=True, default="", max_length=30)),
                ("name", models.CharField(blank=True, default="", max_length=200)),
                (
                    "person",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="offices",
                        to="data.person",
                    ),
                ),
            ],
            options={
                "db_table": "openstates_personoffice",
            },
        ),
    ]
|
import discord
from discord.ext import commands
import sys
sys.path.append('/home/runner/hosting601/modules')
import username601 as myself
from username601 import *
from decorators import command, cooldown
import canvas as Painter
class encoding(commands.Cog):
    """Text-encoding commands: morse and styled-text variants, QR/barcode
    images, binary, caesar, atbash, reverse, base64 and leetspeak."""

    def __init__(self, client):
        self.client = client

    @command('fliptext,fancy,cursive,braille')
    @cooldown(5)
    async def morse(self, ctx, *args):
        """Encode text to morse, or to the style chosen via command alias."""
        if not args:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | no arguments? Really?')
        elif len(' '.join(args)) > 100:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | too long....')
        else:
            async with ctx.message.channel.typing():
                res = myself.jsonisp('https://useless-api--vierofernando.repl.co/encode?text='+myself.urlify(' '.join(args)))
                # The invoked alias (without prefix) selects the style
                invoked = str(ctx.message.content).split(' ')[0][1:]
                if 'fliptext' in invoked:
                    data = res['styles']['upside-down']
                elif 'cursive' in invoked:
                    data = res['styles']['cursive']
                elif 'fancy' in invoked:
                    data = res['styles']['fancy']
                elif 'braille' in invoked:
                    data = res['braille']
                else:
                    data = res['ciphers']['morse']
                await ctx.send(f'{data}')

    @command('qr,qrcode,qr-code')
    @cooldown(1)
    async def barcode(self, ctx, *args):
        """Render the given text as a QR code or barcode image."""
        if not args:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | Please provide a text!')
        elif len(' '.join(args)) > 50:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | too longggggggggg')
        else:
            async with ctx.message.channel.typing():
                encoded = str(myself.urlify(str(' '.join(args))))
                if 'qr' in str(ctx.message.content).split(' ')[0][1:]:
                    url = "https://api.qrserver.com/v1/create-qr-code/?size=150x150&data=" + encoded
                else:
                    url = 'http://www.barcode-generator.org/zint/api.php?bc_number=20&bc_data=' + encoded
                await ctx.send(file=discord.File(Painter.urltoimage(url), 'qr_or_barcode.png'))

    @command()
    @cooldown(1)
    async def binary(self, ctx, *args):
        """Convert the given text to its binary representation."""
        if not args:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | gimme something.')
        elif len(' '.join(args)) > 50:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | too long.')
        else:
            # Encode once instead of recomputing for the length check
            encoded = str(myself.bin(str(' '.join(args))))
            if len(encoded) > 4000:
                await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | the result is too long for discord to proccess...')
            else:
                await ctx.send('```' + encoded + '```')

    @command()
    @cooldown(1)
    async def caesar(self, ctx, *args):
        """Caesar-shift the text by the first numeric argument (the offset)."""
        if not args:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | gimme something.')
            return
        # First purely-numeric token is the shift offset
        offset = next((int(tok) for tok in args if tok.isnumeric()), None)
        if offset is None:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | No offset?')
        else:
            # NOTE(review): removing str(offset) strips that digit sequence
            # anywhere it appears in the message, not just the offset token
            await ctx.send(myself.caesar(str(' '.join(args).replace(str(offset), '')), int(offset)))

    @command()
    @cooldown(1)
    async def atbash(self, ctx, *args):
        """Encode the text with the atbash substitution cipher."""
        if not args:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error)) + ' | Invalid. Please give us the word to encode...')
        else:
            await ctx.send(myself.atbash(' '.join(args)))

    @command()
    @cooldown(1)
    async def reverse(self, ctx, *args):
        """Send the text reversed."""
        if not args:
            await ctx.send('no arguments? rip'[::-1])
        else:
            await ctx.send(str(' '.join(args))[::-1])

    @command('b64')
    @cooldown(1)
    async def base64(self, ctx, *args):
        """Base64-encode the given text."""
        if not args:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | Gimme dat args!')
        else:
            await ctx.send(myself.encodeb64(' '.join(args)))

    @command('leetspeak')
    @cooldown(1)
    async def leet(self, ctx, *args):
        """Translate the text to leetspeak via the username601 mapping."""
        if not args:
            await ctx.send(str(self.client.get_emoji(BotEmotes.error))+' | No arguments? ok then! no service it is!')
            return
        data = myself.jsonisp("https://vierofernando.github.io/username601/assets/json/leet.json")
        text = ' '.join(args)
        total = ''
        for ch in text:
            # Bug fix: the original looked up data[keys[i]] with i being the
            # character *position* in the message, substituting wrong glyphs
            # and raising IndexError once the message outgrew the mapping.
            # Use the matched key instead, falling back to the original char.
            replacement = next((data[key] for key in data if ch.lower() == key.lower()), None)
            total += replacement if replacement is not None else ch
        await ctx.send(total)
def setup(client):
    # discord.py extension entry point: register the cog with the bot
    client.add_cog(encoding(client))
|
from utils import read_split_line_input
from math import ceil, floor
from typing import List
# Puzzle input for day 5: one boarding-pass code per line, as char lists
boarding_pass_list = read_split_line_input(5, list)
def get_row(code: List[str]):
    """Binary-partition rows 0-127: 'F' keeps the lower half, anything
    else the upper half. Returns the final row number."""
    lo, hi = 0, 127
    for ch in code:
        midpoint = (lo + hi) / 2
        if ch == "F":
            hi = floor(midpoint)
        else:
            lo = ceil(midpoint)
    return lo
def get_column(code: List[str]):
    """Binary-partition columns 0-7: 'L' keeps the lower half, anything
    else the upper half. Returns the final column number."""
    lo, hi = 0, 7
    for ch in code:
        midpoint = (lo + hi) / 2
        if ch == "L":
            hi = floor(midpoint)
        else:
            lo = ceil(midpoint)
    return lo
def boarding_id(boarding_pass: str):
    """Seat ID = row * 8 + column (row from the first 7 chars, column
    from the last 3)."""
    row_part, col_part = boarding_pass[:-3], boarding_pass[-3:]
    return get_row(row_part) * 8 + get_column(col_part)
# Part 2: the missing seat ID is (sum of the full occupied range) minus
# (sum of the IDs actually seen)
boarding_id_list = [boarding_id(boarding_pass) for boarding_pass in boarding_pass_list]
print(sum(range(min(boarding_id_list), max(boarding_id_list) + 1)) - sum(boarding_id_list))
# Universidade de Aveiro - Physics Department
# 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 28/6/2017
# Selecting Data from an excel file to another
#import numpy as np
import pandas as pd
from openpyxl import load_workbook
#r = [0, 301, 302, 303, 304, 305, 306]
#desired = ['S_Avg', 'I_Avg', 'R_Avg', 'S_StD', 'I_StD', 'R_StD']
# Sweep over every (c, r) parameter combination; each has its own sheet
cvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
           0.25, 0.5, 0.75, 1]
rvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
           0.25, 0.5, 0.75, 1]
book = load_workbook('data/ns_shift.xlsx')
writer = pd.ExcelWriter('data/nd_shift.xlsx', engine='openpyxl')
# NOTE(review): assigning writer.book / writer.sheets relies on old pandas
# ExcelWriter internals (removed in later versions) — confirm pinned version
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
for cvar in cvalues:
    for rvar in rvalues:
        print('retrieving...')
        tblnm = 'c='+str(cvar)+'|r='+ str(rvar)
        # NOTE(review): `sheetname` is the pre-0.21 pandas keyword
        # (later renamed `sheet_name`) — confirm pinned version
        data = pd.read_excel('data/ns_shift.xlsx',
                            sheetname = tblnm, index_col = 0)
        print('...retrieved')
        #data.drop(data.columns[r], axis = 1, inplace= True)
        # Keep only the first 1000 rows of each sheet
        sel = data[:1000]
        print('copying...............................'+str(tblnm))
        sel.to_excel(writer,'c='+str(cvar)+'|r='+ str(rvar))
        print('copied!')
        writer.save()
|
"""process geoquery request for comment emails
called by cronjob on server for branch
"""
# -----------------------------------------------------------------------------
# NOTE: Python 2 script (print statements below)
import sys
import os
# Branch name is the single required CLI argument
branch = sys.argv[1]
utils_dir = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'utils')
sys.path.insert(0, utils_dir)
from config_utility import BranchConfig
config = BranchConfig(branch=branch)
config.test_connection()
# -------------------------------------------------------------------------
if config.connection_status != 0:
    raise Exception('Could not connect to mongodb')
import textwrap
import time
import pandas as pd
# # used for logging
# sys.stdout = sys.stderr = open(
#     os.path.dirname(os.path.abspath(__file__)) + '/processing.log', 'a')
from email_utility import GeoEmail
from geoquery_requests import QueueToolBox
# =============================================================================
print '\n======================================='
print '\nRequest for Comments Script'
print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
# -------------------------------------
# modifiable parameters
# mode = "auto"
mode = "manual"
# dry_run = True
dry_run = False
# maximum number of emails to send per batch (should really be per day for gmail limits, but we only run this once a week+ 1 )
email_limit = 50
# filters for searching requests
f = {
    "n_days": 365, # number of days to search for any requests
    "request_count": 3, # minimum number of requests in n_days required for an email
    "earliest_request": 14, # minimum number of days since earliest request
    "latest_request": 7, # minimum number of days since latest request
}
# -------------------------------------
queue = QueueToolBox()
# load config setting for branch script is running on
branch_info = queue.set_branch_info(config)
print "`{0}` branch on {1}".format(branch_info.name, branch_info.database)
current_timestamp = int(time.time())
def to_seconds(days):
    """Return the number of seconds spanned by `days` days."""
    seconds_per_day = 24 * 60 * 60
    return days * seconds_per_day
# get timestamp for ndays before present time
# used to get requests for past n days
search_timestamp = current_timestamp - to_seconds(f["n_days"])

try:
    # fetch all requests submitted within the window, excluding the large
    # payload fields (release_data / raster_data / boundary) we don't need
    search = queue.c_queue.find(
        {
            "stage.0.time": {"$gt": search_timestamp}
        },
        {
            "release_data": 0,
            "raster_data": 0,
            "boundary": 0
        }
    )
    request_objects = list(search)
except Exception as e:
    print "Error while searching for requests in queue"
    raise

# verify that we have some requests
if not request_objects:
    print "Request queue is empty"
else:
    # convert to dataframe (one row per request)
    request_df_data = []
    for r in request_objects:
        # stage[0] appears to be submission time, stage[3] completion time
        # -- TODO confirm against the queue's stage layout
        request_dict = {
            'email': r['email'],
            'request_time': r['stage'][0]['time'],
            'complete_time': r['stage'][3]['time'],
            'status': r['status'],
            'count': 1
        }
        # older documents may predate these flags; default both to 0
        if 'comments_requested' in r:
            request_dict['comments_requested'] = r['comments_requested']
        else:
            request_dict['comments_requested'] = 0
        if 'contact_flag' in r:
            request_dict['contact_flag'] = r['contact_flag']
        else:
            request_dict['contact_flag'] = 0
        request_df_data.append(request_dict)
    request_df = pd.DataFrame(request_df_data)

    # timestamp field the earliest/latest age filters operate on
    # time_field = "request_time"
    time_field = "complete_time"
    request_df["earliest_time"] = request_df[time_field]
    request_df["latest_time"] = request_df[time_field]

    # convert to user aggregated dataframe (one row per email address)
    user_df = request_df.groupby('email', as_index=False).agg({
        "count": "sum",
        "comments_requested": "sum",
        "contact_flag": "sum",
        "earliest_time": "min",
        "latest_time": "max"
    })

    # filter: users never contacted before, with enough requests, whose
    # activity is old enough that their jobs are likely wrapped up
    # NOTE(review): strict `>` means a user needs request_count + 1 requests
    # to qualify, despite the "minimum" wording where f is defined -- confirm
    valid_df = user_df.loc[
        (user_df["comments_requested"] == 0) &
        (user_df["contact_flag"] == 0) &
        (user_df["count"] > f["request_count"]) &
        (current_timestamp - user_df["earliest_time"] > to_seconds(f["earliest_request"])) &
        (current_timestamp - user_df["latest_time"] > to_seconds(f["latest_request"]))
    ]

    valid_user_count = len(valid_df)
    print "\n{} valid users found:\n".format(valid_user_count)
    valid_df.reset_index(drop=True, inplace=True)

    # send list of users to staff emails
    if not dry_run and mode == "manual" and valid_user_count > 0:
        email_list = valid_df["email"].tolist()
        email_list_str = "\n\t".join(email_list)
        mail_to = "geo@aiddata.org, info@aiddata.org, eteare@aiddata.wm.edu"
        dev = " (dev) " if branch == "develop" else " "
        mail_subject = ("Your weekly list of GeoQuery{0} user emails").format(dev)
        mail_message = (
            """
            Hello there team!
            Below you will find the list of users who satisfy the criteria for contact. For details
            on what these criteria actually are, contact your GeoQuery Admin. At the end of this email
            is some sample language for contacting users.
            --------------------
            {}
            --------------------
            Hello there!
            We would like to hear about your experience using AidData's GeoQuery tool. Would you
            please respond to this email with a couple sentences about how GeoQuery has helped you?
            We are able to make GeoQuery freely available thanks to the generosity of donors and
            open source data providers. These people love to hear about new research enabled by
            GeoQuery, and what kind of difference this research is making in the world.
            Also, we love feedback of all kinds. If something did not go the way you expected, we
            want to hear about that too.
            Thanks!
            \tAidData's GeoQuery Team
            """).format(email_list_str)
        # strip the common leading indentation introduced by the source layout
        mail_message = textwrap.dedent(mail_message)

        email = GeoEmail(config)
        mail_status = email.send_email(mail_to, mail_subject, mail_message)
        if not mail_status[0]:
            # presumably send_email returns (success, message, exception)
            # -- TODO confirm against email_utility.GeoEmail
            print mail_status[1]
            raise mail_status[2]

    # email any users who pass above filtering with request for comments
    # add "comments_requested" = 1 flag to all of their existing requests
    for ix, user_info in valid_df.iterrows():
        user_email = user_info["email"]
        # cap only applies to automated sends (gmail daily/batch limits)
        if mode == "auto" and ix >= email_limit:
            print "\n Warning: maximum emails reached. Exiting."
            break
        print '\t{}: {}'.format(ix, user_email)
        # automated request for comments
        if not dry_run and mode == "auto":
            print "sending emails..."
            # avoid gmail email per second limits
            time.sleep(1)
            queue.notify_comments(user_email)
            # mark every request for this user so they are never re-emailed
            queue.c_queue.update_many(
                {"email": user_email},
                {"$set": {"comments_requested": 1}}
            )
        # flag as being included in list for staff to manually email
        elif not dry_run and mode == "manual":
            queue.c_queue.update_many(
                {"email": user_email},
                {"$set": {"contact_flag": 1}}
            )

print '\n---------------------------------------'
print "\nFinished checking requests"
print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
|
# Demo: inspecting the current module namespace with the builtin vars().
# BUG FIX: the original `import vars` raised ModuleNotFoundError --
# `vars` is a builtin function, not a module, so no import is needed.
t = 9
name = 'Mr.T'
animal = 'hippo'

# vars() with no argument returns the current namespace dict (like locals())
print(vars())

# copy the namespace so later bindings don't mutate our snapshot
d = vars().copy()  # save as variable
print(d.items())  # a dict view of (key, value) pairs

for k, v in d.items():
    print(k, ":", v)

# option 1: skip names containing '_' (filters out machinery
# entries such as __name__ / __doc__)
for k in d:
    if '_' not in k:
        print(k, ':', d[k])
# print the values
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.