index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
3,800 | 781ce153d5053078ee11cecc13d055a67999a651 | # -*- coding: utf-8 -*-
from flask import jsonify
from flask.views import MethodView
class Users(MethodView):
def get(self):
return jsonify(
{
'status': 'OK',
'users': [
{'name': 'Pepe', 'age': 35, 'ocupation': "Engineer"},
{'name': 'Bob', 'age': 20, 'ocupation': "Student"}
]
}
)
def post(self):
# create user
pass
def put(self):
# update user
pass
def delete(self):
# delete user
pass
|
3,801 | 50c7ce95f17cbd40a753d16d9f9fab349ad4f4ce | """
100 4 200 1 3 2
100
4
200
1
3
2
6:35
"""
class Solution:
    def longestConsecutive(self, nums: List[int]) -> int:
        """Return the length of the longest run of consecutive integers in nums.

        O(n) expected time: only run *tops* (values n with no n+1 present)
        start a downward scan, so each value is visited a bounded number of
        times. Returns 0 for an empty input.

        Cleanup over the original: removed large blocks of dead commented-out
        code, the unused float('-inf') sentinel, and the shadowed `saven`
        bookkeeping.
        """
        if not nums:
            return 0
        numset = set(nums)
        best = 0
        for n in numset:
            # Only start counting from the top of a run (no successor present);
            # this is what keeps the overall work linear.
            if n + 1 not in numset:
                length = 1
                cur = n
                while cur - 1 in numset:
                    length += 1
                    cur -= 1
                best = max(best, length)
        return best
3,802 | 15eed401728e07bfe9299edd12add43ad8b9cb71 | # -*- coding: utf-8 -*-
import luigi
from luigi import *
#from luigi import Task
import pandas as pd
from pset.tasks.embeddings.load_embeding import EmbedStudentData
from pset.tasks.data.load_dataset import HashedStudentData
import numpy as npy
import pickle
import os
class NearestStudents(Task):
    """Luigi task: rank students by embedding distance to a given github id.

    Loads a pickled embedding lookup plus the hashed-id spreadsheet, computes
    the cosine distance from the target student's vector to every vector,
    writes the sorted distances to CSV, and prints the nearest and farthest
    students.
    """
    # Un-hashed github id to locate in the spreadsheet's 'hashed_id' column.
    github_id = Parameter(default='b280302a', description='Github id to search nearby (not hashed)')
    n = IntParameter(default=5, description='Output top N')
    # NOTE(review): this flag is never read in run(); the farthest list below
    # is always printed with a hard-coded tail(5) -- confirm intent.
    farthest = BoolParameter(default=False, description='Find farthest instead')

    def output(self):
        # Hard-coded, machine-specific absolute output path.
        return luigi.LocalTarget("/Users/adcxdpf/Downloads/pset_03/sd.csv")

    def requires(self):
        return {
            'data': HashedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),
            'embedStudentData': EmbedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')
        }
        #return self.clone(EmbedStudentData)

    def run(self):
        # Load the pickled embedding lookup (assumed {row index -> vector};
        # TODO confirm against EmbedStudentData's output format).
        vectors_lookup_bytes = (self.input()['embedStudentData'].open(mode='rb'))
        vectors_lookup = pickle.load(vectors_lookup_bytes)
        vecs_list = pd.Series(vectors_lookup)
        vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)
        vectors_df.columns = ['vectors']
        print('##### vectors_df : ', vectors_df)
        print(" vectors_df shape is :: " , vectors_df.shape)
        print("github_id param : " , self.github_id)
        # Locate the requested student's row in the spreadsheet.
        pd_xls_data = pd.read_excel(self.input()['data'].path,0)
        idx = pd_xls_data.index[pd_xls_data['hashed_id']== self.github_id]
        #print ('######## idx.values ######### ', idx.values)
        my_vec = vectors_df.iloc[[idx.values[0]]]
        # Stored on self so my_distance (used via Series.apply) can read it.
        self.my_vec = (my_vec.values[0][0])
        print ("my_vec : " , self.my_vec)
        print(" my_vec shape is :: " , self.my_vec.shape)
        # Cosine distance from the target vector to every embedding.
        distances = vectors_df['vectors'].apply(self.my_distance)
        sortedDistance= distances.sort_values()
        print('###### sortedDistance : ', sortedDistance)
        # output data
        f = self.output().open('w')
        sortedDistance.str[0].to_csv(f)
        #df.to_csv(f, sep='\t', encoding='utf-8', index=None)
        f.close()
        nearDis= sortedDistance.head(self.n).index
        print ("******** Nearest**********")
        for index in nearDis:
            print(pd_xls_data.iloc[index])
        # NOTE(review): tail(5) ignores both self.n and the `farthest` flag.
        farDis = sortedDistance.tail(5).index
        print ("******** Farthest**********")
        for index in farDis:
            print(pd_xls_data.iloc[index])

    def cosine_similarity(self,a, b):
        # """Takes 2 vectors a, b and returns the cosine similarity according
        # to the definition of the dot product
        # """
        # dot_product = npy.dot(a, b)
        # norm_a = npy.linalg.norm(a)
        # norm_b = npy.linalg.norm(b)
        # return dot_product / (norm_a * norm_b)
        # Uses a[0] -- assumes `a` arrives wrapped in an extra dimension;
        # TODO confirm against the stored vector layout.
        dot_product = npy.dot(a[0], b.T)
        norm_a = npy.linalg.norm(a)
        norm_b = npy.linalg.norm(b)
        return dot_product / (norm_a * norm_b)

    def my_distance(self,vec1):
        # Cosine *distance* (1 - similarity) to the target student's vector.
        return 1 - self.cosine_similarity(vec1, self.my_vec)
|
3,803 | b713e38824db13f919484b071fb35afb29e26baa | import os,sys
# Make the parent directory importable so the local `xmind` package is found.
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId

# Workbook name; the .xmind file lives next to this script.
xmind_name="数据结构"
# NOTE(review): "\\" is a Windows-only separator; os.path.join would be portable.
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("二叉树——递归套路")
r2=s2.getRootTopic()
r2.setTitle("二叉树——递归套路")

# Mind-map content: nested dicts/lists of topic strings (consumed by
# xmind.build below). The embedded Java-like snippets are plain text topics,
# not executed code.
content={
    '递归套路':[
        '可解决面试中绝大多数二叉树问题,尤其是树型dp问题',
        '本质是利用递归遍历二叉树的便利性'
    ],
    '思路':[
        '1.假设以x节点为为头,假设可以向X左树和X右树要任何信息',
        '2.在上一步的假设下,讨论以X为头节点的树,得到答案的可能性(最重要)',
        '3.列出所有可能性后,确定到底需要向左树还是右树要什么样的信息',
        '4.把左树信息和右树信息求全集,就是任何一棵子树都需要返回的信息S',
        '5.递归函数都返回S,每一棵子树都这么要求',
        '6.写代码,在代码中考虑如何把左树信息和右树信息整合出整棵树的信息'
    ],
    '题目1':[
        '给定一棵二叉树的头节点head,返回这颗二叉树是不是平衡二叉树',
        {'思路':[
            '1.左子树是否平衡',
            '2.右子树是否平衡',
            '3.左树与右树高在2以内',
        ]},
        {'实现':[
            'Class Info(){',
            '    boolean isBalanced;',
            '    int height;',
            '}',
            '---------------------',
            'Info process(Node head){',
            '    if(node==null){',
            '        return node;',
            '    }',
            '    Info leftInfo=process(head.left);',
            '    Info rightInfo=process(head.right);',
            '    int height=Math.max(leftInfo.height,rightInfo.height)-1;',
            '    boolean isBalanced=true;',
            '    if(leftInfo.isBalanced&&rightInfo.isBalanced&&Math.abs(leftInfo.height-rightInfo.height)<2){',
            '        isBalanced=false;',
            '    }',
            '    return new Info(isBalanced,height);',
            '}'
        ]}
    ],
    '题目2':[
        '给定一棵二叉树的头节点head,任何两个节点之前都存在距离',
        '返回整棵二叉树的最大距离',
        {'思路':[
            {'1.与头节点无关':[
                'max(左侧的最大距离,右侧的最大距离)',
            ]},
            {'2.与头节点有头':[
                '左树高+右树高+1'
            ]}
        ]},
        {'实现':[
            'Class Info(){',
            '    int maxDistance;',
            '    int height;',
            '}',
            '---------------------',
            'Info process(Node head){',
            '    if(head==null){',
            '        return new Info(0,0);',
            '    }',
            '    Info leftInfo=process(head.left);',
            '    Info rightInfo=process(head.right);',
            '    int height=Math.max(leftInfo.height,rightInfo.height)+1;',
            '    int maxDistance=Math.max(',
            '        Math.max(leftInfo.maxDistance,rightInfo.maxDistance),',
            '        leftInfo.height+rightInfo.height+1)',
            '    return new Info(maxDistance,height);',
            '}'
        ]}
    ]
}
# build the xmind tree from the content dict
xmind.build(content,r2)
# save the workbook back to the same file
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
3,804 | 9f479ad2acf4f6deb0ca4db606c3d804979c10bd | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.policies.gaussian_rbf_policy import GaussianRBFPolicy
from rllab.policies.gaussian_hmlp_policy import GaussianHMLPPolicy
from rllab.policies.gaussian_hlc_policy import GaussianHLCPolicy
import numpy as np
import joblib
def run_task(*_):
    """Train TRPO on DartWalker2d-v1 with a pre-trained hierarchical policy.

    The commented-out triple-quoted blocks are leftover experiments
    (parameter copying from an integrated controller; action sanity checks).
    """
    env = normalize(GymEnv("DartWalker2d-v1", record_video=False))
    policy_sep = GaussianHLCPolicy(
        env_spec=env.spec,
        # The neural network policy should have two hidden layers, each with 32 hidden units.
        hidden_sizes=(64,32),
        sub_out_dim=3,
        option_dim=2,
        #init_std=0.1,
    )
    # NOTE(review): the freshly built policy above is immediately discarded --
    # the checkpoint loaded here wins. Confirm the construction is still needed.
    policy_sep = joblib.load('data/local/experiment/Walker2d_hlc_2/policy_0.pkl')
    '''# copy parameter from integrated controller to separate controller
    hrl_pol_param = policy_int._mean_network.get_params()
    hlc_param = policy_sep._mean_network.get_params()
    llc_param = policy_sep._lowlevelnetwork.get_params()
    for param in hlc_param:
        for hrl_param in hrl_pol_param:
            if param.name == hrl_param.name:
                param.set_value(hrl_param.get_value(borrow=True))
    for param in llc_param:
        for hrl_param in hrl_pol_param:
            if param.name == hrl_param.name:
                param.set_value(hrl_param.get_value(borrow=True))'''
    baseline = LinearFeatureBaseline(env_spec=env.spec)
    '''o = np.random.random(17)*0
    o[0]=1.25
    a, ainfo = policy_int.get_action(o)
    a2, a2info = policy_sep.get_action(o)
    action1 = ainfo['mean']
    action2 = policy_sep.lowlevel_action(o, a2)
    print(action1)
    print(action2)
    abc'''
    algo2 = TRPO(
        env=env,
        policy=policy_sep,
        baseline=baseline,
        batch_size=15000,
        max_path_length=env.horizon,
        n_itr=200,
        discount=0.99,
        step_size=0.01,
        epopt_epsilon = 1.0,
        epopt_after_iter = 0,
        # Uncomment both lines (this and the plot parameter below) to enable plotting
        # plot=True,
    )
    algo2.train()

# Launch the experiment through rllab's runner (handles workers/snapshots).
run_experiment_lite(
    run_task,
    # Number of parallel workers for sampling
    n_parallel=2,
    # Only keep the snapshot parameters for the last iteration
    snapshot_mode="last",
    # Specifies the seed for the experiment. If this is not provided, a random seed
    # will be used
    seed=1,
    exp_name='Walker2d_hlc_cont',
    # plot=True
)
|
3,805 | e807cef534226f3efb4a8df471598727fa068f02 | # -*- python -*-
# ex: set syntax=python:
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# See master.experimental/slaves.cfg for documentation.
# Buildbot slave inventory. Every entry is currently commented out, so the
# master runs with an empty slave list; the commented dicts document the
# expected schema (master/hostname/builder/os/version/bits).
slaves = [
  ################################################################################
  # Linux
  ################################################################################
  # {
  #   'master': 'Chromium',
  #   'hostname': 'build59-m1',
  #   'builder': 'Linux Builder x64',
  #   'os': 'linux',
  #   'version': 'lucid',
  #   'bits': '64',
  # },
  # {
  #   'master': 'Chromium',
  #   'hostname': 'vm119-m1',
  #   'builder': 'Linux Tests x64',
  #   'os': 'linux',
  #   'version': 'lucid',
  #   'bits': '64',
  # },
  # {
  #   'master': 'Chromium',
  #   'builder': 'Linux (aura)',
  #   'hostname': 'vm80-m1',
  #   'os': 'linux',
  #   'version': 'lucid',
  #   'bits': '32',
  # },
  # {
  #   'master': 'Chromium',
  #   'hostname': 'build13-m1',
  #   'builder': 'Linux Builder (dbg)',
  #   'os': 'linux',
  #   'version': 'lucid',
  #   'bits': '32',
  # },
  # {
  #   'master': 'Chromium',
  #   'hostname': 'vm128-m1',
  #   'builder': 'Linux Tests (dbg)(1)',
  #   'os': 'linux',
  #   'version': 'lucid',
  #   'bits': '32',
  # },
  # {
  #   'master': 'Chromium',
  #   'hostname': 'vm129-m1',
  #   'builder': 'Linux Tests (dbg)(2)',
  #   'os': 'linux',
  #   'version': 'lucid',
  #   'bits': '32',
  # },
  # {
  #   'master': 'Chromium',
  #   'builder': 'Linux Sync',
  #   'hostname': 'vm121-m1',
  #   'os': 'linux',
  #   'version': 'lucid',
  #   'bits': '64',
  # },
  # {
  #   'master': 'Chromium',
  #   'builder': 'Linux Clang (dbg)',
  #   'hostname': 'vm79-m1',
  #   'os': 'linux',
  #   'version': 'lucid',
  #   'bits': '64',
  # },
  # ################################################################################
  # # Android
  # ################################################################################
  # {
  #   'master': 'Chromium',
  #   'hostname': 'vm138-m1',
  #   'builder': 'Android Builder',
  #   'os': 'linux',
  #   'version': 'lucid',
  #   'bits': '64',
  # },
]
|
3,806 | f561846c943013629e417d16f4dae77df43b25c4 | from flask_sqlalchemy import SQLAlchemy
from flask_security import UserMixin, RoleMixin
db = SQLAlchemy()
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class Role(db.Model, RoleMixin):
    """Security role for flask_security's RoleMixin."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))

    def __repr__(self):
        # Bug fix: the closing '>' was missing, producing e.g. "<Role admin".
        return f'<Role {self.name}>'
class User(db.Model, UserMixin):
    """Application user with flask_security trackable fields."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=True)
    password = db.Column(db.String(255))
    active = db.Column(db.Boolean())
    confirmed_at = db.Column(db.DateTime())
    roles = db.relationship('Role', secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))
    # SECURITY_TRACKABLE bookkeeping columns.
    last_login_at = db.Column(db.DateTime())
    current_login_at = db.Column(db.DateTime())
    last_login_ip = db.Column(db.String(255))
    current_login_ip = db.Column(db.String(255))
    login_count = db.Column(db.Integer)

    def __repr__(self):
        # Consistency fix: use the same '<Name value>' form as Domain.__repr__.
        return f'<User {self.username}>'
class Domain(db.Model):
    """A root domain plus a flag recording whether subdomain enumeration ran."""
    id = db.Column(db.Integer, primary_key=True)
    domain = db.Column(db.String, unique=True)
    subdomain_search_ran = db.Column(db.Boolean(), nullable=False)
    #subdomains = db.relationship('Subdomain', secondary='subdomain',
    #backref=db.backref('subdomain', lazy='dynamic'))

    def __repr__(self):
        return f'<Domain {self.domain}>'
class Subdomain(db.Model):
    """A discovered subdomain belonging to one Domain row."""
    id = db.Column(db.Integer, primary_key=True)
    subdomain = db.Column(db.String, unique=True, nullable=False)
    # FK to Domain; no relationship() is declared, so access is by id only.
    domain_id = db.Column(db.Integer, db.ForeignKey('domain.id'), nullable=False)
|
3,807 | a4697f0a0d0cc264b28a58bcc28528c221b4cb49 | import os
import datetime
from classifier import Classification
class PersistableClassificationModel(Classification):
    """
    Classification classifier with ability to persist trained classifier on the disk.
    """

    def __init__(self, output_dir, origin):
        # Wrapped classifier that does the real work; this class only adds
        # on-disk persistence.
        self.originModel = origin
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        # Timestamped file name; ':' (illegal in Windows file names) is
        # replaced after formatting the datetime into the name.
        self.path_to_persist = os.path.join(
            output_dir,
            'model-{0}.mdl'.format(datetime.datetime.now()).replace(":", "-"))

    @property
    def model(self):
        # Expose the underlying model object (assumed to provide .save();
        # TODO confirm against the Classification contract).
        return self.originModel.model

    def persist(self):
        """
        Persists original classifier to the file.
        """
        self.originModel.model.save(self.path_to_persist)
        return self

    def build(self):
        """
        Simply calls original classifier to build classifier.
        """
        self.originModel.build()
        return self

    def train(self, training_set):
        """
        Simply calls original classifier to train classifier.
        """
        self.originModel.train(training_set)
        return self
|
3,808 | 167c36627c7c3377266bde266e610792ba29b3e4 | import re
#lines = open("input.1").read()
# Read the puzzle input: one walk instruction string per line
# (looks like Advent of Code 2020 day 24 -- hex tile flipping; confirm).
lines = open("input.2").read()
lines = lines.splitlines()

# Offsets for the six hex directions mapped onto a skewed square grid.
moves = {}
moves["nw"] = [-1, -1]
moves["ne"] = [ 0, -1]
moves["w"] = [-1, 0]
moves["e"] = [ 1, 0]
moves["sw"] = [ 0, 1]
moves["se"] = [ 1, 1]

# Sparse grid: (x, y) -> "B"/"W"; tiles never touched are implicitly white.
tiles = {}
def fliptile(tile):
    """Return the opposite tile colour: "B" becomes "W", anything else "B"."""
    return "W" if tile == "B" else "B"
# Walk each instruction line from the origin and flip the destination tile.
for line in lines:
    # Alternation order matters: two-letter directions are tried before a
    # lone 'e'/'w' can split them (the regex engine scans left to right).
    regexp = "(e|w|nw|ne|sw|se)"
    m = re.findall(regexp, line)
    position = [0, 0]
    for instruction in m:
        position = [pos1 + pos2 for pos1, pos2 in zip(position, moves[instruction])]
    try:
        tiles[position[0], position[1]] = fliptile(tiles[position[0], position[1]])
    except KeyError:
        # First visit to this tile: white -> black.
        tiles[position[0], position[1]] = "B"

# Python 2 print statements: dump the grid and the part-1 answer.
for key in tiles:
    print str(key) + ": " + str(tiles[key])
print "Part 1: " + str(sum(value == "B" for value in tiles.values()))
def countblack(t, x, y):
    """Count the black ("B") neighbours of tile (x, y).

    Tiles absent from the dict `t` have never been flipped and count as white.
    """
    total = 0
    for dx, dy in moves.values():
        if t.get((x + dx, y + dy)) == "B":
            total += 1
    return total
def gol(t):
    """One game-of-life generation over the hex grid.

    Scans a bounding box two cells beyond the known tiles (so white tiles
    adjacent to black ones are considered) and returns the next-generation
    tile dict. Rules: black with 0 or >2 black neighbours -> white; white
    with exactly 2 black neighbours -> black.
    """
    # NOTE(review): min(t)/max(t) compare (x, y) keys lexicographically, so the
    # y-bounds come from the extreme-x keys; the +/-2 margin compensates --
    # confirm it is always wide enough for this move set.
    minx = min(t)[0] - 2
    maxx = max(t)[0] + 2
    miny = min(t)[1] - 2
    maxy = max(t)[1] + 2
    #print [minx, maxx, miny, maxy]
    # Work on a copy so neighbour counts always read the previous generation.
    temptiles = t.copy()
    for x in range(minx, maxx):
        for y in range(miny, maxy):
            black = countblack(t, x, y)
            try:
                if t[x, y] == "B" and (black == 0 or black > 2):
                    temptiles[x, y] = "W"
                if t[x, y] == "W" and black == 2:
                    temptiles[x, y] = "B"
            except KeyError:
                # Unseen tile: implicitly white; becomes black on exactly 2.
                if black == 2:
                    temptiles[x, y] = "B"
                else:
                    temptiles[x, y] = "W"
            #try:
            #    print "Tile (" + str(x) + "," + str(y) + ") was " + t[x, y] + " and becomes/remains " + temptiles[x, y] + " with " + str(black) + " black enighbours!"
            #except KeyError:
            #    print "Tile (" + str(x) + "," + str(y) + ") was W and becomes/remains " + temptiles[x, y] + " with " + str(black) + " black enighbours!"
    return temptiles
# Part 2: run 100 generations, printing the black-tile count each day
# (Python 2 print statement).
turns = 100
for turn in range(turns):
    # hex game of life
    tiles = gol(tiles)
    print "Day " + str(turn + 1) + ": " + str(sum(value == "B" for value in tiles.values()))
|
3,809 | 1a8c9be389aad37a36630a962c20a0a36c449bdd | def func(i):
if(i % 2 != 0): return False
visited = [0,0,0,0,0,0,0,0,0,0]
temp = i
while(i):
x = i%10
if (visited[x] == 1) or (x == 0): break
visited[x] = 1;
i = (int)(i / 10);
if(i == 0):
for y in str(temp):
if(temp % int(y) != 0): return False
else: return False
return True
# Read the bounds and count qualifying numbers in [n, m).
# NOTE(review): range(n, m) excludes m -- confirm the problem statement
# does not want an inclusive upper bound.
n,m = map(int, input().split())
print(sum([1 for i in range(n,m) if func(i)]))
3,810 | dae8529aa58f1451d5acdd6607543c202c3c0c66 | ####
# Some more on variables
####

# Variables are easily redefined.
# Let's start simple.
x=2 # x starts out holding the integer 2
print (x)
x=54 # we are redefining x to equal 54
print (x)
x= "Cheese" # x is now the string 'Cheese' -- rebinding can change the type too
print (x)
# Try running this program to see x
# printed at each point.
# Clearly variables can be manipulated easily;
# this can make them very useful.
|
3,811 | ae6cbb181e024b8c0b222d14120b910919f8cc81 | """Restaurant"""
def main():
    """Read a bill amount from stdin and print the 10% service charge,
    7% VAT, and grand total."""
    money = int(input())
    service = money * 0.1
    vat = money * 0.07
    print("Service Charge : %.2f Baht" % service)
    print("VAT : %.2f Baht" % vat)
    print("Total : %.2f Baht" % (money + vat + service))


main()
|
3,812 | e15524d7ae87cbf0b10c54ee0bdc613ba589c1a9 | from Cars import Bmw
from Cars import Audi
from Cars import Nissan
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print('In Sample.py........')
    # Import classes from your brand new package
    # Create an object of Bmw class & call its method
    ModBMW = Bmw.Bmw()
    ModBMW.outModels()
    # Create an object of Audi class & call its method
    ModAudi = Audi.Audi()
    ModAudi.outModels()
    # Create an object of Nissan class & call its method
    ModNissan = Nissan.Nissan()
    ModNissan.outModels()
3,813 | 9081d0f75ac53ab8d0bafb39cd46a2fec8a5135f | from django import forms
from .models import Profile
class ImageForm(forms.ModelForm):
    """Profile-image upload form; the image is optional and only valid image
    files are accepted (invalid uploads show the custom error message)."""
    userimage = forms.ImageField(required=False, error_messages={'invalid':("Image file only")}, widget=forms.FileInput)

    class Meta:
        model = Profile
        fields = ['userimage',]
|
3,814 | 9725c4bfea1215e2fb81c31cbb8948fd1656aca9 | from airbot import resolvers
from airbot import utils
import unittest
from grapher import App
import pprint
# Okta test-tenant OpenID settings used by the end-to-end tests below.
# NOTE(review): 'locahost' in REDIRECT_URI looks like a typo for 'localhost',
# but the value must match what is registered with the identity provider, so
# it is left untouched -- confirm before changing.
OPENID_CONFIG = {
    'ISSUER_URL': 'https://dev-545796.oktapreview.com',
    'CLIENT_ID': '0oafvba1nlTwOqPN40h7',
    'REDIRECT_URI': 'http://locahost/implicit/callback'
}
class TestEndToEnd(unittest.TestCase) :
    """End-to-end exercise of the grapher App handler (Python 2 prints):
    create a bot, then create / update / list / delete an entity under it."""

    @classmethod
    def get_claim(cls):
        # Fetch an OpenID claim for the hard-coded test user.
        # NOTE(review): credentials are committed in source -- rotate them and
        # move to environment variables.
        claim = utils.OpenidHelper.get_claim(OPENID_CONFIG, "moshir.mikael@gmail.com","Azerty1!")
        return claim

    def test_entity_api(self):
        # 1) Create a bot under the test account.
        event = {
            "identity": {"claims" : TestEndToEnd.get_claim()},
            "field" : "createBot",
            "path" : "Mutation/createBot",
            "arguments" : {
                "accountid" : "testaccount",
                "input" : {
                    "name" : "mytestbot",
                    "description" :"test"
                }
            }
        }
        self.assertTrue(True)
        b= App.handler(event,{})
        print b
        # 2) Create an entity under the new bot.
        event = {
            "identity": {"claims": TestEndToEnd.get_claim()},
            "field": "createEntity",
            "path": "Mutation/createEntity",
            "arguments": {
                "botid": b["ID"],
                "input": {
                    "name": "mytestbot",
                    "description": "test"
                }
            }
        }
        w= App.handler(event,{})
        print w
        # 3) NOTE(review): this getEntity event is built but never sent -- it
        # is overwritten by the updateEntity event below.
        event = {
            "identity": {"claims": TestEndToEnd.get_claim()},
            "field": "getEntity",
            "path": "Query/getEntity",
            "arguments": {
                "entityid": w["ID"],
            }
        }
        # 4) Update the entity's tags.
        event = {
            "identity": {"claims": TestEndToEnd.get_claim()},
            "field": "updateEntity",
            "path": "Mutation/updateEntity",
            "arguments": {
                "entityid": w["ID"],
                "input": {
                    "tags" : "x,y,z"
                }
            }
        }
        u = App.handler(event, {})
        print "U = ", u
        # 5) List the bot's entities.
        event = {
            "identity": {"claims": TestEndToEnd.get_claim()},
            "field": "listEntities",
            "path": "Query/listentities",
            "arguments": {
                "botid": b["ID"]
            }
        }
        l = App.handler(event, {})
        print "entities = ",l
        # 6) Delete the entity again.
        event = {
            "identity": {"claims": TestEndToEnd.get_claim()},
            "field": "deleteEntity",
            "path": "Mutation/deleteEntity",
            "arguments": {
                "entityid": w["ID"]
            }
        }
        d = App.handler(event, {})
        print "D = ", d

if __name__ == "__main__" :
    unittest.main(verbosity=2)
|
3,815 | aea92827753e12d2dc95d63ddd0fe4eb8ced5d14 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
#tf.config.allow_growth = True
#config.gpu_options.allow_growth = True
#session = tf.Session(config=config....)
from tensorflow import keras
# In[5]:
# Load Fashion-MNIST and scale pixel values from [0, 255] to [0, 1].
data = keras.datasets.fashion_mnist
(train_X, train_y), (test_X,test_y) = data.load_data()
# Index -> human-readable label for the 10 Fashion-MNIST classes.
class_names = ['t-shirt', 'trouser', 'pullover', 'dress'
               ,'coat', 'sandal', 'shirt', 'sneaker'
               , 'bag', 'ankle boot']
train_X = train_X/255
test_X = test_X/255

# In[7]:

plt.imshow(train_X[7], cmap= 'binary')
# In[ ]:
def convolve(image, fltr):
    """Valid (no-padding) cross-correlation of `image` with `fltr`.

    Parameters
    ----------
    image : 2-D numpy array.
    fltr : 2-D numpy array; the window size is taken from its shape
        (generalized from the original hard-coded 2x2 window -- behaviour is
        unchanged for 2x2 filters).

    Returns
    -------
    list
        The window responses flattened in row-major order; for an (H, W)
        image and (fh, fw) filter the list has (H-fh+1) * (W-fw+1) entries.
    """
    fh, fw = fltr.shape
    responses = []
    for r in range(image.shape[0] - fh + 1):
        for c in range(image.shape[1] - fw + 1):
            responses.append(np.sum(np.multiply(image[r:r + fh, c:c + fw], fltr)))
    return responses
# NOTE(review): `train` is not defined anywhere in this notebook export, so
# this demo cell raises NameError as-is; it presumably referred to a
# DataFrame from an earlier notebook version -- confirm before running.
img_matrix = np.array(train.iloc[6,1:]).reshape(28,28)
flt = np.matrix([[1,1],[0,0]])
# 28x28 image with a 2x2 filter yields 27x27 responses.
conv = np.array(convolve(img_matrix,flt)).reshape(27,27)
plt.imshow(img_matrix, cmap='gray')
plt.show()
plt.imshow(conv, cmap='gray')
plt.show()
# In[33]:
# Build and train a dense classifier on GPU 0, timing the fit.
with tf.device('GPU:0'):
    model = keras.Sequential([
        #keras.layers.Conv2D(filters=32 ,kernel_size=3, activation='relu',input_shape=(28,28,1)),
        keras.layers.Flatten(input_shape=(28,28)),
        #keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(2560, activation='relu'),
        keras.layers.Dense(2560, activation='relu'),
        #keras.layers.Dense(2560, activation='relu'),
        keras.layers.Dense(10, activation='softmax')
    ])
    print(model.summary())
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    import time
    tic = time.time()
    from warnings import filterwarnings
    # NOTE(review): bare `filterwarnings` is a no-op -- the function is never
    # called, so no warnings are actually filtered.
    filterwarnings
    model.fit(train_X, train_y,batch_size=1024, epochs=3)
    toc = time.time()
    print('time : {:0.1f} sec '.format(toc-tic))
# In[72]:
#predictions
# Evaluate on both splits (quietly) and report accuracies.
train_loss, train_accuracy = model.evaluate(train_X, train_y,verbose=False )
test_loss, test_accuracy = model.evaluate(test_X, test_y, verbose = False )

# In[73]:

# NOTE(review): 'trin_accuracy' is a typo in the output label ('train');
# left unchanged here since it is runtime output text.
print('trin_accuracy : {}'.format(train_accuracy))
print('test_accuracy : {}'.format(test_accuracy))

# In[74]:

predictions = model.predict(test_X)

# In[76]:

# Show one test sample with its ground-truth class name as the title.
plt.imshow(test_X[26], cmap='binary')
plt.title(class_names[test_y[26]])
|
3,816 | e03290746d6520fde63836e917f6af0c76596704 | # find the 12-digit number formed by concatenating a series of 3 4-digit
# numbers who are permutations of each other and are all prime
from itertools import permutations, dropwhile
from pe_utils import prime_sieve
prime_set = set(prime_sieve(10000))
def perm(n, inc):
    """Return the triple (n, n+inc, n+2*inc) when all three members are prime
    and are digit-permutations of n; otherwise return None."""
    digit_perms = {int("".join(p)) for p in permutations(str(n))}
    candidates = (n, n + inc, n + inc * 2)
    if all(c in prime_set and c in digit_perms for c in candidates):
        return candidates
    return None
# Candidate starting values: 4-digit primes (sieve up to 3333 so the largest
# member n + 6660 stays 4-digit).
primes = dropwhile(lambda x: x < 1000, prime_sieve(3333))
# Keep triples (p, p+3330, p+6660) that are all prime digit-permutations of p.
primes = filter(lambda x: x != None, map(lambda x: perm(x, 3330), primes))
# Concatenate each triple into a single 12-digit number.
primes = list(map(lambda x: x[0] * 10**8 + x[1] * 10**4 + x[2], primes))
print(primes)
|
3,817 | 8ce2e9cd9ceed6c79a85682b8bc03a3ffb5131c4 | """
This module provides an optimizer class that is based on an evolution
strategy algorithm.
"""
import copy, random, math
from time import time
from xml.dom import minidom
from extra.schedule import Schedule
from extra.printer import pprint, BLUE
class Optimizer(object):
    """
    This class is the implementation of the evolution strategy to optimize
    and evaluate schedules.
    """
    def __init__(self, plant, orderList, simulator, evaluator):
        """
        plant - the plant to run the simulation and evaluation on
        orderList - the list of orders in the given schedule
        simulator - Simulator instance to run a schedule
        evaluator - Evaluator instance to evaluate a schedule
        """
        assert plant is not None
        assert orderList is not None
        self.plant = plant
        self.orderList = orderList
        self.simulator = simulator
        self.evaluator = evaluator
        # used for benchmarking
        self.simulatorTime = 0
        # enable/disable console output
        self.printing = True
        # parameters for the evolution strategy algorithm
        self.populationSize = 0
        self.indivMutationRate = 0
        self.selectionRate = 0
        self.mutationRange = 0
        self.iterations = 0

    @staticmethod
    def fromXml(xmlDoc, plant, orderList, simulator, evaluator):
        """
        Loads the optimizer configuration and parameters from an XML tree.
        """
        optimizer = Optimizer(plant, orderList, simulator, evaluator)
        element = xmlDoc.getElementsByTagName("optimizer")
        # there should only be 1 optimizer node in the XML tree!
        assert len(element) == 1
        element = element[0]
        # load the different attributes
        optimizer.populationSize = \
            int(element.getAttribute("populationSize"))
        optimizer.mutationRange = \
            int(element.getAttribute("mutationRange"))
        optimizer.iterations = \
            int(element.getAttribute("iterations"))
        optimizer.indivMutationRate = \
            float(element.getAttribute("indivMutationRate"))
        optimizer.selectionRate = \
            float(element.getAttribute("selectionRate"))
        return optimizer

    @staticmethod
    def fromXmlFile(filename, plant, orderList, simulator, evaluator):
        """
        Loads the optimizer configuration and parameters from an XML file.
        """
        # Fix: use a context manager so the handle is closed even when
        # parsing raises (the original leaked it on error and shadowed the
        # builtin name `file`).
        with open(filename, "r") as xmlFile:
            doc = minidom.parse(xmlFile)
        return Optimizer.fromXml(doc, plant, orderList, simulator, evaluator)

    def run(self, initialPopulation = None):
        """
        Entry point of the evolution strategy algorithm.
        """
        pprint("OPT calculating initial population...", BLUE, self.printing)
        if initialPopulation is None:
            # if we don't get an initial set of schedules as the initial population,
            # then we need to generate one.
            population = self.initialPopulation()
        else:
            # if we do get an initial population as input, then we just need to
            # calculate the fitnesses of the schedules in it.
            for p in initialPopulation:
                self.calcIndividualFitness(p)
            # if the population is too small or too large (less than or larger than
            # self.populationSize) then this will fix that for us.
            population = self.mutatePopulation(initialPopulation)
        # go through the needed number of iterations and mutate the population
        # everytime, this will keep the best individuals and will return the
        # best population achieved at the end.
        for i in range(self.iterations):
            pprint("OPT iteration number %s" % (i + 1), BLUE, self.printing)
            population = self.mutatePopulation(population)
        return population

    def calcIndividualFitness(self, indiv):
        """
        Calculates fitness of a schedule (simulate, then evaluate), and
        accumulates the wall-clock time spent in self.simulatorTime.
        """
        t = time()
        self.simulator.simulate(indiv)
        self.evaluator.evaluate(indiv)
        t = time() - t
        self.simulatorTime += t

    def sortPopulation(self, population):
        """
        Sorts the population in place, best (highest) fitness first.
        """
        # Fix: the original used a Python-2-only cmp-style sort
        # (sort(lambda a, b: cmp(b.fitness, a.fitness))); key= with
        # reverse=True is equivalent and works on Python 2 and 3.
        population.sort(key=lambda indiv: indiv.fitness, reverse=True)

    def mutatePopulation(self, population):
        """
        Mutates a population. Selects the best n individuals (based on the
        selectionRate) to mutate (maybe they'll give us even better individuals!).
        After mutating an individual, it checks if we have an individual that is
        similar to the mutated one, if so, then try to mutate again, otherwise,
        we simply calculate its fitness and append it to the list. We then sort
        the population based on fitness and return the best PopulationSize items.
        """
        for i in range(int(math.ceil(self.selectionRate * len(population)))):
            mutatedIndiv = self.mutateIndividual(population[i])
            while self.isIndividualInPopulation(mutatedIndiv, population):
                mutatedIndiv = self.mutateIndividual(population[i])
            self.calcIndividualFitness(mutatedIndiv)
            population.append(mutatedIndiv)
        self.sortPopulation(population)
        return population[:self.populationSize]

    def isIndividualInPopulation(self, individual, population):
        """
        Checks if an individual is in a population.
        """
        return any(i == individual for i in population)

    def initialPopulation(self):
        """
        Generates an initial population.
        """
        population = []
        # generate an initial individual, calculate its fitness and add it to our
        # new population
        initIndiv = self.initialIndividual()
        self.calcIndividualFitness(initIndiv)
        population.append(initIndiv)
        # until we have filled the population
        for i in range(self.populationSize):
            # keep mutating the initial individual to get new ones
            mutatedIndiv = self.mutateIndividual(initIndiv)
            # if that new individual is in the population, don't add it, try
            # getting a new one
            while self.isIndividualInPopulation(mutatedIndiv, population):
                mutatedIndiv = self.mutateIndividual(initIndiv)
            self.calcIndividualFitness(mutatedIndiv)
            population.append(mutatedIndiv)
        self.sortPopulation(population)
        return population

    def mutateIndividual(self, originalIndiv):
        """
        Gets an individual and returns a mutation of it.
        """
        # we need to deepcopy the schedule object
        newIndiv = copy.deepcopy(originalIndiv)
        # emtpy its schedule (we don't need it since it will be generated from the
        # new start times using the simulator
        newIndiv.schedule = []
        # same for the finish times
        newIndiv.finishTimes = []
        indivLen = len(newIndiv.startTimes)
        # the plant-entrance times in the schedule should be equal to the number
        # of orders! otherwise, something is wrong!
        assert indivLen == len(self.orderList.orders)
        # Fix: wrap range() in list() so `del indexes[index]` below also works
        # on Python 3 (range objects don't support item deletion).
        indexes = list(range(indivLen))
        # for n times (based on the individual mutation rate), mutate a random
        # order plant-entrance time that we didn't mutate before.
        for i in range(int(self.indivMutationRate * indivLen)):
            # Fix: randrange() can never return len(indexes), unlike
            # int(random.uniform(0, len(indexes))) which could (rarely)
            # produce an out-of-range index.
            index = random.randrange(len(indexes))
            newIndiv.startTimes[indexes[index]][2] = \
                self.mutateGene(newIndiv.startTimes[indexes[index]][2])
            del indexes[index]
        return newIndiv

    def mutateGene(self, value):
        """
        Gets a value and returns a mutation of it based on the mutation range.
        """
        addent = int(random.uniform(0, self.mutationRange))
        if (random.uniform(0, 1) < 0.5):
            addent = -addent
        # never allow a negative plant-entrance time
        return max(0, value + addent)

    def initialIndividual(self):
        """
        Generates an initial individual based on order deadlines - minimum
        processing time. Account whether an order has a current machine and
        current overtime.
        """
        indiv = Schedule()
        for o in self.orderList.orders:
            if o.currentMachine == "":
                minProcTime = o.recipe.calcMinProcTime(self.plant)
                machineName = o.recipe.recipe[0][0]
            else:
                machineName = o.currentMachine
                minProcTime = o.recipe.calcMinProcTime(self.plant, o.currentMachine)
            indiv.startTimes.append(
                [o, str(machineName), max(0, o.deadline - minProcTime)])
        return indiv
|
3,818 | f379092cefe83a0a449789fbc09af490081b00a4 | from igbot import InstaBot
from settings import username, pw
from sys import argv
def execute_script(bot):
    """Run the configured bot actions on an InstaBot instance.

    Fix: the parameter used to be named ``InstaBot``, shadowing the imported
    class of the same name; renamed to ``bot`` (callers pass it positionally).
    """
    bot.get_unfollowers()
    #bot.unfollow()
    #bot.follow()
    #bot.remove_followers()
def isheadless():
    """Return True unless the first CLI argument is 'head'; any other
    argument is rejected with a ValueError."""
    if len(argv) <= 1:
        return True
    if argv[1] == 'head':
        return False
    raise ValueError("optional arg must be : 'head'")
if __name__ == '__main__':
    bot = None
    headless = isheadless()
    if headless:
        # Third positional argument presumably toggles headless browsing --
        # TODO confirm against InstaBot's constructor signature.
        bot = InstaBot(username, pw, True)
    else:
        bot = InstaBot(username, pw)
    # Only run the script actions when the session is usable
    # (`legal` semantics assumed from usage -- confirm in igbot).
    if bot.legal:
        execute_script(bot)
    bot.close_session()
3,819 | 2b8b502381e35ef8e56bc150114a8a4831782c5a | class Solution(object):
def maxDistToClosest(self, seats):
"""
:type seats: List[int]
:rtype: int
"""
start = 0
end = 0
length = len(seats)
max_distance = 0
for i in range(len(seats)):
seat = seats[i]
if seat == 1:
if start == 0 or end == length - 1:
max_distance = max(max_distance, end - start + 1)
else:
max_distance = max(max_distance, (end - start + 1) / 2 + (end - start + 1) % 2)
if i + 1 < length:
start = end = i + 1
else:
end = i
if start == 0 or end == length - 1:
max_distance = max(max_distance, end - start + 1)
else:
max_distance = max(max_distance, (end - start + 1) / 2 + (end - start + 1) % 2)
return max_distance |
3,820 | a5559ff22776dee133f5398bae573f515efb8484 | # MINISTを読み込んでレイヤーAPIでCNNを構築するファイル
import tensorflow as tf
import numpy as np
import os
import tensorflow as tf
import glob
import numpy as np
import config as cf
from data_loader import DataLoader
from PIL import Image
from matplotlib import pylab as plt
# Load the shuffled training split and reshape the flat samples into
# Height x Width images (dimensions come from the project config `cf`).
dl = DataLoader(phase='Train', shuffle=True)
X_data , y_data = dl.shuffle_and_get()
# dl_test = DataLoader(phase='Test', shuffle=True)
X_data = np.reshape(X_data,[-1,cf.Height, cf.Width])
# plt.imshow(X_data[0])
# test_imgs, test_gts = dl_test.get_minibatch(shuffle=True)
# TF1-style session config: grow GPU memory on demand and pin to GPU 0.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list="0"
# def load_img():
# import cv2
# img = cv2.imread("test.jpg").astype(np.float32)
# img = cv2.resize(img, (cf.Width, cf.Height,1))
# img = img[:,:,(2,1,0)]
# img = img[np.newaxis, :]
# img = img / 255.
# return img
# with tf.Session(config=config) as sess:
# saver = tf.train.Saver()
# saver.restore(sess, "out.ckpt")
# img = load_img()
# pred = logits.eval(feed_dict={X: img, keep_prob: 1.0})[0]
# pred_label = np.argmax(pred)
# print(pred_label)
# X_data = dataset['train_img']
# y_data = dataset['train_label']
# print('Rows: %d, Columns: %d' % (X_data.shape[0], X_data.shape[1]))
# X_test =dataset['test_img']
# y_test =dataset['test_label']
# print('Rows: %d, Columns: %d' % (X_test.shape[0], X_test.shape[1]))
# X_train, y_train = X_data[:50000,:], y_data[:50000]
# X_valid, y_valid = X_data[50000:,:], y_data[50000:]
# print('Training: ', X_train.shape, y_train.shape)
# print('Validation: ', X_valid.shape, y_valid.shape)
# print('Test Set: ', X_test.shape, y_test.shape)
|
3,821 | 3acbb37809462ee69ff8792b4ad86b31dba5d630 | #!/usr/bin/env python2.7
from __future__ import print_function, division
import numpy as np
import matplotlib
import os
#checks if there is a display to use.
if os.environ.get('DISPLAY') is None:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import dtk
import sys
import time
import numpy.random
from matplotlib.colors import LogNorm
from scipy.optimize import minimize
from calc_ngal import *
from generate_parameter_dist import *
from zmr import ZMR
from matplotlib import rc
rc('text', usetex=True)
rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], })
rc('font', size=18)
def load_clusters(file_name):
    """Load a ClusterData file, memoizing the result per file name.

    Bug fix: the cache was consulted but never populated, so every call
    re-read the file from disk; freshly loaded data is now stored.
    """
    if file_name not in load_clusters._cache:
        cluster_data = ClusterData()
        cluster_data.load_file(file_name)
        load_clusters._cache[file_name] = cluster_data
    else:
        cluster_data = load_clusters._cache[file_name]
    return cluster_data
load_clusters._cache = {}
def get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=False, manual_calc=False):
    """Plot projected Ngal vs. cluster mass for one parameter file.

    Draws the core-model fit (line + error band, if plot_fit), the observed
    SDSS/redMaPPer points (error bars), and optionally a manually recomputed
    Ngal from the raw cluster catalog (if manual_calc).  Styling differs for
    the SPIDERS sample (open markers, opposite x-offset).

    param_fname : path of the dtk parameter file describing the run
    cluster_num : number of clusters to process; None reads it from the
                  parameter file, -1 (with manual_calc) means "all"
    color       : matplotlib color shared by all artists of this run
    """
    param = dtk.Param(param_fname)
    cluster_loc = param.get_string('cluster_loc')
    if cluster_num is None:
        cluster_num = param.get_int('cluster_load_num')
    zmrh5_loc = param.get_string('zmrh5_loc')
    zmr_sdss = ZMR(zmrh5_loc)
    zmr_fit = ZMR("output/"+param_fname+"/zmr_lkhd_cores.param")
    m_bins = zmr_fit.m_bins
    r_bins = zmr_fit.r_bins
    zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal() # only one z-bin, so we don't select it out
    zmr_core_ngal = zmr_core_ngal[0]
    zmr_core_ngal_err = zmr_core_ngal_err[0]
    zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()
    zmr_sdss_ngal = zmr_sdss_ngal[0]
    zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]
    if manual_calc:
        # Recompute Ngal per cluster from the best-fit (m_infall, r_disrupt)
        # and average it inside each mass bin.
        model_fit_fname = "figs/"+param_fname+"/calc_likelihood_bounds.py/grid_fit_param.txt"
        model_fit = load_fit_limits(model_fit_fname)
        m_infall = 10**model_fit['mi']
        if 'rd' in model_fit:
            # print(model_fit['rd'])
            r_disrupt = model_fit['rd']/1000.0 #convert to mpc/h from kpc/h
        else:
            r_disrupt = np.inf
        # print("\ncalculating ngal for ", param_fname)
        # print("\tmodel_fit_fname:", model_fit_fname)
        # print("\tmodel params: {:.2e} {:.3f}".format(m_infall, r_disrupt))
        print(cluster_loc)
        cluster_data = load_clusters(cluster_loc)
        if cluster_num == -1:
            cluster_num = cluster_data.num
        cluster_ngal = np.zeros(cluster_num)
        cluster_m_i = np.zeros(cluster_num)
        for i in range(0, cluster_num):
            mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)
            cluster_m_i[i] = mass_index
            cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]
        ngal_mean = np.zeros(len(m_bins)-1)
        ngal_err = np.zeros(len(m_bins)-1)
        ngal_std = np.zeros(len(m_bins)-1)
        for i in range(0, len(m_bins)-1):
            slct = cluster_m_i == i
            ngal_mean[i] = np.mean(cluster_ngal[slct])
            ngal_std[i] = np.std(cluster_ngal[slct])
            ngal_err[i] = ngal_std[i]/np.sqrt(np.sum(slct))
            # print("{:.2e}->{:.2e}: {}".format(m_bins[i], m_bins[i+1], np.sum(slct)))
        plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label='Ngal recalc')
    if plot_fit:
        plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)
        plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal-zmr_core_ngal_err, zmr_core_ngal+zmr_core_ngal_err, color=color, alpha=0.3)
    # Nudge data points left/right so SPIDERS and redMaPPer don't overlap.
    offset_amount = 1.025
    if spider:
        markerfacecolor='None'
        markeredgecolor=color
        xaxis_offset=offset_amount
        lw = 1
    else:
        markerfacecolor=color
        markeredgecolor='None'
        xaxis_offset=1./offset_amount
        lw = 2
    # remove problematic 2.5 L* low mass cluster in the spider sample
    if "mstar-1" in param_fname and "spider" in param_fname:
        print("SPIDERSS!: ", zmr_sdss_ngal)
        zmr_sdss_ngal[zmr_sdss_ngal < 0.1 ] = np.nan
    plt.errorbar(dtk.bins_avg(m_bins)*xaxis_offset, zmr_sdss_ngal,
                 yerr=zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,
                 markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)
    # plt.fill_between(dtk.bins_avg(m_bins), ngal_mean-ngal_err, ngal_mean+ngal_err, color=color, alpha=0.3)
    plt.yscale('log')
    plt.xscale('log')
    # plt.legend(loc='best')
def format_plot():
    """Finalize the Ngal-vs-mass figure: proxy legend entries, axis labels,
    log-log limits and a tight layout.  Operates on the current pyplot axes."""
    # Empty plots only create legend handles for each luminosity threshold.
    p4 = plt.plot([],[], 'tab:purple', lw=5, label=r'{:1.2f}~L$_*$'.format(0.4))
    p3 = plt.plot([],[], 'tab:red', lw=5, label=r'{:1.2f}~L$_*$'.format(0.63))
    p2 = plt.plot([],[], 'tab:green', lw=5, label=r'{:1.2f}~L$_*$'.format(1.0))
    p12 = plt.plot([],[], 'tab:orange',lw=5, label=r'{:1.2f}~L$_*$'.format(1.58))
    p1 = plt.plot([],[], 'tab:blue',lw=5, label=r'{:1.2f}~L$_*$'.format(2.5))
    plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label="redMaPPer", capsize=0)
    plt.plot([], [], color='k', label="Core Model")
    # plt.errorbar([], [], yerr=[], fmt='o', lw=1, color='k', markerfacecolor='none', label='SPIDERS clusters', capsize=0)
    plt.legend(ncol=2, loc='best', framealpha=0.0)
    plt.xlabel(r'M$_{200c}$ [h$^{-1}$ M$_\odot$]')
    plt.ylabel(r'Projected N$_{\rm{gal}}$')
    plt.ylim([1e-1, 3e3])
    plt.xlim([1e14, 5e15])
    plt.tight_layout()
def plot_ngal_fits():
    """Plot the hard-coded set of simet (fit+points) and spider (points only)
    runs, one color per luminosity threshold, then format the figure."""
    get_ngal_fit("params/cfn/simet/mstar1/mean/a3_rd.param", None, 'c')
    get_ngal_fit("params/cfn/simet/mstar0.5/mean/a3_rd.param", None, 'g')
    get_ngal_fit("params/cfn/simet/mstar0/mean/a3_rd.param", None, 'b')
    get_ngal_fit("params/cfn/simet/mstar-1/mean/a3_rd.param", None, 'r')
    #just spider points
    get_ngal_fit("params/cfn/spider/mstar1/mean/spider_rd.param", None, 'c', plot_fit=False, spider=True)
    get_ngal_fit("params/cfn/spider/mstar0.5/mean/spider_rd.param", None, 'g', plot_fit=False, spider=True)
    get_ngal_fit("params/cfn/spider/mstar0/mean/spider_rd.param", None, 'b', plot_fit=False, spider=True)
    get_ngal_fit("params/cfn/spider/mstar-1/mean/spider_rd.param", None, 'r', plot_fit=False, spider=True)
    # get_ngal_fit("params/cfn/spider/mstar0/mean/spider_rd.param", None, 'm', plot_fit=False, spider=True)
    # get_ngal_fit("params/cfn/spider/mstar0/mean/bcg_rd.param", None, 'c', plot_fit=False, spider=True)
    format_plot()
def plot_ngal_fits2(pattern, mstars):
    """Plot one Ngal fit per mstar value, substituting each value into
    *pattern* at the '${mstarval}' placeholder.

    Successive runs cycle through the matplotlib 'tab' palette; the figure
    is finalized with format_plot().
    """
    palette = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',
               'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray',
               'tab:olive', 'tab:cyan']
    for color, mstar in zip(palette, mstars):
        param_fname = pattern.replace("${mstarval}", mstar)
        get_ngal_fit(param_fname, None, color)
    format_plot()
if __name__ == "__main__":
    # Pick which plot to produce from the first command-line argument,
    # defaulting to the McClintock 2019 mass calibration.
    # Bug fix: the old test was `len(sys.argv) > 2`, which silently ignored
    # a single argument (argv[1] exists as soon as len(sys.argv) > 1).
    if len(sys.argv) > 1:
        plot_name = sys.argv[1]
    else:
        plot_name = "OR_McClintock2019"
    mstars = ['-1', '-0.5', '0', '0.5', '1']
    if plot_name == "OR_Simet2017":
        pattern = 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
        plot_ngal_fits2(pattern, mstars)
    elif plot_name == "OR_McClintock2019":
        pattern = 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
        plot_ngal_fits2(pattern, mstars)
    # plot_ngal_fits()
    dtk.save_figs("figs/"+__file__+"/"+plot_name+"/", extension='.pdf')
    plt.show()
|
3,822 | 2ba5cb1265090b42b9a4838b792a3e81b209ba1a | import unittest
import A1
import part_manager
import security
class test_A1(unittest.TestCase):
    """Unit tests for the dealer-security / part-manager workflow (A1).

    Expected strings are asserted verbatim against the production modules,
    so they must stay byte-identical to the messages those modules emit.
    """
    # ----------------------------------- set up the mock data for test cases -----------------------------------
    def setUp(self):
        """Build Security, Part_Manager and DeliveryAddress fixtures covering
        valid inputs plus every single-field-missing variant."""
        self.security1 = security.Security("XXX-1234-ABCD-1234", None)
        self.security2 = security.Security(None, "kkklas8882kk23nllfjj88290")
        self.security3 = security.Security("XXX-1234-ABCD-1234", "kkklas8882kk23nllfjj88290")
        self.part_check1 = part_manager.Part_Manager("1233", "2")
        self.part_check2 = part_manager.Part_Manager(None, "5")
        self.part_check3 = part_manager.Part_Manager("2222", None)
        self.delivery1 = part_manager.DeliveryAddress("Mr. Jadeja", "South Park St", "Halifax", "NS", "B3J2K9")
        self.delivery2 = part_manager.DeliveryAddress(None, "South Park St", "Halifax", "NS", "B3J2K9")
        self.delivery3 = part_manager.DeliveryAddress("Mr. Jadeja", None, "Halifax", "NS", "B3J2K9")
        self.delivery4 = part_manager.DeliveryAddress("Mr. Jadeja", "South Park St", None, "NS", "B3J2K9")
        self.delivery5 = part_manager.DeliveryAddress("Mr. Jadeja", "South Park St", "Halifax", None, "B3J2K9")
        self.delivery6 = part_manager.DeliveryAddress("Mr. Jadeja", "South Park St", "Halifax", "NS", None)
        self.auth1 = security.Security("FAKEDEALER", "FAKEACCEESKEY")
        self.auth2 = security.Security("XXX-1111-ABCD-1111", "abcd123wxyz456qwerty78901")
        self.auth3 = security.Security("XXX-2222-ABCD-2222", "kkklas8882kk23nllfjj88292")
        self.part_status1 = part_manager.Part_Manager(["1234", "1111", "2222", "3333", "4444", "fake_part_number"],
                                                      ["1","2","3","4","5","6"])
    # ----------------------------------- Class: Security -----------------------------------
    # -----------------------------------------------------------------------------------------
    #     ------------------------------ Method: validate_dealer -----------------------------
    def test_dealerCheck(self):
        """validate_dealer flags a missing key, a missing id, and accepts both."""
        self.assertEqual(self.security1.validate_dealer(), "Invalid Input XML Response Error: in Dealer Access Key")
        self.assertEqual(self.security2.validate_dealer(), "Invalid Input XML Response Error: in Dealer Id")
        self.assertEqual(self.security3.validate_dealer(), "Dealer details validated")
    #     ------------------------------ Method: isDealerAuthorized ---------------------------
    def test_dealer_auth(self):
        """Only the registered id/key pair is authorized."""
        self.assertEqual(self.auth1.isDealerAuthorized(), "dealer not authorized.")
        self.assertEqual(self.auth2.isDealerAuthorized(), "dealer not authorized.")
        self.assertEqual(self.auth3.isDealerAuthorized(), "dealer authenticated")
    # ----------------------------------- Class: part_manager --------------------------------
    # ------------------------------------------------------------------------------------------
    #     ------------------------------ Method: validate_parts -------------------------------
    def test_partsCheck(self):
        """validate_parts requires both a part number and a quantity."""
        self.assertEqual(self.part_check1.validate_parts(), "Part Number and Quantity are good.")
        self.assertEqual(self.part_check2.validate_parts(), "Invalid Input XML Response: Error in Part number")
        self.assertEqual(self.part_check3.validate_parts(), "Invalid Input XML Response: Error in Quantity")
    #     ------------------------------ Method: validate_delivery ----------------------------
    def test_delivery(self):
        """validate_delivery rejects an address missing any single field."""
        self.assertEqual(self.delivery1.validate_delivery(), "Delivery Details are good")
        self.assertEqual(self.delivery2.validate_delivery(), "Invalid Input XML Response: Error in Delivery Details")
        self.assertEqual(self.delivery3.validate_delivery(), "Invalid Input XML Response: Error in Delivery Details")
        self.assertEqual(self.delivery4.validate_delivery(), "Invalid Input XML Response: Error in Delivery Details")
        self.assertEqual(self.delivery5.validate_delivery(), "Invalid Input XML Response: Error in Delivery Details")
        self.assertEqual(self.delivery6.validate_delivery(), "Invalid Input XML Response: Error in Delivery Details")
    #     ------------------------------ Method: SubmitPartForManufactureAndDelivery -----------
    def test_part_status_check(self):
        """Each submitted part maps to its per-part manufacturing status."""
        self.assertEqual(self.part_status1.SubmitPartForManufactureAndDelivery(),
                         ['success', 'out of stock', 'no longer manufactured', 'invalid part', 'success', 'Invalid Part'])
    # ----------------------------------- Class: A1 -------------------------------------------
    # -------------------------------------------------------------------------------------------
    #     ------------------------------ Method: main_function ---------------------------------
    def test_main_function(self):
        """End-to-end main_function: valid order succeeds; any missing
        credential or delivery field, or a zero quantity, is rejected."""
        self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],
                                          ['1234', '5678'], ['2', '25']), "Dealer is authorized, check the response in output.xml")
        self.assertEqual(A1.main_function([None, 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],
                                          ['2', '25']), "Invalid Input XML Response Error: in Dealer Id")
        self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', None], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],
                                          ['1234', '5678'], ['2', '25']), "Invalid Input XML Response Error: in Dealer Access Key")
        self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], [None, '35 Streetname', 'Halifax', 'NS', 'B2T1A4'], ['1234', '5678'],
                                          ['2', '25']), "Invalid Input XML Response: Error in Delivery Details")
        self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', None, 'Halifax', 'NS', 'B2T1A4'],
                                          ['1234', '5678'], ['2', '25']), "Invalid Input XML Response: Error in Delivery Details")
        self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', None, 'NS', 'B2T1A4'],
                                          ['1234', '5678'], ['2', '25']), "Invalid Input XML Response: Error in Delivery Details")
        self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', None, 'B2T1A4'],
                                          ['1234', '5678'], ['2', '25']), "Invalid Input XML Response: Error in Delivery Details")
        self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', None],
                                          ['1234', '5678'], ['2', '25']), "Invalid Input XML Response: Error in Delivery Details")
        self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],
                                          ["0000", '5678'], ['2', '25']), "Dealer is authorized, check the response in output.xml")
        self.assertEqual(A1.main_function(['XXX-1234-ABCD-1234', 'kkklas8882kk23nllfjj88290'], ['Mrs. Jane Smith', '35 Streetname', 'Halifax', 'NS', 'B2T1A4'],
                                          ['1234', '5678'], ['0', '25']), "Invalid Input XML Response: Error in Quantity")
if __name__ == '__main__':
    # Run the suite when this file is executed directly.
    unittest.main()
|
3,823 | 726aaa0ef129f950e6da6701bb20e893d2f7373b | import os
import numpy as np
from argparse import ArgumentParser
from tqdm import tqdm
from models.networks import Perceptron
from data.perceptron_dataset import Dataset, batchify
from utils.utils import L1Loss, plot_line
from modules.perceptron_trainer import Trainer
if __name__ == '__main__':
    # Train a perceptron on a tiny hard-coded 2-D dataset, saving a decision
    # boundary plot after every epoch and stopping early at zero errors.
    parser = ArgumentParser()
    parser.add_argument('--name', type=str, default='test')
    parser.add_argument('--input_dim', type=int, default=2)
    parser.add_argument('--output_dim', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--epochs', type=int, default=5)
    # Bug fix: the learning rate was declared type=int (with a float
    # default), so any value passed on the command line was truncated/rejected.
    parser.add_argument('--lr', type=float, default=0.1)
    parser.add_argument('--checkpoints_dir', type=str, default='../saves')
    args = parser.parse_args()
    # `inputs` avoids shadowing the builtin `input`.
    inputs = np.array([[1, 1], [-1, -1], [0, 0.5], [0.1, 0.5], [0.2, 0.2], [0.9, 0.5]])
    targets = np.array([1, -1, -1, -1, 1, 1])
    args.train_data = Dataset(inputs, targets)
    args.val_data = None
    args.mode = 'numpy'
    trainer = Trainer(args)
    # Bug fix: range(1, epochs) ran only epochs-1 iterations; include the
    # final epoch with range(1, epochs + 1).
    for i, epoch in enumerate(range(1, args.epochs + 1)):
        result = trainer.run_epoch()
        filename = os.path.join(trainer.save_dir, 'plot_%d.png'%(i+1))
        plot_line(trainer.weights, filename)
        print("Epochs: [%d]/[%d]"%(epoch, args.epochs))
        error_count = result['error_count']
        if error_count == 0:
            print('No error')
            print(trainer.weights)
            break
3,824 | 65d08fe1a3f6e5cc2458209706307513d808bdb2 | #!/usr/bin/env python
import os
import sys
#from io import open
import googleapiclient.errors
import oauth2client
from googleapiclient.errors import HttpError
from . import auth
from . import lib
debug = lib.debug
# modified start
# modified start
def get_youtube_handler():
    """Return the API Youtube object.

    Reads ~/.client_secrets.json and ~/.youtube-upload-credentials.json and
    authorizes through the browser-based OAuth flow.
    """
    options = {}  # NOTE(review): unused remnant of the old CLI-options code
    home = os.path.expanduser("~")
    default_credentials = os.path.join(home, ".youtube-upload-credentials.json")
    #client_secrets = options.client_secrets or os.path.join(home, ".client_secrets.json")
    #credentials = options.credentials_file or default_credentials
    client_secrets = os.path.join(home, ".client_secrets.json")
    credentials = default_credentials
    debug("Using client secrets: {0}".format(client_secrets))
    debug("Using credentials file: {0}".format(credentials))
    #get_code_callback = (auth.browser.get_code
    #if options.auth_browser else auth.console.get_code)
    get_code_callback = auth.browser.get_code
    return auth.get_resource(client_secrets, credentials,
        get_code_callback=get_code_callback)
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
def add_video_to_existing_playlist(youtube, playlist_id, video_id):
    """Insert the video into the playlist and return the API response dict.

    (Doc fix: the old docstring claimed a playlist ID was returned; the
    call actually returns the playlistItems().insert() response.)
    """
    lib.debug("Adding video to playlist: {0}".format(playlist_id))
    return youtube.playlistItems().insert(part="snippet", body={
        "snippet": {
            "playlistId": playlist_id,
            "resourceId": {
                "kind": "youtube#video",
                "videoId": video_id,
            }
        }
    }).execute()
def add_video_to_playlist(youtube, args, privacy="public"):
    """Add video to playlist (by title) and return the full response."""
    video_id, playlist_id = args['video_id'], args['playlist_id']
    print(video_id)
    if not playlist_id:
        # No playlist supplied: log and fall through (returns None).
        lib.debug("Error adding video to playlist")
        return None
    return add_video_to_existing_playlist(youtube, playlist_id, video_id)
def main(args):
    """Authenticate and add args['video_id'] to args['playlist_id'].

    args : dict with keys 'video_id' and 'playlist_id'.
    HTTP failures are reported on stdout instead of propagating.
    """
    youtube = get_youtube_handler()
    try:
        if youtube:
            add_video_to_playlist(youtube, args)
    except HttpError as e:
        print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
    else:
        # Bug fix: the old status line used attribute access (args.add_tag /
        # args.video_id) on a dict, which raised AttributeError on every run;
        # it also referenced a nonexistent 'add_tag' key from another script.
        print('Video id "%s" was added to playlist "%s".' % (args['video_id'], args['playlist_id']))
def run():
    """Add every video listed in update_playlist.txt to the target playlist.

    Each line has the form '<video_id>|||<anything>'; only the id before
    '|||' is used.
    """
    # Bug fix: the file handle was opened inline and never closed; use a
    # context manager so it is released deterministically.
    with open('update_playlist.txt', 'r') as fh:
        titles = [title.strip('\n') for title in fh]
    playlist_id = "PLANgBzSjRA6PD-hnW8--eK61w5GTtH_8e"
    for title in titles:
        video_id = title.split('|||')[0]
        main({'video_id': video_id, 'playlist_id': playlist_id})
# modified end
|
3,825 | 7531480f629c1b3d28210afac4ef84b06edcd420 | # coding=utf-8
# __author__ = 'lyl'
import json
import csv
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def read_json(filename):
    """Read a JSON file and return the parsed Python object.

    :param filename: path of the JSON file
    :return: the decoded object, e.g. [{}, {}, {}, ...]
    """
    # Bug fix: the file handle was opened inline and leaked; a context
    # manager closes it, and json.load avoids reading the file into memory
    # as an intermediate string.
    with open(filename) as f:
        return json.load(f)
def write_csv(filename, data_list):
    """Write a list of flat dicts to *filename* as CSV.

    The header row is taken from the keys of the first dict; every dict is
    expected to share those keys.

    :param filename: output csv path
    :param data_list: [{}, {}, ...]; an empty list writes nothing
    :return: None
    """
    # Robustness fix: data_list[0] raised IndexError on an empty input.
    if not data_list:
        return
    with open(filename,'w') as f:
        dict_writer = csv.DictWriter(f, data_list[0].keys())
        dict_writer.writeheader()
        dict_writer.writerows(data_list)
def write_csv2(filename, content_list):
    """Write a list of dicts to a CSV file: header row from the first dict's
    keys, then one row of values per dict (same contract as write_csv).

    :param filename: output csv path
    :param content_list: non-empty list of dicts sharing the same keys
    :return: None
    """
    with open(filename, 'w') as out:
        writer = csv.writer(out)
        writer.writerow(content_list[0].keys())
        writer.writerows(row.values() for row in content_list)
if __name__ == "__main__":
    # Read the scraped job records from the JSON dump...
    content_list = read_json('lagou_info_lin3.json')
    # ...and re-export them as CSV.
    write_csv( "lagou_info_lin3.csv", content_list)
3,826 | 68c9944c788b9976660384e5d1cd0a736c4cd0e6 | import drawSvg
import noise
import random
import math
import numpy as np
# Canvas size, Perlin-noise parameters and flow-field simulation settings.
sizex = 950
sizey = 500
noisescale = 400
persistence = 0.5
lacunarity = 2
seed = random.randint(0, 100)
actorsnum = 1000
stepsnum = 50
steplenght = 2
# Fill a 2-D Perlin noise map, one value per canvas pixel.
noisemap = np.zeros((sizex, sizey))
for i in range(sizex):
    for j in range(sizey):
        noisemap[i][j] = noise.pnoise2(i / noisescale, j / noisescale, octaves=2, persistence=persistence,
                                       lacunarity=lacunarity, repeatx=1024, repeaty=1024, base=seed)
# Normalize the map to the [0, 1] range.
map_max = np.max(noisemap)
map_min = np.min(noisemap)
map_range = map_max - map_min
for i in range(sizex):
    for j in range(sizey):
        k = noisemap[i][j]
        k = (k - map_min)/map_range
        noisemap[i][j] = k
map_max = np.max(noisemap)
map_min = np.min(noisemap)
def getnoise(x, y):
    """Sample the precomputed noise map at (x, y), flooring to grid indices."""
    return noisemap[math.floor(x)][math.floor(y)]
class Actor:
    """A random walker whose heading is steered by the global noise map."""
    def __init__(self):
        # Start at a uniformly random point; (xn, yn) is the pending next
        # position, initially equal to the current one.
        self.x = random.random() * sizex
        self.y = random.random() * sizey
        self.xn = self.x
        self.yn = self.y
    def step(self):
        """Advance one step along the flow field.

        Returns (new_x, new_y, old_x, old_y) for drawing the segment, or
        None once the actor has left the canvas.
        """
        # Map the local noise value to a heading angle in [0, 5*pi).
        t = getnoise(self.x, self.y) * 5 * math.pi
        self.x = self.xn
        self.y = self.yn
        self.xn += steplenght * math.cos(t)
        self.yn += steplenght * math.sin(t)
        if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey:
            return None
        return self.xn, self.yn, self.x, self.y
# Run the simulation, drawing one SVG line segment per surviving actor step.
canvas = drawSvg.Drawing(sizex, sizey, displayInline='False')
actors = [Actor() for _ in range(actorsnum)]
for s in range(stepsnum):
    # Bug fix: the old loop called actors.remove(a) while iterating the same
    # list, which silently skipped the actor following every removed one.
    # Rebuild the surviving-actor list instead.
    survivors = []
    for a in actors:
        p = a.step()
        if p:
            canvas.append(drawSvg.Line(p[2], p[3], p[0], p[1], stroke='black', stroke_width=1))
            survivors.append(a)
    actors = survivors
canvas.saveSvg('test.svg')
|
3,827 | d71ffd022d87aa547b2a379f4c92d767b91212fd | from channels.db import database_sync_to_async
from django.db.models import Q
from rest_framework.generics import get_object_or_404
from main.models import UserClient
from main.services import MainService
from .models import Message, RoomGroup, UsersRoomGroup
class AsyncChatService:
    """ORM helpers wrapped with database_sync_to_async so they can be awaited
    from channels consumers."""
    @staticmethod
    @database_sync_to_async
    def get_group_by_id(room_id):
        """Return the RoomGroup with this room_id, or None if it does not exist."""
        try:
            return RoomGroup.objects.get(room_id=room_id)
        except RoomGroup.DoesNotExist:
            return None
    @staticmethod
    @database_sync_to_async
    def is_room_open(room_id: int):
        """Return the room's `status` field, or None if the room is missing."""
        try:
            return RoomGroup.objects.get(room_id=room_id).status
        except RoomGroup.DoesNotExist:
            return None
    @staticmethod
    @database_sync_to_async
    def is_user_in_room(room_id, user):
        """True if `user` is a member of the room identified by room_id."""
        return UsersRoomGroup.objects.filter(Q(user=user) & Q(room_group__room_id=room_id)).exists()
    @staticmethod
    @database_sync_to_async
    def save_chat_message(message, user, room):
        """Persist a chat message authored by `user` in `room` and return it."""
        return Message.objects.create(author=user, room_group=room, message=message)
class ChatService:
    """Synchronous chat-room operations (creation, lookup, message storage)."""
    @staticmethod
    def is_room_exists(room_id: int) -> bool:
        # NOTE(review): this filters on the primary key `id`, while every
        # other method here queries by the `room_id` field — verify which
        # identifier callers actually pass.
        return RoomGroup.objects.filter(id=room_id).exists()
    @staticmethod
    def create_users_room(**data) -> RoomGroup:
        """Create a room and add both the asker and the expert to it."""
        room = RoomGroup.objects.create(room_id=data.get('room_id'))
        room.add_users([data.get('asker_id'), data.get('expert_id')])
        return room
    @staticmethod
    def get_group_by_id(room_id: int):
        """Return the RoomGroup with this room_id or raise Http404."""
        return get_object_or_404(RoomGroup, room_id=room_id)
    @staticmethod
    def socket_chat_created(data: dict) -> None:
        """Post the opening question into a freshly created chat as a system message."""
        message = f"""<div><b>Question:</b>{data.get('message')}</div>"""
        author = MainService.get_user_client(data.get('asker_id'))
        room = ChatService.get_group_by_id(data.get('room_id'))
        ChatService.save_chat_message(user=author, message=message, room=room, is_system=True)
    @staticmethod
    def save_chat_message(message: str, user: UserClient, room: RoomGroup, is_system: bool) -> Message:
        """Persist a message; is_system marks server-generated messages."""
        return Message.objects.create(author=user, room_group=room, message=message, is_system=is_system)
|
3,828 | 08ed57ffb7a83973059d62f686f77b1bea136fbd | from flask import Flask, request, render_template, redirect
from stories import Story, stories
# from flask_debugtoolbar import DebugToolbarExtension
app = Flask(__name__)
# app.config['SECRET_KEY'] = "secret"
# debug = DebugToolbarExtension(app)
# my original approach involved using a global story variable to store the instances which were in this file
# After looking at the answer code, storing this data in the instance maskes more sense
# story_global = None
@app.route('/')
def home_page():
    """Offer user choice of Madlib Games"""
    # `stories` is a dict of Story instances; the template renders one
    # selectable entry per story.
    return render_template('index.html', stories=stories.values())
@app.route('/form')
def show_form():
    """Show the input form for the Madlib story selected on the home page."""
    story_title = request.args["madlib"]
    # Robustness fix: the old loop left the variable unbound (raising
    # UnboundLocalError) when no story matched the submitted title; fall
    # back to the home page instead.
    story_for_form = next(
        (story for story in stories.values() if story.title == story_title),
        None)
    if story_for_form is None:
        return redirect('/')
    return render_template('form.html', s=story_for_form, story_title=story_title)
@app.route("/story")
def show_story():
    """Display the generated Madlib story using the user's answers."""
    answers = request.args
    story_title = request.args["story_title"]
    # Robustness fix (same as show_form): redirect home instead of raising
    # UnboundLocalError when the title is unknown.
    story_to_gen = next(
        (story for story in stories.values() if story.title == story_title),
        None)
    if story_to_gen is None:
        return redirect('/')
    return render_template("story.html", story_to_gen=story_to_gen, user_answers=answers)
@app.route('/play-again')
def play_again():
    """Redirect Home"""
    # Simple round-trip back to the game chooser.
    return redirect('/')
|
3,829 | 6336b31e51f0565c6b34ab5148645748fe899541 | import copy
import math
from collections import Counter

import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn.metrics import roc_auc_score, roc_curve
from statsmodels.stats.outliers_influence import variance_inflation_factor
class Get_res_DataFrame:
    '''
    Builds the DataFrames behind a scorecard model report workbook:
    sheet1: data overview
    sheet2: variable magnitude, effect and correlation
    sheet3: binning results and WOE
    sheet4: split by a single category -- input df[['category', 'final_score']]
            with cut lines derived from a base segment; outputs tables with KS
    Pass different DataFrames to get the corresponding analyses:
    ins / oot / oot2            -> get_bin_ins_oot
    new vs. old customers       -> get_categories_df with the customer-type column
    split by month              -> get_categories_df with the month column
    '''
    def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic, type_train='type_train', y='is_7_p'):
        # lr        : fitted linear model exposing coef_
        # df/df_bin/df_woe : raw, binned and WOE-encoded frames
        # use_lst   : model feature names
        # woe_dic   : {feature: {bin_label: woe}}
        # type_train: column holding the ins/oot segment label
        # y         : binary target column name
        self.df = df
        self.df_bin = df_bin
        self.df_woe = df_woe
        self.use_lst = use_lst
        self.woe_dic = woe_dic
        self.type_train = type_train
        self.model = lr
        self.y = y
    def main(self):
        """Print a usage reminder for the sheet-building methods."""
        print('d2_1 = self.get_2_1_imp()',  # in this order
              'd2_2 = self.get_2_2_des()',
              'd2_3 = self.get_2_3_corr()',
              '''d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])''' )  # one whole sheet
        #return d2_1, d2_2, d2_3, d3
    def get_2_1_imp(self, df):
        """Sheet 2-1: per-feature metrics (auc, ks, ins/oot IV, coefficient,
        PSI and VIF) computed on the WOE-encoded frame `df`."""
        d1 = DataFrame(index=self.use_lst)
        # Bug fix: a dead `cover_dic = dict(df[use_lst]...)` line referenced the
        # undefined global `use_lst` and its result was never used; removed.
        d1['auc'] = [round(0.5+abs(0.5-roc_auc_score(df[self.y], df[i])), 3) for i in self.use_lst]
        d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0]['gap']), 3) for name in self.use_lst]
        d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train]=='ins'], name, self.y), 3) for name in self.use_lst]
        d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train]=='oot'], name, self.y), 3) for name in self.use_lst]
        d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]
        d1 = d1.reset_index()
        d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst]
        d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.use_lst]), i),3) for i in range(len(self.use_lst))]
        d1.index = range(1, d1.shape[0]+1)
        return d1
    def get_2_2_des(self):
        """Sheet 2-2: describe() statistics plus coverage ratio, computed on
        the ins+oot rows of the raw frame."""
        df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]
        df_data_des = df[self.use_lst].describe().T
        # Bug fix: `use_lst` was read as an (undefined) global; use the attribute.
        cover_dic = dict(df[self.use_lst].notnull().sum())
        df_data_des = df_data_des.reset_index()
        df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(cover_dic[x]/df.shape[0], 4))
        df_data_des.index = df_data_des['index']
        df_data_des.drop(columns=['index', 'count'], inplace=True)
        d2_2 = df_data_des.reset_index()
        d2_2.index = range(1, d2_2.shape[0]+1)
        return d2_2
    def get_2_3_corr(self):
        """Sheet 2-3: correlation matrix of the WOE-encoded model features."""
        corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T)
        d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self.use_lst).reset_index()
        d2_3.index = range(1, d2_3.shape[0]+1)
        return d2_3
    def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):
        """Sheet 3: one bin table (bad/count/bad-rate/woe) per feature and
        per segment in `type_lst`, concatenated side by side."""
        res = []
        for loc, seg in enumerate(type_lst):
            lst = []
            df_tmp = self.df_bin[(self.df_bin[self.type_train]==seg)]
            for name in self.use_lst:
                dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]
                dd_tmp['bad_rate'] = dd_tmp[self.y]/dd_tmp['count']
                dd_tmp = dd_tmp.reset_index()
                dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[name][x])
                dd_tmp.sort_values(by='bad_rate', inplace=True)
                # Numeric bin labels look like "(low, high]": order them by
                # their lower edge; non-interval labels sort last.
                dd_tmp['sort_key'] = [float(b.split(',')[0][1:]) if b[0]=='(' else float('inf') for b in dd_tmp[name]]
                dd_tmp.sort_values(by='sort_key', inplace=True)
                dd_tmp.drop(columns=['sort_key'], inplace=True)
                # Header row ('-' placeholders) labelled with the feature name.
                d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio', 'woe'],
                              data=[['-', '-', '-', '-','-']]+dd_tmp.values.tolist()[:],
                              index=[[name]]+['-']*dd_tmp.shape[0])
                if loc < 1:
                    split_name = '<-->'+str(seg)
                else:
                    split_name = str(type_lst[loc-1])+'<-->'+str(seg)
                d[split_name] = [split_name for _ in range(d.shape[0])]
                d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe' ]]
                lst.append(d)
            res.append(lst)
        return pd.concat((pd.concat(res[k]) for k in range(len(type_lst))), axis=1)
    def get_categories_df(self, df, cate='type_new', base_cut='ins', y='final_score'):
        """Sheet 4: score-decile table per category value.

        Cut lines are the score percentiles of the `base_cut` segment (with
        open-ended first/last bins); per category the table reports bad,
        count, bad_rate and the per-bin KS gap.
        """
        df_tmp = copy.deepcopy(df[[cate, self.y, y]])
        df_tmp.rename(columns={cate:'category', self.y:'bad'}, inplace=True)
        # np.percentile returns an np.array; make it a mutable list.
        cut_line = list(np.percentile(list(df_tmp[df_tmp['category']==base_cut][y]), range(1, 101,10)))
        cut_line[0] = -float('inf')
        cut_line.append(float('inf'))
        df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)
        df_tmp['count'] = [1 for i in range(df_tmp.shape[0])]
        ks_lst = []
        for i in sorted(Counter(df_tmp['category']).keys()):
            # Bug fix: ks_calc_cross was called as a bare (undefined) global
            # function; it is a method of this class.
            lst = list(self.ks_calc_cross(df_tmp[df_tmp['category']==i], 'bins', 'bad')[1]['gap'])
            # Left-pad with zeros so every category contributes 10 rows.
            while len(lst) < 10:
                lst = [0]+lst
            ks_lst.extend(lst)
        df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]
        df = df.reset_index()
        df['bad_rate'] = df['bad']/df['count']
        df['ks'] = ks_lst
        for i in ['bad', 'count', 'bad_rate', 'ks']:
            df[i] = df[i].astype(float)
        df[['bad', 'count', 'bad_rate', 'ks'] ]= df[['bad', 'count', 'bad_rate', 'ks']].fillna(0)
        df.index = range(1, df.shape[0]+1)
        return df
    def ks_calc_cross(self, data, pred, y_label):
        '''
        Compute the KS statistic from the crosstab of score vs. label.

        Inputs:
            data   : DataFrame holding the model score and the true label
            pred   : column name of the model score
            y_label: column name of the true label (values must be {0, 1};
                     the gap is read from crosstab columns 0 and 1)
        Returns:
            ks        : the row(s) where the cumulative-distribution gap peaks
            crossdens : cumulative good/bad distributions plus their 'gap'
        '''
        crossfreq = pd.crosstab(data[pred],data[y_label])
        crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()
        crossdens['gap'] = abs(crossdens[0] - crossdens[1])
        ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]
        return ks,crossdens
    def cal_iv(self,df1, x, y='is_7_p'):
        """Information value of binned feature `x` against binary target `y`.

        Zero cells are floored at 1e-5 to keep the WOE logarithm finite.
        """
        df = copy.deepcopy(df1)
        if 'count' not in df.columns:
            df['count'] = [1 for i in range(df.shape[0])]
        df_tmp = df[[x,'count', y]].groupby(x).sum()
        df_tmp['good'] = df_tmp['count'] - df_tmp[y]
        # Per-bin shares of bads and goods (lambda renamed to stop shadowing x).
        df_tmp[y] = df_tmp[y].apply(lambda v: max(v, 0.00001)/sum(df_tmp[y]))
        df_tmp['good'] = df_tmp['good'].apply(lambda v: max(v, 0.00001)/sum(df_tmp['good']))
        # WOE and IV per bin.
        df_tmp['woe'] = np.log(df_tmp[y]/df_tmp['good'])
        df_tmp['iv'] = (df_tmp[y]-df_tmp['good']) * df_tmp['woe']
        return df_tmp['iv'].sum()
    def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):
        """Population stability index of binned feature `name` between the
        two segments named in `lst`."""
        name1, name2 = lst
        # Consistency fix: use self.type_train rather than a hard-coded
        # 'type_train' column name (identical for the default constructor).
        df_in = copy.deepcopy(df_sf_bin[df_sf_bin[self.type_train]==name1])
        sum_1 = df_in.shape[0]
        df_in['count1'] = [1 for i in range(sum_1)]
        df_in = df_in.groupby(name).sum()[['count1']]
        df_out = copy.deepcopy(df_sf_bin[df_sf_bin[self.type_train]==name2])
        sum_2 = df_out.shape[0]
        df_out['count2'] = [1 for i in range(sum_2)]
        df_out = df_out.groupby(name).sum()[['count2']]
        df_psi = pd.concat((df_in, df_out), axis=1)
        # Bin shares within each segment.
        df_psi['count1'] = df_psi['count1'].apply(lambda x: x/sum_1)
        df_psi['count2'] = df_psi['count2'].apply(lambda x: x/sum_2)
        # Guard empty bins against log(0).  Bug fix: the old
        # `df_psi[['count1','count2']].replace(..., inplace=True)` acted on a
        # temporary slice and had no effect; assign the result back instead.
        df_psi[['count1', 'count2']] = df_psi[['count1', 'count2']].replace(0, 0.001)
        df_psi['psi_tmp'] = df_psi['count1']/df_psi['count2']
        df_psi['psi_tmp'] = df_psi['psi_tmp'].apply(lambda x: math.log(x))
        df_psi['psi'] = (df_psi['count1'] - df_psi['count2'])*df_psi['psi_tmp']
        return sum(df_psi['psi'])
if __name__ == '__main__':
    # Example driver kept as an inert string: it documents how the reporting
    # helpers above are meant to be chained, but is never executed.
    s = '''
c=Get_res_DataFrame(lr, a.df, a.df_bin, df_pb_woe, use_lst,a.woe_dic, type_train='type_train', y='is_7_p')
d2_1 = c.get_2_1_imp(df_pb_woe[df_pb_woe['customer_type_old']=='old_customer'])
d2_2 = c.get_2_2_des()
d2_3 = c.get_2_3_corr()
d3 = c.get_bin_ins_oot(type_lst=['ins', 'oot'])
d4 = c.get_categories_df(df_pb_all,cate='type_train',base_cut='ins', y='final_score')
#
df_new = df_pb_all[df_pb_all['customer_type_old']=='new_customer']
df_old = df_pb_all[df_pb_all['customer_type_old']=='old_customer']
#
d5_1 = c.get_categories_df(df_new,cate='type_train',base_cut='ins', y='final_score')
d5_2 = c.get_categories_df(df_old,cate='type_train',base_cut='ins', y='final_score')
d6_1 = c.get_categories_df(df_new,cate='month',base_cut='0', y='final_score')
d6_2 = c.get_categories_df(df_old,cate='month',base_cut='0', y='final_score')
'''
|
3,830 | 85e5bf57f7eba2cbee0fbb8a4d37b5180208f9b7 | # -*- coding: utf-8 -*-
from odoo import fields, models
class LunchWizard(models.TransientModel):
    """Transient wizard that rewrites the lunch type of a linked lunch.lunch record."""
    _name = "lunch.wizard"
    _description = "LunchWizard"
    # New value to store into the linked record's lunch_type field.
    lun_type = fields.Char(string="Set New Lunch Type")
    lunch_id = fields.Many2one('lunch.lunch', string="Lunch Id")
    def action_process_lunch(self):
        """Copy the wizard's lun_type onto the selected lunch record."""
        self.lunch_id.lunch_type = self.lun_type
        #self.write( { self.lunch_id.lunch_type : self.lun_type } )
3,831 | 53eb1dcd54ce43d9844c48eb1d79f122a87dca39 | from selenium.webdriver import Chrome
# Practice script: drive the ToolsQA iframe page with Selenium/Chrome.
path=("/Users/karimovrustam/PycharmProjects/01.23.2020_SeleniumAutomation/drivers/chromedriver")
driver=Chrome(executable_path=path)
driver.maximize_window()
driver.get("http://www.toolsqa.com/iframe-practice-page/")
# driver.switch_to.frame("iframe2") # When working with few windows, you need switch to necessary
# # or
# # driver.switch_to.frame("IF2")
# # or
# # driver.switch_to.frame(driver.find_element_by_xpath("//iframe[@name='iframe2']"))
# driver.find_element_by_xpath("//a[contains(text(),'Read more')]").click()
driver.switch_to.default_content() # When you need stop working with one window, and come to whole page
driver.find_element_by_xpath("//span[text()='VIDEOS']").click()
# TODO: Could not reproduce looking for XPath through switching windows. Repeat it!
3,832 | 99c12e925850fe7603831df5b159db30508f4515 | from coarsegrainparams import *
from inva_fcl_stab import *
from Eq import *
from Dynamics import *
from sympy import Matrix,sqrt
def construct_param_dict(params,K_RC,K_CP,m_P):
    """
    Construct all the parameters from its relationships with body size and temperature, using the normalizing constants and scaling exponent w

    Args:
        params: dict of scaling constants, activation energies, temperatures
            and normalization constants (keys read below).
        K_RC, K_CP: consumer/resource and predator/consumer body-size ratios.
        m_P: predator body mass.

    Returns:
        dict with mortality (q1, q2), carrying capacity K, growth rate r,
        attack rates (a1, a2, a3) and handling times (t_hp, t_hc).

    NOTE(review): `b_k`, `g` and `alfa` are not defined in this function; they
    presumably come from the star imports at the top of the file — confirm.
    """
    ###scaling constants
    w=params['w']
    pd=params['pd'] # in 3D and 0.21 in 2D
    pv=params['pv']
    Er=params['Er'] ;Ek=params['Ek']
    ER=params['ER'];EC=params['EC'];EP=params['EP'];
    Eq1=params['Eq1'];Eq2=params['Eq2']
    #capture success function
    a = params['a']
    b = params['b']
    c = params['c']
    formC = params['formC']
    formPC = params['formPC']
    formPR = params['formPR']
    ###variables
    TR= params['TR'] ;TC= params['TC'];TP=params['TP'];D_R= params['D_R']; D_C= params['D_C']
    K_RP=K_RC*K_CP
    fmC=params['fmC'];thermyR=params['thermyR']
    thermyC=params['thermyC'];thermyP=params['thermyP']
    fmPR=params['fmPR']
    fmPC=params['fmPC']
    # Body masses follow from the predator mass via the size ratios.
    m_C = K_CP*m_P;m_R = K_RP*m_P
    ###normalization constants and boltzmann constant
    r0 = params['r0']
    k0 = params['k0'] # will depend on the productivity of the habitat
    a01 = a02 = params['a012'] # will depedend on the dimension of the habitat
    a03 = params['a03']
    d0= params['d0']
    q10 = params['q10'];q20 = params['q20'];
    v0R = params['v0R'];v0C =params['v0C'];v0P =params['v0P'];k = b_k
    hC0 = params['hC0'];hP0 = params['hP0']
    #intrapopulation parameters
    q1=set_q1(q10,m_C,w,Eq1,TR,k)
    q2=set_q2(q20,m_P,w,Eq2,TC,k)
    K=set_K(k0,m_R,w,Ek,TR,k)
    r=set_r(r0,m_R,w,Er,TR,k)
    #interpopulation parameters
    a1=set_alfa(m_C,a01,K_RC,pv,pd,TR,TC,ER,EC,D_R,v0R,v0C,g,alfa,fmC,thermyR,thermyC,k,a,b,c,formC)
    a2=set_alfa(m_P,a02,K_RP,pv,pd,TR,TP,ER,EP,D_R,v0R,v0P,g,alfa,fmPR,thermyR,thermyP,k,a,b,c,formPR)
    a3=set_alfa(m_P,a03,K_CP,pv,pd,TC,TP,EC,EP,D_C,v0C,v0P,g,alfa,fmPC,thermyC,thermyP,k,a,b,c,formPC)
    t_hp = set_th(hP0,m_P,w,EP,k,TP)
    t_hc = set_th(hC0,m_C,w,EC,k,TC)
    param_dict={'q1':q1,'q2':q2,'K':K,'r':r,'a1':a1,'a2':a2,'a3':a3,'t_hp':t_hp,'t_hc':t_hc}
    return param_dict
def construct_equilibrium(params,par_dict,K_RC,K_CP,m_P):
    """
    Construct all the functions related to the computation of equilibrium values in the model, in any subsytem

    Builds the symbolic equilibrium densities of the two-species subsystems
    (Sc2: resource-consumer; Sc3: resource-predator), in both the
    Lotka-Volterra (LV) and Rosenzweig-MacArthur (RM) variants, plus the
    full three-species system and the roots/discriminant of the resource
    equilibrium polynomial.  Returns everything in one dict.
    """
    #intrapopulation parameters
    q1=par_dict['q1']
    q2=par_dict['q2']
    q1_0 = params['q10']
    q20 = params['q20']
    hC0 = params['hC0']
    hP0 = params['hP0']
    K=par_dict['K']
    r=par_dict['r']
    m_C = K_CP*m_P
    #interpopulation parameters
    a1=par_dict['a1']
    a2=par_dict['a2']
    a3=par_dict['a3']
    t_hc = par_dict['t_hc']
    t_hp = par_dict['t_hp']
    e1=params['e1']
    e2=params['e2']
    e3=params['e3']
    # Equilibrium values
    ##Sc2
    ###L-V
    R_eq_s2 , C_eq_s2 = set_R_C_eq_sLV(r,K,q1,a1,e1)
    ###R-M
    R_eq_s2RM, C_eq_s2RM = set_R_C_eq_sRM(r,K,q1,q1_0,a1,e1,hC0)
    ##Sc3
    ###L-V
    R_eq_s3,P_eq_s3 = set_R_C_eq_sLV(r,K,q2,a2,e2)
    ###R-M
    R_eq_s3RM , P_eq_s3RM = set_R_C_eq_sRM(r,K,q2,q20,a2,e2,hP0)
    ###full system ( need to correct this.. in case want to use it, focus at the moment in invasibility stuff)
    R_eq = set_R_eq(K,q1,q2,r,a1,a2,a3,e1,e2,e3)
    C_eq = set_C_eq(K,q1,q2,r,a1,a2,a3,e1,e2,e3)
    P_eq = set_P_eq(K,q1,q2,r,a1,a2,a3,e1,e2,e3)
    D = setD(K,a1,a2,a3,e1,e2,e3,r)
    DBound= setDBound(K,a1,a2,a3,e1,e2,e3,m_C,r)
    #Roots for Req
    R1 = setRoot1(K,q1,q2,r,a1,a2,a3,e1,e2,e3,t_hc,t_hp,m_P,m_C,q20,q1_0,hC0,hP0)
    Dis = setDis(K,q1,q2,r,a1,a2,a3,e1,e2,e3,t_hc,t_hp,m_P,m_C,q20,q1_0,hC0,hP0)
    bR = setb_R(K,q1,q2,r,a1,a2,a3,e1,e2,e3,t_hc,t_hp,m_P,m_C,q20,q1_0,hC0,hP0)
    denR = setden_R(K,q1,q2,r,a1,a2,a3,e1,e2,e3,t_hc,t_hp,m_P,m_C,q20,q1_0,hC0,hP0)
    # Quadratic-formula roots built from the pieces above (sympy sqrt).
    R2 = (bR + sqrt(Dis))/(2*denR)
    R3 = (bR - sqrt(Dis))/(2*denR)
    eq_dict={'R_eq_s2':R_eq_s2,'C_eq_s2':C_eq_s2,'R_eq_s3':R_eq_s3,'P_eq_s3':P_eq_s3,'R_eq':R_eq,'C_eq':C_eq,'P_eq':P_eq,
             'R_eq_s2RM':R_eq_s2RM,'C_eq_s2RM':C_eq_s2RM,'R_eq_s3RM':R_eq_s3RM,'P_eq_s3RM':P_eq_s3RM,'R1':R1,'Discriminant':Dis,'R2':R2,'R3':R3,'bR':bR,'denR':denR,'D' : D,'DBound':DBound}
    return eq_dict
def construct_inv_boundaries(params,par_dict,eq_dict,K_RC,K_CP,m_P):
    """
    Construct in sympy format all the functions related to the invasibility conditions in each of the explored scenarios

    Uses the equilibrium expressions of the 2-species subsystems (eq_dict)
    to build the consumer/predator invasion boundaries for scenarios 2-5,
    in both Lotka-Volterra ('I_*') and Rosenzweig-MacArthur ('I_*RM') form.
    Returns a dict keyed by boundary name.
    """
    #intrapop params
    q1=par_dict['q1']
    q2=par_dict['q2']
    K =par_dict['K']
    m_C= K_CP*m_P
    q10 = params['q10']
    q20 = params['q20']
    hC0 = params['hC0']
    hP0 = params['hP0']
    #interpop params
    a1=par_dict['a1']
    a2=par_dict['a2']
    a3=par_dict['a3']
    e1=params['e1']
    e2=params['e2']
    e3=params['e3']
    t_hc = par_dict['t_hc']
    t_hp = par_dict['t_hp']
    #eq values
    #L-V
    R_eq_s2 = eq_dict['R_eq_s2']
    C_eq_s2 = eq_dict['C_eq_s2']
    P_eq_s3 = eq_dict['P_eq_s3']
    R_eq_s3 = eq_dict['R_eq_s3']
    #R-M
    R_eq_s2RM = eq_dict['R_eq_s2RM']
    C_eq_s2RM = eq_dict['C_eq_s2RM']
    R_eq_s3RM = eq_dict['R_eq_s3RM']
    P_eq_s3RM = eq_dict['P_eq_s3RM']
    ##Invasibility boundaries
    #L-V
    I_C_s2 = set_I_C_s2(e1,a1,K,q1)
    I_P_s3 = set_I_P_s3(e2,a2,K,q2)
    I_P_s4 = set_I_P_s4(e2,e3,a2,a3,q2,R_eq_s2,C_eq_s2)
    I_C_s5 = set_I_C_s5(e1,a1,a3,R_eq_s3,P_eq_s3,q1)
    #R-M
    I_C_s2RM = set_I_C_s2RM(e1,a1,K,q1,hC0,q10)
    I_P_s3RM = set_I_P_s3RM(e2,a2,K,q2,hP0,q20)
    I_P_s4RM = set_I_P_s4RM(e2,e3,a2,a3,q2,R_eq_s2RM,C_eq_s2RM,hP0,q20)
    I_C_s5RM = set_I_C_s5RM(e1,e2,a1,a3,m_C,R_eq_s3RM,P_eq_s3RM,q1,t_hc,q10,q20,hP0,hC0)
    inv_dict= {'I_C_s2':I_C_s2,'I_P_s3':I_P_s3,'I_P_s4':I_P_s4,'I_C_s5':I_C_s5,
               'I_C_s2RM':I_C_s2RM,'I_P_s3RM':I_P_s3RM,'I_P_s4RM':I_P_s4RM,'I_C_s5RM':I_C_s5RM}
    return inv_dict
def Trophic_position(params,par_dict,eq_dict):
    """Symbolic mean trophic position of the consumer in the coexistence domain.

    Delegates to set_MTP_C using the full-system resource equilibrium and
    the predator-related rates (a2, q2, e2).
    """
    R_eq = eq_dict['R_eq']
    a2 = par_dict['a2']
    q2 = par_dict['q2']
    e2 = params['e2']
    #Trophic position in the coexistence domain
    MTP_C= set_MTP_C(R_eq,a2,q2,e2)
    return MTP_C
def Stability(params,par_dict,eq_dict,K_RC,K_CP,m_P):
    """Symbolic stability quantity of the full system at its interior equilibrium.

    Combines the three Routh-Hurwitz-style terms d1, d2, d3 (built from the
    equilibrium densities and rate parameters) into hd2 via set_hdet2 and
    returns it.
    """
    #intrapop params
    K=par_dict['K']
    r=par_dict['r']
    m_C = K_CP*m_P
    #interpop params
    a1=par_dict['a1']
    a2=par_dict['a2']
    a3=par_dict['a3']
    e1=params['e1']
    e2=params['e2']
    e3=params['e3']
    #equilibrium
    R_eq= eq_dict['R_eq']
    C_eq = eq_dict['C_eq']
    P_eq = eq_dict['P_eq']
    ##Stability
    D = set_D(K,a1,a2,a3,e1,e2,e3,r)
    d1 = set_d1(r,R_eq,K)
    d2 = set_d2(e1,e2,e3,a1,a2,a3,C_eq,R_eq,P_eq)
    d3 = set_d3(D,a3,C_eq,R_eq,P_eq,K)
    hd2 = set_hdet2(d1,d2,d3)
    return hd2
def Jacobian(dR, dC, dP, R, C, P):
    """Jacobian of the 3-D vector field (dR, dC, dP) w.r.t. (R, C, P)."""
    return Matrix([dR, dC, dP]).jacobian(Matrix([R, C, P]))
def Jacobian2(dX, dY, X, Y):
    """Jacobian of the 2-D vector field (dX, dY) w.r.t. (X, Y)."""
    return Matrix([dX, dY]).jacobian(Matrix([X, Y]))
def setJacobianDict(DynamicsDict, R, C, P):
    """Assemble the Jacobians of the full LV and RM systems and of the two
    partial (resource-predator, resource-consumer) LV systems."""
    return {
        'JLV': Jacobian(DynamicsDict['dxLVa'], DynamicsDict['dyLVa'],
                        DynamicsDict['dzLVa'], R, C, P),
        'JRM': Jacobian(DynamicsDict['dRRM'], DynamicsDict['dCRM'],
                        DynamicsDict['dPRM'], R, C, P),
        'JLVP': Jacobian2(DynamicsDict['dRLVP'], DynamicsDict['dPLVP'], R, P),
        'JLVC': Jacobian2(DynamicsDict['dRLVC'], DynamicsDict['dCLVC'], R, C),
    }
def ConstructDynamicalFunctions(params,par_dict,K_RC,K_CP,m_P,R,C,P):
    """Build the symbolic right-hand sides of the LV and RM dynamical systems.

    Returns a dict holding the full 3-species LV and RM derivatives, the two
    partial LV systems, the RM equilibrium expressions (with numerator and
    denominator exposed separately), the adimensional LV system and its
    isoclines, plus the trivial resource eigenvalue -r.
    """
    #intrapopulation parameters
    q1=par_dict['q1']
    q2=par_dict['q2']
    K=par_dict['K']
    r=par_dict['r']
    e1=params['e1']
    e2=params['e2']
    e3=params['e3']
    m_C = K_CP*m_P
    q20 = params['q20']
    q10 = params['q10']
    #interpopulation parameters
    a1=par_dict['a1']
    a2=par_dict['a2']
    a3=par_dict['a3']
    t_hp=par_dict['t_hp']
    t_hc=par_dict['t_hc']
    hC0=params['hC0']
    hP0=params['hP0']
    #Construct LV functions
    dRLV=set_dRLV(R,C,P,r,K,a1,a2)
    dPLV=set_dPLV(R,C,P,a2,a3,e2,e3,q2)
    dCLV=set_dCLV(R,C,P,a1,a3,e1,q1)
    dRLVP = set_dRLVPart(R,P,r,K,a2)
    dPLVP = set_dPredLV(R,P,a2,e2,q2)
    dRLVC = set_dRLVPart(R,C,r,K,a1)
    dCLVC = set_dPredLV(R,C,a1,e1,q1)
    dxLVa,dyLVa,dzLVa = set_LVAdim(R,C,P,r,K,a1,a2,a3,e1,e2,e3,q1,q2)
    #Construct RM functions
    dRRM = set_dRRM(R,C,P,r,K,a1,a2,a3,t_hp,t_hc,m_C,m_P)
    dCRM = set_dCRM(R,C,P,a1,a2,a3,e1,t_hc,t_hp,q1,m_C,m_P)
    dPRM = set_dPRM(R,C,P,a2,a3,e2,e3,t_hp,q2,m_P)
    #RM eq expresions
    CNum_eq_RM = setEqCNum_RM(q2,m_P,a2,R,e2,q20,hP0)
    CDen_eq_RM = setEqCDen_RM(e3,q20,hP0)
    PNum_eq_RM = setEqPNum_RM(K,q1,q2,r,a1,a2,a3,e1,e2,e3,t_hc,t_hp,R,C,P,m_P,m_C,q20,q10,hC0,hP0)
    PDen_eq_RM = setEqPDen_RM(K,q1,q2,r,a1,a2,a3,e1,e2,e3,t_hc,t_hp,R,C,P,m_P,m_C,q20,q10,hC0,hP0)
    C_eq_RM = CNum_eq_RM/CDen_eq_RM
    P_eq_RM = PNum_eq_RM/PDen_eq_RM
    #Isoclines
    RIsoLVa,CIsoLVa,PIsoLVa = set_IsoclinesLVAdim(R,C,P,r,K,a1,a2,a3,e1,e2,e3,q1,q2)
    DynamicsDict={'dRLV':dRLV,'dPLV':dPLV,'dCLV':dCLV,'dRRM':dRRM,'dPRM':dPRM,'dCRM':dCRM,'C_eq_RM':C_eq_RM,'P_eq_RM':P_eq_RM,'PNum_eq_RM':PNum_eq_RM,'CNum_eq_RM':CNum_eq_RM,'dRLVP':dRLVP,'dPLVP':dPLVP,'dRLVC':dRLVC,'dCLVC':dCLVC,'EigR':-r,'dxLVa':dxLVa,'dyLVa':dyLVa,'dzLVa':dzLVa,'RIsoLVa':RIsoLVa,'CIsoLVa':CIsoLVa,'PIsoLVa':PIsoLVa}
    return DynamicsDict
|
3,833 | 91eb0ae8e59f24aeefdabd46546bc8fb7a0b6f6c | from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn import metrics, ensemble, linear_model, svm
from numpy import log, ones, array, zeros, mean, std, repeat
import numpy as np
import scipy.sparse as sp
import re
import csv
from time import time
import functools
from nltk.util import skipgrams
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
# Input/output locations for the insult-detection pipeline.
DIR_PATH = ""
TRAIN_FILE = DIR_PATH + "train.csv"
TEST_SOL_FILE = DIR_PATH + "test_with_solutions.csv" # This is also used for training, together with TRAIN_FILE
BADWORDS_FILE = DIR_PATH + "bad_words.txt" # attached with submission
TEST_FILE = DIR_PATH + "test.csv" # set this to the new test file name
PREDICTION_FILE = DIR_PATH + "preds.csv" # predictions will be written here
def normalize(f , lammatize= False):
    """Normalize a list of raw comment strings for feature extraction.

    Pipeline: lowercase; strip literal escape sequences; expand slang and
    contractions; apply the bad-word substitution map from loadBW(); then
    tokenize with nltk and either Porter-stem (default) or lemmatize each
    token, rejoining tokens with single spaces.

    Args:
        f: list of strings.
        lammatize: if True use WordNetLemmatizer instead of PorterStemmer.

    Returns:
        list of normalized strings (same length/order as the input).
    """
    f = [x.lower() for x in f]
    # The corpus contains *literal* backslash escapes, hence "\\n" etc.
    f = [x.replace("\\n"," ") for x in f]
    f = [x.replace("\\t"," ") for x in f]
    f = [x.replace("\\xa0"," ") for x in f]
    f = [x.replace("\\xc2"," ") for x in f]
    #f = [x.replace(","," ").replace("."," ").replace(" ", " ") for x in f]
    #f = [re.subn(" ([a-z]) ","\\1", x)[0] for x in f]
    #f = [x.replace(" "," ") for x in f]
    # Common chat abbreviations -> full words.
    f = [x.replace(" u "," you ") for x in f]
    f = [x.replace(" em "," them ") for x in f]
    f = [x.replace(" da "," the ") for x in f]
    f = [x.replace(" yo "," you ") for x in f]
    f = [x.replace(" ur "," you ") for x in f]
    #f = [x.replace(" ur "," your ") for x in f]
    #f = [x.replace(" ur "," you're ") for x in f]
    # Contraction expansion; specific forms first, generic suffixes after.
    f = [x.replace("won't", "will not") for x in f]
    f = [x.replace("can't", "cannot") for x in f]
    f = [x.replace("i'm", "i am") for x in f]
    f = [x.replace(" im ", " i am ") for x in f]
    f = [x.replace("ain't", "is not") for x in f]
    f = [x.replace("'ll", " will") for x in f]
    f = [x.replace("'t", " not") for x in f]
    f = [x.replace("'ve", " have") for x in f]
    f = [x.replace("'s", " is") for x in f]
    f = [x.replace("'re", " are") for x in f]
    f = [x.replace("'d", " would") for x in f]
    #f = [x.replace("outta", "out of") for x in f]
    # Replace each bad-word variant with its canonical form (padded so only
    # whole words match).
    bwMap = loadBW()
    for key, value in bwMap.items():
        kpad = " " + key + " "
        vpad = " " + value + " "
        f = [x.replace(kpad, vpad) for x in f]
    # stemming
    """
    f = [re.subn("ies( |$)", "y ", x)[0].strip() for x in f]
    #f = [re.subn("([abcdefghijklmnopqrstuvwxyz])s( |$)", "\\1 ", x)[0].strip() for x in f]
    f = [re.subn("s( |$)", " ", x)[0].strip() for x in f]
    f = [re.subn("ing( |$)", " ", x)[0].strip() for x in f]
    f = [x.replace("tard ", " ") for x in f]
    f = [re.subn(" [*$%&#@][*$%&#@]+"," xexp ", x)[0].strip() for x in f]
    f = [re.subn(" [0-9]+ "," DD ", x)[0].strip() for x in f]
    f = [re.subn("<\S*>","", x)[0].strip() for x in f]
    """
    tokenized_sents = [word_tokenize(i) for i in f]
    if not lammatize:
        stemmer = PorterStemmer()
        for i in range (0, len(tokenized_sents)):
            for j in range (0,len(tokenized_sents[i])):
                tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])
    else:
        lammatizer = WordNetLemmatizer()
        for i in range (0, len(tokenized_sents)):
            for j in range (0,len(tokenized_sents[i])):
                tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents[i][j])
    for i in range (0, len(tokenized_sents)):
        f[i] = " ".join(tokenized_sents[i])
    return f
def ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features=500, binary = False, do_normalization = False, stopwords = False, verbose = True, analyzer_char = False):
    """Vectorize `data` into n-gram features and keep the best by chi-squared.

    The first `ntrain` entries of `data`/`labels` are the training split; the
    rest are the test split.  Depending on the flags a binary CountVectorizer
    or a (optionally stop-worded, char-level) TfidfVectorizer is used.

    Returns:
        (X_train, y, X_test): sparse train matrix, label array, sparse test matrix.
    """
    f = data
    if do_normalization:
        f = normalize(f)
    ftrain = f[:ntrain]
    ftest = f[ntrain:]
    y_train = labels[:ntrain]
    t0 = time()
    analyzer_type = 'word'
    if analyzer_char:
        analyzer_type = 'char'
    if binary:
        vectorizer = CountVectorizer(ngram_range = (min_ngrams , max_ngrams), binary =True)
    elif stopwords:
        vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),stop_words='english',analyzer=analyzer_type,sublinear_tf=True)
    else:
        vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),sublinear_tf=True,analyzer=analyzer_type)
    if verbose:
        print ("extracting ngrams... where n is [%d,%d]" % (max_ngrams,min_ngrams))
    # Fit the vocabulary on the training split only, then project both splits.
    X_train = vectorizer.fit_transform(ftrain)
    X_test = vectorizer.transform(ftest)
    if verbose:
        print ("done in %fs" % (time() - t0), X_train.shape, X_test.shape)
    y = array(y_train)
    numFts = no_of_features
    if numFts < X_train.shape[1]:
        # Keep only the numFts features most associated with the labels.
        t0 = time()
        ch2 = SelectKBest(chi2, k=numFts)
        X_train = ch2.fit_transform(X_train, y)
        X_test = ch2.transform(X_test)
        assert sp.issparse(X_train)
    if verbose:
        print ("Extracting best features by a chi-squared test.. ", X_train.shape, X_test.shape )
    return X_train, y, X_test
def skipGrams(data, labels, ntrain,nm=500,min_ngrams=1, max_ngrams=1, no_of_features=500, do_normalization = False, verbose = True):
    """Vectorize `data` with tf-idf over 2-skip-bigrams (n=2, k=3) and select
    the `nm` best features by chi-squared.

    Same train/test split convention as ngrams(): first `ntrain` entries are
    training data.  Note min_ngrams/max_ngrams/no_of_features are accepted
    but unused here; `nm` controls the feature count.

    Returns:
        (X_train, y, X_test).
    """
    f = data
    if do_normalization:
        f = normalize(f)
    ftrain = f[:ntrain]
    ftest = f[ntrain:]
    y_train = labels[:ntrain]
    t0 = time()
    # nltk.skipgrams pre-bound to bigrams with up to 3 skipped tokens.
    skipper = functools.partial(skipgrams, n=2, k=3)
    vectorizer = TfidfVectorizer(sublinear_tf=True,analyzer=skipper)
    X_train = vectorizer.fit_transform(ftrain)
    X_test = vectorizer.transform(ftest)
    if verbose:
        print ("done in %fs" % (time() - t0), X_train.shape, X_test.shape)
    y = array(y_train)
    numFts = nm
    if numFts < X_train.shape[1]:
        t0 = time()
        ch2 = SelectKBest(chi2, k=numFts)
        X_train = ch2.fit_transform(X_train, y)
        X_test = ch2.transform(X_test)
        assert sp.issparse(X_train)
    if verbose:
        print ("Extracting best features by a chi-squared test.. ", X_train.shape, X_test.shape)
    return X_train, y, X_test
def specialCases(data, labels, ntrain, verbose = True):
    """Features from text directly addressed at a person ("you are ...", "you're ...", " ur ...").

    Each such marker is replaced by the sentinel "SSS"; everything after a
    marker is normalized and the first clause (up to '.'/'?') of each segment
    is collected, then fed through ngrams() with 100 unigram features.

    Returns:
        (X_train, y, X_test) as produced by ngrams().
    """
    g = [x.lower().replace("you are"," SSS ").replace("you're"," SSS ").replace(" ur ", " SSS ").split("SSS")[1:] for x in data]
    f = []
    for x in g:
        fts = " "
        x = normalize(x)
        for y in x:
            # Keep only the first clause following the "you are" marker.
            w = y.strip().replace("?",".").split(".")
            fts = fts + " " + w[0]
        f.append(fts)
    X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100, do_normalization = True, verbose = verbose)
    return X_trn, y_trn, X_tst
def loadBW():
    """Load the bad-word normalization map from BADWORDS_FILE.

    Each line is "variant,replacement"; lines without exactly one comma
    are skipped.

    Returns:
        dict: variant -> canonical replacement (both lowercased/stripped).
    """
    bwMap = dict()
    # BUG FIX: the original opened the file and never closed it; `with`
    # guarantees the handle is released even on error.
    with open(BADWORDS_FILE, "r") as f:
        for line in f:
            sp = line.strip().lower().split(",")
            if len(sp) == 2:
                bwMap[sp[0].strip()] = sp[1].strip()
    return bwMap
def readCsv(fname, skipFirst=True, delimiter = ","):
    """Read a delimited text file into a list of rows.

    Args:
        fname: path of the file to parse.
        skipFirst: drop the first (header) row when True.
        delimiter: field separator handed to csv.reader.

    Returns:
        list[list[str]]: the parsed rows.
    """
    # BUG FIX: the original leaked the file handle; `with` closes it.
    with open(fname) as fh:
        rows = list(csv.reader(fh, delimiter=delimiter))
    return rows[1:] if skipFirst else rows
def write_submission(x, filename):
    """Write predictions to `filename`, one per line with 10 decimal places.

    Args:
        x: sequence of numeric predictions.
        filename: destination path (overwritten).
    """
    # `with` replaces the manual close so the file is flushed/closed even if
    # formatting raises midway.
    with open(filename, "w") as wtr:
        for value in x:
            wtr.write(format(value, "0.10f"))
            wtr.write("\n")
def run(verbose = True):
    """End-to-end driver: load train + solution data, extract features, and
    (in the full, currently commented-out version) train and write predictions.

    Only the specialCases feature block is active; the remaining feature
    extraction / classification pipeline is preserved below as an inert
    string.
    """
    t0 = time()
    train_data = readCsv(TRAIN_FILE)
    train2_data = readCsv(TEST_SOL_FILE)
    # The labeled test set is folded into training (see TEST_SOL_FILE note).
    train_data = train_data + train2_data
    # print(train_data)
    labels = array([int(x[0]) for x in train_data])
    # print(labels)
    # Column 2 holds the comment text in both CSVs.
    train = [x[2] for x in train_data]
    test_data = readCsv(TEST_FILE)
    test_data = [x[2] for x in test_data]
    data = train + test_data
    n = len(data)
    ntrain = len(train)
    X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose)
    """
    X_train1, y_train, X_test1 = ngrams(data, labels, ntrain, 1, 1, 2000, do_normalization = True, verbose = verbose)
    X_train2, y_train, X_test2 = ngrams(data, labels, ntrain, 2, 2, 4000, do_normalization = True, verbose = verbose)
    X_train3, y_train, X_test3 = ngrams(data, labels, ntrain, 3, 3, 100, do_normalization = True, verbose = verbose)
    X_train4, y_train, X_test4 = ngrams(data, labels, ntrain, 4, 4, 1000, do_normalization = True, verbose = verbose, analyzer_char = True)
    X_train5, y_train, X_test5 = ngrams(data, labels, ntrain, 5, 5, 1000, do_normalization = True, verbose = verbose, analyzer_char = True)
    X_train6, y_train, X_test6 = ngrams(data, labels, ntrain, 3, 3, 2000, do_normalization = True, verbose = verbose, analyzer_char = True)
    X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose)
    X_train8, y_train, X_test8 = skipGrams(data, labels, ntrain, verbose = verbose)
    X_tn = sp.hstack([X_train1, X_train2, X_train3, X_train4, X_train5, X_train6, X_train7, X_train8])
    X_tt = sp.hstack([X_test1, X_test2, X_test3, X_test4, X_test5, X_test6, X_test7, X_test8])
    if verbose:
        print "######## Total time for feature extraction: %fs" % (time() - t0), X_tn.shape, X_tt.shape
    predictions = runClassifiers(X_tn, labels, X_tt)
    write_submission(predictions, PREDICTION_FILE)
    print "Predictions written to:", PREDICTION_FILE
    """
run()
#some code for n grams (use tdifvectorizer)
|
3,834 | 7251d32918b16166e9b7c9613726e6dc51d6fea4 | from sqlalchemy import (Column, Integer, Float, String, ForeignKey)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
from .meta import Base, BaseModel
class Stock(Base, BaseModel):
    """A tradable stock: pricing bounds, inventory and ownership."""
    __tablename__ = 'stock'
    name = Column(String(255), nullable=False)
    starting_price = Column(Float, nullable=False)
    current_price = Column(Float, nullable=False)
    max_price = Column(Float, nullable=True)
    min_price = Column(Float, nullable=True)
    starting_stock = Column(Integer, nullable=True)
    current_stock = Column(Integer, nullable=True)
    stock_type_id = Column(UUID(as_uuid=True), ForeignKey('stock_type.id'))
    # BUG FIX: relationship() has no `back_ref` keyword (mapper configuration
    # raises on it).  Pair explicitly with StockType.stocks instead.
    stock_type = relationship('StockType', back_populates='stocks')
    user_id = Column(UUID(as_uuid=True), ForeignKey('user.id'))
    user = relationship('User')

    def __json__(self, _):
        """JSON-serializable dict of the stock's public fields."""
        return {
            "id": self.id,
            "name": self.name,
            "starting_price": self.starting_price,
            "current_price": self.current_price,
            "max_price": self.max_price,
            "min_price": self.min_price,
            "starting_stock": self.starting_stock,
            "current_stock": self.current_stock
        }
class StockType(Base, BaseModel):
    """Category grouping stocks, owned by a user."""
    __tablename__ = 'stock_type'
    name = Column(String(255), nullable=False)
    # BUG FIX: relationship() has no `back_ref` keyword (mapper configuration
    # raises on it).  Pair explicitly with Stock.stock_type instead.
    stocks = relationship('Stock', back_populates='stock_type')
    user_id = Column(UUID(as_uuid=True), ForeignKey('user.id'))
    user = relationship('User')

    def __json__(self, _):
        """JSON-serializable dict of the type's public fields."""
        return {
            "id": self.id,
            "name": self.name
        }
|
3,835 | 429af603bf8f1c003799c3d94c0ce9a2c2f80dfc | class Solution(object):
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
i = 0
for j in range(1, len(A), 2):
if A[j] % 2 == 1:
continue
else:
while i + 2 < len(A) and A[i] % 2 == 0:
i += 2
A[i], A[j] = A[j], A[i]
i += 2
return A
|
3,836 | b07073a7f65dbc10806b68729f21a8bc8773a1ab | #!/usr/bin/env python
from math import ceil, floor, sqrt
def palindromes(n: int) -> int:
    """yield successive palindromes starting at n

    NOTE: despite the `-> int` annotation this is a generator of ints.
    Works by keeping only the left half `s` of the current candidate and
    mirroring it (including the middle digit for odd lengths, excluding it
    for even lengths).  When the half is all 9s the palindrome length grows
    and the parity flag flips (e.g. 999 -> 1001, 9889 -> 9999 -> 10001).
    """
    # 1 -> 2 -> 3 ... 9 -> 11 -> 22 -> 33 -> 44 .. 99 -> 101
    # 101 -> 111 -> 121 -> 131 -> ... -> 191 -> 202 -> 212
    # 989 -> 999 -> 1001 -> 1111 -> 1221
    # 9889 -> 9999 -> 10001 -> 10101 -> 10201
    prev = n
    s = str(n)
    even = len(s) % 2 == 0
    s = s[:ceil(len(s) / 2)]
    n = int(s)
    while True:
        if even:
            pal = int(''.join([s, s[-1::-1]])) # join '12' with '21'
        else:
            pal = int(''.join([s, s[-2::-1]])) # join '12' with '1'
        if prev <= pal:
            # Skip candidates below the requested starting point.
            yield pal
        n += 1
        if all(digit == '9' for digit in s):
            # Rolling over 9...9: the palindrome gains a digit; the half
            # length only grows on odd->even transitions, hence the n //= 10.
            even = not even
            if even: n //= 10
        s = str(n)
def isPrime(n: int) -> bool:
    """Trial-division primality test: True iff n is a prime number."""
    if n < 2:
        return False
    # No divisor up to floor(sqrt(n)) means n is prime.
    return all(n % d != 0 for d in range(2, floor(sqrt(n)) + 1))
class Solution:
    def primePalindrome(self, N: int) -> int:
        """return lowest prime palindrome >= N"""
        # palindromes(N) is an infinite ascending stream, so next() always
        # finds the first prime member.
        return next(p for p in palindromes(N) if isPrime(p))
|
3,837 | 2539411c7b348662dbe9ebf87e26faacc20f4c5e | import numpy as np
import math
import os
if os.getcwd().rfind('share') > 0:
topsy = True
import matplotlib as mpl
mpl.use('Agg')
else:
topsy = False
from matplotlib import rc
import matplotlib.pyplot as plt
from matplotlib import rc
from matplotlib import cm
from scipy.optimize import curve_fit
import sys
import h5py
from glob import glob
# Derive the run index from the working directory name (".../predK").
pwd = os.getcwd()
k = int(pwd[pwd.rfind('pred')+4:])
# One colour per group* directory, sampled along the magma colormap.
number_of_lines = len(glob('group*[0-9]*'))
cm_subsection = np.linspace(0., 1., number_of_lines)
colors = [ cm.magma(x) for x in cm_subsection]
# Dummy contour plot used only to obtain a colorbar handle (CS3), then cleared.
Z = [[0,0],[0,0]]
levels = range(5,500+5,5)
CS3 = plt.contourf(Z, levels, cmap='magma')
plt.clf()
area = []
def movingaverage(interval, window_size):
    """Boxcar-smooth `interval` with an equal-weight window of
    `window_size` samples; output has the same length ('same' mode)."""
    taps = int(window_size)
    kernel = np.full(taps, 1.0 / float(window_size))
    return np.convolve(interval, kernel, 'same')
# Python 2 script body: aggregate per-group survivor counts from the HDF5
# snapshots, plot smoothed N(t) per group, then plot area-under-curve vs size.
j = 0
for group in sorted(glob('group*[0-9]*')):
    files = glob(group + '/data*.h5')
    print group
    alive = []
    time = []
    plotPeriod = 0.1
    for dFile in files:
        value = dFile[dFile.rfind('-')+1:dFile.rfind('.')]
        data = dict()
        h5f = h5py.File(dFile,'r')
        itime = np.copy(h5f['itime'])[0]
        data['alive'] = np.copy(h5f['alive'])
        data['t'] = np.copy(h5f['t'])
        # Subsample: keep one sample per plotPeriod of simulated time.
        lastPlot = 0
        for i in range(itime):
            if data['t'][i] - lastPlot > plotPeriod:
                time.append(data['t'][i])
                alive.append(data['alive'][i].sum())
                lastPlot = data['t'][i]
    alive = np.array(alive).reshape(len(alive), 1)
    time = np.array(time).reshape(len(time), 1)
    # Sort (t, alive) pairs by time before smoothing.
    data = np.append(time, alive, axis = 1)
    data = data.tolist()
    data2 = sorted(data, key=lambda x : x[0])
    data2 = np.array(data2)
    if np.shape(data2)[0] > 0:
        y_av = movingaverage(data2[:,1], 75)
        # Trim 100/50 samples at the ends to drop smoothing edge artifacts.
        plt.plot(data2[:,0][100:-50], y_av[100:-50], label = group, color = colors[j])
        trap = np.trapz(y_av[100:-50], x = data2[:,0][100:-50])
        # group name is "groupNN" -> record (NN, area under the curve).
        area += [[int(group[5:]), trap]]
    j +=1
plt.colorbar(CS3)
plt.xlabel('Time', fontsize = 18)
plt.ylabel('$N(t)$', fontsize = 18)
plt.savefig('./groupPredation')
np.save('./area.npy', area)
# Second figure: raw and smoothed area-vs-group-size curves.
area = np.array(area)
plt.plot(area[:,0], area[:,1], lw = 2)
y_av = movingaverage(area[:,1], 3)
plt.plot(area[:,0][5:-5], y_av[5:-5], lw = 2)
plt.xlabel('Group Size', fontsize = 18)
plt.ylabel('Area', fontsize = 18)
plt.savefig('./groupPredationArea.png')
|
3,838 | 63a2c8b0c2eba2d5f9f82352196ef2b67d4d63b5 | inp = int(input())
print(bytes(inp))
|
3,839 | 7bf81954bef81004b6c9838ed00c624d24fcf0c6 | # Generated by Django 2.0.3 on 2018-07-05 04:16
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the `user` field from Application."""
    dependencies = [
        ('application_manager', '0015_auto_20180705_0415'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='application',
            name='user',
        ),
    ]
|
3,840 | 4d707e23f66e8b6bea05a5901d3d8e459247c6c1 | import cv2
import sys
# Load the Haar cascades
face_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_frontalface_default.xml')
eyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')
# Grab a single frame from the default camera, then release it immediately.
capture = cv2.VideoCapture(0)
_, image = capture.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
capture.release()
cv2.destroyAllWindows()
# Emit "1" on stdout when at least one face is detected in the frame, else "0".
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) >= 1:
    sys.stdout.write("1")
else:
    sys.stdout.write("0")
|
3,841 | 791df87235f5da634fc62ebc3a3741cea6e2deca | def summation(numbers):
positive_numbers = []
normalized_numbers = []
numbers_list = numbers.split()
for idx, arg in enumerate(numbers_list):
int_arg = int(arg)
if int_arg < 0:
new_arg = abs(int_arg) * 2
else:
new_arg = int_arg
positive_numbers.append(new_arg)
max_of_positive_numbers = max(positive_numbers)
for idx, arg in enumerate(positive_numbers):
normalized_arg = arg / max_of_positive_numbers
normalized_numbers.append(normalized_arg)
print(sum(normalized_numbers))
|
3,842 | c179d27f1620414061d376d4f30d2ddd4fd2750e | import sys, serial, time, signal, threading
from MFRC522 import MFRC522
from event import Event
class Sensor(threading.Thread):
    # main program for reading and processing tags
    """Python 2 background thread that polls an MFRC522 RFID reader and
    fires the FOUND_TAG event each time a tag is detected."""
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.name = name
        self.continue_reading = False
        self.tag_reader = MFRC522()
        # Ctrl+C stops the read loop gracefully.
        self.signal = signal.signal(signal.SIGINT, self.end_read)
        # Most recently read tag id, as a concatenated decimal string.
        self.last_tag = ''
        #EVENTS
        self.FOUND_TAG = Event()
    def end_read(self, signal,frame):
        """SIGINT handler: stop the polling loop."""
        print "Ctrl+C captured, ending read."
        self.stop()
    def stop(self):
        """Ask the polling loop to exit after its current iteration."""
        self.continue_reading = False
    def run(self):
        """Thread body: poll the reader every 0.1s until stop() is called."""
        print "sensor running"
        self.continue_reading = True
        #if RFID is working - start monitoring it
        while self.continue_reading:
            (status,TagType) = self.tag_reader.MFRC522_Request(self.tag_reader.PICC_REQIDL)
            if status == self.tag_reader.MI_OK:
                print "Card detected"
                # Anti-collision read returns the tag's id bytes.
                (status,backData) = self.tag_reader.MFRC522_Anticoll()
                if status == self.tag_reader.MI_OK:
                    rfid_tag = "".join(str(val) for val in backData)
                    print 'TAG : %s' % rfid_tag
                    self.last_tag = rfid_tag
                    self.FOUND_TAG(self)
            time.sleep(.1)
        print 'not reading sensor'
    # def start(self):
    #     print "sensor running"
    #     self.continue_reading = True
    #     #if RFID is working - start monitoring it
    #     while self.continue_reading:
    #         (status,TagType) = self.tag_reader.MFRC522_Request(self.tag_reader.PICC_REQIDL)
    #         if status == self.tag_reader.MI_OK:
    #             print "Card detected"
    #             (status,backData) = self.tag_reader.MFRC522_Anticoll()
    #             if status == self.tag_reader.MI_OK:
    #                 rfid_tag = "".join(str(val) for val in backData)
    #                 print 'TAG : %s' % rfid_tag
    #                 self.last_tag = rfid_tag
    #                 self.FOUND_TAG(self)
    #         time.sleep(.1)
    #     print 'not reading sensor'
|
3,843 | bee6ba1db608c1d9c8114f89d4b3abab795a6b86 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config
import os
db = SQLAlchemy()
static_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')
def create_app(config_name):
    """Flask application factory.

    Args:
        config_name: key into the module-level `config` mapping selecting
            the settings class to load.

    Returns:
        The configured Flask app, serving static files from static_file_dir,
        with the main blueprint registered and SQLAlchemy bound.
    """
    app = Flask(__name__, static_folder=static_file_dir)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # Imported here to avoid a circular import at module load time.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    db.init_app(app)
    return app
3,844 | 38abc4bc99f3b15b416c77481818464a6c7f11ef | import mysql.connector
from mysql.connector import errorcode
DB_NAME = 'PieDB'
# Table name -> CREATE TABLE DDL, executed further below.
TABLES = {}
# TABLES['pietweets'] = (
#     "CREATE TABLE `pietweets` ("
#     "  `id` int NOT NULL AUTO_INCREMENT,"
#     "  `tweet_id` bigint NOT NULL,"
#     "  `username` varchar(32) NOT NULL,"
#     "  `geo_lat` float(53) NOT NULL,"
#     "  `geo_long` float(53) NOT NULL,"
#     "  `text` varchar(255) NOT NULL,"
#     "  `timestamp` datetime NOT NULL,"
#     "  PRIMARY KEY (`id`)"
#     ") ENGINE=InnoDB")
TABLES['lemonpie'] = (
    "CREATE TABLE `lemonpie` ("
    " `id` int NOT NULL AUTO_INCREMENT,"
    " `tweet_id` bigint NOT NULL,"
    " `username` varchar(32) NOT NULL,"
    " `geo_lat` float(53) NOT NULL,"
    " `geo_long` float(53) NOT NULL,"
    " `text` varchar(255) NOT NULL,"
    " `timestamp` datetime NOT NULL,"
    " PRIMARY KEY (`id`)"
    ") ENGINE=InnoDB")
# DB credentials
# NOTE(review): credentials are hard-coded in source — consider moving them
# to environment variables or a config file.
config = {
    'user': 'piemaster',
    'password': 'piemaster123',
    'host': 'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com',
    'database': 'PieDB',
    'raise_on_warnings': True,
}
# establish connection with DB config credentials
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
def create_database(cursor):
    """Create DB_NAME with utf8 charset; exit the process on failure."""
    try:
        cursor.execute(
            "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(DB_NAME))
    except mysql.connector.Error as err:
        print("Failed creating database: {}".format(err))
        exit(1)
# Try connecting to the designated DB; if it does not exist, create it first.
try:
    cnx.database = DB_NAME
except mysql.connector.Error as err:
    if err.errno == errorcode.ER_BAD_DB_ERROR:
        create_database(cursor)
        cnx.database = DB_NAME
    else:
        print(err)
        exit(1)
# Create each table, tolerating ones that already exist.
# PORTABILITY FIX: dict.iteritems() is Python-2-only; .items() works on both.
for name, ddl in TABLES.items():
    try:
        print("Creating table {}: ".format(name))
        cursor.execute(ddl)
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
            print("already exists.")
        else:
            print(err.msg)
    else:
        print("OK")
# closing db connection
cursor.close()
cnx.close()
|
3,845 | 9f3b7d6dbf57157b5ebd6ad72f46befc94798a5f | def count_words(word):
count = 0
count = len(word.split())
return count
if __name__ == '__main__':
print count_words("Boj is dope")
|
3,846 | d0d86d8b5b276218add6dd11a44d5c3951cc4e14 | from django.db.models import Q
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from carga_horaria.models import Profesor, AsignaturaBase, Asignatura, Asistente
from carga_horaria.formsAlexis import ProfesorForm, AsignaturaBaseForm, AsignaturaCreateForm, AsignaturaUpdateForm, AsistenteForm
from django.core.urlresolvers import reverse_lazy, reverse
from guardian.shortcuts import get_objects_for_user
from .models import Persona
from .models import Fundacion
from .models import Colegio
from .models import Periodo
from .models import Nivel
class LevelFilterMixin(object):
    """List-view mixin: expose the Nivel choices to the template and filter
    the queryset by the `nivel` GET parameter (matched on `plan__nivel`)."""
    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        # Choices reversed so the selector lists levels newest/highest first.
        ctx['levels'] = [(tag.name, tag.value) for tag in Nivel][::-1]
        ctx['nivel_actual'] = self.request.GET.get('nivel')
        return ctx
    def get_queryset(self):
        qs = super().get_queryset()
        nivel = self.request.GET.get('nivel')
        if nivel:
            qs = qs.filter(plan__nivel=nivel)
        return qs
# FIXME: I will leave it like this for now,
# but it's still possible for somebody to poke object ids to see what shouldn't see
# fix this!!1
class SearchMixin(object):
    """List-view mixin: free-text search via the `q` GET parameter.

    Searches persona name/rut plus model-specific assignment descriptions;
    the field set differs for Profesor vs. other (Asistente-like) models.
    Relies on the `unaccent` lookup (PostgreSQL extension).
    """
    def get_queryset(self):
        qs = super(SearchMixin, self).get_queryset()
        q = self.request.GET.get('q', None)
        if q:
            if qs.model == Profesor:
                qs = qs.filter(Q(persona__nombre__unaccent__icontains=q) | Q(persona__rut__unaccent__icontains=q) | Q(asignacionextra__descripcion__unaccent__icontains=q) | Q(asignacionnoaula__descripcion__unaccent__icontains=q))
            else:
                qs = qs.filter(Q(persona__nombre__unaccent__icontains=q) | Q(persona__rut__unaccent__icontains=q) | Q(asignacionasistente__descripcion__unaccent__icontains=q) | Q(funcion__unaccent__icontains=q))
        return qs
def get_for_user(request, qs, lookup, user):
    """Scope *qs* to the colegios *user* may edit, for the session's periodo.

    ``lookup`` is an ORM path ending in ``__pk`` (e.g. ``'colegio__pk'``);
    the periodo filter key is derived by replacing the ``pk`` leaf.
    Superusers see every Colegio; other users only those carrying the
    ``carga_horaria.change_colegio`` object permission.  A colegio chosen
    via the session switcher narrows the set to that single pk.

    Fix: the two permission branches previously duplicated the switcher
    and filter logic verbatim; only the colegio source differs.
    """
    periodo = request.session.get('periodo', 2020)
    if user.is_superuser:
        colegios = [c.pk for c in Colegio.objects.all()]
    else:
        colegios = [c.pk for c in get_objects_for_user(user, "carga_horaria.change_colegio")]
    # Colegio switcher: an explicit session selection overrides the list.
    selected = request.session.get('colegio__pk', None)
    if selected:
        colegios = [selected]
    kwargs = {"{}__in".format(lookup): colegios,
              # NOTE(review): 'periode' looks like a typo for 'periodo';
              # kept as-is -- confirm against the model before renaming.
              "{}periode".format(lookup[:-2]): periodo}
    return qs.filter(**kwargs).distinct()
class GetObjectsForUserMixin(object):
    """Queryset mixin applying the same colegio/periodo scoping as get_for_user().

    Subclasses set ``lookup`` to the ORM path reaching the colegio pk
    (e.g. ``'colegio__pk'`` or ``'plan__colegio__pk'``).

    Fix: the superuser and non-superuser branches previously duplicated
    the switcher and filter construction verbatim.
    """
    def get_queryset(self):
        qs = super(GetObjectsForUserMixin, self).get_queryset()
        periodo = self.request.session.get('periodo', 2020)
        if self.request.user.is_superuser:
            colegios = [c.pk for c in Colegio.objects.all()]
        else:
            colegios = [c.pk for c in get_objects_for_user(self.request.user, "carga_horaria.change_colegio")]
        # Colegio switcher: an explicit session selection overrides the list.
        selected = self.request.session.get('colegio__pk', None)
        if selected:
            colegios = [selected]
        kwargs = {"{}__in".format(self.lookup): colegios,
                  # NOTE(review): 'periode' may be a typo for 'periodo' -- confirm.
                  "{}periode".format(self.lookup[:-2]): periodo}
        return qs.filter(**kwargs).distinct()
class ObjPermissionRequiredMixin(object):
    """Object-level permission guard for single-object views.

    Subclasses set ``permission`` to the permission string checked
    against the fetched object (guardian object permissions).
    """
    def get_object(self, *args, **kwargs):
        obj = super(ObjPermissionRequiredMixin, self).get_object(*args, **kwargs)
        if self.request.user.has_perm(self.permission, obj):
            return obj
        else:
            # 404 (not 403) hides the object's existence from unauthorized users.
            raise Http404
"""
Comienzo Crud Profesor
"""
class ProfesorListView(LoginRequiredMixin, SearchMixin, GetObjectsForUserMixin, ListView):
    """Paginated, searchable professor list scoped to the user's colegios."""
    model = Profesor
    lookup = 'colegio__pk'  # ORM path consumed by GetObjectsForUserMixin
    template_name = 'carga_horaria/profesor/listado_profesor.html'
    # NOTE(review): search_fields is not read by SearchMixin as written -- confirm.
    search_fields = ['nombre', 'horas']
    paginate_by = 6
class ProfesorDetailView(LoginRequiredMixin, DetailView):
    """Professor detail page."""
    model = Profesor
    template_name = 'carga_horaria/profesor/detalle_profesor.html'
class ProfesorCreateView(LoginRequiredMixin, CreateView):
    """Create a Profesor, creating/updating its linked Persona (matched by RUT)."""
    model = Profesor
    form_class = ProfesorForm
    template_name = 'carga_horaria/profesor/nuevo_profesor.html'
    success_url = reverse_lazy('carga-horaria:profesores')

    # Personal-data fields copied verbatim from the form into Persona.
    _PERSONA_FIELDS = ('nombre', 'direccion', 'comuna', 'nacionalidad',
                       'telefono', 'email_personal', 'email_institucional',
                       'estado_civil', 'discapacidad', 'recibe_pension',
                       'adventista', 'fecha_nacimiento')

    def get_form_kwargs(self, *args, **kwargs):
        """Pass the user -- plus colegio/fundacion when one is selected -- to the form."""
        form_kwargs = super(ProfesorCreateView, self).get_form_kwargs(*args, **kwargs)
        form_kwargs['user'] = self.request.user
        colegio_pk = self.request.session.get('colegio__pk', None)
        if colegio_pk:
            form_kwargs['colegio'] = colegio_pk
            form_kwargs['fundacion'] = Colegio.objects.get(pk=colegio_pk).fundacion.pk
        return form_kwargs

    def form_valid(self, form):
        """Upsert the Persona record by RUT, then save the professor."""
        profesor = form.save(commit=False)
        defaults = {name: form.cleaned_data[name] for name in self._PERSONA_FIELDS}
        profesor.persona, _ = Persona.objects.update_or_create(
            rut=form.cleaned_data['rut'], defaults=defaults)
        profesor.save()
        return redirect(reverse('carga-horaria:profesores'))
class ProfesorUpdateView(LoginRequiredMixin, UpdateView):
    """Edit a Profesor and keep its linked Persona in sync (matched by RUT)."""
    model = Profesor
    form_class = ProfesorForm
    template_name = 'carga_horaria/profesor/editar_profesor.html'

    # Personal-data fields copied verbatim from the form into Persona.
    _PERSONA_FIELDS = ('nombre', 'direccion', 'comuna', 'nacionalidad',
                       'telefono', 'email_personal', 'email_institucional',
                       'estado_civil', 'discapacidad', 'recibe_pension',
                       'adventista', 'fecha_nacimiento')

    def get_form_kwargs(self, *args, **kwargs):
        """Pass the user -- plus colegio/fundacion when one is selected -- to the form."""
        form_kwargs = super(ProfesorUpdateView, self).get_form_kwargs(*args, **kwargs)
        form_kwargs['user'] = self.request.user
        colegio_pk = self.request.session.get('colegio__pk', None)
        if colegio_pk:
            form_kwargs['colegio'] = colegio_pk
            form_kwargs['fundacion'] = Colegio.objects.get(pk=colegio_pk).fundacion.pk
        return form_kwargs

    def form_valid(self, form):
        """Upsert the Persona record by RUT, then save the professor."""
        profesor = form.save(commit=False)
        defaults = {name: form.cleaned_data[name] for name in self._PERSONA_FIELDS}
        profesor.persona, _ = Persona.objects.update_or_create(
            rut=form.cleaned_data['rut'], defaults=defaults)
        profesor.save()
        return redirect(self.get_success_url())

    def get_success_url(self):
        """Return to this professor's detail page."""
        return reverse('carga-horaria:profesor', kwargs={'pk': self.object.pk})
class ProfesorDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a professor; GET deletes immediately (no confirmation template)."""
    model = Profesor
    success_url = reverse_lazy('carga-horaria:profesores')
    def get(self, request, *args, **kwargs):
        # Skip DeleteView's confirmation step by treating GET as POST.
        return self.post(request, *args, **kwargs)
# """
# Comienzo Crud Curso
# """
# class CursoListView(ListView):
# """
# Listado de cursos
# """
# model = Curso
# template_name = 'carga_horaria/curso/listado_curso.html'
# search_fields = ['periodo', 'letra']
# paginate_by = 6
# class CursoDetailView(DetailView):
# """
# Detalle de curso
# """
# model = Curso
# template_name = 'carga_horaria/curso/detalle_curso.html'
# class CursoCreateView(CreateView):
# model = Curso
# form_class = CursoForm
# template_name = 'carga_horaria/curso/nuevo_curso.html'
# success_url = reverse_lazy('carga-horaria:cursos')
# class CursoUpdateView(UpdateView):
# model = Curso
# form_class = CursoForm
# template_name = 'carga_horaria/curso/editar_curso.html'
# def get_success_url(self):
# return reverse(
# 'carga-horaria:curso',
# kwargs={
# 'pk': self.object.pk,
# }
# )
# class CursoDeleteView(DeleteView):
# model = Curso
# success_url = reverse_lazy('carga-horaria:cursos')
# def get(self, request, *args, **kwargs):
# return self.post(request, *args, **kwargs)
"""
Comienzo Crud Asistente
"""
class AsistenteListView(LoginRequiredMixin, SearchMixin, GetObjectsForUserMixin, ListView):
    """Paginated, searchable assistant list scoped to the user's colegios."""
    model = Asistente
    lookup = 'colegio__pk'  # ORM path consumed by GetObjectsForUserMixin
    template_name = 'carga_horaria/asistente/listado_asistente.html'
    # NOTE(review): search_fields is not read by SearchMixin as written -- confirm.
    search_fields = ['nombre', 'horas']
    paginate_by = 6
class AsistenteDetailView(LoginRequiredMixin, DetailView):
    """Assistant detail page."""
    model = Asistente
    template_name = 'carga_horaria/asistente/detalle_asistente.html'
class AsistenteCreateView(LoginRequiredMixin, CreateView):
    """Create an Asistente, creating/updating its linked Persona (matched by RUT)."""
    model = Asistente
    form_class = AsistenteForm
    template_name = 'carga_horaria/asistente/nuevo_asistente.html'
    success_url = reverse_lazy('carga-horaria:asistentes')

    # Personal-data fields copied verbatim from the form into Persona.
    _PERSONA_FIELDS = ('nombre', 'direccion', 'comuna', 'nacionalidad',
                       'telefono', 'email_personal', 'email_institucional',
                       'estado_civil', 'discapacidad', 'recibe_pension',
                       'adventista', 'fecha_nacimiento')

    def get_form_kwargs(self, *args, **kwargs):
        """Pass the user -- plus colegio/fundacion when one is selected -- to the form."""
        form_kwargs = super(AsistenteCreateView, self).get_form_kwargs(*args, **kwargs)
        form_kwargs['user'] = self.request.user
        colegio_pk = self.request.session.get('colegio__pk', None)
        if colegio_pk:
            form_kwargs['colegio'] = colegio_pk
            form_kwargs['fundacion'] = Colegio.objects.get(pk=colegio_pk).fundacion.pk
        return form_kwargs

    def form_valid(self, form):
        """Upsert the Persona record by RUT, then save the assistant."""
        asistente = form.save(commit=False)
        defaults = {name: form.cleaned_data[name] for name in self._PERSONA_FIELDS}
        asistente.persona, _ = Persona.objects.update_or_create(
            rut=form.cleaned_data['rut'], defaults=defaults)
        asistente.save()
        return redirect(reverse('carga-horaria:asistentes'))
class AsistenteUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an Asistente and keep its linked Persona in sync (matched by RUT)."""
    model = Asistente
    form_class = AsistenteForm
    template_name = 'carga_horaria/asistente/editar_asistente.html'

    # Personal-data fields copied verbatim from the form into Persona.
    _PERSONA_FIELDS = ('nombre', 'direccion', 'comuna', 'nacionalidad',
                       'telefono', 'email_personal', 'email_institucional',
                       'estado_civil', 'discapacidad', 'recibe_pension',
                       'adventista', 'fecha_nacimiento')

    def get_success_url(self):
        """Return to this assistant's detail page."""
        return reverse('carga-horaria:asistente', kwargs={'pk': self.object.pk})

    def form_valid(self, form):
        """Upsert the Persona record by RUT, then save the assistant."""
        asistente = form.save(commit=False)
        defaults = {name: form.cleaned_data[name] for name in self._PERSONA_FIELDS}
        asistente.persona, _ = Persona.objects.update_or_create(
            rut=form.cleaned_data['rut'], defaults=defaults)
        asistente.save()
        return redirect(self.get_success_url())
class AsistenteDeleteView(LoginRequiredMixin, DeleteView):
    """Delete an assistant; GET deletes immediately (no confirmation template)."""
    model = Asistente
    success_url = reverse_lazy('carga-horaria:asistentes')
    def get(self, request, *args, **kwargs):
        # Skip DeleteView's confirmation step by treating GET as POST.
        return self.post(request, *args, **kwargs)
"""
Comienzo Crud Asignatura Base
"""
class AsignaturaBaseListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):
    """Paginated list of plan-level subjects, filterable by ?nivel.

    NOTE(review): this duplicates LevelFilterMixin except that levels keep
    declaration order here (the mixin reverses them) -- confirm before unifying.
    """
    model = AsignaturaBase
    lookup = 'plan__colegio__pk'  # ORM path consumed by GetObjectsForUserMixin
    template_name = 'carga_horaria/asignaturabase/listado_asignaturabase.html'
    search_fields = ['nombre', 'plan']
    paginate_by = 10
    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]
        ctx['nivel_actual'] = self.request.GET.get('nivel')
        return ctx
    def get_queryset(self):
        qs = super().get_queryset()
        nivel = self.request.GET.get('nivel')
        if nivel:
            qs = qs.filter(plan__nivel=nivel)
        return qs
class AsignaturaBaseDetailView(LoginRequiredMixin, DetailView):
    """Plan-level subject detail page."""
    model = AsignaturaBase
    template_name = 'carga_horaria/asignaturabase/detalle_asignaturabase.html'
class AsignaturaBaseCreateView(LoginRequiredMixin, CreateView):
    """Create a plan-level subject scoped to the session's selected colegio."""
    model = AsignaturaBase
    form_class = AsignaturaBaseForm
    template_name = 'carga_horaria/asignaturabase/nuevo_asignaturabase.html'
    success_url = reverse_lazy('carga-horaria:asignaturasbase')

    def get_form_kwargs(self, *args, **kwargs):
        """Hand the current user and the session-selected colegio to the form."""
        form_kwargs = super(AsignaturaBaseCreateView, self).get_form_kwargs(*args, **kwargs)
        form_kwargs['user'] = self.request.user
        form_kwargs['colegio'] = self.request.session.get('colegio__pk', None)
        return form_kwargs
class AsignaturaBaseUpdateView(LoginRequiredMixin, UpdateView):
    """Edit a plan-level subject."""
    model = AsignaturaBase
    form_class = AsignaturaBaseForm
    template_name = 'carga_horaria/asignaturabase/editar_asignaturabase.html'
    def get_success_url(self):
        # Back to this subject's detail page.
        return reverse(
            'carga-horaria:asignaturabase',
            kwargs={
                'pk': self.object.pk,
            }
        )
class AsignaturaBaseDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a plan-level subject; GET deletes immediately (no confirmation)."""
    model = AsignaturaBase
    success_url = reverse_lazy('carga-horaria:asignaturasbase')
    def get(self, request, *args, **kwargs):
        # Skip DeleteView's confirmation step by treating GET as POST.
        return self.post(request, *args, **kwargs)
"""
Comienzo Crud Asignatura
"""
class AsignaturaListView(LoginRequiredMixin, ListView):
    """Subject list, filterable by ?nivel and ?periodo GET parameters.

    Unlike the mixin-based lists above, this one is not scoped per colegio.
    """
    model = Asignatura
    template_name = 'carga_horaria/asignatura/listado_asignatura.html'
    search_fields = ['base', 'periodo']
    paginate_by = 10
    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        # Levels in reverse declaration order, mirroring LevelFilterMixin.
        ctx['levels'] = [(tag.name, tag.value) for tag in Nivel][::-1]
        ctx['nivel_actual'] = self.request.GET.get('nivel')
        return ctx
    def get_queryset(self):
        qs = super().get_queryset()
        nivel = self.request.GET.get('nivel')
        if nivel:
            qs = qs.filter(base__plan__nivel=nivel)
        periodo = self.request.GET.get('periodo')
        if periodo:
            qs = qs.filter(periodo__pk=periodo)
        return qs
class AsignaturaDetailView(LoginRequiredMixin, DetailView):
    """Subject detail, rendered in the context of the periodo from the URL."""
    model = Asignatura
    template_name = 'carga_horaria/asignatura/detalle_asignatura.html'
    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        # The URL names the periodo explicitly; subjects are attached to
        # periodos via the M2M used in AsignaturaCreateView.
        ctx['periodo'] = Periodo.objects.get(pk=self.kwargs['periodo_pk'])
        return ctx
class AsignaturaCreateView(LoginRequiredMixin, CreateView):
    """Create a subject inside the periodo whose pk is in the URL.

    Rejects the form when the requested hours exceed the periodo's
    remaining available time, then links the new subject to the periodo.
    """
    model = Asignatura
    form_class = AsignaturaCreateForm
    template_name = 'carga_horaria/asignatura/nuevo_asignatura.html'
    def form_valid(self, form):
        # dirty validation
        periodo = Periodo.objects.get(pk=self.kwargs['pk'])
        horas = form.cleaned_data['horas']
        available = periodo.available
        if horas > available:
            form.add_error('horas', "Horas superan el tiempo disponible ({})".format(available))
            return self.form_invalid(form)
        else:
            self.object = form.save()
            # Attach the new subject to the periodo it was created under.
            self.object.periodos.add(periodo)
            return HttpResponseRedirect(self.get_success_url())
    def get_success_url(self):
        # Back to the periodo the subject was added to.
        return reverse(
            'carga-horaria:periodo',
            kwargs={
                'pk': self.kwargs['pk'],
            }
        )
class AsignaturaUpdateView(LoginRequiredMixin, UpdateView):
    """Edit a subject's hours, enforcing the periodo's remaining time and
    the minimum hours from the base study plan (when the subject has one)."""
    model = Asignatura
    form_class = AsignaturaUpdateForm
    template_name = 'carga_horaria/asignatura/editar_asignatura.html'
    def get_success_url(self):
        # Back to the periodo this subject was edited under.
        return reverse('carga-horaria:periodo', kwargs={'pk': self.kwargs['periodo_pk']})
    def form_valid(self, form):
        # dirty validation
        periodo = Periodo.objects.get(pk=self.kwargs['periodo_pk'])
        horas = form.cleaned_data['horas']
        # Only the *increase* over the stored hours consumes available time,
        # so compare the delta (re-fetched from the DB) against what's left.
        old_horas = Asignatura.objects.get(pk=self.object.pk).horas
        delta = horas - old_horas
        available = periodo.available
        if delta > available:
            form.add_error('horas', "Horas superan el tiempo disponible ({})".format(available + old_horas))
            return self.form_invalid(form)
        elif self.object.base:
            # Minimum hours depend on the colegio's schedule flag.
            # NOTE(review): jec/nec semantics inferred from field names -- confirm.
            if periodo.colegio.jec:
                horas_base = self.object.base.horas_jec
            else:
                horas_base = self.object.base.horas_nec
            if horas < horas_base:
                form.add_error('horas', "Horas deben ser como mínimo las del plan de estudios original ({})".format(horas_base))
                return self.form_invalid(form)
        return super().form_valid(form)
class AsignaturaDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a subject; GET deletes immediately (no confirmation template)."""
    model = Asignatura
    def get(self, request, *args, **kwargs):
        # Skip DeleteView's confirmation step by treating GET as POST.
        return self.post(request, *args, **kwargs)
    def get_success_url(self):
        # Back to the periodo the subject belonged to.
        return reverse(
            'carga-horaria:periodo',
            kwargs={
                'pk': self.kwargs['periodo_pk'],
            }
        )
|
3,847 | ad1aa69f92f104ac8b82aca3c0a64ce3de48b36d | # Copyright (c) 2021 Koichi Sakata
from pylib_sakata import init as init
# uncomment the follows when the file is executed in a Python console.
# init.close_all()
# init.clear_all()
import os
import shutil
import numpy as np
from control import matlab
from pylib_sakata import ctrl
from pylib_sakata import plot
print('Start simulation!')
# Common parameters
# Fresh output folder for the generated figures (wiped on every run).
figurefolderName = 'figure_2mass_pl'
if os.path.exists(figurefolderName):
    shutil.rmtree(figurefolderName)
os.makedirs(figurefolderName)
Ts = 1/4000  # sampling period [s]
dataNum = 10000
freqrange = [1, 1000]  # analysis band [Hz]
freq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum, base=10)
s = ctrl.tf([1, 0], [1])      # Laplace variable (continuous)
z = ctrl.tf([1, 0], [1], Ts)  # shift operator (discrete, period Ts)
print('Common parameters were set.')
# Plant model: two-mass system (motor M1 + load M2) with one resonance.
M1 = 1.0
M2 = 1.0
M = M1 + M2
C = 10.0
K = 0.0
Creso = 10.0
Kreso = 50000.0
# Residue gains of the resonant mode seen from motor (k1) and load (k2) side.
k1 = M2/(M1 * (M1 + M2))
k2 = -1.0/(M1 + M2)
omegaPreso = np.sqrt(Kreso * (M1 + M2)/(M1 * M2))
zetaPreso = 0.5 * Creso*np.sqrt((M1 + M2)/(Kreso * M1 * M2))
Pmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2*zetaPreso*omegaPreso, omegaPreso**2])
Pmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2*zetaPreso*omegaPreso, omegaPreso**2])
# 4-sample delay: Pade approximation in continuous time, z^-4 in discrete time.
numDelay, denDelay = matlab.pade(Ts*4, n=4)
Ds = ctrl.tf(numDelay, denDelay)
Dz = z**-4
Pns1 = Pmechs1 * Ds
Pns2 = Pmechs2 * Ds
Pnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz
Pnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz
Pnz1_frd = ctrl.sys2frd(Pnz1, freq)
Pnz2_frd = ctrl.sys2frd(Pnz2, freq)
print('Plant model was set.')
# Design PID controller
freq1 = 10.0
zeta1 = 1.0
freq2 = 10.0
zeta2 = 1.0
Cz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)
Cz_frd = ctrl.sys2frd(Cz, freq)
print('PID controller was designed.')
# Design phase lead filter (PLz2 swaps the corner order of PLz1).
zeta1 = 0.7
freq1 = 40
zeta2 = 0.7
freq2 = 60
PLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)
PLz1_frd = ctrl.sys2frd(PLz1, freq)
PLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)
PLz2_frd = ctrl.sys2frd(PLz2, freq)
# Fixed typos in the progress messages ('desinged', 'respose alanysis').
print('Phase lead filters were designed.')
print('Frequency response analysis is running...')
# Open-loop G, sensitivity S = 1/(1+G), complementary T = 1-S,
# with and without ('_pl') the phase lead filters.
# Motor side
Gn1_frd = Pnz1_frd * Cz_frd
Sn1_frd = 1/(1 + Gn1_frd)
Tn1_frd = 1 - Sn1_frd
Gn1_pl_frd = Pnz1_frd * Cz_frd * PLz1_frd
Sn1_pl_frd = 1/(1 + Gn1_pl_frd)
Tn1_pl_frd = 1 - Sn1_pl_frd
# Load side
Gn2_frd = Pnz2_frd * Cz_frd
Sn2_frd = 1/(1 + Gn2_frd)
Tn2_frd = 1 - Sn2_frd
Gn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd
Sn2_pl_frd = 1/(1 + Gn2_pl_frd)
Tn2_pl_frd = 1 - Sn2_pl_frd
print('Plotting figures...')
# Plant
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of plant')
plot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange, legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName+'/freq_P.png')
# PID controller
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange, title='Frequency response of PID controller')
plot.savefig(figurefolderName+'/freq_C.png')
# Phase lead filters
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of filters')
plot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange, [-10, 10], legend=['PL for motor side', 'PL for load side'])
plot.savefig(figurefolderName+'/freq_PL.png')
# Open loop function
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of open loop transfer function')
plot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange, legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName+'/freq_G.png')
# Sensitivity function
fig = plot.makefig()
ax_mag = fig.add_subplot(111)
ax_phase = None
plot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange, [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])
plot.savefig(figurefolderName+'/freq_S.png')
# Complementary sensitivity function
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of complementary sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange, [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])
plot.savefig(figurefolderName+'/freq_T.png')
# Nyquist (full view, then zoomed to +-5)
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName+'/nyquist.png')
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5], yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName+'/nyquist_.png')
print('Finished.')
|
3,848 | 599c5c02397f283eb00f7343e65c5cb977442e38 | from django import forms
from .models import Project
from user.models import User
from assets.models import Assets
class CreateProjectForm(forms.ModelForm):
    """Project creation form; rejects duplicate project names.

    Fix: corrected the user-facing typo 晴 -> 请 in the assets_set help_text.
    """
    project_name = forms.CharField(
        label='项目名',
        widget=forms.TextInput(
            attrs={"class": "form-control"}
        )
    )
    project_desc = forms.CharField(
        label='项目说明',
        required=False,
        widget=forms.Textarea(
            attrs={"class": "form-control", "cols": 40, "rows": 5}
        )
    )
    auth_users = forms.ModelMultipleChoiceField(
        label='授权用户',
        required=False,
        queryset=User.get_all(),
        widget=forms.SelectMultiple(
            attrs={"class": "form-control selectpicker", "data-live-search": "true", "data-size": "5",
                   "data-width": "100%", }
        )
    )
    assets_set = forms.ModelMultipleChoiceField(
        label="旗下资产",
        required=False,
        help_text="如果你从资产创建打开此页面,请忽略该项内容",
        queryset=Assets.get_all(),
        widget=forms.SelectMultiple(
            attrs={
                "class": "selectpicker", "data-live-search": "true", "data-size": "5",
                "data-width": "100%",
            }
        )
    )

    class Meta:
        model = Project
        fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']

    def clean_project_name(self):
        """Reject names that already belong to an existing project."""
        pro_name = self.cleaned_data['project_name']
        name = Project.get_by_name(pro_name)
        if name:
            raise forms.ValidationError("该项目已存在")
        return pro_name
class UpdateProjectForm(forms.ModelForm):
    """Project edit form.

    Fix: corrected the user-facing typo 晴 -> 请 in the assets_set help_text.
    NOTE(review): unlike CreateProjectForm there is no name-uniqueness clean
    here, so renaming onto an existing project name is allowed -- confirm intended.
    """
    project_name = forms.CharField(
        label='项目名',
        widget=forms.TextInput(
            attrs={"class": "form-control"}
        )
    )
    project_desc = forms.CharField(
        label='项目说明',
        required=False,
        widget=forms.Textarea(
            attrs={"class": "form-control", "cols": 40, "rows": 5}
        )
    )
    auth_users = forms.ModelMultipleChoiceField(
        label='授权用户',
        required=False,
        queryset=User.get_all(),
        widget=forms.SelectMultiple(
            attrs={"class": "form-control selectpicker", "data-live-search": "true", "data-size": "5",
                   "data-width": "100%", }
        )
    )
    assets_set = forms.ModelMultipleChoiceField(
        label="旗下资产",
        required=False,
        help_text="如果你从资产创建打开此页面,请忽略该项内容",
        queryset=Assets.get_all(),
        widget=forms.SelectMultiple(
            attrs={
                "class": "selectpicker", "data-live-search": "true", "data-size": "5",
                "data-width": "100%",
            }
        )
    )

    class Meta:
        model = Project
        fields = ['project_name', 'project_desc', 'auth_users', 'assets_set']
|
3,849 | dabc38db6a5c4d97e18be2edc9d4c6203e264741 | from django import forms
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import time
from page.models import Submit, Assignment
class UploadFileForm(forms.ModelForm):
    """ModelForm collecting a student's email, number and the submitted file."""
    class Meta:
        model = Submit
        fields = ['email', 'student_no', 'file']
@csrf_exempt
def upload(request):
    """Handle an HW3 submission upload and answer with JSON.

    Validates the posted form, stamps the submit with the HW3 assignment
    and the current time, then saves it.

    Fix: ``request.META['HTTP_ACCEPT']`` raised KeyError for clients that
    send no Accept header; use ``.get()`` with an empty default.
    """
    # TODO: check file size and type
    frm = UploadFileForm(request.POST, request.FILES)
    if not frm.is_valid():
        return JsonResponse({'error': frm.errors})
    submit = frm.save(commit=False)
    submit.assignment, _ = Assignment.objects.get_or_create(name='HW3')
    submit.time = time.time()
    submit.save()
    res = JsonResponse({'success': True})
    # INTERNET EXPLORER!! -- iframe uploads may not accept application/json;
    # serve plain text so IE does not prompt a download.
    if 'application/json' not in request.META.get('HTTP_ACCEPT', ''):
        res['Content-Type'] = 'text/plain'
    return res
|
3,850 | 2fd40f4d69223933d53d8ed2abd5f6d3ccd2f509 | from django.shortcuts import render
from django.views.generic.base import View
from .models import Article, Tag, Category
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
class ArticleView(View):
    """Article detail page."""
    def get(self, request, article_id):
        # Fetch the article and bump its view counter.
        # NOTE(review): read-modify-write counter races under concurrency;
        # consider F('views') + 1 -- confirm before changing.
        article = Article.objects.get(id=int(article_id))
        article.views += 1
        article.save()
        # Neighbours within the same category, by creation time.
        previous_article = Article.objects.filter(created_time__gt=article.created_time,
                                              category=article.category.id).first()
        next_article = Article.objects.filter(created_time__lt=article.created_time,
                                          category=article.category.id).last()
        # All tags attached to this article.
        tags = article.tags.all()
        relate_articles = Article.objects.all().order_by('?')[0:10]
        # Travel-guide picks: random sample; tags is many-to-many, hence prefetch.
        guide_articles = Article.objects.prefetch_related('tags').order_by('?')[:5]
        hot_articles = Article.objects.all().order_by('-views')[0:6]
        return render(request, 'article.html', {
            'article': article,
            'previous_article': previous_article,
            'next_article': next_article,
            'relate_articles': relate_articles,
            'guide_articles': guide_articles,
            'hot_articles': hot_articles,
            'tags': tags
        })
class CategoryView(View):
    """Category page: paginated article list for one category plus sidebar data.

    Fix: the original wrapped ``request.GET.get('page', 1)`` in
    ``try/except PageNotAnInteger``, but ``GET.get`` never raises that --
    ``Paginator.page()`` does, so an invalid ?page= crashed the view.
    The except now guards the actual ``p.page()`` call.  Also removed the
    unused ``category_articles_nums`` local.
    """
    def get(self, request, category_id):
        category = Category.objects.get(id=int(category_id))
        category_articles = category.article_set.all()
        new_articles = category_articles.order_by('-modified_time')
        category_hot_articles = category_articles.order_by('-views')[0:5]
        category_guide_articles = category_articles.order_by('?')[0:6]
        # Paginate newest-first articles, 10 per page; fall back to page 1
        # when the ?page= parameter is missing or not an integer.
        page = request.GET.get('page', 1)
        p = Paginator(new_articles, 10, request=request)
        try:
            category_all_articles = p.page(page)
        except PageNotAnInteger:
            category_all_articles = p.page(1)
        return render(request, 'category.html', {
            'category': category,
            'category_all_articles': category_all_articles,
            'category_hot_articles': category_hot_articles,
            'category_guide_articles': category_guide_articles,
        })
class A_listView(View):
    """Article list page: most-viewed articles plus recently modified guides."""
    def get(self, request):
        hot_articles = Article.objects.all().order_by('-views')[0:10]
        guide_articles = Article.objects.order_by('-modified_time')[0:26]
        return render(request, 'a_list.html', {
            'hot_articles': hot_articles,
            'guide_articles': guide_articles,
        })
|
3,851 | 5da61b4cd8e4faf135b49396d3b346a219bf73f6 | import os
from src.model_manager import ModelManager
# Test-fixture paths, resolved relative to this file so pytest can be run
# from any working directory.
dir_path = os.path.dirname(os.path.realpath(__file__))
config_file = '{}/data/config/config_1.json'.format(dir_path)
model_dir = '{}/data/models'.format(dir_path)
def test_init():
    """Smoke test: ModelManager constructs from the bundled sample config.

    Fix: the constructed instance was assigned to an unused local; assert
    on it so the test states its intent and the linter stays quiet.
    """
    mm = ModelManager(config_file, model_dir)
    assert mm is not None
def test_predict():
    # TODO: exercise ModelManager prediction once a model fixture is available.
    pass
|
3,852 | 5a2106f5255493d2f6c8cb9e06a2666c8c55ed38 | """
Suffix Arrays - Optimized O(n log n) - prefix doubling
A suffix is a non-empty substring at the end of the string. A suffix array
contains all the sorted suffixes of a string
A suffix array provides a space efficient alternative to a suffix tree which
itself is a compressed version of a trie. Suffix array can do something a suffix
tree can, with some additional information such as Longest Common Prefix (LCP)
array.
A suffix array can be constructed from Suffix tree by doing a DFS traversal of
the suffix tree. In fact Suffix array and suffix tree both can be constructed
from each other in linear time.
Advantages of suffix arrays over suffix trees include improved space
requirements, simpler linear time construction algorithms (e.g., compared to
Ukkonen’s algorithm) and improved cache locality
source: https://www.geeksforgeeks.org/suffix-array-set-2-a-nlognlogn-algorithm/
# Algorithm
1. The first step is to generate all the suffix starting with the whole string
and then looping through and producing the 1 to end, 2 to end etc until the
end character.
2. We assign current and next rank to the first two characters of the suffixes.
A simple rank could be str[i]-'a'. If no characters are found, set it to -1
Index Suffix Rank Next Rank
0 banana 1 0
1 anana 0 13
2 nana 13 0
3 ana 0 13
4 na 13 0
5 a 0 -1
3. Sort the array using the current and next rank
Index Suffix Rank Next Rank
5 a 0 -1
1 anana 0 13
3 ana 0 13
0 banana 1 0
2 nana 13 0
4 na 13 0
4. So far we sorted all the suffixes through first two characters. Now we do the
   next 4, 8, 16 and so on, doubling k while k < 2*len(n), and we
calculate the current and next rank the following way.
a. Current Rank - Assign 0 as the current rank for the first suffix. For
remaining suffixes, we take the rank pair from previous iteration i.e
(current rank, next rank) from the previous time and see if it's the
same as the rank pair of the previous suffix. If they are the same, set
current rank to same as previous suffix current rank, else increment by
1 and set it as current rank for the current suffix.
Index Suffix Rank
5 a 0 [Assign 0 to first]
1 anana 1 (0, 13) is different from previous
3 ana 1 (0, 13) is same as previous
0 banana 2 (1, 0) is different from previous
2 nana 3 (13, 0) is different from previous
4 na 3 (13, 0) is same as previous
b. Next Rank - suppose k is the loop and the initial value is 4, we take
the subarray from k/2 to end and see what current rank is assigned for
that suffix (i.e. suffix[k/2:].current_rank) and set that rank. If no
      suffix is found or there are no characters from k/2 to the end, set it to -1
Index Suffix Rank Next Rank
5 a 0 -1
1 anana 1 1
3 ana 1 0
0 banana 2 3
2 nana 3 3
4 na 3 -1
5. Now sort current and next rank
6. Proceed like this until k <= 2N
"""
def prefix_doubling_suffix_array(n):
    """Return the suffix array of string *n* via prefix doubling (O(n log^2 n)).

    The returned list holds the starting indices of all suffixes of *n*
    in lexicographic order.

    Fixes over the original:
    1. ``for k in range(4, 2 * n_len, 2 * k)`` evaluated the step once
       (8 with k=4), producing k = 4, 12, 20, ... instead of doubling
       k = 4, 8, 16, ... -- so strings longer than a few characters were
       compared over the wrong character windows.  Replaced with an
       explicit ``while`` loop that doubles k.
    2. Rank pairs were compared as concatenated strings
       (``str(cur) + str(next)``), so e.g. (1, 13) and (11, 3) both became
       "113" and could be wrongly merged.  Tuples are compared instead.
    """
    n_len = len(n)
    # base cases
    if n_len == 0:
        return []
    if n_len == 1:
        return [0]
    # Each entry is (start_index, info) where info carries the suffix text
    # and its current/next sorting ranks.
    suffixes = []
    for i in range(n_len):
        suf = n[i:]
        suffixes.append((i, {
            "suffix": suf,
            # Rank by the first character; -1 marks "no character".
            "current_rank": ord(suf[0]),
            "next_rank": ord(suf[1]) if len(suf) > 1 else -1,
        }))

    def rank_key(item):
        # Sort by (current_rank, next_rank); sorted()/list.sort are stable.
        return (item[1]["current_rank"], item[1]["next_rank"])

    # After this sort, suffixes are ordered by their first two characters.
    suffixes.sort(key=rank_key)
    # Double the compared prefix length each round until it covers the string.
    k = 4
    while k < 2 * n_len:
        # Reassign current ranks from the previous round's (current, next)
        # pairs: equal pairs share a rank, a new pair bumps it by one.
        prev_pair = (suffixes[0][1]["current_rank"], suffixes[0][1]["next_rank"])
        suffixes[0][1]["current_rank"] = 0
        # suffix text -> current rank, for the next_rank lookup below.
        curr_rank_ht = {suffixes[0][1]["suffix"]: 0}
        for i in range(1, n_len):
            pair = (suffixes[i][1]["current_rank"], suffixes[i][1]["next_rank"])
            if pair == prev_pair:
                suffixes[i][1]["current_rank"] = suffixes[i - 1][1]["current_rank"]
            else:
                suffixes[i][1]["current_rank"] = suffixes[i - 1][1]["current_rank"] + 1
            prev_pair = pair
            curr_rank_ht[suffixes[i][1]["suffix"]] = suffixes[i][1]["current_rank"]
        # next_rank is the current rank of the suffix starting k/2 later,
        # or -1 when this suffix is shorter than k/2 characters.
        for i in range(n_len):
            sub_suffix = suffixes[i][1]["suffix"][k // 2:]
            suffixes[i][1]["next_rank"] = curr_rank_ht.get(sub_suffix, -1)
        # Now ordered by the first k characters of every suffix.
        suffixes.sort(key=rank_key)
        k *= 2
    return [index for index, _ in suffixes]
# Demo: suffix array of "banana" (printed below; expected [5, 3, 1, 0, 4, 2]).
n = "banana"
suffix_array = prefix_doubling_suffix_array(n)
print(suffix_array) |
3,853 | 44e9fd355bfab3f007c5428e8a5f0930c4011646 | from flask import Flask, jsonify, abort, make_response
from matchtype import matchtyper
from db import db_handle
import sys
# Flask application instance serving the key/value lookup API below.
api = Flask(__name__)
@api.route('/get/<key_name>', methods=['GET'])
def get(key_name):
    """Look up *key_name* in the backing store and return it as JSON.

    Responds 404 when the key has no associated value.
    """
    record = db_handle(key_name)
    # Guard clause: missing value -> 404 (abort raises, so no else needed).
    if record[1] is None:
        abort(404)
    payload = matchtyper(record)
    return make_response(jsonify(payload))
@api.errorhandler(404)
def not_found(error):
    """Render 404 errors as a JSON body instead of the default HTML page."""
    body = {'error': 'Not found'}
    return make_response(jsonify(body), 404)
if __name__ == '__main__':
    # Development server entry point.
    # NOTE(review): debug=True enables the interactive debugger and code
    # reload — confirm this is never deployed to production as-is.
    api.debug = True
    api.run(host='localhost', port=8080)
|
3,854 | f87d08f3bb6faa237cce8379de3aaaa3270a4a34 | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from rasa_core.actions.action import Action
from rasa_core.events import SlotSet
from rasa_core.dispatcher import Button, Element, Dispatcher
import json
import pickle
class ActionWeather(Action):
    """Custom Rasa action that lists the doctors of a requested department.

    Reads the ``department`` slot from the tracker and presents the doctors
    working in that department as buttons for the user to pick from.
    """

    # Department -> list of (button title, button payload) pairs.
    # The data was moved out of the original 200-line if/elif chain so that
    # adding a department is a one-line change and no branch can be missed.
    # Titles and payloads are byte-for-byte the originals.
    _DOCTORS = {
        'algology': [
            ("Prof. Dr. Öznur Öken", "/Dr1"),
        ],
        'brain and neurosurgery': [
            ("Doç. Dr. Gülşah Bademci", "/btn1"),
            ("Doç. Dr. Suat CANBAY", "/btn2"),
        ],
        'child hematology': [
            ("Prof. Dr. Hatice Emel Özyürek", "/btn1"),
        ],
        'child nephrology': [
            ("Prof. Dr. Süleyman Kalman", "/btn1"),
        ],
        'child health and illness': [
            ("Prof. Dr. Musa Kazım Çağlar", "/btn1"),
            ("Prof. Dr. Süleyman Kalman", "/btn2"),
            ("Prof. Dr. Hatice Emel Özyürek", "/btn3"),
            ("Yar. Doç. Dr. Pakize Elif Alkışn", "/btn4"),
            ("Uzm. Dr. Mustafa Yücel Kızıltan", "/btn5"),
            ("Uzm. Dr. Gökalp Başbozkurt", "/btn6"),
            ("Uzm. Dr. Hafsa Uçur", "/btn7"),
            ("Uzm. Dr. Hüsniye Altan", "/btn8"),
            ("Uzm. Dr. Sarkhan Elbayıyev", "/btn9"),
            ("Uzm. Dr. Shahın Guliyev", "/btn10"),
        ],
        'dermatology': [
            ("Uzm. Dr. Aylin Gözübüyükoğulları", "/Dr1"),
            ("Uzm. Dr. Yeşim Akpınar Kara", "/Dr2"),
        ],
        'diet policlinic': [
            ("Uzm. Dyt. Gaye Başkurt", "/Dr1"),
            ("Dyt. Deniz Özdemir", "/Dr2"),
            ("Dyt. Halime Besler", "/Dr3"),
        ],
        'endocrinology': [
            ("Prof. Dr. Serdar Güler", "/Dr1"),
        ],
        'infectious diseases': [
            ("Uzm. Dr. Mine Işık Arıgün", "/Dr1"),
        ],
        'physical therapy and rehabilitation': [
            ("Prof. Dr. Öznur Öken", "/Dr1"),
            ("Uzm. Dr. Beril Özturan", "/Dr2"),
        ],
        'gastroenterology': [
            ("Doç. Dr. Reskan Altun", "/Dr1"),
            ("Doç. Dr. Yasemin Özderin Özin", "/Dr2"),
        ],
        'general surgery': [
            ("Prof. Dr. Mehmet Mahir Özmen", "/Dr1"),
            ("Yar. Doç. Dr. Cem Emir Güldoğan", "/Dr2"),
            ("Yar. Doç. Dr. Emre Gündoğdu", "/Dr3"),
        ],
        'chest diseases': [
            ("Prof. Dr. Uğur Gönüllü", "/Dr1"),
        ],
        'eye diseases': [
            ("Op. Dr. Samim Özdeş", "/Dr1"),
        ],
        'hematology policlinic': [
            ("Prof. Dr. Oral Nevruz", "/Dr1"),
        ],
        'internal diseases': [
            ("Doç. Dr. Beril Akman", "/Dr1"),
            ("Uzm. Dr. Sercan Cansaran", "/Dr2"),
            ("Uzm. Dr. Sevgi Karabuğa", "/Dr3"),
            ("Yar. Doç. Dr. Gökhan Celbek", "/Dr4"),
        ],
        'gynecology and Obstetrics': [
            ("Yar. Doç. Dr. Müberra Namlı Kalem", "/Dr1"),
            ("Yar. Doç. Dr. Coşkun Şimşir", "/Dr2"),
            ("Prof. Dr. Ali Ergün", "/Dr3"),
            ("Doç. Dr. Korhan Kahraman", "/Dr4"),
            ("Doç. Dr. Turgut Var", "/Dr5"),
            ("Doç. Dr. Türkan Örnek Gülpınar", "/Dr6"),
            ("Op. Dr. Aslı Yücetürk", "/Dr7"),
            ("Op. Dr. Ebru Yüce", "/Dr8"),
            ("Prof. Dr. Timur Gürgan", "/Dr9"),
        ],
        'cardiac surgery': [
            ("Prof. Dr. Erol Şener", "/Dr1"),
            ("Yar. Doç. Dr. Emre Boysan", "/Dr2"),
            ("Yar. Doç. Renda Cırcı", "/Dr3"),
        ],
        'cardiology': [
            ("Prof. Dr. Erdoğan İlkay", "/Dr1"),
            ("Doç. Dr. Alper Canbay", "/Dr2"),
            ("Uzm. Dr. Çiğdem Koca Tarı", "/Dr3"),
            ("Uzm. Dr. Erol Kalender", "/Dr4"),
        ],
        'ENT diseases': [
            ("Prof. Dr. Ali Altuntaş", "/Dr1"),
            ("Prof. Dr. Serdar Karahatay", "/Dr2"),
            ("Yar. Doç Dr. Canset Aydın", "/Dr3"),
        ],
        'nephrology': [
            ("Doç. Dr. Beril Akman", "/Dr1"),
        ],
        'neurology': [
            ("Prof. Dr. Mehmet Zülküf Önal", "/Dr1"),
            ("Yar. Doç. Dr. Akçay Övünç Ozon", "/Dr2"),
        ],
        'orthopedics and traumatology': [
            ("Yar. Doç. Dr. Uğur Gönç", "/Dr1"),
            ("Op. Dr. Mesut Atabek", "/Dr2"),
            ("Prof. Dr. levent Çelebi", "/Dr3"),
        ],
        'plastic surgery': [
            ("Op. Dr. Ergin Işık", "/Dr1"),
            ("Op. Dr. Serdar Düzgün", "/Dr2"),
        ],
        'psychiatry': [
            ("Prof. Dr. Ali Bozkurt", "/Dr1"),
        ],
        'psychologist': [
            ("Psk. Ezgi Kılınç", "/Dr1"),
        ],
        'rheumatology': [
            ("Doç. Dr. Orhan Küçükşahin", "/Dr1"),
        ],
        'medical oncology': [
            ("Prof. Dr. Fikret Arpacı", "/Dr1"),
            ("Doç. Dr. Gökhan Erdem", "/Dr2"),
        ],
        # No doctors available for urology; see NOTE in run() about the
        # original (clobbered) "Müsait doktor bulunmamaktadır..." message.
        'urology': [],
    }

    def name(self):
        """Identifier under which Rasa registers this action."""
        return 'action_doctor'

    def run(self, dispatcher, tracker, domain):
        """Show doctor-selection buttons for the department slot value."""
        loc = tracker.get_slot('department')
        # BUG FIX: the original if/elif chain never assigned ``buttons`` for
        # 'urology' or for an unknown department, which raised NameError at
        # the dispatcher call below.  Unknown departments now yield an empty
        # button list instead of crashing.
        buttons = [Button(title=title, payload=payload)
                   for title, payload in self._DOCTORS.get(loc, [])]
        # NOTE(review): the original code unconditionally reset ``response``
        # to "" right before returning, so the 'doctor' slot was always set
        # to the empty string; that observable behavior is preserved here.
        response = ""
        dispatcher.utter_button_message("my message", buttons)
        return [SlotSet('doctor', response)]
|
3,855 | 309090167c2218c89494ce17f7a25bd89320a202 | from google.appengine.api import users
from google.appengine.ext import ndb
from datetime import datetime
from datetime import timedelta
import os
import logging
import webapp2
import jinja2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class UserProfile(ndb.Model):
    """Models the profile (JSON) of an individual user."""
    # Raw JSON blob of the user's profile, stored as opaque text.
    profile = ndb.TextProperty()
    # Creation timestamp, filled in automatically on first put().
    date = ndb.DateTimeProperty(auto_now_add=True)
    @classmethod
    def query_profile(cls, ancestor_key):
        # Return the first profile entity under the given ancestor (or None).
        return cls.query(ancestor=ancestor_key).get()
class UserProfileHandler(webapp2.RequestHandler):
    """Serves (GET) and stores (POST) the JSON profile of a user."""

    def get(self):
        """Render the profile page for the requested user.

        An empty ``?user=`` parameter means "the currently signed-in user",
        in which case the page is rendered in owner mode.
        """
        template = JINJA_ENVIRONMENT.get_template('templates/profile.html')
        requested_user = self.request.get('user')
        logging.info("The user = " + requested_user)
        owner = requested_user == ""
        if owner:
            requested_user = users.get_current_user().email()
        values = {'owner': owner, 'user': requested_user}
        profile_entity = UserProfile.get_by_id(requested_user)
        if profile_entity:
            values['profile_data'] = profile_entity.profile
            logging.info(profile_entity)
        self.response.out.write(template.render(values))

    def post(self):
        """Persist the submitted profile JSON, then bounce back to the page."""
        current_user = users.get_current_user()
        submitted = self.request.get('profile_data')
        UserProfile(id=current_user.email(), profile=submitted).put()
        self.redirect('/profile')
# WSGI application: a single route mapping /profile to the handler above.
# NOTE(review): debug=True exposes stack traces to clients — confirm dev-only.
app = webapp2.WSGIApplication([
    ('/profile', UserProfileHandler),
], debug=True)
|
def generator(factor, modulus=-1, maxx=2147483647):
    """Build the advance function of a multiplicative congruential generator.

    Each call to the returned function maps the previous value to
    ``(prev * factor) % maxx``.  When ``modulus`` is positive, intermediate
    values are skipped until one divisible by ``modulus`` appears
    (Advent of Code 2017 day 15, part 2 behaviour).

    Args:
        factor: multiplier of the recurrence.
        modulus: if > 0, only yield values divisible by this.
        maxx: modulus of the recurrence (2**31 - 1, a Mersenne prime).

    Returns:
        A function mapping the previous value to the next accepted one.
    """
    def advance(prev):
        # Renamed from ``next`` to avoid shadowing the builtin of that name.
        value = (prev * factor) % maxx
        if modulus > 0:
            # Part-2 rule: keep generating until the divisibility test passes.
            while value % modulus != 0:
                value = (value * factor) % maxx
        return value
    return advance
def main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):
    """Count rounds in which generators A and B agree on their low 16 bits.

    Args:
        a, b: seed values for generators A and B.
        a_mod, b_mod: part-2 divisibility criteria (-1 disables them).
        N: number of pairs to judge.
        a_fact, b_fact: the generators' multiplication factors.

    Returns:
        The number of pairs whose lowest 16 bits match.
    """
    genA = generator(a_fact, a_mod)
    genB = generator(b_fact, b_mod)
    matches = 0
    mask = 0xFFFF  # low 16 bits; clearer than the original (0xFF << 8) + 0xFF
    for _ in range(N):
        a = genA(a)
        b = genB(b)
        # bool is an int subclass, so this adds 1 on a match and 0 otherwise
        # (replaces the opaque ``[0, 1][condition]`` indexing trick).
        matches += (a & mask) == (b & mask)
    return matches
if __name__ == '__main__':
    # Worked examples from the puzzle statement, kept for reference:
    #example
    #print(main(65, 8921))
    #print(main(65,8921,4,8,2000))
    #print(main(65,8921,4,8,5000000))
    #PART 1
    #print(main(634,301))
    #PART 2
    # Part 2 with the real puzzle input (5 million judged pairs — slow).
    print(main(634,301,4,8,5000000))
|
3,857 | fc9742ceb3c38a5f8c1ad1f030d76103ba0a7a81 | # Generated by Django 3.2.7 on 2021-09-23 07:33
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the unused 'hello' field from
    # SmsLogModel.  Do not edit by hand beyond what Django expects.

    dependencies = [
        ('sms_consumer', '0006_auto_20210923_0733'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='smslogmodel',
            name='hello',
        ),
    ]
|
3,858 | e6c7b15e5b42cfe6c5dec2eaf397b67afd716ebd | myfavoritenumber = 5
print(myfavoritenumber)
# Rebinding: x goes from 5 to 6 before being printed.
x=5
x=x+1
print(x)
# Tuple unpacking assigns all three names in one statement.
x,y,z=1,2,3
print(x,y,z)
|
3,859 | 03e92eae4edb4bdbe9fa73e39e7d5f7669746fe5 | from integral_image import calc_integral_image
class Region:
    """Rectangular region (x, y, width, height) over a summed-area table."""

    def __init__(self, x, y, width, height):
        # Top-left corner and extent, in integral-image coordinates.
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def calc_feature(self, cumul_sum):
        """Return the sum of the pixels inside this region.

        ``cumul_sum`` is a summed-area table with a zero padding row and
        column, i.e. ``cumul_sum[r][c]`` is the sum of all pixels strictly
        above and to the left of (r, c).
        """
        yy = self.y + self.height
        xx = self.x + self.width
        # BUG FIX: the original referenced bare ``x`` and ``y`` (NameError);
        # they must be the instance attributes set in __init__.
        return (cumul_sum[yy][xx] - cumul_sum[yy][self.x]
                - cumul_sum[self.y][xx] + cumul_sum[self.y][self.x])
|
3,860 | 921c45af3ba34a1b12657bf4189fc8dd66fa44a6 | import tensorflow as tf
import numpy as np
import tensorflow_datasets as tfds
print(tf.__version__)
# Download the IMDB reviews dataset as (text, label) pairs.
imdb, info = tfds.load("imdb_reviews", with_info=True, as_supervised=True)
train_data = imdb['train']
test_data = imdb['test']
# 25000 in each set
training_sentences = []
training_labels = []
testing_sentences = []
testing_labels = []
# Materialize the tf.data pipelines into plain Python lists; str(s.numpy())
# stringifies the raw bytes object of each review.
for s, l in train_data:
    training_sentences.append(str(s.numpy()))
    training_labels.append(l.numpy())
for s, l in test_data:
    testing_sentences.append(str(s.numpy()))
    testing_labels.append(l.numpy())
# Keras fit() expects numpy arrays for the labels, not Python lists.
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
# Text-vectorization hyper-parameters.
vocab_size = 10000
embedding_dim = 16
max_length = 120
trunc_type = 'post'
oov_tok = "<OOV>"
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Fit the vocabulary on the training set only, then encode both splits.
tokenizer = Tokenizer(num_words = vocab_size, oov_token = oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(training_sentences)
padded = pad_sequences(sequences, maxlen = max_length, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
# NOTE(review): the test split is padded without truncating=trunc_type —
# confirm whether the asymmetry with the training split is intentional.
testing_padded = pad_sequences(testing_sequences, maxlen=max_length)
# Embedding -> Flatten -> small dense classifier (binary sentiment output).
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length = max_length),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
]
)
# with global average pooling
# model = tf.keras.Sequential([
#   tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length = max_length),
#   tf.keras.layers.GlobalAveragePooling1D(),
#   tf.keras.layers.Dense(6, activation='relu'),
#   tf.keras.layers.Dense(1, activation='sigmoid')
# ]
# )
model.compile(loss = 'binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
num_epochs = 10
model.fit(padded, training_labels_final, epochs=num_epochs, validation_data=(testing_padded, testing_labels_final), verbose=2)
# Inspect the learned embedding matrix of the first layer.
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # (vocab_size, embedding_dim) |
3,861 | 88071df9367804b1c6e2b1c80da178ab7658e7a4 | # Copyright (c) 2018, Raul Astudillo
import numpy as np
from copy import deepcopy
class BasicModel(object):
    """
    Minimal model that only remembers the evaluated points and the outputs
    observed at them.
    """
    # This "model" has closed-form gradients (trivially), mirroring the flag
    # used by richer model classes.
    analytical_gradient_prediction = True

    def __init__(self, output_dim=None, X=None, Y=None):
        self.output_dim = output_dim
        self.X = X
        self.Y = Y
        self.name = 'basic model'

    def updateModel(self, X, Y):
        """Replace the stored observations with the given ones."""
        self.X = X
        self.Y = Y

    def get_X(self):
        """Return a defensive (numpy) copy of the evaluated inputs."""
        return np.copy(self.X)

    def get_Y(self):
        """Return a deep copy of the recorded outputs."""
        return deepcopy(self.Y)

    def get_XY(self):
        """Return copies of both the inputs and the outputs."""
        return np.copy(self.X), deepcopy(self.Y)

    def get_model_parameters(self):
        """No parameters to expose; kept for interface compatibility."""
        pass

    def get_model_parameters_names(self):
        """No parameter names to expose; kept for interface compatibility."""
        pass
|
3,862 | d0997f5001090dd8925640cd5b0f3eb2e6768113 | #!/usr/bin/env python
from pymongo import MongoClient
import serial
import sys, os, datetime
# Configure the serial line for the French "teleinfo" meter output:
# 1200 baud, 7 data bits, even parity (Python 2 script).
os.system('sudo stty -F /dev/ttyS0 1200 sane evenp parenb cs7 -crtscts')
SERIAL = '/dev/ttyS0'
try:
    ser = serial.Serial(
        port=SERIAL,
        baudrate = 1200,
        parity=serial.PARITY_EVEN,
        stopbits=serial.STOPBITS_ONE,
        bytesize=serial.SEVENBITS,
        timeout=1)
except:
    # Could not open the serial port; report and exit.
    print "Impossible d'ouvrir le port serie" + SERIAL
    print sys.exc_info()
    sys.exit(1)
# 2. Read one complete frame from the meter.
compteur=0
# NOTE(review): ``compteur`` is never used below — confirm it can be removed.
data = {}
#'Periode':'HP','IndexHCreuses': "019728489",'IndexHPleines':'019728489','InstantI1':'027','InstantI2':'027','InstantI3':'027','IMaxi1':'027','IMaxi2':'027','IMaxi3':'028','PuissanceApp':'02695','PuissanceMax':'13160'}
# Sentinel trick: ADCO starts as the literal 'ADCO'; the first ADCO line
# stores the meter id, and the *second* ADCO line (start of the next frame)
# triggers the break — so exactly one full frame is captured.
ADCO ='ADCO'
while True :
    trame=ser.readline().strip()
    listeTrame = trame.split(' ')
    if len(listeTrame)>1 :
        key, value = listeTrame[0], listeTrame[1]
        print key + ":" + value
        if key == "ADCO" :
            if 'ADCO' not in ADCO : break
            ADCO = value
        # PTEC is the current tariff period; only the first two chars
        # ('HC'/'HP') are useful.
        elif key == "PTEC" : data['Periode'] = value[:2]
        elif key == "HCHC" : data['IndexHCreuses'] = int(value)
        elif key == "HCHP" : data['IndexHPleines'] = int(value)
        elif key == "IINST1" : data['InstantI1'] = int(value)
        elif key == "IINST2" : data['InstantI2'] = int(value)
        elif key == "IINST3" : data['InstantI3'] = int(value)
        elif key == "IMAX1" : data['IMaxi1'] = int(value)
        elif key == "IMAX2" : data['IMaxi2'] = int(value)
        elif key == "IMAX3" : data['IMaxi3'] = int(value)
        elif key == "PAPP" : data['PuissanceApp'] = int(value)
        elif key == "PMAX" : data['PuissanceMax'] = int(value)
# Timestamp the measurement and persist it to MongoDB.
dateDeMesure = datetime.datetime.utcnow()
data['dateMesure'] = dateDeMesure
# NOTE(review): credentials are hard-coded in the connection URI — move them
# to configuration/secrets before sharing this script.
clientMongo = MongoClient('mongodb://bber:cab32b79@nounours:27017/')
db = clientMongo.teleinfo
collec = db.conso
print (data)
un_id=collec.insert_one(data).inserted_id
print (un_id)
ser.close()
|
3,863 | f2abb7ea3426e37a10e139d83c33011542e0b3d1 | from .menu import menu
from .create_portfolio import create_portfolio
from .search import search
from .list_assets import list_assets
from .add_transaction import add_transaction
from .stats import stats
from .info import info |
3,864 | 0402096f215ae600318d17bc70e5e3067b0a176b | from django.core.paginator import Paginator, EmptyPage
from django.shortcuts import render
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin
from logging import getLogger
from django_redis import get_redis_connection
from decimal import Decimal
import json
from django import http
from django.utils import timezone
from django.db import transaction
from users.models import Address
from goods.models import SKU
from meiduo_mall.utils import constants
from meiduo_mall.utils.auth_backend import LoginRequiredJsonMixin
from .models import OrderInfo, OrderGoods
from meiduo_mall.utils.response_code import RETCODE, err_msg
logger = getLogger('django')
class GoodsCommentView(View):
    """Read-only list of reviews for one SKU (shown on the detail page)."""

    def get(self, request, sku_id):
        """Return the most recent reviews of ``sku_id`` as JSON."""
        recent = OrderGoods.objects.filter(
            sku_id=sku_id, is_commented=True
        ).order_by('-create_time')[:constants.COMMENTS_LIST_LIMIT]
        comment_list = []
        for item in recent:
            author = item.order.user.username
            # Anonymous reviews show only the first and last character.
            if item.is_anonymous:
                author = author[0] + '***' + author[-1]
            comment_list.append({
                'username': author,
                'comment': item.comment,
                'score': item.score,
            })
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK], 'comment_list': comment_list})
class OrderCommentView(LoginRequiredMixin, View):
    """Order-goods review: show the review page and accept submitted reviews."""
    def get(self, request):
        """Render the review page for the order's not-yet-reviewed goods."""
        # Receive parameters
        order_id = request.GET.get('order_id')
        # Validate: the order must exist and belong to the current user
        try:
            OrderInfo.objects.get(order_id=order_id, user=request.user)
        except OrderInfo.DoesNotExist:
            return http.HttpResponseNotFound('订单不存在')
        # Query the goods of this order that have not been reviewed yet
        try:
            uncomment_goods = OrderGoods.objects.filter(order_id=order_id, is_commented=False)
        except Exception as e:
            logger.error(e)
            return http.HttpResponseServerError('订单商品信息出错')
        # Build the template payload for the goods awaiting review
        uncomment_goods_list = []
        for goods in uncomment_goods:
            uncomment_goods_list.append({
                'order_id': goods.order.order_id,
                'sku_id': goods.sku.id,
                'name': goods.sku.name,
                'price': str(goods.price),  # str() so the Decimal serializes cleanly
                'default_image_url': goods.sku.default_image.url,
                'comment': goods.comment,
                'score': goods.score,
                'is_anonymous': str(goods.is_anonymous),
            })
        # Render the template
        context = {
            'uncomment_goods_list': uncomment_goods_list
        }
        return render(request, 'goods_judge.html', context)
    def post(self, request):
        """Store one submitted review; finish the order once all are in."""
        # Receive parameters (JSON body)
        json_dict = json.loads(request.body.decode())
        order_id = json_dict.get('order_id')
        sku_id = json_dict.get('sku_id')
        score = json_dict.get('score')
        comment = json_dict.get('comment')
        is_anonymous = json_dict.get('is_anonymous')
        # Validate parameters
        if not all([order_id, sku_id, score, comment]):
            return http.HttpResponseForbidden('缺少必传参数')
        try:
            # NOTE(review): filter() is lazy and never raises DoesNotExist,
            # so this check cannot fail as written — confirm whether get()
            # was intended here.
            OrderInfo.objects.filter(order_id=order_id, user=request.user, status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])
        except OrderInfo.DoesNotExist:
            return http.HttpResponseForbidden('参数order_id错误')
        try:
            sku = SKU.objects.get(id=sku_id)
        except SKU.DoesNotExist:
            return http.HttpResponseForbidden('参数sku_id错误')
        if is_anonymous:
            if not isinstance(is_anonymous, bool):
                return http.HttpResponseForbidden('参数is_anonymous错误')
        # Run the following database operations as a single transaction
        with transaction.atomic():
            # Create a savepoint before touching the database
            save_id = transaction.savepoint()
            try:
                # Store the review on the (still unreviewed) order-goods row
                OrderGoods.objects.filter(order_id=order_id, sku_id=sku_id, is_commented=False).update(
                    comment=comment,
                    score=score,
                    is_anonymous=is_anonymous,
                    is_commented=True
                )
                # Bump the comment counters on the SKU and its SPU
                sku.comments += 1
                sku.save()
                sku.spu.comments += 1
                sku.spu.save()
                # When every goods row of the order is reviewed, mark it finished
                if OrderGoods.objects.filter(order_id=order_id, is_commented=False).count() == 0:
                    OrderInfo.objects.filter(order_id=order_id).update(status=OrderInfo.ORDER_STATUS_ENUM['FINISHED'])
            # For unexpected database errors, roll back to the savepoint
            except Exception as e:
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                return http.JsonResponse({'code': RETCODE.COMMITMENTERR, 'errmsg': err_msg[RETCODE.COMMITMENTERR]})
            else:
                # Release the savepoint (commit happens when atomic() exits)
                transaction.savepoint_commit(save_id)
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK]})
class UserOrderInfoView(LoginRequiredMixin, View):
    """'My orders' page."""
    def get(self, request, page_num):
        """Render one page of the current user's orders."""
        user = request.user
        # Query the user's orders, newest first
        orders = user.orderinfo_set.all().order_by("-create_time")
        # Decorate every order for the template
        for order in orders:
            # Attach the human-readable status name
            # (choices are 1-based, hence the -1 index)
            order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status-1][1]
            # Attach the human-readable payment-method name
            order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.pay_method-1][1]
            order.sku_list = []
            # Query the goods belonging to this order
            order_goods = order.skus.all()
            # Attach per-SKU count and line total
            for order_good in order_goods:
                sku = order_good.sku
                sku.count = order_good.count
                sku.amount = sku.price * sku.count
                order.sku_list.append(sku)
        # Paginate
        page_num = int(page_num)
        try:
            paginator = Paginator(orders, constants.ORDERS_LIST_LIMIT)
            page_orders = paginator.page(page_num)
            total_page = paginator.num_pages
        except EmptyPage:
            return http.HttpResponseNotFound('订单不存在')
        context = {
            "page_orders": page_orders,
            'total_page': total_page,
            'page_num': page_num,
        }
        return render(request, "user_center_order.html", context)
class OrderSuccessView(LoginRequiredMixin, View):
    """Confirmation page shown right after an order has been submitted."""

    def get(self, request):
        """Echo the order id, amount and payment method into the template."""
        params = request.GET
        context = {
            'order_id': params.get('order_id'),
            'payment_amount': params.get('payment_amount'),
            'pay_method': params.get('pay_method'),
        }
        return render(request, 'order_success.html', context)
class OrderCommitView(LoginRequiredJsonMixin, View):
    """Submit an order."""
    def post(self, request):
        """Persist the order header and its goods rows atomically."""
        # Receive parameters (JSON body)
        json_dict = json.loads(request.body.decode())
        address_id = json_dict.get('address_id')
        pay_method = json_dict.get('pay_method')
        # Validate parameters
        if not all([address_id, pay_method]):
            return http.HttpResponseForbidden('缺少必传参数')
        # Check that address_id refers to an existing address
        try:
            address = Address.objects.get(id=address_id)
        except Exception as e:
            logger.error(e)
            return http.HttpResponseForbidden('参数address_id错误')
        # Check that pay_method is one of the supported methods
        if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:
            return http.HttpResponseForbidden('参数pay_method错误')
        # Run the following database operations as a single transaction
        with transaction.atomic():
            # Create a savepoint before touching the database
            save_id = transaction.savepoint()
            # Current logged-in user
            user = request.user
            # Order number: timestamp + zero-padded user id,
            # e.g. '2020123113041200000001'
            order_id = timezone.localtime().strftime('%Y%m%d%H%M%S') + '{:0>9d}'.format(user.id)
            try:
                # Save the order header (the "one" side)
                order = OrderInfo.objects.create(
                    order_id=order_id,
                    user=user,
                    address=address,
                    total_count=0,  # placeholder; accumulated from the goods below
                    total_amount=Decimal('0.00'),  # placeholder; accumulated below
                    freight=Decimal(constants.ORDERS_FREIGHT_COST),
                    pay_method=pay_method,
                    # Alipay orders start as UNPAID; cash-on-delivery as UNSEND
                    status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.ORDER_STATUS_ENUM['UNSEND']
                )
                # Save the order goods (the "many" side)
                # Read the checked cart items from redis
                redis_conn = get_redis_connection('carts')
                # Quantities of everything in the cart
                redis_cart = redis_conn.hgetall('carts_%s' % user.id)
                # sku_ids of the checked items
                redis_selected = redis_conn.smembers('selected_{}'.format(user.id))
                # Build {sku_id: count} for the checked items only
                new_cart_dict = {}
                for sku_id in redis_selected:
                    new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])
                # sku_ids of the checked items
                sku_ids = new_cart_dict.keys()
                for sku_id in sku_ids:
                    # Optimistic lock: retry this item until stock runs out
                    # or the compare-and-swap update succeeds
                    while True:
                        # Re-read the SKU each attempt (must not be cached,
                        # hence no filter(id__in=sku_ids) prefetch)
                        sku = SKU.objects.get(id=sku_id)
                        # Quantity ordered for this SKU
                        sku_count = new_cart_dict[sku.id]
                        # Snapshot the original stock and sales
                        origin_stock = sku.stock
                        origin_sales = sku.sales
                        # # simulate network latency
                        # import time
                        # time.sleep(5)
                        # Not enough stock: report and abort the whole order
                        if sku_count > origin_stock:
                            # Roll back to the savepoint
                            transaction.savepoint_rollback(save_id)
                            print(request.user, '库存不足')
                            return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})
                        # Enough stock: decrement stock, increment sales
                        new_stock = origin_stock - sku_count
                        new_sales = origin_sales + sku_count
                        # Compare-and-swap: only update if stock is unchanged
                        result = SKU.objects.filter(id=sku_id, stock=origin_stock).update(stock=new_stock, sales=new_sales)
                        # result == 0 means another request changed the row
                        # in the meantime — retry this item
                        if result == 0:
                            continue
                        # SPU sales counter
                        sku.spu.sales += sku_count
                        sku.spu.save()
                        OrderGoods.objects.create(
                            order=order,
                            sku=sku,
                            count=sku_count,
                            price=sku.price,
                        )
                        # Accumulate the order totals
                        order.total_count += sku_count
                        order.total_amount += (sku_count * sku.price)
                        # This item succeeded — leave the retry loop
                        break
                # Add freight and persist the final totals
                order.total_amount += order.freight
                order.save()
            # For unexpected database errors, roll back to the savepoint
            except Exception as e:
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR, 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})
            else:
                # Release the savepoint (commit happens when atomic() exits)
                transaction.savepoint_commit(save_id)
        # Remove the ordered items from the cart
        pl = redis_conn.pipeline()
        pl.hdel('carts_%s' % user.id, *redis_selected)
        pl.srem('selected_%s' % user.id, *redis_selected)
        try:
            pl.execute()
        except Exception as e:
            logger.error(e)
            return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR, 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})
        else:
            # Success: hand the order id back to the client
            return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK], 'order_id': order_id})
class OrderSettlementView(LoginRequiredMixin, View):
    """Order settlement (checkout) page."""
    def get(self, request):
        """Collect and render the data needed to settle the order."""
        # Current logged-in user
        user = request.user
        # Query the user's non-deleted shipping addresses
        try:
            addresses = Address.objects.filter(user=user, is_deleted=False)
        except Exception as e:
            logger.error(e)
            # No addresses found; the user can go add one
            # NOTE(review): filter() itself rarely raises — confirm this
            # except branch is reachable.
            addresses = None
        # Read the checked cart items from redis
        redis_conn = get_redis_connection('carts')
        # Quantities of everything in the cart
        redis_cart = redis_conn.hgetall('carts_%s' % user.id)
        # sku_ids of the checked items
        redis_selected = redis_conn.smembers('selected_{}'.format(user.id))
        # Build {sku_id: count} for the checked items only
        new_cart_dict = {}
        for sku_id in redis_selected:
            new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])
        # sku_ids of the checked items
        sku_ids = new_cart_dict.keys()
        # Fetch the SKU rows of the checked items
        skus = SKU.objects.filter(id__in=sku_ids)
        # Total item count and total amount
        total_count = 0
        total_amount = Decimal(0.00)  # or Decimal('0.00')
        for sku in skus:
            # Decorate each sku with count and amount (line total)
            sku.count = new_cart_dict[sku.id]
            sku.amount = sku.price * sku.count  # Decimal
            # Accumulate the totals
            total_count += sku.count
            total_amount += sku.amount
        # Build the template context
        context = {
            'addresses': addresses,
            'skus': skus,
            'total_count': total_count,
            'total_amount': total_amount,
            'freight': constants.ORDERS_FREIGHT_COST,  # shipping cost
            'payment_amount': Decimal(constants.ORDERS_FREIGHT_COST) + total_amount,
        }
        return render(request, 'place_order.html', context)
|
3,865 | 62d0818395a6093ebf2c410aaadeb8a0250707ab | # This is a generated file, do not edit
from typing import List
import pydantic
from ..rmf_fleet_msgs.DockParameter import DockParameter
class Dock(pydantic.BaseModel):
    """Pydantic model mirroring the ROS message ``rmf_fleet_msgs/Dock``.

    Auto-generated; do not edit by hand.
    """
    # Name of the fleet the dock request addresses.
    fleet_name: str = ""  # string
    # Docking parameters for the robot to execute.
    params: List[DockParameter] = []  # rmf_fleet_msgs/DockParameter
    class Config:
        # Allow construction from attribute-style (ORM) objects (pydantic v1).
        orm_mode = True
# string fleet_name
# DockParameter[] params
|
3,866 | 47cee0c659976a2b74e2bb07f6c4d622ceab7362 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, Collection, Container, Iterable, Sequence
from flask import g
from sqlalchemy import or_, select
from sqlalchemy.orm import joinedload
from airflow.auth.managers.fab.models import Permission, Resource, Role, User
from airflow.auth.managers.fab.views.permissions import (
ActionModelView,
PermissionPairModelView,
ResourceModelView,
)
from airflow.auth.managers.fab.views.roles_list import CustomRoleModelView
from airflow.auth.managers.fab.views.user import (
CustomUserDBModelView,
CustomUserLDAPModelView,
CustomUserOAuthModelView,
CustomUserOIDModelView,
CustomUserRemoteUserModelView,
)
from airflow.auth.managers.fab.views.user_edit import (
CustomResetMyPasswordView,
CustomResetPasswordView,
CustomUserInfoEditView,
)
from airflow.auth.managers.fab.views.user_stats import CustomUserStatsChartView
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.models import DagBag, DagModel
from airflow.security import permissions
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.www.extensions.init_auth_manager import get_auth_manager
from airflow.www.fab_security.sqla.manager import SecurityManager
from airflow.www.utils import CustomSQLAInterface
# Role names that ship with Airflow, used to distinguish built-in roles
# from user-defined ones.
EXISTING_ROLES = {
    "Admin",
    "Viewer",
    "User",
    "Op",
    "Public",
}
if TYPE_CHECKING:
    from sqlalchemy.orm import Session
    # For static analysis only: any class is acceptable as the override base.
    SecurityManagerOverride: type = object
else:
    # Fetch the security manager override from the auth manager
    # (resolved at runtime so the configured auth manager decides the base).
    SecurityManagerOverride = get_auth_manager().get_security_manager_override_class()
class AirflowSecurityManager(SecurityManagerOverride, SecurityManager, LoggingMixin):
"""Custom security manager, which introduces a permission model adapted to Airflow."""
###########################################################################
# PERMISSIONS
###########################################################################
# [START security_viewer_perms]
VIEWER_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CLUSTER_ACTIVITY),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_DEPENDENCIES),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DATASET),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CLUSTER_ACTIVITY),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
]
# [END security_viewer_perms]
# [START security_user_perms]
USER_PERMISSIONS = [
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
]
# [END security_user_perms]
# [START security_op_perms]
OP_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),
]
# [END security_op_perms]
ADMIN_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TRIGGER),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TRIGGER),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),
]
# global resource for dag-level access
DAG_RESOURCES = {permissions.RESOURCE_DAG}
DAG_ACTIONS = permissions.DAG_ACTIONS
###########################################################################
# DEFAULT ROLE CONFIGURATIONS
###########################################################################
ROLE_CONFIGS: list[dict[str, Any]] = [
{"role": "Public", "perms": []},
{"role": "Viewer", "perms": VIEWER_PERMISSIONS},
{
"role": "User",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS,
},
{
"role": "Op",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,
},
{
"role": "Admin",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,
},
]
actionmodelview = ActionModelView
permissionmodelview = PermissionPairModelView
rolemodelview = CustomRoleModelView
resourcemodelview = ResourceModelView
userdbmodelview = CustomUserDBModelView
resetmypasswordview = CustomResetMyPasswordView
resetpasswordview = CustomResetPasswordView
userinfoeditview = CustomUserInfoEditView
userldapmodelview = CustomUserLDAPModelView
useroauthmodelview = CustomUserOAuthModelView
userremoteusermodelview = CustomUserRemoteUserModelView
useroidmodelview = CustomUserOIDModelView
userstatschartview = CustomUserStatsChartView
def __init__(self, appbuilder) -> None:
super().__init__(
appbuilder=appbuilder,
actionmodelview=self.actionmodelview,
authdbview=self.authdbview,
authldapview=self.authldapview,
authoauthview=self.authoauthview,
authoidview=self.authoidview,
authremoteuserview=self.authremoteuserview,
permissionmodelview=self.permissionmodelview,
registeruser_view=self.registeruser_view,
registeruserdbview=self.registeruserdbview,
registeruseroauthview=self.registeruseroauthview,
registerusermodelview=self.registerusermodelview,
registeruseroidview=self.registeruseroidview,
resetmypasswordview=self.resetmypasswordview,
resetpasswordview=self.resetpasswordview,
rolemodelview=self.rolemodelview,
user_model=self.user_model,
userinfoeditview=self.userinfoeditview,
userdbmodelview=self.userdbmodelview,
userldapmodelview=self.userldapmodelview,
useroauthmodelview=self.useroauthmodelview,
useroidmodelview=self.useroidmodelview,
userremoteusermodelview=self.userremoteusermodelview,
userstatschartview=self.userstatschartview,
)
# Go and fix up the SQLAInterface used from the stock one to our subclass.
# This is needed to support the "hack" where we had to edit
# FieldConverter.conversion_table in place in airflow.www.utils
for attr in dir(self):
if not attr.endswith("view"):
continue
view = getattr(self, attr, None)
if not view or not getattr(view, "datamodel", None):
continue
view.datamodel = CustomSQLAInterface(view.datamodel.obj)
self.perms = None
def _get_root_dag_id(self, dag_id: str) -> str:
if "." in dag_id:
dm = self.appbuilder.get_session.execute(
select(DagModel.dag_id, DagModel.root_dag_id).where(DagModel.dag_id == dag_id)
).one()
return dm.root_dag_id or dm.dag_id
return dag_id
def init_role(self, role_name, perms) -> None:
"""
Initialize the role with actions and related resources.
:param role_name:
:param perms:
:return:
"""
warnings.warn(
"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
self.bulk_sync_roles([{"role": role_name, "perms": perms}])
def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None:
"""Sync the provided roles and permissions."""
existing_roles = self._get_all_roles_with_permissions()
non_dag_perms = self._get_all_non_dag_permissions()
for config in roles:
role_name = config["role"]
perms = config["perms"]
role = existing_roles.get(role_name) or self.add_role(role_name)
for action_name, resource_name in perms:
perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(
action_name, resource_name
)
if perm not in role.permissions:
self.add_permission_to_role(role, perm)
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
return user.roles
def get_readable_dags(self, user) -> Iterable[DagModel]:
"""Gets the DAGs readable by authenticated user."""
warnings.warn(
"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)
def get_editable_dags(self, user) -> Iterable[DagModel]:
"""Gets the DAGs editable by authenticated user."""
warnings.warn(
"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)
@provide_session
def get_accessible_dags(
self,
user_actions: Container[str] | None,
user,
session: Session = NEW_SESSION,
) -> Iterable[DagModel]:
warnings.warn(
"`get_accessible_dags` has been deprecated. Please use `get_accessible_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=3,
)
dag_ids = self.get_accessible_dag_ids(user, user_actions, session)
return session.scalars(select(DagModel).where(DagModel.dag_id.in_(dag_ids)))
def get_readable_dag_ids(self, user) -> set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
def get_editable_dag_ids(self, user) -> set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
@provide_session
def get_accessible_dag_ids(
self,
user,
user_actions: Container[str] | None = None,
session: Session = NEW_SESSION,
) -> set[str]:
"""Generic function to get readable or writable DAGs for user."""
if not user_actions:
user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
if not get_auth_manager().is_logged_in():
roles = user.roles
else:
if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (
permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)
):
return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}
user_query = session.scalar(
select(User)
.options(
joinedload(User.roles)
.subqueryload(Role.permissions)
.options(joinedload(Permission.action), joinedload(Permission.resource))
)
.where(User.id == user.id)
)
roles = user_query.roles
resources = set()
for role in roles:
for permission in role.permissions:
action = permission.action.name
if action not in user_actions:
continue
resource = permission.resource.name
if resource == permissions.RESOURCE_DAG:
return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}
if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])
else:
resources.add(resource)
return {
dag.dag_id
for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))
}
def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != "~":
root_dag_id = self._get_root_dag_id(dag_id)
return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dag_ids(user))
return any(self.get_editable_dag_ids(user))
def can_read_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG read access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)
def can_edit_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG edit access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)
def can_delete_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG delete access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user)
def prefixed_dag_id(self, dag_id: str) -> str:
"""Returns the permission name for a DAG id."""
warnings.warn(
"`prefixed_dag_id` has been deprecated. "
"Please use `airflow.security.permissions.resource_name_for_dag` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
root_dag_id = self._get_root_dag_id(dag_id)
return permissions.resource_name_for_dag(root_dag_id)
def is_dag_resource(self, resource_name: str) -> bool:
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def has_access(self, action_name: str, resource_name: str, user=None) -> bool:
"""
Verify whether a given user could perform a certain action on the given resource.
Example actions might include can_read, can_write, can_delete, etc.
:param action_name: action_name on resource (e.g can_read, can_edit).
:param resource_name: name of view-menu or resource.
:param user: user name
:return: Whether user could perform certain action on the resource.
:rtype bool
"""
if not user:
user = g.user
if (action_name, resource_name) in user.perms:
return True
if self.is_dag_resource(resource_name):
if (action_name, permissions.RESOURCE_DAG) in user.perms:
return True
return (action_name, resource_name) in user.perms
return False
def _has_role(self, role_name_or_list: Container, user) -> bool:
"""Whether the user has this role name."""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in user.roles)
def has_all_dags_access(self, user) -> bool:
"""
Has all the dag access in any of the 3 cases.
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
if not user:
user = g.user
return (
self._has_role(["Admin", "Viewer", "Op", "User"], user)
or self.can_read_all_dags(user)
or self.can_edit_all_dags(user)
)
def can_edit_all_dags(self, user=None) -> bool:
"""Has can_edit action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)
def can_read_all_dags(self, user=None) -> bool:
"""Has can_read action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)
def clean_perms(self) -> None:
"""FAB leaves faulty permissions that need to be cleaned up."""
self.log.debug("Cleaning faulty perms")
sesh = self.appbuilder.get_session
perms = sesh.query(Permission).filter(
or_(
Permission.action == None, # noqa
Permission.resource == None, # noqa
)
)
# Since FAB doesn't define ON DELETE CASCADE on these tables, we need
# to delete the _object_ so that SQLA knows to delete the many-to-many
# relationship object too. :(
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info("Deleted %s faulty permissions", deleted_count)
def _merge_perm(self, action_name: str, resource_name: str) -> None:
"""
Add the new (action, resource) to assoc_permission_role if it doesn't exist.
It will add the related entry to ab_permission and ab_resource two meta tables as well.
:param action_name: Name of the action
:param resource_name: Name of the resource
:return:
"""
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
perm = None
if action and resource:
perm = self.appbuilder.get_session.scalar(
select(self.permission_model).filter_by(action=action, resource=resource).limit(1)
)
if not perm and action_name and resource_name:
self.create_permission(action_name, resource_name)
def add_homepage_access_to_custom_roles(self) -> None:
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.appbuilder.get_session.commit()
def get_all_permissions(self) -> set[tuple[str, str]]:
"""Returns all permissions as a set of tuples with the action and resource names."""
return set(
self.appbuilder.get_session.execute(
select(self.action_model.name, self.resource_model.name)
.join(self.permission_model.action)
.join(self.permission_model.resource)
)
)
def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]:
"""
Get permissions except those that are for specific DAGs.
Returns a dict with a key of (action_name, resource_name) and value of permission
with all permissions except those that are for specific DAGs.
"""
return {
(action_name, resource_name): viewmodel
for action_name, resource_name, viewmodel in (
self.appbuilder.get_session.execute(
select(self.action_model.name, self.resource_model.name, self.permission_model)
.join(self.permission_model.action)
.join(self.permission_model.resource)
.where(~self.resource_model.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
)
)
}
def _get_all_roles_with_permissions(self) -> dict[str, Role]:
"""Returns a dict with a key of role name and value of role with early loaded permissions."""
return {
r.name: r
for r in self.appbuilder.get_session.scalars(
select(self.role_model).options(joinedload(self.role_model.permissions))
).unique()
}
def create_dag_specific_permissions(self) -> None:
"""
Add permissions to all DAGs.
Creates 'can_read', 'can_edit', and 'can_delete' permissions for all
DAGs, along with any `access_control` permissions provided in them.
This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
if you only need to sync a single DAG.
:return: None.
"""
perms = self.get_all_permissions()
dagbag = DagBag(read_dags_from_db=True)
dagbag.collect_dags_from_db()
dags = dagbag.dags.values()
for dag in dags:
root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
for action_name in self.DAG_ACTIONS:
if (action_name, dag_resource_name) not in perms:
self._merge_perm(action_name, dag_resource_name)
if dag.access_control:
self.sync_perm_for_dag(dag_resource_name, dag.access_control)
def update_admin_permission(self) -> None:
"""
Add missing permissions to the table for admin.
Admin should get all the permissions, except the dag permissions
because Admin already has Dags permission.
Add the missing ones to the table for admin.
:return: None.
"""
session = self.appbuilder.get_session
dag_resources = session.scalars(
select(Resource).where(Resource.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
)
resource_ids = [resource.id for resource in dag_resources]
perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))
perms = [p for p in perms if p.action and p.resource]
admin = self.find_role("Admin")
admin.permissions = list(set(admin.permissions) | set(perms))
session.commit()
def sync_roles(self) -> None:
"""
Initialize default and custom roles with related permissions.
1. Init the default role(Admin, Viewer, User, Op, public)
with related permissions.
2. Init the custom role(dag-user) with related permissions.
:return: None.
"""
# Create global all-dag permissions
self.create_perm_vm_for_all_dag()
# Sync the default roles (Admin, Viewer, User, Op, public) with related permissions
self.bulk_sync_roles(self.ROLE_CONFIGS)
self.add_homepage_access_to_custom_roles()
# init existing roles, the rest role could be created through UI.
self.update_admin_permission()
self.clean_perms()
def sync_resource_permissions(self, perms: Iterable[tuple[str, str]] | None = None) -> None:
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
def sync_perm_for_dag(
self,
dag_id: str,
access_control: dict[str, Collection[str]] | None = None,
) -> None:
"""
Sync permissions for given dag id.
The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g.,
{'can_read'}
:return:
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
for dag_action_name in self.DAG_ACTIONS:
self.create_permission(dag_action_name, dag_resource_name)
if access_control is not None:
self.log.info("Syncing DAG-level permissions for DAG '%s'", dag_resource_name)
self._sync_dag_view_permissions(dag_resource_name, access_control)
else:
self.log.info(
"Not syncing DAG-level permissions for DAG '%s' as access control is unset.",
dag_resource_name,
)
def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:
"""
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g. {'can_read'})
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
def _get_or_create_dag_permission(action_name: str) -> Permission | None:
perm = self.get_permission(action_name, dag_resource_name)
if not perm:
self.log.info("Creating new action '%s' on resource '%s'", action_name, dag_resource_name)
perm = self.create_permission(action_name, dag_resource_name)
return perm
def _revoke_stale_permissions(resource: Resource):
existing_dag_perms = self.get_resource_permissions(resource)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name != "Admin"]
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, ())
if perm.action.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.action,
dag_resource_name,
role.name,
)
self.remove_permission_from_role(role, perm)
resource = self.get_resource(dag_resource_name)
if resource:
_revoke_stale_permissions(resource)
for rolename, action_names in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
f"The access_control mapping for DAG '{dag_id}' includes a role named "
f"'{rolename}', but that role does not exist"
)
action_names = set(action_names)
invalid_action_names = action_names - self.DAG_ACTIONS
if invalid_action_names:
raise AirflowException(
f"The access_control map for DAG '{dag_resource_name}' includes "
f"the following invalid permissions: {invalid_action_names}; "
f"The set of valid permissions is: {self.DAG_ACTIONS}"
)
for action_name in action_names:
dag_perm = _get_or_create_dag_permission(action_name)
if dag_perm:
self.add_permission_to_role(role, dag_perm)
def create_perm_vm_for_all_dag(self) -> None:
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
# create perm for global logical dag
for resource_name in self.DAG_RESOURCES:
for action_name in self.DAG_ACTIONS:
self._merge_perm(action_name, resource_name)
def check_authorization(
self,
perms: Sequence[tuple[str, str]] | None = None,
dag_id: str | None = None,
) -> bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in (
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
class FakeAppBuilder:
"""Stand-in class to replace a Flask App Builder.
The only purpose is to provide the ``self.appbuilder.get_session`` interface
for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask
app, which is slow to create.
"""
def __init__(self, session: Session | None = None) -> None:
self.get_session = session
class ApplessAirflowSecurityManager(AirflowSecurityManager):
"""Security Manager that doesn't need the whole flask app."""
def __init__(self, session: Session | None = None):
self.appbuilder = FakeAppBuilder(session)
|
3,867 | 3b4799f43ec497978bea3ac7ecf8c6aaeb2180b4 | # coding: utf8
from __future__ import absolute_import
import numpy as np
def arr2str(arr, sep=", ", fmt="{}"):
"""
Make a string from a list seperated by ``sep`` and each item formatted
with ``fmt``.
"""
return sep.join([fmt.format(v) for v in arr])
def indent_wrap(s, indent=0, wrap=80):
"""
Wraps and indents a string ``s``.
Parameters
----------
s : str
The string to wrap.
indent : int
How far to indent each new line.
wrape : int
Number of character after which to wrap the string.
Returns
-------
s : str
Indented and wrapped string, each line has length ``wrap``, except the
last one, which may have less than ``wrap`` characters.
Example
-------
>>> s = 2 * "abcdefghijklmnopqrstuvwxyz"
>>> indent_wrap(s, indent=0, wrap=26)
'abcdefghijklmnopqrstuvwxyz\nabcdefghijklmnopqrstuvwxyz'
>>> indent_wrap(s, indent=2, wrap=26)
' abcdefghijklmnopqrstuvwx\n yzabcdefghijklmnopqrstuv\n wxyz'
"""
split = wrap - indent
chunks = [indent * " " + s[i:i + split] for i in range(0, len(s), split)]
return "\n".join(chunks)
def serialize_ndarrays(d):
"""
Recursively traverse through iterable object ``d`` and convert all occuring
ndarrays to lists to make it JSON serializable.
Note: Works for 1D dicts with ndarrays at first level. Certainly not tested
and meant to work for all use cases.
Made with code from: http://code.activestate.com/recipes/577504/
Parameters
----------
d : iterable
Can be dict, list, set, tuple or frozenset.
Returns
-------
d : iterable
Same as input, but all ndarrays replaced by lists.
"""
def dict_handler(d):
return d.items()
handlers = {list: enumerate, tuple: enumerate,
set: enumerate, frozenset: enumerate,
dict: dict_handler}
def serialize(o):
for typ, handler in handlers.items():
if isinstance(o, typ):
for key, val in handler(o):
if isinstance(val, np.ndarray):
o[key] = val.tolist()
else:
o[key] = serialize_ndarrays(o[key])
return o
return serialize(d)
def fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):
"""
Populate dictionary with data from a given dict ``d``, and check if ``d``
has required and optional keys. Set optionals with default if not present.
If input ``d`` is None and ``required_keys`` is empty, just return
``opt_keys``.
Parameters
----------
d : dict or None
Input dictionary containing the data to be checked. If is ``None``, then
a copy of ``opt_keys`` is returned. If ``opt_keys`` is ``None``, a
``TypeError`` is raised. If ``d``is ``None`` and ``required_keys`` is
not, then a ``ValueError`` israised.
required_keys : list or None, optional
Keys that must be present and set in ``d``. (default: None)
opt_keys : dict or None, optional
Keys that are optional. ``opt_keys`` provides optional keys and default
values ``d`` is filled with if not present in ``d``. (default: None)
noleft : bool, optional
If True, raises a ``KeyError``, when ``d`` contains etxra keys, other
than those given in ``required_keys`` and ``opt_keys``. (default: True)
Returns
-------
out : dict
Contains all required and optional keys, using default values, where
optional keys were missing. If ``d`` was None, a copy of ``opt_keys`` is
returned, if ``opt_keys`` was not ``None``.
"""
if required_keys is None:
required_keys = []
if opt_keys is None:
opt_keys = {}
if d is None:
if not required_keys:
if opt_keys is None:
raise TypeError("`d` and òpt_keys` are both None.")
return opt_keys.copy()
else:
raise ValueError("`d` is None, but `required_keys` is not empty.")
d = d.copy()
out = {}
# Set required keys
for key in required_keys:
if key in d:
out[key] = d.pop(key)
else:
raise KeyError("Dict is missing required key '{}'.".format(key))
# Set optional values, if key not given
for key, val in opt_keys.items():
out[key] = d.pop(key, val)
# Complain when extra keys are left and noleft is True
if d and noleft:
raise KeyError("Leftover keys ['{}'].".format(
"', '".join(list(d.keys()))))
return out
|
3,868 | 4c3a27bf1f7e617f4b85dc2b59efa184751b69ac | import os
from redis import Redis
try:
if os.environ.get('DEBUG'):
import settings_local as settings
else:
import settings_prod as settings
except ImportError:
import settings
redis_env = os.environ.get('REDISTOGO_URL')
if redis_env:
redis = Redis.from_url(redis_env)
elif getattr(settings, 'REDIS_URL', None):
redis = Redis.from_url(settings.REDIS_URL)
else:
redis = Redis(host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=settings.REDIS_DB,
password=settings.REDIS_PASS)
|
3,869 | 0588aad1536a81d047a2a2b91f83fdde4d1be974 | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name = 'index'),
path('about/', views.about, name='about'),
path('contact/', views.contact, name= 'contact'),
path('category/', views.category, name='category'),
path('product/<str:id>/<slug:slug>',views.product_list, name='product_list'),
path('product-detail/<str:id>/<slug:slug>', views.prod_detail, name= 'prod_detail'),
] |
3,870 | bd2a5c2dd3eef5979c87a488fb584dce740ccb05 | import io
import os
import sys
import whwn
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
here = os.path.abspath(os.path.dirname(__file__))
with open('README.md') as readme:
long_description = readme.read()
with open('requirements.txt') as reqs:
install_requires = [
line for line in reqs.read().split('\n') if (line and not
line.startswith('--'))
]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name='We Have We Need',
version=whwn.__version__,
url='http://github.com/wehaveweneed/wehaveweneed',
tests_require=['pytest'],
cmdclass={'test': PyTest},
description='Inventory Management System',
long_description=long_description,
install_requires=install_requires,
packages=['whwn'],
include_package_data=True,
test_suite='whwn.test.test_whwn',
classifiers = [
'Environment :: Web Environment',
'Framework :: Django',
],
extras_require={
'testing': ['pytest'],
}
)
|
3,871 | e5e460eb704e2ab5f747d1beee05e012ea95fbd2 | class UnknownResponseFormat(Exception):
pass
|
3,872 | 283b93437072f0fd75d75dab733ecab05dc9e1f3 | #!/usr/bin/env python3
import logging
import datetime
import os
import time
import json
import prod
import secret
from logging.handlers import RotatingFileHandler
import requests
import sns
from kafka import KafkaProducer
# Root logger at INFO, writing to <scriptname>.log with rotation at ~1 MB
# and 5 backup files kept.
logger = logging.getLogger()
logger.setLevel('INFO')
log_path = os.path.basename(__file__).split('.')[0] + '.log'
handler = RotatingFileHandler(
    log_path, maxBytes=1000000, backupCount=5)
formatter = logging.Formatter(
    "[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
class Producer():
    """Thin wrapper around kafka-python's KafkaProducer bound to one topic.

    Broker list and SASL credentials are read from the environment
    (KAFKA_USERNAME / KAFKA_PASSWORD / KAFKA_HOSTS — populated by
    set_creds()).  TLS is verified against a fixed CA bundle on disk.
    """

    def __init__(self, topic):
        kafka_uname = os.environ['KAFKA_USERNAME']
        kafka_pwd = os.environ['KAFKA_PASSWORD']
        kafka_hosts = os.environ['KAFKA_HOSTS']
        # CA bundle used to verify the brokers' certificates.
        ssl_truststore_file = '/opt/scripts/ca-cert.cer'
        self.topic_name = topic
        self.producer = KafkaProducer(
            bootstrap_servers=kafka_hosts,
            acks=1,  # leader-only acknowledgement
            compression_type='snappy',
            retries=5,
            linger_ms=200,  # small batching window before each send
            batch_size=1000,
            request_timeout_ms=100000,
            sasl_plain_username=kafka_uname,
            sasl_plain_password=kafka_pwd,
            security_protocol="SASL_SSL",
            sasl_mechanism="PLAIN",
            # sasl_mechanism="SCRAM-SHA-512",
            ssl_cafile=ssl_truststore_file,
            api_version=(0, 10, 1)
        )

    def produce_message(self, message):
        # Fire-and-forget; delivery is only guaranteed after close() flushes.
        self.producer.send(self.topic_name, message)

    def close(self):
        # Flush pending batches before closing so nothing is dropped.
        self.producer.flush()
        self.producer.close()
        logger.info('closed')
def set_creds():
    """Fetch Kafka credentials from the secret store and export them as env vars."""
    creds = secret.get_secret(
        'ngsiem-aca-kafka-config', ['username', 'password', 'kafka_hosts'])
    # Map each secret field onto the environment variable Producer reads.
    for env_name, field in (('KAFKA_USERNAME', 'username'),
                            ('KAFKA_PASSWORD', 'password'),
                            ('KAFKA_HOSTS', 'kafka_hosts')):
        os.environ[env_name] = creds[field]
def run_kafka_producer_job(logs, topic_name):
    """Serialize each log record to JSON and publish it to *topic_name*.

    Loads credentials first; always flushes and closes the producer, and
    logs then re-raises any error hit while producing.
    """
    set_creds()
    producer = Producer(topic=topic_name)
    logger.info('producer created')
    try:
        for record in logs:
            payload = json.dumps(record).encode()
            producer.produce_message(payload)
    except Exception as e:
        logger.info(f'Error gathering the file or producing to Kafka: {str(e)}')
        raise e
    finally:
        producer.close()
def pull_pp_trap_logs(minutes_before):
    """Query the Proofpoint TRAP incidents API for one 5-minute window.

    The window is [now - minutes_before - 20m, now - minutes_before - 15m];
    i.e. `minutes_before` shifts the whole window further into the past,
    which is how the main loop backfills.  Returns the parsed JSON response,
    or None on error (an SNS alert is raised and the error logged).
    """
    logger.info('retrieving secrets for pp_trap')
    current_time = datetime.datetime.utcnow()
    if minutes_before > 0:
        current_time = current_time - \
            datetime.timedelta(minutes=minutes_before)
    # [:-4] truncates microseconds to 2 digits before appending "Z".
    # NOTE(review): presumably the precision the API expects — confirm.
    fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + "Z"
    twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + "Z"
    qs = {"created_after": twenty_minutes_ago, "created_before": fifteen_minutes_ago, "expand_events": "false"}
    try:
        # NOTE(review): verify=False disables TLS verification for this
        # internal IP — confirm this is intentional, and consider pinning
        # the internal CA instead.
        r = requests.get('https://10.47.172.28/api/incidents', params=qs,
                         headers={'Authorization': prod.pp_trap_api_key}, verify=False)
        print(r.status_code)  # NOTE(review): debug prints; consider logger.debug
        json_object = r.json()
        print(json_object)
        return json_object
    except Exception as e:
        sns.generate_sns("proofpoint_trap")
        logger.error(f"Error for TRAP API call: {str(e)}")
if __name__ == "__main__":
    # Backfill offset in minutes; the resume point is persisted in a
    # ./minutes_before file so restarts continue where they left off.
    minutes_before = 0 * 60
    minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')
    if os.path.exists(minutes_before_file):
        with open(minutes_before_file, 'r') as minutes_file:
            line = minutes_file.readline()
            line = line.strip()
            minutes_before = int(line)
    while True:
        """
        Query TRAP API (JSON format) starting from minutes_before
        send logs to kafka
        reduce minutes_before in next iteration and repeat
        when iteration reaches now -20 minutes
        run the job once every 5 minutes
        """
        logger.info(f'minutes before: {minutes_before}')
        # Once caught up (offset exhausted), throttle to one poll per 5 min.
        if minutes_before <= 0:
            logger.info('waiting for 5 minutes')
            time.sleep(300)
        logger.info('TRAP query started')
        logs = pull_pp_trap_logs(minutes_before)
        logger.info('TRAP query finished')
        minutes_before = minutes_before - 5
        if logs:
            logger.info('TRAP_produce started')
            run_kafka_producer_job(logs, 'test_log_security_proofpoint.trap_weekly')
            logger.info('TRAP_produce finished')
        else:
            logger.info("No logs for TRAP call.")
        # Persist the clamped (non-negative) offset for the next run.
        with open(minutes_before_file, 'w') as minutes_file:
            minutes_before = 0 if minutes_before < 0 else minutes_before
            minutes_file.write(str(minutes_before))
3,873 | d90aeaaa682b371afb4771ecfbf1077fc12520b4 | from django.contrib import admin
# Register your models here.
from django.contrib import admin
from practice_app.models import Person
class PersonAdmin(admin.ModelAdmin):
    """Default admin configuration for Person (no customization yet)."""
    pass


admin.site.register(Person)
3,874 | 2ae953d1d53c47da10ea4c8aace186eba0708ad0 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import pylab as pb
from .. import kern
from ..core import model
from ..util.linalg import pdinv,mdot
from ..util.plot import gpplot,x_frame1D,x_frame2D, Tango
from ..likelihoods import EP
class GP(model):
    """
    Gaussian Process model for regression and EP

    :param X: input observations
    :param kernel: a GPy kernel, defaults to rbf+white
    :param likelihood: a GPy likelihood
    :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
    :type normalize_X: False|True
    :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales)
    :type normalize_Y: False|True
    :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing)
    :rtype: model object
    :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1
    :param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.]
    :type powerep: list

    .. Note:: Multiple independent outputs are allowed using columns of Y

    .. Note:: this file uses Python 2 syntax (``raise E, msg``).
    """

    def __init__(self, X, likelihood, kernel, normalize_X=False, Xslices=None):
        # parse arguments
        self.Xslices = Xslices
        self.X = X
        assert len(self.X.shape) == 2
        self.N, self.Q = self.X.shape
        assert isinstance(kernel, kern.kern)
        self.kern = kernel
        # here's some simple normalization for the inputs
        if normalize_X:
            self._Xmean = X.mean(0)[None, :]
            self._Xstd = X.std(0)[None, :]
            self.X = (X.copy() - self._Xmean) / self._Xstd
            # sparse subclasses carry inducing inputs Z; keep them in the
            # same (normalized) space as X
            if hasattr(self, 'Z'):
                self.Z = (self.Z - self._Xmean) / self._Xstd
        else:
            # identity normalization so downstream code can always
            # un-normalize with the same formula
            self._Xmean = np.zeros((1, self.X.shape[1]))
            self._Xstd = np.ones((1, self.X.shape[1]))
        self.likelihood = likelihood
        #assert self.X.shape[0] == self.likelihood.Y.shape[0]
        #self.N, self.D = self.likelihood.Y.shape
        assert self.X.shape[0] == self.likelihood.data.shape[0]
        self.N, self.D = self.likelihood.data.shape
        model.__init__(self)

    def dL_dZ(self):
        """
        TODO: one day we might like to learn Z by gradient methods?
        """
        return np.zeros_like(self.Z)

    def _set_params(self, p):
        # Split the flat parameter vector between kernel and likelihood,
        # then rebuild the (noise-augmented) covariance and its inverse.
        self.kern._set_params_transformed(p[:self.kern.Nparam])
        #self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas
        self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas
        self.K = self.kern.K(self.X, slices1=self.Xslices, slices2=self.Xslices)
        self.K += self.likelihood.covariance_matrix
        self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
        # the gradient of the likelihood wrt the covariance matrix
        if self.likelihood.YYT is None:
            alpha = np.dot(self.Ki, self.likelihood.Y)
            self.dL_dK = 0.5 * (np.dot(alpha, alpha.T) - self.D * self.Ki)
        else:
            # use the cached outer product Y Y^T when available
            tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
            self.dL_dK = 0.5 * (tmp - self.D * self.Ki)

    def _get_params(self):
        # Kernel parameters first, then likelihood parameters (same split
        # that _set_params expects).
        return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))

    def _get_param_names(self):
        return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()

    def update_likelihood_approximation(self):
        """
        Approximates a non-gaussian likelihood using Expectation Propagation

        For a Gaussian (or direct: TODO) likelihood, no iteration is required:
        this function does nothing
        """
        self.likelihood.fit_full(self.kern.K(self.X))
        self._set_params(self._get_params()) # update the GP

    def _model_fit_term(self):
        """
        Computes the model fit using YYT if it's available
        """
        if self.likelihood.YYT is None:
            return -0.5 * np.sum(np.square(np.dot(self.Li, self.likelihood.Y)))
        else:
            return -0.5 * np.sum(np.multiply(self.Ki, self.likelihood.YYT))

    def log_likelihood(self):
        """
        The log marginal likelihood of the GP.

        For an EP model, can be written as the log likelihood of a regression
        model for a new variable Y* = v_tilde/tau_tilde, with a covariance
        matrix K* = K + diag(1./tau_tilde) plus a normalization term.
        """
        return -0.5 * self.D * self.K_logdet + self._model_fit_term() + self.likelihood.Z

    def _log_likelihood_gradients(self):
        """
        The gradient of all parameters.

        For the kernel parameters, use the chain rule via dL_dK
        For the likelihood parameters, pass in alpha = K^-1 y
        """
        return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X, slices1=self.Xslices, slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK))))

    def _raw_predict(self, _Xnew, slices=None, full_cov=False):
        """
        Internal helper function for making predictions, does not account
        for normalization or likelihood
        """
        Kx = self.kern.K(self.X, _Xnew, slices1=self.Xslices, slices2=slices)
        mu = np.dot(np.dot(Kx.T, self.Ki), self.likelihood.Y)
        KiKx = np.dot(self.Ki, Kx)
        if full_cov:
            Kxx = self.kern.K(_Xnew, slices1=slices, slices2=slices)
            var = Kxx - np.dot(KiKx.T, Kx)
        else:
            # diagonal-only predictive variance, returned as a column
            Kxx = self.kern.Kdiag(_Xnew, slices=slices)
            var = Kxx - np.sum(np.multiply(KiKx, Kx), 0)
            var = var[:, None]
        return mu, var

    def predict(self, Xnew, slices=None, full_cov=False):
        """
        Predict the function(s) at the new point(s) Xnew.

        Arguments
        ---------
        :param Xnew: The points at which to make a prediction
        :type Xnew: np.ndarray, Nnew x self.Q
        :param slices: specifies which outputs kernel(s) the Xnew correspond to (see below)
        :type slices: (None, list of slice objects, list of ints)
        :param full_cov: whether to return the full covariance matrix, or just the diagonal
        :type full_cov: bool
        :rtype: posterior mean, a Numpy array, Nnew x self.D
        :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
        :rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D

        .. Note:: "slices" specifies how the the points X_new co-vary wich the training points.

             - If None, the new points covary throigh every kernel part (default)
             - If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part
             - If a list of booleans, specifying which kernel parts are active

           If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew.
           This is to allow for different normalizations of the output dimensions.
        """
        # normalize X values
        Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
        mu, var = self._raw_predict(Xnew, slices, full_cov)
        # now push through likelihood TODO
        mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
        return mean, var, _025pm, _975pm

    def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False):
        """
        Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian

        :param samples: the number of a posteriori samples to plot
        :param which_data: which if the training data to plot (default all)
        :type which_data: 'all' or a slice object to slice self.X, self.Y
        :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
        :param which_functions: which of the kernel functions to plot (additively)
        :type which_functions: list of bools
        :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D

        Plot the posterior of the GP.
          - In one dimension, the function is plotted with a shaded region identifying two standard deviations.
          - In two dimsensions, a contour-plot shows the mean predicted function
          - In higher dimensions, we've no implemented this yet !TODO!

        Can plot only part of the data and part of the posterior functions using which_data and which_functions
        """
        if which_functions == 'all':
            which_functions = [True] * self.kern.Nparts
        if which_data == 'all':
            which_data = slice(None)
        if self.X.shape[1] == 1:
            Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits)
            if samples == 0:
                m, v = self._raw_predict(Xnew, slices=which_functions)
                gpplot(Xnew, m, m - 2 * np.sqrt(v), m + 2 * np.sqrt(v))
                pb.plot(self.X[which_data], self.likelihood.Y[which_data], 'kx', mew=1.5)
            else:
                # full covariance needed to draw coherent posterior samples
                m, v = self._raw_predict(Xnew, slices=which_functions, full_cov=True)
                Ysim = np.random.multivariate_normal(m.flatten(), v, samples)
                gpplot(Xnew, m, m - 2 * np.sqrt(np.diag(v)[:, None]), m + 2 * np.sqrt(np.diag(v))[:, None])
                for i in range(samples):
                    pb.plot(Xnew, Ysim[i, :], Tango.colorsHex['darkBlue'], linewidth=0.25)
                pb.plot(self.X[which_data], self.likelihood.Y[which_data], 'kx', mew=1.5)
            pb.xlim(xmin, xmax)
            ymin, ymax = min(np.append(self.likelihood.Y, m - 2 * np.sqrt(np.diag(v)[:, None]))), max(np.append(self.likelihood.Y, m + 2 * np.sqrt(np.diag(v)[:, None])))
            ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
            pb.ylim(ymin, ymax)
            if hasattr(self, 'Z'):
                # mark inducing inputs along the bottom axis
                pb.plot(self.Z, self.Z * 0 + pb.ylim()[0], 'r|', mew=1.5, markersize=12)
        elif self.X.shape[1] == 2:
            resolution = resolution or 50
            Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits, resolution)
            m, v = self._raw_predict(Xnew, slices=which_functions)
            m = m.reshape(resolution, resolution).T
            pb.contour(xx, yy, m, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet)
            # NOTE(review): Xorig/Yorig are not defined in this method —
            # this branch looks broken as written; confirm.
            pb.scatter(Xorig[:, 0], Xorig[:, 1], 40, Yorig, linewidth=0, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max())
            pb.xlim(xmin[0], xmax[0])
            pb.ylim(xmin[1], xmax[1])
        else:
            raise NotImplementedError, "Cannot define a frame with more than two input dimensions"

    def plot(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, levels=20):
        """
        Plot the data's view of the world, with non-normalized values and GP
        predictions passed through the likelihood.

        :param levels: for 2D plotting, the number of contour levels to use
        """
        # TODO include samples
        if which_functions == 'all':
            which_functions = [True] * self.kern.Nparts
        if which_data == 'all':
            which_data = slice(None)
        if self.X.shape[1] == 1:
            Xu = self.X * self._Xstd + self._Xmean # NOTE self.X are the normalized values now
            Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits)
            m, var, lower, upper = self.predict(Xnew, slices=which_functions)
            gpplot(Xnew, m, lower, upper)
            pb.plot(Xu[which_data], self.likelihood.data[which_data], 'kx', mew=1.5)
            ymin, ymax = min(np.append(self.likelihood.data, lower)), max(np.append(self.likelihood.data, upper))
            ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
            pb.xlim(xmin, xmax)
            pb.ylim(ymin, ymax)
            if hasattr(self, 'Z'):
                # un-normalize inducing inputs before plotting
                Zu = self.Z * self._Xstd + self._Xmean
                pb.plot(Zu, Zu * 0 + pb.ylim()[0], 'r|', mew=1.5, markersize=12)
                if self.has_uncertain_inputs:
                    pb.errorbar(self.X[:, 0], pb.ylim()[0] + np.zeros(self.N), xerr=2 * np.sqrt(self.X_variance.flatten()))
        elif self.X.shape[1] == 2: # FIXME
            resolution = resolution or 50
            Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits, resolution)
            x, y = np.linspace(xmin[0], xmax[0], resolution), np.linspace(xmin[1], xmax[1], resolution)
            m, var, lower, upper = self.predict(Xnew, slices=which_functions)
            m = m.reshape(resolution, resolution).T
            pb.contour(x, y, m, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet)
            Yf = self.likelihood.Y.flatten()
            pb.scatter(self.X[:, 0], self.X[:, 1], 40, Yf, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.)
            pb.xlim(xmin[0], xmax[0])
            pb.ylim(xmin[1], xmax[1])
            if hasattr(self, 'Z'):
                pb.plot(self.Z[:, 0], self.Z[:, 1], 'wo')
        else:
            raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
|
3,875 | 55a392d63838cbef027f9cf525999c41416e3575 | import torch
from torch import nn
from torch.nn import functional as F
from models.blocks import UnetConv3, MultiAttentionBlock, UnetGridGatingSignal3, UnetUp3_CT, UnetDsv3
class AttentionGatedUnet3D(nn.Module):
    """
    Attention Gated Unet for 3D semantic segmentation.

    Args:
        config: Must contain following attributes:
            num_classes (int): Number of output classes in the mask;
            in_channels (int): Number of channels in the input image;
            feature_scale (int, optional): factor by which to scale down the number of filters / channels in each block;
            is_deconv (bool, optional): whether to use DeConvolutions;
            is_batchnorm (bool, optional): whether to use Batch Normalization;

    Attributes:
        num_classes (int): Number of classes in the output mask
        in_channels (int): Number of channels in the input image
        is_batchnorm (bool)
        is_deconv (bool)
        feature_scale (int)
    """

    def __init__(self, config):
        super(AttentionGatedUnet3D, self).__init__()
        # Required config fields; optional ones are defaulted with a notice.
        assert hasattr(config, "num_classes")
        assert hasattr(config, "in_channels")
        if not hasattr(config, "feature_scale"):
            print("feature_scale not specified in config, setting to default 4")
            config.feature_scale = 4
        if not hasattr(config, "is_deconv"):
            print("is_deconv not specified in config, setting to default True")
            config.is_deconv = True
        if not hasattr(config, "is_batchnorm"):
            print("is_batchnorm not specified in config, setting to default True")
            config.is_batchnorm = True
        self.num_classes = config.num_classes
        self.in_channels = config.in_channels
        self.is_deconv = config.is_deconv
        self.is_batchnorm = config.is_batchnorm
        self.feature_scale = config.feature_scale
        nonlocal_mode = 'concatenation'
        attention_dsample = (2, 2, 2)
        # Channel widths per level, shrunk by feature_scale.
        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]
        # downsampling
        self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm)
        self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))
        self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm)
        self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))
        self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm)
        self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))
        self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm)
        self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))
        self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm)
        self.gating = UnetGridGatingSignal3(filters[4], filters[4], kernel_size=(1, 1, 1),
                                            is_batchnorm=self.is_batchnorm)
        # attention blocks
        self.attentionblock2 = MultiAttentionBlock(in_size=filters[1], gate_size=filters[2], inter_size=filters[1],
                                                   nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)
        self.attentionblock3 = MultiAttentionBlock(in_size=filters[2], gate_size=filters[3], inter_size=filters[2],
                                                   nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)
        self.attentionblock4 = MultiAttentionBlock(in_size=filters[3], gate_size=filters[4], inter_size=filters[3],
                                                   nonlocal_mode=nonlocal_mode, sub_sample_factor=attention_dsample)
        # upsampling
        self.up_concat4 = UnetUp3_CT(filters[4], filters[3], self.is_deconv)
        self.up_concat3 = UnetUp3_CT(filters[3], filters[2], self.is_deconv)
        self.up_concat2 = UnetUp3_CT(filters[2], filters[1], self.is_deconv)
        self.up_concat1 = UnetUp3_CT(filters[1], filters[0], self.is_deconv)
        # deep supervision
        self.dsv4 = UnetDsv3(in_size=filters[3], out_size=self.num_classes, scale_factor=8)
        self.dsv3 = UnetDsv3(in_size=filters[2], out_size=self.num_classes, scale_factor=4)
        self.dsv2 = UnetDsv3(in_size=filters[1], out_size=self.num_classes, scale_factor=2)
        self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=self.num_classes, kernel_size=1)
        # final conv (without any concat)
        self.final = nn.Conv3d(self.num_classes * 4, self.num_classes, 1)
        # initialise weights
        # NOTE(review): nn.init.kaiming_normal / normal / constant are the
        # deprecated (non-underscore) variants — fine on old torch; newer
        # releases expect kaiming_normal_ etc.  Confirm target torch version.
        for m in self.modules():
            if isinstance(m, nn.Conv3d) or isinstance(m, nn.BatchNorm3d):
                classname = m.__class__.__name__
                # print(classname)
                if classname.find('Conv') != -1:
                    nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
                elif classname.find('Linear') != -1:
                    nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
                elif classname.find('BatchNorm') != -1:
                    nn.init.normal(m.weight.data, 1.0, 0.02)
                    nn.init.constant(m.bias.data, 0.0)

    def forward(self, inputs):
        # Feature Extraction
        conv1 = self.conv1(inputs)
        maxpool1 = self.maxpool1(conv1)
        conv2 = self.conv2(maxpool1)
        maxpool2 = self.maxpool2(conv2)
        conv3 = self.conv3(maxpool2)
        maxpool3 = self.maxpool3(conv3)
        conv4 = self.conv4(maxpool3)
        maxpool4 = self.maxpool4(conv4)
        # Gating Signal Generation
        center = self.center(maxpool4)
        gating = self.gating(center)
        # Attention Mechanism
        # Upscaling Part (Decoder)
        g_conv4, att4 = self.attentionblock4(conv4, gating)
        up4 = self.up_concat4(g_conv4, center)
        g_conv3, att3 = self.attentionblock3(conv3, up4)
        up3 = self.up_concat3(g_conv3, up4)
        g_conv2, att2 = self.attentionblock2(conv2, up3)
        up2 = self.up_concat2(g_conv2, up3)
        up1 = self.up_concat1(conv1, up2)
        # Deep Supervision: per-level predictions upsampled and fused.
        dsv4 = self.dsv4(up4)
        dsv3 = self.dsv3(up3)
        dsv2 = self.dsv2(up2)
        dsv1 = self.dsv1(up1)
        final = self.final(torch.cat([dsv1, dsv2, dsv3, dsv4], dim=1))
        pred = F.softmax(final, dim=1)
        return pred

    # @staticmethod
    # def apply_argmax_softmax(pred):
    #     log_p = F.softmax(pred, dim=1)
    #     return log_p
|
3,876 | bd2c327915c1e133a6e7b7a46290369440d50347 | #import fungsi_saya as fs
# from fungsi_saya import kalkulator as k
# hasil = k(10,5,'+')
# print(hasil)
from kelas import Siswa
# Build Siswa ("student") demo instances: (first name, last name, age,
# class).  Instances 2-7 are identical sample data, presumably to exercise
# an instance counter on the class — confirm against kelas.Siswa.
siswa_1 = Siswa('Afif', "A.I.", 17, 'XII IPA')
siswa_2 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_3 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_4 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_5 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_6 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_7 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
#print(Siswa.jum_siswa)
|
3,877 | a7f348b258e1d6b02a79c60e4fe54b6d53801f70 | # coding=utf-8
"""
author: wlc
function: 百科检索数据层
"""
# 引入外部库
import json
import re
from bs4 import BeautifulSoup
# 引入内部库
from src.util.reptile import *
class EncyclopediaDao:
    """Data-access layer for encyclopedia searches (Wikipedia / Baidu Zhidao)."""

    # Matches MediaWiki's search-highlight tags wrapped around hits in
    # snippets: <span class="searchmatch"> ... </span> (opening or closing).
    _HIGHLIGHT_TAG = re.compile(r'</?span[^>]*>')

    @staticmethod
    def _strip_highlight(snippet: str) -> str:
        """Remove MediaWiki search-highlight <span> tags from a snippet.

        BUG FIX: the previous code used
        ``re.sub('[<span class=\"searchmatch\">,</span>]', '', snippet)``;
        the brackets made that a *character class*, so every occurrence of
        the letters s, p, a, n, c, l, ... was deleted from the text instead
        of the tags themselves.
        """
        return EncyclopediaDao._HIGHLIGHT_TAG.sub('', snippet)

    @staticmethod
    def get_key_content(key: str) -> list:
        """Search Wikipedia content for *key*.

        :param key: search keyword
        :return: list of result dicts (id, index, create_date, create_time,
                 title, abstract, url)
        """
        # 1. Request parameters (MediaWiki Action API, list=search).
        url = 'https://zh.wikipedia.org/w/api.php?'
        parm = {
            'action': 'query',
            'list': 'search',
            'srsearch': key,
            'format': 'json',
            'formatversion': '2'
        }
        # 2. Fetch the raw search results.
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join(k + '=' + parm[k] for k in parm), timeout=3)
        content_list = json.loads(page_content)['query']['search']
        # 3. Normalize each hit into our result schema.
        data = []
        prefix = 'https://zh.wikipedia.org/wiki/'
        for index, item in enumerate(content_list):
            # Timestamps arrive as ISO 8601 "YYYY-MM-DDTHH:MM:SSZ".
            date, time = item['timestamp'].rstrip('Z').split('T')
            entry = {
                'id': item['pageid'],
                'index': index,
                'create_date': date,
                'create_time': time,
                'title': item['title'],
                'abstract': EncyclopediaDao._strip_highlight(item['snippet']),
                'url': prefix + item['title'],
            }
            data.append(entry)
        return data

    @staticmethod
    def get_key_title(key: str) -> list:
        """Search Wikipedia titles for *key* (opensearch suggestions).

        :param key: search keyword
        :return: list of result dicts (index, title, url)
        """
        # 1. Request parameters.
        url = 'https://zh.wikipedia.org/w/api.php?'
        parm = {
            'action': 'opensearch',
            'search': key,
            'format': 'json',
            'formatversion': '2'
        }
        # 2. Fetch the suggestions; opensearch returns [query, titles, ...].
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join(k + '=' + parm[k] for k in parm), timeout=3)
        content_list = json.loads(page_content)[1]
        # 3. Normalize titles into our result schema.
        data = []
        prefix = 'https://zh.wikipedia.org/wiki/'
        for index, item in enumerate(content_list):
            entry = {
                'index': index,
                'title': item,
                'url': prefix + item,
            }
            data.append(entry)
        return data

    @staticmethod
    def get_faq_content(query: str, page: str) -> list:
        """Search Baidu Zhidao FAQ results for *query*.

        :param query: search keyword
        :param page: result page offset (value of the `pn` parameter)
        :return: list of result dicts (create_date, title, abstract, url)
        """
        # 1. Request parameters (Baidu Zhidao uses GBK encoding).
        url = 'https://zhidao.baidu.com/search?'
        parm = {
            'lm': '0',
            'rn': '5',
            'pn': page,
            'fr': 'search',
            'ie': 'gbk',
            'word': query
        }
        # 2. Fetch and parse the HTML result list.
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join(k + '=' + parm[k] for k in parm), timeout=3, is_cookie=True, charset='gbk')
        bs = BeautifulSoup(page_content, "html.parser")
        content_list = bs.body.find_all("dl", {'class': 'dl'})
        # 3. Normalize each <dl> entry into our result schema.
        data = []
        for item in content_list:
            entry = {
                'create_date': item.find("dd", {'class': 'dd explain f-light'}).span.text,
                'title': item.a.text,
                'abstract': item.find("dd", {'class': 'dd answer'}).text,
                'url': item.a.get('href')
            }
            data.append(entry)
        return data
|
3,878 | 03da813650d56e7ab92885b698d4af3a51176903 | import datetime
# Read programming.txt, replace every occurrence of 'python' with 'C#',
# and write the result to programming1.txt (paths are hard-coded Windows
# locations).
with open('D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\programming.txt') as f_obj:
    lines = f_obj.readlines()
m_lines = []
for line in lines:
    m_line = line.replace('python', 'C#')
    m_lines.append(m_line)
with open('D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\programming1.txt', 'w') as f_obj:
    for line in m_lines:
        f_obj.write(line)
# Simple guest book: prompt for names until 'q' is entered, timestamping
# each visit with the current local datetime.
with open('D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\guestbook.txt', 'w') as f_obj:
    while True:
        username = input('Please input your name. ')
        if username == 'q':
            break
        else:
            t = str(datetime.datetime.now())
            f_obj.write(username + ' has visited at ' + t + '\n')
|
def sum_numbers(numbers=None):
    """Return the sum of *numbers*; with no argument, return sum(1..100) = 5050.

    :param numbers: iterable of numbers, or None for the 1..100 default
    :return: the arithmetic sum (0 for an empty iterable)
    """
    # `is None` (identity test) instead of `== None`; delegate the
    # accumulation to the C-level builtin instead of a manual loop that
    # shadowed the built-in name `sum`.
    if numbers is None:
        numbers = range(1, 101)
    return sum(numbers)
|
3,880 | 1b645ab0a48b226e26009f76ea49fd3f10f5cc7b | #デフォルト引数の破壊
# Below: the destructive behaviour — the mutable default list is created
# once at def-time and shared across calls, so every call appends to the
# SAME list.  (Deliberate anti-pattern kept for demonstration.)
def sample(x, arg=[]):
    arg.append(x)
    return arg
print(sample(1))  # [1]
print(sample(2))  # [1, 2] — state leaked from the previous call
print(sample(3))  # [1, 2, 3]
# Countermeasure: use an immutable sentinel (None) as the default argument
# and create the list inside the function body.
def sample(x, arg=None):
    """Append x to arg (a fresh list when none is supplied) and return it."""
    arg = [] if arg is None else arg
    arg.append(x)
    return arg


print(sample(1))
print(sample(2))
print(sample(3))
3,881 | d724b4f57cf7683d6b6385bf991ed23a5dd8208f | """added Trail.Geometry without srid
Revision ID: 56afb969b589
Revises: 2cf6c7c1f0d7
Create Date: 2014-12-05 18:13:55.512637
"""
# revision identifiers, used by Alembic.
revision = '56afb969b589'
down_revision = '2cf6c7c1f0d7'
from alembic import op
import sqlalchemy as sa
import flask_admin
import geoalchemy2
def upgrade():
    """Add a MULTILINESTRING `geom` column (no SRID) to the `trail` table."""
    ### commands auto generated by Alembic - please adjust! ###
    #with op.batch_alter_table('POI', schema=None) as batch_op:
    #    batch_op.drop_index('idx_POI_point')
    with op.batch_alter_table('trail', schema=None) as batch_op:
        batch_op.add_column(sa.Column('geom', geoalchemy2.types.Geometry(geometry_type='MULTILINESTRING'), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the `geom` column from `trail` (reverse of upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('trail', schema=None) as batch_op:
        batch_op.drop_column('geom')
    #with op.batch_alter_table('POI', schema=None) as batch_op:
    #    batch_op.create_index('idx_POI_point', ['point'], unique=False)
    ### end Alembic commands ###
|
3,882 | 84e84d9f35702c2572ad5e7daa92a271674986dc | #Coded by J. Prabhath
#14th April, 2020
#Released under GNU GPL
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
# Compare the phase response of the open-loop system with and without the
# controller zero.  signal.lti(zeros, poles, gain) builds each transfer
# function in zero-pole-gain form: poles at 0, -2, -4, -6 with gain K;
# the controlled system adds a zero at -1/Td.
K = 96
Kp = 1
Td = 1.884
s1 = signal.lti([-1/Td],[0,-2,-4,-6], K)  # with controller zero at -1/Td
s2 = signal.lti([],[0,-2,-4,-6], K)       # plant alone, no zero
w,mag1,phase1 = signal.bode(s1)
_,mag2,phase2 = signal.bode(s2)
plt.xlabel('Freq (in rad/s)')
plt.ylabel('Phase (in deg)')
plt.title('Phase plot')
plt.semilogx(w,phase1, label = 'With Controller')
plt.semilogx(w,phase2, label = 'Without Controller')
plt.grid()
plt.legend()
plt.show()
|
3,883 | e2948c0ad78ce210b08d65b3e0f75d757e286ad9 | # 在写Python爬虫的时候,最麻烦的不是那些海量的静态网站,而是那些通过JavaScript获取数据的站点。Python本身对js的支持就不好,所以就有良心的开发者来做贡献了,这就是Selenium,他本身可以模拟真实的浏览器,浏览器所具有的功能他一个都不拉下,加载js更是小菜了
# https://zhuanlan.zhihu.com/p/27115580
# C:\Users\hedy\AppData\Local\Programs\Python\Python36\Scripts\;C:\Users\hedy\AppData\Local\Programs\Python\Python36\
# pip 换源
# http://blog.csdn.net/lambert310/article/details/52412059
# 安装全家桶(ipython,jupyter notebook)
# https://jingyan.baidu.com/article/cbcede070c8eac02f40b4d8e.html
# http://blog.csdn.net/sanshixia/article/details/53996126
|
class Solution(object):
    def removeNthFromEnd(self, head, n):
        """Remove the n-th node from the end of the list; return the new head.

        Two-pointer technique: advance a lead pointer n steps, then move
        both pointers until the lead reaches the last node, leaving the
        trail pointer just before the node to delete.  A sentinel node
        handles removal of the head itself.
        """
        sentinel = ListNode(-1)
        sentinel.next = head
        lead = trail = sentinel
        # Open a gap of n nodes between the two pointers.
        for _ in range(n):
            lead = lead.next
        # Walk both pointers until lead sits on the final node.
        while lead.next:
            lead = lead.next
            trail = trail.next
        # trail.next is the n-th node from the end — unlink it.
        trail.next = trail.next.next
        return sentinel.next
|
3,885 | b0a51877b59e14eefdd662bac468e8ce12343e6b | from django.db import models
# Create your models here.
class Glo_EstadoPlan(models.Model):
    """Lookup table of plan states."""
    # Human-readable state description shown in the admin and str().
    descripcion_estado = models.CharField(max_length=100)

    def __str__(self):
        return '{}'.format(self.descripcion_estado)
3,886 | 22b9868063d6c5fc3f8b08a6e725fff40f4a1a03 | from __future__ import annotations
import math
from abc import abstractmethod
from pytown_core.patterns.behavioral import Command
from pytown_core.serializers import IJSONSerializable
from .buildings import BuildingProcess, BuildingTransaction
from .buildings.factory import BuildingFactory
from .check import (
AvailableCheck,
AwakenCheck,
BackgroundBuildCheck,
BackgroundMovementCheck,
CheckResult,
EnergyCheck,
InventoryAddCheck,
InventoryRemoveCheck,
TransactionCheck,
)
from .inventory import Item
class ServerCommand(IJSONSerializable, Command):
    """Base class for server-side game commands.

    Template method pattern: execute() first runs the subclass _check()
    to accumulate precondition failures into `check_result`, and only
    applies _do() when all checks passed.  `client_id` and `town` are
    injected after construction (town by the town manager, per the TODO).
    """

    def __init__(self):
        self.client_id = None
        self.town = None  # TODO: will be set by townmanager
        self.check_result = CheckResult()

    def execute(self):
        # Apply the command only when every precondition check passed
        # (CheckResult is truthy when there are no failures).
        self._check()
        if self.check_result:
            self._do()

    @abstractmethod
    def _check(self):
        raise NotImplementedError

    @abstractmethod
    def _do(self):
        raise NotImplementedError

    @abstractmethod
    def __repr__(self):
        pass

    @classmethod
    @abstractmethod
    def from_json_dict(cls, json_dict) -> ServerCommand:
        raise NotImplementedError

    def to_json_dict(self) -> dict:
        # Common envelope; subclasses extend with their own fields.
        json_dict = {}
        json_dict["client_id"] = self.client_id
        json_dict["check_result"] = self.check_result.to_json_dict()
        return json_dict

    def to_podsixnet(self):
        # podsixnet network messages are dispatched by their "action" key.
        podsixnet_dict = self.to_json_dict()
        podsixnet_dict["action"] = "command"
        return podsixnet_dict
class MovePlayerCommand(ServerCommand):
    """Move the issuing player one step in a cardinal direction."""

    # Energy deducted from the player for each successful move.
    ENERGY_COST = 1

    def __init__(self, direction: str):
        ServerCommand.__init__(self)
        # One of "left" / "right" / "up" / "down" (see movement_matrix).
        self._direction = direction

    def __repr__(self):
        msg = "Move ServerCommand : {}".format(self._direction)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg

    def _check(self):
        player = self.town.get_player(self.client_id)
        EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.check_result)
        AvailableCheck(player).check(self.check_result)
        # Every corner of the destination footprint must be a walkable
        # tile that exists in the town.
        for tile in self._get_tiles_coordinates_dict().values():
            if tile not in self.town.backgrounds.keys():
                self.check_result += "tile {} not in town".format(tile)
                return
            BackgroundMovementCheck(self.town.backgrounds[tile], player).check(
                self.check_result
            )

    def _do(self):
        (x_dest, y_dest) = self.tile_dest
        player = self.town.get_player(self.client_id)
        player.status = "move"
        player.direction = self._direction
        player.energy.value -= MovePlayerCommand.ENERGY_COST
        player.x = x_dest
        player.y = y_dest

    @property
    def tile_dest(self) -> tuple:
        """Destination (x, y), scaled by terrain multiplier and velocity."""
        movement_matrix = {}
        movement_matrix["left"] = (-1, 0)
        movement_matrix["right"] = (+1, 0)
        movement_matrix["up"] = (0, -1)
        movement_matrix["down"] = (0, +1)
        player = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        background = self.town.backgrounds[tile]
        # Terrain can speed up or slow down movement.
        bg_multiplicator = background.move_multiplicator
        x_dest = (
            player.x
            + movement_matrix[self._direction][0] * bg_multiplicator * player.velocity
        )
        y_dest = (
            player.y
            + movement_matrix[self._direction][1] * bg_multiplicator * player.velocity
        )
        return (x_dest, y_dest)

    def _get_tiles_coordinates_dict(self):
        """Integer tile coordinates of the four corners of the destination.

        The +0.99 offset treats the player as a just-under-1x1 footprint so
        the far corners land in the correct tile after flooring.
        """
        (x_dest, y_dest) = self.tile_dest
        tiles_coordinates_dict = {
            "topleft": (math.floor(x_dest), math.floor(y_dest)),
            "topright": (math.floor(x_dest + 0.99), math.floor(y_dest)),
            "bottomleft": (math.floor(x_dest), math.floor(y_dest + 0.99)),
            "bottomright": (math.floor(x_dest + 0.99), math.floor(y_dest + 0.99)),
        }
        return tiles_coordinates_dict

    @classmethod
    def from_json_dict(cls, json_dict) -> MovePlayerCommand:
        return cls(json_dict["direction"])

    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "move"
        json_dict["direction"] = self._direction
        return json_dict
class BuildCommand(ServerCommand):
    """Place a new building of a given type on a tile."""

    def __init__(self, tile: tuple, building_name: str):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_name = building_name

    def _check(self):
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # The tile must exist, its terrain must allow this building type,
        # and it must not already carry a building.
        if self._tile not in self.town.backgrounds:
            self.check_result += "tile {} not in town".format(self._tile)
            return
        background = self.town.backgrounds[self._tile]
        BackgroundBuildCheck(background, self._building_name).check(self.check_result)
        if self._tile in self.town.buildings:
            self.check_result += "Can't build {} : {} already built on {}".format(
                self._building_name, self.town.buildings[self._tile].name, self._tile
            )

    def _do(self):
        self.town.set_building(
            BuildingFactory.create_building_by_name(self._building_name), self._tile
        )

    def __repr__(self):
        msg = "Build ServerCommand : {} in {}".format(self._building_name, self._tile)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict: dict) -> BuildCommand:
        return cls(json_dict["tile"], json_dict["building_name"])

    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "build"
        json_dict["building_name"] = self._building_name
        json_dict["tile"] = self._tile
        return json_dict
class CollectResourceCommand(ServerCommand):
    """Harvest one item from the resource located on a tile, at an energy cost."""

    ENERGY_COST = 30

    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._tile = tile
        self._item = item

    def _check(self):
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if self._tile not in self.town.resources:
            self.check_result += "No resource in {}".format(self._tile)
            return
        # The resource must hold the item and the player must be able to take it.
        TransactionCheck(self.town.resources[self._tile], player, self._item).check(
            self.check_result
        )
        EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.check_result)

    def _do(self):
        player = self.town.get_player(self.client_id)
        resource = self.town.resources[self._tile]
        # Move the item, then bill the energy cost.
        player.inventory.add_item(self._item)
        resource.inventory.remove_item(self._item)
        player.energy.value -= CollectResourceCommand.ENERGY_COST

    def __repr__(self):
        text = "Collect Resource ServerCommand : {}".format(self._item)
        if not self.check_result:
            text += "\n{}".format(self.check_result)
        return text

    @classmethod
    def from_json_dict(cls, json_dict: dict) -> CollectResourceCommand:
        """Rebuild the command from its JSON representation."""
        return cls(json_dict["tile"], Item.from_json_dict(json_dict["item"]))

    def to_json_dict(self) -> dict:
        """Serialize, tagging the payload with the command name."""
        json_dict = super().to_json_dict()
        json_dict["command"] = "collect"
        json_dict["tile"] = self._tile
        json_dict["item"] = self._item.to_json_dict()
        return json_dict
class BuildingProcessCommand(ServerCommand):
    """Run one production step of a building: consume the required item,
    add the produced item, and bill the player's energy."""

    def __init__(self, tile: tuple, building_process: BuildingProcess):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_process = building_process

    def _check(self):
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        # (fix: the original fetched the player a second time here; one lookup suffices)
        InventoryRemoveCheck(
            building.inventory, self._building_process.item_required
        ).check(self.check_result)
        InventoryAddCheck(building.inventory, self._building_process.item_result).check(
            self.check_result
        )
        EnergyCheck(player, self._building_process.energy_required).check(
            self.check_result
        )

    def _do(self):
        building = self.town.buildings[self._tile]
        building.inventory.remove_item(self._building_process.item_required)
        building.inventory.add_item(self._building_process.item_result)
        player = self.town.get_player(self.client_id)
        player.energy.value -= self._building_process.energy_required

    def __repr__(self):
        msg = "BuildingProcessCommand ServerCommand {}".format(self._building_process)
        if not self.check_result:
            # check_result is falsy when a check failed — append the details.
            msg += "\n{}".format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild the command from its JSON representation."""
        return cls(
            json_dict["tile"],
            BuildingProcess.from_json_dict(json_dict["building_process"]),
        )

    def to_json_dict(self):
        """Serialize, tagging the payload with the command name."""
        json_dict = super().to_json_dict()
        json_dict["command"] = "building_process"
        json_dict["tile"] = self._tile
        json_dict["building_process"] = self._building_process.to_json_dict()
        return json_dict
class BuyCommand(ServerCommand):
    """Buy one unit of the transaction's item from the building on a tile."""

    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction

    def _check(self):
        # Fix: guard against a missing building before indexing (was an
        # unhandled KeyError); same pattern as BuildingProcessCommand._check.
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        TransactionCheck(building, player, item).check(self.check_result)

    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.remove_item(item)
        player.inventory.add_item(item)

    def __repr__(self):
        msg = "BuyCommand ServerCommand {}".format(self._transaction.item_name)
        if not self.check_result:
            # check_result is falsy when a check failed — append the details.
            msg += "\n{}".format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild the command from its JSON representation."""
        return cls(
            json_dict["tile"],
            BuildingTransaction.from_json_dict(json_dict["transaction"]),
        )

    def to_json_dict(self):
        """Serialize, tagging the payload with the command name."""
        json_dict = super().to_json_dict()
        json_dict["command"] = "buy"
        json_dict["tile"] = self._tile
        json_dict["transaction"] = self._transaction.to_json_dict()
        return json_dict
class SellCommand(ServerCommand):
    """Sell one unit of the transaction's item to the building on a tile."""

    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction

    def _check(self):
        # Fix: guard against a missing building before indexing (was an
        # unhandled KeyError); same pattern as BuildingProcessCommand._check.
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        # Direction is reversed versus BuyCommand: player gives, building receives.
        TransactionCheck(player, building, item).check(self.check_result)

    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.add_item(item)
        player.inventory.remove_item(item)

    def __repr__(self):
        msg = "SellCommand ServerCommand {}".format(self._transaction.item_name)
        if not self.check_result:
            # check_result is falsy when a check failed — append the details.
            msg += "\n{}".format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild the command from its JSON representation."""
        return cls(
            json_dict["tile"],
            BuildingTransaction.from_json_dict(json_dict["transaction"]),
        )

    def to_json_dict(self):
        """Serialize, tagging the payload with the command name."""
        json_dict = super().to_json_dict()
        json_dict["command"] = "sell"
        json_dict["tile"] = self._tile
        json_dict["transaction"] = self._transaction.to_json_dict()
        return json_dict
class BuildBuildingCommand(ServerCommand):
    """Move one construction item from a building's inventory into its
    construction inventory, at an energy cost to the player."""

    ENERGY_COST = 20

    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._item = item
        self._tile = tile

    def _check(self):
        # Fix: guard against a missing building before indexing (was an
        # unhandled KeyError); same pattern as BuildingProcessCommand._check.
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.check_result)
        TransactionCheck(building, building, self._item).check(self.check_result)

    def _do(self):
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        player.energy.value -= BuildBuildingCommand.ENERGY_COST
        building.inventory.remove_item(self._item)
        building.construction_inventory.add_item(self._item)

    def __repr__(self):
        msg = "Build Building ServerCommand {}".format(self._item)
        if not self.check_result:
            # check_result is falsy when a check failed — append the details.
            msg += "\n{}".format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild the command from its JSON representation."""
        return cls(json_dict["tile"], Item.from_json_dict(json_dict["item"]))

    def to_json_dict(self):
        """Serialize, tagging the payload with the command name."""
        json_dict = super().to_json_dict()
        json_dict["command"] = "build_building"
        json_dict["tile"] = self._tile
        json_dict["item"] = self._item.to_json_dict()
        return json_dict
class UpgradeBuildingCommand(ServerCommand):
    """Upgrade a building once its construction inventory is full."""

    def __init__(self, tile: tuple):
        ServerCommand.__init__(self)
        self._tile = tile

    def _check(self):
        # Fix: guard against a missing building before indexing (was an
        # unhandled KeyError); same pattern as BuildingProcessCommand._check.
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if not building.construction_inventory.is_full():
            self.check_result += "construction not finished"

    def _do(self):
        building = self.town.buildings[self._tile]
        building.upgrade()

    def __repr__(self):
        msg = "Upgrade Building ServerCommand {}".format(self._tile)
        if not self.check_result:
            # check_result is falsy when a check failed — append the details.
            msg += "\n{}".format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild the command from its JSON representation."""
        return cls(json_dict["tile"])

    def to_json_dict(self):
        """Serialize, tagging the payload with the command name."""
        json_dict = super().to_json_dict()
        json_dict["command"] = "upgrade_building"
        json_dict["tile"] = self._tile
        return json_dict
class SleepCommand(ServerCommand):
    """Put the player to sleep to regenerate energy (faster inside a cabane)."""

    ENERGY_REGEN_IN_HOUSE = 4
    ENERGY_REGEN_IN_GROUND = 2

    def __init__(self):
        ServerCommand.__init__(self)

    def _check(self):
        tile = self.town.get_player_tile(self.client_id)
        # Sleeping is allowed on open ground or inside a "cabane" only.
        if tile in self.town.buildings and self.town.buildings[tile].name != "cabane":
            self.check_result += "Can't sleep in building"

    def _do(self):
        player = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        # Change player sprite to the sleeping state.
        player.status = "sleep"
        # Default regen on open ground; boosted when sleeping in a cabane.
        in_cabane = (
            tile in self.town.buildings and self.town.buildings[tile].name == "cabane"
        )
        if in_cabane:
            player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE
        else:
            player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND

    def __repr__(self):
        text = "Sleep command. Player id: {}".format(self.client_id)
        if not self.check_result:
            text += "\n{}".format(self.check_result)
        return text

    @classmethod
    def from_json_dict(cls, json_dict: dict) -> SleepCommand:
        """Rebuild the command from its JSON representation (payload carries no data)."""
        return cls()

    def to_json_dict(self):
        """Serialize, tagging the payload with the command name."""
        json_dict = super().to_json_dict()
        json_dict["command"] = "sleep"
        return json_dict
class WakeUpCommand(ServerCommand):
    """Wake a sleeping player and restore the normal energy regeneration rate."""

    def __init__(self):
        ServerCommand.__init__(self)

    def _check(self):
        player = self.town.get_player(self.client_id)
        # AwakenCheck leaves the result truthy when the player is already
        # awake, which is precisely the error case here.
        is_awaken_check = CheckResult()
        AwakenCheck(player).check(is_awaken_check)
        if is_awaken_check:
            self.check_result += "{} is already awake".format(player.name)

    def _do(self):
        player = self.town.get_player(self.client_id)
        player.status = "idle"
        player.energy.reset_regen()

    def __repr__(self):
        text = "Wake up command. Player id: {}".format(self.client_id)
        if not self.check_result:
            text += "\n{}".format(self.check_result)
        return text

    @classmethod
    def from_json_dict(cls, json_dict: dict) -> WakeUpCommand:
        """Rebuild the command from its JSON representation (payload carries no data)."""
        return cls()

    def to_json_dict(self):
        """Serialize, tagging the payload with the command name."""
        json_dict = super().to_json_dict()
        json_dict["command"] = "wakeup"
        return json_dict
class HelpPlayerCommand(ServerCommand):
    """Spend energy to restore one health point of a downed player on the same tile."""

    ENERGY_TO_HELP = 20
    HEALTH_TO_GIVE = 1

    def __init__(self, player_to_help_id):
        ServerCommand.__init__(self)
        self._player_to_help_id = player_to_help_id

    def _check(self):
        helper = self.town.get_player(self.client_id)
        AvailableCheck(helper).check(self.check_result)
        # Both player ids must exist in the town.
        known_players = self.town.players.keys()
        if self.client_id not in known_players:
            self.check_result += "Player {} does not exist".format(self.client_id)
            return
        if self._player_to_help_id not in known_players:
            self.check_result += "Player {} does not exist".format(
                self._player_to_help_id
            )
            return
        # Helping only works at touch range: both players on the same tile.
        if self.town.get_player_tile(self.client_id) != self.town.get_player_tile(
            self._player_to_help_id
        ):
            self.check_result += "Players {} and {} are not in the same tile".format(
                self.client_id, self._player_to_help_id
            )
            return
        # The helper needs enough energy to give first aid...
        EnergyCheck(
            self.town.get_player(self.client_id), HelpPlayerCommand.ENERGY_TO_HELP
        ).check(self.check_result)
        # ...and the patient must actually be down: AvailableCheck leaving the
        # result truthy means they still have health, i.e. no help is needed.
        is_alive_check = CheckResult()
        AvailableCheck(self.town.get_player(self._player_to_help_id)).check(
            is_alive_check
        )
        if is_alive_check:
            self.check_result += "{} has enough health to keep moving".format(
                self._player_to_help_id
            )

    def _do(self):
        helper = self.town.get_player(self.client_id)
        helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP
        patient = self.town.get_player(self._player_to_help_id)
        patient.health.value += HelpPlayerCommand.HEALTH_TO_GIVE

    def __repr__(self):
        text = "HelpPlayerCommand: try to help {}".format(self._player_to_help_id)
        if not self.check_result:
            text += "\n{}".format(self.check_result)
        return text

    @classmethod
    def from_json_dict(cls, json_dict: dict) -> HelpPlayerCommand:
        """Rebuild the command from its JSON representation."""
        return cls(json_dict["player_to_help_id"])

    def to_json_dict(self):
        """Serialize, tagging the payload with the command name."""
        json_dict = super().to_json_dict()
        json_dict["command"] = "help"
        json_dict["player_to_help_id"] = self._player_to_help_id
        return json_dict
class CommandsFactory:
    """Deserialize podsixnet payloads into the matching ServerCommand subclass."""

    # Wire-protocol command name -> command class.
    COMMANDS_DICT = {
        "move": MovePlayerCommand,
        "build": BuildCommand,
        "collect": CollectResourceCommand,
        "building_process": BuildingProcessCommand,
        "buy": BuyCommand,
        "sell": SellCommand,
        "build_building": BuildBuildingCommand,
        "upgrade_building": UpgradeBuildingCommand,
        "help": HelpPlayerCommand,
        "sleep": SleepCommand,
        "wakeup": WakeUpCommand,
    }

    @staticmethod
    def from_podsixnet(podsixnet_dict):
        """Build the command instance named in the payload and attach the
        client id and previous check result carried alongside it."""
        command_name = podsixnet_dict["command"]
        if command_name not in CommandsFactory.COMMANDS_DICT:
            raise NotImplementedError
        command = CommandsFactory.COMMANDS_DICT[command_name].from_json_dict(
            podsixnet_dict
        )
        command.client_id = podsixnet_dict["client_id"]
        command.check_result = CheckResult.from_json_dict(
            podsixnet_dict["check_result"]
        )
        return command
|
3,887 | cc1b3c3c65e8832316f72cbf48737b21ee4a7799 | ###########################################################################
# This file provides maintenance on the various language files
# 1. Create new "xx/cards_xx.json" files that have entries ordered as:
# a. the card_tag entries in "cards_db.json"
# b. the group_tag entries as found in "cards_db.json"
# c. the super group entries (grouping across all expansions"
# d. any unused entries existing in the file (assumed to be work in progress)
#
# 2. Create new "sets_db.json" and "xx/cards_xx.json" with entries sorted alphabetically
#
# All output is in the designated output directory. Original files are not overwritten.
###########################################################################
import os
import os.path
import io
import codecs
import json
from shutil import copyfile
import argparse
import collections
LANGUAGE_DEFAULT = "en_us" # default language, which takes priority
LANGUAGE_XX = "xx" # language for starting a translation
def get_lang_dirs(path):
    """Return the names of subdirectories of *path* that look like language packs.

    A language directory ``xx`` is valid when it contains both
    ``cards_xx.json`` and ``sets_xx.json``.
    """
    languages = []
    for name in os.listdir(path):
        lang_dir = os.path.join(path, name)
        if not os.path.isdir(lang_dir):
            continue
        required = (
            os.path.join(lang_dir, "cards_" + name + ".json"),
            os.path.join(lang_dir, "sets_" + name + ".json"),
        )
        if all(os.path.isfile(f) for f in required):
            languages.append(name)
    return languages
def get_json_data(json_file_path):
    """Load and return the JSON content of *json_file_path*.

    The file is read as UTF-8; an empty/falsy document is treated as an error.
    """
    print(("reading {}".format(json_file_path)))
    with codecs.open(json_file_path, "r", "utf-8") as json_file:
        data = json.load(json_file)
    assert data, "Could not load json at: '%r' " % json_file_path
    return data
def json_dict_entry(entry, separator=""):
    """Return *entry* as a nicely formatted json dict body.

    The enclosing ``{}`` and trailing whitespace are removed so the result can
    be concatenated into a larger hand-assembled json document.
    """
    json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True)
    # Drop the outer braces, then trailing whitespace, then prepend the separator.
    return separator + json_data.strip("{}").rstrip()
# Multikey sort
# see: http://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
def multikeysort(items, columns):
    """Sort a list of dicts by several keys; the first column is most significant."""
    from operator import itemgetter

    # Python's sort is stable, so sorting repeatedly from the least to the most
    # significant key yields the same order as one multi-key comparison.
    result = items
    for column in reversed(columns):
        result = sorted(result, key=itemgetter(column))
    return result
def main(args):
    """Normalize the card database and every language pack.

    Reads the master ``*_db.json`` files plus each language's
    ``xx/types_xx.json`` / ``xx/cards_xx.json`` / ``xx/sets_xx.json`` from
    ``args.card_db_dir``, reorders and backfills their entries (missing
    translations are seeded from the default language), and writes the
    results under ``args.output_dir``.  Original files are never overwritten.
    """
    ###########################################################################
    # Get all the languages, and place the default language first in the list
    ###########################################################################
    languages = get_lang_dirs(args.card_db_dir)
    languages.remove(LANGUAGE_DEFAULT)
    languages.insert(0, LANGUAGE_DEFAULT)
    if LANGUAGE_XX not in languages:
        languages.append(LANGUAGE_XX)
    print("Languages:")
    print(languages)
    print()
    ###########################################################################
    # Make sure the directories exist to hold the output
    ###########################################################################
    # main output directory
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    # each language directory
    for lang in languages:
        # Make sure the directory is there to hold the file
        lang_dir = os.path.join(args.output_dir, lang)
        if not os.path.exists(lang_dir):
            os.makedirs(lang_dir)
    ###########################################################################
    # Get the types_db information
    # Store in a list in the order found in types[]. Ordered by card_type
    # 1. card_tags, 2. group_tags, 3. super groups
    ###########################################################################
    type_parts = set()
    # Get the card data
    type_data = get_json_data(os.path.join(args.card_db_dir, "types_db.json"))
    # Sort the cards by cardset_tags, then card_tag
    sorted_type_data = multikeysort(type_data, ["card_type"])
    with io.open(
        os.path.join(args.output_dir, "types_db.json"), "w", encoding="utf-8"
    ) as f:
        json.dump(sorted_type_data, f, indent=4, ensure_ascii=False)
    # Every distinct word used in any card_type.
    type_parts = list(set().union(*[set(t["card_type"]) for t in sorted_type_data]))
    type_parts.sort()
    print("Unique Types:")
    print(type_parts)
    print()
    ###########################################################################
    # Get the labels_db information
    # Store in a list in the order found.
    ###########################################################################
    all_labels = []
    # Get the label data
    label_data = get_json_data(os.path.join(args.card_db_dir, "labels_db.json"))
    all_labels = list(set().union(*[set(label["names"]) for label in label_data]))
    with io.open(
        os.path.join(args.output_dir, "labels_db.json"), "w", encoding="utf-8"
    ) as f:
        json.dump(label_data, f, indent=4, ensure_ascii=False)
    all_labels.sort()
    print("Labels: ")
    print(all_labels)
    print()
    ###########################################################################
    # Fix up all the xx/types_xx.json files
    # Place entries in alphabetical order
    # If entries don't exist:
    #    If the default language, set from information in the "types_db.json" file,
    #    If not the default language, set based on information from the default language.
    # Lastly, keep any extra entries that are not currently used, just in case needed
    # in the future or is a work in progress.
    ###########################################################################
    for lang in languages:
        lang_file = "types_" + lang + ".json"
        fname = os.path.join(args.card_db_dir, lang, lang_file)
        if os.path.isfile(fname):
            lang_type_data = get_json_data(fname)
        else:
            lang_type_data = {}
        for t in sorted(type_parts):
            if t not in lang_type_data:
                if lang == LANGUAGE_DEFAULT:
                    lang_type_data[t] = t
                    lang_type_default = lang_type_data
                else:
                    lang_type_data[t] = lang_type_default[t]
        with io.open(
            os.path.join(args.output_dir, lang, lang_file), "w", encoding="utf-8"
        ) as f:
            json.dump(lang_type_data, f, indent=4, ensure_ascii=False)
        if lang == LANGUAGE_DEFAULT:
            lang_type_default = lang_type_data  # Keep for later languages
    ###########################################################################
    # Get the cards_db information
    # Store in a list in the order found in cards[].  Ordered as follows:
    # 1. card_tags, 2. group_tags, 3. super groups
    ###########################################################################
    # Get the card data
    card_data = get_json_data(os.path.join(args.card_db_dir, "cards_db.json"))
    cards = set(card["card_tag"] for card in card_data)
    groups = set(card["group_tag"] for card in card_data if "group_tag" in card)
    super_groups = set(["events", "landmarks"])
    # Sort the cardset_tags
    for card in card_data:
        card["cardset_tags"].sort()
        # But put all the base cards together by moving to front of the list
        if "base" in card["cardset_tags"]:
            card["cardset_tags"].remove("base")
            card["cardset_tags"].insert(0, "base")
    # Sort the cards by cardset_tags, then card_tag
    sorted_card_data = multikeysort(card_data, ["cardset_tags", "card_tag"])
    with io.open(
        os.path.join(args.output_dir, "cards_db.json"), "w", encoding="utf-8"
    ) as lang_out:
        json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False)
    # maintain the sorted order, but expand with groups and super_groups
    cards = [c["card_tag"] for c in sorted_card_data]
    cards.extend(sorted(groups))
    cards.extend(sorted(super_groups))
    print("Cards:")
    print(cards)
    print()
    ###########################################################################
    # Fix up all the cards_xx.json files
    # Place entries in the same order as given in "cards_db.json".
    # If entries don't exist:
    #    If the default language, set based on information in the "cards_db.json" file,
    #    If not the default language, set based on information from the default language.
    # Lastly, keep any extra entries that are not currently used, just in case needed
    # in the future or is a work in progress.
    ###########################################################################
    for lang in languages:
        # construct the cards json file name
        lang_file = "cards_" + lang + ".json"
        fname = os.path.join(args.card_db_dir, lang, lang_file)
        if os.path.isfile(fname):
            lang_data = get_json_data(fname)
        else:
            lang_data = {}
        sorted_lang_data = collections.OrderedDict()
        fields = ["description", "extra", "name"]
        for card_tag in cards:
            lang_card = lang_data.get(card_tag)
            # The xx language is always reseeded from the default language.
            if not lang_card or lang == LANGUAGE_XX:
                # Card is missing, need to add it
                lang_card = {}
                if lang == LANGUAGE_DEFAULT:
                    # Default language gets bare minimum.  Really need to add by hand.
                    lang_card["extra"] = ""
                    # Fix: was `card`, the leftover loop variable from the
                    # cardset_tags loop above (a whole card dict), not the tag.
                    lang_card["name"] = card_tag
                    lang_card["description"] = ""
                    # Copy the list so each card owns its own "untranslated"
                    # entry instead of aliasing one shared list object.
                    lang_card["untranslated"] = list(fields)
                    lang_default = lang_data
                else:
                    # All other languages should get the default languages' text
                    lang_card["extra"] = lang_default[card_tag]["extra"]
                    lang_card["name"] = lang_default[card_tag]["name"]
                    lang_card["description"] = lang_default[card_tag]["description"]
                    lang_card["untranslated"] = list(fields)
            else:
                # Card exists, figure out what needs updating (don't update default language)
                if lang != LANGUAGE_DEFAULT:
                    if "untranslated" in lang_card:
                        # Has an 'untranslated' field.  Process accordingly
                        if not lang_card["untranslated"]:
                            # It is empty, so just remove it
                            del lang_card["untranslated"]
                        else:
                            # If a field remains untranslated, then replace with the default languages copy
                            for field in fields:
                                if field in lang_card["untranslated"]:
                                    lang_card[field] = lang_default[card_tag][field]
                    else:
                        # Need to create the 'untranslated' field and update based upon existing fields
                        untranslated = []
                        for field in fields:
                            if field not in lang_data[card_tag]:
                                lang_card[field] = lang_default[card_tag][field]
                                untranslated.append(field)
                        if untranslated:
                            # only add if something is still needing translation
                            lang_card["untranslated"] = untranslated
            # Mark the card as referenced by cards_db so the leftover pass
            # below can tell used entries from unused ones.
            lang_card["used"] = True
            sorted_lang_data[card_tag] = lang_card
        unused = [c for c in lang_data.values() if "used" not in c]
        print(
            f'unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if "used" in c])}'
        )
        print([c["name"] for c in unused])
        # Now keep any unused values just in case needed in the future
        for card_tag in lang_data:
            lang_card = lang_data.get(card_tag)
            if "used" not in lang_card:
                if lang != LANGUAGE_XX:
                    lang_card["untranslated"] = [
                        "Note: This card is currently not used."
                    ]
                sorted_lang_data[card_tag] = lang_card
            else:
                # Strip the bookkeeping marker before writing the file.
                del lang_card["used"]
        # Process the file
        with io.open(
            os.path.join(args.output_dir, lang, lang_file), "w", encoding="utf-8"
        ) as lang_out:
            json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False)
        if lang == LANGUAGE_DEFAULT:
            lang_default = lang_data  # Keep for later languages
    ###########################################################################
    # Fix up the sets_db.json file
    # Place entries in alphabetical order
    ###########################################################################
    lang_file = "sets_db.json"
    set_data = get_json_data(os.path.join(args.card_db_dir, lang_file))
    with io.open(
        os.path.join(args.output_dir, lang_file), "w", encoding="utf-8"
    ) as lang_out:
        json.dump(set_data, lang_out, sort_keys=True, indent=4, ensure_ascii=False)
    print("Sets:")
    print(set(set_data))
    print()
    ###########################################################################
    # Fix up all the xx/sets_xx.json files
    # Place entries in alphabetical order
    # If entries don't exist:
    #    If the default language, set from information in the "sets_db.json" file,
    #    If not the default language, set based on information from the default language.
    ###########################################################################
    for lang in languages:
        lang_file = "sets_" + lang + ".json"
        fname = os.path.join(args.card_db_dir, lang, lang_file)
        if os.path.isfile(fname):
            lang_set_data = get_json_data(fname)
        else:
            lang_set_data = {}
        for s in sorted(set_data):
            if s not in lang_set_data:
                lang_set_data[s] = {}
                if lang == LANGUAGE_DEFAULT:
                    lang_set_data[s]["set_name"] = s.title()
                    lang_set_data[s]["text_icon"] = set_data[s]["text_icon"]
                    if "short_name" in set_data[s]:
                        lang_set_data[s]["short_name"] = set_data[s]["short_name"]
                    if "set_text" in set_data[s]:
                        lang_set_data[s]["set_text"] = set_data[s]["set_text"]
                else:
                    lang_set_data[s]["set_name"] = lang_default[s]["set_name"]
                    lang_set_data[s]["text_icon"] = lang_default[s]["text_icon"]
                    if "short_name" in lang_default[s]:
                        lang_set_data[s]["short_name"] = lang_default[s]["short_name"]
                    if "set_text" in lang_default[s]:
                        lang_set_data[s]["set_text"] = lang_default[s]["set_text"]
            else:
                # Entry exists; backfill any fields missing vs the default language.
                if lang != LANGUAGE_DEFAULT:
                    for x in lang_default[s]:
                        if x not in lang_set_data[s] and x != "used":
                            lang_set_data[s][x] = lang_default[s][x]
        if lang == LANGUAGE_DEFAULT:
            lang_default = lang_set_data  # Keep for later languages
        with io.open(
            os.path.join(args.output_dir, lang, lang_file), "w", encoding="utf-8"
        ) as lang_out:
            json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4)
    ###########################################################################
    # bonuses_xx files
    ###########################################################################
    for lang in languages:
        # Special case for xx.  Reseed from default language
        fromLanguage = lang
        if lang == LANGUAGE_XX:
            fromLanguage = LANGUAGE_DEFAULT
        copyfile(
            os.path.join(
                args.card_db_dir, fromLanguage, "bonuses_" + fromLanguage + ".json"
            ),
            os.path.join(args.output_dir, lang, "bonuses_" + lang + ".json"),
        )
    ###########################################################################
    # translation.txt
    ###########################################################################
    copyfile(
        os.path.join(args.card_db_dir, "translation.md"),
        os.path.join(args.output_dir, "translation.md"),
    )
    # Since xx is the starting point for new translations,
    # make sure xx has the latest copy of translation.txt
    copyfile(
        os.path.join(args.card_db_dir, LANGUAGE_XX, "translation.txt"),
        os.path.join(args.output_dir, LANGUAGE_XX, "translation.txt"),
    )
if __name__ == "__main__":
    # Default paths are resolved relative to this script's location.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--card_db_dir",
        default=os.path.join(script_dir, "..", "src", "domdiv", "card_db"),
        help="directory of card data",
    )
    parser.add_argument(
        "--output_dir",
        default=os.path.join(script_dir, ".", "card_db"),
        help="directory for output data",
    )
    main(parser.parse_args())
|
3,888 | 263347d1d445643f9c84e36a8cbb5304581ebaf6 | from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from .views import TestView, index, setup_fraud_detection, verify_testing_works
# URL routes for this app: function-based test/ML endpoints plus one
# class-based view.
urlpatterns = [
    path('test/<str:name>/', index, name='index'),
    path('ml/setup/', setup_fraud_detection, name='fraud_detection_setup'),
    path('ml/verify/', verify_testing_works, name='fraud_verification'),
    # csrf_exempt lets clients POST to the class-based view without a CSRF token.
    path('class/<str:name>/', csrf_exempt(TestView.as_view()), name='test_class'),
    # path('mine/', MyView.as_view(), name='my-view'),
]
3,889 | 8c458d66ab2f9a1bf1923eecb29c3c89f2808d0b | '''
www.autonomous.ai
Phan Le Son
plson03@gmail.com
'''
import speech_recognition as sr
import pyaudio
from os import listdir
from os import path
import time
import wave
import threading
import numpy as np
import BF.BeamForming as BF
import BF.Parameter as PAR
import BF.asr_wer as wer
import BF.mic_array_read as READ
import BF.DOA as DOA
global flgLoad
flgGoogle = False
flgRefReady = False
flgPlayOn = False
flgFinish = False
CHUNK_OUT = 1024
reftext = None
filename = None
CHANNELS = 2
CHUNK = 1024 * 4 # PAR.m*PAR.N/CHANNELS # 1024*4
RATE = 64000 # sample rate
RECORD_SECONDS = 15
idxDir = 6
Audio_Data = np.zeros((np.floor(RECORD_SECONDS * RATE / 4), PAR.m))
Audio_SD = np.zeros(np.floor(RECORD_SECONDS * RATE / 4))
ind = 0
numCHUNK = np.floor(RATE * RECORD_SECONDS / CHUNK)
filesave = open("log.txt",'w')
p = pyaudio.PyAudio()
r = sr.Recognizer()
MIC_ARRAY = READ.Mic_Array_Read()
LOC = DOA.DOA_MicArray()
BEAM = BF.BeamFormingObj(Weight_Update=False)
class PlayOut(threading.Thread):
    """Thread that plays every wav file in ./en and fetches its reference transcript.

    Synchronisation with the recording loop in the main module is done through
    the module-level flags (flgPlayOn, flgRefReady, flgGoogle, flgFinish).
    """

    def __init__(self):
        threading.Thread.__init__(self)
        # Every regular file in ./en is assumed to be a playable wav file.
        self.wavefiles = [f for f in listdir('./en') if path.isfile(path.join('./en', f))]

    def run(self):
        for wav in list(self.wavefiles):
            global flgPlayOn, flgFinish, reftext, filename, flgRefReady, flgGoogle
            filename = wav
            print("Playing:" + filename)
            # flgPlayOn tells the recording loop to start capturing frames.
            flgPlayOn = True
            flgGoogle = False
            time.sleep(0.5)
            WAV_FILE = path.join("./en", wav)
            wf = wave.open(WAV_FILE, 'rb')
            stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                            channels=wf.getnchannels(),
                            rate=wf.getframerate(),
                            output=True)
            # read data
            data = wf.readframes(CHUNK_OUT)
            while len(data) > 0:
                stream.write(data)
                data = wf.readframes(CHUNK_OUT)
            wf.close()
            # stop stream
            stream.stop_stream()
            stream.close()
            time.sleep(1)
            flgPlayOn = False
            # Transcribe the clean source wav to use as the reference text.
            with sr.WavFile(WAV_FILE) as source:
                audio = r.record(source)  # read the entire WAV file
            flgRefReady = False
            # recognize speech using Google Speech Recognition
            try:
                # for testing purposes, we're just using the default API key
                # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
                # instead of `r.recognize_google(audio)`
                reftext = r.recognize_google(audio)
                print("correct one:" + str(reftext.encode('utf-8')))
                filesave.write("correct one:" + str(reftext.encode('utf-8')))
                filesave.write('\r\n')
            except sr.UnknownValueError:
                print("Google Speech Recognition could not understand audio")
            except sr.RequestError as e:
                print("Could not request results from Google Speech Recognition service; {0}".format(e))
            flgRefReady = True
            # Wait until the main loop has finished scoring this clip.
            while (flgGoogle == False):
                time.sleep(0.01)
        # All clips played: signal the main loop and release the audio device.
        flgFinish = True
        p.terminate()
# Disabled one-off calibration: listen to background noise before starting.
if False:
    Frames_1024 = MIC_ARRAY.Read()
    while (BEAM.ListenBGNoise(Frames_1024)==0):
        time.sleep(0.0001)
# Start the playback / reference-transcription thread.
threadLock = threading.Lock()
thread_play = PlayOut()
thread_play.start()
# Main loop: for each clip played by PlayOut, record through the mic array,
# save per-channel and beamformed wavs, then score each recording against the
# reference transcript with Google speech recognition.
while (flgFinish == False):
    time.sleep(0.01)
    print("**** recording *******")
    ind = 0
    flgLoad = [True]*PAR.CNTBUF
    MIC_ARRAY.ForgetOldData()
    # Capture frames for as long as the playback thread keeps flgPlayOn set.
    while (flgPlayOn == True):
        Frames_1024 = MIC_ARRAY.Read()
        '''Sound Source Localization'''
        idxDir = LOC.Update(Frames_1024)
        Beam_Audio = BEAM.BFCalc(Frames_1024, 1,Post_Filtering=False)
        # Storage audio output
        Audio_Data[ind:ind + PAR.N, 0:PAR.m] = Frames_1024[:, 0:PAR.m]
        Audio_SD[ind:ind + PAR.N] = Beam_Audio
        ind = ind + PAR.N
    print("**** done recording **")
    # Save the beamformed output as <clip>SD.wav (16-bit mono, 16 kHz).
    raw_data = Audio_SD[:ind].astype(np.int16)
    byte_data = raw_data.tostring()
    WAVE_OUTPUT_BF_SD = filename + "SD.wav"
    wf = wave.open(WAVE_OUTPUT_BF_SD, 'wb')
    wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))
    wf.writeframes(byte_data)
    wf.close()
    # Save each raw microphone channel as <clip>channel<i>.wav.
    for i in range(0, PAR.m):
        raw_data = Audio_Data[:ind, i].astype(np.int16)
        byte_data = raw_data.tostring()
        WAVE_OUTPUT_FILENAME_I = filename + "channel" + str(i) + ".wav"
        Data_Audio = "Audio_Channel" + str(i)  # NOTE(review): unused variable
        wf = wave.open(WAVE_OUTPUT_FILENAME_I, 'wb')
        wf.setparams((1, 2, 16000, 0, 'NONE', 'NONE'))  # (nchannels, sampwidth, framerate, nframes, comptype, compname
        wf.writeframesraw(byte_data)
        wf.close()
    # Wait for the playback thread to obtain the reference transcript.
    while (flgRefReady == False):
        time.sleep(0.01)
    if True:
        # Average word-error-rate over the 8 raw microphone channels.
        ResSum=0
        for i in range(0, 8):
            file = filename + "channel" + str(i) + ".wav"
            WAV_FILE = path.join(path.dirname(path.realpath(__file__)), file)
            with sr.WavFile(WAV_FILE) as source:
                audio = r.record(source)  # read the entire WAV file
            # recognize speech using Google Speech Recognition
            try:
                # for testing purposes, we're just using the default API key
                # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
                # instead of `r.recognize_google(audio)`
                testtext = r.recognize_google(audio)
                print("Google Speech Recognition for mic " + str(i) + "::::::::::" + str(testtext.encode('utf-8')))
                filesave.write(" mic " + str(i) + "::::::::::" + str(testtext.encode('utf-8')))
                filesave.write('\r\n')
                res = wer.wer(reftext, testtext)
                ResSum+= (1.0/8.0)*res
                print('Word Error Rate: {0:.04f}'.format(res))
                filesave.write('Word Error Rate: {0:.04f}'.format(res))
                filesave.write('\r\n')
            except sr.UnknownValueError:
                print("Google Speech Recognition could not understand audio")
                # A failed recognition counts as full error for this channel.
                ResSum+= (1.0/8.0)
            except sr.RequestError as e:
                print("Could not request results from Google Speech Recognition service; {0}".format(e))
                ResSum+= (1.0/8.0)
        # NOTE(review): "Everage" is a typo for "Average" in the log text.
        filesave.write('Word Error Rate Everage: {0:.04f}'.format(ResSum))
        filesave.write('\r\n')
    # Score the beamformed output the same way.
    WAV_FILE = path.join(path.dirname(path.realpath(__file__)), filename + "SD.wav")
    with sr.WavFile(WAV_FILE) as source:
        audio = r.record(source)  # read the entire WAV file
    # recognize speech using Google Speech Recognition
    try:
        # for testing purposes, we're just using the default API key
        # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
        # instead of `r.recognize_google(audio)`
        testtext = r.recognize_google(audio)
        print("Beam-forming result :::::::::::::::::::::::::" + str(testtext.encode('utf-8')))
        filesave.write("Beam-forming result :::::::::::::::::::::::::" + str(testtext.encode('utf-8')))
        filesave.write('\r\n')
        res = wer.wer(reftext, testtext)
        print('Word Error Rate: {0:.04f}'.format(res))
        filesave.write('Word Error Rate: {0:.04f}'.format(res))
        filesave.write('\r\n')
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    # Release the playback thread so it moves on to the next clip.
    flgGoogle = True
    time.sleep(0.03)
# All clips processed: shut everything down.
LOC.Stop()
MIC_ARRAY.Stop_Read()
filesave.close()
'''
# recognize speech using Sphinx
try:
print("Sphinx thinks you said " + r.recognize_sphinx(audio))
except sr.UnknownValueError:
print("Sphinx could not understand audio")
except sr.RequestError as e:
print("Sphinx error; {0}".format(e))
'''
|
3,890 | 606a6e7ecc58ecbb11aa53602599e671514bc537 | import torch.utils.data
import torch
import math
from util.helpers import *
from collections import defaultdict as ddict
class _Collate:
    """Collate callable for the evaluation DataLoader.

    Stacks the raw Python batch into a single tensor and drops any
    singleton dimensions so a one-element batch collapses to its row.
    """

    def __init__(self):
        pass

    def collate(self, batch):
        # numpy first (handles nested lists), then wrap as a torch tensor.
        stacked = np.array(batch)
        as_tensor = torch.from_numpy(stacked)
        return torch.squeeze(as_tensor)
class PR:
    """Ranking evaluator for link prediction: for each test relation it scores
    every (e1, e2) entity pair, filters out pairs already known from the
    train/valid splits, and reports MAP / HITS over the top-k ranks.

    Names such as ``ddict``, ``np``, ``Config``, ``Coder`` and
    ``read_triplets`` come from ``util.helpers`` via star import.
    """
    # Shared configuration/state, populated by fromConfig() and init().
    dataset = None
    eval_data = None
    model = None
    device = None
    most_frequent_rels = None
    test_data = None
    train_data = None
    valid_data = None
    eval_test_data = None
    topk = None
    def init(self, data):
        """Move the model to the evaluation device and build the DataLoader.

        ``data`` is a list of per-relation triple lists (see evaluate()).
        """
        self.model = self.model.to(self.device)
        collate_fn = _Collate()
        self.eval_loader = torch.utils.data.DataLoader(
            data,
            Config.eval_batch_size, shuffle=False,
            pin_memory=Config.pin_memory, num_workers=Config.loader_num_workers,
            collate_fn=collate_fn.collate)
    def count_e1_e2_by_relation(self, data):
        """Return [(relation, pair_count), ...] sorted by descending count."""
        rel_map = ddict(int)
        for r in data.keys():
            rel_map[r] = len(data[r])
        count_pairs_by_relation = rel_map.items()
        # Negated key => largest counts first.
        count_pairs_by_relation = sorted(count_pairs_by_relation, key=lambda x: -x[1])
        return count_pairs_by_relation
    # computes the position of a tuple for the flattened 1d score matrix
    def convert_idx_to_1d(self, tuples_r, n=None):
        """Map ([rows], [cols]) index arrays to flat positions in an n*n matrix.

        Defaults ``n`` to the model's entity count (the score matrix side).
        """
        if n is None:
            n = self.model.num_entities
        pos_1d = []
        row_idx, column_idx = tuples_r
        for i in range(len(row_idx)):
            pos_1d.append(row_idx[i] * n + column_idx[i])
        return pos_1d
    def evaluate(self, epoch, logger):
        """Run the full ranking evaluation for one epoch.

        Returns (avg_map, avg_hits); also logs them via ``logger`` when given.
        Known train pairs (and valid pairs when evaluating on the test split)
        are masked to -inf before ranking, i.e. "filtered" evaluation.
        """
        #prepare data
        idx_train = ddict(list)
        for e1, r, e2 in self.train_data:
            idx_train[r].append((e1, e2))
        if self.eval_test_data:
            idx_valid = ddict(list)
            for e1, r, e2 in self.valid_data:
                idx_valid[r].append((e1, e2))
        idx_test = ddict(list)
        for e1, r, e2 in self.test_data:
            idx_test[r].append((e1, e2))
        tuples_by_relation = self.count_e1_e2_by_relation(idx_test)
        relations = np.array([x[0] for x in tuples_by_relation])
        #tuples_count = np.array([x[1] for x in tuples_by_relation])
        # speedup grid search
        if self.most_frequent_rels > 0:
            print("Evaluating on {} most frequent relations...".format(self.most_frequent_rels))
            relations = relations[:self.most_frequent_rels]
        prepare_test = ddict(list)
        for e1, r, e2 in self.test_data:
            prepare_test[r].append([e1, r, e2])
        # sorted data
        prepare_test_sorted = ddict(list)
        for r in relations:
            prepare_test_sorted[r].append(prepare_test[r])
        eval_data_prepared = [triple_list for r, triple_list in prepare_test_sorted.items()]
        ranks_by_r = ddict(list)
        num_true_triples = ddict(list)
        self.init(eval_data_prepared)
        # One batch per relation: the collate fn squeezes a single-triple
        # relation down to a 1-D tensor, hence the shape check below.
        for i, batch in enumerate(self.eval_loader):
            batch = batch.to(self.device)
            r = None
            if len(batch.shape) >= 2:
                r_tensor = batch[0][1]
                r = batch[0][1].item()
            else:
                # only one test triple for a given relation
                r_tensor = batch[1]
                r = batch[1].item()
            print("Evaluating: {} Progress: {}%".format(r, round(i/len(self.eval_loader) * 100, 2)))
            scores = ddict(list)
            score_matrix = self.model.score_matrix_r(r_tensor)
            scores[r].append(score_matrix)
            # ----- FILTERING -----
            # all e1, e2 for a given relation in test, validation data
            tuples_r_test = np.array(prepare_test_sorted[r][0])
            tuples_r_test = [tuples_r_test[:,0], tuples_r_test[:,2]]
            tuples_r_train = np.array(idx_train[r])
            tuples_r_train = [tuples_r_train[:,0], tuples_r_train[:,1]]
            score_matrix[tuples_r_train] = -math.inf # Filter training set out
            # Filter validation set out
            if self.eval_test_data:
                tuples_r_valid = np.array(idx_valid[r])
                if (len(tuples_r_valid) > 0):
                    tuples_r_valid = [tuples_r_valid[:, 0], tuples_r_valid[:, 1]]
                    score_matrix[tuples_r_valid] = -math.inf
            # ---- /FILTERING -----
            test_tuples_r_1d = self.convert_idx_to_1d(tuples_r_test)
            num_true_triples[r] = len(test_tuples_r_1d)
            test_tuples_r_1d_tensor = torch.squeeze(torch.LongTensor([test_tuples_r_1d]))
            topk = self.compute_topk(score_matrix, test_tuples_r_1d_tensor)
            ranks = topk.cpu().data.numpy()
            if len(ranks.shape) > 0:
                ranks = np.sort(ranks)
            print(ranks)
            ranks_by_r[r].append(ranks)
            print("-----------------------")
        avg_map, avg_hits = self.metrics(ranks_by_r, num_true_triples)
        print("TOTAL MAP: {} ".format(avg_map))
        print("TOTAL HITS: {}".format(avg_hits))
        # save results
        if logger is not None:
            avg_map = round(avg_map, 4)
            avg_hits = round(avg_hits, 4)
            logger.log_result(avg_map, avg_hits, epoch, "a")
            logger.compare_best(avg_map, avg_hits, epoch, "_best", self.model)
        return avg_map, avg_hits
    def compute_topk(self, score_matrix, tuples_r_1d):
        """Return 1-based ranks of the true test pairs within the top-k scores.

        True pairs that fall outside the top-k are dropped (the list
        comprehension keeps only matches found in ``sorted_k_indexs``).
        NOTE(review): when the flattened matrix has <= topk+1 entries,
        ``sorted_k_indexs`` is never assigned and the next line raises
        UnboundLocalError — confirm matrices are always larger than topk.
        """
        score_matrix = score_matrix.reshape((1, -1)).flatten()
        if len(score_matrix) > self.topk+1:
            sorted_k_values, sorted_k_indexs = torch.topk(score_matrix, self.topk, largest=True, sorted=True)
        other = torch.zeros(len(sorted_k_indexs)).long().to(self.device)
        tuples_r_1d = tuples_r_1d.to(self.device)
        if len(tuples_r_1d.size()) > 0:
            check = [torch.where(sorted_k_indexs == t, sorted_k_indexs, other) for t in tuples_r_1d if len(torch.nonzero(sorted_k_indexs == t)) > 0]
        else:
            check = [torch.where(sorted_k_indexs == tuples_r_1d, sorted_k_indexs, other)]
        # nonzero position + 1 converts a 0-based top-k index into a rank.
        ranks = [torch.nonzero(t)+1 for t in check]
        if len(ranks) == 1: # one or zero elements in ranks
            ranks = ranks[0] if len(ranks[0].size()) <= 1 else ranks[0][0]
        else:
            ranks = torch.LongTensor(ranks).to(self.device)
        return ranks
    def metrics(self, ranks_by_relation, num_true_triples):
        """Compute MAP and HITS (both as percentages) from per-relation ranks.

        Precision at each hit is (i+1)/rank over the sorted rank list;
        normalization caps each relation's contribution at topk.
        """
        total_precision = 0
        normalization = 0
        total_hits = 0
        for r, ranks in ranks_by_relation.items():
            total_hits += len(ranks[0])
            normalization += min(num_true_triples[r], self.topk)
            for idx, rank in enumerate(ranks[0]):
                total_precision += (idx + 1) / rank
        avg_map = (total_precision / normalization) * 100
        avg_hits = (total_hits / normalization) * 100
        return avg_map, avg_hits
    @staticmethod
    def fromConfig(model, dataset):
        """Build a PR evaluator from the global Config and on-disk split files."""
        evaluator = PR()
        # NOTE(review): this calls dataset.load() exactly when dataset is None,
        # which would raise AttributeError — the condition looks inverted.
        # Confirm against the caller before changing.
        if dataset is None:
            evaluator.dataset = dataset.load()
        else:
            evaluator.dataset = dataset
        evaluator.device = torch.device(Config.eval_device)
        torch.set_num_threads(Config.num_threads)
        evaluator.model = model
        coder = Coder()
        data_dir = Config.data_dir
        dataset = Config.dataset
        train_triples = read_triplets(data_dir + Config.dataset + "/" + Config.raw_split_files['train'], None)
        train_triples = coder.construct_encoder(train_triples)
        test_triples = read_triplets(data_dir + dataset + "/" + Config.raw_split_files['test'], coder)
        test_triples = coder.construct_encoder(test_triples)
        valid_triples = read_triplets(data_dir + dataset + "/" + Config.raw_split_files['valid'], coder)
        valid_triples = coder.construct_encoder(valid_triples)
        evaluator.train_data = train_triples
        evaluator.eval_test_data = Config.eval_test_data
        if Config.eval_test_data: # use test set for evaluation, training and validation split for filtering
            evaluator.test_data = test_triples
            evaluator.valid_data = valid_triples
        else: # use validation set for evaluation and training set for filtering
            evaluator.test_data = valid_triples
        evaluator.most_frequent_rels = Config.most_frequent_rels
        evaluator.topk = Config.topk
        return evaluator
|
3,891 | d133a07f69d2dadb5559d881b01050abb2a9602b | #!/usr/bin/env python
# ! -*- coding: utf-8 -*-
'''
@Time : 2020/6/4 16:33
@Author : MaohuaYang
@Contact : maohuay@hotmail.com
@File : pinganFudan-GUI.py
@Software: PyCharm
'''
import time
import requests
import tkinter as tk
from login import Ehall
def set_win_center(root, curWidth='', curHight=''):
    """Resize *root* to the given size and centre it on the screen.

    Either dimension may be omitted (falsy), in which case the window's
    current reported size is used instead.

    :param root: the Tk root window (or any object with the same methods)
    :param curWidth: desired width in pixels, optional
    :param curHight: desired height in pixels, optional
    """
    width = curWidth if curWidth else root.winfo_width()
    height = curHight if curHight else root.winfo_height()
    # maxsize() reports the usable screen dimensions.
    screen_w, screen_h = root.maxsize()
    # Offsets that place the window's centre at the screen's centre.
    offset_x = (screen_w - width) / 2
    offset_y = (screen_h - height) / 2
    root.geometry('%dx%d+%d+%d' % (width, height, offset_x, offset_y))
def sign_up(ehall, address_info):
    """Submit the daily health report, reusing the previously saved form.

    Fetches the last submitted form from the get-info endpoint, overwrites
    its address fields with *address_info*, and posts the merged form back.

    :param ehall: logged-in Ehall client exposing ``session`` and ``headers``
    :param address_info: dict of address fields to overwrite in the form
    :return: the decoded JSON response of the save endpoint
    """
    info_url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'
    info_resp = ehall.session.get(info_url, headers=ehall.headers, verify=False)
    payload = info_resp.json()

    save_url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'
    payload['d']['info'].update(address_info)
    form = payload['d']['info']
    save_resp = ehall.session.post(save_url, data=form, verify=False, headers=ehall.headers)
    return save_resp.json()
def main():
    """Build and run the DailyFudan Tk GUI.

    Lays out credential and address entry fields on the left, a read-only
    scrolling log on the right, and submit/exit buttons; blocks in
    ``root.mainloop()`` until the window is closed.
    """
    root = tk.Tk()
    root.title("DailyFudan")
    # Fixed 700x350 window, centred and non-resizable.
    set_win_center(root, 700, 350)
    root.resizable(0, 0)
    # user ID
    lblid = tk.Label(root, text="学号:")
    lblid.grid(row=0, column=0)
    #lid.pack()
    entID = tk.Entry(root)
    entID.grid(row=0, column=1, padx=25, pady=0)
    #entID.pack()
    # password
    lblPW = tk.Label(root, text="Ehall密码:")
    lblPW.grid(row=1, column=0)
    #lPW.pack()
    entPW = tk.Entry(root, show="*")
    entPW.grid(row=1, column=1)
    #entPW.pack()
    # location information (labels: area / province / city), with defaults.
    lblArea = tk.Label(root, text='区域:')
    lblArea.grid(row=2, column=0)
    varArea = tk.StringVar(value="上海市 杨浦区")
    entArea = tk.Entry(root, textvariable=varArea, width=20)
    entArea.grid(row=2, column=1)
    #entArea.pack()
    lblProv = tk.Label(root, text='省份:')
    lblProv.grid(row=3, column=0)
    varProv = tk.StringVar(value="上海")
    entProv = tk.Entry(root, textvariable=varProv, width=20)
    entProv.grid(row=3, column=1)
    #entProv.pack()
    lblCity = tk.Label(root, text='城市:')
    lblCity.grid(row=4, column=0)
    varCity = tk.StringVar(value="上海市")
    entCity = tk.Entry(root, textvariable=varCity, width=20)
    entCity.grid(row=4, column=1)
    #entCity.pack()
    # auto submit
    # to be continue
    # log area: disabled Text widget with an attached scrollbar; it is
    # temporarily enabled only while appending log lines.
    scroll = tk.Scrollbar()
    textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')
    textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S+tk.W+tk.E+tk.N)
    scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.N, ipadx=0)
    scroll.config(command=textlog.yview)
    textlog.config(yscrollcommand=scroll.set)
    def submit_btn_cmd():
        # Log in with the entered credentials, then submit the report and
        # append the outcome (success / already filed / login failed) to the log.
        id = entID.get().strip()
        pw = entPW.get().strip()
        config = {
            'id': id,
            'pw': pw
        }
        ehall = Ehall(config)
        ehall.login()
        if ehall.username:
            address_info = {
                "area": varArea.get(),
                "province": varProv.get(),
                "city": varCity.get()
            }
            data = sign_up(ehall, address_info)
            print(data)
            # e == 0 is the server's success code for a fresh submission.
            if data['e'] == 0:
                log = ">>填报成功!%s %s\n" % (ehall.username, time.ctime())
            else:
                log = ">>今日已填报!%s %s\n" % (ehall.username, time.ctime())
        else:
            log = ">>登录失败!%s %s\n" % (ehall.username, time.ctime())
        textlog.config(state=tk.NORMAL)
        textlog.insert("insert", log)
        textlog.config(state=tk.DISABLED)
    btuExit = tk.Button(root, text='退出', command=root.quit, width=10)
    btuExit.grid(row=5, column=1, pady=2)
    btuSub = tk.Button(root, text="提交", command=submit_btn_cmd, width=10)
    btuSub.grid(row=5, column=0, pady=2, padx=20)
    root.mainloop()
if __name__ == "__main__":
    # Launch the GUI when run as a script.
    main()
|
3,892 | 1dab0084666588f61d0f9f95f88f06ed9d884e5b | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'KEY.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_KEY(object):
    """Generated PyQt5 UI class for the KEY dialog (from 'KEY.ui').

    Two labelled line edits (Keys 1 / Keys 2) with confirm/cancel buttons.
    Do not hand-edit layout values here: regenerate from the .ui file instead
    (see the generator warning in the file header).
    """
    def setupUi(self, KEY):
        """Create and position all child widgets on the *KEY* dialog."""
        KEY.setObjectName("KEY")
        KEY.resize(419, 106)
        self.Key1 = QtWidgets.QLineEdit(KEY)
        self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31))
        self.Key1.setText("")
        self.Key1.setObjectName("Key1")
        self.Key2 = QtWidgets.QLineEdit(KEY)
        self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31))
        self.Key2.setObjectName("Key2")
        self.layoutWidget = QtWidgets.QWidget(KEY)
        self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label = QtWidgets.QLabel(self.layoutWidget)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        self.label_2 = QtWidgets.QLabel(self.layoutWidget)
        self.label_2.setObjectName("label_2")
        self.verticalLayout.addWidget(self.label_2)
        self.enter = QtWidgets.QPushButton(KEY)
        self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31))
        self.enter.setObjectName("enter")
        self.quxiao = QtWidgets.QPushButton(KEY)
        self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31))
        self.quxiao.setObjectName("quxiao")
        self.retranslateUi(KEY)
        # Cancel button closes the dialog; other slots are auto-connected.
        self.quxiao.clicked.connect(KEY.close)
        QtCore.QMetaObject.connectSlotsByName(KEY)
    def retranslateUi(self, KEY):
        """Apply (re)translatable display strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        KEY.setWindowTitle(_translate("KEY", "KEY"))
        self.label.setText(_translate("KEY", "Keys 1"))
        self.label_2.setText(_translate("KEY", "Keys 2"))
        self.enter.setText(_translate("KEY", "确定"))
        self.quxiao.setText(_translate("KEY", "取消"))
|
3,893 | bfd8385e8f4886b91dde59c04785134b9cd6a2b6 | # Generated by Django 3.1 on 2020-08-28 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Event.user_id with a many-to-many ``users`` relation.

    Auto-generated by Django; drops the single-user foreign key field and
    adds an M2M through the explicit ``user_event`` table to ``UserE``.
    """

    dependencies = [
        ('api_rest', '0004_auto_20200828_0749'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='event',
            name='user_id',
        ),
        migrations.AddField(
            model_name='event',
            name='users',
            field=models.ManyToManyField(db_table='user_event', related_name='users', to='api_rest.UserE'),
        ),
    ]
|
3,894 | e4a0f26afe8c78e4abbd85834c96ed5ba84e1f0b | import tensorflow as tf
import numpy as np
import math
import sys
import os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
# from transform_nets import input_transform_net, feature_transform_net
import tf_util_loss
class Network:
    """PointNet-style autoencoder (TF1 graph mode): encodes a point cloud to a
    global feature vector and decodes it back to 1024 points, trained with a
    Chamfer reconstruction loss. Conv/FC helpers come from project ``tf_util``.
    """
    def placeholder_inputs(self,batch_size, num_point):
        """Create the (batch, num_point, 3) float32 input placeholder."""
        # with tf.variable_scope('inputs') as ip:
        source_pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
        return source_pointclouds_pl
    def get_model(self, source_pointclouds_pl, feature_size, is_training, bn_decay=None):
        """ Classification PointNet, input is BxNx3, output Bx40 """
        # NOTE(review): despite the docstring above, this returns a flattened
        # per-cloud global feature (B x 1024), not class logits; the
        # ``feature_size`` argument is unused — confirm intended signature.
        # with tf.variable_scope('PointNet') as pn:
        # Comment above two lines to have same points for loss and features and also change the variable names in the next line.
        batch_size = source_pointclouds_pl.get_shape()[0].value
        num_point = source_pointclouds_pl.get_shape()[1].value
        end_points = {}
        # Treat the cloud as a B x N x 3 x 1 "image" so 1x3 convs mix xyz.
        input_image = tf.expand_dims(source_pointclouds_pl, -1)
        net = tf_util.conv2d(input_image, 128, [1,3],
                             padding='VALID', stride=[1,1],
                             bn=True, is_training=is_training,
                             scope='conv1', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 256, [1,1],
                             padding='VALID', stride=[1,1],
                             bn=True, is_training=is_training,
                             scope='conv2', bn_decay=bn_decay, activation_fn=None)
        # Symmetric function: max pooling
        source_feature = tf_util.max_pool2d(net, [num_point, 1],
                                            padding='VALID', scope='maxpool')
        # Broadcast the pooled feature back to every point and concatenate
        # with the per-point features (local + global context).
        source_feature = tf.tile(source_feature, [1, num_point, 1, 1])
        source_feature = tf.concat([net, source_feature], axis=3)
        net = tf_util.conv2d(source_feature, 512, [1,1],
                             padding='VALID', stride=[1,1],
                             bn=True, is_training=is_training,
                             scope='conv3', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 1024, [1,1],
                             padding='VALID', stride=[1,1],
                             bn=True, is_training=is_training,
                             scope='conv4', bn_decay=bn_decay, activation_fn=None)
        source_global_feature = tf_util.max_pool2d(net, [num_point, 1],
                                                   padding='VALID', scope='maxpool')
        source_global_feature = tf.reshape(source_global_feature, [batch_size, -1])
        return source_global_feature
    def decode_data(self, source_global_feature, is_training, bn_decay=None):
        """Decode a global feature back into a (batch, 1024, 3) point cloud."""
        batch_size = source_global_feature.get_shape()[0].value
        net = tf_util.fully_connected(source_global_feature, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
        net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
        # Final layer emits 1024*3 coordinates, reshaped into xyz triples.
        net = tf_util.fully_connected(net, 1024*3, activation_fn=None, scope='fc3')
        predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])
        return predicted_pointclouds_pl
    def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):
        """Chamfer-distance reconstruction loss between prediction and source."""
        with tf.variable_scope('loss') as LossEvaluation:
            # loss = tf.reduce_mean(tf.square(tf.subtract(predicted_pointclouds_pl, source_pointclouds_pl)))
            loss = tf_util_loss.chamfer(predicted_pointclouds_pl, source_pointclouds_pl)
        return loss
if __name__=='__main__':
    # Smoke test: build the encoder graph on a dummy batch of 32 clouds of
    # 1024 points and print the resulting feature tensor (no session run).
    with tf.Graph().as_default():
        net = Network()
        inputs = tf.zeros((32,1024,3))
        outputs = net.get_model(inputs, 1024, tf.constant(True))
print(outputs) |
3,895 | c8d27965df83eb3e673b3857ee700a8474826335 | #!/usr/bin/python
# Python 2 script. debug == 1 uses the hard-coded sample intervals below;
# otherwise overlapNum() reads the real input from stdin.
debug = 0
if debug == 1:
    limit = [8,20]
    n = 3
    p = [[2,10],[10,12],[8,30],[1,5]]
    #n = 1
    # p = [[8,30]]
    print limit
    print n
    print p
def isIn(arr):
    # Keep an interval [start, end] only when it overlaps the global
    # window [limit[0], limit[1]] and has a non-zero end time.
    starts_in_time = arr[0] <= limit[1]
    ends_in_time = arr[1] >= limit[0]
    return starts_in_time and ends_in_time and arr[1] != 0
def overlapNum():
    """Read a time window and n intervals, then print the minimum and maximum
    number of simultaneously overlapping intervals inside the window.
    (Python 2: uses raw_input/input and print statements.)
    """
    count = 0
    maxN = 0
    # Sentinel larger than any possible count (inputs presumably < 10001).
    minN = 10001
    global p
    global limit
    if debug !=1:
        limit = []
        p = []
        n = 0
        i = 0
        # First line: the window "start end"; second line: interval count n;
        # then n lines of "start end" pairs.
        s = raw_input().split(" ")
        limit = map(int,s)
        n = input()
        while i<n:
            s = raw_input().split(" ")
            p.append([int(s[0]),int(s[1])])
            i = i + 1
    if n == 0:
        print 0
        print 0
        return
    p = filter(isIn,p) #Filtered out those not in limit scale
    #add 0,1 to the start and end time
    # Sweep-line events: tag 0 = interval opens, tag 1 = interval closes.
    l = []
    for i in range(len(p)):
        l.append((p[i][0],0))
        l.append((p[i][1],1))
    #sort
    l = sorted(l)
    #count 0 and 1
    if limit[1] == 0 or len(l) == 0:
        print 0
        print 0
        return
    # If the window is not fully covered at either edge, the minimum is 0.
    if l[0][0] > limit[0] or l[-1][0] < limit[1]:
        minN = 0
    for k in l:
        if k[1] == 0:
            count = count + 1
            maxN = max(maxN,count)
            if minN != 0:
                minN = count
        else: #k[1] == 1
            # Closings at the window's right edge do not reduce coverage.
            if k[0] < limit[1]:
                count = count -1
                if minN != 0:
                    minN = min(minN,count)
    if minN >= 10001:
        print 0
    else:
        print minN
    print maxN
    return
if __name__ == "__main__":
    # Read intervals from stdin and print min/max overlap counts.
    overlapNum()
|
3,896 | 0f3e12f35cc29a71be5b8e6d367908e31c200c38 | from numpy import *
from numpy.linalg import*
preco = array(eval(input("Alimentos: ")))
alimento = array([[ 2, 1 ,4 ],
[1 , 2 , 0],
[2 , 3 , 2 ]])
r = dot(inv(alimento),preco.T) #
print("estafilococo: ", round(r[0] , 1))
print("salmonela: ", round(r[1], 1))
print("coli: ", round(r[2], 1))
if r[0] == min(r):
print("estafilococo")
elif r[1] == min(r):
print("salmonela")
elif r[2]== min(r) :
print("coli") |
3,897 | bc1aefd0b0a87b80a10cecf00407b4608a6902b5 | #
# cuneiform_python.py
#
# Example showing how to create a custom Unicode set for parsing
#
# Copyright Paul McGuire, 2021
#
from typing import List, Tuple
import pyparsing as pp
class Cuneiform(pp.unicode_set):
    """Unicode set for Cuneiform Character Range"""
    # NOTE(review): the first range (U+10380-U+103D5) spans the Ugaritic and
    # Old Persian blocks, not Cuneiform proper — confirm it is intentional.
    _ranges: List[Tuple[int, ...]] = [
        (0x10380, 0x103d5),
        (0x12000, 0x123FF),  # Cuneiform block
        (0x12400, 0x1247F),  # Cuneiform Numbers and Punctuation
    ]
# list out all valid identifier characters
# print(Cuneiform.identchars)
"""
Simple Cuneiform Python language transformer
Define Cuneiform "words"
print: 𒄑𒉿𒅔𒋫
hello: 𒀄𒂖𒆷𒁎
world: 𒍟𒁎𒉿𒆷𒀳
def: 𒁴𒈫
"""
# uncomment to show parse-time debugging
# pp.enable_diag(pp.Diagnostics.enable_debug_on_named_expressions)
# define a MINIMAL Python parser
LPAR, RPAR, COLON, EQ = map(pp.Suppress, "():=")
def_ = pp.Keyword("𒁴𒈫", ident_chars=Cuneiform.identbodychars).set_name("def")
any_keyword = def_
ident = (~any_keyword) + pp.Word(
Cuneiform.identchars, Cuneiform.identbodychars, asKeyword=True
)
str_expr = pp.infix_notation(
pp.QuotedString('"') | pp.common.integer,
[
("*", 2, pp.OpAssoc.LEFT),
("+", 2, pp.OpAssoc.LEFT),
],
)
rvalue = pp.Forward()
fn_call = (ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR)).set_name("fn_call")
rvalue <<= fn_call | ident | str_expr | pp.common.number
assignment_stmt = ident + EQ + rvalue
stmt = pp.Group(fn_call | assignment_stmt).set_name("stmt")
fn_def = pp.Group(
def_ + ident + pp.Group(LPAR + pp.Optional(rvalue) + RPAR) + COLON
).set_name("fn_def")
fn_body = pp.IndentedBlock(stmt).set_name("fn_body")
fn_expr = pp.Group(fn_def + pp.Group(fn_body))
script = fn_expr[...] + stmt[...]
# parse some Python written in Cuneiform
cuneiform_hello_world = r"""
𒁴𒈫 𒀄𒂖𒆷𒁎():
𒀁 = "𒀄𒂖𒆷𒁎, 𒍟𒁎𒉿𒆷𒀳!\n" * 3
𒄑𒉿𒅔𒋫(𒀁)
𒀄𒂖𒆷𒁎()"""
script.parseString(cuneiform_hello_world).pprint(width=40)
# use transform_string to convert keywords and builtins to runnable Python
names_map = {
"𒄑𒉿𒅔𒋫": "print",
}
ident.add_parse_action(lambda t: names_map.get(t[0], t[0]))
def_.add_parse_action(lambda: "def")
print("\nconvert Cuneiform Python to executable Python")
transformed = (
# always put ident last
(def_ | ident)
.ignore(pp.quoted_string)
.transform_string(cuneiform_hello_world)
.strip()
)
print(
"=================\n"
+ cuneiform_hello_world.strip()
+ "\n=================\n"
+ transformed
+ "\n=================\n"
)
print("# run transformed Python")
exec(transformed)
|
3,898 | 2874e05d6d5e0f13924e5920db22ea3343707dfa | _base_ = [
'../models/cascade_rcnn_r50_fpn.py',
#'coco_instance.py',
'../datasets/dataset.py',
'../runtime/valid_search_wandb_runtime.py',
'../schedules/schedule_1x.py'
]
# MMDetection config: Cascade R-CNN with a Swin-Tiny backbone.
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        # _delete_ drops the base config's ResNet backbone entirely.
        _delete_=True,
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    # FPN input channels for Swin-T stages (ResNet values kept for reference).
    neck=dict(in_channels=[96, 192, 384, 768])
    #[256, 512, 1024, 2048]
)
# ImageNet mean/std normalization, BGR->RGB.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    # Either a plain multi-scale resize, or resize -> random crop -> resize.
    dict(
        type='AutoAugment',
        policies=[[
            dict(
                type='Resize',
                img_scale=[(480, 1024), (512, 1024), (544, 1024), (576, 1024),
                           (608, 1024), (640, 1024), (672, 1024), (704, 1024),
                           (736, 1024), (768, 1024), (800, 1024)],
                multiscale_mode='value',
                keep_ratio=True)
        ],
                  [
                      dict(
                          type='Resize',
                          img_scale=[(400, 1024), (500, 1024), (600, 1024)],
                          multiscale_mode='value',
                          keep_ratio=True),
                      dict(
                          type='RandomCrop',
                          crop_type='absolute_range',
                          crop_size=(384, 600),
                          allow_negative_crop=True),
                      dict(
                          type='Resize',
                          img_scale=[(480, 1024), (512, 1024), (544, 1024),
                                     (576, 1024), (608, 1024), (640, 1024),
                                     (672, 1024), (704, 1024), (736, 1024),
                                     (768, 1024), (800, 1024)],
                          multiscale_mode='value',
                          override=True,
                          keep_ratio=True)
                  ]]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# NOTE(review): this validation pipeline applies RandomFlip and AutoAugment —
# random augmentation during validation is unusual and makes eval metrics
# noisy; confirm this is intentional (it mirrors train_pipeline exactly).
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[[
            dict(
                type='Resize',
                img_scale=[(480, 1024), (512, 1024), (544, 1024), (576, 1024),
                           (608, 1024), (640, 1024), (672, 1024), (704, 1024),
                           (736, 1024), (768, 1024), (800, 1024)],
                multiscale_mode='value',
                keep_ratio=True)
        ],
                  [
                      dict(
                          type='Resize',
                          img_scale=[(400, 1024), (500, 1024), (600, 1024)],
                          multiscale_mode='value',
                          keep_ratio=True),
                      dict(
                          type='RandomCrop',
                          crop_type='absolute_range',
                          crop_size=(384, 600),
                          allow_negative_crop=True),
                      dict(
                          type='Resize',
                          img_scale=[(480, 1024), (512, 1024), (544, 1024),
                                     (576, 1024), (608, 1024), (640, 1024),
                                     (672, 1024), (704, 1024), (736, 1024),
                                     (768, 1024), (800, 1024)],
                          multiscale_mode='value',
                          override=True,
                          keep_ratio=True)
                  ]]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline),val=dict(pipeline=val_pipeline))
# Validate every epoch; keep the checkpoint with the best bbox mAP@0.5.
evaluation = dict(interval=1, metric='bbox', save_best='bbox_mAP_50')
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(
        #     type='WandbLoggerHook',
        #     init_kwargs=dict(
        #         project='valid_search',
        #         name='YOUR_EXP'
        #     ))
    ])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# AdamW with no weight decay on positional-embedding and norm parameters,
# the standard recipe for Swin transformer backbones.
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0001,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))
# Step LR decay at epochs 27 and 33 over a 36-epoch (3x) schedule.
lr_config = dict(warmup_iters=1000, step=[27, 33])
runner = dict(max_epochs=36)
|
3,899 | 4d7696c832f9255fbc68040b61fde12e057c06fa | import numpy as np
import mysql.connector
from mysql.connector import Error
import matplotlib.pyplot as plt
def readData():
    """Load the latest uploaded signal file referenced by the datasets table.

    Returns:
        (dataSignal, filename): list of text lines (trailing newline removed)
        and the file name read from the first dataset row's pathdata column.
    """
    connection = mysql.connector.connect(host='localhost',database='cad_ultrasound',user='root',password='')
    sql_select_Query = "SELECT id_pasien,nama,pathdata FROM datasets"
    cursor = connection.cursor()
    cursor.execute(sql_select_Query)
    records = cursor.fetchall()
    # Only the first row is used; row layout: (id_pasien, nama, pathdata).
    data = records[0]
    # nama_pasien = data[1]
    filename = data[2]
    # dataSignal = np.genfromtxt(r"C:/xampp/htdocs/projectCAD/storage/app/public/upload/files/"+filename,delimiter=',')
    ## READ TXT FILE
    dataSignal = []
    my_file = open("C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/" + filename, "r")
    # Strip exactly one trailing newline per line, keep the text otherwise.
    for line in my_file.readlines():
        if line[-1:] == "\n":
            dataSignal.append(line[:-1])
        else:
            dataSignal.append(line)
    my_file.close()
    # C:/xampp/htdocs/projectCAD/public/storage/upload/files/hasilproses
    if (connection.is_connected()):
        cursor.close()
        connection.close()
    return dataSignal, filename
def saveData(data,label,filename):
    """Write processed signal values to a result file and record it on the patient.

    Args:
        data: iterable of processed values; one value is written per line.
        label: sequence whose first element is the classification label to store.
        filename: original upload file name; the result file is named after it.
    """
    connection = mysql.connector.connect(host='localhost', database='cad_ultrasound', user='root', password='')
    cursor = connection.cursor()
    filename_hasil = 'hasilproses_'+filename
    # The with-block closes the file; the original's extra f.close() was redundant.
    with open(r'C:\xampp\htdocs\projectCAD\public\storage\upload/files\hasilproses/' + filename_hasil, 'w') as f:
        for row in data:
            f.write(str(row) + '\n')
    #Select Pasien from database
    sql_select = "SELECT id_pasien,nama,pathdata FROM datasets"
    cursor.execute(sql_select)
    records = cursor.fetchall()
    # Avoid shadowing the `data` parameter (the original reassigned it here).
    first_row = records[0]
    id_pasien = first_row[0]
    print(label[0])
    # SECURITY FIX: parameterized query instead of string concatenation —
    # prevents SQL injection and quoting bugs in filename/label values.
    sql_update = "UPDATE pasien SET hasilproses = %s, label = %s WHERE id = %s"
    cursor.execute(sql_update, (filename_hasil, str(label[0]), id_pasien))
    connection.commit()
    if (connection.is_connected()):
        cursor.close()
        connection.close()
    return print("sukses")
def getFiturEkstraksi():
    """Load the stored feature matrix referenced by the datasets table.

    Returns:
        numpy array parsed from the comma-separated feature file.
    """
    connection = mysql.connector.connect(host='localhost',
                                         database='cad_ultrasound',
                                         user='root',
                                         password='')
    cursor = connection.cursor()
    sql_select_Query = "SELECT id_pasien,nama,pathdata FROM datasets"
    cursor.execute(sql_select_Query)
    records = cursor.fetchall()
    # BUG FIX: fetchall() returns a list of row tuples; the original passed
    # that whole list into the path concatenation below, which raises
    # TypeError. Use the pathdata column of the first row, as readData() does.
    # NOTE(review): this query may have been meant to read the stored feature
    # file name from fitur_ekstraksis (see saveFiturEkstraksi) — confirm.
    fiturname = records[0][2]
    fitur = np.genfromtxt(r"C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/" + fiturname, delimiter=',')
    if (connection.is_connected()):
        cursor.close()
        connection.close()
    return fitur
def saveFiturEkstraksi(fitur,label):
    """Persist the extracted feature matrix and labels to disk and register
    the file names in the fitur_ekstraksis table (row id 1).

    Args:
        fitur: 2-D array-like of extracted feature rows.
        label: 1-D array-like of per-sample labels.
    """
    connection = mysql.connector.connect(host='localhost',
                                         database='cad_ultrasound',
                                         user='root',
                                         password='')
    cursor = connection.cursor()
    fitur_dir = "C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/"
    # BUG FIX: the original looped `for row in range(len(...))` and passed the
    # loop *index* (a scalar) to np.savetxt, which raises on 0-D input and
    # would have written indices rather than the data anyway. savetxt writes
    # the whole array in one call.
    fiturname = 'fitur.txt'
    np.savetxt(fitur_dir + fiturname, np.asarray(fitur), delimiter=',')
    labelname = 'label.txt'
    # fmt='%s' so labels work whether they are numeric or strings.
    np.savetxt(fitur_dir + labelname, np.asarray(label), delimiter=',', fmt='%s')
    # SECURITY FIX: parameterized query instead of string concatenation.
    sql_update = "UPDATE fitur_ekstraksis SET fitur = %s, label = %s WHERE id = 1"
    cursor.execute(sql_update, (fiturname, labelname))
    connection.commit()
    if (connection.is_connected()):
        cursor.close()
        connection.close()
return print("sukses") |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.