text stringlengths 38 1.54M |
|---|
# Using print() functions to draw ascii art.
# Each call emits one row of a right triangle; the picture depends entirely
# on the order of the calls.
print(" /|")
print(" / |")
print(" / |")
print("/___|")
# The order in which you write your instructions matters a lot (top-down sequence).
|
def longest_slide_down(pyramid):
    """Return the maximum path sum from the apex of *pyramid* to its base.

    At each step the path moves to one of the two adjacent numbers on the
    row below (Project Euler #18/#67 style).

    Args:
        pyramid: list of rows, where row i contains i+1 integers.

    Returns:
        int: the largest attainable top-to-bottom sum.
    """
    # Bottom-up dynamic programming: fold the better of each cell's two
    # children into the parent, row by row, until only the apex remains.
    # Unlike the previous version (which reversed and rewrote the input
    # rows in place), this works on copies and leaves the caller's
    # pyramid untouched.
    best = list(pyramid[-1])
    for row in reversed(pyramid[:-1]):
        best = [value + max(best[j], best[j + 1]) for j, value in enumerate(row)]
    return best[0]
import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question, Category
class TriviaTestCase(unittest.TestCase):
    """This class represents the trivia test case"""

    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app()
        self.client = self.app.test_client
        self.database_name = "trivia_test"
        # Credentials come from the environment; host/port are fixed for local runs.
        # NOTE(review): the "postgres://" URL scheme was removed in SQLAlchemy 1.4;
        # newer versions require "postgresql://" — confirm the pinned dependency.
        self.database_path = "postgres://{}:{}@{}/{}".format(os.environ.get('MY_PG_USER'), os.environ.get('MY_PG_PWD'), 'localhost:5432', self.database_name)
        setup_db(self.app, self.database_path)
        # binds the app to the current context
        with self.app.app_context():
            self.db = SQLAlchemy()
            self.db.init_app(self.app)
            # create all tables
            self.db.create_all()
            # Seed one known question so the delete/category tests have a stable row.
            q = Question("When is the best time to wear a striped sweater?", "All the time.", 4, 5)
            self.db.session.add(q)
            self.db.session.commit()
            # Remember its generated primary key for test_delete_question_success.
            self.valid_delete_id = q.id

    def tearDown(self):
        """Executed after reach test"""
        # Remove the question seeded in setUp (if a test did not already delete it).
        question_match_A = Question.query.filter(Question.question == "When is the best time to wear a striped sweater?").first()
        if question_match_A:
            question_match_A.delete()
        # Remove the question created by test_create_question_success, if present.
        question_match_B = Question.query.filter(Question.question == "What color is grass?").first()
        if question_match_B:
            question_match_B.delete()

    """
    TODO
    Write at least one test for each test for successful operation and for expected errors.
    """

    def test_get_categories_success(self):
        """Test for retrieving all categories"""
        res = self.client().get('/')
        self.assertEqual(res.status_code, 200)
        data = json.loads(res.data)
        # All six seeded trivia categories must be present.
        expected_categories = ['Science', 'Art', 'Geography', 'History', 'Entertainment', 'Sports']
        self.assertIn("categories", data)
        self.assertEqual(len(data["categories"]), len(expected_categories))
        for category in expected_categories:
            self.assertIn(category, data["categories"])
        self.assertIn("message", data)
        self.assertEqual(data["message"], "HELLO WORLD")

    def test_get_questions_page_success(self):
        """Test for retrieving a page of questions"""
        res = self.client().get('/questions?page=1')
        self.assertEqual(res.status_code, 200)
        data = json.loads(res.data)
        self.assertIn("questions", data)
        self.assertIn("total_questions", data)
        self.assertIn("categories", data)
        expected_category_map = {
            '1': "Science", '2': "Art", '3': "Geography", '4': "History", '5': "Entertainment",
            '6': "Sports"
        }
        # NOTE(review): assumes the standard 18-row trivia fixture plus the
        # one question seeded in setUp (19 total, page size 10) — confirm.
        self.assertEqual(10, len(data["questions"]))
        self.assertEqual(19, data["total_questions"])
        self.assertEqual(len(expected_category_map), len(data["categories"]))
        for id, category in expected_category_map.items():
            self.assertIn(id, data["categories"])
            self.assertEqual(category, data["categories"][id])

    def test_get_questions_page_failure(self):
        """Test for retrieving page of questions with failure"""
        # Page numbers are 1-based; page 0 is rejected as unprocessable.
        res = self.client().get('/questions?page=0')
        self.assertEqual(res.status_code, 422)

    def test_delete_question_success(self):
        """Test for deleting existent question by ID"""
        # Snapshot the count, delete the seeded question, verify the count drops.
        resBeforeDelete = self.client().get('/questions')
        dataBeforeDelete = json.loads(resBeforeDelete.data)
        total_question_count = dataBeforeDelete["total_questions"]
        resDelete = self.client().delete(f"/questions/{self.valid_delete_id}")
        dataDelete = json.loads(resDelete.data)
        self.assertEqual(resDelete.status_code, 200)
        self.assertTrue(dataDelete["success"])
        resAfterDelete = self.client().get('/questions')
        dataAfterDelete = json.loads(resAfterDelete.data)
        self.assertEqual(dataAfterDelete["total_questions"], total_question_count-1)

    def test_delete_question_failure(self):
        """Test for deleting non-existent question by ID"""
        res_delete = self.client().delete("/questions/1000000")
        self.assertEqual(res_delete.status_code, 404)

    def test_retrieve_category_map_success(self):
        """Test for retrieving map of categories"""
        res = self.client().get('/categories')
        self.assertEqual(res.status_code, 200)
        data = json.loads(res.data)
        self.assertIn("categories", data)
        self.assertEqual(len(data["categories"]), 6)
        expected_category_map = { '1': "Science", '2': "Art", '3': "Geography", '4': "History",
                                  '5': "Entertainment", '6': "Sports" }
        for id, category in expected_category_map.items():
            self.assertIn(id, data["categories"])
            self.assertEqual(category, data["categories"][id])

    def test_create_question_success(self):
        """Test for creating question successfully"""
        question_info = {
            "question": 'What color is grass?',
            "answer": 'Green',
            "category": 5,
            "difficulty": 2,
        }
        # Creation uses PUT on /questions; POST on the same route is search.
        res = self.client().put('/questions', data=json.dumps(question_info), headers={'Content-Type': 'application/json'})
        self.assertEqual(200, res.status_code)
        data = json.loads(res.data)
        self.assertTrue("success" in data)
        self.assertTrue(data["success"])
        self.assertTrue("id" in data)
        # Verify the row actually landed in the database with the given fields.
        question_match = Question.query.filter(Question.id == data["id"]).first()
        self.assertTrue(question_match is not None)
        self.assertEqual("What color is grass?", question_match.question)
        self.assertEqual("Green", question_match.answer)
        self.assertEqual(5, question_match.category)
        self.assertEqual(2, question_match.difficulty)

    def test_create_question_failure(self):
        """Test for failiing to create a question due to invalid parameter dictionary"""
        # Missing answer/category/difficulty keys -> 400 Bad Request.
        question_info = {
            "question": 'What color is grass?'
        }
        res = self.client().put('/questions', data=json.dumps(question_info), headers={'Content-Type': 'application/json'})
        self.assertEqual(400, res.status_code)

    def test_retrieve_category_questions_success(self):
        """Test for successful retrieval of category questions"""
        res = self.client().get('/categories/4/questions')
        self.assertEqual(200, res.status_code)
        data = json.loads(res.data)
        self.assertTrue("questions" in data)
        self.assertTrue("totalQuestions" in data)
        self.assertTrue("currentCategory" in data)
        # NOTE(review): assumes category 4 (History) holds 4 fixture rows plus
        # the question seeded in setUp — confirm against the fixture data.
        self.assertEqual(5, data["totalQuestions"])
        self.assertEqual("History", data["currentCategory"])
        # The seeded question must appear exactly once with all its fields.
        questions_matches = data["questions"]
        added_question = list(filter(lambda q: q["question"] == "When is the best time to wear a striped sweater?", questions_matches))
        self.assertTrue(added_question is not None)
        self.assertEqual(1, len(added_question))
        self.assertEqual("When is the best time to wear a striped sweater?", added_question[0]["question"])
        self.assertEqual("All the time.", added_question[0]["answer"])
        self.assertEqual(4, added_question[0]["category"])
        self.assertEqual(5, added_question[0]["difficulty"])

    def test_retrieve_category_questions_failure(self):
        """Test for failing to retrieve category questions"""
        # Category ids run 1-6; 8 does not exist.
        res = self.client().get('/categories/8/questions')
        self.assertEqual(404, res.status_code)

    def test_retrieve_question_search_success(self):
        """Test for successful search of term 'title'"""
        search_info = { "page": 1, "searchTerm": 'title' }
        res = self.client().post('/questions', data=json.dumps(search_info), headers={'Content-Type': 'application/json' })
        self.assertEqual(200, res.status_code)
        data = json.loads(res.data)
        self.assertTrue("totalQuestions" in data)
        self.assertTrue("questions" in data)
        # The default fixture contains exactly two questions mentioning "title".
        self.assertEqual(2, data["totalQuestions"])
        self.assertEqual(2, len(data["questions"]))
        # Sort for a deterministic order before asserting on positions.
        data["questions"].sort(key = lambda q: q["question"])
        self.assertEqual("What was the title of the 1990 fantasy directed by Tim Burton about a young man with multi-bladed appendages?", data["questions"][0]["question"])
        self.assertEqual("Edward Scissorhands", data["questions"][0]["answer"])
        self.assertEqual(5, data["questions"][0]["category"])
        self.assertEqual(3, data["questions"][0]["difficulty"])
        self.assertEqual("Whose autobiography is entitled 'I Know Why the Caged Bird Sings'?", data["questions"][1]["question"])
        self.assertEqual("Maya Angelou", data["questions"][1]["answer"])
        self.assertEqual(4, data["questions"][1]["category"])
        self.assertEqual(2, data["questions"][1]["difficulty"])

    def test_retrieve_question_search_failure(self):
        """Test for failed search due to invalid input data"""
        # Missing "searchTerm" key -> 404 from the search endpoint.
        search_info = { "page": 1 }
        res = self.client().post('/questions', data=json.dumps(search_info), headers={'Content-Type': 'application/json'})
        self.assertEqual(404, res.status_code)

    def test_retrieve_quiz_question_success(self):
        """Test for successful retrieval of multiple quiz questions"""
        previous_questions = []
        test_category_questions = Question.query.filter(Question.category == 1).all()
        category_question_ids = [q.id for q in test_category_questions]
        quiz_category = { "id": "1", "type": "Science" }
        quiz_info = { "previous_questions": previous_questions, "quiz_category": quiz_category }
        # Draw one quiz question per available question; each draw must be new,
        # in-category, and is then recorded as "previous" for the next draw.
        for _ in range(len(category_question_ids)):
            res = self.client().post('/quizzes', data=json.dumps(quiz_info), headers={'Content-Type': 'application/json' })
            self.assertEqual(200, res.status_code)
            data = json.loads(res.data)
            self.assertEqual(data["question"]["category"], 1)
            self.assertFalse(data["question"]["id"] in quiz_info["previous_questions"])
            self.assertTrue(data["question"]["id"] in category_question_ids)
            quiz_info["previous_questions"].append(data["question"]["id"])
        # With every question exhausted, the endpoint must return question=None.
        res = self.client().post('/quizzes', data=json.dumps(quiz_info), headers={'Content-Type': 'application/json' })
        self.assertEqual(200, res.status_code)
        data = json.loads(res.data)
        self.assertTrue(data["question"] is None)

    # No failure test for retrieval of quiz questions seemed necessary; the above test seemed to encompass all cases
# Make the tests conveniently executable
if __name__ == "__main__":
    unittest.main()
import xml.etree.ElementTree as ET
"""
data ='''<person>
<name>Chuck</name>
<phone type="intl">
+1 734 303 4456
</phone>
<email hide="yes"/>
</person>'''
#las tres comillas simpless fue para meter toda esa informacion en varias lineas Jaja salu2
tree=ET.fromstring(data)#Crea un arbol de el documento
print('Name: ', tree.find('name').text)#obtengo el contenido de name
print('Attr: ', tree.find('email').get('hide'))#Obtengo el valor de hide
"""
input ='''<stuff>
<users>
<user x="2">
<id>001</id>
<name>Chuck</name>
</user>
<user x="7">
<id>009</id>
<name>Brent</name>
</user>
</users>
</stuff>'''
stuff=ET.fromstring(input)#un arbol
lst=stuff.findall('users/user')#una lista de arboles
print('User count:',len(lst))
for iteam in lst:
print('Name ', iteam.find('name').text)
print('ID ', iteam.find('id').text)
print('Attribute ', iteam.get('x'))
|
from django.contrib.gis.utils import LayerMapping
from models import Contours
# Source file with the contour features to import.
# NOTE(review): hard-coded absolute user path — consider an env var/setting.
contoursJsonFile = '/Users/kiran/Downloads/contours.json'
# Model-field -> source-attribute mapping consumed by LayerMapping.
# NOTE(review): 'trt' and 'trt_i' both map to 'TRTO0326_' — confirm that
# this duplication is intentional.
mapping = {
    'trt':'TRTO0326_',
    'trt_i':'TRTO0326_',
    'height':'HEIGHT',
    'htm': 'HTM',
    'rpoly':'RPOLY_',
    'lpoly':'LPOLY_',
    'tnode':'TNODE_',
    'fnode':'FNODE_',
    'length':'LENGTH',
    'geomData':'MULTILINESTRING'
}
# Load every feature into the Contours model, logging each saved record.
lm = LayerMapping(Contours,contoursJsonFile,mapping)
lm.save(verbose=True)
#!/usr/bin/env python
# coding: utf-8
# In[4]:
# Build the sample list.
리스트 = ["가", "나", "다", "라"]
# Slice with a negative step to walk the list from the end to the front.
# A for loop prints the elements one per line.
for i in 리스트[: :-1]:
    print(i)
# Output: 라
#         다
#         나
#         가
|
from serverzen_fabric import utils, ossupport, _internal
class ApacheService(_internal.LoggerMixin,
                    utils.RunnerMixin, ossupport._OSMixin):
    """Fabric helpers for controlling the Apache service and its modules.

    Relies on the mixins for logging (``self.log``), command execution
    (``self.runner``) and OS specifics (``self.os``, e.g. the path of the
    Apache init script).
    """

    def _apache(self, cmd):
        """Run ``<apache initd> <cmd>`` as root, logging the action first."""
        self.log('Executing', 'apache.' + cmd)
        self.runner.clone(sudo_user='root') \
            .run(self.os.apacheinitd + ' ' + cmd)

    def start(self):
        self._apache('start')

    def stop(self):
        self._apache('stop')

    def restart(self):
        self._apache('restart')

    def reload(self):
        self._apache('reload')

    def _mod_command(self, tool, mods):
        """Run ``<tool> <mod>`` for every module in one root invocation.

        Commands are joined with '; ' exactly as the previous hand-rolled
        loops in enable_mod/disable_mod did; this removes that duplication.
        """
        root = self.runner.clone(sudo_user='root')
        root.run('; '.join(tool + ' ' + mod for mod in mods))

    def enable_mod(self, *mods):
        """Enable one or more Apache modules via a2enmod."""
        self._mod_command('a2enmod', mods)

    def disable_mod(self, *mods):
        """Disable one or more Apache modules via a2dismod."""
        self._mod_command('a2dismod', mods)
|
#!/usr/bin/env python
'''
This node subscribe topic "chat_data" from picture_voice_recognize.py and
"/usb_cam/image_raw".When this node receive the string information "cheese",
this node then picture according the data of "/usb_cam/image_raw" and save in
the given path.
'''
from __future__ import print_function
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import time
class TakePhoto:
    """Subscribe to camera frames and an action signal; when 'CHEESE' is
    received, save the most recent frame to disk and publish a success
    message on 'success_signal'."""

    def __init__(self):
        self.bridge = CvBridge()
        # BUG FIX: take_picture() reads `self.image_received`, but the old
        # code initialised a differently named flag (`is_picture`), so a
        # CHEESE command arriving before the first camera frame raised
        # AttributeError. Initialise the flag that is actually used.
        self.image_received = False
        image_topic = "/usb_cam/image_raw"
        self.image_sub = rospy.Subscriber(image_topic, Image, self.imageCallback)
        self.action_signal_sub = rospy.Subscriber("action_signal", String, self.chatdataCallback)
        self.success_pub = rospy.Publisher('success_signal', String, queue_size = 5)
        self.success_signal = "I have taken one photo for you"
        # Give subscribers/publisher a moment to register with the master.
        rospy.sleep(1)

    def chatdataCallback(self,data):
        """On the 'CHEESE' keyword, save a timestamped photo."""
        if data.data == 'CHEESE':
            ###############choose your picture save path%%%%%%%%%%%%%%
            path = "/home/siupaang/rse_ws/src/robot_vision/photos/"
            timestr = time.strftime("%Y%m%d-%H%M%S-")
            img_title = path + timestr + "photo.jpg"
            self.take_picture(img_title)

    def imageCallback(self,data):
        """Cache the newest camera frame, converted to OpenCV BGR."""
        # Convert image to OpenCV format
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            # BUG FIX: the old code fell through and assigned the unbound
            # cv_image (NameError). Keep the previous frame instead.
            return
        self.image_received = True
        self.image = cv_image

    def take_picture(self, img_title):
        """Write the cached frame to *img_title*.

        Returns True on success, False when no frame has arrived yet."""
        if self.image_received:
            # Save an image
            cv2.imwrite(img_title, self.image)
            self.success_pub.publish(self.success_signal)
            return True
        else:
            return False
if __name__ == '__main__':
    # Start the node and keep it alive servicing subscriber callbacks.
    rospy.init_node('photo_taker', anonymous=False)
    takephoto = TakePhoto()
    rospy.spin()
#-*- coding: utf-8 -*-
"""
Created on Wed Aug 1 08:50:40 2018
class MyGan
@author: Dujin
"""
# NOTE(review): runfile() is a Spyder-console builtin, not standard Python —
# this script only runs inside Spyder. It executes basefunc.py to bring tf,
# np and the model helpers (load_mnist, bn, linear, conv2d, deconv2d, lrelu,
# save_images) into this namespace — confirm that is what basefunc defines.
runfile('C:/users/dujin/desktop/tensorflow/infogan/basefunc.py')
class MyGan(object):
    """InfoGAN-style model on MNIST (TensorFlow 1.x graph mode).

    The label/code vector y is ydim = 10 discrete (one-hot digit class)
    + 2 continuous slots. NOTE(review): tf/np and the helper functions are
    expected to come from the runfile'd basefunc.py — confirm.
    """

    def __init__(self,sess,data_path,ydim = 12 ,zdim = 10,batch_size = 64, input_height = 28 , input_width =28,channel = 1,epoch = 10000):
        self.sess = sess
        # NOTE(review): the data_path argument is ignored — the path is
        # hard-coded here; confirm whether that is intentional.
        self.data_path = 'C:/users/dujin/desktop/tensorflow/infogan/data/'+'mnist'
        self.ydim = ydim                  # code size: 10 discrete + 2 continuous
        self.zdim = zdim                  # noise vector size
        self.batch_size = batch_size
        self.input_height = input_height
        self.input_width = input_width
        self.cdim = channel               # image channels (1 for MNIST)
        self.data_X , self.data_y = load_mnist(self.data_path)
        self.beta1 = 0.5                  # Adam beta1
        self.learning_rate = 0.0002
        self.len_continuous_code = 2
        self.len_discrete_code = 10
        self.num_batches = len(self.data_X) // self.batch_size
        self.sample_num = 64
        self.epoch = epoch

    def generator(self, z, y, is_training=True, reuse=False):
        """Map noise z concatenated with code y to a 28x28x1 image."""
        # [batch_size, z_dim+y_dim] > [batch_size, 1024] > [batch_size, 128*7*7] >
        # [batch_size, 7, 7, 128] > [batch_size, 14, 14, 64] > [batch_size, 28, 28, 1]
        with tf.variable_scope("generator", reuse=reuse):
            # merge noise and code
            z = tf.concat([z, y], 1)
            net = tf.nn.relu(bn(linear(z, 1024, scope='g_fc1'), is_training=is_training, scope='g_bn1'))
            net = tf.nn.relu(bn(linear(net, 128 * 7 * 7, scope='g_fc2'), is_training=is_training, scope='g_bn2'))
            net = tf.reshape(net, [self.batch_size, 7, 7, 128])
            net = tf.nn.relu(
                bn(deconv2d(net, [self.batch_size, 14, 14, 64], 4, 4, 2, 2, name='g_dc3'), is_training=is_training,
                   scope='g_bn3'))
            out = tf.nn.sigmoid(deconv2d(net, [self.batch_size, 28, 28, 1], 4, 4, 2, 2, name='g_dc4'))
            return out

    def discriminatior(self, x, is_training=True, reuse=False):
        """Return (probability, logit, features) for real/fake images.

        NOTE(review): method name keeps the original 'discriminatior'
        spelling — callers inside this class use it, so leave as is.
        """
        with tf.variable_scope("discriminator", reuse=reuse):
            net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name='d_conv1'))
            net = lrelu(bn(conv2d(net, 128, 4, 4, 2, 2, name='d_conv2'), is_training=is_training, scope='d_bn2'))
            net = tf.reshape(net, [self.batch_size, -1])
            net = lrelu(bn(linear(net, 1024, scope='d_fc3'), is_training=is_training, scope='d_bn3'))
            out_logit = linear(net, 1, scope='d_fc4')
            out = tf.nn.sigmoid(out_logit)
            return out, out_logit, net

    def classifier(self, x, is_training=True, reuse=False):
        """Recognition network Q: predict the code y from discriminator features."""
        # x from discirminator net
        # [batch_size, 64] > [batch_size, y_dim]
        with tf.variable_scope("classifier", reuse=reuse):
            net = lrelu(bn(linear(x, 64, scope='c_fc1'), is_training=is_training, scope='c_bn1'))
            out_logit = linear(net, self.ydim, scope='c_fc2')
            out = tf.nn.softmax(out_logit)
            return out, out_logit

    def bulid_model(self):
        """Build placeholders, losses and optimisers (name keeps the
        original 'bulid_model' spelling — external callers use it)."""
        self.z = tf.placeholder(dtype = tf.float32 , shape = [self.batch_size , self.zdim],name = 'z')
        self.y = tf.placeholder(tf.float32,shape = [self.batch_size , self.ydim],name = 'y')
        self.dic_input = tf.placeholder(tf.float32, shape = [self.batch_size, self.input_height, self.input_width,self.cdim] , name = 'dic_input')
        # cal loss
        # real image loss
        D_real , D_real_logits, _ = self.discriminatior(self.dic_input,is_training= True)
        # fake image loss
        G = self.generator(self.z,self.y,is_training= True , reuse = False)
        D_fake , D_fake_logits, input_classifier = self.discriminatior(G, is_training = True , reuse = True)
        # get loss for Discriminator: real -> 1, fake -> 0
        d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits= D_real_logits,labels = tf.ones_like(D_real_logits)))
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits= D_fake_logits,labels = tf.zeros_like(D_fake_logits)))
        self.d_loss = d_loss_fake + d_loss_real
        # Generator loss: fool the discriminator (fake -> 1).
        self.g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.ones_like(D_fake)))
        ## 2. Information Loss"
        code_fake, code_logit_fake = self.classifier(input_classifier, is_training=True, reuse=False)
        # Discrete part: cross-entropy against the first len_discrete_code slots of y.
        disc_code_est = code_logit_fake[:, :self.len_discrete_code]
        disc_code_tg = self.y[:, :self.len_discrete_code]
        q_disc_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=disc_code_est, labels=disc_code_tg))
        # Continuous part: squared error against the remaining slots.
        cont_code_est = code_logit_fake[:, self.len_discrete_code:]
        cont_code_tg = self.y[:, self.len_discrete_code:]
        q_cont_loss = tf.reduce_mean(tf.reduce_sum(tf.square(cont_code_tg - cont_code_est), axis=1))
        self.q_loss = q_disc_loss + q_cont_loss
        # Inference-mode generator reusing the trained variables, for sampling.
        self.fake_images = self.generator(self.z, self.y, is_training=False, reuse=True)
        # Split trainable variables by the name prefixes used in the scopes above.
        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars if 'd_' in var.name]
        g_vars = [var for var in t_vars if 'g_' in var.name]
        q_vars = [var for var in t_vars if ('d_' in var.name) or ('c_' in var.name) or ('g_' in var.name)]
        # Run batch-norm update ops alongside the optimiser steps.
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            self.d_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1) \
                .minimize(self.d_loss, var_list=d_vars)
            # G and Q train with a 5x larger learning rate than D.
            self.g_optim = tf.train.AdamOptimizer(self.learning_rate * 5, beta1=self.beta1) \
                .minimize(self.g_loss, var_list=g_vars)
            self.q_optim = tf.train.AdamOptimizer(self.learning_rate * 5, beta1=self.beta1) \
                .minimize(self.q_loss, var_list=q_vars)

    def train(self):
        """Alternate D and G/Q updates over mini-batches for self.epoch epochs."""
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        for epoch in range(0, self.epoch):
            for idx in range(0, self.num_batches):
                batch_images = self.data_X[idx*self.batch_size:(idx+1)*self.batch_size]
                batch_labels = self.data_y[idx * self.batch_size:(idx + 1) * self.batch_size]
                # Code = one-hot label + 2 uniform continuous components.
                batch_codes = np.concatenate((batch_labels, np.random.uniform(-1, 1, size=(self.batch_size, 2))),
                                             axis=1)
                batch_z = np.random.uniform(-1, 1, [self.batch_size, self.zdim]).astype(np.float32)
                _, d_loss = self.sess.run([self.d_optim,self.d_loss],feed_dict={self.dic_input: batch_images, self.y: batch_codes,self.z: batch_z})
                self.sess.run([self.g_optim, self.q_optim], feed_dict={self.dic_input: batch_images, self.z: batch_z, self.y: batch_codes})
                # Progress trace; printed per batch (original indentation was
                # ambiguous — confirm it was not meant to be per epoch).
                print(d_loss)

    def visualize_result(self):
        """Sample image grids for three code configurations and save PNGs."""
        """ random noise, random discrete code, fixed continuous code """
        image_frame_dim = 8
        # Random digit class per sample, one-hot encoded; continuous slots stay 0.
        y = np.random.choice(self.len_discrete_code, self.batch_size)
        y_one_hot = np.zeros((self.batch_size, self.ydim))
        y_one_hot[np.arange(self.batch_size), y] = 1
        z_sample = np.random.uniform(-1, 1, size=(self.batch_size, self.zdim))
        self.samples1 = self.sess.run(self.fake_images, feed_dict={self.z: z_sample, self.y: y_one_hot})
        save_images(self.samples1[:64, :, :, :],[8,8],'r_d_code'+'.png')
        """ random noise specified discrete code, fixed continuous code """
        n_styles = 10
        np.random.seed()
        si = np.random.choice(self.batch_size, n_styles)  # NOTE(review): si is never used below
        for l in range(self.len_discrete_code):
            # Force the whole batch to digit class l; reuse z_sample from above.
            y = np.zeros(self.batch_size, dtype=np.int64) + l
            y_one_hot = np.zeros((self.batch_size, self.ydim))
            y_one_hot[np.arange(self.batch_size), y] = 1
            self.samples2 = self.sess.run(self.fake_images, feed_dict={self.z: z_sample, self.y: y_one_hot})
            # Each class overwrites the same file name.
            save_images(self.samples2[:64, :, :, :],[8,8],'s_d_code'+'.png')
        """ fixed noise pecified discrete code, gradual change continuous code """
        assert self.len_continuous_code == 2
        # Sweep the two continuous codes over an 8x8 grid in [-1, 1]^2.
        c1 = np.linspace(-1, 1, image_frame_dim)
        c2 = np.linspace(-1, 1, image_frame_dim)
        xv, yv = np.meshgrid(c1, c2)
        xv = xv[:image_frame_dim,:image_frame_dim]
        yv = yv[:image_frame_dim, :image_frame_dim]
        c1 = xv.flatten()
        c2 = yv.flatten()
        z_fixed = np.zeros([self.batch_size, self.zdim])
        for l in range(self.len_discrete_code):
            y = np.zeros(self.batch_size, dtype=np.int64) + l
            y_one_hot = np.zeros((self.batch_size, self.ydim))
            y_one_hot[np.arange(self.batch_size), y] = 1
            # Fill the continuous slots with the grid values for the first 64 rows.
            y_one_hot[np.arange(image_frame_dim*image_frame_dim), self.len_discrete_code] = c1
            y_one_hot[np.arange(image_frame_dim*image_frame_dim), self.len_discrete_code+1] = c2
            self.samples3 = self.sess.run(self.fake_images,
                                          feed_dict={ self.z: z_fixed, self.y: y_one_hot})
            save_images(self.samples3[:64, :, :, :],[8,8],str(l)+'.png')
# Demo run: build the graph, train briefly (10 epochs) and dump sample grids.
# (The 'mnist' argument is currently ignored by __init__ — see note there.)
infoGAN = MyGan(tf.Session(),'mnist',epoch = 10)
infoGAN.bulid_model()
infoGAN.train()
infoGAN.visualize_result()
|
#######################################################
#
# Cruiser.py
# Python implementation of the Class Cruiser
# Generated by Enterprise Architect
# Created on: 30-6��-2021 15:38:36
# Original author: 70748
#
#######################################################
import abc
class Cruiser(abc.ABC):
    """Abstract base for cruiser units.

    Concrete subclasses must supply their own firing behaviour;
    the class itself cannot be instantiated.
    """

    @abc.abstractmethod
    def gunFire(self):
        """Fire the cruiser's guns (implemented by subclasses)."""
        ...
|
"""longest common subsequence"""
class LCS:
    """Longest-common-subsequence computations (recursive and tabulated)."""

    def __init__(self):
        # Holds a reconstructed subsequence; currently never populated,
        # so get_lcs() always returns the empty string.
        self.lcs = ""

    def longest_common_subsequence(self, x: str, y: str, m: int, n: int) -> int:
        """Return the LCS length of x[:m] and y[:n] by plain recursion.

        Exponential time — fine for short demo inputs only.
        """
        if m == 0 or n == 0:
            return 0
        elif x[m - 1] == y[n - 1]:
            return 1 + self.longest_common_subsequence(x, y, m - 1, n - 1)
        else:
            return max(self.longest_common_subsequence(x, y, m, n - 1),
                       self.longest_common_subsequence(x, y, m - 1, n))

    @staticmethod
    def memo_longest_common_subsequence(x: str, y: str) -> int:
        """Return the LCS length of x and y by bottom-up tabulation (O(m*n)).

        Fixes two bugs in the previous version: it compared the fixed last
        characters x[m-1]/y[n-1] instead of x[i-1]/y[j-1] in the table loop,
        and it dropped the +1 on a character match — so it returned 0 for
        most inputs. A stray debug print was removed as well.
        """
        m = len(x)
        n = len(y)
        # memo[i][j] = LCS length of x[:i] and y[:j]; row/column 0 are base cases.
        memo = [[None] * (n + 1) for i in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0 or j == 0:
                    memo[i][j] = 0
                elif x[i - 1] == y[j - 1]:
                    memo[i][j] = 1 + memo[i - 1][j - 1]
                else:
                    memo[i][j] = max(memo[i][j - 1], memo[i - 1][j])
        return memo[m][n]

    def get_lcs(self):
        """Return the stored subsequence string (currently always "")."""
        return self.lcs
def main():
    """Demonstrate both LCS implementations on a sample string pair."""
    lcs = LCS()
    x, y = "AGGTAB", "GXTXAYB"
    recursive_length = lcs.longest_common_subsequence(x, y, len(x), len(y))
    print(f"Length of lcs of {x} and {y} is {recursive_length}")
    tabulated_length = LCS.memo_longest_common_subsequence(x, y)
    print(f"Memoization : {tabulated_length}")
    stored = lcs.get_lcs()
    print(f"lcs is {stored}")
if __name__ == "__main__":
main()
|
import pygame
pygame.init()
# NOTE(review): pygame.mixer.music.load() returns None, so all five
# constants below are None — they never hold playable sound objects.
# If per-effect handles are wanted, pygame.mixer.Sound(...) would be
# needed instead; confirm the intent before relying on these names.
RUNNING = pygame.mixer.music.load('data/Gameplay/sound/running.mp3')
JUMP = pygame.mixer.music.load('data/Gameplay/sound/jump.mp3')
HURT = pygame.mixer.music.load('data/Gameplay/sound/hurt.mp3')
ENEMY_DEATH = pygame.mixer.music.load('data/Gameplay/sound/enemy-death.mp3')
CHEST = pygame.mixer.music.load('data/Gameplay/sound/chest.mp3')
# data/Gameplay/sound/enchanted_forest

def background_music():
    # Load and start the background track (replaces whatever music is playing).
    pygame.mixer.music.load('data/Gameplay/sound/enchanted_forest.mp3')
    pygame.mixer.music.play()

def jump_music():
    # Play the jump effect through the single music channel.
    # print('Ok')
    pygame.mixer.music.load('data/Gameplay/sound/jump.mp3')
    pygame.mixer.music.play()

def hurt_music():
    # Play the hurt effect through the single music channel.
    pygame.mixer.music.load('data/Gameplay/sound/hurt.mp3')
    pygame.mixer.music.play()
|
import os
# Django secret key; falls back to a throwaway value for local testing only.
django_secret = os.getenv('DJANGO_SECRET_KEY', 'l0cAl-t3st*')
# DEBUG is on only when DJANGO_DEBUG is literally "true" (case-insensitive).
django_is_debug_activated = os.getenv('DJANGO_DEBUG', 'False').lower() == 'true'
# Where collected static files live, relative to the project root.
django_relative_path_for_static_file = os.getenv('DJANGO_STATIC_PATH', './public/static')
# Auth0 credentials (no defaults — None when unset).
auth0_client_id = os.getenv('AUTH0_CLIENT_ID')
auth0_client_secret = os.getenv('AUTH0_CLIENT_SECRET')
# POSTGRESQL_ADDON -> env variables in CleverCloud
# Each setting prefers the CleverCloud addon variable, then a local PG_* one,
# then an empty string.
database = {
    'name': os.getenv('POSTGRESQL_ADDON_DB', os.getenv('PG_DB', '')),
    'user': os.getenv('POSTGRESQL_ADDON_USER', os.getenv('PG_USER', '')),
    'password': os.getenv('POSTGRESQL_ADDON_PASSWORD', os.getenv('PG_PWD', '')),
    'host': os.getenv('POSTGRESQL_ADDON_HOST', os.getenv('PG_HOST', '')),
    'port': os.getenv('POSTGRESQL_ADDON_PORT', os.getenv('PG_PORT', '')),
}
#===============================================================================
# 26888: Verify that the favourite contacts will be listed on the top of the full
# contact list
#
# Procedure:
# 1- Open Address book app
# 2- Navigate to the top of contacts lists.
#
# Expected results:
# The favourite contacts are listed on the top of the full contact list and in
# letter position.
#===============================================================================
from gaiatest import GaiaTestCase
from OWDTestToolkit.utils.contacts import MockContact
from OWDTestToolkit import DOM
from OWDTestToolkit.utils.utils import UTILS
from OWDTestToolkit.apps.contacts import Contacts
class test_main(GaiaTestCase):
    """26888: verify favourite contacts are listed on top of the full contact list."""

    def setUp(self):
        # Set up child objects...
        GaiaTestCase.setUp(self)
        self.UTILS = UTILS(self)
        self.contacts = Contacts(self)
        # Prepare the contacts.
        self.contact_list = [MockContact() for i in range(3)]
        # NOTE(review): under Python 3 map() is lazy and would never insert
        # the contacts; this relies on Python 2 semantics — confirm runtime.
        map(self.UTILS.general.insertContact, self.contact_list)

    def tearDown(self):
        # Flush the collected results before the base teardown.
        self.UTILS.reporting.reportResults()
        GaiaTestCase.tearDown(self)

    def test_run(self):
        self.contacts.launch()
        # View the details of our contact and make him a favourite.
        self.UTILS.reporting.logResult("info", "<b>Setting up a contact in favourites ...</b>")
        self.contacts.view_contact(self.contact_list[0]['name'])
        # Mark contact as favourite
        fav_btn = self.UTILS.element.getElement(DOM.Contacts.favourite_button, "Toggle favourite button (before tap)")
        fav_btn.tap()
        # Go back to all contacts list
        self.contacts.go_back_from_contact_details()
        # Check the contact is in the favourite list
        string = self.contact_list[0]['givenName'] + self.contact_list[0]['familyName']
        favs = ("xpath", DOM.Contacts.favourites_list_xpath.format(string.upper()))
        self.UTILS.element.waitForElements(favs, "'" + self.contact_list[0]['name'] + "' in the favourites list")
        # Now check the favourites list appears first: scan the <ol> elements in
        # document order and require the favourites list id to occur before any
        # normal per-letter list id.
        fav_list = self.UTILS.element.getElements(("tag name", "ol"), "Contact lists")
        fav_id = "contacts-list-favorites"
        normal_ids = "contacts-list-"
        foundFav = False
        foundNormal = False
        for i in fav_list:
            if fav_id in i.get_attribute("id"):
                foundFav = True
            if normal_ids in i.get_attribute("id"):
                foundNormal = True
                break
        self.UTILS.test.test(foundNormal, "Found the non-favourite lists.")
        self.UTILS.test.test(foundFav, "Found the favourite lists before the non-favourite lists.")
|
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.email_address import EmailAddress
from ..model.physical_address import PhysicalAddress
from ..model.extension import Extension
from ..model.single_value_legacy_extended_property import SingleValueLegacyExtendedProperty
from ..model.multi_value_legacy_extended_property import MultiValueLegacyExtendedProperty
from ..model.profile_photo import ProfilePhoto
from datetime import datetime
from ..one_drive_object_base import OneDriveObjectBase
class Contact(OneDriveObjectBase):
def __init__(self, prop_dict=None):
    """Initialise the contact from an optional property dictionary.

    Fixes the mutable-default-argument bug: the previous ``prop_dict={}``
    shared one dict across every Contact constructed without arguments,
    so setting a property on one such contact leaked into all the others.
    Passing an explicit dict behaves exactly as before.
    """
    self._prop_dict = {} if prop_dict is None else prop_dict
@property
def parent_folder_id(self):
"""
Gets and sets the parentFolderId
Returns:
str:
The parentFolderId
"""
if "parentFolderId" in self._prop_dict:
return self._prop_dict["parentFolderId"]
else:
return None
@parent_folder_id.setter
def parent_folder_id(self, val):
self._prop_dict["parentFolderId"] = val
@property
def birthday(self):
    """
    Gets and sets the birthday
    Returns:
        datetime:
            The birthday
    NOTE(review): parsing assumes the service always includes fractional
    seconds ("%Y-%m-%dT%H:%M:%S.%f" after stripping the "Z"); a value like
    "2000-01-01T00:00:00Z" would raise ValueError — confirm the API contract.
    """
    if "birthday" in self._prop_dict:
        return datetime.strptime(self._prop_dict["birthday"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f")
    else:
        return None

@birthday.setter
def birthday(self, val):
    # Serialise back to ISO 8601 with a trailing "Z" (UTC marker).
    self._prop_dict["birthday"] = val.isoformat()+"Z"
@property
def file_as(self):
"""
Gets and sets the fileAs
Returns:
str:
The fileAs
"""
if "fileAs" in self._prop_dict:
return self._prop_dict["fileAs"]
else:
return None
@file_as.setter
def file_as(self, val):
self._prop_dict["fileAs"] = val
@property
def display_name(self):
"""
Gets and sets the displayName
Returns:
str:
The displayName
"""
if "displayName" in self._prop_dict:
return self._prop_dict["displayName"]
else:
return None
@display_name.setter
def display_name(self, val):
self._prop_dict["displayName"] = val
@property
def given_name(self):
"""
Gets and sets the givenName
Returns:
str:
The givenName
"""
if "givenName" in self._prop_dict:
return self._prop_dict["givenName"]
else:
return None
@given_name.setter
def given_name(self, val):
self._prop_dict["givenName"] = val
@property
def initials(self):
"""
Gets and sets the initials
Returns:
str:
The initials
"""
if "initials" in self._prop_dict:
return self._prop_dict["initials"]
else:
return None
@initials.setter
def initials(self, val):
self._prop_dict["initials"] = val
@property
def middle_name(self):
"""
Gets and sets the middleName
Returns:
str:
The middleName
"""
if "middleName" in self._prop_dict:
return self._prop_dict["middleName"]
else:
return None
@middle_name.setter
def middle_name(self, val):
self._prop_dict["middleName"] = val
@property
def nick_name(self):
"""
Gets and sets the nickName
Returns:
str:
The nickName
"""
if "nickName" in self._prop_dict:
return self._prop_dict["nickName"]
else:
return None
@nick_name.setter
def nick_name(self, val):
self._prop_dict["nickName"] = val
@property
def surname(self):
"""
Gets and sets the surname
Returns:
str:
The surname
"""
if "surname" in self._prop_dict:
return self._prop_dict["surname"]
else:
return None
@surname.setter
def surname(self, val):
self._prop_dict["surname"] = val
@property
def title(self):
"""
Gets and sets the title
Returns:
str:
The title
"""
if "title" in self._prop_dict:
return self._prop_dict["title"]
else:
return None
@title.setter
def title(self, val):
self._prop_dict["title"] = val
@property
def yomi_given_name(self):
"""
Gets and sets the yomiGivenName
Returns:
str:
The yomiGivenName
"""
if "yomiGivenName" in self._prop_dict:
return self._prop_dict["yomiGivenName"]
else:
return None
@yomi_given_name.setter
def yomi_given_name(self, val):
self._prop_dict["yomiGivenName"] = val
@property
def yomi_surname(self):
"""
Gets and sets the yomiSurname
Returns:
str:
The yomiSurname
"""
if "yomiSurname" in self._prop_dict:
return self._prop_dict["yomiSurname"]
else:
return None
@yomi_surname.setter
def yomi_surname(self, val):
self._prop_dict["yomiSurname"] = val
@property
def yomi_company_name(self):
"""
Gets and sets the yomiCompanyName
Returns:
str:
The yomiCompanyName
"""
if "yomiCompanyName" in self._prop_dict:
return self._prop_dict["yomiCompanyName"]
else:
return None
@yomi_company_name.setter
def yomi_company_name(self, val):
self._prop_dict["yomiCompanyName"] = val
@property
def generation(self):
"""
Gets and sets the generation
Returns:
str:
The generation
"""
if "generation" in self._prop_dict:
return self._prop_dict["generation"]
else:
return None
@generation.setter
def generation(self, val):
self._prop_dict["generation"] = val
@property
def email_addresses(self):
    """Gets and sets the emailAddresses
    Returns:
        :class:`EmailAddressesCollectionPage<onedrivesdk.request.email_addresses_collection.EmailAddressesCollectionPage>`:
            The emailAddresses
    NOTE(review): EmailAddressesCollectionPage is not imported in this
    module's visible import block, so this getter would raise NameError
    whenever "emailAddresses" is present — confirm the generated imports.
    """
    if "emailAddresses" in self._prop_dict:
        return EmailAddressesCollectionPage(self._prop_dict["emailAddresses"])
    else:
        return None
@property
def im_addresses(self):
"""
Gets and sets the imAddresses
Returns:
str:
The imAddresses
"""
if "imAddresses" in self._prop_dict:
return self._prop_dict["imAddresses"]
else:
return None
@im_addresses.setter
def im_addresses(self, val):
self._prop_dict["imAddresses"] = val
@property
def job_title(self):
"""
Gets and sets the jobTitle
Returns:
str:
The jobTitle
"""
if "jobTitle" in self._prop_dict:
return self._prop_dict["jobTitle"]
else:
return None
@job_title.setter
def job_title(self, val):
self._prop_dict["jobTitle"] = val
@property
def company_name(self):
"""
Gets and sets the companyName
Returns:
str:
The companyName
"""
if "companyName" in self._prop_dict:
return self._prop_dict["companyName"]
else:
return None
@company_name.setter
def company_name(self, val):
self._prop_dict["companyName"] = val
@property
def department(self):
"""
Gets and sets the department
Returns:
str:
The department
"""
if "department" in self._prop_dict:
return self._prop_dict["department"]
else:
return None
@department.setter
def department(self, val):
self._prop_dict["department"] = val
@property
def office_location(self):
"""
Gets and sets the officeLocation
Returns:
str:
The officeLocation
"""
if "officeLocation" in self._prop_dict:
return self._prop_dict["officeLocation"]
else:
return None
@office_location.setter
def office_location(self, val):
self._prop_dict["officeLocation"] = val
@property
def profession(self):
"""
Gets and sets the profession
Returns:
str:
The profession
"""
if "profession" in self._prop_dict:
return self._prop_dict["profession"]
else:
return None
@profession.setter
def profession(self, val):
self._prop_dict["profession"] = val
@property
def business_home_page(self):
"""
Gets and sets the businessHomePage
Returns:
str:
The businessHomePage
"""
if "businessHomePage" in self._prop_dict:
return self._prop_dict["businessHomePage"]
else:
return None
@business_home_page.setter
def business_home_page(self, val):
self._prop_dict["businessHomePage"] = val
@property
def assistant_name(self):
"""
Gets and sets the assistantName
Returns:
str:
The assistantName
"""
if "assistantName" in self._prop_dict:
return self._prop_dict["assistantName"]
else:
return None
@assistant_name.setter
def assistant_name(self, val):
self._prop_dict["assistantName"] = val
@property
def manager(self):
"""
Gets and sets the manager
Returns:
str:
The manager
"""
if "manager" in self._prop_dict:
return self._prop_dict["manager"]
else:
return None
@manager.setter
def manager(self, val):
self._prop_dict["manager"] = val
@property
def home_phones(self):
"""
Gets and sets the homePhones
Returns:
str:
The homePhones
"""
if "homePhones" in self._prop_dict:
return self._prop_dict["homePhones"]
else:
return None
@home_phones.setter
def home_phones(self, val):
self._prop_dict["homePhones"] = val
@property
def mobile_phone(self):
"""
Gets and sets the mobilePhone
Returns:
str:
The mobilePhone
"""
if "mobilePhone" in self._prop_dict:
return self._prop_dict["mobilePhone"]
else:
return None
@mobile_phone.setter
def mobile_phone(self, val):
self._prop_dict["mobilePhone"] = val
@property
def business_phones(self):
"""
Gets and sets the businessPhones
Returns:
str:
The businessPhones
"""
if "businessPhones" in self._prop_dict:
return self._prop_dict["businessPhones"]
else:
return None
@business_phones.setter
def business_phones(self, val):
self._prop_dict["businessPhones"] = val
@property
def home_address(self):
"""
Gets and sets the homeAddress
Returns:
:class:`PhysicalAddress<onedrivesdk.model.physical_address.PhysicalAddress>`:
The homeAddress
"""
if "homeAddress" in self._prop_dict:
if isinstance(self._prop_dict["homeAddress"], OneDriveObjectBase):
return self._prop_dict["homeAddress"]
else :
self._prop_dict["homeAddress"] = PhysicalAddress(self._prop_dict["homeAddress"])
return self._prop_dict["homeAddress"]
return None
@home_address.setter
def home_address(self, val):
self._prop_dict["homeAddress"] = val
@property
def business_address(self):
"""
Gets and sets the businessAddress
Returns:
:class:`PhysicalAddress<onedrivesdk.model.physical_address.PhysicalAddress>`:
The businessAddress
"""
if "businessAddress" in self._prop_dict:
if isinstance(self._prop_dict["businessAddress"], OneDriveObjectBase):
return self._prop_dict["businessAddress"]
else :
self._prop_dict["businessAddress"] = PhysicalAddress(self._prop_dict["businessAddress"])
return self._prop_dict["businessAddress"]
return None
@business_address.setter
def business_address(self, val):
self._prop_dict["businessAddress"] = val
@property
def other_address(self):
"""
Gets and sets the otherAddress
Returns:
:class:`PhysicalAddress<onedrivesdk.model.physical_address.PhysicalAddress>`:
The otherAddress
"""
if "otherAddress" in self._prop_dict:
if isinstance(self._prop_dict["otherAddress"], OneDriveObjectBase):
return self._prop_dict["otherAddress"]
else :
self._prop_dict["otherAddress"] = PhysicalAddress(self._prop_dict["otherAddress"])
return self._prop_dict["otherAddress"]
return None
@other_address.setter
def other_address(self, val):
self._prop_dict["otherAddress"] = val
@property
def spouse_name(self):
"""
Gets and sets the spouseName
Returns:
str:
The spouseName
"""
if "spouseName" in self._prop_dict:
return self._prop_dict["spouseName"]
else:
return None
@spouse_name.setter
def spouse_name(self, val):
self._prop_dict["spouseName"] = val
@property
def personal_notes(self):
"""
Gets and sets the personalNotes
Returns:
str:
The personalNotes
"""
if "personalNotes" in self._prop_dict:
return self._prop_dict["personalNotes"]
else:
return None
@personal_notes.setter
def personal_notes(self, val):
self._prop_dict["personalNotes"] = val
@property
def children(self):
"""
Gets and sets the children
Returns:
str:
The children
"""
if "children" in self._prop_dict:
return self._prop_dict["children"]
else:
return None
@children.setter
def children(self, val):
self._prop_dict["children"] = val
@property
def extensions(self):
"""Gets and sets the extensions
Returns:
:class:`ExtensionsCollectionPage<onedrivesdk.request.extensions_collection.ExtensionsCollectionPage>`:
The extensions
"""
if "extensions" in self._prop_dict:
return ExtensionsCollectionPage(self._prop_dict["extensions"])
else:
return None
@property
def single_value_extended_properties(self):
"""Gets and sets the singleValueExtendedProperties
Returns:
:class:`SingleValueExtendedPropertiesCollectionPage<onedrivesdk.request.single_value_extended_properties_collection.SingleValueExtendedPropertiesCollectionPage>`:
The singleValueExtendedProperties
"""
if "singleValueExtendedProperties" in self._prop_dict:
return SingleValueExtendedPropertiesCollectionPage(self._prop_dict["singleValueExtendedProperties"])
else:
return None
@property
def multi_value_extended_properties(self):
"""Gets and sets the multiValueExtendedProperties
Returns:
:class:`MultiValueExtendedPropertiesCollectionPage<onedrivesdk.request.multi_value_extended_properties_collection.MultiValueExtendedPropertiesCollectionPage>`:
The multiValueExtendedProperties
"""
if "multiValueExtendedProperties" in self._prop_dict:
return MultiValueExtendedPropertiesCollectionPage(self._prop_dict["multiValueExtendedProperties"])
else:
return None
@property
def photo(self):
"""
Gets and sets the photo
Returns:
:class:`ProfilePhoto<onedrivesdk.model.profile_photo.ProfilePhoto>`:
The photo
"""
if "photo" in self._prop_dict:
if isinstance(self._prop_dict["photo"], OneDriveObjectBase):
return self._prop_dict["photo"]
else :
self._prop_dict["photo"] = ProfilePhoto(self._prop_dict["photo"])
return self._prop_dict["photo"]
return None
@photo.setter
def photo(self, val):
self._prop_dict["photo"] = val
|
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from server.services import driver as driver_service
class Command(BaseCommand):
    """Management command that pushes pending driver notifications."""

    help = 'Sends notifications to drivers'

    def handle(self, *args, **options):
        # Only document notifications are live today; the other notification
        # types remain here, disabled, for future use:
        # driver_service.process_signup_notifications()
        # driver_service.process_credit_notifications() - we might want this soon
        # driver_service.process_referral_notifications() - we might want this soon
        # driver_service.process_insurance_notifications()
        driver_service.process_document_notifications()
|
#!/usr/bin/env python
"""
Jay Sweeney, Ben Ihle, 2011.
Simple DNS server
for regex-based conditional domain
resolution and forwarding.
Place config (see below) into
./bdns_settings.py
What this does
~~~~~~~~~~~~~~
static mapping for www.google.com
static mapping for .*google.com (e.g. mail.google.com, www.aaaddsssgoogle.com)
forward *.mydomain.com to upstream 10.0.0.1 dns
all other requests resolved via 8.8.8.8 and ns1.linode.com
"""
import sys
import time
import os
import re
import dns.resolver
import dns.message
import dns.rdtypes.IN.A
import dns.rdatatype
import SocketServer
from SocketServer import ThreadingMixIn, UDPServer
import socket
import logging
import imp
logging.basicConfig(
format='[%(asctime)s] %(levelname)-10s %(message)s',
datefmt='%d/%m/%Y %I:%M:%S %p',
level=logging.INFO)
log = logging.getLogger(__name__)
class DNSProtocol(object):
    """
    Knows how to respond to DNS messages, but mostly by just shipping them off
    to some real nameserver. The main entry point is `DNSProtocol.handle(data)`.
    """

    def __init__(self, config):
        # config: the loaded bdns_settings module (attributes: default, hosts).
        self.config = config

    def handle(self, data):
        """ Handle a dns message; return the wire-format response bytes. """
        # NOTE(review): debug dump of every raw request; remove for production.
        with open('request.bin', 'wb') as fout:
            fout.write(data)
        msg = dns.message.from_wire(data)
        log.debug('[REQUEST]\n%s\n[/REQUEST]', str(msg))
        nameservers = self.config.default
        if len(msg.question) > 1:
            log.warning("Warning: multi-question messages " +\
                "are not yet supported. Using default nameserver.")
            return self.forward_request(msg, nameservers).to_wire()
        question = msg.question[0]
        log.info('%-10s%-8s%s', 'Question:', msg.id, str(question))
        if question.rdtype == dns.rdatatype.A:
            name = question.name.to_text()
            ipaddr, nameservers = self.resolve_by_config(name)
            if ipaddr:
                # Statically configured address: answer locally.
                response = self.create_response(ipaddr, msg)
                log.info('%-10s%-8s%s DNS: %s', 'Answer:', response.id, map(str, response.answer), '[* STATIC IP *]')
                with open('response.bin', 'wb') as fout:
                    fout.write(response.to_wire())
                return response.to_wire()
        # let some nameserver handle the message
        response = self.forward_request(msg, nameservers)
        log.debug('[RESPONSE]\n%s\n[/RESPONSE]', str(response))
        log.info('%-10s%-8s%s DNS: %r', 'Answer:', response.id, map(str, response.answer), nameservers)
        return response.to_wire()

    def forward_request(self, msg, nameservers):
        """ Try each nameserver in turn; return the first reply obtained. """
        for ns in nameservers:
            try:
                # xxx: when do we use tcp? (message size-based?)
                response = dns.query.udp(msg, ns, timeout=10)
                return response
            except Exception as exc:
                # BUG FIX: was a bare `except:`, which silently swallowed
                # everything including KeyboardInterrupt/SystemExit.
                log.warning('query to %s failed: %s', ns, exc)
                continue
        # XXX: raise exception here, or return some sort of
        # error response to client

    def resolve_by_config(self, name):
        """
        Look through `config` dictionary for either an IP address or a
        nameserver to use to resolve a `name`. Returns
        a tuple (ipaddr, [nameservers]) in which the `ipaddr` can be None
        but [nameservers] will always be non-empty.
        """
        # remove trailing '.' from name, if present.
        # BUG FIX: was `name[-1] is '.'` -- identity comparison against a
        # string literal only works by CPython interning accident.
        if name.endswith('.'):
            name = name[:-1]
        nameservers = self.config.default
        ipaddr = None
        for key, item in self.config.hosts.iteritems():
            # TODO: hosts really should be ordered so that you can effectively override.
            if (key == name) or (hasattr(key, 'search') and key.search(name)):
                if isinstance(item, list):
                    nameservers = item
                else:
                    ipaddr = item
                break
        return ipaddr, nameservers

    def create_response(self, ipaddr, msg):
        """
        Create a response for an `A` message with an answer of `ipaddr`
        """
        response = dns.message.make_response(msg)
        # rdclass=1 (IN), rdtype=1 (A)
        rrset = dns.rrset.RRset(msg.question[0].name, 1, 1)
        rrset.ttl = 5
        rrset.add(dns.rdtypes.IN.A.A(1, 1, ipaddr))
        response.answer.append(rrset)
        return response
class RequestHandler(SocketServer.BaseRequestHandler):
    """ handles requests and does some bad non-threadsafe config reloading """

    def handle(self):
        data, sock = self.request
        # Re-read the settings module on every request so edits take effect
        # live. XXX: race here!
        self.server.config = getconfig()
        reply = DNSProtocol(self.server.config).handle(data)
        sock.sendto(reply, self.client_address)
class Server(ThreadingMixIn, UDPServer):
    """Threaded UDP server that carries the parsed config on the instance."""

    def __init__(self, server_address, RequestHandlerClass, config):
        self.config = config
        UDPServer.__init__(self, server_address, RequestHandlerClass)
class ConfigException(Exception):
    """Raised when bdns_settings.py is missing keys or holds bad values."""
    pass
def isip(s):
    """Return True when *s* parses as an IPv4 address, else False."""
    try:
        socket.inet_aton(s)
    except socket.error:
        return False
    return True
def getconfig():
    """ Read and validate config. Also resolve any nameserver names
    TODO: log info on resolving dnses """
    try:
        fp, path, desc = imp.find_module('bdns_settings')
        try:
            bdns_settings = imp.load_module('bdns_settings', fp, path, desc)
        finally:
            if fp:
                fp.close()
    except ImportError:
        log.error("Could not load bdns_settings.py. Using defaults.")
        # BUG FIX: this used to return a plain dict, but every caller reads
        # `config.default` / `config.hosts` as attributes; expose the same
        # interface the settings module has (and make `default` a list, as
        # DNSProtocol iterates it as a nameserver list).
        class _FallbackConfig(object):
            default = ['8.8.8.8']
            hosts = {}
        return _FallbackConfig
    try:
        default_nameservers = bdns_settings.default
    except AttributeError:
        # BUG FIX: missing module attributes raise AttributeError, so the
        # old `except KeyError` never fired and the error escaped uncaught.
        raise ConfigException('No "default" found in bdns_settings.py')
    for ns in default_nameservers:
        if not isip(ns):
            raise ConfigException("Bad default nameserver IP: `%s`" % ns)
    for key, value in bdns_settings.hosts.iteritems():
        if isinstance(value, list): # nameserver
            for thing in value:
                if not isip(thing):
                    # Not an IP literal: try resolving it via the defaults.
                    resolver = dns.resolver.get_default_resolver()
                    resolver.nameservers = default_nameservers
                    try:
                        answer = resolver.query(thing)
                        bdns_settings.hosts[key] = [answer[0].address]
                    except Exception:
                        # BUG FIX: narrowed from a bare `except:`.
                        raise ConfigException("`%s` does not look like "
                            "an ipv4 address and does not resolve "
                            "using the default nameservers" % thing)
        else: # should be a valid ip
            if not isip(value):
                raise ConfigException("`%s` is not a valid "
                    "ipv4 address" % value)
    return bdns_settings
if __name__ == '__main__':
    # Bind the standard DNS port (53 -- requires root) and serve until Ctrl-C.
    server = Server(("0.0.0.0", 53), RequestHandler, getconfig())
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print "Shutting down..."
        server.server_close()
|
"""
@Time : 2020/12/1418:37
@Auth : 周俊贤
@File :get_tsa_thresh.py
@DESCRIPTION:
"""
import torch
def get_tsa_thresh(schedule, global_step, num_train_steps, start, end):
    """Compute the training-signal-annealing threshold for the current step.

    The threshold grows from *start* toward *end* as training progresses,
    following the named *schedule*.

    Args:
        schedule: one of 'linear_schedule', 'exp_schedule', 'log_schedule'.
        global_step: current optimizer step.
        num_train_steps: total number of training steps.
        start, end: threshold range endpoints.

    Returns:
        torch.Tensor: scalar threshold in [start, end].

    Raises:
        ValueError: if *schedule* is not one of the known names.
    """
    training_progress = torch.tensor(float(global_step) / float(num_train_steps))
    if schedule == 'linear_schedule':
        threshold = training_progress
    elif schedule == 'exp_schedule':
        scale = 5
        threshold = torch.exp((training_progress - 1) * scale)
    elif schedule == 'log_schedule':
        scale = 5
        threshold = 1 - torch.exp((-training_progress) * scale)
    else:
        # BUG FIX: an unknown schedule previously fell through and raised
        # UnboundLocalError on `output`; fail with a clear message instead.
        raise ValueError("unknown schedule: %r" % (schedule,))
    return threshold * (end - start) + start
|
from collections import defaultdict
def distance(x1, y1, x2, y2):
    """Manhattan (taxicab) distance between (x1, y1) and (x2, y2)."""
    dx = abs(x1 - x2)
    dy = abs(y1 - y2)
    return dx + dy
def find_closest(coords, x, y):
    """Index of the unique coordinate nearest to (x, y) by Manhattan
    distance, or -1 when two or more coordinates tie for nearest."""
    best = 1000000
    winners = []
    for idx, (cx, cy) in enumerate(coords):
        d = abs(x - cx) + abs(y - cy)
        if d < best:
            best = d
            winners = [idx]
        elif d == best:
            winners.append(idx)
    if len(winners) > 1:
        return -1
    return winners[0]
def solve(d):
    """Advent of Code 2018 day 6, part 2.

    Parses lines of "x, y" coordinates and counts the grid points whose
    total Manhattan distance to ALL coordinates is below 10000, scanning a
    350-cell margin around the coordinates' bounding box (puzzle-specific
    constants).

    Args:
        d: iterable of "x, y" strings.

    Returns:
        int: size of the safe region.
    """
    coords = []
    min_x = min_y = 100000
    max_x = max_y = -1000000
    for line in d:
        x, y = map(int, line.split(', '))
        coords.append((x, y))
        min_x = min(min_x, x)
        max_x = max(max_x, x)
        min_y = min(min_y, y)
        max_y = max(max_y, y)
    good = 0
    # Fix: dropped the dead `count` accumulator the original incremented but
    # never read; the Manhattan distance is computed inline so the function
    # is self-contained.
    for x in range(min_x - 350, max_x + 350):
        for y in range(min_y - 350, max_y + 350):
            total = sum(abs(x - cx) + abs(y - cy) for cx, cy in coords)
            if total < 10000:
                good += 1
    return good
def read_and_solve():
    """Load ``input_6.txt`` and run :func:`solve` on its stripped lines."""
    with open('input_6.txt') as handle:
        lines = [entry.rstrip() for entry in handle]
    return solve(lines)
if __name__ == '__main__':
    # Script entry point: solve the checked-in puzzle input and print it.
    print(read_and_solve())
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import numpy as np
#np.seterr(divide='ignore', invalid='ignore')
import sympy as sym
from scipy import integrate
import uncertainties.unumpy as unp
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
from scipy.optimize import curve_fit
#IMPORT
# Measurement import: data1 = low-pressure run, data2 = high-pressure run.
data1=np.genfromtxt("data1.txt", unpack = True)
# Scale the raw pressure column by 1e-4 -- presumably a unit conversion of
# the gauge readings; TODO confirm against the lab-report units.
data1[1] = data1[1]*10**(-4)
data1[0] = data1[0]+273  # Celsius -> Kelvin
data2=np.genfromtxt("data2.txt", unpack = True)
data2[0] = data2[0]+273  # Celsius -> Kelvin
# a) Vapor-pressure vs. temperature plot (kept for reference, disabled).
# plt.plot(1/data1[0],np.log(data1[1]),'kx',label="Dampfdruck")
# plt.yscale('log')
# plt.ylabel(f"log(p) in milli Bar")
# plt.xlabel(f"1/T")
# plt.legend()
# plt.show()
# plt.savefig("build/plot_a.pdf",bbox_inches='tight')
# plt.close()
# Definitionen für den curvefit
def f(x, a, b):
    """Straight line a*x + b (fit model for curve_fit)."""
    slope_term = a*x
    return slope_term + b
def pol(x, a, b, c, d):
    """Cubic polynomial a*x^3 + b*x^2 + c*x + d (fit model for curve_fit).

    Terms are summed in the same left-to-right order as the original
    expression so floating-point results are bit-identical.
    """
    cubic = a*x**3
    quadratic = b*x**2
    return cubic + quadratic + c*x + d
R = 8.314462  # universal gas constant [J/(mol K)]
####################################################################
# Low-pressure regime (vapor pressure 30-100 mbar, water)
####################################################################
# Linear fit of ln(p) against 1/T; per Clausius-Clapeyron the slope is -L/R.
params_a, covariance_matrix_a = curve_fit(f,1/data1[0],np.log(data1[1]))
plt.plot(1/data1[0],np.log(data1[1]),'kx',label="Dampfdruck")
x_plot = np.linspace(0.00267,0.00331)
plt.plot(x_plot,f(x_plot,*params_a),label='Fit')
errors_a = np.sqrt(np.diag(covariance_matrix_a))  # 1-sigma parameter errors
#plt.yscale('log')
plt.ylabel(f"log(p)")
plt.xlabel(f"1/T in 1/K")
plt.legend()
plt.savefig("build/plot_a.pdf",bbox_inches='tight')
#plt.show()
plt.close()
print(errors_a)
print(f"Parameter der Ausgleichskurve für die Messung unter 1 Bar:")
print(f"Steigung:{params_a[0]}+- {errors_a[0]}")
print(f"y-Achsenabschnitt:{params_a[1]}+-{errors_a[1]}")
# Evaporation enthalpy from the fitted slope: L = -slope * R.
unparams_a = unp.uarray(params_a,errors_a)
L = -unparams_a[0]*R
print(f"L für unter 1 Bar: {L}")
L_a = R*373  # external (volume-work) part, ideal gas at T = 373 K
print(f"La für Niedrigdruck {L_a}")
L_i = L-L_a  # internal part of the evaporation enthalpy
print(f"Li für Niedrigdruck {L_i}")
L_im = L_i / (6.022 * 10**23)  # per molecule: divide by Avogadro's number
L_im = L_im * (6.242 * 10**18)  # joule -> electron volt
print(f"Li pro Molekül {L_im}")
####################################################################
# High-pressure regime (vapor pressure 1-15 bar, water)
####################################################################
# Cubic fit of p(T); its derivative enters the Clausius-Clapeyron inversion
# below.
params_b, covariance_matrix_b = curve_fit(pol,data2[0],data2[1])
errors_b = np.sqrt(np.diag(covariance_matrix_b))
unparams_b = unp.uarray(params_b,errors_b)
plt.plot(data2[0],data2[1],"kx",label="Dampfdruck")
x_plot = np.linspace(100+273,200+273)
plt.plot(x_plot,pol(x_plot,*params_b),label='Fit')
plt.ylabel(f"log(p)")
plt.xlabel(f"T in K")
plt.legend()
plt.savefig("build/plot_b.pdf",bbox_inches='tight')
#plt.show()
plt.close()
print(f"Die Parameter im Hochdruckbereich sind:")
print(f"Parameter :{unparams_b}")
print(f"Parameter a: {unparams_b[0]}")
print(f"Parameter b: {unparams_b[1]}")
print(f"Parameter c: {unparams_b[2]}")
print(f"Parameter d: {unparams_b[3]}")
A = 0.9
# Reference implementation of the closed-form L(T) kept for documentation:
# def L_berechnet(T,a,b,c,d):
#     return(T/(pol(T,a,b,c,d)) * ( (R*T/2) + np.sqrt(( R*T/2 )**2 + A*(pol(T,a,b,c,d)) ) ) (3*a*T**2+2*b*T+c))
# Evaluate L(T) for both signs of the square root (quadratic solution for
# the vapor volume); only the "+" branch is physical and gets plotted.
L_berechnetp = data2[0]/(pol(data2[0],*params_b)) * ( (R*data2[0]/2) + np.sqrt(( R*data2[0]/2 )**2 + A*(pol(data2[0],*params_b)) ) )* (3*params_b[0]*data2[0]**2+2*params_b[1]*data2[0]+params_b[2])
L_berechnetn = data2[0]/(pol(data2[0],*params_b)) * ( (R*data2[0]/2) - np.sqrt(( R*data2[0]/2 )**2 + A*(pol(data2[0],*params_b)) ) )* (3*params_b[0]*data2[0]**2+2*params_b[1]*data2[0]+params_b[2])
plt.plot(data2[0],L_berechnetp,"rx",label='Wurzel addiert')
#plt.plot(data2[0],L_berechnetn,label='Wurzel subtrahiert')
plt.ylabel(f"L in Joule/mol")
plt.xlabel(f"T in K")
plt.legend()
plt.savefig("build/plot_c.pdf",bbox_inches='tight')
#plt.show()
plt.close()
print(f"Werte:{L_berechnetp} ")
import abc
import collections
import collections.abc
import dataclasses
from functools import wraps, partial, reduce
import hashlib
import operator
from typing import Any, Callable, Dict, Iterable
import warnings
class Stack:
    """Minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self._stack = []

    def push(self, value):
        """Place *value* on top of the stack."""
        self._stack.append(value)

    def pop(self):
        """Remove and return the top value (IndexError when empty)."""
        return self._stack.pop()

    def peek(self):
        """Return the top value without removing it (IndexError when empty)."""
        return self._stack[-1]

    def peek_default(self, default: Any) -> Any:
        """Return the top value, or *default* when the stack is empty."""
        if not self._stack:
            return default
        return self._stack[-1]

    def __bool__(self) -> bool:
        """True iff the stack holds at least one element."""
        return len(self._stack) > 0
class _Ref(object):
def __init__(self, value):
self.value = value
def __eq__(self, other):
return self.value is other.value
def __hash__(self):
return id(self.value)
class _IdentitySetBase(collections.abc.MutableSet):
    """MutableSet skeleton keyed by object identity via _Ref wrappers.

    ``refs`` is any mutable container of _Ref supporting ``in``, iteration,
    ``len``, ``add`` and ``clear``. Removal operations are unsupported.
    """

    def __init__(self, refs):
        self.refs = refs

    def __contains__(self, elem):
        return _Ref(elem) in self.refs

    def __iter__(self):
        for ref in self.refs:
            yield ref.value

    def __len__(self):
        return len(self.refs)

    def add(self, elem):
        self.refs.add(_Ref(elem))

    def discard(self, elem):
        raise NotImplementedError()

    def remove(self, elem):
        raise NotImplementedError()

    def pop(self):
        raise NotImplementedError()

    def clear(self):
        self.refs.clear()

    def __repr__(self):
        return "%s(%s)" % (type(self).__name__, list(self))
class IdentitySet(_IdentitySetBase):
    """Unordered identity-based set built on a plain ``set`` of _Ref."""

    def __init__(self, items=()):
        super().__init__({_Ref(item) for item in items})
class OrderedIdentitySet(_IdentitySetBase):
    """Identity-based set that preserves insertion order."""

    def __init__(self, items=()):
        # An OrderedDict with None values stands in for an ordered set,
        # avoiding a custom implementation or an extra dependency.
        ordered = collections.OrderedDict((_Ref(item), None) for item in items)
        super().__init__(ordered)

    def add(self, elem):
        self.refs[_Ref(elem)] = None
def deprecated(func=None, *, msg=None):
    """
    Inspired by https://pybit.es/decorator-optional-argument.html.

    Marks a function as deprecated. @msg is an optional parameter to override
    the default warning.

    Examples:
        @deprecated
        def foo(...): ...

        @deprecated(msg="Don't use bar!")
        def bar(...): ...
    """
    if func is None:
        # Decorator was called with arguments; wait for the function.
        return partial(deprecated, msg=msg)
    warning = msg if msg is not None else f"{func.__name__} is deprecated"

    @wraps(func)
    def _wrapper(*args, **kwargs):
        warnings.warn(warning, category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)

    return _wrapper
def setattrs(obj, dct, pred=None):
    """Set each (key, value) of *dct* as an attribute on *obj*.

    When *pred* is given, only pairs with pred(key, value) true are set.
    """
    for key, value in dct.items():
        if pred is not None and not pred(key, value):
            continue
        setattr(obj, key, value)
class ParamDict(dict):
    """
    Hashable dictionary for simple key: value parameters
    """

    def __hash__(self):
        # Sort items so the hash is independent of insertion order.
        ordered_items = sorted(self.items())
        return hash(tuple(ordered_items))
def is_int(value):
    """True when ``int(value)`` succeeds; False on TypeError/ValueError."""
    try:
        int(value)
        return True
    except (TypeError, ValueError):
        return False
def _make_delegate_fn(method):
def _fn(self, *args, **kwargs):
return getattr(self._underlying, method)(*args, **kwargs)
return _fn
def make_delegator_cls(base):
    # Build a concrete subclass of the abstract *base* whose abstract
    # methods all forward to a wrapped instance held in ``self._underlying``.
    methods = base.__abstractmethods__

    class _Delegator(base):
        def __init__(self, underlying, *args, **kwargs):
            self._underlying = underlying

    # Install the forwarding methods after class creation, then clear the
    # abstract-method set so the class becomes instantiable.
    for method in methods:
        setattr(_Delegator, method, _make_delegate_fn(method))
    # NOTE(rsetaluri): We should be using the new abc.update_abstractmethods
    # function. See https://bugs.python.org/issue41905 and
    # https://docs.python.org/3.10/library/abc.html#abc.update_abstractmethods.
    _Delegator.__abstractmethods__ = frozenset()
    return _Delegator
class IterableException(ValueError):
    """Base error for iterable-cardinality helpers (see :func:`only`)."""
    pass


class EmptyIterableException(IterableException):
    """Raised when an iterable is unexpectedly empty."""
    pass


class NonSingletonIterableException(IterableException):
    """Raised when an iterable unexpectedly has more than one element."""
    pass
def only(lst: Iterable):
    """Return the sole element of *lst*.

    Raises:
        EmptyIterableException: when *lst* yields nothing.
        NonSingletonIterableException: when *lst* yields more than one
            element; the exception carries all of them.
    """
    iterator = iter(lst)
    try:
        first = next(iterator)
    except StopIteration:
        raise EmptyIterableException() from None
    try:
        second = next(iterator)
    except StopIteration:
        return first
    raise NonSingletonIterableException([first, second] + list(iterator))
def is_empty(lst: Iterable) -> bool:
    """Checks whether the iterable @lst has any elements. If @lst is a
    generator, then it is modified and should no longer be used.
    """
    sentinel = object()
    return next(iter(lst), sentinel) is sentinel
class Finalizable(abc.ABC):
    """Interface for objects that require an explicit finalization step."""

    @abc.abstractmethod
    def finalize(self):
        """Perform the finalization step; subclasses must override."""
        raise NotImplementedError()
class FinalizableDelegator(Finalizable):
    """Finalizable that fans ``finalize()`` out to named child objects."""

    def __init__(self):
        self._children = {}

    def add_child(self, key: str, child: Finalizable):
        """Register *child* under *key*; duplicate keys raise ValueError."""
        if key in self._children:
            raise ValueError(f"key '{key}' already present")
        self._children[key] = child

    def get_child(self, key: str) -> Finalizable:
        """Return the child registered under *key* (KeyError when absent)."""
        return self._children[key]

    def finalize(self):
        """Finalize every registered child, in insertion order."""
        for child in self._children.values():
            child.finalize()
def lca_of_types(classes):
    """Return the most-derived common ancestor of *classes*.

    Intersects the MRO of every class (as Counters) and takes the first
    surviving entry, which is the nearest shared base.
    # NOTE(rsetaluri): This implementation is inspired by:
    # https://stackoverflow.com/questions/58290137/how-to-find-most-recent-common-ancestor-base-type-of-several-types-in-python
    """
    mro_counters = (collections.Counter(cls.mro()) for cls in classes)
    common = reduce(operator.and_, mro_counters)
    return next(iter(common))
def slice_opts(dct: Dict, cls: type, keep: bool = False):
    """Construct dataclass *cls* from the matching keys of *dct*.

    Matched keys are popped out of *dct* by default; pass ``keep=True``
    to leave them in place. Raises TypeError when *cls* is not a dataclass.
    """
    if not dataclasses.is_dataclass(cls):
        raise TypeError("Expected dataclass")
    get_opt = dct.__getitem__ if keep else dct.pop
    kwargs = {}
    for name in cls.__dataclass_fields__:
        try:
            kwargs[name] = get_opt(name)
        except KeyError:
            continue
    return cls(**kwargs)
def filter_by_key(function: Callable[[Any], bool], dct: Dict):
    """Return a new dict keeping entries whose key satisfies *function*."""
    result = {}
    for key, value in dct.items():
        if function(key):
            result[key] = value
    return result
def wrap_with_context_manager(ctx_mgr):
    """Decorator factory: run each call of the wrapped function inside
    *ctx_mgr*.

    Note the very same manager object is (re)entered on every call, so it
    must be reusable.
    """
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            with ctx_mgr:
                return fn(*args, **kwargs)
        return wrapper
    return decorator
def replace_all(s: str, replacement_map: Dict[str, str]) -> str:
    """Apply each old->new substitution of *replacement_map* to *s*, in
    the map's iteration order (later rules see earlier rules' output)."""
    result = s
    for old, new in replacement_map.items():
        result = result.replace(old, new)
    return result
class SimpleCounter:
    """Monotonically increasing integer counter."""

    def __init__(self, init: int = 0):
        self._value = init

    def value(self) -> int:
        """Current counter value (the next value next() will hand out)."""
        return self._value

    def next(self) -> int:
        """Return the current value, then advance the counter by one."""
        current = self._value
        self._value = current + 1
        return current
def sort_by_value(dct: Dict[Any, Any]) -> Iterable[Any]:
    """Return the keys of *dct* as a list ordered by their mapped values."""
    return sorted(dct, key=dct.__getitem__)
class MroVisitor(abc.ABC):
    """Visitor dispatching on a node's MRO: the first ``visit_<ClassName>``
    method found while walking the MRO handles the node; when none exists,
    :meth:`generic_visit` is the fallback."""

    def get_class(self, node: Any) -> type:
        """Class used for dispatch; override to dispatch on something else."""
        return node.__class__

    def visit(self, node: Any, *args, **kwargs):
        """Dispatch *node* to the most specific available handler."""
        for cls in self.get_class(node).__mro__:
            handler = getattr(self, f"visit_{cls.__name__}", None)
            if handler is not None:
                return handler(node, *args, **kwargs)
        return self.generic_visit(node, *args, **kwargs)

    @abc.abstractmethod
    def generic_visit(self, node: Any, *args, **kwargs):
        """Fallback handler; subclasses must override."""
        raise NotImplementedError()
def prologue(prologue_fn):
    """Decorator factory: call *prologue_fn* with the call's arguments
    immediately before running the wrapped function."""
    def decorator(fn):
        @wraps(fn)
        def wrapped(*args, **kwargs):
            prologue_fn(*args, **kwargs)
            return fn(*args, **kwargs)
        return wrapped
    return decorator
def epilogue(epilogue_fn):
    """Decorator factory: call *epilogue_fn* with the call's arguments
    after the wrapped function returns; the wrapped result is preserved."""
    def decorator(fn):
        @wraps(fn)
        def wrapped(*args, **kwargs):
            result = fn(*args, **kwargs)
            epilogue_fn(*args, **kwargs)
            return result
        return wrapped
    return decorator
def assert_false(*args, **kwargs):
    """Always-failing callback; accepts and ignores any arguments."""
    assert False
def is_(x: Any, y: Any):
    """Identity predicate: True iff *x* and *y* are the same object."""
    return x is y
def find_by_value(
    dct: Dict[Any, Any],
    value: Any,
    eq: Callable[[Any, Any], bool] = None,
) -> Iterable[Any]:
    """Performs a reverse lookup on @dct given @value, i.e. returns any key k
    for which @eq(@dct[k], @value), where @eq is an optional equality
    function. By default `is` is used.
    """
    predicate = is_ if eq is None else eq
    for key, stored in dct.items():
        if predicate(stored, value):
            yield key
def hash_expr(expr: str) -> str:
    """Short stable digest of *expr*: 16 hex chars of SHAKE-128."""
    hasher = hashlib.shake_128(expr.encode())
    return hasher.hexdigest(8)
|
class ImageFrame:
    """Tk window that renders a flat pixel list via a PhotoImage canvas."""

    def __init__(self, pixels):
        # Build a WIDTH x HEIGHT PhotoImage and fill it pixel by pixel.
        self.img = PhotoImage(width = WIDTH, height = HEIGHT)
        for row in range(HEIGHT):
            for col in range(WIDTH):
                num = pixels[row*WIDTH+col]
                if COLORFLAG:
                    # Color mode: num is an (r, g, b) triple.
                    kolor = '#%02x%02x%02x' % (num[0], num[1], num[2])
                else:
                    # Grayscale: replicate one intensity across all channels.
                    kolor = '#%02x%02x%02x' % (num, num, num)
                self.img.put(kolor, (col, row))
        c = Canvas(root, width = WIDTH, height = HEIGHT); c.pack()
        c.create_image(0,0, image = self.img, anchor = NW)
        printElapsedTime('displayed image')
def confirmP3fileType(file1):
    """Verify that the next line of *file1* is a P3 (ASCII PPM) header.

    On mismatch, reports the error, closes the file, and terminates the
    process (preserving the original fail-fast behavior).
    """
    stng = file1.readline().strip()
    # BUG FIX: `stng[0] + stng[1]` raised IndexError on empty/short header
    # lines; slicing is safe for any length and compares the same prefix.
    if stng[:2] != 'P3':
        print('ERROR: NOT P3!')
        file1.close()
        exit()
def printElapsedTime (msg = 'time'):
    """Print *msg* (upper-cased, dot-padded to 30 chars) and the elapsed
    time since module START as "MM min : SS.S sec".

    NOTE(review): relies on module-level `clock` (time.clock) and START;
    time.clock was removed in Python 3.8 -- confirm the target interpreter
    or port to time.perf_counter.
    """
    length = 30
    msg = msg[:length]
    tab = '.'*(length-len(msg))
    print('--' + msg.upper() + tab + '', end = '')
    # `time` here shadows nothing imported; it is just the elapsed seconds.
    time = round(clock() - START, 1)
    print( '%2d'%int(time/60), ' min :', '%4.1f'%round(time%60, 1), ' sec', sep = '')
def readFileNumbersIntoString(file1):
    """Read the remainder of *file1*, close it, and return the
    whitespace-split tokens; exits when the count is not a multiple of 3
    (pixels are expected as RGB triples)."""
    tokens = file1.read().split()
    file1.close()
    if len(tokens) % 3 != 0:
        print('WARNING: Size of File(', len(tokens) ,') % 3 != 0')
        exit()
    return tokens
def convertStringRGSsToGrayIntegerOrColorTuples(nums):
    """Collapse a flat list of RGB string tokens into grayscale ints using
    the 0.2/0.7/0.1 luminance weights."""
    return [
        int(0.2*int(nums[pos]) + 0.7*int(nums[pos+1]) + 0.1*int(nums[pos+2]))
        for pos in range(0, len(nums), 3)
    ]
def printTitleAndSizeOfimageInPixels(image):
    """Print a banner, sanity-check the pixel count against WIDTH*HEIGHT,
    and report the elapsed extraction time."""
    print('RTI')
    if len(image) != WIDTH * HEIGHT:
        print('ERROR: Bad file size')
    print('Number of Pixels', len(image))
    printElapsedTime('image extracted from file')
def readPixelColorsFromFile(file1):
    """Full PPM ingest pipeline: validate the P3 header, tokenize the body,
    convert to grayscale ints, report, and return the pixel list."""
    confirmP3fileType(file1)
    nums = readFileNumbersIntoString(file1)
    image = convertStringRGSsToGrayIntegerOrColorTuples(nums)
    printTitleAndSizeOfimageInPixels(image)
    return image
def saveNumbersToFile(filename, image):
    """Persist *image* to *filename* -- not yet implemented (no-op stub)."""
    pass
def smoothImageCellWithNeighbor(row, col, image):
    """In-place 3x3 weighted (1-2-1 / 2-4-2 / 1-2-1, /16) smoothing of one
    interior pixel; border pixels are left untouched.

    NOTE(review): the row stride is hard-coded as 512 rather than WIDTH, so
    this is only correct while WIDTH == 512. The /16 division also turns the
    stored value into a float.
    """
    if not(row == 0 or col == 0 or row == HEIGHT-1 or col == WIDTH-1):
        xy = row*512+col
        image[xy]=\
        (image[xy-513]+ \
        2*image[xy-512]+ \
        image[xy-511]+ \
        2*image[xy-1]+ \
        4*image[xy]+ \
        2*image[xy+1]+ \
        image[xy+511]+ \
        2*image[xy+512]+ \
        image[xy+513])/16
def smoothTheImage(image, count):
    """Return a smoothed copy of *image*, applying the 3x3 kernel *count*
    times over every cell.

    NOTE(review): r iterates range(WIDTH) but is passed as the row and c
    (range(HEIGHT)) as the column -- harmless only because WIDTH == HEIGHT.
    """
    image2 = image[:]
    for i in range(count):
        for r in range(WIDTH):
            for c in range(HEIGHT):
                smoothImageCellWithNeighbor(r,c,image2)
    return image2
def frange(start, stop, step):
    """Float analogue of range(): yield start, start+step, ... while staying
    step/10 short of *stop* to dodge float-accumulation overshoot."""
    current = start
    limit = stop - step/10
    while current < limit:
        yield current
        current += step
def drawLine(m, b, image, start = 0, stop = 512):
    """Mark the line y = m*x + b into the flat 512-wide *image* with 255.

    NOTE(review): iteration runs from *b* to *stop*; the *start* parameter
    is accepted but unused -- presumably a leftover, confirm before relying
    on it.
    """
    for step in range(b, stop):
        index = int( (step*512) + m*step + b )
        image[index] = 255
def drawLine2(r, theta, image):
    """Rasterize a line described in (r, theta) form into the flat
    512-wide *image*, bounds-checked against the image length."""
    from math import atan2, cos, tan
    slope = tan(theta)
    phi = atan2(slope, 1)
    if phi < 0:
        phi += pi
    for step in range(int(r*cos(slope))):
        index = int( (step*512) + slope*step )
        if len(image) > index:
            image[index] = 255
def imageNoise(points, image):
    """Set *points* randomly chosen pixels of the flat image to 255."""
    for _ in range(points):
        image[randint(0, HEIGHT*WIDTH - 1)] = 255
def sobelTransformation(image):
    """Per-pixel Sobel pass: returns a list of [magnitude, direction-bucket,
    0, 0, 0] entries computed from gradX/gradY (stride hard-coded to 512)."""
    from math import sqrt
    image2 = [[0,0,0,0,0] for i in range(HEIGHT*WIDTH)]
    # NOTE(review): `tmp` is dead debug code; never used below.
    tmp = image[600]
    for row in range(HEIGHT):
        for col in range(WIDTH):
            index = row*512+col
            Gx = gradX(row,col,image)
            Gy = gradY(row,col,image)
            temp = [sqrt(Gx*Gx+Gy*Gy),theta(Gx,Gy),0,0,0]
            image2[index] = temp
    return image2
def getGyGx(image, i, a):
    """Return the (Gy, Gx) Sobel responses at row *i*, column *a* of the
    flat 512-wide image (terms summed in the original left-to-right order
    so float results are bit-identical)."""
    up = (i - 1)*512
    mid = i*512
    down = (i + 1)*512
    left = a - 1
    right = a + 1
    gy = (image[up + left] + 2*image[up + a] + image[up + right]
          - image[down + left] - 2*image[down + a] - image[down + right])
    gx = (image[up + left] + 2*image[mid + left] + image[down + left]
          - image[up + right] - 2*image[mid + right] - image[down + right])
    return gy, gx
def sobelize(image):
    """Full-image Sobel pass via getGyGx; returns one
    [magnitude, direction-bucket, 0, 0, 0] entry per pixel, leaving the
    one-pixel border as zeros."""
    ret = [[0,0,0,0,0] for i in range(512*512) ]
    for i in range(1,WIDTH-1):
        for a in range(1,HEIGHT-1):
            gy,gx = getGyGx(image,i,a);
            tmp = [sqrt(gx**2+gy**2),theta(gx,gy),0,0,0]
            ret[i*512+a] = tmp
    printElapsedTime('sobel transformation')
    return ret
def cannyTransformation(image):
    """Canny edge pass -- unfinished: the loop reads every cell but performs
    no computation yet (effectively a no-op)."""
    for row in range(HEIGHT):
        for col in range(WIDTH):
            cell = image[row*512+col]
def gradY(row,col,image):
    """Vertical Sobel response at (row, col); 0 on the image border.

    Offsets -513/-512/-511 address the row above (columns a-1, a, a+1)
    and +511/+512/+513 the row below; the stride is hard-coded to 512.
    """
    ret = 0
    if not(row == 0 or col == 0 or row == HEIGHT-1 or col == WIDTH-1):
        xy = row*512+col
        ret=\
        1*(image[xy-513]+ \
        2*image[xy-512]+ \
        1*image[xy-511]+ \
        -1*image[xy+511]+ \
        -2*image[xy+512]+ \
        -1*image[xy+513])
    return ret
def gradX(row,col,image):
    """Horizontal Sobel response at (row, col); 0 on the image border.

    BUG FIX: the original wrote ``-1*( term + term + ... )`` so the leading
    -1 multiplied the WHOLE parenthesized sum, flipping the sign of every
    term after the first. Mirroring gradY (and the kernel in getGyGx), each
    tap now carries its own coefficient: left column negative, right column
    positive.
    """
    ret = 0
    if not(row == 0 or col == 0 or row == HEIGHT-1 or col == WIDTH-1):
        xy = row*512+col
        ret = (-1*image[xy-513] +
               -2*image[xy-1] +
               -1*image[xy+511] +
                1*image[xy-511] +
                2*image[xy+1] +
                1*image[xy+513])
    return ret
def normalize(image, intensity = 255):
    """Scale the magnitude channel (index 0) of each pixel entry into
    0..intensity and return a flat list of ints.

    NOTE(review): raises ZeroDivisionError when every magnitude is zero.
    """
    m = 0
    for i in image:
        m = max(m,i[0])
    printElapsedTime('normalizing')
    return [int(x[0]*intensity/m) for x in image]
def theta(Gx, Gy):
    """Quantize the gradient direction atan2(Gy, Gx) into four buckets:
    0 = horizontal, 1 = 45 deg, 2 = vertical, 3 = 135 deg."""
    from math import atan2
    angle = atan2(Gy, Gx)
    if angle < 0:
        angle += pi
    # Bucket boundaries sit at odd multiples of pi/8.
    if angle < pi/8 or angle >= 7*pi/8:
        return 0
    if angle < 3*pi/8:
        return 1
    if angle < 5*pi/8:
        return 2
    return 3
from tkinter import *
# NOTE(review): time.clock was removed in Python 3.8, so this import fails
# on modern interpreters -- port to time.perf_counter when upgrading.
from time import clock
from sys import setrecursionlimit
from random import randint
from math import pi,sqrt,atan2,sin,cos
setrecursionlimit(7000)
root = Tk()  # created at import time; requires an available display
START = clock()  # reference instant for printElapsedTime
WIDTH = 512  # image dimensions; several helpers hard-code the 512 stride
HEIGHT = 512
COLORFLAG = False  # False -> grayscale rendering in ImageFrame
HIGH = 45  # threshold constants (not referenced in the visible code)
LOW = 10
NUMBER_OF_TIMES_TO_SMOOTH_IMAGE = 7
def main():
    """Load lena.ppm, smooth it, run the Sobel transform, normalize the
    magnitudes, and display the result in a Tk window."""
    fileName1 = 'lena.ppm'
    file1 = open(fileName1, 'r')
    image = [0] *HEIGHT*WIDTH  # placeholder; replaced on the next line
    image = readPixelColorsFromFile(file1)
    printElapsedTime('Smoothing')
    image = smoothTheImage(image, 10)
    printElapsedTime('Sobel Trans')
    tmp = image[600]  # debug probe of a single pixel, printed below
    image = sobelTransformation(image)
    print(tmp,image[600][0])
    image = normalize(image,255)
    print(tmp,image[600])
    x = ImageFrame(image)
    root.mainloop()
if __name__ == '__main__': main()
#
# fileName2 = 'e:\\grayScale.ppm'
# saveNumbersToFile(fileName2,image)
#
# image = extractNumbersFromFile(fileName2, 'extract from PPM file')
#
# fileName3 = 'e:\\smoothed.ppm'
# for n in range(NUMBER_OF_TIMES_TO_SMOOTH_IMAGE):
# image = smooth
|
from Todo_app.models import Task
from django.contrib import admin
# Register your models here.
# Admin configuration for the to-do Task model.
class TaskAdmin(admin.ModelAdmin):
    # Columns shown on the task changelist page.
    list_display = ['user', 'title', 'complete', 'description', 'created']


admin.site.register(Task, TaskAdmin)
import re
import sys
# Positional arguments: input file (required) and output file (optional,
# defaulting to output.txt).
input_filename = sys.argv[1]
try:
    output_filename = sys.argv[2]
except IndexError:
    # Only a missing second argument should fall back to the default;
    # the previous broad `except Exception` hid unrelated errors.
    output_filename = 'output.txt'
# (pattern, replacement) pairs applied in order to every input line to
# translate T-SQL DDL type names into a generic schema vocabulary.
regs = [
    ( r'[\[\]]' , '' ),                          # strip SQL Server [bracket] quoting
    ( r'\bnvarchar[ ]*\((.)*\) ' , 'string ' ),  # nvarchar(n) -> string
    ( r'\bdecimal[ ]*\((.)*\)' , 'decimal' ),    # drop precision/scale
    ( r'\buniqueidentifier\b' , 'string' ),
    ( r'\bNOT NULL\b' , '' ),
    ( r'\bNULL\b' , '' ),
    ( r'\bdatetime\b' , 'timestamp' ),
    ( r'\bmoney\b' , 'decimal' ),
    ( r'\bbit\b' , 'boolean' ),
    # ( r'^\b[^ ]+' , '' )
]
# Apply every substitution to every line of the input and write the result.
# `with` guarantees both handles are closed (the input file was previously
# leaked and `input` shadowed the builtin); the stray Python-2 debug
# statement `print k` (a syntax error under Python 3) is removed.
with open(input_filename) as source, open(output_filename, 'w') as output:
    for line in source:
        for (pattern, replacement) in regs:
            line = re.sub(pattern, replacement, line)
        output.write(line)
# for (k,v) in regs:
# test = re.sub(k,v,test)
# print "matching" + k + "\t" +test
# if match:
# print match.group()
# else :
# print "No match"
|
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase
class SentryAppInteractionTest(APITestCase):
    """Shared fixtures for the sentry-app interaction endpoint tests."""

    def setUp(self):
        # Superuser may inspect any app's interactions; a plain user only
        # those of apps owned by their organization.
        self.superuser = self.create_user(email="superuser@example.com", is_superuser=True)
        self.user = self.create_user(email="user@example.com")
        self.org = self.create_organization(owner=self.user)
        # App owned by self.user's org, carrying an issue-link UI component.
        self.published_app = self.create_sentry_app(
            name="Published App",
            organization=self.org,
            published=True,
            schema={"elements": [self.create_issue_link_schema()]},
        )
        # Published app belonging to an unrelated organization.
        self.unowned_published_app = self.create_sentry_app(
            name="Unowned Published App", organization=self.create_organization(), published=True
        )
        self.owned_url = reverse(
            "sentry-api-0-sentry-app-interaction", args=[self.published_app.slug]
        )
        self.unowned_url = reverse(
            "sentry-api-0-sentry-app-interaction", args=[self.unowned_published_app.slug]
        )
class GetSentryAppInteractionTest(SentryAppInteractionTest):
    """GET returns view counts plus per-component interaction counts."""

    def test_superuser_sees_unowned_interactions(self):
        self.login_as(user=self.superuser, superuser=True)
        response = self.client.get(self.unowned_url, format="json")
        assert response.status_code == 200
        assert len(response.data["views"]) > 0
        # The unowned app was created without UI components, so there is
        # no interaction data to report.
        assert response.data["componentInteractions"] == {}

    def test_user_sees_owned_interactions(self):
        self.login_as(user=self.user)
        response = self.client.get(self.owned_url, format="json")
        assert response.status_code == 200
        assert len(response.data["views"]) > 0
        # Matches the issue-link element declared in setUp's schema.
        assert "issue-link" in response.data["componentInteractions"]

    def test_user_does_not_see_unowned_interactions(self):
        self.login_as(user=self.user)
        response = self.client.get(self.unowned_url, format="json")
        assert response.status_code == 403
        assert response.data["detail"] == "You do not have permission to perform this action."

    def test_invalid_startend_throws_error(self):
        self.login_as(self.user)
        # `since` later than `until` must be rejected.
        url = "%s?since=1569523068&until=1566931068" % self.owned_url
        response = self.client.get(url, format="json")
        assert response.status_code == 400
class PostSentryAppInteractionTest(SentryAppInteractionTest):
    """POST records interaction counts.  These tests pin the validation
    rules and the permission model: any logged-in user may record an
    interaction, ownership of the app is not required.

    The request boilerplate and the duplicated error-message assertions
    are factored into private helpers (behavior of each test unchanged).
    """

    # Component types the endpoint accepts.
    COMPONENT_TYPES = ["stacktrace-link", "issue-link"]

    def _post(self, url, body):
        # All requests share the same JSON content type.
        return self.client.post(url, body, headers={"Content-Type": "application/json"})

    def _assert_bad_tsdb_field(self, response):
        assert response.status_code == 400
        assert (
            response.data["detail"]
            == "The tsdbField must be one of: sentry_app_viewed, sentry_app_component_interacted"
        )

    def _assert_bad_component_type(self, response):
        assert response.status_code == 400
        assert (
            response.data["detail"]
            == f"The field componentType is required and must be one of {self.COMPONENT_TYPES}"
        )

    def test_not_logged_in_not_allowed(self):
        body = {"tsdbField": "sentry_app_viewed"}
        response = self._post(self.owned_url, body)
        assert response.status_code == 401
        assert response.data["detail"] == "Authentication credentials were not provided."

    def test_missing_tsdb_field(self):
        self.login_as(self.user)
        self._assert_bad_tsdb_field(self._post(self.owned_url, {}))

    def test_incorrect_tsdb_field(self):
        self.login_as(self.user)
        self._assert_bad_tsdb_field(self._post(self.owned_url, {"tsdbField": "wrong"}))

    def test_missing_component_type(self):
        self.login_as(self.user)
        body = {"tsdbField": "sentry_app_component_interacted"}
        self._assert_bad_component_type(self._post(self.owned_url, body))

    def test_incorrect_component_type(self):
        self.login_as(self.user)
        body = {"tsdbField": "sentry_app_component_interacted", "componentType": "wrong"}
        self._assert_bad_component_type(self._post(self.owned_url, body))

    def test_allows_logged_in_user_who_doesnt_own_app(self):
        self.login_as(self.user)
        body = {"tsdbField": "sentry_app_component_interacted", "componentType": "issue-link"}
        assert self._post(self.unowned_url, body).status_code == 201
        assert self._post(self.unowned_url, {"tsdbField": "sentry_app_viewed"}).status_code == 201

    def test_allows_logged_in_user_who_does_own_app(self):
        self.login_as(self.user)
        body = {"tsdbField": "sentry_app_component_interacted", "componentType": "issue-link"}
        assert self._post(self.owned_url, body).status_code == 201
        assert self._post(self.owned_url, {"tsdbField": "sentry_app_viewed"}).status_code == 201
|
def calc_min_calls(input_stream) -> int:
    """Return the number of contiguous runs of the rarer value in a
    stream of 0/1 directions.

    Each maximal run of equal consecutive values costs one "call"; the
    result is the smaller of the two per-value run counts.
    """
    last_occurence = {0: -1, 1: -1}
    n_calls = {0: 0, 1: 0}
    for idx, direction in enumerate(input_stream):
        # A new run starts when this value has never been seen, or when
        # its previous sighting was not the immediately preceding index.
        if last_occurence[direction] == -1 or idx - last_occurence[direction] > 1:
            n_calls[direction] += 1
        # BUG FIX: record the position on EVERY element.  Previously it
        # was only updated when a new run started, so a run of length >= 3
        # was wrongly split and counted as several runs (see the trailing
        # "2nd cap was not getting ... flipped" note in the original).
        last_occurence[direction] = idx
    return min(n_calls[0], n_calls[1])
def main():
    """Exercise calc_min_calls on a sample direction stream."""
    sample = [0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0]
    print(calc_min_calls(sample))


main()
# what happening was.. 2nd cap was not getting orders of getting flipped |
import uuid, datetime, json
import pandas as pd
from functree import app, models, tree, analysis
def from_table(form):
    """Build and persist a Profile document from an uploaded TSV table.

    Reads the table from the form's input file, maps each row entry to a
    functional-tree layer, optionally attaches a color matrix, and saves
    a Profile with a TTL.  Returns the new profile's UUID.
    """
    raw_table = pd.read_csv(form.input_file.data, delimiter='\t', comment='#', header=0, index_col=0).dropna(how='all')
    root = models.Tree.objects().get(source=form.target.data)['tree']
    nodes = tree.get_nodes(root)
    # entry -> layer lookup for every node of the functional tree.
    entry_to_layer = {node['entry']: node['layer'] for node in nodes}
    profile = []
    for entry in raw_table.index:
        # .loc replaces the long-removed DataFrame.ix accessor (label-based
        # here, since `entry` comes straight from the index).
        profile.append({'entry': entry, 'layer': analysis.get_layer(entry, entry_to_layer), 'values': [raw_table.loc[entry].tolist()]})
    colors = []
    if form.color_file.data:
        # .to_numpy() replaces the removed DataFrame.as_matrix().
        colors = pd.read_csv(form.color_file.data, header=None, delimiter='\t').to_numpy().tolist()
    utcnow = datetime.datetime.utcnow()
    return models.Profile(
        profile_id=uuid.uuid4(),
        profile=profile,
        series=['Raw'],
        columns=[raw_table.columns.tolist()],
        colors=colors,
        target=form.target.data,
        description=form.description.data,
        added_at=utcnow,
        expire_at=utcnow + datetime.timedelta(days=app.config['FUNCTREE_PROFILE_TTL_DAYS']),
        private=form.private.data
    ).save().profile_id
def from_json(form):
    """Persist a Profile document from an uploaded functree JSON export
    (first element of the array) and return its UUID."""
    payload = json.load(form.input_file.data)
    first = payload[0]
    now = datetime.datetime.utcnow()
    ttl = datetime.timedelta(days=app.config['FUNCTREE_PROFILE_TTL_DAYS'])
    saved = models.Profile(
        profile_id=uuid.uuid4(),
        profile=first['profile'],
        series=first['series'],
        columns=first['columns'],
        colors=first['colors'],
        target=form.target.data,
        description=form.description.data,
        added_at=now,
        expire_at=now + ttl,
        private=form.private.data
    ).save()
    return saved.profile_id
|
import os, datetime, uuid
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
from hashids import Hashids
# Create your models here.
def get_default_hash_id():
    """Default username: a Hashids string derived from the next user id."""
    hashids = Hashids(salt=settings.SECRET_KEY, min_length=6)
    try:
        user_id = User.objects.latest('id').id + 1
    except User.DoesNotExist:
        # Empty table (first user ever): start at 1.  The previous bare
        # `except:` also swallowed unrelated database errors.
        user_id = 1
    return hashids.encode(user_id)
def _dated_upload_path(prefix, basename):
    # Shared helper: "<prefix>/<year>/<month>/<day>/<uuid-hex><basename>".
    # (Replaces five copies of the same logic; the local previously named
    # `format` also shadowed the builtin.)
    now = datetime.datetime.now()
    directory = "%s/%d/%d/%d" % (prefix, now.year, now.month, now.day)
    return os.path.join(directory, uuid.uuid4().hex + basename)


def business_license_update_filename(instance, filename):
    """Upload path for a user's business-license file."""
    ext = filename.split('.')[-1]
    return _dated_upload_path("business_license", "_" + instance.username + "_business" + "." + ext)


def partner_update_filename(instance, filename):
    """Upload path for a partner thumbnail."""
    ext = filename.split('.')[-1]
    return _dated_upload_path("partner", "_partner" + "." + ext)


def portfolio_update_filename(instance, filename):
    """Upload path for a partner portfolio image."""
    ext = filename.split('.')[-1]
    return _dated_upload_path("portfolio", "_portfolio" + "." + ext)


def user_portfolio_update_filename(instance, filename):
    """Upload path for a user's own portfolio file."""
    ext = filename.split('.')[-1]
    return _dated_upload_path("user_portfolio", "_portfolio" + "." + ext)


def request_update_filename(instance, filename):
    """Upload path for a file attached to a Request."""
    ext = filename.split('.')[-1]
    return _dated_upload_path("request", "_request" + "." + ext)
# ------------------------------------------------------------------
# Model : User
# Description : 회원 모델
# ------------------------------------------------------------------
class User(AbstractUser):
    """Custom user model: authenticates with email; username is an
    auto-generated Hashids string (see get_default_hash_id)."""
    # Log in with the email address instead of a username.
    USERNAME_FIELD = 'email'
    # Drop AbstractUser fields this project does not use.
    first_name = None
    last_name = None
    # NOTE(review): this None is immediately shadowed by the CharField
    # declared below -- looks like leftover from the usual
    # "username = None" field-removal pattern; confirm it can be dropped.
    username = None
    # User type choices.
    CLIENT = 1
    PARTNER = 2
    TYPE = (
        (CLIENT, '클라이언트'),
        (PARTNER, '파트너'),
    )
    email = models.EmailField('이메일', unique=True)
    username = models.CharField('유저명', max_length=50, default=get_default_hash_id)
    type = models.PositiveSmallIntegerField('유저타입', choices=TYPE, default=1)
    # Business fields are only relevant for PARTNER accounts -- presumably.
    business_name = models.CharField('사업자명', max_length=50, blank=True, null=True)
    business_number = models.CharField('사업자번호', max_length=50, blank=True, null=True)
    business_license = models.FileField('사업자등록증', upload_to=business_license_update_filename, blank=True, null=True)
    portfolio = models.FileField('포트폴리오', upload_to=user_portfolio_update_filename, blank=True, null=True)
    phone = models.CharField('휴대폰 번호', max_length=32, blank=True, null=True)
    # Prompted by createsuperuser in addition to USERNAME_FIELD.
    REQUIRED_FIELDS = ['username']

    class Meta:
        verbose_name = ' 회원'
        verbose_name_plural = ' 회원'

    def __str__(self):
        return str(self.email)
# ------------------------------------------------------------------
# Model : Industry
# Description : 업종 모델
# ------------------------------------------------------------------
class Industry(models.Model):
    """Business category a Partner can belong to."""
    name = models.CharField('업종명', max_length=256)

    class Meta:
        verbose_name = ' 업종'
        verbose_name_plural = ' 업종'

    def __str__(self):
        return str(self.name)
# ------------------------------------------------------------------
# Model : Partner
# Description : 파트너 모델
# ------------------------------------------------------------------
class Partner(models.Model):
    """Partner company profile shown on the site."""
    name = models.CharField('업체명', max_length=256)
    thumbnail = models.ImageField('썸네일', upload_to=partner_update_filename)
    phone = models.CharField('전화번호', max_length=256, blank=True)
    industry = models.ManyToManyField(Industry, verbose_name='업종')
    address = models.CharField('주소', max_length=256, blank=True)
    career = models.IntegerField('경력', default=0)
    products = models.TextField('진행한 제품', blank=True)
    special = models.TextField('특화분야', blank=True)
    info = models.TextField('회사소개', blank=True)
    history = models.TextField('주요이력', blank=True)
    # Flags controlling front-page exposure and partner status.
    is_main = models.BooleanField('메인노출여부', default=False)
    is_partner = models.BooleanField('파트너여부', default=False)
    created_at = models.DateTimeField('등록일자', auto_now_add=True)

    class Meta:
        verbose_name = ' 파트너'
        verbose_name_plural = ' 파트너'

    def __str__(self):
        return str(self.name) + " 파트너"
# ------------------------------------------------------------------
# Model : Portfolio
# Description : 포트폴리오 모델
# ------------------------------------------------------------------
class Portfolio(models.Model):
    """Portfolio image belonging to a Partner."""
    partner = models.ForeignKey(Partner, on_delete=models.CASCADE, verbose_name="파트너")
    img = models.ImageField('이미지', upload_to=portfolio_update_filename)
    created_at = models.DateTimeField('등록일자', auto_now_add=True)

    class Meta:
        verbose_name = ' 포트폴리오'
        verbose_name_plural = ' 포트폴리오'

    def __str__(self):
        return str(self.partner.name) + " 포트폴리오"
# ------------------------------------------------------------------
# Model : Project
# Description : 프로젝트 모델
# ------------------------------------------------------------------
class Project(models.Model):
    """Showcase project entry."""
    title = models.CharField('제목', max_length=40)
    content = RichTextUploadingField('내용')
    is_main = models.BooleanField('메인노출여부', default=False)
    budget = models.CharField('희망 예산', max_length=256, blank=True, null=True)
    started_at = models.DateTimeField('작업시작일시')
    ended_at = models.DateTimeField('작업종료일시')
    # NOTE(review): unlike the sibling models, created_at here has no
    # auto_now_add, so callers must set it explicitly -- confirm intended.
    created_at = models.DateTimeField('등록일자')

    class Meta:
        verbose_name = ' 프로젝트'
        verbose_name_plural = ' 프로젝트'

    def __str__(self):
        return str(self.title) + " : 프로젝트"
# ------------------------------------------------------------------
# Model : Bbs
# Description : 게시판 모델
# ------------------------------------------------------------------
class Bbs(models.Model):
    """Notice-board post."""
    title = models.CharField('제목', max_length=40)
    content = RichTextUploadingField('내용')
    is_top = models.BooleanField('상단고정여부', default=False)
    created_at = models.DateTimeField('등록일자', auto_now_add=True)

    class Meta:
        verbose_name = ' 게시글'
        verbose_name_plural = ' 게시글'

    def __str__(self):
        # BUG FIX: previously referenced the nonexistent field `self.star`,
        # which raised AttributeError whenever a Bbs row was rendered
        # (admin lists, shell, logs).  Use the title like the other models.
        return str(self.title) + " : 게시글"
# ------------------------------------------------------------------
# Model : product
# Description : 요청 모델
# ------------------------------------------------------------------
class Request(models.Model):
    """Inbound quote/consultation request submitted from the site."""
    company = models.CharField('회사명', max_length=256, blank=True, null=True)
    phone = models.CharField('전화번호', max_length=256, blank=True, null=True)
    email = models.CharField('이메일', max_length=256, blank=True, null=True)
    product = models.CharField('제품', max_length=256, blank=True, null=True)
    budget = models.CharField('희망 예산', max_length=256, blank=True, null=True)
    period = models.CharField('희망 기간', max_length=256, blank=True, null=True)
    file = models.FileField('포트폴리오', upload_to=request_update_filename, blank=True, null=True)
    created_at = models.DateTimeField('등록일자', auto_now_add=True)

    class Meta:
        verbose_name = ' 요청된 의뢰'
        verbose_name_plural = ' 요청된 의뢰'

    def __str__(self):
        return str(self.company) + " : 요청"
|
# 11有一个列表['a','b','c','a','e','a'], 使用for循环统计a出现的次数,并删除其中的所有a元素
# lis = ['a','b','c','a','e','a',"a","a"]
# count = 0
# for i in lis: #循环列表
# if i == 'a':
# count = count + 1 #当i是a的时候,每循环一次加一次
# print(count) #循环结束后,打印最后次数
# for j in range(count): #按计数引循环a
# lis.remove("a") #每次循环都删除a,直到循环结束
# print(lis)
#
|
# -*- coding: utf-8 -*-
from django import template
from django.db.models import Count
from sdifrontend.apps.mainpage.models import SysDataset, Category, SidebarMenu
register = template.Library()
@register.simple_tag(takes_context=True)
def get_sidebar_items(context, **kwargs):
    """Build the sidebar navigation: data-subject categories (with
    dataset counts, most frequent first) and the fixed data-type list."""
    # Static scaffold; section 0's items are filled from the DB below.
    nav_elements = [
        {
            'name': "Data Subject",
            'id': 'category',
            'items': []
        }, {
            'name': "Data Type",
            'id': 'type',
            'items': [
                {'name': "Animations/Simulations"},
                {'name': "Genome/Genetic Data"},
                {'name': "Interactive Data Map"},
                {'name': "Numeric Data"},
                {'name': "Still Images/Photos"},
                {'name': "Figures/Plots"},
                {'name': "Specialized Mix"},
                {'name': "Multimedia"},
                {'name': "General (Other)"}
            ]
        }
    ]
    for category in Category.objects.all().annotate(count=Count('sysdataset')).order_by('-count'):
        nav_elements[0]['items'].append({'id': category.id, 'name': category.name, 'count': category.count, 'section': "category"})
    # Per-type dataset counts; `type` is assumed to be an integer index
    # into the static items list above -- TODO confirm.
    count_data_by_type = list(SysDataset.objects.values('type').annotate(count=Count('type')).order_by('type'))
    for item in count_data_by_type:
        nav_elements[1]['items'][item['type']]['count'] = item['count']
    for i, entry in enumerate(nav_elements[1]['items']):
        # Types with no datasets got no count above; default to 0.
        # (Replaces a bare try/except that swallowed every error.)
        entry.setdefault('count', 0)
        entry['section'] = "type"
        entry['id'] = i
    return nav_elements
@register.simple_tag(takes_context=True)
def get_ds_types(context, **kwargs):
    """Return (index-string, name) choice tuples for the static data-type list."""
    sb = SidebarMenu()
    items = sb.nav_elements[1]['items']
    # enumerate replaces items.index(item): the old lookup was O(n^2) per
    # call and returned the FIRST match, which is wrong for duplicate names.
    return [(str(idx), item['name']) for idx, item in enumerate(items)]
@register.simple_tag(takes_context=True)
def get_ds_subjects(context, **kwargs):
    """Return (id-string, name) choice tuples for every Category."""
    return [(str(category.id), category.name) for category in Category.objects.all()]
# NOTE(review): unlike its siblings this function is not registered as a
# template tag and always returns an empty dict -- looks like a stub;
# confirm whether anything still references it.
def get_ds_subjects_list(context, **kwargs):
    return {}
# Small iteration demo over the builtin container types.
sample_text = "abcdefg"  # renamed: `str` shadowed the builtin type
list1 = list(range(1, 7))
list2 = list(range(7, 13))
dic = {1: 'hi', 2: 'hello'}
set1 = {1, 2, 3, 3, 3, 4, 4, 5, 5, 5, 1}  # duplicates collapse in a set
tuple1 = (1, 2, 3, 4, 5, 4, 4, 5, 5)
# for i in list1:
#     print(i)
# for s in set1:
#     print(s)
for t in tuple1:
    print(t)
# Nested loop: every pairing of list1 x list2.
for i in list1:
    for j in list2:
        print(i, j)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 09:19:41 2020
@author: Christopher S. Francis
"""
import sys
import wx
import interfaceStuff
from datetime import datetime
class StdOutDialog(wx.Dialog):
    """Modal dialog showing a process's captured stdout (left, green)
    and stderr (right, red) side by side."""

    def __init__(self, parent, result=""):
        # `result` is expected to be a (stdout_bytes, stderr_bytes) pair,
        # e.g. from subprocess communicate() -- TODO confirm at call site.
        super().__init__(parent)
        self.SetTitle("sys.stdout")
        self.SetSize((600, 320))
        # NOTE(review): Windows-style relative path; breaks on other OSes.
        self.__icon = wx.Icon("GUI\\images\\threep_logo.png")
        self.SetIcon(self.__icon)
        self.__result = list(result)
        # Substitute placeholder text for empty streams.
        if self.__result[0] == b"":
            self.__result[0] = b"No messages on sys.stdout from this process"
        if self.__result[1] == b"":
            self.__result[1] = b"No messages on sys.stderr from this process"
        # good feedback
        self.__stdout = wx.StaticText(self, -1, label=self.__result[0].decode('ascii'), size=(280, 200), pos=(20, 20), style=wx.ST_NO_AUTORESIZE)
        self.__stdout.SetForegroundColour((50, 168, 82))
        self.__stdout.Wrap(260)
        # bad feedback
        self.__stderr = wx.StaticText(self, -1, label=self.__result[1].decode('ascii'), size=(280, 200), pos=(300, 20), style=wx.ST_NO_AUTORESIZE)
        self.__stderr.SetForegroundColour((237, 57, 40))
        self.__stderr.Wrap(260)
        # Clickers
        self.__copy = wx.Button(self, wx.ID_OK, label="Copy", pos=(170, 235))
        self.__cancel = wx.Button(self, wx.ID_CANCEL, label="Close", pos=(280, 235))

    def copyMessage(self):
        """Dump both panes to a timestamped log file and return its path."""
        filepath = interfaceStuff.location + "\\" + "STDOUT and STDERR Log.txt"
        with open(filepath, "w") as toFile:
            toFile.write("LOGGED AT: " + str(datetime.now()) + "\n\n")
            toFile.write("\t\t--sys.stdout--\n")
            toFile.write(self.__stdout.GetLabel() + "\n\n")
            toFile.write("\t\t--sys.stderr--\n")
            toFile.write(self.__stderr.GetLabel() + "\n")
        return filepath
|
''' Full suite of error analysis on CoNLL05 '''
from collections import Counter
import itertools
from itertools import izip
import subprocess
import sys
import re
from wsj_syntax_helper import extract_gold_syntax_spans
from full_analysis import read_file, read_conll_prediction, extract_spans,\
find, unlabeled_find
CORE_ROLES = {'A0', 'A1', 'A2', 'A3', 'A4', 'A5', 'AA' }
MAX_LEN = 200
CONLL05_GOLD_SYNTAX = 'conll05.devel.gold.syntax'
CONLL05_GOLD_SRL = 'conll05.devel.props.gold.txt'
def fix_labels(pred_spans, gold_spans):
    """Oracle op: relabel a predicted argument whose boundaries exactly
    match a gold span but whose label differs.

    Returns (new_spans, ops) with ops entries ("fix_label", old, new).
    """
    ops = []
    new_spans = []
    for pred in pred_spans:
        match = next((g for g in gold_spans
                      if pred[0] != g[0] and pred[1] == g[1] and pred[2] == g[2]),
                     None)
        if match is None:
            new_spans.append([pred[0], pred[1], pred[2]])
        else:
            ops.append(("fix_label", pred[0], match[0]))
            new_spans.append([match[0], pred[1], pred[2]])
    return new_spans, ops
def merge_two_spans(pred_spans, gold_spans, max_gap = 1):
    """Oracle op: merge two predicted spans separated by at most
    `max_gap` tokens when their combined extent matches a gold span.

    Returns (new_spans, ops); ops records ("merge_two", left, right, gold).
    """
    merged = [False] * len(pred_spans)
    ops = []
    new_spans = []
    for i, p1 in enumerate(pred_spans):
        for j, p2 in enumerate(pred_spans):
            # p2 must start shortly after p1 ends.
            if p1[2] < p2[1] and p1[2] + max_gap + 1 >= p2[1]:
                for g in gold_spans:
                    if p1[1] == g[1] and p2[2] == g[2]:
                        ops.append(("merge_two", p1, p2, g))
                        new_spans.append([g[0], g[1], g[2]])
                        merged[i] = True
                        merged[j] = True
                        break
            # NOTE(review): as the last statement of the loop body this
            # `continue` is a no-op; possibly meant to guard the test
            # above -- confirm intended placement.
            if merged[j]: continue
    #
    # Keep every prediction that did not take part in a merge.
    for i, p in enumerate(pred_spans):
        if not merged[i]:
            new_spans.append([p[0], p[1], p[2]])
    return new_spans, ops
def split_into_two_spans(pred_spans, gold_spans, max_gap = 1):
    """Oracle op: replace one predicted span with two gold spans that
    tile it (allowing at most `max_gap` tokens between them).

    Returns (new_spans, ops); ops records ("split_into_two", pred, g1, g2).
    """
    ops = []
    new_spans = []
    for p in pred_spans:
        has_split = False
        for g1 in gold_spans:
            for g2 in gold_spans:
                # g1 then g2, nearly adjacent, jointly covering p's extent.
                if g1[2] < g2[1] and g1[2] + max_gap + 1 >= g2[1] and p[1] == g1[1] and g2[2] == p[2]:
                    ops.append(("split_into_two", p, g1, g2))
                    new_spans.append([g1[0], g1[1], g1[2]])
                    new_spans.append([g2[0], g2[1], g2[2]])
                    has_split = True
                    break
            if has_split: break
        if not has_split:
            new_spans.append([p[0], p[1], p[2]])
    return new_spans, ops
def fix_left_boundary(pred_spans, gold_spans):
    """Oracle op: snap a predicted span's left edge to gold when its
    label and right edge already agree.  Returns (new_spans, ops)."""
    ops = []
    new_spans = []
    for pred in pred_spans:
        replacement = None
        for g in gold_spans:
            if pred[0] == g[0] and pred[1] != g[1] and pred[2] == g[2]:
                replacement = g
                break
        if replacement is None:
            new_spans.append(pred)
        else:
            ops.append(("fix_left_boundary", pred, replacement))
            new_spans.append(replacement)
    return new_spans, ops
def fix_right_boundary(pred_spans, gold_spans):
    """Oracle op: snap a predicted span's right edge to gold when its
    label and left edge already agree.  Returns (new_spans, ops)."""
    ops = []
    new_spans = []
    for pred in pred_spans:
        replacement = None
        for g in gold_spans:
            if pred[0] == g[0] and pred[1] == g[1] and pred[2] != g[2]:
                replacement = g
                break
        if replacement is None:
            new_spans.append(pred)
        else:
            ops.append(("fix_right_boundary", pred, replacement))
            new_spans.append(replacement)
    return new_spans, ops
def has_overlap(trg, spans, excl = []):
    """True if `trg` overlaps any span in `spans`, ignoring the single
    span equal to `excl` (when given).  Spans are [label, start, end]."""
    for span in spans:
        skip = (excl != [] and span[0] == excl[0]
                and span[1] == excl[1] and span[2] == excl[2])
        if skip:
            continue
        # Overlap iff one span's start falls inside the other's extent.
        starts_inside = span[1] <= trg[1] <= span[2]
        covers_start = trg[1] <= span[1] <= trg[2]
        if starts_inside or covers_start:
            return True
    return False
def fix_both_boundaries(pred_spans, gold_spans):
    """Oracle op: replace a predicted span with an overlapping gold span
    of the same label, provided that gold span does not overlap any
    other prediction.

    Returns (new_spans, ops).  NOTE(review): ops are tagged
    "fix_right_boundary" although both edges may move; the tag is not
    inspected downstream in view, but confirm before relying on it.
    """
    ops = []
    new_spans = []
    for p in pred_spans:
        fixed = False
        for g in gold_spans:
            # Same label, different extent, overlapping p, and g free of
            # conflicts with the remaining predictions.
            if p[0] == g[0] and (p[1] != g[1] or p[2] != g[2]) \
                and ((p[1] <= g[1] and g[1] <= p[2]) or (g[1] <= p[1] and p[1] <= g[2])) \
                and not has_overlap(g, pred_spans, excl=p):
                ops.append(("fix_right_boundary", p, g))
                new_spans.append(g)
                fixed = True
                break
        if not fixed:
            new_spans.append(p)
    return new_spans, ops
def move_core_arg(pred_spans, gold_spans):
    """Oracle op: relocate a core argument (A0-A5/AA) predicted with no
    overlap to the unique gold span carrying the same label, when that
    gold span is clear of the other predictions.

    Returns (new_spans, ops).
    """
    ops = []
    new_spans = []
    for p in pred_spans:
        moved = False
        if p[0] in CORE_ROLES:
            for g in gold_spans:
                # Disjoint from p, label unique among the gold spans, and
                # not overlapping any other prediction.
                if p[0] == g[0] and (p[2] < g[1] or p[1] > g[2]) \
                    and len([g1 for g1 in gold_spans if g1[0] == g[0]]) == 1\
                    and not has_overlap(g, pred_spans, excl=p):
                    ops.append(("move_core_arg", p, g))
                    new_spans.append(g)
                    moved = True
                    break
        if not moved:
            new_spans.append(p)
    return new_spans, ops
def drop_argument(pred_spans, gold_spans):
    """Oracle op: remove predicted spans that overlap no gold span at
    all.  Returns (new_spans, ops) with ("drop_arg", span) records."""
    ops = []
    new_spans = []
    for pred in pred_spans:
        if has_overlap(pred, gold_spans):
            new_spans.append(pred)
        else:
            ops.append(("drop_arg", pred))
    return new_spans, ops
def add_argument(pred_spans, gold_spans):
    """Oracle op: add every gold span that no predicted span overlaps.
    Returns (new_spans, ops); added spans precede the kept predictions."""
    ops = []
    new_spans = []
    for g in gold_spans:
        if not has_overlap(g, pred_spans):
            ops.append(("add_arg", g))
            new_spans.append(g)
    new_spans.extend(pred_spans)
    return new_spans, ops
def compute_pp_accuracy(pred_spans, gold_spans, syn_spans, words):
    """Count gold spans that end in a (non-'of') PP attachment and how
    many of them the prediction reproduced exactly.

    Returns (num_correct_decisions, num_pp_cases).
    """
    num_pp_cases = 0
    num_correct_decisions = 0
    # 1. gold srl span contains a pp span
    for g in gold_spans:
        for s in syn_spans:
            #print g,s
            # A PP constituent ending exactly where the gold span ends,
            # starting strictly inside it, and not headed by "of".
            if "PP" in s[0].split('-') and g[1] < s[1] and s[2] == g[2] \
                and words[s[1]] != 'of':
                #print g, s, ' '.join(words[g[1]: g[2] + 1])
                num_pp_cases += 1
                for p in pred_spans:
                    if p[1] == g[1] and p[2] == g[2]:
                        num_correct_decisions += 1
                        break
    #
    #
    #for s1 in syn_spans:
    #    if "PP" in s1[0].split('-') and g[2] < s[1]:
    #        for s2 in syn_spans:
    return num_correct_decisions, num_pp_cases
def compute_f1(num_matched, num_predicted, num_gold):
    """Precision, recall and F1 (all in percent).

    The three divisions are guarded so empty predictions or empty gold
    yield 0.0 instead of raising ZeroDivisionError.
    """
    precision = 100.0 * num_matched / num_predicted if num_predicted else 0.0
    recall = 100.0 * num_matched / num_gold if num_gold else 0.0
    f1 = 2.0 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1
def update_confusion_matrix(cmat, ops):
    """Tally predicted-label -> gold-label pairs from fix_label-style ops
    into the nested dict `cmat` (mutated in place)."""
    for op in ops:
        pred_label, gold_label = op[1], op[2]
        row = cmat.setdefault(pred_label, {})
        row[gold_label] = row.get(gold_label, 0) + 1
def get_syn_span(span, syn_spans):
    """Return the first syntactic span whose boundaries equal `span`'s,
    or [] when none matches."""
    target = (span[1], span[2])
    for candidate in syn_spans:
        if (candidate[1], candidate[2]) == target:
            return candidate
    return []
if __name__ == '__main__':
    # NOTE: this script is Python 2 (izip, print statements below);
    # indentation reconstructed from a flattened dump -- verify nesting.
    #sentences, gold, predicted = read_file(sys.argv[1])
    # Gold SRL props and the system prediction file (argv[1]).
    sentences, predicates, gold = read_conll_prediction(CONLL05_GOLD_SRL)
    _, pred_predicates, predicted = read_conll_prediction(sys.argv[1])
    words, postags, syn_spans = extract_gold_syntax_spans(CONLL05_GOLD_SYNTAX)
    # print len(sentences), len(predicates), len(syn_spans), len(gold), len(predicted)
    assert len(gold) == len(predicted)
    num_matched = 0
    num_gold = 0
    num_predicted = 0
    num_new_predicted = [0] * 10
    num_new_matched = [0] * 10
    num_pp_cases = 0
    num_correct_pps = 0
    confusion_matrix = {} # Pred->Gold
    arg_drop_by_label = Counter()
    arg_drop_by_dist = Counter()
    # Oracle transformations, applied cumulatively in this order.
    fun_ops = [fix_labels, move_core_arg, merge_two_spans, split_into_two_spans, fix_both_boundaries,\
        drop_argument, add_argument]
    #fun_ops = [fix_labels, add_argument]
    # Deep copy.
    # Working copy of the predictions (V / C-V spans stripped), only for
    # sentences whose predicate sets agree with gold.
    new_pred = []
    for s0, w0, props, p_props, p0 in izip(sentences, words, predicates, pred_predicates, predicted):
        if p_props == props:
            new_pred.append([])
            for prop_id, pred_args in izip(props, p0):
                #print prop_id, gold_args, pred_args
                new_pred[-1].append([s for s in pred_args if s[0] != 'V' and not 'C-V' in s[0]])
    num_pp_involved = 0
    num_merge_ops = 0
    num_split_ops = 0
    attachments = Counter()
    sem_attachments = Counter()
    # Apply each oracle op repeatedly until a fixed point, measuring the
    # cumulative effect, and dump the transformed predictions to temp.txt
    # in CoNLL column format.  (Indentation reconstructed -- verify.)
    for i, fun_op in enumerate(fun_ops):
        sid = 0
        f = open('temp.txt', 'w')
        for s0, w0, props, p_props, syn, g0, p0 in izip(sentences, words, predicates, pred_predicates, syn_spans, gold, predicted):
            if p_props == props:
                pid = 0
                for prop_id, gold_args, pred_args in izip(props, g0, p0):
                    #print prop_id, gold_args, pred_args
                    gold_spans = [s for s in gold_args if s[0] != 'V' and not 'C-V' in s[0]]
                    pred_spans = [s for s in pred_args if s[0] != 'V' and not 'C-V' in s[0]]
                    # Compute F1
                    if i == 0:
                        gold_matched = [find(g, pred_spans) for g in gold_spans]
                        pred_matched = [find(p, gold_spans) for p in pred_spans]
                        num_gold += len(gold_spans)
                        num_predicted += len(pred_spans)
                        num_matched += sum(pred_matched)
                        matched_pairs = [["", p[0], p[0]] for p in pred_spans if find(p, gold_spans)]
                        update_confusion_matrix(confusion_matrix, matched_pairs)
                    #print len(new_pred), len(new_pred[sid]), sid, pid
                    while (True):
                        new_spans, ops = fun_op(new_pred[sid][pid], gold_spans)
                        new_pred[sid][pid] = [p for p in new_spans]
                        #print new_spans, '\n', gold_spans, '\n', ops
                        if ops == []: break
                        if ops[0][0] == "fix_label":
                            update_confusion_matrix(confusion_matrix, ops)
                        if ops[0][0] == "merge_two":
                            num_merge_ops += len(ops)
                            for op in ops:
                                sem_attachments[op[2][0]] += 1
                                ss = get_syn_span(op[2], syn)
                                if ss != []:
                                    attachments[ss[0].split('-')[0]] += 1
                                if ss != [] and 'PP' in ss[0]:
                                    num_pp_involved += 1
                                    '''print ' '.join(w0)
                                    print s0[prop_id]
                                    print "Gold: ", gold_spans
                                    print "Pred: ", pred_spans, '\n'''
                        if ops[0][0] == "split_into_two":
                            num_split_ops += len(ops)
                            for op in ops:
                                sem_attachments[op[3][0]] += 1
                                ss = get_syn_span(op[3], syn)
                                if ss != []:
                                    attachments[ss[0].split('-')[0]] += 1
                                if ss != [] and 'PP' in ss[0]:
                                    num_pp_involved += 1
                    #new_gold_matched = [find(g, new_spans) for g in gold_spans]
                    new_pred_matched = [find(p, gold_spans) for p in new_spans]
                    num_new_predicted[i] += len(new_spans)
                    num_new_matched[i] += sum(new_pred_matched)
                    pid += 1
                #
                # Write to file
                for t in range(len(s0)):
                    if t in props:
                        f.write(s0[t])
                    else:
                        f.write('-')
                    for p, prop_id in enumerate(props):
                        f.write('\t')
                        if t == prop_id:
                            f.write('B-V')
                            continue
                        in_span = False
                        for s in new_pred[sid][p]:
                            if s[1] == t:
                                f.write('B-' + s[0])
                                in_span = True
                                break
                            if s[1] <= t and t <= s[2]:
                                f.write('I-' + s[0])
                                in_span = True
                                break
                        if not in_span:
                            f.write('O')
                    f.write('\n')
                f.write('\n')
                sid += 1
        f.close()
    # Run eval script.
    '''child = subprocess.Popen('sh {} {} {}'.format('/home/luheng/Workspace/neural_srl/data/srl/conll05-eval.sh',\
        '/home/luheng/Workspace/neural_srl/data/srl/conll05.devel.props.gold.txt',\
        'temp.txt'),\
        shell = True, stdout=subprocess.PIPE)
    eval_info = child.communicate()[0]
    try:
        Fscore = eval_info.strip().split("\n")[6]
        Fscore = Fscore.strip().split()[6]
        accuracy = float(Fscore)
        #print(eval_info)
        print("Fscore={}".format(accuracy))
        official_f1s.append(accuracy)
    except IndexError:
        print("Unable to get FScore. Skipping.")'''
    # Baseline F1, then the cumulative F1 after each oracle op.
    print "Original:"
    p,r,f1 = compute_f1(num_matched, num_predicted, num_gold)
    print "Precision: {}, Recall: {}, F1: {}".format(p, r, f1)
    prev_f1 = f1
    for i, fun_op in enumerate(fun_ops):
        print str(fun_op).split()[1]
        p, r, f1 = compute_f1(num_new_matched[i], num_new_predicted[i], num_gold)
        print "Precision: {}, Recall: {}, F1: {}, delta={}".format(p, r, f1, f1 - prev_f1)
        prev_f1 = f1
    print '\n'
    # Print confusion matrix
    row_keys = sorted(confusion_matrix.keys())
    col_keys = set([])
    freq = {}
    for p in row_keys:
        for g in confusion_matrix[p].keys():
            col_keys.add(g)
            if not p in freq:
                freq[p] = 0
            if not g in freq:
                freq[g] = 0
            freq[p] += 1
            freq[g] += 1
    # Keep only labels seen more than 10 times; AM-EXT is excluded.
    row_keys = sorted([r for r in row_keys if freq[r] > 10])
    col_keys = sorted([c for c in col_keys if freq[c] > 10])
    if 'AM-EXT' in row_keys:
        row_keys.remove('AM-EXT')
    if 'AM-EXT' in col_keys:
        col_keys.remove('AM-EXT')
    # Column sums over off-diagonal cells, used to express each cell as a
    # percentage of its column.
    col_normalizer = {}
    for g in col_keys:
        col_normalizer[g] = 0
        for p in row_keys:
            if g in confusion_matrix[p] and p != g:
                col_normalizer[g] += confusion_matrix[p][g]
    # LaTeX-style table rows (cells separated by \t&).
    print ' \t&' + '\t&'.join([c.split('-')[-1] for c in col_keys]) #+ '\\\\'
    for p in row_keys:
        print p.split('-')[-1],
        for g in col_keys:
            if g in confusion_matrix[p] and p != g:
                print '\t& {:.0f}'.format(100.0 * confusion_matrix[p][g] / col_normalizer[g]),
            elif p == g:
                print '\t& -',
            else:
                print '\t& 0',
        print '\n',
    # Recall loss analysis
    print '\n'.join([str(a) for a in arg_drop_by_label.most_common(10)])
    print '\n'.join([str(a) for a in arg_drop_by_dist.most_common(10)])
    total = num_split_ops + num_merge_ops
    print "Num. split-merge ops: {}. Num. PPs involved: {}".format(num_split_ops + num_merge_ops, num_pp_involved)
    for label, freq in attachments.most_common(10):
        print "{}\t{}\t{}".format(label, freq, 100.0 * freq / total)
    print "Types of semantic arguments"
    for label, freq in sem_attachments.most_common(10):
        print "{}\t{}\t{}".format(label, freq, 100.0 * freq / total)
|
import json
# writing into file & reading from files
# Writing into and reading from files.  All open/close pairs now use
# `with` so the handles are closed even if an exception occurs.
with open("My_File.txt", "w") as f:
    f.write("Some random text")

with open("My_File.txt", "r") as f:
    text_2 = f.read()
print(text_2)

# Writing lists into files
some_list = ["1", "2", "3"]
filename = "numbers.csv"
# CSV = Comma Separated Values
with open(filename, "w") as f:
    # join elements with "," in-between, producing a single string
    f.write(",".join(some_list))
with open(filename, "r") as f:
    content = f.read()
a_list = content.split(",")  # split on "," back into a list
print(a_list)
print("FINISHED WRITING AND READING CSV-FILE")

# JSON round-trip of the same list
filename = "numbers.txt"
with open(filename, "w") as f:
    f.write(json.dumps(some_list))
with open(filename, "r") as f:
    content = json.loads(f.read())
print(content)
print("FINISHED WRITING AND READING TXT-FILE")
|
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import os
from tqdm import tqdm
from .utils import augment_val
from datasets.robotics import ProcessForce, ToTensor
from datasets.robotics import MultimodalManipulationDataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms
def combine_modalitiesbuilder(unimodal, output):
    """Build a dataset transform selecting which modalities a sample
    exposes.

    `unimodal` picks a single-modality view ("force", "proprio" or
    "image"); any other value yields the full multimodal list.  `output`
    names the label key that is always appended last, after the action.
    """
    def combine_modalities(data):
        # Moves the last axis of the depth map to the front -- presumably
        # HxWxC to channels-first; TODO confirm the dataset layout.
        depth_first = lambda d: d.transpose(0, 2).transpose(1, 2)
        if unimodal == "force":
            selected = [data['force']]
        elif unimodal == "proprio":
            selected = [data['proprio']]
        elif unimodal == "image":
            selected = [data['image'], depth_first(data['depth'])]
        else:
            selected = [
                data['image'],
                data['force'],
                data['proprio'],
                depth_first(data['depth']),
            ]
        return selected + [data['action'], data[output]]
    return combine_modalities
def get_data(device, configs, filedirprefix="", unimodal=None, output='contact_next'):
    """Build train/val DataLoaders over the .h5 multimodal manipulation files.

    device: device handed to the ToTensor transform.
    configs: dict with 'dataset', 'val_ratio', 'ep_length', 'training_type',
        'action_dim', 'batch_size', 'num_workers'.
    unimodal/output: forwarded to combine_modalitiesbuilder.
    Returns (train_loader, val_loader).
    """
    # Collect every .h5 episode file in the dataset directory.
    filename_list = []
    for file in os.listdir(configs['dataset']):
        if file.endswith(".h5"):
            filename_list.append(configs['dataset'] + file)
    print(
        "Number of files in multifile dataset = {}".format(len(filename_list))
    )
    # Randomly select validation files.
    # NOTE(review): np.random.randint samples *with replacement*, so
    # val_index may contain duplicates and the effective validation split
    # can be smaller than val_ratio -- confirm this is intended.
    val_filename_list = []
    val_index = np.random.randint(
        0, len(filename_list), int(len(filename_list) * configs['val_ratio'])
    )
    for index in val_index:
        val_filename_list.append(filename_list[index])
    # Remove each chosen file from the training list.  After every pop, all
    # larger indices shift down by one, hence the np.where adjustment.
    while val_index.size > 0:
        filename_list.pop(val_index[0])
        val_index = np.where(
            val_index > val_index[0], val_index - 1, val_index)
        val_index = val_index[1:]
    print("Initial finished")
    # Expand both lists via the project's augmentation helper.
    val_filename_list1, filename_list1 = augment_val(
        val_filename_list, filename_list
    )
    print("Listing finished")
    dataloaders = {}
    samplers = {}
    datasets = {}
    # One sample per (file, step): each episode yields ep_length - 1 steps.
    samplers["val"] = SubsetRandomSampler(
        range(len(val_filename_list1) * (configs['ep_length'] - 1))
    )
    samplers["train"] = SubsetRandomSampler(
        range(len(filename_list1) * (configs['ep_length'] - 1))
    )
    print("Sampler finished")
    datasets["train"] = MultimodalManipulationDataset(
        filename_list1,
        transform=transforms.Compose(
            [
                ProcessForce(32, "force", tanh=True),
                ProcessForce(32, "unpaired_force", tanh=True),
                ToTensor(device=device),
                combine_modalitiesbuilder(unimodal, output),
            ]
        ),
        episode_length=configs['ep_length'],
        training_type=configs['training_type'],
        action_dim=configs['action_dim'],
        filedirprefix=filedirprefix
    )
    datasets["val"] = MultimodalManipulationDataset(
        val_filename_list1,
        transform=transforms.Compose(
            [
                ProcessForce(32, "force", tanh=True),
                ProcessForce(32, "unpaired_force", tanh=True),
                ToTensor(device=device),
                combine_modalitiesbuilder(unimodal, output),
            ]
        ),
        episode_length=configs['ep_length'],
        training_type=configs['training_type'],
        action_dim=configs['action_dim'],
    )
    print("Dataset finished")
    dataloaders["val"] = DataLoader(
        datasets["val"],
        batch_size=configs['batch_size'],
        num_workers=configs['num_workers'],
        sampler=samplers["val"],
        pin_memory=True,
        drop_last=True,
    )
    dataloaders["train"] = DataLoader(
        datasets["train"],
        batch_size=configs['batch_size'],
        num_workers=configs['num_workers'],
        sampler=samplers["train"],
        pin_memory=True,
        drop_last=True,
    )
    print("Finished setting up date")
    return dataloaders['train'], dataloaders['val']
|
import asyncio
async def test_install(guillotina_couchbase_requester):  # noqa
    """The couchbase add-on must be listed among the installed add-ons."""
    async with guillotina_couchbase_requester as requester:
        body, _ = await requester('GET', '/db/guillotina/@addons')
        assert 'guillotina_couchbase' in body['installed']
|
from drawing_tools.canva import Canvas
class ICanvas:
    """Wrap a Canvas built from a parsed drawing command.

    command: sequence whose items 1 and 2 are the canvas width and height.
    instances: accepted for interface compatibility; not used here.
    """
    def __init__(self, command, instances):
        width = int(command[1])
        height = int(command[2])
        self.instance = Canvas(width, height)
|
import unittest
from mock import Mock
from ml.agent.base import Agent
from ml.runner.env.base import Runner
class RunnerTests(unittest.TestCase):
    """Tests for the Runner environment wrapper."""

    def setUp(self):
        # A spec'd mock agent keeps the runner isolated from any real model.
        mock_agent = Mock(spec=Agent)
        self.runner = Runner(env_name='', agent=mock_agent, max_steps=10)
|
"""
*Variance Inference*
"""
from abc import ABCMeta
from .._operator import MomentInference
__all__ = ["VarianceInference"]
class VarianceInference(
    MomentInference,
):
    """Moment-based inference of the variance."""
    # NOTE(review): __metaclass__ only takes effect on Python 2; on Python 3
    # the metaclass must be given as `class VarianceInference(MomentInference,
    # metaclass=ABCMeta)`.  Confirm the intended runtime before changing.
    __metaclass__ = ABCMeta
|
import random as r
def mix_strength(i1, i2, d):
    """Cross over gene 0 (strength) between individuals i1 and i2 of d."""
    g1, g2 = d.get(i1), d.get(i2)
    d[i1] = (g2[0], g1[1], g1[2])
    d[i2] = (g1[0], g2[1], g2[2])
def mix_decal(i1, i2, d):
    """Cross over gene 1 (decal) between individuals i1 and i2 of d."""
    g1, g2 = d.get(i1), d.get(i2)
    d[i1] = (g1[0], g2[1], g1[2])
    d[i2] = (g2[0], g1[1], g2[2])
def mix_qd(i1, i2, d):
    """Cross over gene 2 (qd) between individuals i1 and i2 of d."""
    g1, g2 = d.get(i1), d.get(i2)
    d[i1] = (g1[0], g1[1], g2[2])
    d[i2] = (g2[0], g2[1], g1[2])
def mix_hasard(i1, i2, d):
    """Apply a random crossover to i1/i2: one gene swap (u <= 0.5) or two.

    The three swaps are equiprobable in both regimes; r.random() yields
    values in [0, 1), so the final branch covers the remaining third.
    """
    u = r.random()
    p = r.random()
    if u <= 1 / 2.:
        # Single crossover, chosen uniformly among the three gene swaps.
        if p <= 1 / 3.:
            return mix_strength(i1, i2, d)
        elif p <= 2 / 3.:
            return mix_decal(i1, i2, d)
        else:
            return mix_qd(i1, i2, d)
    else:
        # Double crossover: two different swaps applied back to back.
        if p <= 1 / 3.:
            mix_strength(i1, i2, d)
            return mix_decal(i1, i2, d)
        elif p <= 2 / 3.:
            mix_decal(i1, i2, d)
            return mix_qd(i1, i2, d)
        else:
            mix_qd(i1, i2, d)
            return mix_strength(i1, i2, d)
def crossover(i, d):
    """With probability 0.7, cross individual i with a random mate from d.

    The population dict d is modified in place by mix_hasard; when no
    crossover happens the function returns None.  The mate is drawn first
    so the random-number stream matches the original implementation.
    """
    # Build the candidate list directly instead of copy()/pop()/append loop.
    mates = [k for k in d.keys() if k != i]
    i2 = mates[r.randint(0, len(mates) - 1)]
    p = r.random()
    if p <= 0.7:
        return mix_hasard(i, i2, d)
def mutation_hasard(i, d):
    """Mutate one gene of individual i, chosen uniformly at random.

    Gene 0 is redrawn in [1, 6); genes 1 and 2 in [10, 50).
    Returns the genome as it was *before* the mutation.
    """
    u = r.random()
    old = d.get(i)
    # Float literals (1/3., 2/3.) match the file's style elsewhere (1/2.)
    # and avoid the Python 2 integer-division hazard where 1/3 == 0 would
    # make the first branch unreachable.
    if u <= 1 / 3.:
        d[i] = (5 * r.random() + 1, old[1], old[2])
    elif u <= 2 / 3.:
        d[i] = (old[0], 40 * r.random() + 10, old[2])
    else:
        d[i] = (old[0], old[1], 40 * r.random() + 10)
    return old
def mutation(i, d):
    """Mutate individual i in place with probability 1%."""
    if r.random() > 0.01:
        return
    mutation_hasard(i, d)
def mix2_strD1(i1, i2, d):
    """Exchange gene 0 (strength D1) between 5-gene individuals i1 and i2."""
    a = list(d.get(i1))
    b = list(d.get(i2))
    a[0], b[0] = b[0], a[0]
    d[i1] = tuple(a)
    d[i2] = tuple(b)
def mix2_strD2(i1, i2, d):
    """Exchange gene 1 (strength D2) between 5-gene individuals i1 and i2."""
    a = list(d.get(i1))
    b = list(d.get(i2))
    a[1], b[1] = b[1], a[1]
    d[i1] = tuple(a)
    d[i2] = tuple(b)
def mix2_strPs(i1, i2, d):
    """Exchange gene 2 (strength Ps) between 5-gene individuals i1 and i2."""
    a = list(d.get(i1))
    b = list(d.get(i2))
    a[2], b[2] = b[2], a[2]
    d[i1] = tuple(a)
    d[i2] = tuple(b)
def mix2_decal(i1, i2, d):
    """Exchange gene 3 (decal) between 5-gene individuals i1 and i2."""
    a = list(d.get(i1))
    b = list(d.get(i2))
    a[3], b[3] = b[3], a[3]
    d[i1] = tuple(a)
    d[i2] = tuple(b)
def mix2_qd(i1, i2, d):
    """Exchange gene 4 (qd) between 5-gene individuals i1 and i2."""
    a = list(d.get(i1))
    b = list(d.get(i2))
    a[4], b[4] = b[4], a[4]
    d[i1] = tuple(a)
    d[i2] = tuple(b)
|
from django.contrib import admin
from django.urls import include, path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
# Root URL configuration: Django admin, the feature apps, and the
# SimpleJWT authentication endpoints.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("photos/", include("photos.urls")),
    path("user/", include("users.urls")),
    path("posts/", include("posts.urls")),
    # POST credentials -> access/refresh token pair.
    path("token/", TokenObtainPairView.as_view(), name="token_obtain_pair"),
    # POST refresh token -> fresh access token.
    path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
]
|
import os
import sys
from termcolor import colored
def computeTrueCase(filename, output_file='output_files'):
    """Compile *filename* with pdflatex into the *output_file* directory.

    The pdflatex build log is echoed to stdout whether the run succeeds
    or fails.  (Despite its name, this function builds a PDF.)
    """
    log = ''
    try:
        print('Converting to PDF...')
        if not os.path.exists(output_file):
            os.makedirs(output_file)
        # os.popen().read() blocks until pdflatex finishes and captures its log.
        log = os.popen('pdflatex --output-directory={0} {1}'.format(output_file, filename)).read()
        # Uncomment to open pdf after build completes
        # os.popen('open {0}'.format('{0}/{1}pdf'.format(output_file, filename[:-3]))).read()
        print(log)
        print('Done!')
    except Exception:
        # `except Exception` instead of the original bare except, which
        # would also swallow KeyboardInterrupt/SystemExit.
        print(log)
        print('Ooops! Something went wrong. Does the folder name you provided exist?')
if __name__ == '__main__':
    # CLI validation: exactly one argument (the .tex file) is expected.
    # print() with one parenthesised argument works on Python 2 and 3,
    # replacing the Python-2-only print statements.
    if len(sys.argv) < 2:
        print(colored('Please provide filename and output directory', 'red'))
        print(colored('i.e. python toPDF.py HW0.tex', 'red'))
        exit()
    if len(sys.argv) > 2:
        print(colored('Too many arguments provided', 'red'))
        exit()
    if not os.path.exists(sys.argv[1]):
        print(colored('Tex file you provided does not exist.', 'red'))
        exit()
    computeTrueCase(sys.argv[1])
|
# Copyright (C) 1998-2015 by the Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
"""Cleanse certain headers from all messages."""
import re
from email.Utils import formataddr, getaddresses, parseaddr
from Mailman import mm_cfg
from Mailman.Utils import unique_message_id
from Mailman.Logging.Syslog import syslog
from Mailman.Handlers.CookHeaders import uheader
# Patterns from mm_cfg.ANONYMOUS_LIST_KEEP_HEADERS naming the headers that
# survive anonymization; everything else is stripped by remove_nonkeepers().
# Compiled once at module import time.
cres = []
for regexp in mm_cfg.ANONYMOUS_LIST_KEEP_HEADERS:
    try:
        # A trailing ':' means "match this exact header name" -- convert it
        # to an end-of-string anchor.
        if regexp.endswith(':'):
            regexp = regexp[:-1] + '$'
        cres.append(re.compile(regexp, re.IGNORECASE))
    # Python 2 except syntax (this module targets Mailman 2 / Python 2).
    except re.error, e:
        syslog('error',
               'ANONYMOUS_LIST_KEEP_HEADERS: ignored bad regexp %s: %s',
               regexp, e)
def remove_nonkeepers(msg):
    """Delete every header of msg matching none of the keep patterns (cres)."""
    # list() makes the key snapshot explicit so we never delete from the
    # mapping while iterating it.
    for hdr in list(msg.keys()):
        if not any(cre.search(hdr) for cre in cres):
            del msg[hdr]
def process(mlist, msg, msgdata):
    """Cleanse sensitive headers from msg; fully anonymize it for anonymous lists.

    mlist: the mailing list the message was posted to.
    msg: the email message object, modified in place.
    msgdata: per-message metadata (unused here).
    """
    # Always remove this header from any outgoing messages.  Be sure to do
    # this after the information on the header is actually used, but before a
    # permanent record of the header is saved.
    del msg['approved']
    # Remove this one too.
    del msg['approve']
    # And these too.
    del msg['x-approved']
    del msg['x-approve']
    # Also remove this header since it can contain a password
    del msg['urgent']
    # We remove other headers from anonymous lists
    if mlist.anonymous_list:
        syslog('post', 'post to %s from %s anonymized',
               mlist.internal_name(), msg.get('from'))
        del msg['from']
        del msg['reply-to']
        del msg['sender']
        del msg['organization']
        del msg['return-path']
        # Hotmail sets this one
        del msg['x-originating-email']
        # And these can reveal the sender too
        del msg['received']
        # And so can the message-id so replace it.
        del msg['message-id']
        msg['Message-ID'] = unique_message_id(mlist)
        # And something sets this
        del msg['x-envelope-from']
        # And now remove all but the keepers.
        remove_nonkeepers(msg)
        # Replace the sender with the list's own address, using the (possibly
        # RFC 2047 encoded) list description as the display name.
        i18ndesc = str(uheader(mlist, mlist.description, 'From'))
        msg['From'] = formataddr((i18ndesc, mlist.GetListEmail()))
        msg['Reply-To'] = mlist.GetListEmail()
        # Scrub any address out of the Unix envelope "From " line as well.
        uf = msg.get_unixfrom()
        if uf:
            uf = re.sub(r'\S*@\S*', mlist.GetListEmail(), uf)
            msg.set_unixfrom(uf)
    # Some headers can be used to fish for membership
    del msg['return-receipt-to']
    del msg['disposition-notification-to']
    del msg['x-confirm-reading-to']
    # Pegasus mail uses this one... sigh
    del msg['x-pmrqc']
|
import numpy as np
class LatticeBoltzmann:
    """Minimal D2Q9 lattice-Boltzmann helper on an (ny, nx) grid."""

    def __init__(self, ny, nx):
        self.q = 9  # number of discrete velocities (D2Q9)
        self.d = 2  # spatial dimension
        # Grid size, kept for use by the member functions.
        self.nx = nx
        self.ny = ny
        self.init_grid_var()

    def init_grid_var(self):
        """Set up the D2Q9 velocity set, its weights, and the opposite map."""
        # Velocity vectors e_i: rest node first, then counter-clockwise.
        self.ei = np.array([(0, 0),
                            (1, 0), (1, 1), (0, 1), (-1, 1),
                            (-1, 0), (-1, -1), (0, -1), (1, -1)])
        # Standard D2Q9 weights: 4/9 rest, 1/9 axis, 1/36 diagonal.
        self.w = np.full(self.q, 1.0 / 9.0)
        self.w[0] = 4.0 / 9.0
        self.w[[2, 4, 6, 8]] = 1.0 / 36.0
        # opposite[k] indexes -e_k; used by the bounce-back boundary rule.
        self.opposite = np.array([0, 5, 6, 7, 8, 1, 2, 3, 4])
        return True

    def calc_eq_dens(self, rho, v):
        """Equilibrium distributions (q, ny, nx) for rho(ny, nx), v(d, ny, nx)."""
        # 3 * (e_i . v) per direction and node -> shape (q, ny, nx).
        ev3 = 3.0 * np.dot(self.ei, v.transpose(1, 0, 2))
        # Squared speed |v|^2 at every node -> shape (ny, nx).
        speed2 = np.sum(v ** 2, axis=0)
        f_eq = np.zeros((self.q, self.ny, self.nx))
        for k in range(self.q):
            # Second-order equilibrium: w_k rho (1 + 3e.v + 4.5(e.v)^2 - 1.5 v^2)
            # (the factor 3 is already folded into ev3).
            f_eq[k, :, :] = self.w[k] * rho * (1 + ev3[k, :, :] + 0.5 * ev3[k, :, :] ** 2.0 - 1.5 * speed2)
        return f_eq

    def calc_v_rho(self, dens):
        """Macroscopic velocity (d, ny, nx) and density (ny, nx) from dens."""
        rho = np.sum(dens, axis=0)
        # Momentum sum over directions, divided by the local density.
        v = np.dot(self.ei.transpose(), dens.transpose(1, 0, 2)) / rho
        return v, rho

    def bounce_back(self, dens, wall):
        """Return dens with wall-node distributions flipped to their opposites."""
        flipped = dens.copy()
        for k in range(self.q):
            flipped[k, wall] = dens[self.opposite[k], wall]
        return flipped

    def stream(self, dens):
        """Propagate each distribution one lattice step along its velocity."""
        moved = dens.copy()
        for k in range(self.q):
            ex, ey = self.ei[k]
            # Roll y (axis 0) by e_y and x (axis 1) by e_x.
            moved[k, :, :] = np.roll(np.roll(dens[k, :, :], ey, axis=0), ex, axis=1)
        return moved
|
#PRINTING THE Q TABLE
import rospy
# from std_msgs.msg import Float64, String
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
import sys
import random
from math import cos, sin, atan, pow
import numpy as np
import csv
from pandas import ExcelWriter
PI = 3.14159265359       # local pi constant (math.pi would also work)
vel = 0                  # current linear velocity command
ang = 0                  # current steering angle command
laserMsg = LaserScan()   # holder for the most recent laser scan message
laserReading = []        # buffered laser samples
noOfsamples = 50         # number of laser samples per reading (presumed; confirm against the subscriber)
msgTwist = Twist()       # reusable velocity command message
# Q-learning hyper-parameters and containers.
epsilon = 0.01
discount = 0.3
actions = []
states = []
Q = {}
time = 0

# Discretised action/state spaces.
angVals = np.around(np.arange(-0.2, 0.25, 0.1), decimals=2)
velIncVals = np.arange(-2, 3, 1)  # (-4,5,2)
velocities = np.arange(0, 20, 4)
laserVals = np.arange(5, 35, 10)
laserPosCombs = []

# An action is a (velocity increment, steering angle) pair.
actions = [(dv, a) for dv in velIncVals for a in angVals]
# Every ordered triple of discretised laser readings.
laserPosCombs = [(l1, l2, l3)
                 for l1 in laserVals
                 for l2 in laserVals
                 for l3 in laserVals]
# A state is (velocity, laser triple); Q starts with random integer values.
for velocity in velocities:
    for laserComb in laserPosCombs:
        states.append((velocity, laserComb))
        Q[(velocity, laserComb)] = {a: random.randint(0, 10) for a in actions}
def save_xls(list_dfs, xls_path):
    """Write each DataFrame in list_dfs to its own sheet of one Excel file."""
    writer = ExcelWriter(xls_path)
    for n, df in enumerate(list_dfs):
        # Sheets are named sheet0, sheet1, ... in list order.
        df.to_excel(writer,'sheet%s' % n)
    # NOTE(review): ExcelWriter.save() was removed in pandas 2.0; newer
    # pandas requires writer.close() or a `with ExcelWriter(...)` block.
    writer.save()
def main(argv):
    """Dump the Q table to dict.csv as a tab-separated action x state matrix.

    Python 2 only: uses dict.iterkeys(), print statements, and csv 'wb' mode.
    """
    with open('dict.csv', 'wb') as csv_file:
        writer = csv.writer(csv_file, delimiter = '\t')
        # Header row: one column per state key of Q.
        writer.writerow(['ID'] + [keyr for keyr in Q.iterkeys()])
        #for key, value in Q[(velocities[1], laserPosCombs[1])].items():
        #    writer.writerow([key, value])
        # NOTE(review): assumes every inner dict iterates its action keys in
        # the same order as the sample state used here -- true because all
        # inner dicts are built from the same `actions` list.
        for key in Q[(velocities[1], laserPosCombs[1])].iterkeys():
            # Row label like "(dv, 0.10)".
            RR = '(' + str(key[0]) + ", %.2f" %key[1] + ')'
            writer.writerow([RR] + [Q[maskey][key] for maskey in Q.iterkeys()])
    print velocities[1]
    print laserPosCombs[1]
    print Q[(velocities[1], laserPosCombs[1])]
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node; nothing to do.
        pass
|
"""
Estimating Hessian for ReLU neural network activations using forward HVP + Lanczos.
"""
import matplotlib.pyplot as plt
from insilico_Exp_torch import TorchScorer
from GAN_utils import upconvGAN
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm, trange
from torch_utils import show_imgrid
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from os.path import join
from NN_sparseness.sparse_invariance_lib import shorten_layername
from NN_sparseness.insilico_manif_configs import RN50_config, VGG16_config, manifold_config, DenseNet169_config
from collections import defaultdict
from stats_utils import saveallforms
figdir = r"E:\OneDrive - Harvard University\Manifold_Toymodel\tuning"
sumdir = r"E:\OneDrive - Harvard University\Manifold_Toymodel\summary"
#%%
# Frozen fc6 GAN generator on the GPU: eval mode, gradients disabled so
# backprop only flows into the latent code.
G = upconvGAN()
G.cuda().eval()
G.requires_grad_(False)
from Hessian.load_hessian_data import load_Haverage
# Reference spectrum: pre-averaged Hessian of the fc6 GAN image manifold.
H, eva, evc = load_Haverage("fc6GAN")
#%%
def grad_evol_unit(scorer, eps=0.5):
    """Gradient-ascent evolution of a latent z that maximizes the scored unit.

    Runs up to 150 Adam steps.  Whenever the activation is exactly zero
    (no gradient signal), the latent is jittered by eps-scaled noise; after
    50 failed jitters the latent is re-drawn, and after 1500 the evolution
    is abandoned.  Returns (z, last activation, number of failed tries).
    """
    latent = torch.randn(1, 4096).cuda()
    latent.requires_grad_(True)
    opt = optim.Adam([latent], lr=1e-1)
    step_count = 0
    failed_try = 0
    while step_count < 150:
        img = G.visualize(latent)
        act = scorer.score_tsr_wgrad(img)
        loss = -act
        if torch.isclose(act, torch.zeros(1).cuda()).any():
            # Dead region: no gradient, so random-walk the latent instead.
            latent.data = latent.data + eps * torch.randn(1, 4096).cuda()
            failed_try += 1
            if failed_try > 50:
                latent.data = 1.5 * torch.randn(1, 4096).cuda()
            if failed_try > 1500:
                print("failed evolution, stop")
                break
        else:
            opt.zero_grad()
            loss.backward()
            opt.step()
            step_count += 1
            if step_count % 10 == 0 and step_count > 0:
                print(step_count, act.item())
    return latent, act, failed_try
def perturb_activation(G, scorer, z, EPSs, sample_size=100):
    """Score images generated at random perturbations around latent z.

    For each relative radius EPS in EPSs, sample `sample_size` directions on
    the sphere of radius EPS * |z| around z, score the generated images, and
    print summary statistics.  Returns {EPS: activations as a numpy array}.
    """
    if isinstance(EPSs, float):
        EPSs = [EPSs]
    act_dict = {}
    for EPS in EPSs:
        radius = z.norm(dim=1).detach() * EPS
        with torch.no_grad():
            direction = torch.randn([sample_size, 4096]).cuda()
            # Normalize each direction, then scale to the target radius.
            direction = direction / direction.norm(dim=1, keepdim=True) * radius.unsqueeze(1)
            scores = scorer.score_tsr_wgrad(G.visualize(z.detach() + direction)).cpu()
        print(f"EPS={EPS} (D={radius.item():.2f}) mean={scores.mean():.2f} std={scores.std():.2f} median={torch.median(scores):.2f}"
              f" [{scores.min():.2f}-{scores.max():.2f}]")
        act_dict[EPS] = scores.numpy()
    return act_dict
#%%
from time import time
from hessian_eigenthings.lanczos import lanczos
from Hessian.lanczos_generalized import lanczos_generalized
from Hessian.GAN_hvp_operator import GANHVPOperator, GANForwardHVPOperator, \
compute_hessian_eigenthings, NNForwardHVPOperator, \
GANForwardHVPOperator_multiscale, NNForwardHVPOperator_multiscale
from NN_sparseness.insilico_manif_configs import VGG16_config, RN50_config
#%%
"""
Attempt 3, using Lanczos iteration and HVP to compute the approximate Hessian
"""
#%%
# For every (channel, layer) unit of the configured network: evolve a
# maximally-activating latent, probe activations at several perturbation
# radii, Lanczos-approximate the Hessian spectrum via forward HVPs, and
# save spectrum + diagnostics per unit.
savedir = r"E:\OneDrive - Harvard University\Manifold_Toymodel\gradHess"
netname = "densenet169" #"vgg16"
scorer = TorchScorer(netname, rawlayername=False)
confg = manifold_config(netname)
# scorer.set_unit("score", '.layer3.Bottleneck4', unit=(55, 7, 7,), ingraph=True)
for chan in range(10, 20):
    for layer, unit_dict in confg.items():  #RN50_config.items():
        if unit_dict["unit_pos"] is None:
            # Channel-level unit (e.g. fully connected / pooled layer).
            scorer.set_unit("score", layer, unit=(chan,), ingraph=True)
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d" % (chan)
        else:
            # Spatial unit: fix a (channel, x, y) position.
            unit_x, unit_y = unit_dict["unit_pos"]
            scorer.set_unit("score", layer, unit=(chan, unit_x, unit_y), ingraph=True)
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d-%d-%d" % (chan, unit_x, unit_y)
        #%%
        z, act, failed_try = grad_evol_unit(scorer, eps=0.5)
        if failed_try > 1500:
            print("failed evolution, stop")
            continue
        pert_actdict = perturb_activation(G, scorer, z,
                                          EPSs=[1, 0.5, 0.2, 1E-1, 1E-2, ], sample_size=50)
        #%%
        activHVP = GANForwardHVPOperator_multiscale(G, z[0].detach(),
                                                    lambda x: scorer.score_tsr_wgrad(x),
                                                    preprocess=lambda x: x, EPS=0.2,
                                                    scalevect=(4.0, 2.0, 1.0, 0.5))
        # activHVP = NNForwardHVPOperator_multiscale(net, cent, EPS=5E-1,
        #                             scalevect=(4.0, 2.0, 1.0, 0.5))
        t0 = time()
        eigvals, eigvects = lanczos(activHVP, num_eigenthings=2000, use_gpu=True)
        print(time() - t0)  # 146sec for 2000 eigens
        # Lanczos returns ascending order; flip to descending.
        eigvals = eigvals[::-1]
        eigvects = eigvects[::-1, :]
        # np.savez appends ".npz", so the file on disk ends in ".pt.npz"
        # (the loading cells below expect exactly that name).
        np.savez(join(savedir, f"{unitstr}_Hess_data_ForwardHVP_multiscale.pt"),
                 **{"z": z.detach().cpu().numpy(),
                    "act": act.detach().cpu().numpy(),
                    "eigvals": eigvals, "eigvects": eigvects,
                    "pert_actdict": pert_actdict
                    }, )
        #%%
        plt.figure(figsize=(5, 5))
        plt.semilogy(np.sort(np.abs(eigvals))[::-1])
        plt.title(f"SVD of Hessian matrix (Forward HVP)\n{unitstr}")
        plt.savefig(join(savedir, f"{unitstr}_SVD_spectrum_ForwardHVP_multiscale.png"))
        plt.show()
#%%
scorer.cleanup()
#%%
# Reload the saved per-unit Hessian spectra for densenet169 into per-layer
# lists (spectra, peak activations, latents); missing units are skipped.
savedir = r"E:\OneDrive - Harvard University\Manifold_Toymodel\gradHess"
spect_HVP_dict = defaultdict(list)
peakact_dict = defaultdict(list)
z_dict = defaultdict(list)
netname = "densenet169" #"vgg16"
confg = manifold_config(netname)
for layer, unit_dict in confg.items():
    for chan in range(10, 20):
        if unit_dict["unit_pos"] is None:
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d" % (chan)
        else:
            unit_x, unit_y = unit_dict["unit_pos"]
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d-%d-%d" % (chan, unit_x, unit_y)
        try:
            data = np.load(join(savedir, f"{unitstr}_Hess_data_ForwardHVP_multiscale.pt.npz"), allow_pickle=True)
            spect_HVP_dict[layer].append(data["eigvals"])
            peakact_dict[layer].append(data["act"])
            z_dict[layer].append(data["z"])
        except FileNotFoundError:
            print(f"{unitstr} not found")
#%%
# Same loading for vgg16 (no latents' tuning curves collected here).
netname = "vgg16"# "resnet50_linf8"
spect_HVP_dict = defaultdict(list)
peakact_dict = defaultdict(list)
z_dict = defaultdict(list)
for layer, unit_dict in VGG16_config.items():  #RN50_config.items():
    for chan in range(10, 20):
        if unit_dict["unit_pos"] is None:
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d" % (chan)
        else:
            unit_x, unit_y = unit_dict["unit_pos"]
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d-%d-%d" % (chan, unit_x, unit_y)
        try:
            data = np.load(join(savedir, f"{unitstr}_Hess_data_ForwardHVP_multiscale.pt.npz"), allow_pickle=True)
            spect_HVP_dict[layer].append(data["eigvals"])
            peakact_dict[layer].append(data["act"])
            z_dict[layer].append(data["z"])
        except FileNotFoundError:
            print(f"{unitstr} not found")
#%%
# vgg16 loading again, additionally collecting the perturbation tuning
# dictionaries (pert_actdict) saved per unit.
netname = "vgg16"# "resnet50_linf8"
spect_HVP_dict = defaultdict(list)
peakact_dict = defaultdict(list)
z_dict = defaultdict(list)
tune_dict = defaultdict(list)
for layer, unit_dict in VGG16_config.items():  #RN50_config.items():
    for chan in range(10, 20):
        if unit_dict["unit_pos"] is None:
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d" % (chan)
        else:
            unit_x, unit_y = unit_dict["unit_pos"]
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d-%d-%d" % (chan, unit_x, unit_y)
        try:
            data = np.load(join(savedir, f"{unitstr}_Hess_data_ForwardHVP_multiscale.pt.npz"), allow_pickle=True)
            spect_HVP_dict[layer].append(data["eigvals"])
            peakact_dict[layer].append(data["act"])
            z_dict[layer].append(data["z"])
            # pert_actdict was saved as an object array; .item() unwraps it.
            tune_dict[layer].append(data['pert_actdict'].item())
        except FileNotFoundError:
            print(f"{unitstr} not found")
#%%
"""
Plot activation as a function of random perturbation size
"""
for layer, unit_dict in VGG16_config.items():
    plt.figure(figsize=(5, 5))
    actdicts = tune_dict[layer]
    actvec = peakact_dict[layer]
    for act, actdict in zip(actvec, actdicts):
        # Normalise every perturbed activation by the unit's peak activation.
        norm_actdict = {k: v / act for k, v in actdict.items()}
        # sns.scatterplot(data=norm_actdict, palette="Set1",)
        sns.stripplot(data=pd.DataFrame(norm_actdict),
                      palette="Set1", alpha=0.25, jitter=True)
        # for k, v in actdict.items():
        #     plt.scatter(k * np.ones_like(v), v, label=k)
    plt.ylim(-0.05, 1.05)
    plt.title(f"Activation at perturbation\n{layer}")
    plt.xlabel("EPS (fraction of |z|)")
    plt.ylabel("Activation normed to peak")
    saveallforms(sumdir, f"{netname}_{layer}_act_pert_multiscale")
    # plt.legend()
    plt.show()
#%%
"""
Plot Hessian spectrum
"""
# Per layer: sort |eigenvalues| descending, normalise by peak activation,
# and plot the mean spectrum with a 25-75 percentile band on a log axis.
plt.figure(figsize=(6, 6))
for layer, spect_col in spect_HVP_dict.items():
    spect_arr = np.array(spect_col)
    act_arr = np.array(peakact_dict[layer])
    z_arr = np.array(z_dict[layer])
    znorm = np.linalg.norm(z_arr, axis=1)
    spect_arr = np.sort(np.abs(spect_arr), axis=1)[:, ::-1]
    # norm_spect_arr = spect_arr / np.nanmax(spect_arr, axis=1, keepdims=True)# / act_arr #
    norm_spect_arr = spect_arr / act_arr  #/ znorm
    norm_spect_range = np.nanpercentile(norm_spect_arr, [25, 75], axis=0)
    plt.semilogy(np.nanmean(norm_spect_arr, axis=0),
                 label=shorten_layername(layer), linewidth=2, alpha=0.7)
    print(f"{layer}: znorm {znorm} activation {act_arr}")
    plt.fill_between(range(len(norm_spect_range[0])),
                     norm_spect_range[0],
                     norm_spect_range[1], alpha=0.2)
    # plt.plot(np.log10(np.nanmedian(norm_spect_arr, axis=0)), label=layer)
    # plt.semilogy(data["eigvals"], alpha=0.3, label=shorten_layername(layer))
plt.xlim([-25, 500])
plt.ylim([1E-4, 1E-2])
plt.legend()
plt.title(f"Network: {netname} \n Eigen Spectrum of Hessian matrix (Forward HVP_multiscale)")
saveallforms(sumdir, f"{netname}_spectrum_cmp_ForwardHVP_multiscale2")
plt.show()
#%%
"""Alternative ways to summarize the spectrum"""
# Same summary as above but on a linear y-axis.
plt.figure(figsize=(6, 6))
for layer, spect_col in spect_HVP_dict.items():
    spect_arr = np.array(spect_col)
    act_arr = np.array(peakact_dict[layer])
    z_arr = np.array(z_dict[layer])
    znorm = np.linalg.norm(z_arr, axis=1)
    spect_arr = np.sort(np.abs(spect_arr), axis=1)[:, ::-1]
    # norm_spect_arr = spect_arr / np.nanmax(spect_arr, axis=1, keepdims=True)# / act_arr #
    norm_spect_arr = spect_arr / act_arr  #/ znorm
    norm_spect_range = np.nanpercentile(norm_spect_arr, [25, 75], axis=0)
    plt.plot(np.nanmean(norm_spect_arr, axis=0),
             label=shorten_layername(layer), linewidth=2, alpha=0.7)
    # print(f"{layer}: znorm {znorm} activation {act_arr}") # sanity check
    plt.fill_between(range(len(norm_spect_range[0])),
                     norm_spect_range[0],
                     norm_spect_range[1], alpha=0.2)
    # plt.plot(np.log10(np.nanmedian(norm_spect_arr, axis=0)), label=layer)
    # plt.semilogy(data["eigvals"], alpha=0.3, label=shorten_layername(layer))
plt.xlim([-25, 500])
plt.ylim([1E-8, 0.005])
plt.legend()
plt.title(f"Network: {netname} \n Eigen Spectrum of Hessian matrix (Forward HVP_multiscale)")
saveallforms(sumdir, f"{netname}_spectrum_cmp_ForwardHVP_multiscale2_lin")
plt.show()
#%%
# Single-scale forward-HVP version of the per-unit Hessian computation.
# netname = "resnet50_linf8"
# netname = "resnet50"
# netname = "densenet121"
netname = "vgg16"
scorer = TorchScorer(netname, rawlayername=False)
# NOTE(review): this sets a ResNet-style '.layer3.Bottleneck4' unit on a
# vgg16 scorer just before the loop overrides it -- looks like a leftover.
scorer.set_unit("score", '.layer3.Bottleneck4', unit=(55, 7, 7,), ingraph=True)
for chan in range(10, 20):
    for layer, unit_dict in VGG16_config.items():  #RN50_config.items():
        if unit_dict["unit_pos"] is None:
            scorer.set_unit("score", layer, unit=(chan,), ingraph=True)
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d" % (chan)
        else:
            unit_x, unit_y = unit_dict["unit_pos"]
            scorer.set_unit("score", layer, unit=(chan, unit_x, unit_y), ingraph=True)
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d-%d-%d" % (chan, unit_x, unit_y)
        #%%
        z, act, failed_try = grad_evol_unit(scorer, eps=0.5)
        if failed_try > 1500:
            print("failed evolution, stop")
            continue
        #%%
        activHVP = GANForwardHVPOperator(G, z[0].detach(),
                                         lambda x: scorer.score_tsr_wgrad(x).mean(),
                                         preprocess=lambda x: x, EPS=1E-2,)
        t0 = time()
        eigvals, eigvects = lanczos(activHVP, num_eigenthings=2000, use_gpu=True)
        print(time() - t0)  # 146sec for 2000 eigens
        eigvals = eigvals[::-1]
        eigvects = eigvects[::-1, :]
        # NOTE(review): the payload dict is passed *positionally*, so numpy
        # stores it under 'arr_0' -- the loader below reads data['arr_0'].
        np.savez(join(figdir, f"{unitstr}_Hess_data_ForwardHVP.pt"),
                 {"z": z.detach().cpu().numpy(),
                  "act": act.detach().cpu().numpy(),
                  "eigvals": eigvals, "eigvects": eigvects, },
                 )
        #%%
        plt.figure(figsize=(5, 5))
        plt.semilogy(eigvals)
        plt.title(f"SVD of Hessian matrix (Forward HVP)\n{unitstr}")
        plt.savefig(join(figdir, f"{unitstr}_SVD_spectrum_ForwardHVP.png"))
        plt.show()
        #%%
scorer.cleanup()
#%%
# Quick look at the last computed spectrum.
eigvals = eigvals[::-1]
plt.figure()
plt.semilogy(np.abs(eigvals), label="conv3_relu", linewidth=2, alpha=0.7)
plt.show()
# VGG16_config
#%%
# Reload the single-scale per-unit spectra saved above.
netname = "vgg16"# "resnet50_linf8"
spect_HVP_dict = defaultdict(list)
peakact_dict = defaultdict(list)
for layer, unit_dict in VGG16_config.items():  #RN50_config.items():
    for chan in range(10, 20):
        if unit_dict["unit_pos"] is None:
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d" % (chan)
        else:
            unit_x, unit_y = unit_dict["unit_pos"]
            unitstr = f"{netname}-{shorten_layername(layer)}-unit%d-%d-%d" % (chan, unit_x, unit_y)
        try:
            data = np.load(join(figdir, f"{unitstr}_Hess_data_ForwardHVP.pt.npz"), allow_pickle=True)
            # Saved positionally -> unwrap the dict from 'arr_0'.
            data = data['arr_0'].item()
            spect_HVP_dict[layer].append(data["eigvals"])
            peakact_dict[layer].append(data["act"])
        except FileNotFoundError:
            print(f"{unitstr} not found")
#%%
# Layer-wise mean spectra of the single-scale run (raw, unnormalised:
# the second assignment overwrites the normalised array).
plt.figure(figsize=(6, 6))
for layer, spect_col in spect_HVP_dict.items():
    spect_arr = np.array(spect_col)
    act_arr = np.array(peakact_dict[layer])
    norm_spect_arr = spect_arr / np.nanmax(spect_arr, axis=1, keepdims=True)# / act_arr #
    norm_spect_arr = spect_arr
    norm_spect_range = np.nanpercentile(norm_spect_arr, [25, 75], axis=0)
    plt.semilogy(np.nanmean(norm_spect_arr, axis=0),
                 label=shorten_layername(layer), linewidth=2, alpha=0.7)
    # plt.fill_between(range(len(norm_spect_range[0])),
    #                  norm_spect_range[0],
    #                  norm_spect_range[1], alpha=0.2)
    # plt.plot(np.log10(np.nanmedian(norm_spect_arr, axis=0)), label=layer)
    # plt.semilogy(data["eigvals"], alpha=0.3, label=shorten_layername(layer))
# The second xlim/ylim pair overrides the first.
plt.xlim([0, 300])
plt.ylim([1E-3, 5E-1])
plt.xlim([0, 2000])
plt.ylim([1E-7, 5E-1])
plt.legend()
plt.title(f"Network: {netname} \n Eigen Spectrum of Hessian matrix (Forward HVP)")
saveallforms(sumdir, f"{netname}_spectrum_cmp_ForwardHVP")
plt.show()
#%% Dev zone
#%% Experiment on the spatial scale of hessian
netname = "vgg16"
scorer = TorchScorer(netname, rawlayername=True)
scorer.set_unit("score", '.features.ReLU6', unit=(55, 56, 56,), ingraph=True) # .features.ReLU29
#%%
z, act, failed_try = grad_evol_unit(scorer, eps=0.5)
#%%
# Sweep the finite-difference step EPS of the forward HVP and compare the
# resulting Lanczos spectra.
eigvals_dict = {}
eigvects_dict = {}
for EPS in [1E-1, 1E-2, 1E-3, 1E-4, 1E-5]:
    activHVP = GANForwardHVPOperator(G, z[0].detach(),
                                     lambda x: scorer.score_tsr_wgrad(x).mean(),
                                     preprocess=lambda x: x, EPS=EPS,)
    # activHVP.apply(1*torch.randn(4096).requires_grad_(False).cuda())
    t0 = time()
    try:
        eigvals, eigvects = lanczos(activHVP, num_eigenthings=2000, use_gpu=True)
        print("%.1e took %.3f sec"%(EPS, time() - t0)) # 146sec for 2000 eigens
        eigvals_dict[EPS] = eigvals
        eigvects_dict[EPS] = eigvects
    except:
        # NOTE(review): bare except silently hides *all* Lanczos failures
        # (including interrupts) -- acceptable for a dev-zone sweep only.
        print("%.1e failed"%(EPS))
#%%
plt.figure(figsize=(5, 5))
for EPS, eigvals in eigvals_dict.items():
    # if EPS != 1E-1:
    #     continue
    plt.semilogy(np.sort(np.abs(eigvals), )[::-1], label=f"EPS={EPS:.1e}")
# Overlay the GAN's own Hessian spectrum as a reference curve.
plt.semilogy(np.abs(eva[::-1])[:2000], label=f"GAN")
plt.legend()
plt.title(f"SVD of Hessian matrix (Forward HVP)\n spatial scale comparison")
saveallforms(sumdir, f"{netname}_spectrum_ForwardHVP_spatial_comparison.png")
plt.show()
#%% Distribution of activation at a certain distance
pert_actdict = perturb_activation(G, scorer, z, EPSs=[1, 0.5, 0.2, 1E-1, 1E-2,], sample_size=50)
#%%
# Manual perturbation probe at a fixed relative radius.
EPS = 2E-1
dist = z.norm(dim=1).detach() * EPS
dz = torch.randn([100, 4096]).cuda()
dz = dz / dz.norm(dim=1, keepdim=True) * dist.unsqueeze(1)
acts = scorer.score_tsr_wgrad(G.visualize(z.detach() + dz)).cpu()
print(acts.mean(), acts.std(), acts.min(), acts.max(), acts)
#%%
# Gradient of summed activations w.r.t. the perturbation directions.
EPS = 1E-4
dist = z.norm(dim=1).detach() * EPS
dz = torch.randn([50, 4096]).cuda()
dz = dz / dz.norm(dim=1, keepdim=True) * dist.unsqueeze(1)
dz.requires_grad_(True)
acts = scorer.score_tsr_wgrad(G.visualize(z.detach() + dz)).cpu()
acts.sum().backward()
#%%
dzgrad = dz.grad
dzgrad.unique(dim=0).shape |
#!/usr/bin/python
from flask import Flask, request
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def getSpecs():
    """Root endpoint: answer every GET/POST with a fixed greeting."""
    greeting = "Hello world"
    return greeting
if __name__ == "__main__":
    # NOTE(review/security): debug=True together with host 0.0.0.0 exposes
    # the interactive Werkzeug debugger to the whole network -- disable
    # debug (or bind to localhost) before deploying.
    app.run(debug=True,host="0.0.0.0")
|
import random
def coloda():
    """Return a fresh 52-card deck as strings like '2C' ... 'TX'.

    Ranks: '2'-'10' plus J, Q, K, T; suits: C, P, B, X.  Order matches the
    original: all suits of rank 2 first, then rank 3, etc.
    """
    ranks = [str(n) for n in range(2, 11)] + ['J', 'Q', 'K', 'T']
    suits = ('C', 'P', 'B', 'X')
    return [rank + suit for rank in ranks for suit in suits]
def start(deck):
    """Draw two random cards from deck (removed in place); return (hand, deck)."""
    hand = []
    for _ in range(2):
        card = random.choice(deck)  # pick a random remaining card
        deck.remove(card)           # take it out of the deck
        hand.append(card)
    return hand, deck
def nachalo():
    """Game loop: deal a starting hand, then repeatedly show hand/score and ask.

    Loops forever; vopros() terminates the process via exit() when the
    player answers 'no'.
    """
    deck = coloda()
    # (The original pre-assignment `hand = []` was dead code: `start`
    # returns the hand that is bound here.)
    hand, deck = start(deck)
    while True:
        print_hand(hand)
        print_score(hand)
        vopros(hand, deck)
def give_new_kart(deck):
    """Remove one random card from deck (in place) and return (card, deck)."""
    drawn = random.choice(deck)
    deck.remove(drawn)
    return drawn, deck
def vopros(hand, deck):
    """Ask whether to draw another card: 'no' exits, 'yes' draws into hand."""
    answer = input('Хотите взять еще одну карту? ("yes" or "no"): ').lower()
    if answer == 'no':
        exit()
    elif answer == 'yes':
        card, deck = give_new_kart(deck)
        hand.append(card)
    else:
        print('Неверный ввод')
def print_hand(hand):
    """Print all cards of the hand on one line (trailing space preserved)."""
    print(''.join(card + ' ' for card in hand))
def print_score(hand):
    """Compute and print the hand's point total.

    Values are keyed by the card's first character: 2-9 face value,
    '1' (first char of '10') = 10, J=2, Q=3, K=4, T (ace) = 11 --
    the house rules of this game.
    """
    costs = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
             '9': 9, '1': 10, 'J': 2, 'Q': 3, 'K': 4, 'T': 11}
    print(sum(costs[card[0]] for card in hand))
nachalo()
|
import numpy as np
from sklearn.datasets import make_circles, make_moons, make_blobs
import os
import matplotlib.pyplot as plt
import plotly.graph_objects as go
def generate_data_lin(N=1000):
    """Two linearly separable Gaussian blobs with labels in {-1, +1}.

    Returns (x, y) where x has shape (N, 2).
    """
    centers = np.array([[-1, -1], [1, 1]])
    x, y = make_blobs(N, centers=centers, n_features=2, cluster_std=0.2)
    y[y == 0] = -1  # relabel class 0 as -1
    return x, y
def generate_data_basis(P, noise, N):
    """XOR-on-a-circle ('circle') or nested-circles ('inner_circle') data.

    Returns (x, y) with x of shape (N, 2) and labels in {-1, +1};
    any other P falls through and returns None.
    """
    if P == 'circle':
        # NOTE(review): angles come from a *Gaussian* scaled by 2*pi rather
        # than a uniform draw on [0, 2*pi) -- confirm this is intentional.
        phi = np.random.randn(N) * 2 * np.pi
        x1, x2 = np.cos(phi), np.sin(phi)
        y = np.ones((N, 1))
        # Quadrants II and IV (sign(x1) != sign(x2)) get the negative label.
        y[((x1 < 0) & (x2 > 0)) | ((x1 > 0) & (x2 < 0))] = -1
        x = np.stack((x1, x2), axis=1) + np.random.rand(N, 2) * noise
        return x, y.flatten()
    if P == 'inner_circle':
        x, y = make_circles(N, factor=0.2, noise=noise)
        y[y == 0] = -1
        return x, y
def generate_data_moons(noise):
    """Fixed-seed two-moons train/test split with labels in {-1, +1}.

    Returns (x_train, y_train, x_test, y_test), 500 samples each.
    """
    def moons(seed):
        xs, ys = make_moons(500, noise=noise, random_state=seed)
        ys[ys == 0] = -1
        return xs, ys

    x_train, y_train = moons(75)
    x_test, y_test = moons(233)
    return x_train, y_train, x_test, y_test
def generate_data_non_lin(N=1000):
    """Labels from a noisy quadratic decision function of 2-D Gaussian points.

    Returns (x, labels) with x of shape (N, 2) and labels in {-1, +1}.
    """
    x = np.random.randn(N, 2)
    w = np.array([-.00, 0.01, 0.1, -0.04, 0.09, 0.02])
    # Quadratic feature map: [1, x1, x2, x1^2, x2^2, x1*x2].
    features = np.hstack([np.ones([N, 1]), x, x**2, x[:, :1]*x[:, 1:2]])
    f = np.dot(features, w)
    # Sign of the (noisy) decision value, mapped to {-1, +1}.
    labels = 2 * ((f + np.random.randn(N) * 0.02) > 0) - 1
    return x, labels
def maybe_makedirs(path_to_create):
    """Create a directory unless it already exists.

    The OSError handling tolerates the race where another process creates
    the directory between our check and the makedirs call: the error is
    swallowed only when the path turned out to be an existing directory.

    Inputs:
        path_to_create - A string path to a directory you'd like created.
    """
    try:
        os.makedirs(path_to_create)
    except OSError:
        if os.path.isdir(path_to_create):
            return
        raise
def plot_data_decision_function(data, y_pred, clf, preprocessing=None):
    """Visualise a binary classifier: matplotlib scatter of train/test sets
    (misclassified points highlighted) and a plotly contour of the
    classifier's decision function.

    :param data: dict with 'x_train', 'y_train', 'x_pred', 'y_true' arrays
                 (labels assumed to be +/-1, per the generators in this file)
    :param y_pred: predicted labels for data['x_pred']
    :param clf: fitted estimator exposing decision_function()
    :param preprocessing: optional callable applied to the grid points before
                          clf.decision_function (e.g. a feature map)
    """
    x_train = data['x_train']
    y_train = data['y_train']
    x_pred = data['x_pred']
    y_true = data['y_true']
    plt.figure()
    plt.subplot(1, 2, 1)
    c = ['powderblue' if lb == 1 else 'indianred' for lb in y_train]
    plt.scatter(x_train[:, 0], x_train[:, 1], c=c, alpha=0.5, s=50)
    plt.title("Training Set")
    plt.subplot(1, 2, 2)
    c = ['powderblue' if lb == 1 else 'indianred' for lb in y_pred]
    plt.scatter(x_pred[:, 0], x_pred[:, 1], c=c, s=50, alpha=0.5)
    # misclassified data
    # With +/-1 labels, d is 2 (pred +1, true -1), -2 (pred -1, true +1), or 0.
    d = y_pred - y_true
    misclass_idx = np.where(d != 0)[0]
    c = ['red' if lb == 2 else 'blue' for lb in d[misclass_idx]]
    plt.scatter(x_pred[misclass_idx, 0],
                x_pred[misclass_idx, 1], c=c, s=50, alpha=0.8)
    accuracy = 100 * (1 - len(misclass_idx) / float(x_pred.shape[0]))
    plt.title("Classification accuracy on test set: %.2f%%" % accuracy)
    # Empty scatters create legend handles without plotting any data.
    plt.legend(handles=[plt.scatter([], [], c='powderblue', s=50, alpha=0.5),
                        plt.scatter([], [], c='indianred', s=50, alpha=0.5),
                        plt.scatter([], [], c='red', s=50, alpha=0.8),
                        plt.scatter([], [], c='blue', s=50, alpha=0.8)],
               labels=['Class A', 'Class B', 'Misclassified Class B', 'Misclassified Class A'])
    plt.show()
    fig = go.Figure()
    # plot the decision function over a 20x20 grid spanning the test set
    xm = np.linspace(np.min(x_pred[:, 0]), np.max(x_pred[:, 0]), 20)
    ym = np.linspace(np.min(x_pred[:, 1]), np.max(x_pred[:, 1]), 20)
    xx, yy = np.meshgrid(xm, ym)
    inpoints = np.c_[xx.ravel(), yy.ravel()]
    if preprocessing:
        inpoints = preprocessing(inpoints)
    Z = clf.decision_function(inpoints)
    # Computes the decision function that separates the 1 labels
    # from the -1 labels
    # ravel() returns a contiguous flattened array
    # c_ concatenates arrays along the 2nd dimension
    Z = Z.reshape(xx.shape)
    fig.add_trace(go.Scatter(x=x_pred[:, 0], y=x_pred[:, 1],
                             mode='markers', showlegend=False,
                             marker=dict(color=y_pred, colorscale="RdBu", size=10)))
    fig.add_trace(go.Contour(x=xm, y=ym, z=Z, colorscale='RdBu'))
    fig.update_layout(title="Decision Function", width=600,
                      yaxis=dict(scaleanchor="x", scaleratio=1))
    fig.show()
|
from pwn import *
import string
import sys

# Target host is taken from the command line: python exploit.py <host>
HOST = sys.argv[1]

io = remote(HOST, 6789)
# pwntools tubes operate on bytes on Python 3; passing str triggers
# BytesWarning/implicit encoding, so send and match explicit byte strings.
io.sendline(b"mjtezmjtezmjtez")
print(io.recvuntil(b"That's it!", timeout=2).decode())
#!/usr/bin/env python
# coding: utf-8
# # Ejemplo
# In[14]:
#Se importan las librerias
import sys
import glob
import os
import datetime
import logging
import subprocess
#from importlib import reload
import re, csv
from io import StringIO
import shlex
import uuid
import pandas as pd
import shutil
# In[36]:
#Declarando las funciones
def func(x):
    """Map a percentage label such as '+10%' to its decimal value.

    Unrecognised labels (including None) map to 0.
    """
    slope_by_label = {
        '+10%': 0.1,
        '+6%': 0.06,
        '-10%': -0.1,
        '-6%': -0.06,
    }
    return slope_by_label.get(x, 0)
# In[17]:
#Declarando las funciones
def print_with_logging(str, level):
    """Echo a message to stdout and record it in the log at *level*.

    NOTE(review): the first parameter shadows the builtin ``str``; the name
    is kept so keyword callers keep working.

    :param str: message to print and log
    :param level: 'error', 'warning' or 'info'; any other value logs as info
    """
    print( str )
    # Dispatch table replaces the old elif chain, whose 'info' branch and
    # else branch were identical.
    log_fn = {'error': logging.error, 'warning': logging.warning}.get(level, logging.info)
    log_fn(str)
# In[8]:
# Declare the base data/bin paths (hard-coded for this training exercise)
# path_data = argv[1]
path_data = r"C:/Users/jhchafloque/Documents/Capacitacion Python/Data"
path_bin = r"C:/Users/jhchafloque/Documents/Capacitacion Python/Bin"
file = 'BD_CONSUMO_FUEL.csv'
process_name = 'prueba'
# In[9]:
# Pipeline stage directories under the data root
path_input = path_data + '/input/'
path_processed = path_data + '/processed/'
path_processing = path_data + '/processing/'
path_invalid = path_data + '/invalid/'
path_results = path_data + '/results/'
# In[10]:
# Timestamp used to make log/result filenames unique per run
load_date = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
# In[11]:
path_log = path_bin + '/log/'
file_log = process_name + '_' + load_date + '.log'
path_file_log = path_log + file_log
print(path_log)
print(file_log)
print(path_file_log)
# In[15]:
if not os.path.exists(path_log):
    os.makedirs(path_log, 0o775) # create the folder and assign its permissions
    print_with_logging("Se creo el directorio: "+ path_log , 'info')
# In[16]:
# Create the execution log.
# Reset any logging handlers left over from a previous notebook run, so
# basicConfig actually takes effect.
for handler in logging.root.handlers[:]:
    logging.root.removeHandler(handler)
logging.basicConfig(filename=path_file_log, filemode="w", level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# In[19]:
print_with_logging('Se creo el log del proceso :'+process_name,'info')
print_with_logging('path_bin :'+path_bin,'info')
print_with_logging('path_data :'+path_data,'info')
print_with_logging('path_input :'+path_input,'info')
print_with_logging('path_processed :'+path_processed,'info')
print_with_logging('path_log :'+path_file_log,'info')
# In[20]:
file_to_read = path_input + file
print(file_to_read)
# In[22]:
# Load the CSV; on failure, quarantine the file in the invalid/ directory.
try:
    data = pd.read_csv(file_to_read)
    print_with_logging('\nSe logro abrir el archivo' + file_to_read , 'info')
    #data.head()
    print_with_logging(data.head(),'info')
except Exception as e:
    print_with_logging('Error al intentar leer el archivo','error')
    print_with_logging('Moviendo el archivo a invalid','error')
    shutil.move(file_to_read, path_invalid + file)
    # To abort the whole run on failure, re-enable the exit below
    #sys.exit(1)
# In[23]:
# Echo the raw file contents line by line (demo of manual file reading)
try:
    with open(file_to_read, "r") as fichero:
        for linea in fichero:
            print (linea)
except:
    print('No se pudo leer el archivo')
# In[24]:
print_with_logging('Moviendo el archivo a la ruta de processing ' + path_processing + file, 'info')
shutil.move(file_to_read, path_processing + file)
# In[30]:
data.head(3)
# In[26]:
# Inspect the column dtypes of the loaded file
data.dtypes
# In[34]:
data.groupby('PENDIENTE_TOPO')['PENDIENTE_TOPO'].count()
# In[37]:
data['PENDIENTE_TOPO'].apply(func)
# In[38]:
data.head()
# In[39]:
# Add the numeric slope column derived from the '+10%'-style labels
data['PENDIENTE_TOPO_2'] = data['PENDIENTE_TOPO'].apply(func)
data.head()
# In[40]:
data.describe()
# In[ ]:
col = ['PENDIENTE_TOPO_2']
# In[41]:
data_final = data['PENDIENTE_TOPO_2']
# In[42]:
data_final.head()
# In[43]:
# Write the derived column out as a pipe-separated CSV stamped with load_date
file_out= 'consumo_fuel_out'
file_results = path_results + file_out + '_' + load_date + '.csv'
print(file_results)
# In[45]:
data_final.to_csv(file_results,index=False,sep='|',header=True)
# In[46]:
print_with_logging('Moviendo el archivo de la ruta '+ path_processing + file + ' hacia la ruta ' + path_processed + file,'info')
#print(file_to_read)
shutil.move(path_processing + file, path_processed + file)
|
from pathlib import Path
import logging
import argparse
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from siamese.data.datamodule import OmniglotDataModule
from siamese.modules.model import siamese_net
from pytorch_lightning.callbacks import QuantizationAwareTraining
if __name__ == "__main__":
    # CLI: dataset/module options plus every pytorch-lightning Trainer flag.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='./dataset/omniglot/Alphabet_of_the_Magi')
    parser.add_argument('--batch_size', type=int, default=2)
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--backbone_name', type=str, default='siamese')
    parser.add_argument('--simmilar_data_multiplier', type=int, default=20)
    parser = pl.Trainer.add_argparse_args(parser)
    hparams = parser.parse_args()
    dict_args = vars(hparams)
    datamodule = OmniglotDataModule(**dict_args)
    # datamodule.setup()
    # print('validset lenght : ',len(datamodule.validset))
    module = siamese_net(pretrained=True, encoder_digit=32, **dict_args)
    print(module)
    # Keep only the single best checkpoint by validation step loss.
    model_checkpoint = ModelCheckpoint(
        dirpath='checkpoints/',
        save_top_k=1,
        filename="siamese-{val_step_loss:.4f}",
        verbose=True,
        monitor='val_step_loss',
        mode='min',
    )
    # QAT callback: fake-quantises during training; input_compatible keeps
    # the exported model accepting float inputs.
    trainer = pl.Trainer.from_argparse_args(hparams, callbacks=[QuantizationAwareTraining(observer_type='histogram', input_compatible=True), model_checkpoint])
    # with mlflow.start_run() as run:
    trainer.fit(module, datamodule)
    trainer.save_checkpoint("checkpoints/latest.ckpt")
    metrics = trainer.logged_metrics
    vloss = metrics['val_step_loss']
    filename = f'siamese-loss{vloss:.4f}.pth'
    # NOTE(review): assumes a 'weights' directory already exists -- confirm.
    saved_filename = str(Path('weights').joinpath(filename))
    logging.info(f"Prepare to save training results to path {saved_filename}")
    torch.save(module.feature_extractor.state_dict(), saved_filename)
# coding: utf-8
import pbr.version
# Package version resolved at runtime by pbr from git/sdist metadata.
__version__ = pbr.version.VersionInfo('dopplerr').release_string()
VERSION = __version__
# NOTE(review): the logger is named "cfgtree" while the version is read for
# 'dopplerr' -- looks like a copy/paste leftover; confirm which package this is.
LOGGER_NAME = "cfgtree"
__all__ = [
    '__version__',
    'VERSION',
]
|
# Copyright 2016 Ericsson AB.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import time
from tempest import config
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
import tempest.test
from kingbird.tests.tempest.scenario import consts
from kingbird.tests.tempest.scenario.quota_management \
import sync_client
CONF = config.CONF
GLOBAL_INSTANCE_LIMIT = 10
GLOBAL_NETWORK_LIMIT = 10
GLOBAL_VOLUME_LIMIT = 10
DEFAULT_QUOTAS = consts.DEFAULT_QUOTAS
# Time to wait for sync to finish
TIME_TO_SYNC = CONF.kingbird.TIME_TO_SYNC
class BaseKingbirdTest(api_version_utils.BaseMicroversionTest,
                       tempest.test.BaseTestCase):
    """Base test case class for all Kingbird API tests."""
    @classmethod
    def skip_checks(cls):
        """No Kingbird-specific skip conditions beyond the tempest defaults."""
        super(BaseKingbirdTest, cls).skip_checks()
    def setUp(self):
        super(BaseKingbirdTest, self).setUp()
    @classmethod
    def setup_credentials(cls):
        """Authenticate once per class; cache token, keystone client and regions."""
        super(BaseKingbirdTest, cls).setup_credentials()
        session = sync_client.get_session()
        cls.auth_token = session.get_token()
        cls.keystone_client = sync_client.get_keystone_client(session)
        cls.regions = sync_client.get_regions(cls.keystone_client)
    @classmethod
    def setup_clients(cls):
        super(BaseKingbirdTest, cls).setup_clients()
    @classmethod
    def resource_setup(cls):
        """Generate a random quota-class name shared by this class's tests."""
        super(BaseKingbirdTest, cls).resource_setup()
        cls.class_name = data_utils.rand_name('kb-class')
    @classmethod
    def create_resources(cls):
        """Create the project/user pairs and OpenStack drivers used by tests."""
        # Create Project, User, flavor, subnet & network for test
        project_name = data_utils.rand_name('kb-project')
        user_name = data_utils.rand_name('kb-user')
        password = data_utils.rand_name('kb-password')
        target_project_name = data_utils.rand_name('kb-target-project')
        target_user_name = data_utils.rand_name('kb-target-user')
        cls.openstack_details = sync_client.get_openstack_drivers(
            cls.keystone_client, cls.regions[0], project_name, user_name,
            password, target_project_name, target_user_name)
        cls.openstack_drivers = cls.openstack_details['os_drivers']
        cls.session = cls.openstack_details['session']
        cls.token = cls.openstack_details['token']
        cls.target_token = cls.openstack_details['target_token']
        cls.resource_ids = sync_client.create_resources(cls.openstack_drivers)
        cls.resource_ids.update(cls.openstack_details)
        # Server ids are appended by create_instance() and used for cleanup.
        cls.resource_ids["server_ids"] = []
        cls.session = cls.openstack_details['session']
    @classmethod
    def resource_cleanup(cls):
        super(BaseKingbirdTest, cls).resource_cleanup()
    @classmethod
    def delete_resources(cls):
        """Tear down everything created by create_resources()."""
        sync_client.resource_cleanup(cls.openstack_drivers, cls.resource_ids)
    @classmethod
    def create_custom_kingbird_quota(cls, project_id, new_quota_values):
        """Set custom Kingbird quotas for *project_id*; returns the new values."""
        new_values = sync_client.create_custom_kingbird_quota(
            cls.openstack_drivers, project_id, new_quota_values)
        return new_values
    @classmethod
    def get_kingbird_quota_another_tenant(cls, target_project_id):
        """Read another tenant's Kingbird quota using the admin drivers."""
        new_values = sync_client.get_kingbird_quota_another_tenant(
            cls.openstack_drivers, target_project_id)
        return new_values
    @classmethod
    def get_own_kingbird_quota(cls, target_project_id):
        """Read the target project's own quota using its own token."""
        return_quotas = sync_client.get_own_kingbird_quota(
            cls.target_token, target_project_id)
        return return_quotas
    @classmethod
    def delete_custom_kingbird_quota(cls, target_project_id):
        """Remove any custom quota so the project reverts to defaults."""
        sync_client.delete_custom_kingbird_quota(
            cls.openstack_drivers, target_project_id)
    @classmethod
    def get_default_kingbird_quota(cls, project_id):
        """Fetch the default quota set visible to the target token."""
        return_quotas = sync_client.get_default_kingbird_quota(
            cls.target_token, project_id)
        return return_quotas
    @classmethod
    def quota_sync_for_project(cls, project_id):
        """Trigger an on-demand quota sync; returns the sync status payload."""
        sync_status = sync_client.quota_sync_for_project(
            cls.openstack_drivers, project_id)
        return sync_status
    @classmethod
    def get_quota_usage_for_project(cls, project_id):
        """Fetch Kingbird's view of quota usage for *project_id*."""
        quota_usage = sync_client.get_quota_usage_for_project(
            cls.openstack_drivers, project_id)
        return quota_usage
    @classmethod
    def create_custom_kingbird_quota_wrong_token(cls, target_project_id,
                                                 new_quota_values):
        """Negative-path helper: attempt a quota update with an invalid token."""
        new_values = sync_client.kingbird_create_quota_wrong_token(
            cls.openstack_drivers, target_project_id, new_quota_values)
        return new_values
    @classmethod
    def create_instance(cls, count=1):
        """Boot *count* instances; always records the resulting ids for cleanup.

        NOTE(review): on failure the exception args are treated as the list of
        partially created server ids -- confirm sync_client guarantees this.
        """
        try:
            server_ids = sync_client.create_instance(cls.openstack_drivers,
                                                     cls.resource_ids, count)
        except Exception as e:
            server_ids = list(e.args)
            raise
        finally:
            cls.resource_ids["server_ids"].extend(server_ids)
    @classmethod
    def delete_instance(cls):
        """Delete every instance recorded in resource_ids."""
        sync_client.delete_instance(cls.openstack_drivers, cls.resource_ids)
    @classmethod
    def calculate_quota_limits(cls, project_id):
        """Compute the expected post-sync per-region limits from global caps
        minus total usage plus each region's own usage."""
        calculated_quota_limits = collections.defaultdict(dict)
        resource_usage = sync_client.get_usage_from_os_client(
            cls.session, cls.regions, project_id)
        total_usages = cls.get_summation(resource_usage)
        for current_region in cls.regions:
            # Calculate new limit for instance count
            global_remaining_limit = GLOBAL_INSTANCE_LIMIT - \
                total_usages['instances']
            instances_limit = global_remaining_limit + resource_usage[
                current_region]['instances']
            # Calculate new limit for network count
            global_remaining_limit = GLOBAL_NETWORK_LIMIT - \
                total_usages['network']
            network_limit = global_remaining_limit + resource_usage[
                current_region]['network']
            # Calculate new limit for volume count
            global_remaining_limit = GLOBAL_VOLUME_LIMIT - \
                total_usages['volumes']
            volume_limit = global_remaining_limit + resource_usage[
                current_region]['volumes']
            calculated_quota_limits.update(
                {current_region: [instances_limit, network_limit,
                                  volume_limit]})
        return calculated_quota_limits
    @classmethod
    def get_summation(cls, regions_dict):
        # Adds resources usages from different regions
        # Counter addition sums matching resource keys across regions.
        single_region = {}
        resultant_dict = collections.Counter()
        for current_region in regions_dict:
            single_region[current_region] = collections.Counter(
                regions_dict[current_region])
            resultant_dict += single_region[current_region]
        return dict(resultant_dict)
    @classmethod
    def get_usage_manually(cls, project_id):
        """Aggregate usage across regions directly via the OS clients."""
        resource_usage = sync_client.get_usage_from_os_client(
            cls.session, cls.regions, project_id)
        resource_usage = cls.get_summation(resource_usage)
        return {'quota_set': resource_usage}
    @classmethod
    def get_actual_limits(cls, project_id):
        """Read the limits currently enforced in each region."""
        actual_limits = sync_client.get_actual_limits(
            cls.session, cls.regions, project_id)
        return actual_limits
    @classmethod
    def wait_sometime_for_sync(cls):
        """Block long enough for an asynchronous quota sync to complete."""
        time.sleep(TIME_TO_SYNC)
    @classmethod
    def set_default_quota(cls, project_id, quota_to_set):
        """Push *quota_to_set* to every region for the project."""
        sync_client.set_default_quota(
            cls.session, cls.regions, project_id, **quota_to_set)
    @classmethod
    def update_quota_for_class(cls, class_name, new_quota_values):
        """Update a quota class; returns the new values."""
        new_values = sync_client.update_quota_for_class(
            cls.openstack_drivers, class_name, new_quota_values)
        return new_values
    @classmethod
    def get_quota_for_class(cls, class_name):
        """Read a quota class definition."""
        return_quotas = sync_client.get_quota_for_class(
            cls.openstack_drivers, class_name)
        return return_quotas
    @classmethod
    def delete_quota_for_class(cls, class_name):
        """Delete a quota class; returns the deletion response."""
        deleted_quotas = sync_client.delete_quota_for_class(
            cls.openstack_drivers, class_name)
        return deleted_quotas
|
# 유저(클라이언트)들이 웹페이지를 요청할때 데이터를 보내고 싶다
# Method 방식
# GET : 데이터를 헤더(header)에 싣고 전송
# POST : 데이터는 바디(body)에 싣고 전송
# 요청패킷 = 헤더 + 바디 로 구성된다
# GET방식은 보안에 취약하다
# POST방식은 보안에 좋다 ( GET <-> POST )
#
# 데이터 추출은 request 객체를 통해서!!
#
'''
GET의 예시 : 주소 + '?' + 데이터(키=값&키=값&...)
Ex)
https://news.naver.com/main/read.nhn?
oid=018&sid1=102&aid=0004285425&mid=shm&mode=LSD&nh=20190104133020
1. 주소 : https://news.naver.com/main/read.nhn
2. 물음표 : ? <- 기준이 물음표로 그 다음부터 키/값 순서로...
3. 데이터 : oid=018&sid1=102&aid=0004285425&mid=shm&mode=LSD&nh=20190104133020
'''
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def home():
    """Root route: return a plain greeting."""
    return 'hello world'
# ~/test?name=multi
# Exercise: build a route handler that receives the data from such a request and prints it
@app.route('/test')
def test():
    """Echo the 'name' query parameter (GET data via request.args.get)."""
    value = request.args.get('name')
    print(value)
    return value
# http://127.0.0.1:5000/login?uid=abc&upw=1234
@app.route('/login')
def login():
    """Echo the uid/upw query parameters back to the client."""
    user_id = request.args.get('uid')
    user_pw = request.args.get('upw')
    return 'ID : %s, PW : %s' % (user_id,user_pw)
if __name__ == '__main__':
    # Development server with auto-reload; never enable debug=True in production.
    app.run(debug=True)
#!/usr/bin/env python
# encoding: utf-8
import re
import string
import random
import bcrypt
import hmac
from datetime import date, datetime
import time
import os
def get_model_table(str):
    """Convert a CamelCase model-class name to a snake_case table name,
    dropping a trailing 'Model' suffix if present.

    eg: UserInfo => user_info, UserInfoModel => user_info

    NOTE(review): the parameter shadows the builtin ``str``; the name is
    kept so keyword callers keep working.

    :return: str
    """
    words = re.findall(r'[A-Z][a-z]+', str)
    # Bug fix: the old code dropped the last word unconditionally, turning
    # 'UserInfo' into 'user'. Only strip the word when it is 'Model'.
    if words and words[-1] == 'Model':
        words = words[:-1]
    return "_".join(words).lower()
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Build a random string of *size* characters drawn from *chars*.

    :param size: length of the generated string
    :param chars: candidate alphabet (defaults to A-Z0-9)
    :return: the random string
    """
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def encryp_password(password):
    """Hash *password* with bcrypt using a freshly generated salt."""
    salt = bcrypt.gensalt()
    return bcrypt.hashpw(password, salt)
def verify_passwrd(password, hashed):
    """Check *password* against a stored bcrypt *hashed* value.

    Uses a constant-time comparison (never ``==``) to avoid timing attacks.

    Fixes: removed the Python-2-only debug prints that leaked the plaintext
    password and hash to stdout; collapsed the redundant if/else around a
    boolean expression.

    :return: True when the password matches, else False
    """
    return hmac.compare_digest(bcrypt.hashpw(password, hashed), hashed)
def trans_db_time(db_datetime, pattern='%Y-%m-%d %H:%M:%S'):
    """Format a DB datetime value, falling back to a placeholder when empty.

    :param db_datetime: datetime instance or preformatted string (may be falsy)
    :param pattern: strftime pattern applied to datetime instances
    """
    if db_datetime:
        return trans_db_datetime(db_datetime, pattern)
    return '较早时间'
def trans_db_datetime(dbdatetime, pattern='%Y-%m-%d %H:%M:%S'):
    """Render a datetime with *pattern*; non-datetime values pass through unchanged."""
    if not isinstance(dbdatetime, datetime):
        return dbdatetime
    return dbdatetime.strftime(pattern)
def get_datetime(timestamp=None):
    """Return local time as 'YYYY-MM-DD HH:MM'; uses now() when timestamp is None."""
    when = time.time() if timestamp is None else timestamp
    return time.strftime('%Y-%m-%d %H:%M', time.localtime(when))
def calculate_age(born):
    """Compute age in years from a birth date.

    :param born: date instance, 'YYYY-MM-DD' string, or an int (returned as-is)
    :return: age in whole years
    """
    if isinstance(born, int):
        return born
    if not isinstance(born, date):
        born = datetime.strptime(born, "%Y-%m-%d").date()
    today = date.today()
    try:
        this_years_birthday = born.replace(year=today.year)
    except ValueError:
        # Feb 29 birthday in a non-leap year: celebrate on Feb 28 instead.
        this_years_birthday = born.replace(year=today.year, day=born.day-1)
    age = today.year - born.year
    return age - 1 if this_years_birthday > today else age
def calculate_born_year(age):
    """Infer the birth year implied by *age* relative to the current year.

    :param age: age in years (int or numeric string)
    :return: birth year as an int
    """
    return date.today().year - int(age)
def member_info_finished_ratio(member_info):
    """Score profile completeness, capped at 100.

    Non-empty scalar (str/int) fields score 3 points each; non-empty list
    fields score len(list) + 3 points.
    """
    score = 0
    for value in member_info.values():
        if isinstance(value, (str, int)):
            if value:
                score += 3
        elif isinstance(value, list) and value:
            score += len(value) + 3
    return min(score, 100)
def generate_rand_code(length=6):
    """Produce a random all-digit verification-code string of *length* chars.

    Fixes a Python 3 TypeError in the old seeding code, which concatenated
    str(time.time()) directly with the raw bytes of os.urandom(); the seed
    is now built as bytes. The pointless 'first digit' special case is gone
    too -- randint(0, 9) and random.choice(string.digits) have the same
    distribution.

    NOTE(review): ``random`` is not a CSPRNG; for security-sensitive codes
    prefer the ``secrets`` module.
    """
    random.seed(str(time.time()).encode() + os.urandom(32))
    return ''.join(random.choice(string.digits) for _ in range(length))
def get_time_stamp(timestr):
    """Convert a 'YYYY-MM-DD HH:MM:SS' value to a Unix timestamp (float),
    the same representation time.time() returns; useful for computing
    intervals.

    Returns 0 when *timestr* is None.
    """
    if timestr is None:
        return 0
    parsed = time.strptime(str(timestr), '%Y-%m-%d %H:%M:%S')
    return time.mktime(parsed)
from django.db import models
# Create your models here.
class UrlString(models.Model):
    """Stores a single URL together with its creation timestamp."""
    # Set once automatically when the row is first saved.
    created_on = models.DateTimeField(auto_now_add=True)
    # The stored URL; empty string by default, up to 300 characters.
    url_path = models.URLField(max_length=300, default="")
    def __str__(self):
        # Use the URL itself as the display representation.
        return self.url_path
    def get_absolute_url(self):
        # NOTE(review): returns the literal string 'home' rather than a
        # resolved URL -- presumably a named route; confirm callers expect this.
        return 'home'
|
from django.shortcuts import render, redirect, HttpResponse
from django.views import View
from system.models import System
from authen.models import Project, UserInfo
from audits.utils import yualert, SystemAudits
from authen.utils import FormAuthen
from system.utils import yuoem, yuversion, yusystemconfig
import json
class system(View):
    """Admin-only system status page (GET renders system.html)."""
    def get(self, request):
        # Only sessions that have logged in may proceed.
        if request.session.get('is_login', None):
            loginuser = UserInfo.objects.filter(username=request.session['username']).first()
            # NOTE(review): loginip is captured but never used in this view.
            loginip = request.META['REMOTE_ADDR']
            # The page is restricted to members of the 'admin' project.
            if loginuser.project.project == 'admin':
                system = System.objects.filter(system='openeyes').first()
                oem = yuoem.yuoem()
                version = yuversion.yuversion()
                yuopera = yualert.yuoperalog(loginuser.id)
                alert = yuopera.alertGet()
                # interfaces = getcentos7ip()
                # interfaces_nunber = len(interfaces)
                systeminfoconfig = SystemAudits.systemInfoConfig()
                systemcanvas = systeminfoconfig.canvas_home()
                return render(request, 'system.html',
                              {'loginuser': loginuser, 'systemcanvas': systemcanvas,
                               'oem': oem, 'version': version,
                               'system':system, 'alert': alert,
                               })
            else:
                return redirect('/authen/login/')
        else:
            return redirect('/authen/login/')
class systemconfig(View):
    """Admin-only system configuration endpoint.

    POST actions: 'systemname' (rename host, with validation), 'theme'
    (switch UI theme), 'reload' (restart), 'shutdown' (power off).
    Responds with a JSON {status, error, data} payload.
    """
    def post(self, request):
        ret = {'status': True, 'error': None, 'data': None}
        try:
            loginuser = UserInfo.objects.filter(username=request.session['username']).first()
            # NOTE(review): loginip is captured but never used in this view.
            loginip = request.META['REMOTE_ADDR']
            if loginuser.project.project == 'admin':
                action = request.POST.get('action')
                yuopera = yualert.yuoperalog(loginuser.id)
                formau = FormAuthen.formauthen(ret)
                sysconfig = yusystemconfig.yusystemconfig()
                if action == 'systemname':
                    host = request.POST.get('host')
                    # Validate length (2-32 chars) and allowed characters;
                    # each check overwrites ret with its result.
                    ret = formau.checkminlen(host, 2, '主机名至少2个字符')
                    ret = formau.checkmaxlen(host, 32, '主机名最多32个字符')
                    ret = formau.checkinput(host, '主机名只能为汉字、数字、字母或下划线')
                    if ret['status']:
                        sysconfig.editsystem(host)
                        yuopera.alertSet('success', '您好 {0}'.format(loginuser.username), '主机名修改成功')
                elif action == 'theme':
                    theme = request.POST.get('theme')
                    sysconfig.systemtheme(theme)
                elif action == 'reload':
                    sysconfig.systemreload()
                elif action == 'shutdown':
                    sysconfig.systemshutdown()
        except Exception as e:
            # Any failure is reported to the client rather than raised.
            ret['status'] = False
            ret['error'] = 'Error : {0}'.format(e)
        return HttpResponse(json.dumps(ret))
# -*- coding: utf-8 -*-
import numpy as np
# numpy array-creation functions
# Arrays can be created with the factory functions of the numpy module.
# 1. np.zeros(shape): creates an array of the given shape with
#    every element initialised to 0
#  - the code below builds a 3-row, 2-column array
#    whose elements are all filled with 0
numpy_array_0 = np.zeros((3,2))
print("numpy_array_0 : \n{0}".format(numpy_array_0))
# 2. np.ones(shape): creates an array of the given shape with
#    every element initialised to 1
#  - the code below builds a 2x2 array
#    whose elements are all filled with 1
numpy_array_1 = np.ones((2,2))
print("numpy_array_1 : \n{0}".format(numpy_array_1))
# 3. np.full(shape, value): creates an array of the given shape with
#    every element initialised to the supplied value
#  - the code below builds a 3x3 array
#    whose elements are all filled with 5
numpy_array_2 = np.full((3,3), 5)
print("numpy_array_2 : \n{0}".format(numpy_array_2))
# 4. np.eye(number-of-rows):
#  - creates a 2-D square array with the given number of rows
#  - ones run down the main diagonal (identity matrix)
#  - the code below builds a 5x5 array
#    with 1s along the diagonal
numpy_array_3 = np.eye(5)
print("numpy_array_3 : \n{0}".format(numpy_array_3))
|
# Progress Bar part 2
# Demo: a determinate progress bar (fixed value) and an indeterminate one
# driven by Start/Stop buttons.
import tkinter
from tkinter import *
from tkinter.ttk import *
# functions
# def clicked():
# probar["value"] = 50
def clicked():
    # start() animates the indeterminate bar; argument is the step interval in ms
    probar2.start(10)
def stop():
    probar2.stop()
root = Tk()
root.title("Progress Bar")
root.geometry("500x300")
# Determinate bar fixed at 10/100.
probar1 = Progressbar(root, length=200, orient=HORIZONTAL, maximum=100, value=10, mode="determinate")
probar1.pack()
# Indeterminate bar controlled by the buttons below.
probar2 = Progressbar(root, length=200, orient=HORIZONTAL, mode="indeterminate")
probar2.pack()
btn = tkinter.Button(root, text="Click Me", command=clicked)
btn.pack()
btn2 = tkinter.Button(root, text="Stop", command=stop)
btn2.pack()
root.mainloop()
# Software Design Final Project
# Team Daydream
# V1.0 - ERIC + KIM (only OpenCV)
# Changelog
# - Added basic overlay functionality
import cv2
import numpy
import time
MAXFRAMES=60 # max number of frames (length of animation)
NUM_FRAMES=36 # number of animation frames
OVERLAYFRAMERATE = 24
fc_overlay = 0 # frame count of overlay video
video_overlay=[]
overlay_x = 0
overlay_y = 0
overlay_w = 639 #64
overlay_h = 479 #64
overlay_starttime = 0
overlay_playing = False
stableTime=50 #iterations before average is taken-- used for stabilization
# Colour bounds below are raw pixel triples; cv2 frames are BGR --
# NOTE(review): confirm these were calibrated in BGR, not RGB/HSV.
orangeLower=numpy.array([5, 50, 150])
orangeUpper=numpy.array([100, 200, 255]) #represents upper and lower bounds of the color "orange"
orangeCount=0 #used for cueing which set of images to use
greenLower=numpy.array([0,87, 39])
greenUpper=numpy.array([198,187, 139])
greenCount=1
blueLower=numpy.array([107,45,9])
blueUpper=numpy.array([207,145,109])
blueCount=2
redLower=numpy.array([0,0,67])
redUpper=numpy.array([72,84,167])
redCount=3
blackUpper=numpy.array([50, 50, 50])
blackLower=numpy.array([0,0,0]) #black
blackCount=4
# (lower, upper, index) triples scanned in priority order by the main loop.
colors=([orangeLower,orangeUpper,orangeCount],[greenLower,greenUpper,greenCount], [blueLower,blueUpper,blueCount],[redLower,redUpper,redCount], [blackLower,blackUpper, blackCount])
def loadOverlayVideo(col):
    """
    Load and generate overlay video, It's just for demo

    Populates the global ``video_overlay`` with the NUM_FRAMES 4-channel
    (colour + alpha) PNG frames of the animation for the detected colour.

    :param col: (lower_bound, upper_bound, index) colour triple; index 0-4
                selects orange/green/blue/red/black.
    """
    global video_overlay
    video_overlay = []
    # All five colour branches of the original loaded the identical frame
    # sequence from 'maybe/', so the duplication is collapsed into one loop.
    # (The original also computed a local ``mode`` -- 2 for black, 1
    # otherwise -- but never returned it, so it is dropped.)
    if col[2] in (0, 1, 2, 3, 4):
        for i in range(1, NUM_FRAMES + 1):
            # IMREAD_UNCHANGED keeps the alpha channel needed for compositing
            video_overlay.append(cv2.imread('maybe/' + str(i) + '.png', cv2.IMREAD_UNCHANGED))
def playoverlay(x, y):
    """Start the overlay animation clock.

    NOTE(review): the x/y arguments are ignored -- the overlay origin is
    hard-coded to (400, 300); confirm whether the caller's centre
    coordinates were meant to be used.
    """
    global overlay_playing, overlay_starttime, overlay_x, overlay_y
    overlay_playing = True
    # time.clock() was removed in Python 3.8; this file targets Python 2.
    overlay_starttime = time.clock()
    overlay_x = 400
    overlay_y = 300
def overlayFrame(frame):
    """Alpha-composite the current animation frame onto *frame* in place.

    Picks the animation frame from the elapsed time since playoverlay()
    and stops playback once MAXFRAMES worth of time has passed.
    """
    global overlay_playing
    centre_x=0
    centre_y=0 #centering the image
    # get frame number of overlay frame
    # time.clock() was removed in Python 3.8; this file targets Python 2.
    currTime = time.clock() - overlay_starttime
    if currTime > MAXFRAMES / OVERLAYFRAMERATE:
        overlay_playing = False
    frame_no = int(currTime * OVERLAYFRAMERATE) % NUM_FRAMES
    # composite overlay frame
    # Per colour channel: out = overlay*alpha + background*(1-alpha), using
    # the overlay's 4th (alpha) channel normalised to [0, 1].
    for c in range(0,3):
        alpha = video_overlay[frame_no][:,:,3] / 255.0
        color = video_overlay[frame_no][:,:,c] * (alpha)
        beta = frame[centre_y : centre_y + overlay_h, centre_x : centre_x + overlay_w, c] * (1.0 - alpha)
        frame[centre_y : centre_y + overlay_h, centre_x : centre_x + overlay_w, c] = color + beta
def largeRectangle(frame,c): #finds the largest rectangle created in a frame by a black object, 1st input is image, 2nd input is color that is being searched for
    """Return (minx, maxx, miny, maxy, output): the box enclosing every
    contour whose colour lies within the bounds of *c*, plus the masked image.

    :param frame: BGR camera frame
    :param c: (lower, upper, index) colour triple
    """
    mask=cv2.inRange(frame, c[0], c[1]) #creates mask of all the pixels
    output=cv2.bitwise_and(frame,frame,mask=mask) #maps mask onto image
    #getting rid of false negatives and other outliers/smoothing over larger objects
    output = cv2.cvtColor(output, cv2.COLOR_BGR2HSV)
    output = cv2.erode(output, None, iterations=2)
    output = cv2.dilate(output, None, iterations=2)
    #cv2.imshow('dilated', output)
    #conversion to find contours
    blurred = cv2.GaussianBlur(output, (7, 7), 0)
    edged = cv2.Canny(blurred, 50, 150)
    #imgray = cv2.cvtColor(edged, cv2.COLOR_BGR2GRAY)
    #ret, thresh= cv2.threshold(imgray,127,255,0)
    # find contours in the edge map
    # NOTE(review): findContours returns 3 values on OpenCV 3.x but 2 on
    # 2.x/4.x -- this unpacking assumes 2.x or 4.x.
    contours, hierarchy= cv2.findContours(edged,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    #returning/drawing the biggest rectangle---- initial conditions unreasonable/will get reset
    # Extremes start beyond the frame (assumed <= 800px) so any contour resets them.
    minx=800
    maxx=0
    miny=800
    maxy=0
    for cnt in contours:
        x, y, w, h= cv2.boundingRect(cnt)
        if minx>x:
            minx=x
        if maxx<x+w:
            maxx=x+w
        if miny>y:
            miny=y
        if maxy<y+h:
            maxy=y+h
    return minx, maxx, miny, maxy, output
# Main capture loop (Python 2 syntax: note the print statements below).
cap = cv2.VideoCapture(0)
cv2.namedWindow('cameraview')
#in order to take the average of the largest rectangle formed by the black mask/object
count=0
minxsum=0
maxxsum=0
minysum=0
maxysum=0
while(True):
    #initializing values
    minxAverage=0
    minyAverage=0
    maxxAverage=0
    maxyAverage=0
    # Capture frame-by-frame
    ret, frame = cap.read() #gets the frame
    #orangeLower=numpy.array([5, 50, 150], dtype="uint8") #uint8 necessary for this kind of thing
    #orangeUpper=numpy.array([100, 200, 255], dtype= "uint8") #represents upper and lower bounds of the color "orange"
    for col in colors: #iterating through each color
        minx, maxx, miny, maxy, output=largeRectangle(frame, col)
        #if a contour of the right color is detected...
        # (width > 20px filters out tiny noise detections)
        if(isinstance(minx, int) and maxx-minx>20):
            loadOverlayVideo(col)
            """if mode_select == 2:
                overlay_w = 40
                overlay_h = 30
            else:
                overlay_w = 64
                overlay_h = 64"""
            # Accumulate box corners over stableTime frames to stabilise jitter.
            if count<stableTime:
                minxsum+=minx
                minysum+=miny
                maxxsum+=maxx
                maxysum+=maxy
                count+=1
            elif count==stableTime:
                #taking averages
                minxAverage=minxsum/stableTime
                minyAverage=minysum/stableTime
                maxxAverage=maxxsum/stableTime
                maxyAverage=maxysum/stableTime
                #resetting values
                minxsum=0
                maxxsum=0
                minysum=0
                maxysum=0
                count=0
                #the actual rectangle calculation
                cv2.rectangle(output, (minxAverage,minyAverage),(maxxAverage-minxAverage,maxyAverage-minyAverage),(0,255,0),2)
                centre_x = (maxxAverage + minxAverage)/2.0
                centre_y = (maxyAverage + minyAverage)/2.0
                print [centre_x, centre_y]
                playoverlay(centre_x - overlay_w / 2, centre_y - overlay_h / 2)
                print col
            if overlay_playing:
                overlayFrame(frame)
            break #so it doesn't cycle through all colors
    #rect = cv2.minAreaRect(contours)
    #box = cv2.boxPoints(rect)
    #box = np.int0(box)
    #cv2.drawContours(img,[box],0,(0,0,255),2)
    #ret, frame = cap.read()
    # Display the resulting frame
    #if overlay_playing:
    #overlayFrame(frame)
    ###Detection part
    cv2.imshow('cameraview', frame)
    # Display the resulting framegit pu
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
# NOTE(review): no cap.release()/cv2.destroyAllWindows() call is visible here.
|
from networkx.algorithms.community import LFR_benchmark_graph
import networkx as nx
# LFR benchmark parameters: n nodes, degree exponent tau1,
# community-size exponent tau2, mixing parameter mu.
n = 1000
tau1 = 2.2
tau2 = 2.3
mu = 0.35
generated_sample_count = 0
num_of_samples = 5
# Keep generating until num_of_samples graphs succeed; LFR generation can
# exceed its iteration budget, which raises and is simply retried.
while generated_sample_count < num_of_samples:
    try:
        #G = LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=5, min_community=20)
        G = LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=1, min_community=20)
        # Each node stores its community set; dedupe into frozensets.
        communities = {frozenset(G.nodes[v]['community']) for v in G}
        generated_sample_count += 1
        print(communities)
    except nx.exception.ExceededMaxIterations:
        #generated_sample_count -= 1
        print("exception")
print(generated_sample_count)
|
import logging
import sys
from datetime import timedelta, datetime
from hashlib import sha1
from yaml import load, FullLoader
import lcg_generator.random as lcg
from constants import *
from query_writer.writer import sql_insert
# Init
_result_list = []
# Setup
logging.basicConfig(level=LOG_DEFAULT_LOGGING_LEVEL, filename=LOG_FILENAME, format=LOG_DEFAULT_FORMAT)
logger = logging.getLogger(APP_NAME)
logger.info(f"logger set up at {logger.name}, writing to {LOG_FILENAME}")
# NOTE(review): data_file and sql_dump are opened here and never closed in
# the visible code -- consider context managers or explicit close calls.
data_file = open(ORDER_GENERATOR_DATA_ABS_PATH)
generator_data_dict = load(data_file, Loader=FullLoader)
sql_dump = open(DUMP_FILENAME, 'w')
# Generator configuration pulled from the YAML data file.
TOTAL_ORDERS = generator_data_dict[TOTAL_ORDERS_KEY]
INITIAL_ORDER_ID = str(generator_data_dict[INITIAL_ORDER_ID_KEY])
PROVIDER_ID_LIST = generator_data_dict[PROVIDER_ID_KEY]
DIRECTION_LIST = generator_data_dict[DIRECTION_KEY]
CURRENCY_PAIR_LIST = list(generator_data_dict[CURRENCY_PAIR_KEY].items())
TAGS_LIST = generator_data_dict[TAGS_KEY]
ZONES = generator_data_dict[ZONES_KEY]
# Incremental fields
def get_initial_order_id_as_decimal(initial_order_id_string: str):
    """Parse a hexadecimal order-id string into an int.

    :raises ValueError: when the string is not valid hexadecimal
    """
    try:
        parsed = int(initial_order_id_string, 16)
    except ValueError:
        raise ValueError(f"INITIAL_ORDER_ID value {initial_order_id_string} is not in hexadecimal format")
    return parsed
def order_id_incrementer(initial_order_id: int):
    """Generator yielding monotonically increasing order ids.

    The first value is the initial id itself; each subsequent value adds a
    random increment drawn from ORDER_ID_INCREMENT_RANGE.
    """
    order_id = initial_order_id
    yield order_id
    while True:
        order_id += lcg.randint(*ORDER_ID_INCREMENT_RANGE)
        yield order_id
# Module-level id stream shared by the generation pipeline.
# (iter() on a generator object is a no-op, but harmless.)
_order_id_sequence = iter(order_id_incrementer(get_initial_order_id_as_decimal(INITIAL_ORDER_ID)))
def random_provider_id(provider_id_list: list):
    """Pick a random provider id from the configured list."""
    return lcg.choice(provider_id_list)
def random_direction(random_direction_list: list):
    """Pick a random order direction from the configured list."""
    return lcg.choice(random_direction_list)
def random_currency_pair(currency_pairs_list: list):
    """Pick a currency pair and randomly perturb its base price.

    Returns:
        [currency_name, price] with the price rounded to PX_DEFAULT_ROUND digits.
    """
    currency_pair = lcg.choice(currency_pairs_list)
    currency = currency_pair[CURRENCY_PAIR_NAME]
    px_init = currency_pair[CURRENCY_PAIR_VALUE]
    px = round(lcg.randomly_modify_value(px_init, *PX_DELTA_RANGE), PX_DEFAULT_ROUND)
    return [currency, px]
def random_vol(vol_min: int, vol_max: int):
    """Random order volume in [vol_min, vol_max]."""
    return lcg.randint(vol_min, vol_max)
def random_tags(tags_list: list):
    """Random sample of tags; the sample size itself is random."""
    return lcg.sample(tags_list, lcg.randint(*NUMBER_OF_TAGS_PER_ORDER))
def random_description():
    """Orders carry no description; kept for row-schema symmetry."""
    # Fix: removed a dead `pass` statement that preceded the return.
    return None
def random_extra_data(value):
    """SHA-1 hex digest of `value`, used as opaque extra data.

    Fix: the original called bytes(value); for an int that allocates
    `value` zero bytes (a huge allocation for large inputs) and the hash
    depended only on the length.  Hash the decimal string representation
    instead.
    """
    return sha1(str(value).encode()).hexdigest()
# Zone-specific fields
def get_zone_total_orders(zone: dict, total_orders: int):
    """Number of orders this zone contributes (its share of the total)."""
    return int(total_orders * zone[ZONE_PERCENT_OF_TOTAL_ORDERS_KEY])
def get_zone_initial_date(zone: dict):
    """Zone start date parsed from the YAML config."""
    return datetime.strptime(zone[ZONE_INITIAL_DATE_KEY], DEFAULT_DATE_FORMAT)
def get_zone_end_date(zone: dict):
    """Zone end date parsed from the YAML config."""
    return datetime.strptime(zone[ZONE_END_DATE_KEY], DEFAULT_DATE_FORMAT)
def get_zone_time_step(zone: dict, total_orders: int):
    """Average timedelta between consecutive orders inside the zone window."""
    return (get_zone_end_date(zone) - get_zone_initial_date(zone)) / get_zone_total_orders(zone, total_orders)
def date_incrementer(date: datetime, time_step: timedelta):
    """Generator of order creation dates: base step plus random jitter."""
    while True:
        date += time_step + timedelta(microseconds=lcg.randint(*TIME_DELTA_BETWEEN_STATUS))
        yield date
def random_possible_statuses(zone: dict):
    """Pick one of the zone's configured status sequences."""
    return lcg.choice(zone[ZONE_POSSIBLE_STATUSES_KEY])
# Combined fields
def generate_order_static_section() -> list:
    """Fields that stay constant across an order's whole status history."""
    return [
        random_provider_id(PROVIDER_ID_LIST),
        random_direction(DIRECTION_LIST),
        random_tags(TAGS_LIST),
        random_description(),
        random_extra_data(lcg.randint(*RANDOM_EXTRA_DATA_HASH_RANGE))
    ]
def generate_zone_specific_section(zone: dict, order_initial_date: datetime) -> list:
    """Build one row per status transition of a single order.

    Each row is [change_date, status, currency, px, total=vol*px].
    Price is re-randomized on partial fills and zeroed (with volume)
    on rejection.
    """
    dynamic_fields = []
    change_date = order_initial_date
    currency_pair = random_currency_pair(CURRENCY_PAIR_LIST)
    currency = currency_pair[CURRENCY_PAIR_NAME]
    px = currency_pair[CURRENCY_PAIR_VALUE]
    vol = random_vol(*RANDOM_VOL_RANGE)
    for status in random_possible_statuses(zone):
        if status != NEW_KEY:
            # Each later status happens a random interval after the previous one.
            change_date += timedelta(microseconds=lcg.randint(*TIME_DELTA))
        if status == PARTIALLY_FILLED_KEY:
            px = round(lcg.randomly_modify_value(px, *PX_DELTA_RANGE), PX_DEFAULT_ROUND)
        if status == REJECTED_KEY:
            px = 0
            vol = 0
        dynamic_fields.append([str(change_date), status, currency, px, round(vol * px, VOL_DEFAULT_ROUND)])
    return dynamic_fields
def generate_orders_for_zone(zone: dict):
    """Generate all rows for one zone: every order times every status row."""
    zone_orders = []
    order_id = next(_order_id_sequence)
    order_creation_date = get_zone_initial_date(zone)
    date_sequence = iter(date_incrementer(get_zone_initial_date(zone), get_zone_time_step(zone, TOTAL_ORDERS)))
    for _ in range(get_zone_total_orders(zone, TOTAL_ORDERS)):
        # Static fields are shared by all status rows of the same order.
        order_static_section = generate_order_static_section()
        for dynamic_field in generate_zone_specific_section(zone, order_creation_date):
            zone_orders.append([
                hex(order_id),
                *order_static_section,
                str(order_creation_date),
                *dynamic_field
            ])
        order_id = next(_order_id_sequence)
        order_creation_date = next(date_sequence)
    return zone_orders
def generate_orders_for_all_zones():
    """Populate the module-level _result_list with one row list per zone."""
    for zone in ZONES:
        _result_list.append(generate_orders_for_zone(ZONES[zone]))
if __name__ == '__main__':
    # Generate all orders, then serialize them as SQL INSERT statements.
    logger.info(f'{APP_NAME} started')
    logger.info('generating orders')
    try:
        generate_orders_for_all_zones()
    except Exception as e:
        logger.error(e, exc_info=DEBUG_PRINT)
        sys.exit(1)
    else:
        # Fix: corrected "succsessfully" -> "successfully" in both messages.
        logger.info(f"successfully generated {len(_result_list)} zones")
    logger.info('writing sql dump to %s' % sql_dump.name)
    try:
        for zone_rows in _result_list:
            for row in zone_rows:
                sql_insert(sql_dump, TABLE_NAME, COLUMNS, row)
    except Exception as e:
        logger.exception(e, exc_info=DEBUG_PRINT)
    else:
        logger.info(f"successfully wrote to {sql_dump.name}")
    logger.info('Done')
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from seqmod.modules import attention as attn
from seqmod.modules.custom import StackedLSTM, StackedGRU, MaxOut
class Decoder(nn.Module):
    """
    Attentional decoder for the EncoderDecoder architecture.
    Parameters:
    -----------
    num_layers: tuple (enc_num_layers, dec_num_layers)
    init_hidden: one of 'last', 'project', optional
        Whether to use the last layer or an extra projection
        of the last encoder hidden state to initialize decoder hidden state.
    add_prev: bool, whether to append last hidden state.
    """
    def __init__(self, emb_dim, hid_dim, num_layers, cell, att_dim,
                 att_type='Bahdanau', dropout=0.0, maxout=2,
                 add_prev=True, init_hidden='last'):
        # With add_prev, the previous attentional output is concatenated to
        # the target embedding at every step, so the rnn input is wider.
        in_dim = emb_dim if not add_prev else hid_dim + emb_dim
        if isinstance(num_layers, tuple):
            enc_num_layers, dec_num_layers = num_layers
        else:
            enc_num_layers, dec_num_layers = num_layers, num_layers
        self.num_layers = dec_num_layers
        self.hid_dim = hid_dim
        self.cell = cell  # 'LSTM' or 'GRU'; selects the stacked cell below
        self.add_prev = add_prev
        self.dropout = dropout
        self.init_hidden = init_hidden
        super(Decoder, self).__init__()
        # rnn layers
        stacked = StackedLSTM if cell == 'LSTM' else StackedGRU
        self.rnn_step = stacked(
            self.num_layers, in_dim, hid_dim, dropout=dropout)
        # attention network
        self.att_type = att_type
        if att_type == 'Bahdanau':
            self.attn = attn.BahdanauAttention(att_dim, hid_dim)
        elif att_type == 'Global':
            assert att_dim == hid_dim, \
                "For global, Encoder, Decoder & Attention must have same size"
            self.attn = attn.GlobalAttention(hid_dim)
        else:
            raise ValueError("unknown attention network [%s]" % att_type)
        if self.init_hidden == 'project':
            assert self.att_type != "Global", \
                "GlobalAttention doesn't support projection"
            # normally dec_hid_dim == enc_hid_dim, but if not we project
            self.project_h = nn.Linear(hid_dim * enc_num_layers,
                                       hid_dim * dec_num_layers)
            if self.cell.startswith('LSTM'):
                self.project_c = nn.Linear(hid_dim * enc_num_layers,
                                           hid_dim * dec_num_layers)
        # maxout
        self.has_maxout = False
        if bool(maxout):
            self.has_maxout = True
            self.maxout = MaxOut(att_dim + emb_dim, att_dim, maxout)
    def init_hidden_for(self, enc_hidden):
        """
        Creates a variable at decoding step 0 to be fed as init hidden step.
        Returns (h_0, c_0):
        --------
        h_0: torch.Tensor (dec_num_layers x batch x hid_dim)
        c_0: torch.Tensor (dec_num_layers x batch x hid_dim)
        """
        if self.cell.startswith('LSTM'):
            h_t, c_t = enc_hidden
        else:
            h_t = enc_hidden
        enc_num_layers, bs, hid_dim = h_t.size()
        if enc_num_layers == self.num_layers:
            # Layer counts match: reuse the encoder state directly.
            if self.cell.startswith('LSTM'):
                dec_h0, dec_c0 = enc_hidden
            else:
                dec_h0 = enc_hidden
        else:
            if self.init_hidden == 'project':
                # use a projection of last encoder hidden state
                # Flatten (layers x batch x dim) -> (batch x layers*dim),
                # project to the decoder's layer count, and reshape back.
                h_t = h_t.t().contiguous().view(-1, enc_num_layers * hid_dim)
                dec_h0 = self.project_h(h_t)
                dec_h0 = dec_h0.view(bs, self.num_layers, self.hid_dim).t()
                if self.cell.startswith('LSTM'):
                    c_t = c_t.t().contiguous()\
                               .view(-1, enc_num_layers * hid_dim)
                    dec_c0 = self.project_c(c_t)
                    dec_c0 = dec_c0.view(bs, self.num_layers, self.hid_dim).t()
            else:
                # pick last layer of last hidden state
                dec_h0 = h_t[-1, :, :].unsqueeze(0)
                if self.cell.startswith('LSTM'):
                    dec_c0 = c_t[-1, :, :].unsqueeze(0)
        if self.cell.startswith('LSTM'):
            return dec_h0, dec_c0
        else:
            return dec_h0
    def init_output_for(self, dec_hidden):
        """
        Creates a variable to be concatenated with previous target
        embedding as input for the first rnn step. This is used
        for the first decoding step when using the add_prev flag.
        Parameters:
        -----------
        hidden: tuple (h_0, c_0)
        h_0: torch.Tensor (dec_num_layers x batch x hid_dim)
        c_0: torch.Tensor (dec_num_layers x batch x hid_dim)
        Returns:
        --------
        torch.Tensor (batch x hid_dim)
        """
        if self.cell.startswith('LSTM'):
            dec_hidden = dec_hidden[0]
        # Zero vector matching (batch x hid_dim) of the hidden state.
        data = dec_hidden.data.new(dec_hidden.size(1), self.hid_dim).zero_()
        return Variable(data, requires_grad=False)
    def forward(self, prev, hidden, enc_outs,
                out=None, enc_att=None, mask=None):
        """
        Parameters:
        -----------
        prev: torch.Tensor (batch x emb_dim),
            Previously decoded output.
        hidden: Used to seed the initial hidden state of the decoder.
            h_t: (enc_num_layers x batch x hid_dim)
            c_t: (enc_num_layers x batch x hid_dim)
        enc_outs: torch.Tensor (seq_len x batch x enc_hid_dim),
            Output of the encoder at the last layer for all encoding steps.
        """
        if self.add_prev:
            # include last out as input for the prediction of the next item
            # NOTE(review): `out or ...` relies on tensor truthiness, which
            # is ambiguous for multi-element tensors on modern PyTorch;
            # `out if out is not None else ...` would be the safe form --
            # confirm before changing.
            inp = torch.cat([prev, out or self.init_output_for(hidden)], 1)
        else:
            inp = prev
        out, hidden = self.rnn_step(inp, hidden)
        # out (batch x hid_dim), att_weight (batch x seq_len)
        out, att_weight = self.attn(out, enc_outs, enc_att=enc_att, mask=mask)
        out = F.dropout(out, p=self.dropout, training=self.training)
        if self.has_maxout:
            out = self.maxout(torch.cat([out, prev], 1))
        return out, hidden, att_weight
|
import datetime as dt


def count_first_sundays(start_year=1901, end_year=2000):
    """Count the month-firsts falling on a Sunday in [start_year, end_year].

    Project Euler 19.  Instead of walking every day of the century, check
    only the first of each month; weekday() == 6 means Sunday.
    """
    total = 0
    for year in range(start_year, end_year + 1):
        for month in range(1, 13):
            if dt.date(year, month, 1).weekday() == 6:
                total += 1
    return total


if __name__ == '__main__':
    # Fix: the original `print total` is Python 2 syntax and a SyntaxError
    # under Python 3.
    print(count_first_sundays())
import sys
from sqlalchemy import Column, Integer, String, Float, DateTime
from sqlalchemy.ext.declarative import declarative_base
from setting import Base
from setting import ENGINE
class Mitemset(Base):
    """Item-set table model.

    (Original docstring was Japanese for "user model" -- likely copied
    from another model; the columns describe a training item set.)
    """
    __tablename__ = 'mitemset'
    setno = Column(Integer, primary_key=True)
    # NOTE(review): "traning" spelling kept -- renaming would change the schema.
    traning1 = Column(Integer)
    traning2 = Column(Integer)
    traning3 = Column(Integer)
    createdate = Column(DateTime)
    updatedate = Column(DateTime)
def main(args):
    """Create all declared tables on the configured engine.

    (Docstring translated from the original Japanese "main function".)
    """
    Base.metadata.create_all(bind=ENGINE)
if __name__ == "__main__":
    main(sys.argv)
import socket
import threading
bind_ip = "0.0.0.0"  # listen on all interfaces
bind_port = 10000
#create a socket object
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#bind to address and ip
server.bind((bind_ip, bind_port))
#server listening to tcp connections, with a backlog of 5 queued clients
server.listen(5)
print("[*] Listening on %s: %d" % (bind_ip, bind_port))
def handle_client(client_socket):
    """Per-connection worker: log what the client sent, then ACK and close."""
    incoming = client_socket.recv(1024)
    print("[*] Received: %s" % incoming)
    # Reply with a fixed acknowledgement packet.
    client_socket.send("ACK!".encode(encoding='utf-8'))
    client_socket.close()
# Accept loop: one handler thread per incoming connection.
while True:
    client, addr = server.accept()
    print("[*] Accepted connection from: %s:%d" % (addr[0], addr[1]))
    #client thread to handle incoming data
    client_handler = threading.Thread(target=handle_client, args=(client,))
    client_handler.start()
    # Fix: removed `request.decode()` -- `request` is local to
    # handle_client, so referencing it here raised NameError on the first
    # accepted connection.
# Unreachable while the loop runs; kept in case an exit condition is added.
server.close()
#-*- coding: utf-8 -*-
def logging_sources(request):
    """Return (client IP, user) extracted from a Django-style request.

    The IP comes from the REMOTE_ADDR meta header, defaulting to ''.
    """
    return request.META.get('REMOTE_ADDR', ''), request.user
import numpy as np
import cv2
# Rectangle-selection state shared with the mouse callback.
start = [0,0]
end = [0,0]
cl1 = True  # True -> next click sets `start`; False -> next click sets `end`
def theloop(event,x,y,flags,param):
    # Mouse callback: first click records the top-left corner, second click
    # records the bottom-right corner and prints per-channel min/max of the
    # global `hsv` image inside that rectangle.
    # NOTE(review): Python 2 print statements; also assumes end >= start on
    # both axes (a right-to-left drag yields empty ranges) -- confirm.
    global cl1, start, end
    if event == cv2.EVENT_LBUTTONDOWN:
        if cl1:
            start = [x,y]
            cl1 = False
            print 'st : ', start
        else:
            end = [x,y]
            cl1 = True
            print 'end: ', end
            minR = [255,255,255]
            maxR = [ 0, 0, 0]
            # Scan the selected region; hsv is indexed [row, col, channel].
            for i in range(start[0],end[0]):
                for j in range(start[1],end[1]):
                    for c in range(0,3):
                        if hsv[j,i,c] > maxR[c]:
                            maxR[c] = hsv[j,i,c]
                        if hsv[j,i,c] < minR[c]:
                            minR[c] = hsv[j,i,c]
            print minR, maxR
# Show the HSV-converted image and sample channel ranges via mouse clicks.
cv2.namedWindow('image', cv2.WINDOW_AUTOSIZE)
cv2.setMouseCallback('image',theloop)
image = cv2.imread('data/1.jpg')
# `hsv` is read by the mouse callback above as a global.
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
cv2.imshow('image',hsv)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
# Interactive script: sum the geometric-like series x^0 + x^1 + ... + x^n.
x=float(input("Enter the Base:"))
n=int(input("Enter the Exponent:"))
s=0
for a in range(n+1):
    s=s+x**a
print("Sum of Series=",s)
|
# Copyright 2016 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
from cliff import command
from cliff import lister
from cliff import show
import six
class CommandMeta(abc.ABCMeta):
    """Metaclass that injects a per-class `log` attribute.

    The logger is named `<module>.<class>` so log output pinpoints the
    command that emitted it; an explicitly provided `log` is kept as-is.
    """
    def __new__(mcs, name, bases, cls_dict):
        if 'log' not in cls_dict:
            logger_name = '.'.join((cls_dict['__module__'], name))
            cls_dict['log'] = logging.getLogger(logger_name)
        return super(CommandMeta, mcs).__new__(mcs, name, bases, cls_dict)
@six.add_metaclass(CommandMeta)
class Command(command.Command):
    """Base cliff command; logs every invocation at debug level."""
    def run(self, parsed_args):
        self.log.debug('run(%s)', parsed_args)
        return super(Command, self).run(parsed_args)
class Lister(Command, lister.Lister):
    """Command that renders a list of rows."""
    pass
class ShowOne(Command, show.ShowOne):
    """Command that renders a single object's properties."""
    pass
|
from PySide.QtCore import*
from PySide.QtGui import*
import sys
import updateBox
class upDialog(QDialog, updateBox.Ui_Dialog):
    """Dialog wrapper around the Qt-Designer-generated Ui_Dialog."""
    def __init__ (self, parent = None):
        super(upDialog, self).__init__(parent)
        # Build the widgets defined in the .ui file onto this dialog.
        self.setupUi(self)
|
# -*- coding: utf-8 -*-
import os
import sys
# One day's weather reading: date plus temperature/humidity extremes.
class detailInfo:
    """Record of a single day's max/min temperature and humidity.

    All values are kept as strings, exactly as read from the CSV files.
    """
    def __init__(self, date, maxtemp, mintemp, maxhumid, minhumid):
        (self.date, self.maxTemp, self.minTemp,
         self.maxHumid, self.minHumid) = (date, maxtemp, mintemp,
                                          maxhumid, minhumid)
# contains the logic if user selects option one
#that is generation of Annual report
def option1 ():
    """Print one line per year with the year's extreme readings.

    Reads the module-level `yearlist` and `my_dict`.
    NOTE(review): extremes are found by *string* sorting of zero-padded
    values, which only orders correctly for same-width non-negative values.
    """
    print("Year MaxTemp MinTemp MaxHumid MinHumid")
    print("-----------------------------------------------------")
    # going through every year
    for year in yearlist:
        #getting list againt year from dictionary
        list_cur= my_dict[year]
        #Sorting list on Max Temp
        list_cur.sort( key=lambda x: x.maxTemp, reverse=True)
        #this loop will find the true value because there are some dummy values
        #this wont be inefficient because value will be found in less than 10 tries
        pos=0
        while True:
            # skip CSV header cells and blank entries that sort to the front
            if (list_cur[pos].maxTemp == "Max TemperatureC") or (list_cur[pos].maxTemp == ""):
                pos = pos +1
            else:
                maxT= list_cur[pos].maxTemp
                break
        #Sorting list on Min Temp
        list_cur.sort( key=lambda x: x.minTemp, reverse=False)
        pos=0
        #same upper Loop
        while True:
            if (list_cur[pos].minTemp == "Min TemperatureC") or (list_cur[pos].minTemp == ""):
                pos = pos +1
            else:
                minT= list_cur[pos].minTemp
                break
        #Sorting list on Min humidity
        list_cur.sort( key=lambda x: x.minHumid, reverse=False)
        pos=0
        #same loop
        while True:
            if (list_cur[pos].minHumid == " Min Humidity") or (list_cur[pos].minHumid == ""):
                pos = pos +1
            else:
                minH= list_cur[pos].minHumid
                break
        #Sorting list on Max humidity
        list_cur.sort( key=lambda x: x.maxHumid, reverse=True)
        pos=0
        #same loop
        while True:
            if (list_cur[pos].maxHumid == "Max Humidity") or (list_cur[pos].maxHumid == ""):
                pos = pos +1
            else:
                maxH= list_cur[pos].maxHumid
                break
        print(year +" "+ maxT +" "+ minT +" "+maxH +" "+minH)
# contains the logic if user selects option two
#that is generation of Yearly Max temp report
def option2():
    """Print, per year, the date and value of the hottest reading."""
    print("Year Date Max temp")
    print("-------------------------------")
    # going through every year
    for year in yearlist:
        #getting list againt year from dictionary
        list_cur= my_dict[year]
        #Sorting list on Max Temp
        list_cur.sort( key=lambda x: x.maxTemp, reverse=True)
        #this loop will find the true value because there are some dummy values
        #this wont be inefficient because value will be found in less than 10 tries
        pos=0
        while True:
            # skip CSV header cells and blank entries
            if (list_cur[pos].maxTemp == "Max TemperatureC") or (list_cur[pos].maxTemp == ""):
                pos = pos +1
            else:
                print(year +" "+ list_cur[pos].date +" "+ list_cur[pos].maxTemp )
                break
# contains the logic if user selects option two
#that is generation of Yearly Min temp report
def option3():
    """Print, per year, the date and value of the coldest reading."""
    print("Year Date Min Temp")
    print("-----------------------------------")
    # going through every year
    for year in yearlist:
        #getting list againt year from dictionary
        list_cur= my_dict[year]
        #Sorting list on Max Temp
        list_cur.sort( key=lambda x: x.minTemp, reverse=False)
        #this loop will find the true value because there are some dummy values
        #this wont be inefficient because value will be found in less than 10 tries
        pos=0
        while True:
            if (list_cur[pos].minTemp == "Min TemperatureC") or (list_cur[pos].minTemp == ""):
                pos = pos +1
            else:
                # NOTE(review): minT is assigned but never used here.
                minT= list_cur[pos].minTemp
                print(year +" "+ list_cur[pos].date +" "+ list_cur[pos].minTemp )
                break
# instructions for running the code file
def instructions():
    # Print CLI usage help.
    # NOTE(review): mixes Python 2 print statements with print() calls,
    # so this file only runs under Python 2.
    print " "
    print ("Usage: weatherApp <report#> <data_dir>")
    print " "
    print ("[Report #]")
    print ("1 for Annual Max/Min Temperature")
    print "2 for Hottest day of each year"
    print "3 for coldest day of each year"
    print " "
    print "[data_dir]"
    print "Path of Directory containing weather data files"
    print " "
# dictionary data struct for having list of data against year
# i-e [key:value]-> [year: listofusrecords]
my_dict={}
# simple list of years to avoid hard coding of years
yearlist=[]
# argument list check: exactly <report#> and <data_dir> are required
if len(sys.argv)!= 3 :
    instructions();
    sys.exit()
try:
    option=int(sys.argv[1])
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt;
    # ValueError is the case actually expected here.
    instructions();
    sys.exit()
path=sys.argv[2]
#path = '/home/mateenahmeed/Downloads/weatherdata'
# checking of Directory path exists or not
if not os.path.exists(path):
    print("Directory path not found : " + path)
    instructions();
    sys.exit();
# Iterate through every data file in the directory (one file per month).
for filename in os.listdir(path):
    # File names look like <city>_weather_<year>_<month>; token 2 is the year.
    tokens = filename.split("_")
    year = tokens[2]
    # Accumulate this file's records into the list registered for its year.
    list1 = []
    if year in my_dict:
        list1 = my_dict[year]
    else:
        my_dict[year] = list1
        yearlist.append(year)
    file = open(path + "/" + filename, 'r')
    for line in file:
        details = line.split(",")
        # Skip trailing metadata lines, which contain no commas.
        if len(details) > 1:
            date = details[0]
            maxT = details[1]
            # Values are later compared as strings, so pad single digits with
            # a leading zero to keep lexicographic order == numeric order.
            if len(maxT) == 1:
                maxT = "0" + maxT
            minT = details[3]
            if len(minT) == 1:
                minT = "0" + minT
            maxH = details[7]
            if len(maxH) == 1:
                # Fix: was `maxH = "0" + maxT` (copy-paste bug that replaced
                # a single-digit max humidity with a padded temperature).
                maxH = "0" + maxH
            minH = details[9]
            if len(minH) == 1:
                minH = "0" + minH
            # Add each record to the year's list.
            list1.append(detailInfo(date, maxT, minT, maxH, minH))
    file.close()
    # Re-register the (possibly new) list for this year.
    my_dict[year] = list1
# Sort years so the reports print chronologically.
yearlist.sort()
# Dispatch to the report chosen on the command line.
if option == 1:
    option1()  # annual report
if option == 2:
    option2()  # max temp report
if option == 3:
    option3()  # min temp report
|
class Solution(object):
    def searchMatrix(self, matrix, target):
        """
        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool

        Staircase search from the top-right corner: each comparison
        discards either a row or a column, giving O(rows + cols) instead
        of the original O(rows * cols) scan of every element.  Relies on
        the problem's contract that rows are sorted left-to-right and
        columns top-to-bottom; also handles empty matrices.
        """
        if not matrix or not matrix[0]:
            return False
        row, col = 0, len(matrix[0]) - 1
        while row < len(matrix) and col >= 0:
            value = matrix[row][col]
            if value == target:
                return True
            if value > target:
                col -= 1  # everything below in this column is even larger
            else:
                row += 1  # everything left in this row is even smaller
        return False
if __name__ == '__main__':
    # Demo: 200 is absent from the sample grid, so this prints False.
    grid = [
        [1, 4, 7, 11, 15],
        [2, 5, 8, 12, 19],
        [3, 6, 9, 16, 22],
        [10, 13, 14, 17, 24],
        [18, 21, 23, 26, 30],
    ]
    print(Solution().searchMatrix(grid, 200))
|
import pathlib
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from model import MyModel
test_path = pathlib.Path('data/test')
# Class names are the immediate subdirectories of the test folder.
# NOTE(review): this touches the filesystem at import time.
CLASSES = np.array([dire.name for dire in test_path.iterdir()])
BATCH_SIZE = 64
IMAGE_HEIGHT = 112
IMAGE_WIDTH = 112
def get_label(file_path):
    """Map a file path to its integer class index via the parent dir name."""
    path_parts = tf.strings.split(file_path, '/')
    # The parent directory name is the class label; locate it in CLASSES.
    return tf.where(CLASSES == path_parts[-2])[0][0]
def decode_img(img):
    """Decode a JPEG byte string into a float32 tensor of the target size."""
    decoded = tf.image.decode_jpeg(img, channels=3)
    as_float = tf.image.convert_image_dtype(decoded, tf.float32)
    return tf.image.resize(as_float, [IMAGE_WIDTH, IMAGE_HEIGHT])
def process_path(file_path):
    """Load one labelled example as (1 x H x W x 3 image batch, label)."""
    label = get_label(file_path)
    raw = tf.io.read_file(file_path)
    image = decode_img(raw)
    # Add a leading batch dimension so the model can be called directly.
    return tf.expand_dims(image, 0), label
# Build the labelled test dataset from the directory listing.
test_list_ds = tf.data.Dataset.list_files(str(test_path / '*/*'))
test_labeled_ds = test_list_ds.map(
    process_path,
    num_parallel_calls=tf.data.experimental.AUTOTUNE
)
model = MyModel()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# Restore the most recent training checkpoint, if any.
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
manager = tf.train.CheckpointManager(
    checkpoint, directory="checkpoints", max_to_keep=5
)
checkpoint.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
    print("Restored from {}".format(manager.latest_checkpoint))
else:
    print("Initializing from scratch.")
# Evaluate binary accuracy one example at a time (batch dim added in
# process_path).
test_accuracy = tf.keras.metrics.BinaryAccuracy()
for image, label in test_labeled_ds:
    prediction = model(image, training=False)
    test_accuracy(label, prediction)
print("Accuracy: {}".format(test_accuracy.result()*100))
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Basic Image Process."""
from absl import logging
import numpy as np
import tensorflow as tf
from deep_representation_one_class.data.augment import apply_augment
from deep_representation_one_class.data.augment import compose_augment_seq
class BasicImageProcess():
  """Basic Image Process.

  Wraps raw (image, label, image_id) tensors into a tf.data input
  pipeline with augmentation, shuffling, batching and prefetching.
  """

  def __init__(self, data, input_shape=(256, 256, 3)):
    # `data` is sliced by tf.data.Dataset.from_tensor_slices in make_dataset.
    self.input_shape = input_shape
    self.data = data

  def image_normalize(self, image, do_mean=True, do_std=True):
    """Normalize an image with the standard ImageNet channel means/stds."""
    channel_means = [0.485, 0.456, 0.406]
    channel_stds = [0.229, 0.224, 0.225]
    if do_mean:
      means = tf.broadcast_to(channel_means, tf.shape(image))
      image = image - means
    if do_std:
      stds = tf.broadcast_to(channel_stds, tf.shape(image))
      image = image / stds
    return image

  def preprocess_image(self, image, dtype=tf.float32, aug_ops_list=None):
    """Scale to [0, 1], reshape to input_shape and apply augmentations."""
    image = tf.cast(image, dtype) / 255.0
    image = tf.reshape(
        image, shape=tf.stack(self.input_shape))  # may become problematic
    images = apply_augment(image, ops_list=aug_ops_list)
    return images

  def parse_record_fn(self, raw_record, is_training, dtype, aug_list=None):
    """Parse one (image, label, image_id) record into augmented tensors."""
    # Build one augmentation pipeline per configured sequence.
    aug_ops_list = [
        compose_augment_seq(aug_type, is_training=is_training)
        for aug_type in aug_list
    ]
    image, label, image_id = raw_record
    images = self.preprocess_image(
        image, dtype=dtype, aug_ops_list=aug_ops_list)
    label = tf.cast(tf.reshape(label, shape=[1]), dtype=tf.float32)
    # `images` is a tuple of augmented views; append label and id.
    return images + (label, image_id)

  def process_record_dataset(self,
                             dataset,
                             aug_list,
                             is_training,
                             batch_size,
                             shuffle_buffer,
                             num_batch_per_epoch=1,
                             dtype=tf.float32,
                             datasets_num_private_threads=None,
                             force_augment=False,
                             drop_remainder=False):
    """Shuffle/augment/batch a dataset; returns [dataset, batches_per_epoch]."""
    # Defines a specific size thread pool for tf.data operations.
    if datasets_num_private_threads:
      options = tf.data.Options()
      options.experimental_threading.private_threadpool_size = (
          datasets_num_private_threads)
      dataset = dataset.with_options(options)
      logging.info('datasets_num_private_threads: %s',
                   datasets_num_private_threads)
    if is_training:
      # Repeat tiny datasets so at least one full batch can be drawn.
      num_data = len([1 for _ in dataset.enumerate()])
      # Fix: np.int was removed in NumPy 1.24; use the builtin int instead.
      multiplier = np.maximum(1, int(np.ceil(batch_size / num_data)))
      if multiplier > 1:
        dataset = dataset.repeat(multiplier)
      # Shuffles records before repeating to respect epoch boundaries.
      dataset = dataset.shuffle(
          buffer_size=shuffle_buffer, reshuffle_each_iteration=True)
    # Parses the raw records into images and labels.
    dataset = dataset.map(
        lambda *args: self.parse_record_fn(
            args,
            is_training=is_training or force_augment,
            dtype=dtype,
            aug_list=aug_list),
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
    if not is_training:
      # Evaluation walks the data exactly once; count the batches.
      num_batch_per_epoch = len([1 for _ in dataset.enumerate()])
    else:
      if num_batch_per_epoch <= 0:
        num_batch_per_epoch = len([1 for _ in dataset.enumerate()])
      dataset = dataset.repeat()
    # Prefetch.
    dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return [dataset, num_batch_per_epoch]

  def input_fn(self,
               is_training,
               batch_size,
               aug_list=None,
               num_batch_per_epoch=1,
               dtype=tf.float32,
               datasets_num_private_threads=None,
               input_context=None,
               force_augment=False,
               training_dataset_cache=False):
    """Creates an input function from the dataset."""
    dataset = self.make_dataset(
        is_training=is_training, input_context=input_context)
    if is_training and training_dataset_cache:
      # Improve training performance when training data is in remote storage and
      # can fit into worker memory.
      dataset = dataset.cache()
    # Aug_list should be a list of list of tuples; wrap a single sequence.
    if not isinstance(aug_list, list):
      raise TypeError('augmentation list should be a list')
    if isinstance(aug_list, list):
      if not isinstance(aug_list[0], list):
        aug_list = [aug_list]
    return self.process_record_dataset(
        dataset=dataset,
        aug_list=aug_list,
        is_training=is_training,
        batch_size=batch_size,
        shuffle_buffer=1000,
        num_batch_per_epoch=num_batch_per_epoch,
        dtype=dtype,
        datasets_num_private_threads=datasets_num_private_threads,
        force_augment=force_augment,
        drop_remainder=True if is_training else False)

  def make_dataset(self, is_training, input_context=None):
    """Makes a dataset, sharded per input pipeline when distributed."""
    dataset = tf.data.Dataset.from_tensor_slices(self.data)
    if input_context:
      logging.info(
          'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',
          input_context.input_pipeline_id, input_context.num_input_pipelines)
      dataset = dataset.shard(input_context.num_input_pipelines,
                              input_context.input_pipeline_id)
    if is_training:
      # Shuffle the input files
      dataset = dataset.shuffle(buffer_size=len(self.data[0]))
    return dataset
|
import os
import importlib
# Select the dataset backend via the DATASET env var (default "ibm") and
# re-export that module's load_dataset function.
dataset = os.environ.get('DATASET', 'ibm')
load_dataset = (importlib
                .import_module('explainer.data.' + dataset)
                .load_dataset)
|
from selenium import webdriver
import time

# Requires chromedriver at this path and a manual WhatsApp Web QR login
# during the sleep below.
chrome_browser = webdriver.Chrome(executable_path='C:/chromedriver')
chrome_browser.get('https://web.whatsapp.com/')
time.sleep(15)

# Open the chat whose title matches user_name.
user_name = 'Whatsapp bot'
user = chrome_browser.find_element_by_xpath('//span[@title="{}"]'.format(user_name))
user.click()

# Fix: both XPath attribute tests were missing '=' (@class"..." is invalid
# XPath) and the send_keys argument was an unquoted bareword (SyntaxError).
message_box = chrome_browser.find_element_by_xpath('//div[@class="DuUXI"]')
message_box.send_keys('Hey there,I am your Whatsapp bot')
send_button = chrome_browser.find_element_by_xpath('//button[@class="_3M-N-"]')
send_button.click()
#!/usr/bin/env.python
# -*- coding:utf-8 -*-
# Read three ints; each will be rendered as a two-digit base-13 value.
a, b, c = map(int, input().split())
# Digits above 9 in base 13 are written as letters.
d = {10:'A',11:'B',12:'C'}
def cal(x):
    """Convert a non-negative int to base 13 ('A'=10..'C'=12); 0 -> ''."""
    digits = []
    while x > 0:
        x, rem = divmod(x, 13)
        digits.append(d[rem] if rem > 9 else str(rem))
    return ''.join(reversed(digits))
a = cal(a)
b = cal(b)
c = cal(c)
# Zero-pad each component to width 2 and print as one '#XXYYZZ' string.
print('#{:0>2}{:0>2}{:0>2}'.format(a,b,c))
|
"""
import modulo
modulo.fun()
modulo.fun2()
import modulo as mod
mod.fun()
mod.fun2()
"""
"""
from modulo import fun, fun2
fun()
fun2()
"""
from modulo import *
fun()
fun2() |
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
from shutil import copyfile
from ....tokens.fast import PreTrainedTokenizerFast
from ..pegasus import Tokenizer as Pegasus
SPIECE_UNDERLINE = "▁"  # sentencepiece word-boundary marker
# Expected vocab file names inside a saved tokenizer directory.
VOCAB_FS = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
# Download URLs for the pretrained pegasus-xsum checkpoint files.
VOCAB_MAP = {
    "vocab_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"
    },
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
# Maximum model input length, in tokens.
INPUT_CAPS = {
    "google/pegasus-xsum": 512,
}
class Tokenizer(PreTrainedTokenizerFast):
    """Fast Pegasus tokenizer backed by a prebuilt tokenizer.json.

    Reserves the `offset` lowest ids for special tokens: pad/eos plus the
    <mask_*> and <unk_i> sentinel tokens used by Pegasus pretraining.
    """
    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    slow_tokenizer_class = Pegasus
    model_input_names = ["input_ids", "mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        msk="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kw,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            assert isinstance(additional_special_tokens, list)
            # Prepend the sentence-mask token unless the caller already
            # provided it, then pad with <unk_i> fillers so the special
            # block always spans `offset - 1` entries.
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            additional_special_tokens_extended += [
                f"<unk_{i}>"
                for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            # Duplicates would shift every id after them.
            if len(set(additional_special_tokens_extended)) != len(
                additional_special_tokens_extended
            ):
                raise ValueError(
                    f"Please make sure that the provided additional_special_tokens do not contain an incorrectly shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad=pad,
            eos=eos,
            unk=unk,
            msk=msk,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kw,
        )
        self.vocab_file = vocab_file
        # Saving the slow (sentencepiece) vocab needs the original file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        """Return 1 for special-token ids, 0 otherwise (unk counts as normal)."""
        all_special_ids = set(self.all_special_ids)
        all_special_ids.remove(self.unk_token_id)
        # Special ids are expected to be a contiguous low range.
        assert all_special_ids == set(range(len(self.additional_special_tokens) + 3))
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self,
        toks_0,
        toks_1=None,
        has_specials=False,
    ):
        # The trailing [1] accounts for the EOS appended by
        # build_inputs_with_special_tokens.
        if has_specials:
            return self._special_token_mask(toks_0)
        elif toks_1 is None:
            return self._special_token_mask(toks_0) + [1]
        else:
            return self._special_token_mask(toks_0 + toks_1) + [1]
    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """Append EOS (pairs are simply concatenated before the EOS)."""
        if toks_1 is None:
            return toks_0 + [self.EOS]
        return toks_0 + toks_1 + [self.EOS]
    def save_vocabulary(self, dir, pre=None):
        """Copy the sentencepiece model into `dir`; returns the saved path."""
        assert self.can_save_slow_tokenizer
        path = os.path.join(dir, (pre + "-" if pre else "") + VOCAB_FS["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(path):
            copyfile(self.vocab_file, path)
        return (path,)
|
#!/usr/bin/env python
import sys
import numpy
import catmaid
import vispy
import vispy.scene
import vispy.app
def renderNeurons(neurons, colors='random', w=2, aa=False):
    """Render catmaid neurons as 3D line skeletons in a vispy fly-cam scene.

    Parameters:
        neurons: iterable of catmaid.Neuron objects
        colors: 'random' -> random RGB per neuron; anything else -> green
        w: line width
        aa: antialiasing flag
    """
    print("Rendering...")
    canvas = vispy.scene.SceneCanvas(
        keys='interactive', show=True,
        title='skeletons',
        px_scale=1)
    view = canvas.central_widget.add_view()
    view.camera = 'fly'
    view.camera.aspect = 1
    for n in neurons:
        if colors == 'random':
            RGB = numpy.random.rand(3)
            c = RGB[0], RGB[1], RGB[2], 1.
        else:
            c = 'green'
        # Node positions and edge index pairs for this skeleton.
        ns, es = catmaid.algorithms.morphology.node_edge_array(n)
        if len(ns) == 0 or len(es) == 0:
            print("Skipping %s either 0 nodes or edges" % (n.skeleton_id, ))
            continue
        print("Adding: %s [%i, %i]" % (n.skeleton_id, len(ns), len(es)))
        pre_lines = vispy.scene.Line(
            pos=ns, connect=es,
            antialias=aa, width=w,
            method='gl', color=c, parent=view.scene)
    print("Showing...")
    view.camera.set_range()
    # Blocks until the window is closed.
    vispy.app.run()
if __name__ == "__main__":
skel_fns = sys.argv[1:]
if len(skel_fns) == 0:
raise Exception("Must provide skeleton filenames")
print("Loading skeletons: %s" % (skel_fns, ))
neurons = [catmaid.Neuron(catmaid.neuron.load_skeleton(fn))
for fn in skel_fns]
renderNeurons(neurons)
|
from django.contrib import admin
from .models import Service
# Register your models here.
class ServiceAdmin(admin.ModelAdmin):
    """Admin configuration for Service; audit timestamps are display-only."""
    readonly_fields=('created_at','updated_at')
admin.site.register(Service,ServiceAdmin)
#!/usr/local/python
# -*- coding:utf-8 -*-
# Python 2 demo of Redis blocking list operations (brpoplpush / blpop).
import redis
r = redis.Redis(host='localhost', port=6379)
r.rpush('list', 'item1')
r.rpush('list', 'item2')
r.rpush('list2', 'item3')
# Pop item3 from the tail of list2 and push it onto the head of list
print r.brpoplpush('list2', 'list', 1)
# list2 is now empty: this blocks for the 1s timeout, then returns None.
print r.brpoplpush('list2', 'list', 1)
print r.lrange('list', 0, -1)
# Pop item2 from the tail of list and push it onto the head of list2
print r.brpoplpush('list', 'list2', 1)
# blpop scans the given lists in order and pops from the first non-empty one
print r.blpop(['list', 'list2'], 1)
print r.blpop(['list', 'list2'], 1)
print r.blpop(['list', 'list2'], 1)
# Both lists drained: blocks for the timeout and returns None.
print r.blpop(['list', 'list2'], 1)
|
import random, datetime
from django.shortcuts import (
render, reverse, HttpResponse, redirect
)
from django.contrib import auth
from django.http import JsonResponse
from django.views import View
from django.db.models import Q, Count
from django.forms import modelformset_factory
from PIL import (
Image, ImageDraw, ImageFont
)
from io import BytesIO
from Crm.models import (
Customer, ConsultRecord, UserInfo, ClassStudyRecord, StudentStudyRecord
)
from Crm.utils import myfunction
from Crm.utils.mypagination import MyPagination
from Crm.crm_form import (
UserRegModelForm, CustomerModelForm, ConsultRecordModelForm, UserReg, ClassStudyRecordModelForm, StudentStudyRecordModelFormSet
)
from rbac.services.initial_permission import initial_session
from rbac.models import User
class LoginView(View):
    """Login view: GET renders the page; POST validates the captcha and the
    credentials and answers with JSON."""
    def get(self, request):
        return render(request, 'login.html')
    def post(self, request):
        # Where to go after login; defaults to the index page.
        next_url = request.GET.get('next', '/index/')
        res = {'code': '', 'user': '', 'error_msg': '', 'next_url': next_url}
        username = request.POST.get('username')
        password = request.POST.get('password')
        valid_code = request.POST.get('validcode')
        check_code = request.session.get('check_code')
        if valid_code.upper() == check_code.upper():
            # Captcha OK: verify username/password with django's auth component.
            user_obj = auth.authenticate(username=username, password=password)
            if user_obj:
                res['code'] = 1000
                res['user_info'] = username
                # Persist the login state (stores the session id; internally
                # request.session[SESSION_KEY] = user pk).
                auth.login(request, user_obj)
                # The CRM user table is one-to-one with the embedded rbac user
                # table; permission checks need the rbac user object.
                n_user = user_obj.user
                # Initialise the session with this user's permissions.
                initial_session(n_user, request)
            else:
                res['code'] = 1001
                res['error_msg'] = '用户名或密码错误!'
        else:
            res['code'] = 1002
            res['error_msg'] = '验证码错误!'
        # JSON response: serialises the dict and sets the JSON content type.
        return JsonResponse(res)
def register(request):
    """Form-based registration.

    Creates a matching pair of users: one in the rbac user table (for
    permissions) and one in the CRM UserInfo table, then answers JSON.
    """
    if request.method == "POST":
        res = {'code':'','error_msg':''}
        username = request.POST.get('username')
        password = request.POST.get('password')
        email = request.POST.get('email')
        telphone = request.POST.get('telphone')
        user_form = UserReg(request.POST)
        if user_form.is_valid():
            res['code'] = 2000
            # Create the user in both the rbac and CRM tables; role pk 4 is
            # presumably the default "guest" role — TODO confirm fixture.
            user = User.objects.create(name=username,pwd=password)
            user.roles.add(4)
            UserInfo.objects.create_user(username=username,password=password,email=email,telphone=telphone, user=user)
        else:
            res['code'] = 2001
            res['error_msg'] = user_form.errors # return every validation error
        return JsonResponse(res)
    user_form = UserReg()
    return render(request,'register.html',{'user_form':user_form})
# def reg_modelform(request):
# """modelform构建的注册页面"""
# if request.method == "POST":
# user_modelform = UserRegModelForm(request.POST)
# if user_modelform.is_valid():
# # modelform提供save方法可直接保存ok的数据
# user_modelform.save()
# return redirect(reverse('login'))
# return render(request, 'reg_modelform.html', {'user_modelform': user_modelform})
# user_modelform = UserRegModelForm()
# return render(request, 'reg_modelform.html', {'user_modelform': user_modelform})
def get_vaildcode_img(request):
    """Render a 4-character CAPTCHA PNG and stash the answer in the session.

    Name (including the 'vaildcode' typo) is kept as-is because the URLconf
    refers to it.
    """
    img = Image.new('RGB', (180, 38), myfunction.get_random_color())  # random background
    draw = ImageDraw.Draw(img)  # draw onto img
    font = ImageFont.truetype("static/font/gordon.ttf", 35)  # captcha font
    check_code = ""
    # Four random characters: each a digit, lowercase or uppercase letter.
    for i in range(4):
        random_num = str(random.randint(0, 9))
        random_lowstr = chr(random.randint(ord('a'), ord('z')))
        random_upperstr = chr(random.randint(ord('A'), ord('Z')))
        random_char = random.choice([random_num, random_lowstr, random_upperstr])
        draw.text((20+i*30+10, 0), random_char, myfunction.get_random_color(), font=font)
        check_code += random_char
    # Keep the expected answer server-side only. The old debug
    # print(check_code) leaked the captcha to the console and was removed.
    request.session['check_code'] = check_code
    # Serve the PNG from an in-memory buffer.
    f = BytesIO()
    img.save(f, "png")
    data = f.getvalue()
    return HttpResponse(data)
def logout(request):
    """End the current session and return to the login page."""
    auth.logout(request)
    login_url = reverse('login')
    return redirect(login_url)
def index(request):
    """CRM landing page."""
    template = 'crm/index.html'
    return render(request, template)
class CustomersList(View):
    """Customer list (all / public pool / my customers) with search,
    pagination and reflection-dispatched bulk actions."""
    def get(self, request):
        # The resolved URL decides which data set to show.
        if reverse('allcustomers_list') == request.path:
            customer_list = Customer.objects.all()
        elif reverse('customers_list') == request.path:
            customer_list = Customer.objects.filter(consultant__isnull=True)
        else:
            customer_list = Customer.objects.filter(consultant=request.user)
        # Search field and content from the query string.
        search_field = request.GET.get('field_select')
        search_content = request.GET.get('table_search')
        if search_content:
            # Build the lookup dynamically with a Q object.
            q = Q()
            if search_field == 'consultant':
                q.children.append((search_field + "__username__icontains", search_content))
            else:
                q.children.append((search_field + "__icontains", search_content))
            customer_list = customer_list.filter(q)
        # Pagination.
        current_page_num = request.GET.get('page')
        pagination = MyPagination(current_page_num, customer_list.count(), request)
        customer_list = customer_list[pagination.start:pagination.end]
        # Remember the current path so add/edit can redirect back here.
        path = request.path
        next = "?next=%s" % path
        return render(request, "crm/customer_manager/customer_list.html",
                      {'next': next, 'customer_list': customer_list, 'pagination': pagination,
                       'search_field': search_field, 'search_content': search_content})
    def post(self, request):
        select_action = request.POST.get('select_action')
        selected_pk_list = request.POST.getlist('selected_pk_list')
        if hasattr(self, select_action):
            # Dispatch the selected bulk action by method name (reflection).
            func = getattr(self, select_action)
            queryset = Customer.objects.filter(pk__in=selected_pk_list)
            func(request, queryset)
        return self.get(request)
    def delete_selected(self, request, queryset):
        """Delete the selected rows."""
        queryset.delete()
    def public_to_private(self, request, queryset):
        """Claim public-pool customers for the current user."""
        if queryset.filter(consultant__isnull=True):
            queryset.update(consultant=request.user)
    def private_to_public(self, request, queryset):
        """Release customers back to the public pool."""
        queryset.update(consultant=None)
class CustomerOperate(View):
    """Customer add/edit: edit_id is None for add, a pk for edit."""
    def get(self, request, edit_id=None):
        customer_obj = Customer.objects.filter(pk=edit_id).first()
        # For add customer_obj is None; for edit, instance=customer_obj
        # pre-populates the form.
        customer_form = CustomerModelForm(instance=customer_obj)
        return render(request, "crm/customer_manager/customer_operate.html", {'customer_form':customer_form, 'customer_obj':customer_obj})
    def post(self, request, edit_id=None):
        customer_obj = Customer.objects.filter(pk=edit_id).first()
        # Without instance=customer_obj, save() would insert a new row.
        customer_form = CustomerModelForm(request.POST, instance=customer_obj)
        if customer_form.is_valid():
            customer_form.save()
            # Return to the page the user came from (?next=...).
            return redirect(request.GET.get('next'))
        else:
            return render(request, "crm/customer_manager/customer_operate.html", {'customer_form':customer_form, 'customer_obj':customer_obj})
class CustomerDelete(View):
    """Delete one customer, then return to the page the user came from."""
    def get(self, request, delete_id):
        Customer.objects.filter(pk=delete_id).delete()
        back_url = request.GET.get('next')
        return redirect(back_url)
class ConsultRecordList(View):
    """Follow-up (consult) record list for the current consultant."""
    def get(self, request):
        # When a customer_id is supplied, narrow to that customer's records.
        customer_id = request.GET.get('customer_id')
        # Only records owned by the current consultant.
        consult_record_list = ConsultRecord.objects.filter(consultant=request.user)
        if customer_id:
            consult_record_list = consult_record_list.filter(customer=customer_id)
        # Search field and content from the query string.
        search_field = request.GET.get('field_select')
        search_content = request.GET.get('table_search')
        if search_content:
            q = Q()
            if search_field == 'customer':
                # Customer search matches either QQ number or name.
                q.connector = 'or'
                q.children.append((search_field + "__qq__contains", search_content))
                q.children.append((search_field + "__name__icontains", search_content))
            else:
                q.children.append((search_field + "__icontains", search_content))
            consult_record_list = consult_record_list.filter(q)
        # Pagination.
        current_page_num = request.GET.get('page')
        pagination = MyPagination(current_page_num, consult_record_list.count(), request)
        consult_record_list = consult_record_list[pagination.start:pagination.end]
        return render(request, "crm/customer_manager/consult_record_list.html", {'consult_record_list':consult_record_list})
    def post(self, request):
        print(request.POST)
        select_action = request.POST.get('select_action')
        selected_pk_list = request.POST.getlist('selected_pk_list')
        if hasattr(self, select_action):
            # Dispatch the selected bulk action by method name (reflection).
            func = getattr(self, select_action)
            queryset = ConsultRecord.objects.filter(pk__in=selected_pk_list)
            func(request, queryset)
        return self.get(request)
    def delete_selected(self, request, queryset):
        """Delete the selected rows."""
        queryset.delete()
class ConsultRecordOperate(View):
    """Consult record add/edit; new records default to the current user."""
    def get(self, request, edit_id=None):
        # Existing record for edit, or an unsaved one bound to the current
        # consultant for add.
        consult_record_obj = ConsultRecord.objects.filter(pk=edit_id).first() or ConsultRecord(consultant=request.user)
        consult_record_list = ConsultRecordModelForm(instance=consult_record_obj)
        return render(request, 'crm/customer_manager/consult_record_operate.html', {'consult_record_list': consult_record_list})
    def post(self, request, edit_id=None):
        consult_record_obj = ConsultRecord.objects.filter(pk=edit_id).first() or ConsultRecord(consultant=request.user)
        consult_record_list = ConsultRecordModelForm(request.POST, instance=consult_record_obj)
        if consult_record_list.is_valid():
            consult_record_list.save()
            return redirect(reverse('consult_record_list'))
        else:
            return render(request, 'crm/customer_manager/consult_record_operate.html', {'consult_record_list': consult_record_list})
class ConsultRecordDelete(View):
    """Delete one consult record, then go back to the record list."""
    def get(self, request, delete_id):
        ConsultRecord.objects.filter(pk=delete_id).delete()
        list_url = reverse('consult_record_list')
        return redirect(list_url)
class ClassStudyRecordList(View):
    """Class study-record list with search, pagination and bulk actions."""
    def get(self, request):
        class_study_record_list = ClassStudyRecord.objects.all()
        # Search field and content from the query string.
        search_field = request.GET.get('field_select')
        search_content = request.GET.get('table_search')
        if search_content:
            # OR across course / semester / campus name.
            q = Q()
            q.connector = "or"
            q.children.append((search_field + "__course__icontains", search_content))
            q.children.append((search_field + "__semester__icontains", search_content))
            q.children.append((search_field + "__campuses__name__icontains", search_content))
            class_study_record_list = class_study_record_list.filter(q)
        # Pagination.
        current_page_num = request.GET.get('page')
        pagination = MyPagination(current_page_num, class_study_record_list.count(), request)
        class_study_record_list = class_study_record_list[pagination.start:pagination.end]
        return render(request, "crm/class_manager/class_study_record_list.html", {'class_study_record_list': class_study_record_list,
                                                                                  'pagination': pagination})
    def post(self, request):
        # Dispatch the selected bulk action by method name (reflection).
        select_action = request.POST.get('select_action')
        selected_pk_list = request.POST.getlist('selected_pk_list')
        if hasattr(self, select_action):
            getattr(self, select_action)(request, selected_pk_list)
        return self.get(request)
    def init_student_study_record(self, request, selected_pk_list):
        """Bulk-create one StudentStudyRecord per student of each selected class record.

        Best effort: the original wrapped the *entire* batch in a single
        try/except-pass, so the first failure (e.g. a duplicate) silently
        aborted all remaining records. The guard is now per record.
        """
        for i in selected_pk_list:
            class_study_record_obj = ClassStudyRecord.objects.filter(id=i).first()
            if class_study_record_obj is None:
                continue
            student_list = class_study_record_obj.class_obj.students.all()
            for student in student_list:
                try:
                    StudentStudyRecord.objects.create(student=student, classstudyrecord=class_study_record_obj)
                except Exception:
                    # Skip records that already exist or fail validation.
                    continue
class ClassStudyRecordOperate(View):
    """Class study-record add/edit: edit_id is None for add, a pk for edit."""
    def get(self, request, edit_id=None):
        class_study_record_obj = ClassStudyRecord.objects.filter(pk=edit_id).first()
        # instance=None renders an empty add form; an instance pre-populates it.
        class_study_record_list = ClassStudyRecordModelForm(instance=class_study_record_obj)
        return render(request, 'crm/class_manager/class_study_record_operate.html', {'class_study_record_list': class_study_record_list})
    def post(self, request, edit_id=None):
        class_study_record_obj = ClassStudyRecord.objects.filter(pk=edit_id).first()
        class_study_record_list = ClassStudyRecordModelForm(request.POST, instance=class_study_record_obj)
        if class_study_record_list.is_valid():
            class_study_record_list.save()
            return redirect(reverse('class_study_record_list'))
        else:
            return render(request, 'crm/class_manager/class_study_record_operate.html', {'class_study_record_list': class_study_record_list})
class ClassStudyRecordDelete(View):
    """Class study-record delete.

    (Docstring fixed: it was copied from the consult-record delete view.)
    """
    def get(self, request, delete_id):
        ClassStudyRecord.objects.filter(pk=delete_id).delete()
        return redirect(reverse('class_study_record_list'))
class RecordScoreView(View):
    """Batch score entry for every student record of one class study record."""
    def get(self, request, id):
        # Build a modelformset class for bulk-editing StudentStudyRecord rows.
        model_formset_cls = modelformset_factory(model=StudentStudyRecord, form=StudentStudyRecordModelFormSet, extra=0)
        # All student records belonging to this class study record.
        queryset = StudentStudyRecord.objects.filter(classstudyrecord=id)
        # Bind the data; the template iterates the formset to render each row.
        formset = model_formset_cls(queryset=queryset)
        class_study_record_list = ClassStudyRecord.objects.filter(pk=id).first()
        # All student records of the current class (for display).
        student_study_record_list = class_study_record_list.studentstudyrecord_set.all()
        return render(request, "crm/class_manager/record_score.html", locals())
    def post(self, request, id):
        model_formset_cls = modelformset_factory(model=StudentStudyRecord, form=StudentStudyRecordModelFormSet, extra=0)
        formset = model_formset_cls(request.POST)
        if formset.is_valid():
            formset.save()
        return self.get(request, id)
class CustomerQuantity(View):
    """Deal-count statistics per salesperson over a selected period."""
    def get(self, request):
        # Period to display; defaults to today.
        date = request.GET.get('date', 'today')
        # Today as a plain date (no time component).
        now = datetime.datetime.now().date()
        # Offsets: one day, one week, and (approximately) one month.
        delta1 = datetime.timedelta(days=1)
        delta2 = datetime.timedelta(weeks=1)
        delta3 = datetime.timedelta(days=30)
        # For each period: [customer filter kwargs, per-user annotate filter kwargs].
        condition = {'today': [{'deal_date': now}, {'customers__deal_date': now}],
                     'yesterday': [{'deal_date': now-delta1}, {'customers__deal_date': now-delta1}],
                     'week': [{'deal_date__gte': now - delta2, 'deal_date__lte': now},
                              {'customers__deal_date__gte': now - delta2, 'customers__deal_date__lte': now}],
                     'month': [{'deal_date__gte': now - delta3, 'deal_date__lte': now},
                               {'customers__deal_date__gte': now - delta3, 'customers__deal_date__lte': now}],
                     }
        # All customers that closed a deal in the period.
        customer_list = Customer.objects.filter(**(condition[date][0]))
        # Deal count per salesperson (sales department only) in the period.
        customer_count = UserInfo.objects.filter(depart__name='销售部门').filter(**(condition[date][1])).annotate(
            c=Count('customers')).values_list('username', 'c')
        # Highcharts expects [[..], ..]; a list of tuples works as well.
        customer_count = list(customer_count)
        return render(request, "crm/count_manager/customer_quantity.html", {'customer_count': customer_count,'customer_list': customer_list})
|
# Print the digits of the number in reverse order.
n = input()
print(n[::-1], end='')
|
import random
from django.contrib.auth.models import User
from faker import Faker
#from backend.users.models import UserProfile, Organization
from ..users.models import UserProfile, Organization
def create_user(username,
                password='12345',
                cname='无名氏',
                ename='Unknown',
                company_id='12345',
                position='tester',
                email='unknown@bysoft.com',
                phone='110',
                department='test centre',
                org='beijing'
                ):
    """Create and save one UserProfile with the given (fixture) attributes.

    NOTE(review): the password is stored as plain text (no set_password /
    hashing) — confirm this helper is used for test fixtures only.
    Raises Organization.DoesNotExist when `org` is not an existing name.
    """
    p = UserProfile(
        username=username,
        password=password,
        chinese_name=cname,
        english_name=ename,
        company_id=company_id,
        position=position,
        email=email,
        phone=phone,
        department=department,
        organization=Organization.objects.get(name=org),
    )
    p.save()
def create_faker_user(num=1):
    """Create `num` UserProfile rows populated with Faker data.

    NOTE(review): passwords are stored as plain text — matches create_user
    above, but confirm this is fixture-only code.
    """
    fc = Faker(locale='zh_CN')
    fe = Faker()
    # Loop invariants hoisted out of the per-user loop (the original rebuilt
    # the department list and re-queried organizations every iteration).
    # A set makes the company-id uniqueness check O(1) instead of O(n).
    company_ids = {p.company_id for p in UserProfile.objects.all()}
    departments = ['ISV', 'Test Centre', 'Admin', 'HR']
    orgs = list(Organization.objects.order_by('name'))
    for _ in range(num):
        first_eng_name = fe.first_name()
        last_eng_name = fe.last_name()
        full_eng_name = ' '.join((first_eng_name, last_eng_name))
        email = '_'.join((first_eng_name, last_eng_name)) + '@bysoft.com'
        password = '12345'
        gender = random.choice(('male', 'female'))
        org = random.choice(orgs)
        # Draw company ids until an unused one is found.
        while True:
            company_id = random.randint(0, 99999)
            if company_id not in company_ids:
                company_ids.add(company_id)
                break
        p = UserProfile(
            username='_'.join((first_eng_name, last_eng_name)),
            password=password,
            chinese_name=fc.name(),
            english_name=full_eng_name,
            gender=gender,
            company_id=company_id,
            position=fc.job(),
            email=email,
            phone=fc.phone_number(),
            department=random.choice(departments),
            organization=org,
        )
        p.save()
def create_org(name='beijing'):
    """Create and save an Organization with the given name."""
    Organization(name=name).save()
def create_orgs():
    """Create the fixed set of office organizations."""
    cities = ['Beijing','Chengdu','Shanghai','Boise','San Davis','Guangzhou','Wuhan']
    for city in cities:
        create_org(city)
def create_random_org():
    """Create an Organization named after a random Faker city."""
    faker = Faker()
    Organization(name=faker.city()).save()
import httplib2
import json
import urllib
# Shared HTTP client with an on-disk cache directory ('.cache').
hObj = httplib2.Http('.cache')
# Wikidata MediaWiki API endpoint and the language used for lookups.
wSrc = 'http://www.wikidata.org/w/api.php'
lang = 'en'
def _get(param):
    """GET `param` against the Wikidata API and return the decoded JSON.

    NOTE(review): urllib.urlencode is Python 2 only (urllib.parse.urlencode
    in Python 3) — this module targets Py2; confirm the runtime.
    """
    # Always request JSON output; mutates the caller's dict.
    param['format'] = 'json'
    param = urllib.urlencode(param)
    resp, cont = hObj.request(wSrc + '?' + param)
    return json.loads(cont)
def search(type, search):
    """Search entities of the given type by label text."""
    request = {'action': 'wbsearchentities', 'language': lang}
    request['search'] = search
    request['type'] = type
    return _get(request)
def query(search):
    """Fetch the latest revision content for the given page titles.

    Bug fix: the parameter was misspelled ``serach`` while the body used
    ``search``, which resolved to the module-level ``search`` *function*
    and broke the '|'.join call.
    """
    return _get({
        'action': 'query',
        'titles': '|'.join(search),
        'prop': 'revisions',
        'rvprop': 'content'
    })
def getEntities(ids, props):
    """Fetch entity data for `ids`, restricted to the requested `props`."""
    request = dict(action='wbgetentities', language=lang, ids=ids, props=props)
    return _get(request)
def getClaim(entityId, propertyId):
    """Fetch the claims of one entity for one property."""
    request = dict(action='wbgetclaims', entity=entityId, property=propertyId)
    return _get(request)
def formatDatavalue(datavalue):
    """Render a datavalue as plain text in the configured language."""
    options = json.dumps({'lang': lang, 'geoformat': 'dms'})
    request = {
        'action': 'wbformatvalue',
        'language': 'text/plain',
        'datavalue': json.dumps(datavalue),
        'options': options,
    }
    return _get(request)
|
from collections import namedtuple, Counter

# Raw parsed entry: own weight plus the names of the programs stacked on top.
Program = namedtuple('Program', ['weight', 'sons'])
# Balanced-tree node: 'sum' is the total weight of the whole subtree.
Node = namedtuple('Node', ['name', 'sum', 'weight', 'sons', 'equal'])
Leaf = namedtuple('Leaf', ['name', 'sum', 'equal'])
class MemoryTree:
    """Program tower (Advent of Code 2017, day 7): find the root program and
    the subtree whose weight breaks the balance."""
    ARROW = ' -> '
    def __init__(self, input='input.txt'):
        # Bug fix: TOWER used to be a *class* attribute, so every instance
        # shared (and polluted) the same dict. It is per-instance now.
        self.TOWER = {}
        with open(input, 'r') as data_input:
            for line in data_input.readlines():
                if self.ARROW in line:
                    program, sons = line.split(self.ARROW)
                    sons = sons.strip().split(', ')
                else:
                    program = line.strip()
                    sons = []
                name, weight = program.split(' ')
                # The weight arrives as '(NN)' -> strip the parentheses.
                self.TOWER[name] = Program(
                    weight=int(weight[1:-1].strip()), sons=sons)
    def __str__(self):
        return str(self.TOWER)
    @property
    def sons(self):
        """Every program name that sits on top of some other program."""
        sons = set()
        for name, program in self.TOWER.items():
            if program.sons:
                sons.update(program.sons)
        return sons
    def remove_sons(self):
        """Destructively drop all non-root entries from the tower."""
        for son in self.sons:
            del self.TOWER[son]
    def find_father(self):
        """The root is the only program that is nobody's son."""
        return list(set(self.TOWER.keys()) - self.sons)[0]
    def balance(self, elem):
        """Recursively annotate `elem` with subtree sums (Node/Leaf tree)."""
        node = self.TOWER[elem]
        if node.sons:
            sons = [self.balance(son) for son in node.sons]
            sons_weights = [son.sum for son in sons]
            are_equal = len(set(sons_weights)) == 1
            return Node(
                elem, node.weight + sum(sons_weights), node.weight, sons, are_equal)
        return Leaf(elem, node.weight, True)
    def is_balanced(self, balanced):
        """Descend to the deepest unbalanced node; return the (weight, sum)
        pairs of the children at the level where the imbalance originates."""
        sums = [son.sum for son in balanced.sons]
        # Bug fix: the original compared `set(sums) == 1` (a set against an
        # int, always False) which crashed on balanced trees; compare the
        # number of distinct sums instead.
        if len(set(sums)) == 1:
            return balanced
        # The odd subtree is the one whose sum occurs exactly once.
        # (The original also called count.subtract() with no arguments,
        # which is a no-op and was removed.)
        count = Counter(sums)
        different = [x[0] for x in dict(count).items() if x[1] == 1][0]
        different = [son for son in balanced.sons if son.sum == different][0]
        diff_sons_sums = [son.sum for son in different.sons]
        if len(set(diff_sons_sums)) == 1:
            return [(son.weight, son.sum) for son in balanced.sons]
        return self.is_balanced(different)
# Script entry: expects an Advent-of-Code style 'input.txt' in the
# working directory; prints the (weight, subtree-sum) pairs at the
# unbalanced level.
tree = MemoryTree()
father = tree.find_father()
balanced = tree.balance(father)
print(tree.is_balanced(balanced))
|
"""
Visualize trends of COVID-19 cases and deaths
"""
# %%
from os.path import join
import logging
from bokeh.io import curdoc
from bokeh.palettes import Purples
from bokeh.layouts import gridplot, row
from bokeh.plotting import figure
from bokeh.themes import Theme
from bokeh.models import (
ColumnDataSource,
MultiSelect,
NumeralTickFormatter,
HoverTool,
Legend,
Title
)
from database import DataBase
from utilities import cwd
from arima import (
ARIMA_CASES_TABLE,
ARIMA_DEATHS_TABLE
)
# Module-level logging: INFO globally, plus a named logger for this module.
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
class LinePlot:
    """Line plot for covid19 cases and deaths by state

    Loads one ARIMA table from the database, then builds one figure with
    hidden actual/predict/lower/upper lines and a confidence-interval band
    per state; callbacks elsewhere toggle visibility per selected state.
    """
    def __init__(self, table):
        # data
        _db = DataBase()
        self.data = _db.get_table(table, parse_dates=['date'])
        _db.close()
        # options: (state_id, state name) pairs for the MultiSelect widget
        _ids = self.data['state_id'].unique()
        _states = self.data['state'].unique()
        self.options = list(zip(_ids, _states))
        self.data.set_index('state_id', inplace=True)
        self.plot = None
        # glyphs, keyed by state_id
        self.source = dict()
        self.actual = dict()
        self.predict = dict()
        self.lower = dict()
        self.upper = dict()
        self.area = dict()
    def _add_figure(self):
        """Create the empty datetime figure with formatted axes."""
        _args = dict(x_axis_type='datetime', tools='save')
        self.plot = figure(**_args)
        self.plot.xaxis.ticker.desired_num_ticks = 10
        self.plot.yaxis.formatter = NumeralTickFormatter(format='0,0')
        self.plot.xaxis.axis_label = 'x'
        self.plot.yaxis.axis_label = 'y'
    def _add_lines(self):
        """Add hidden actual/predict/lower/upper lines per state, all four
        sharing one ColumnDataSource."""
        for _id, _state, in self.options:
            source = ColumnDataSource(data=dict(date=[], actual=[], predict=[],
                                                lower=[], upper=[]))
            self.source[_id] = source
            _args = dict(x='date', y='actual', source=source, name=_state, visible=False)
            self.actual[_id] = self.plot.line(**_args)
            _args = dict(x='date', y='predict', source=source, name=_state, visible=False)
            self.predict[_id] = self.plot.line(**_args)
            _args = dict(x='date', y='lower', source=source, name=_state, visible=False)
            self.lower[_id] = self.plot.line(**_args)
            _args = dict(x='date', y='upper', source=source, name=_state, visible=False)
            self.upper[_id] = self.plot.line(**_args)
    def _add_hover(self):
        """Attach one hover tool per state covering its four lines."""
        for _id, _state, in self.options:
            _renderers = [self.actual[_id], self.predict[_id]]
            _renderers += [self.upper[_id], self.lower[_id]]
            _hover = HoverTool(renderers=_renderers,
                               toggleable=False,
                               tooltips=[('State', '$name'),
                                         ('Date', '$x{%m/%d/%Y}'),
                                         ('Count', '$y{0,0}')],
                               formatters={'$x': 'datetime'})
            self.plot.add_tools(_hover)
    def _add_area(self):
        """Add the hidden confidence-interval band per state."""
        for _id, _state, in self.options:
            _source = self.source[_id]
            _area_args = dict(x='date', y1='lower', y2='upper', source=_source,
                              name=_state, visible=False)
            self.area[_id] = self.plot.varea(**_area_args)
    def _add_legend(self):
        """Build the legend from the first state's renderers."""
        _actual_renderer = self.actual[self.options[0][0]]
        _predict_render = self.predict[self.options[0][0]]
        _area_renderer = self.area[self.options[0][0]]
        _legend = Legend(items=[('Actual', [_actual_renderer]),
                                ('Predicted', [_predict_render]),
                                ('95% Conf.', [_area_renderer])],
                         location='top_left')
        self.plot.add_layout(_legend)
    def color_actual(self, line_color='navy', line_dash='solid'):
        """Color actual line and change line dash style in all states
        Keyword Arguments:
            line_color {rgb color} -- rgb color (default: {'navy'})
            line_dash {'solid', 'dashed'} -- line style (default: {'solid'})
        """
        for _id, _, in self.options:
            self.actual[_id].glyph.line_color = line_color
            self.actual[_id].glyph.line_dash = line_dash
    def color_predict(self, line_color='red', line_dash='dashed'):
        """Color predict line and change line dash style in all states
        Keyword Arguments:
            line_color {rgb color} -- rgb color (default: {'navy'})
            line_dash {'solid', 'dashed'} -- line style (default: {'dashed'})
        """
        for _id, _, in self.options:
            self.predict[_id].glyph.line_color = line_color
            self.predict[_id].glyph.line_dash = line_dash
    def color_interval(self, line_color='navy', line_dash='solid'):
        """Color interval lines and change line dash style in all states
        Keyword Arguments:
            line_color {rgb color} -- rgb color (default: {'navy'})
            line_dash {'solid', 'dashed'} -- line style (default: {'solid'})
        """
        for _id, _, in self.options:
            self.lower[_id].glyph.line_color = line_color
            self.lower[_id].glyph.line_dash = line_dash
            self.upper[_id].glyph.line_color = line_color
            self.upper[_id].glyph.line_dash = line_dash
    def color_area(self, fill_color='grey', fill_alpha=0.25):
        """Color interval area fill color and fill alpha in all states
        Keyword Arguments:
            fill_color {rgb color} -- rgb color (default: {'grey'})
            fill_alpha {float} -- fill alpha (default: {0.25})
        """
        for _id, _, in self.options:
            self.area[_id].glyph.fill_color = fill_color
            self.area[_id].glyph.fill_alpha = fill_alpha
    def color_palette(self, palette=Purples[3]):
        """Color lines and interval area in all states
        Keyword Arguments:
            palette {list} -- list of rgb color (default: {Purples[3]})
        """
        self.color_actual(line_color=palette[0])
        self.color_predict(line_color=palette[0])
        self.color_interval(line_color=palette[1])
        self.color_area(fill_color=palette[2])
    def title(self, title=None):
        """Plot title
        Keyword Arguments:
            title {String} -- plot title (default: {None})
        """
        self.plot.title = Title(text=title)
    def axis_label(self, xlabel='x', ylabel='y'):
        """Set x and y axis labels
        Keyword Arguments:
            xlabel {str} -- x axis label (default: {'x'})
            ylabel {str} -- y axis label (default: {'y'})
        """
        self.plot.xaxis.axis_label = xlabel
        self.plot.yaxis.axis_label = ylabel
    def render_figure(self):
        """Render figure, glyphs and color glyphs with default colors
        """
        self._add_figure()
        self._add_lines()
        self._add_area()
        self._add_hover()
        self._add_legend()
class Trends:
    """Trends layout

    Two stacked LinePlot figures (cases, deaths) driven by one state
    MultiSelect widget.
    """
    def __init__(self, palette=Purples[3]):
        # Cases line plot.
        self.cases = LinePlot(ARIMA_CASES_TABLE)
        self.cases.render_figure()
        self.cases.title("Cumulative Cases by State")
        self.cases.axis_label('Date', 'Cases')
        self.cases.color_palette(palette)
        LOG.debug('state cases')
        # Deaths line plot.
        self.deaths = LinePlot(ARIMA_DEATHS_TABLE)
        self.deaths.render_figure()
        self.deaths.title("Cumulative Deaths by State")
        self.deaths.axis_label('Date', 'Deaths')
        self.deaths.color_palette(palette)
        LOG.debug('state deaths')
        # Setting `value` fires both callbacks, rendering the default states.
        self.multiselect = None
        self._add_multiselect()
        self.multiselect.value = ['12', '34', '36']
        LOG.debug('render default states')
    def _add_multiselect(self):
        """Build the state MultiSelect and wire both plot callbacks to it."""
        self.multiselect = MultiSelect(title='States:', value=['01'],
                                       options=self.cases.options)
        self.multiselect.max_width = 170
        self.multiselect.min_height = 500 - 47
        self.multiselect.on_change('value', self._callback_cases)
        self.multiselect.on_change('value', self._callback_deaths)
    def _callback_cases(self, _attr, _old, new):
        """Hide every visible cases glyph, then show the newly selected
        states, lazily loading their rows into the shared sources."""
        for _id, _ in list(self.multiselect.options):
            if self.cases.actual[_id].visible:
                self.cases.actual[_id].visible = False
                self.cases.predict[_id].visible = False
                self.cases.lower[_id].visible = False
                self.cases.upper[_id].visible = False
                self.cases.area[_id].visible = False
        for _id in new:
            if not self.cases.actual[_id].visible:
                _slice = self.cases.data.loc[_id, :]
                self.cases.source[_id].data = ColumnDataSource.from_df(data=_slice)
                self.cases.actual[_id].visible = True
                self.cases.predict[_id].visible = True
                self.cases.lower[_id].visible = True
                self.cases.upper[_id].visible = True
                self.cases.area[_id].visible = True
    def _callback_deaths(self, _attr, _old, new):
        """Hide every visible deaths glyph, then show the newly selected
        states, lazily loading their rows into the shared sources."""
        for _id, _ in list(self.multiselect.options):
            if self.deaths.actual[_id].visible:
                self.deaths.actual[_id].visible = False
                self.deaths.predict[_id].visible = False
                self.deaths.lower[_id].visible = False
                self.deaths.upper[_id].visible = False
                self.deaths.area[_id].visible = False
        for _id in new:
            if not self.deaths.actual[_id].visible:
                _slice = self.deaths.data.loc[_id, :]
                self.deaths.source[_id].data = ColumnDataSource.from_df(data=_slice)
                self.deaths.actual[_id].visible = True
                self.deaths.predict[_id].visible = True
                self.deaths.lower[_id].visible = True
                self.deaths.upper[_id].visible = True
                self.deaths.area[_id].visible = True
    def layout(self):
        """Build trend layout
        Returns:
            Bokeh Layout -- layout with cases, deaths and state selection
        """
        _graphs = gridplot([self.cases.plot, self.deaths.plot], ncols=1,
                           plot_width=800 - self.multiselect.max_width,
                           plot_height=250, toolbar_location=None)
        _layout = row(_graphs, self.multiselect)
        return _layout
# %%
# NOTE(review): under `bokeh serve` the module name starts with 'bokeh_app',
# so this prefix check replaces the usual __main__ guard — confirm against
# the deployed bokeh version.
if __name__[:9] == 'bokeh_app':
    print('unit testing...')
    TREND = Trends(palette=Purples[3])
    curdoc().add_root(TREND.layout())
    curdoc().title = "trends"
    curdoc().theme = Theme(filename=join(cwd(), "theme.yaml"))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import argparse
from rpy2.robjects.packages import importr
import rpy2.robjects as ro
import pandas.rpy.common as com
from pandas import DataFrame
def Sizezonematrixfeatures(Img):
    """Compute 11 gray-level size-zone matrix (GLSZM) features with the R
    'radiomics' package and write them to SZM_featuresList.csv.

    Parameters:
        Img -- pandas.DataFrame holding the 2-D image matrix.

    NOTE(review): pandas.rpy.common was removed from pandas long ago; modern
    rpy2 uses pandas2ri — confirm the runtime environment.
    """
    rdf = com.convert_to_r_dataframe(Img)
    ro.globalenv['Image'] = rdf
    ro.r('Image <- as.matrix(Image)')
    ro.r('library(radiomics)')
    ro.r('szmatrix <- glszm(Image)')
    ro.r('szmfeature <- array(NA,dim=c(11,1))')
    ro.r('szmfeature[1,1] <- glszm_SAE(szmatrix)')
    ro.r('szmfeature[2,1] <- glszm_LAE(szmatrix)')
    ro.r('szmfeature[3,1] <- glszm_IV(szmatrix)')
    ro.r('szmfeature[4,1] <- glszm_HILAE(szmatrix)')
    ro.r('szmfeature[5,1] <- glszm_LILAE(szmatrix)')
    ro.r('szmfeature[6,1] <- glszm_HISAE(szmatrix)')
    ro.r('szmfeature[7,1] <- glszm_LISAE(szmatrix)')
    ro.r('szmfeature[8,1] <- glszm_HIE(szmatrix)')
    ro.r('szmfeature[9,1] <- glszm_LIE(szmatrix)')
    ro.r('szmfeature[10,1] <- glszm_ZP(szmatrix)')
    ro.r('szmfeature[11,1] <- glszm_SZV(szmatrix)')
    # BUG FIX: the label vector must follow the exact order the rows were
    # filled above; the previous order ("SAE","LAE","IV","SZV","ZP",...)
    # mislabelled rows 4-11 in the output CSV.
    ro.r('colname <-c("SAE","LAE","IV","HILAE","LILAE","HISAE","LISAE","HIE","LIE","ZP","SZV")')
    ro.r('szmfeat <- cbind(colname,szmfeature)')
    ro.r('write.csv(szmfeat,file="SZM_featuresList.csv")')
def main():
    """CLI entry: read the image matrix given with -f and emit SZM features."""
    parser = argparse.ArgumentParser(description="Size-zone matrix based features calculation")
    parser.add_argument('-f', dest="filename", required=True, help="input file with image matrix", metavar="FILE")
    cli = parser.parse_args()
    matrix = np.genfromtxt(cli.filename, dtype=float, delimiter=',')
    Sizezonematrixfeatures(DataFrame(matrix))
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# ZIP codes considered part of Manhattan; used below to disambiguate
# geocoder results that match more than one borough.
MANHATTAN_ZIP_CODES = [
    10001, 10002, 10003, 10004, 10005, 10006, 10007, 10008, 10009, 10010, 10011,
    10012, 10013, 10014, 10015, 10016, 10017, 10018, 10019, 10020, 10021, 10022,
    10023, 10024, 10025, 10026, 10027, 10029, 10030, 10031, 10032, 10033, 10034,
    10035, 10036, 10037, 10038, 10039, 10040, 10044, 10046, 10047, 10048, 10055,
    10060, 10069, 10072, 10082, 10087, 10090, 10095, 10099, 10101, 10102, 10103,
    10104, 10105, 10106, 10107, 10108, 10109, 10110, 10111, 10112, 10113, 10114,
    10115, 10116, 10117, 10118, 10119, 10120, 10121, 10122, 10123, 10124, 10125,
    10126, 10128, 10129, 10130, 10131, 10132, 10133, 10138, 10149, 10150, 10151,
    10152, 10153, 10154, 10155, 10156, 10157, 10158, 10159, 10160, 10161, 10162,
    10163, 10164, 10165, 10166, 10167, 10168, 10169, 10170, 10171, 10172, 10173,
    10174, 10175, 10176, 10177, 10178, 10179, 10185, 10197, 10199, 10211, 10259,
    10261, 10272, 10276
]
import csv, xmlrpclib
# Python 2 script. Load already-geocoded addresses so reruns skip them
# (the output file is append-only, see below).
geocoded_file = open("geocoded-locations.csv", "r")
geocodes = {}
# NOTE(review): 'long' shadows the Python 2 builtin of the same name.
for (address, lat, long) in csv.reader(geocoded_file):
    geocodes[address] = (lat, long)
geocoded_file.close()
locations_file = open("locations.csv", "r")
server = xmlrpclib.Server("http://rpc.geocoder.us/service/xmlrpc")
for (address,) in csv.reader(locations_file):
    # Only geocode intersections ("A & B") not already resolved.
    if address.find("&") == -1 or address in geocodes: continue
    print "geocoding %s" % address
    result = server.geocode("%s, New York, NY" % address)
    # Keep only results that actually carry coordinates.
    geocoded_results = filter(lambda x: "lat" in x and "long" in x, result)
    if len(geocoded_results) > 1:
        # Prefer the Manhattan match when the geocoder is ambiguous.
        manhattan_results = filter(lambda x: x["zip"] in MANHATTAN_ZIP_CODES, geocoded_results)
        if len(manhattan_results) == 1:
            geocoded_results = manhattan_results
    if len(geocoded_results) > 1:
        print " ambiguous geocode"
    elif len(geocoded_results) == 0:
        print " no results"
    else:
        (lat, long) = (geocoded_results[0]["lat"], geocoded_results[0]["long"])
        geocodes[address] = [lat, long]
        # Append each hit immediately so progress survives a crash.
        geocoded_file = open("geocoded-locations.csv", "a")
        csv.writer(geocoded_file).writerow([address, lat, long])
        geocoded_file.close()
        print " %f,%f" % (lat, long)
locations_file.close()
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db.models import QuerySet
from web.domains.case.shared import ImpExpStatus
from web.domains.case.types import ImpOrExp
from web.flow import errors
from web.models import AccessRequest, ApprovalRequest, Process, Task
TT = Task.TaskType
ST = ImpExpStatus
__all__ = [
    #
    # Functions to check an application is at a particular point in the application process
    #
    "application_in_progress",
    "application_in_processing",
    "access_request_in_processing",
    # Was missing from the export list even though it is defined below.
    "approval_request_in_processing",
    "application_is_authorised",
    "application_is_with_chief",
    #
    # Utility functions
    #
    "check_expected_status",
    "check_expected_task",
    "get_expected_task",
    "get_active_tasks",
    "get_active_task_list",
]
# TODO: Consider splitting.
# def application_in_progress
# def application_in_progress_update_request
# def application_in_progress_variation_request
def application_in_progress(application: ImpOrExp) -> None:
    """Check if the application is in progress with the applicant.

    Covers a brand-new application (IN_PROGRESS) as well as an update
    request raised during processing or a variation request.
    """
    valid_statuses = [ST.IN_PROGRESS, ST.PROCESSING, ST.VARIATION_REQUESTED]
    check_expected_status(application, valid_statuses)
    check_expected_task(application, TT.PREPARE)
def application_in_processing(application: ImpOrExp) -> None:
    """Check if an application is being processed by a caseworker."""
    check_expected_status(application, [ST.SUBMITTED, ST.PROCESSING, ST.VARIATION_REQUESTED])
    check_expected_task(application, TT.PROCESS)
def access_request_in_processing(application: AccessRequest) -> None:
    """Check if an access request is being processed by a caseworker."""
    valid_statuses = [AccessRequest.Statuses.SUBMITTED]
    check_expected_status(application, valid_statuses)  # type:ignore[arg-type]
    check_expected_task(application, TT.PROCESS)
def approval_request_in_processing(application: ApprovalRequest) -> None:
    """Check if an approval request is still open (no task check applies)."""
    check_expected_status(application, [ApprovalRequest.Statuses.OPEN])  # type:ignore[arg-type]
def application_is_authorised(application: ImpOrExp) -> None:
    """Check if an application has been authorised."""
    check_expected_status(application, [ST.PROCESSING, ST.VARIATION_REQUESTED])
    check_expected_task(application, TT.AUTHORISE)
def application_is_with_chief(application: ImpOrExp) -> None:
    """Check if an application is waiting on a response from CHIEF."""
    check_expected_status(application, [ST.PROCESSING, ST.VARIATION_REQUESTED])
    check_expected_task(application, TT.CHIEF_WAIT)
def check_expected_status(application: Process, expected_statuses: list[ImpExpStatus]) -> None:
    """Raise ProcessStateError unless the process status is one of *expected_statuses*."""
    # ``status`` is a model field on every concrete Process subclass.
    current: str = application.status
    if current not in expected_statuses:
        raise errors.ProcessStateError(f"Process is in the wrong state: {current}")
def check_expected_task(application: Process, expected_task: str) -> None:
    """Raise TaskError unless *expected_task* is among the process's active tasks."""
    task_types = get_active_task_list(application)
    if expected_task in task_types:
        return
    raise errors.TaskError(
        f"{expected_task} not in active task list {task_types} for Process {application.pk}"
    )
def get_expected_task(
    application: Process, task_type: str, *, select_for_update: bool = True
) -> Task:
    """Return the single active task of *task_type*.

    Raises ProcessInactiveError if the process is inactive, and TaskError if
    zero or multiple matching active tasks exist.
    """
    if not application.is_active:
        raise errors.ProcessInactiveError("Process is not active")
    queryset = get_active_tasks(application, select_for_update)
    try:
        return queryset.get(task_type=task_type)
    except (ObjectDoesNotExist, MultipleObjectsReturned) as exc:
        raise errors.TaskError(f"Failed to get expected task: {task_type}") from exc
def get_active_task_list(application: Process) -> list[str]:
    """Return the task_type of every active task on *application* as a list."""
    task_types = get_active_tasks(application, False).values_list("task_type", flat=True)
    return list(task_types)
def get_active_tasks(application: Process, select_for_update: bool = True) -> QuerySet[Task]:
    """Return the queryset of active tasks, optionally locked FOR UPDATE."""
    active = application.tasks.filter(is_active=True)
    if select_for_update:
        return active.select_for_update()
    return active
|
from text_gnn.config import TEMP_PATH
import joblib
import gensim
import numpy as np
from seaborn import distplot
from matplotlib import pyplot
from collections import Counter
from nltk.corpus import stopwords
from sklearn.datasets import fetch_20newsgroups
def get_data(cate):
    """Return (targets, texts) for a 20newsgroups subset.

    ``cate`` may be 'train', 'test', or 'all' (train followed by test).
    """
    if cate == 'all':
        train_targets, train_texts = get_data('train')
        test_targets, test_texts = get_data('test')
        return train_targets + test_targets, train_texts + test_texts
    bunch = fetch_20newsgroups(subset=cate)
    return list(bunch.target), bunch.data
def handle_text(text: str):
    """Tokenise *text* on whitespace."""
    tokens = text.split()
    return tokens
def handle_text_list(texts):
    """Tokenise every text, printing progress every 1000 documents."""
    total = len(texts)
    tokenised = []
    for count, text in enumerate(texts, start=1):
        if count % 1000 == 0:
            print("handle: {:.2f}% {}/{}".format(count * 100 / total, count, total))
        tokenised.append(handle_text(text))
    return tokenised
def padding_input(inp, padding_idx, padding_len):
    """Truncate or right-pad *inp* with *padding_idx* to exactly *padding_len*."""
    if len(inp) > padding_len:
        return inp[:padding_len]
    tail = [padding_idx] * (padding_len - len(inp))
    return inp + tail
def padding_input_list(inputs, padding_idx, padding_len):
    """Pad/truncate every sequence, printing progress every 1000 items."""
    total = len(inputs)
    padded = []
    for count, inp in enumerate(inputs, start=1):
        if count % 1000 == 0:
            print("handle: {:.2f}% {}/{}".format(count * 100 / total, count, total))
        padded.append(padding_input(inp, padding_idx, padding_len))
    return padded
def get_word_count(word2count, rate):
    """Return the most-frequent non-stopword entries covering *rate* of all tokens.

    NOTE: relies on the module-level ``stopword`` set (defined under __main__).
    """
    counts = [[w, c] for w, c in word2count.items() if w not in stopword]
    counts.sort(key=lambda pair: pair[1], reverse=True)
    total = sum(c for _, c in counts)
    running = 0
    for idx, (_, c) in enumerate(counts):
        running += c
        if running > rate * total:
            return counts[:idx]
    return counts
def get_padding_len(inputs, rate):
    """Plot the length distribution (lengths < 1000) and return the *rate*-quantile length."""
    lengths = [len(seq) for seq in inputs]
    distplot([length for length in lengths if length < 1000])
    pyplot.show()
    ordered = sorted(lengths)
    return ordered[int(rate * len(inputs))]
def handle_dataset(cate, word2index, padding_len):
    """Build (targets, padded index sequences) for one dataset split."""
    targets, texts = get_data(cate)
    token_lists = handle_text_list(texts)
    indexed = [[word2index[w] for w in words if w in word2index] for words in token_lists]
    print(get_padding_len(indexed, 0.9))
    padded = padding_input_list(indexed, len(word2index), padding_len)
    return targets, padded
if __name__ == '__main__':
    # Preprocessing pipeline: build a vocabulary from the full 20newsgroups
    # corpus, convert every document to a padded index sequence, and dump
    # the arrays plus a pretrained word-embedding matrix for training.
    stopword = set(stopwords.words('english'))
    embedding_dim = 300  # dimensionality of the Google word2vec vectors
    len_rate = 0.9       # quantile used to pick the padding length
    min_count = 8        # drop words rarer than this
    targets, texts = get_data('all')
    num_classes = len(set(targets))
    # Frequency-sorted vocabulary; the trailing "[5:]" skips the five most
    # common words (presumably acting as extra stopwords — TODO confirm).
    word_count = [[w, c] for w, c in sorted(list(Counter(" ".join(texts).split()).items()),
                                            reverse=True, key=lambda x: x[1]) if c >= min_count][5:]
    word_set = {w for w, c in word_count}
    word2index = {w: i for i, w in enumerate(word_set)}
    inputs = [[w for w in text.split() if w in word_set] for text in texts]
    padding_len = get_padding_len(inputs, len_rate)
    padding_idx = len(word2index)  # index reserved for the padding token
    y_train, x_train = handle_dataset('train', word2index, padding_len)
    y_test, x_test = handle_dataset('test', word2index, padding_len)
    # NOTE(review): both dumps below store word2index; the first filename only
    # encodes run metadata (padding length / vocab size / class count) —
    # confirm the duplication is intentional.
    joblib.dump(word2index, TEMP_PATH + "/len={} w_num={} l_num={}.pkl".format(padding_len, padding_idx, num_classes))
    joblib.dump(word2index, TEMP_PATH + "/word2index.pkl")
    np.save(TEMP_PATH + "/train.input.npy", x_train)
    np.save(TEMP_PATH + "/train.target.npy", y_train)
    np.save(TEMP_PATH + "/test.input.npy", x_test)
    np.save(TEMP_PATH + "/test.target.npy", y_test)
    # Embedding matrix: pretrained Google vector when available, random
    # otherwise; the final row is the all-zero padding vector.
    google = gensim.models.KeyedVectors.load_word2vec_format(TEMP_PATH + '/Google-vectors-negative.bin', binary=True)
    google_set = set(list(google.vocab))
    index2vec = {i: google[w] if w in google_set else list(np.random.random(embedding_dim)) for w, i in
                 word2index.items()}
    vectors = [index2vec[i] for i in range(len(index2vec))] + [[0.] * embedding_dim]
    np.save(TEMP_PATH + "/word2vector.npy", vectors)
|
import imaplib
import imaplib_connect
# Example script: copy all already-read ("SEEN") messages from INBOX into a
# new mailbox called "Example.Today".
with imaplib_connect.open_connection() as c:
    # Find the "SEEN" messages in INBOX
    c.select('INBOX')
    typ, [response] = c.search(None, 'SEEN')
    if typ != 'OK':
        raise RuntimeError(response)
    # SEARCH returns space-separated message ids; COPY expects them
    # comma-separated.
    msg_ids = ','.join(response.decode('utf-8').split(' '))
    # Create a new mailbox, "Example.Today"
    typ, create_response = c.create('Example.Today')
    print('CREATED Example.Today:', create_response)
    # Copy the messages
    print('COPYING:', msg_ids)
    c.copy(msg_ids, 'Example.Today')
    # Look at the results
    c.select('Example.Today')
    typ, [response] = c.search(None, 'ALL')
    print('COPIED:', response)
|
from flask import redirect, render_template, url_for
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
from app.extensions.login import login_player
from app.models import db, Player
from app.views.salad_bowl import salad_bowl
class CreatePlayerForm(FlaskForm):
    """Registration form for a new player; both fields are required."""
    name = StringField('name', validators=[DataRequired()])
    catch_phrase = StringField('catch phrase', validators=[DataRequired()])
@salad_bowl.route('/create_player/', methods=['GET', 'POST'])
def create_player():
    """Render the create-player form; on a valid POST, persist and log in the player."""
    form = CreatePlayerForm()
    if not form.validate_on_submit():
        # GET request or failed validation: (re-)render the form.
        return render_template('salad_bowl/actions/create_player.html', form=form, action_url=url_for('.create_player'))
    player = Player(name=form.name.data, catch_phrase=form.catch_phrase.data)
    db.session.add(player)
    db.session.commit()
    login_player(player)
    return redirect(url_for('.games'))
|
# --------------------------------------------------------------------------- #
# __init__.py #
# #
# Copyright © 2015-2020, Rajiv Bakulesh Shah, original author. #
# All rights reserved. #
# --------------------------------------------------------------------------- #
'''Redis for Humans.

Redis is awesome, but Redis commands are not always fun. Pottery is a Pythonic
way to access Redis. If you know how to use Python dicts, then you already
know how to use Pottery.
'''
__title__ = 'pottery'
__version__ = '0.71'
# The blank line after the one-line summary above is load-bearing: the
# docstring is split on it into the short and long package descriptions.
__description__, __long_description__ = (
    s.strip() for s in __doc__.split(sep='\n\n', maxsplit=1)
)
__url__ = 'https://github.com/brainix/pottery'
__author__ = 'Rajiv Bakulesh Shah'
__author_email__ = 'brainix@gmail.com'
__license__ = 'Apache 2.0'
__keywords__ = 'Redis client persistent storage'
# Fix: year range now matches the file-header copyright (2015-2020); it
# previously said 2015-2019.
__copyright__ = 'Copyright © 2015-2020, {}, original author.'.format(__author__)
from .exceptions import PotteryError
from .exceptions import KeyExistsError
from .exceptions import RandomKeyError
from .exceptions import PrimitiveError
from .exceptions import QuorumNotAchieved
from .exceptions import TooManyExtensions
from .exceptions import ReleaseUnlockedLock
from .bloom import BloomFilter
from .cache import CachedOrderedDict
from .cache import redis_cache
from .hyper import HyperLogLog
from .nextid import NextId
from .redlock import Redlock
from .timer import ContextTimer
from .counter import RedisCounter
from .deque import RedisDeque
from .dict import RedisDict
from .list import RedisList
from .set import RedisSet
# Public API of the package root (mirrors the re-exports imported above).
__all__ = [
    'PotteryError',
    'KeyExistsError',
    'RandomKeyError',
    'PrimitiveError',
    'QuorumNotAchieved',
    'TooManyExtensions',
    'ReleaseUnlockedLock',
    'BloomFilter',
    'CachedOrderedDict',
    'redis_cache',
    'HyperLogLog',
    'NextId',
    'Redlock',
    'ContextTimer',
    'RedisCounter',
    'RedisDeque',
    'RedisDict',
    'RedisList',
    'RedisSet',
]
|
import torch
import numpy as np
import random
from prospr.dataloader import dataloader
from prospr.nn import CUDA, CROP_SIZE, DIST_BINS, ANGLE_BINS, SS_BINS, ASA_BINS, INPUT_DIM
IDEAL_BATCH_SIZE = 2
def norm(thing):
    """Z-score normalise: subtract the mean, divide by the standard deviation."""
    centred = thing - np.mean(thing)
    return centred / np.std(thing)
def get_start_idxs():
    """Determine possible start indices for maximal grid coverage by sequence length.

    Returns a list indexed by ``L % CROP_SIZE``; each entry is the list of
    grid offsets that still give full sequence coverage for that remainder.
    """
    half = CROP_SIZE // 2
    candidates = list(range(half + 1))
    mods = [[half]]
    for k in range(1, half + 1):
        mods.append(candidates[:k])
    for k in range(half + 1, CROP_SIZE):
        mods.append(candidates[k - half:])
    return mods
def get_masks(shape=(64, 64, 64), real=True):
    """Build crop-assembly weighting masks.

    When *real* is True, positions are weighted by proximity to the crop
    centre via concentric squares at 0.25 / 0.5 / 0.75 / 1.0; otherwise the
    mask is uniform ones.

    Returns:
        (mask, central_row, central_column): the full 3-D mask plus its
        central slices along axes 1 and 2.
    """
    if real:  # want the central weighting
        mask = np.zeros(shape)
        step = shape[1] // 8
        start_value = 0.25
        for n in range(4):
            v = start_value * (n + 1)
            # Fix: bound each square by the mask's own dimensions rather than
            # the global CROP_SIZE, so non-default shapes are handled
            # correctly (identical result when shape[1] == shape[2] == CROP_SIZE).
            mask[:, (step * n):shape[1] - (step * n), (step * n):shape[2] - (step * n)] = v
    else:  # uniform mask
        mask = np.ones(shape)
    return mask, mask[:, shape[1] // 2, :], mask[:, :, shape[2] // 2]
def predict_domain(data, model, num_offsets=10, real_mask=True):
    '''Make a prediction for an entire protein domain via crop assembly and averaging.

    Runs the model over CROP_SIZE x CROP_SIZE crops at ``num_offsets``
    different grid offsets, then averages the mask-weighted, normalised crop
    outputs back onto full-sequence arrays.

    Args:
        data: domain record; ``data.seq`` is read here and the rest is
            consumed by ``dataloader``.
        model: network returning (distance prediction, aux dict for axis i,
            aux dict for axis j) for a batch of crops.
        num_offsets: number of distinct (i, j) grid offsets to sample.
        real_mask: if True, weight crop centres more heavily when assembling.

    Returns:
        dict of averaged arrays keyed 'dist', 'ss', 'phi', 'psi', 'asa'.
    '''
    seq = data.seq
    seq_len = len(seq)
    # randomly select i,j pairs for grid offsets
    # first pick from ones that give optimal (full) sequence coverage, then randomly select rest
    normal_offsets = get_start_idxs()[seq_len % CROP_SIZE]
    start_coords = []
    crop_list = []
    while len(start_coords) < num_offsets:
        if len(start_coords) >= (len(normal_offsets) **2):
            # Optimal-coverage offsets exhausted: fall back to random offsets.
            # NOTE(review): the literal 31 presumably tracks CROP_SIZE // 2 - 1
            # for CROP_SIZE == 64 — confirm.
            possible_starts = [i for i in range(31)]
            start_i = random.choice(possible_starts)
            start_j = random.choice(possible_starts)
        else:
            start_i = random.choice(normal_offsets)
            start_j = random.choice(normal_offsets)
        if (start_i,start_j) not in start_coords:
            start_coords.append((start_i,start_j))
            # Enumerate every crop origin on this offset's grid.
            i = start_i
            j = start_j
            while i < seq_len:
                while j < seq_len:
                    crop_list.append((i, j))
                    j += CROP_SIZE
                j = start_j
                i += CROP_SIZE
    # Running sums of weighted predictions, plus sums of the weights
    # themselves, used to average the overlapping crops at the end.
    ss_sum = np.zeros([SS_BINS,seq_len])
    phi_sum = np.zeros([ANGLE_BINS,seq_len])
    psi_sum = np.zeros([ANGLE_BINS,seq_len])
    asa_sum = np.zeros([ASA_BINS,seq_len])
    dist_sum = np.zeros([DIST_BINS,seq_len,seq_len])
    dim2_ct = np.zeros([ANGLE_BINS,seq_len])
    dim3_ct = np.zeros([DIST_BINS,seq_len,seq_len])
    model = model.eval()
    BATCH_SIZE = IDEAL_BATCH_SIZE
    while len(crop_list) > 0:
        # Final batch may be smaller than IDEAL_BATCH_SIZE.
        if len(crop_list) < IDEAL_BATCH_SIZE:
            BATCH_SIZE = len(crop_list)
        input_vector = torch.zeros([BATCH_SIZE,INPUT_DIM,CROP_SIZE,CROP_SIZE], dtype=torch.float, device=CUDA)
        batch_crops = []
        for batch in range(BATCH_SIZE):
            crop = crop_list.pop(0)
            input_vector[batch,:] = dataloader(data, i=crop[0], j=crop[1])
            batch_crops.append(crop)
        pred_dist, pred_aux_i, pred_aux_j = model(input_vector)
        # Move every head's output to host memory as numpy arrays.
        batch_ss_i = pred_aux_i['ss'].cpu().detach().numpy()
        batch_ss_j = pred_aux_j['ss'].cpu().detach().numpy()
        batch_phi_i = pred_aux_i['phi'].cpu().detach().numpy()
        batch_phi_j = pred_aux_j['phi'].cpu().detach().numpy()
        batch_psi_i = pred_aux_i['psi'].cpu().detach().numpy()
        batch_psi_j = pred_aux_j['psi'].cpu().detach().numpy()
        batch_asa_i = pred_aux_i['asa'].cpu().detach().numpy()
        batch_asa_j = pred_aux_j['asa'].cpu().detach().numpy()
        batch_dist = pred_dist.cpu().detach().numpy()
        for batch in range(BATCH_SIZE):
            crop_id = batch_crops[batch]
            i = crop_id[0]
            j = crop_id[1]
            ss_i = batch_ss_i[batch]
            ss_j = batch_ss_j[batch]
            phi_i = batch_phi_i[batch]
            phi_j = batch_phi_j[batch]
            psi_i = batch_psi_i[batch]
            psi_j = batch_psi_j[batch]
            asa_i = batch_asa_i[batch]
            asa_j = batch_asa_j[batch]
            dist = batch_dist[batch]
            mask, mask_i, mask_j = get_masks(shape=(ANGLE_BINS,CROP_SIZE,CROP_SIZE), real=real_mask)
            mask = mask[:DIST_BINS,:,:]
            #crop off padding
            # NOTE(review): the literal 32 below looks like CROP_SIZE // 2 —
            # confirm it should track CROP_SIZE if that constant ever changes.
            if i > (seq_len-32):
                ss_i = ss_i[:,:(CROP_SIZE-(i-(seq_len-32)))]
                phi_i = phi_i[:,:(CROP_SIZE-(i-(seq_len-32)))]
                psi_i = psi_i[:,:(CROP_SIZE-(i-(seq_len-32)))]
                asa_i = asa_i[:,:(CROP_SIZE-(i-(seq_len-32)))]
                dist = dist[:,:(CROP_SIZE-(i-(seq_len-32))),:]
                mask_i = mask_i[:,:(CROP_SIZE-(i-(seq_len-32)))]
                mask = mask[:,:(CROP_SIZE-(i-(seq_len-32))),:]
            if i < 32:
                ss_i = ss_i[:,(32-i):]
                phi_i = phi_i[:,(32-i):]
                psi_i = psi_i[:,(32-i):]
                asa_i = asa_i[:,(32-i):]
                dist = dist[:,(32-i):,:]
                mask_i = mask_i[:,(32-i):]
                mask = mask[:,(32-i):,:]
            if j > (seq_len-32):
                ss_j = ss_j[:,:(CROP_SIZE-(j-(seq_len-32)))]
                phi_j = phi_j[:,:(CROP_SIZE-(j-(seq_len-32)))]
                psi_j = psi_j[:,:(CROP_SIZE-(j-(seq_len-32)))]
                asa_j = asa_j[:,:(CROP_SIZE-(j-(seq_len-32)))]
                dist = dist[:,:,:(CROP_SIZE-(j-(seq_len-32)))]
                mask_j = mask_j[:,:(CROP_SIZE-(j-(seq_len-32)))]
                mask = mask[:,:,:(CROP_SIZE-(j-(seq_len-32)))]
            if j < 32:
                ss_j = ss_j[:,(32-j):]
                phi_j = phi_j[:,(32-j):]
                psi_j = psi_j[:,(32-j):]
                asa_j = asa_j[:,(32-j):]
                dist = dist[:,:,(32-j):]
                mask_j = mask_j[:,(32-j):]
                mask = mask[:,:,(32-j):]
            # apply masks
            ss_i *= mask_i[:SS_BINS,:]
            ss_j *= mask_j[:SS_BINS,:]
            phi_i *= mask_i
            phi_j *= mask_j
            psi_i *= mask_i
            psi_j *= mask_j
            asa_i *= mask_i[:ASA_BINS,:]
            asa_j *= mask_j[:ASA_BINS,:]
            dist *= mask
            # Normalise each weighted crop before accumulating.
            ss_i = norm(ss_i)
            ss_j = norm(ss_j)
            phi_i = norm(phi_i)
            phi_j = norm(phi_j)
            psi_i = norm(psi_i)
            psi_j = norm(psi_j)
            asa_i = norm(asa_i)
            asa_j = norm(asa_j)
            dist = norm(dist)
            dim_i = int(dist.shape[1])
            dim_j = int(dist.shape[2])
            start_i = np.max([0, i-32])
            start_j = np.max([0, j-32])
            # Accumulate this crop's contribution onto the full-length arrays.
            ss_sum[:,start_i:(start_i+dim_i)] += ss_i
            ss_sum[:,start_j:(start_j+dim_j)] += ss_j
            phi_sum[:,start_i:(start_i+dim_i)] += phi_i
            phi_sum[:,start_j:(start_j+dim_j)] += phi_j
            psi_sum[:,start_i:(start_i+dim_i)] += psi_i
            psi_sum[:,start_j:(start_j+dim_j)] += psi_j
            asa_sum[:,start_i:(start_i+dim_i)] += asa_i
            asa_sum[:,start_j:(start_j+dim_j)] += asa_j
            dist_sum[:,start_i:(start_i+dim_i),start_j:(start_j+dim_j)] += dist
            #keep track of what's been predicted where and with which mask values
            dim2_ct[:,start_i:(start_i+int(dim_i))] += mask_i
            dim2_ct[:,start_j:(start_j+int(dim_j))] += mask_j
            dim3_ct[:,start_i:(start_i+int(dim_i)),start_j:(start_j+int(dim_j))] += mask
    #adjust predictions by masking sums
    dist_ct = dim3_ct
    ss_ct = dim2_ct[:SS_BINS,:]
    angle_ct = dim2_ct
    asa_ct = dim2_ct[:ASA_BINS,:]
    dist_avg = dist_sum / dist_ct
    ss_avg = ss_sum / ss_ct
    phi_avg = phi_sum / angle_ct
    psi_avg = psi_sum / angle_ct
    asa_avg = asa_sum / asa_ct
    return {'dist':dist_avg, 'ss':ss_avg, 'phi':phi_avg, 'psi':psi_avg, 'asa':asa_avg}
|
import copy
class Solution:
    def permute(self, nums):
        """Return every permutation of nums, in DFS (index-ascending) order."""
        results = []

        def backtrack(remaining, chosen):
            if len(remaining) == 0:
                results.append(list(chosen))
                return
            for idx in range(len(remaining)):
                rest = remaining[:idx] + remaining[idx + 1:]
                backtrack(rest, chosen + [remaining[idx]])

        backtrack(nums, [])
        return results
# Quick manual check of Solution.permute.
sol = Solution()
nums = [1, 2, 3]  # renamed from "input", which shadowed the builtin
print(sol.permute(nums))
|
import pyttsx3
# Text-to-speech demo: reads a project description aloud via pyttsx3.
# One time initialization
engine = pyttsx3.init()
# Set properties _before_ you add things to say
engine.setProperty('rate', 150)    # Speed percent (can go over 100)
engine.setProperty('volume', 0.9)  # Volume 0-1
# Queue up things to say.
# There will be a short break between each one
# when spoken, like a pause between sentences.
engine.say("The project is a python-based application for visually impaired persons using speech to text voice response, thus enabling everyone to control their mail accounts using their voice only and to be able to read,send, and perform all the other useful tasks. The system will prompt the user with voice commands to perform certain action and the user will respond to the same. The main benefit of this system is that the use of mouse is completely eliminated, the user will have to respond through voice only.")
# Flush the say() queue and play the audio
engine.runAndWait()
# Program will not continue execution until
# all speech is done talking
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.