index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
996,300 | a8852ee788103b611fb33644da004dc3cbc97b6d | # basic_view = {
# "influencer": {
# "gender": True, "name": True, "mail":
# }
# }
#
# pro_view = {
#
# }
#
# prime_view = {
#
# }
from Core.Option_values import get_profile_pictures_sources
from Core.dbconn import get_database_connection
def get_topic_name(index: int) -> str:
    """Translate a topic index into its German display name.

    :param index: zero-based position in the topic catalogue
    :return: the German topic label
    :raises IndexError: if ``index`` is outside the catalogue
    """
    topic_labels = (
        'Auto und Motorrad', 'Beauty / Kosmetik', 'Bildung', 'Computer und Elektronik', 'DIY', 'Design',
        'Erfolg und Karriere', 'Essen und Trinken / Food', 'Fashion / Mode', 'Finanzen', 'Fotografie',
        'Frauen / Männer', 'Garten und Natur', 'Gesellschaft und Politik', 'Gesundheit', 'Home und Living',
        'Humor und Spaß', 'Kinder und Familie', 'Kino, Film, TV', 'Kunst und Kultur', 'Liebe und Sexualität',
        'Lifestyle', 'Luxus', 'Marketing', 'Musik', 'Recht und Gesetz', 'Reise / Travel', 'Social Media',
        'Spiele und Gaming', 'Sport und Fitness', 'Tattos', 'Technik', 'Tiere',
    )
    return topic_labels[index]
def get_language_name(index: int) -> str:
    """Map a language index to its German name (0 = Englisch, 1 = Deutsch).

    :param index: zero-based language index
    :return: the German language label
    :raises IndexError: if ``index`` is outside the known languages
    """
    language_labels = ("Englisch", "Deutsch")
    return language_labels[index]
def get_deal_name(index: int) -> str:
    """Map a deal-type index to its German name.

    :param index: zero-based deal-type index
    :return: the German deal-type label
    :raises IndexError: if ``index`` is outside the known deal types
    """
    deal_labels = ("Barzahlung", "WKZ", "nach Absprache")
    return deal_labels[index]
def get_content_type_name(index: int) -> str:
    """Convert a one-based content-type identifier to its display name.

    :param index: identifier as stored in the database (1 = Post, 2 = Story, 3 = Video)
    :return: human-readable content-type label
    """
    content_type_labels = ("Post", "Story", "Video")
    # identifiers are one-based while tuple indexing is zero-based
    return content_type_labels[index - 1]
def get_data_for_profile_view(influencer_identifier: int) -> dict:
    """Aggregate all data needed to render an influencer's profile view.

    Combines per-channel listing rows (plus the content types offered on
    each channel), the influencer's base record, covered topics, channel
    languages, accepted deal types, previous cooperations and profile
    picture sources into a single dict.

    :param influencer_identifier: primary key of the influencer
    :return: dict with one key per listed channel table, the influencer's
        own columns, and the keys ``topics``, ``languages``, ``deal_types``,
        ``cooperations`` and ``profile_pictures``
    """
    influencer_data = {}
    # Maps each channel listing table name to its channel_identifier as
    # referenced by the content_of_channel table.
    channel_identifiers = {
        "is_listed_on_facebook": 2,
        "is_listed_on_instagram": 1,
        "is_listed_on_personal_blog": 5,
        "is_listed_on_pinterest": 4,
        "is_listed_on_youtube": 3
    }
    for channel in ["is_listed_on_facebook", "is_listed_on_instagram", "is_listed_on_personal_blog",
                    "is_listed_on_pinterest", "is_listed_on_youtube"]:
        # NOTE(review): a connection is obtained on every iteration and only
        # the one from the last iteration is closed at the end of the
        # function — if get_database_connection() creates a fresh connection
        # per call, the earlier ones leak. TODO confirm and hoist out of loop.
        dbconnection = get_database_connection()
        cursor = dbconnection.cursor()
        # The table name cannot be a bound parameter; ``channel`` only ever
        # comes from the fixed list above, so this concatenation is not
        # user-controlled input.
        cursor.execute("SELECT * FROM " + channel + " WHERE influencer_identifier = %s AND listing_on = 1;",
                       (influencer_identifier,))
        print(cursor.statement)  # debug output left in; consider a logger
        results = cursor.fetchone()
        keys = cursor.column_names
        cursor.close()
        if results is not None:
            # NOTE(review): this cursor is never explicitly closed.
            cursor = dbconnection.cursor()
            cursor.execute(
                """SELECT content_type_identifier FROM content_of_channel WHERE influencer_identifier=%s and channel_identifier=%s;""",
                (
                    influencer_identifier, channel_identifiers[channel]
                ))
            # Human-readable names for the content types on this channel.
            content_types = list()
            for content_type in cursor.fetchall():
                content_types.append(get_content_type_name(content_type[0]))
            try:
                channel_data = {}
                # Pair each column name with its value from the fetched row.
                for index in range(0, len(keys)):
                    try:
                        channel_data[keys[index]] = results[index]
                    except KeyError:
                        break
                channel_data["content_types"] = content_types
                influencer_data[channel] = channel_data
            except IndexError:
                pass
    # Base influencer record; its columns are merged into the result dict.
    # NOTE(review): if no influencer row exists, ``results`` is None and the
    # loop below raises TypeError — TODO confirm callers guarantee existence.
    cursor = dbconnection.cursor()
    cursor.execute("SELECT * FROM influencer WHERE influencer_identifier = %s", (influencer_identifier,))
    results = cursor.fetchone()
    keys = cursor.column_names
    cursor.close()
    for index in range(0, len(keys)):
        influencer_data[keys[index]] = results[index]
    # Topics covered by the influencer, translated to display names.
    cursor = dbconnection.cursor()
    cursor.execute("SELECT topic_identifier FROM influencer_covers_topic WHERE influencer_identifier = %s",
                   (influencer_identifier,))
    results = cursor.fetchall()
    topics = []
    try:
        for entry in results:
            topics.append(get_topic_name(entry[0]))
    except IndexError:
        # identifiers outside the catalogue are silently dropped
        pass
    influencer_data["topics"] = topics
    cursor.close()
    # Languages used on the influencer's channels.
    cursor = dbconnection.cursor()
    cursor.execute("SELECT language_identifer FROM influencer_channel_language WHERE influencer_identifier = %s",
                   (influencer_identifier,))
    results = cursor.fetchall()
    languages = []
    try:
        for entry in results:
            languages.append(get_language_name(entry[0]))
    except IndexError:
        pass
    influencer_data["languages"] = languages
    cursor.close()
    # Deal types the influencer accepts.
    cursor = dbconnection.cursor()
    cursor.execute("SELECT deal_identifier FROM influencer_deal WHERE influencer_identifier = %s",
                   (influencer_identifier,))
    results = cursor.fetchall()
    deals = []
    try:
        for entry in results:
            deals.append(get_deal_name(entry[0]))
    except IndexError:
        pass
    influencer_data["deal_types"] = deals
    cursor.close()
    # Previous cooperations, each row converted to a column->value dict.
    cursor = dbconnection.cursor()
    cursor.execute("SELECT * FROM influencer_had_previous_cooperation WHERE influencer_identifier = %s;", (
        influencer_identifier,
    ))
    results = cursor.fetchall()
    keys = cursor.column_names
    return_list = []
    for dataset in results:
        temp_dict = {}
        for key_index in range(0, len(keys)):
            temp_dict[keys[key_index]] = dataset[key_index]
        return_list.append(temp_dict)
    influencer_data["cooperations"] = return_list
    cursor.close()
    dbconnection.close()
    influencer_data["profile_pictures"] = get_profile_pictures_sources(user_id=influencer_identifier)
    print(influencer_data)  # debug output left in
    return influencer_data
|
996,301 | 2e5dddc9a9ea8ab8b4e0e788146b95140afd0004 | # Generated by Django 3.0.7 on 2020-09-27 19:28
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django (3.0.7); deletes the ``Payment_ids`` model.
    # NOTE(review): applied migrations should not be edited by hand.
    dependencies = [
        ('manag_app', '0012_payment_ids'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Payment_ids',
        ),
    ]
|
996,302 | 75e5bf22ce8e059ed11785c600bfca4eae61a4cc | # https://github.com/keras-team/keras/blob/master/examples/mnist_mlp.py
# https://nextjournal.com/schmudde/ml4a-mnist
# https://github.com/keras-team/keras/blob/master/examples/mnist_cnn.py (CONVOLUTIONAL)
# https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/01_Simple_Linear_Model.ipynb (TF)
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
import matplotlib.pyplot as plt
import numpy as np
import random
'''
TRY:
    -> Loss functions:
        Log loss
        KL Divergence
        Mean Squared Error
        Categorical_crossentropy (atual)
    -> Optimizers:
        RMSprop (atual)
'''
# Hyperparameters
batch_size = 128
num_classes = 10  # {0, 1, 2, 3, 4, 5, 6, 7, 8, 9} = 10
epochs = 20
# Load MNIST and show a 9x9 grid of randomly chosen training digits.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
plt.figure()
# 9 rows, each the horizontal concatenation of 9 random digit images
numbers = np.concatenate([np.concatenate([x_train[i] for i in [int(random.random() * len(x_train)) for i in range(9)]], axis=1) for i in range(9)], axis=0)
plt.imshow(numbers, cmap='gist_gray', interpolation='none')
plt.xticks([])
plt.yticks([])
plt.xlabel('Alguns números do MNIST')
plt.show()
'''
r = int(random.random() * len(x_train))
numbers2 = np.array([x_train[x] for x in range(r-5, r)])
plt.xlabel('Alguns números do MNIST, começando de ' + str(r-5))
plt.imshow(numbers2.reshape(int(numbers2.size/28), 28), cmap='gist_gray')
'''
# Flatten the 28x28 images to 784-vectors and scale pixels to floats.
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255  # colors [0, 255] -> [0, 1]
x_test /= 255
print(x_train.shape[0], 'números/arquivos de treino')
print(x_test.shape[0], 'números/arquivos de teste')
# One-hot encode the labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# MLP: 784 -> 512 -> 512 -> 10, dropout after each hidden layer.
classifier = Sequential()
classifier.add(Dense(512, activation='relu', input_shape=(784,)))
classifier.add(Dropout(0.2))
classifier.add(Dense(512, activation='relu'))
classifier.add(Dropout(0.2))
classifier.add(Dense(num_classes, activation='softmax'))
#classifier.summary()
classifier.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
classifier.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
# Final evaluation on the held-out test set.
score = classifier.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
996,303 | 148cebc47169053d5cd0d2196bbb1ddf84d5b50b | # Generated by Django 3.0.5 on 2020-10-02 06:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (3.0.5); alters the ``tspara`` field of the
    # ``apitest`` model (nullable CharField, max_length=500).
    # NOTE(review): applied migrations should not be edited by hand.
    dependencies = [
        ('tsdemo', '0013_auto_20201002_1428'),
    ]
    operations = [
        migrations.AlterField(
            model_name='apitest',
            name='tspara',
            field=models.CharField(blank=True, max_length=500, null=True, verbose_name='请求参数'),
        ),
    ]
|
996,304 | 4d62ab12dcc0689dab41aa8c9ba95f9659e0c205 | class stars:
def __init__(self, owner, star_data):
self.__owner = owner
self.__star_data = star_data
self.__stars = self.__parse()
self.__set = set(self.__stars)
def __parse(self):
colon_index = self.__star_data.index(':')
before_colon = self.__star_data[:colon_index]
star_definitions = before_colon.split('|')
split_definitions = [star_definition.split(',') for star_definition in star_definitions]
star_coordinates = [(int(x), int(y)) for x, y, color in split_definitions]
return star_coordinates
def unlock(self, key_object):
assert key_object.__class__ is key
owner, constellations = key_object.get_data()
master_list = self.__stars
master_set = self.__set
answers = []
print 'Stars Owner =', self.__owner
print ' Key Owner =', owner
found = 0
for number, current_constellation in enumerate(constellations):
answers = self.__find_constellations(current_constellation, master_list)
for answer in answers:
print ' Constellation', number + 1, 'was found at the following coordinates:'
print ' ' + str(answer)[1:-1]
found += 1
if not found:
print ' No constellations could be found.'
def __find_constellations(self, constellation, sky):
answers = []
for star in sky:
x_diff = star[0] - constellation[0][0]
y_diff = star[1] - constellation[0][1]
new_constellation = set([(x + x_diff, y + y_diff) for x, y in constellation])
same = self.__set & new_constellation
if len(same) == len(constellation):
answers.append(list(same))
return answers
class key:
    """Parses the constellation side of a star-data string.

    The format is ``<stars>:<constellations>``: the part after the colon is
    a ``|``-separated list of constellation definitions, each consisting of
    a header terminated by ``!`` followed by ``#``-separated segments of
    ``;``-separated ``x,y`` pairs.  Duplicate coordinates within a
    constellation are removed, keeping first-seen order.
    """
    def __init__(self, owner, star_data):
        self.__owner = owner
        self.__star_data = star_data
        self.__constellations = self.__parse()
    def __parse(self):
        """Split the text after the colon and de-duplicate each definition."""
        tail = self.__star_data[self.__star_data.index(':') + 1:]
        return [self.__prune_definition(self.__parse_definition(chunk))
                for chunk in tail.split('|')]
    def __parse_definition(self, constellation_definition):
        """Extract every ``x,y`` integer pair found after the ``!`` marker."""
        payload = constellation_definition[constellation_definition.index('!') + 1:]
        coordinates = []
        for segment in payload.split('#'):
            for pair in segment.split(';'):
                x_text, y_text = pair.split(',')
                coordinates.append((int(x_text), int(y_text)))
        return coordinates
    def __prune_definition(self, parsed_definition):
        """Drop later duplicates of any coordinate, preserving first-seen order."""
        unique = []
        for star in parsed_definition:
            if not any(self.__equals(star, seen) for seen in unique):
                unique.append(star)
        return unique
    def __equals(self, star1, star2):
        """Coordinate-wise equality of two star tuples."""
        return (star1[0], star1[1]) == (star2[0], star2[1])
    def get_data(self):
        """Return ``(owner, constellations)`` for consumption by ``stars.unlock``."""
        return self.__owner, self.__constellations
|
996,305 | 13d50f4473796c8403383fa91b0a9c2e0bbed2f5 | from .interface import Interface
from settings import tooltip_width, tooltip_height, tooltip_x ,tooltip_icon, tooltip_margin
import environment
import pygame
pygame.font.init()
myfont = pygame.font.SysFont('Comic Sans MS', 15)
class ToolTip(Interface):
    """Pop-up panel that word-wraps ``text`` onto the tooltip background.

    Position, size and background image come from the module-level
    ``tooltip_*`` settings; only the vertical position and the text vary
    per instance.  Any mouse-down dismisses the tooltip.
    """
    def __init__(self, map_, y, text):
        Interface.__init__(self, map_, tooltip_x, y, tooltip_width, tooltip_height, tooltip_icon)
        self.screen = environment.Game.screen
        self.text = text
        # self.width = math.floor(tower_select.width * .90)
        # self.height = math.floor(tower_select.height * .1)
        # self.icon = pygame.transform.scale( pygame.image.load('images/start_button.png'), (self.width, self.height))
    def render(self):
        """Draw the background icon, then the wrapped text on top of it."""
        # self.x = x
        # self.y = y
        self.screen.blit(self.icon, (self.x, self.y))
        # textsurface = myfont.render(f'{self.text}', False, (0, 0, 0))
        # self.screen.blit(textsurface,(self.x + tooltip_margin, (self.y + tooltip_margin)))
        pos = (self.x + tooltip_margin, (self.y + tooltip_margin))
        self.blit_text(pos)
        # pygame.transform.scale(self.icon, (self.width, self.height))
    def handle_mouse_down(self, x, y):
        """A click anywhere dismisses the tooltip (coordinates are ignored)."""
        self.map.tower_select.tooltip = None
        # if ( x <= self.x + self.width and x >= self.x and
        #     y <= self.y + self.height and y >= self.y and
        #     not self.map.round.is_started):
        #     self.map.round.start()
    def blit_text(self, pos, color=pygame.Color('black')):
        """Render ``self.text`` word by word, wrapping when a word would
        cross the right screen edge.

        NOTE(review): wrapping is tested against the full screen width, not
        the tooltip width — long lines can spill outside the tooltip
        background; confirm this is intended.
        """
        words = [word.split(' ') for word in self.text.splitlines()]  # 2D array where each row is a list of words.
        space = myfont.size(' ')[0]  # The width of a space.
        max_width, max_height = self.screen.get_size()
        x, y = pos
        for line in words:
            for word in line:
                word_surface = myfont.render(word, 0, color)
                word_width, word_height = word_surface.get_size()
                if x + word_width >= max_width:
                    x = pos[0]  # Reset the x.
                    y += word_height  # Start on new row.
                self.screen.blit(word_surface, (x, y))
                x += word_width + space
            x = pos[0]  # Reset the x.
            y += word_height  # Start on new row.
996,306 | 5e0cef11f0e77f9a694cfc104ea4dc481eaa6ddd | import sys
sys.path.append('/home/aistudio/external-libraries')
# Image augmentation / preprocessing (normalisation only)
import os
import paddlex as pdx
from paddlex.det import transforms
train_transforms = transforms.Compose([
    transforms.Normalize()
])
eval_transforms = transforms.Compose([
    transforms.Normalize()
])
os.chdir('/home/aistudio/work')
# VOC-format detection dataset; the file lists are relative to the workdir.
train_dataset = pdx.datasets.VOCDetection(
    data_dir='/home/aistudio/work/',
    file_list='./train_list_voc.txt',
    label_list='./labels.txt',
    transforms=train_transforms,
    shuffle=True)
# presumably the +1 accounts for the background class — TODO confirm
# against the paddlex FasterRCNN documentation
num_classes = len(train_dataset.labels) + 1
model = pdx.det.FasterRCNN(num_classes=num_classes)
# Train for 12 epochs, decaying the LR at epochs 8 and 11; checkpoints are
# written every epoch and training is logged to VisualDL.
model.train(
    num_epochs=12,
    train_dataset=train_dataset,
    train_batch_size=2,
    # eval_dataset=eval_dataset,
    learning_rate=0.0025,
    lr_decay_epochs=[8, 11],
    save_interval_epochs=1,
    save_dir='output/faster_rcnn_r50_fpn',
    use_vdl=True)
996,307 | daf5f0d049d0852eda58dd10b1fa66d0fee38fb4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 3 01:34:54 2019
@author: edwin
"""
# It is very impractical to find all Pythagorean triples in the required range by brute force, but there is another way.
# We generate all primitive Pythagorean triples as follows:
# Pick m>n s.t. m+n odd, gcd(m,n)=1. Then set a=m^2-n^2, b=2mn, c=m^2+n^2. It is clear that this gives
# a primitive Pythagorean triple, but not that it gives all of them. To see this, suppose that (a,b,c) is
# a primitive Pythagorean triple. Exactly one of a,b is even (squares can only be 0 or 1 mod 4 preventing both
# from being odd, and comprimality prevents both from being even). WLOG b is even. We note that
# c^2-a^2 = b^2 = (a+c)*(c-a)
# where a,c are necessarily both odd. We note also that (a+c),(c-a) share no odd factors: any factor shared by
# (a+c) and (c-a) is also shared by 2a and 2c (addition and subtraction), which we know share no factors
# apart from the factor of 2 by assumption. This means that each prime power in b^2 goes entirely into
# either (a+c) or (c-a), so in fact (a+c) and (c-a) are both twice a square. Therefore we write
# a+c = 2*m^2, c-a = 2*n^2
# and therefore a=m^2-n^2, b=2mn, c=m^2+n^2. All that remains to show is gcd(m,n) = 1 (trivial from a,c coprime)
# and m+n odd (m,n cannot have the same parity since this again violates a,c coprime). So we are done.
# All that remains now is to implement this knowlege in an algorithm:
import gcd
import math
def main(limit=1500000):
    """Count perimeters up to ``limit`` shared by exactly one right triangle
    with integer sides (Project Euler 75).

    Primitive Pythagorean triples are generated as a = m^2 - n^2, b = 2mn,
    c = m^2 + n^2 with m > n, gcd(m, n) = 1 and m + n odd (see the proof in
    the module header); their multiples cover the non-primitive triples.

    :param limit: inclusive upper bound on the perimeter (default 1,500,000,
        the original hard-coded value)
    :return: number of perimeters <= limit produced by exactly one triple
    """
    sumCount = {}
    # The perimeter is 2*m*(m+n) > 2*m^2, so m is at most floor(sqrt(limit/2)).
    # The original range() stopped one value short and silently dropped the
    # largest admissible m (harmless at limit=1,500,000, wrong in general).
    for m in range(1, math.floor(math.sqrt(limit / 2)) + 1):
        # m and n must have opposite parity, so n starts at 1 or 2 and steps by 2.
        n = 1 + (m % 2)
        while n < m:
            # coprimality makes the triple primitive (stdlib gcd instead of
            # the local gcd module)
            if math.gcd(m, n) == 1:
                perimeter = 2 * m * m + 2 * m * n
                # Count the primitive triple and all its multiples.
                mult = 1
                while perimeter * mult <= limit:
                    sumCount[perimeter * mult] = 1 + sumCount.get(perimeter * mult, 0)
                    mult += 1
            n += 2
    # A perimeter qualifies when exactly one triple produced it.
    total = 0
    for perimeter in sumCount:
        if sumCount[perimeter] == 1:
            total += 1
    return total
|
996,308 | 285cdbc2695f3806ece1dd6169b292cc7a2b47a4 | from .staff_factory import AbstractStaffFactory
from .manager_factory import ManagerFactory
from .salesperson_factory import SalespersonFactory
from .receptionist_factory import ReceptionistFactory
from src.user.employee import Staff
import random as rd
class RandStaffFactory(AbstractStaffFactory):
    """Factory that creates staff of a randomly chosen concrete type
    (manager, salesperson or receptionist) with a random active status."""

    def make(self, name: str) -> Staff:
        """Create one staff member of a random type with the given name.

        :param name: display name for the new staff member
        :return: a ``Staff`` instance with ``active`` set at random
        """
        factory = self._createFactory()
        staff = factory.make(name)
        staff.active = self._randActiveStatus()
        return staff

    def make_many(self, names: list[str]) -> list[Staff]:
        """Create one random staff member per name, in order."""
        return [self.make(name) for name in names]

    def _createFactory(self) -> AbstractStaffFactory:
        # NOTE: the original annotated this as ``-> Staff``, but it returns
        # a factory, not a staff member.
        staff_type = rd.randrange(3)
        if staff_type == 0:
            return ManagerFactory()
        if staff_type == 1:
            return SalespersonFactory()
        return ReceptionistFactory()

    def _randActiveStatus(self) -> bool:
        # True or False with equal probability
        return rd.randrange(2) == 0
|
996,309 | a0eb1e91c2ee24e22ed90a9178c392bacd0a560c | import enum
from typing import Optional
import numpy as np
import scipy.linalg
import scipy.odr
import scipy.optimize
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
_gaussian_fwhm_factor = 2 * (2 * np.log(2)) ** 0.5 # FWHM = _gaussian_fwhm_factor * sigma
def LorentzPeak(x, A, x0, fwhm, y0):
    """Lorentzian peak: height ``A`` at centre ``x0``, full width at half
    maximum ``fwhm``, constant baseline ``y0``."""
    half_width_units = 2 * (x - x0) / fwhm
    return y0 + A / (1 + half_width_units ** 2)
def GaussPeak(x, A, x0, fwhm, y0):
    """Area-normalised Gaussian: area ``A``, centre ``x0``, full width at
    half maximum ``fwhm``, constant baseline ``y0``."""
    variance = (fwhm / _gaussian_fwhm_factor) ** 2
    normalisation = (2 * np.pi * variance) ** 0.5
    return A / normalisation * np.exp(- (x - x0) ** 2 / (2 * variance)) + y0
def AsymmetricLorentzPeak(x, A, x0, fwhm1, fwhm2, y0):
    """Lorentzian with independent widths: ``fwhm1`` left of ``x0``,
    ``fwhm2`` right of it."""
    left = x <= x0
    right = x > x0
    out = np.empty_like(x)
    out[left] = LorentzPeak(x[left], A, x0, fwhm1, y0)
    out[right] = LorentzPeak(x[right], A, x0, fwhm2, y0)
    return out
def AsymmetricGaussPeak(x, A, x0, fwhm1, fwhm2, y0):
    """Gaussian with independent widths: ``fwhm1`` left of ``x0``,
    ``fwhm2`` right of it."""
    left = x <= x0
    right = x > x0
    out = np.empty_like(x)
    out[left] = GaussPeak(x[left], A, x0, fwhm1, y0)
    out[right] = GaussPeak(x[right], A, x0, fwhm2, y0)
    return out
class PeakType(enum.Enum):
    """Selects the model function used by :func:`fitpeak`.

    NOTE(review): because the assigned values are plain functions, ``enum``
    treats them as methods rather than members; ``peaktype(x, *params)``
    still calls the underlying function (which is how ``fitpeak`` uses it),
    but iterating over ``PeakType`` yields no members — confirm this is
    intended.
    """
    Lorentzian = LorentzPeak
    Gaussian = GaussPeak
    AsymmetricLorentzian = AsymmetricLorentzPeak
    AsymmetricGaussian = AsymmetricGaussPeak
def fitpeak(x: np.ndarray, y: np.ndarray, dy: Optional[np.ndarray], dx: Optional[np.ndarray],
            peaktype: PeakType = PeakType.Lorentzian):
    """Fit a single peak of the given type to (x, y) data.

    Without x errors an ordinary (optionally dy-weighted) least-squares fit
    is performed via scipy.optimize.least_squares; with both dx and dy an
    orthogonal distance regression (scipy.odr) is used.  Supplying dx
    without dy is not supported.

    :param x: abscissa values
    :param y: ordinate values
    :param dy: uncertainties of y, or None
    :param dx: uncertainties of x, or None
    :param peaktype: model function to fit
    :return: tuple of (fitted parameter values, covariance matrix, fitted
        model as a callable of x)
    :raises ValueError: if dx is given but dy is not
    """
    # guess parameters
    parameter_guess = [
        y.max() - y.min(),  # amplitude
        0.5 * (x.max() + x.min()),  # center
        (x.max() - x.min()),  # FWHM
        y.min(),  # offset
    ]
    bounds = [
        (0, x.min(), 0, -np.inf),  # lower bounds
        (np.inf, x.max(), np.inf, np.inf),  # upper bounds
    ]
    # finite-difference step sizes, scaled to the data ranges
    diff_step = [(y.max() - y.min())*1e-4, (x.max()-x.min())*1e-4, (x.max() - x.min())*1e-4, (y.max()-y.min())*1e-4]
    if peaktype in [PeakType.AsymmetricGaussian, PeakType.AsymmetricLorentzian]:
        # asymmetric models take two FWHM parameters: duplicate the FWHM
        # entry in the guess, both bound tuples and the step sizes
        parameter_guess = parameter_guess[:3] + [parameter_guess[2]] + parameter_guess[3:]
        bounds = [
            bounds[0][:3] + (bounds[0][2],) + bounds[0][3:],
            bounds[1][:3] + (bounds[1][2],) + bounds[1][3:]
        ]
        diff_step = diff_step[:3] + [diff_step[2]] + diff_step[3:]
    if dx is None:
        # do an ordinary least-squares fit with/without error bars
        result = scipy.optimize.least_squares(
            fun=(lambda parameters, x, y, dy: (y - peaktype(x, *parameters)) / dy) if dy is not None else (
                lambda parameters, x, y: y - peaktype(x, *parameters)),
            x0=parameter_guess,
            x_scale='jac',
            diff_step=diff_step,
            bounds=bounds,
            method='trf',
            args=(x, y, dy) if dy is not None else (x, y),
        )
        values = result.x
        # Estimate the covariance from the Jacobian at the solution,
        # discarding singular values below the numerical rank threshold.
        _, s, VT = scipy.linalg.svd(result.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(result.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        try:
            covar = np.dot(VT.T / s ** 2, VT)
        except ValueError:
            # degenerate Jacobian: fall back to an all-NaN covariance matrix
            covar = np.ones((len(result.x), len(result.x))) * np.nan
        return values, covar, lambda x: peaktype(x, *values)
    elif (dx is not None) and (dy is not None):
        # orthogonal distance least-squares
        model = scipy.odr.Model(lambda params, x: peaktype(x, *params))
        data = scipy.odr.RealData(x, y, dx, dy)
        odr = scipy.odr.ODR(data, model, parameter_guess)
        result = odr.run()
        return result.beta, result.cov_beta, lambda x: peaktype(x, *result.beta)
    else:
        # only reachable when dx is given but dy is not
        raise ValueError('Cannot fit with x errors present and y errors absent.')
|
996,310 | 509141aa05d28e038ed75be9896b749846581fc8 | from random import randint
from time import sleep
print('-=-' * 20)
print('Vou pensar em um número entre 0 e 5. Tente adivinhar...')
print('-=-' * 20)
# The computer picks a number in [0, 5]; the player tries to guess it.
n = randint(0, 5)
resposta = int(input('Em que número eu pensei? '))
print('PROCESSANDO...')
sleep(0.9)  # dramatic pause before revealing the result
# From the computer's point of view it "loses" when the player guesses right.
if n == resposta:
    print('PERDI! Foi no número {} que pensei.'.format(n))
else:
    print('GANHEI! Eu pensei no número {} e não no {}.'.format(n, resposta))
|
996,311 | 826bae511f0b0977bbcd37af13770db4e7c762b3 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 29 15:34:05 2019
@author: NUS
"""
import numpy as np
#####################################################K-means clustering###########################################
# randomly select the centroids
import random
import math
def randCent(data, k):
    """Randomly select k distinct rows of the data as initial centroids.

    parameters
    ------------
    data: <class 'numpy.ndarray'>, shape=[n_samples, n_features], input data to randomly select centroids from.
    k: <class 'int'> the number of centroids
    ------------
    return
    centroids: <class 'numpy.ndarray'>, shape=[k, n_features]
    """
    index = set()
    while len(index) != k:
        # randrange excludes the upper bound; the original used
        # randint(0, data.shape[0]), which is inclusive and could return
        # data.shape[0] itself, indexing past the end of the array.
        index.add(random.randrange(data.shape[0]))
    index = list(index)
    centroids = data[index]
    return centroids
def KMeans(data, k):
    """KMeans algorithm: Lloyd iterations until the centroids stop moving.

    parameters
    ------------
    data: <class 'numpy.ndarray'>, shape=[n_samples, n_features], input data to cluster.
    k: <class 'int'> the number of the centroids
    ------------
    return
    centroids: <class 'numpy.ndarray'>, shape=[k, n_features]
    clusterAssment: <class 'numpy.ndarray'>, shape=[n_samples, 1], cluster index per sample
    """
    centroids = randCent(data, k)
    clusterAssment = np.zeros(shape=(data.shape[0], 1))
    converged = False
    while not converged:
        # Assignment step: nearest centroid by squared Euclidean distance.
        # Vectorized over all features — the original only compared the
        # first two coordinates, silently ignoring any further features.
        for i in range(data.shape[0]):
            distances = np.sum((centroids - data[i]) ** 2, axis=1)
            clusterAssment[i] = distances.argmin()
        # Update step: each centroid moves to the mean of its members.
        new_centroids = np.zeros(shape=(k, data.shape[1]))
        for j in range(k):
            members = data[np.where(clusterAssment == j)[0]]
            if members.shape[0] == 0:
                # Empty cluster: keep the previous centroid.  The original
                # took np.mean of an empty slice, producing NaN centroids
                # and a convergence test that could never succeed.
                new_centroids[j] = centroids[j]
            else:
                new_centroids[j] = np.mean(members, axis=0)
        # Converged when the total centroid movement is (numerically) zero.
        if np.sum(np.abs(new_centroids - centroids)) < 1e-10:
            converged = True
        centroids = new_centroids
    return centroids, clusterAssment
##############################################color #############################################################
def colors(k):
    """Generate k random RGB tuples for plt.scatter.

    parameters
    ------------
    k: <class 'int'> the number of colors to produce
    ------------
    return
    ret: <class 'list'> of (r, g, b) tuples with components in [0, 1], len = k
    """
    return [(random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))
            for _ in range(k)]
############################################mean shift clustering##############################################
from collections import defaultdict
import warnings
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._joblib import Parallel
from sklearn.utils._joblib import delayed
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
    """mean shift cluster for single seed.
    Parameters
    ----------
    my_mean : array-like, shape=[n_features]
        seed position to start climbing from
    X : array-like, shape=[n_samples, n_features]
        Samples to cluster.
    nbrs: NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
    max_iter: max iterations
    return:
        mean(center) and the total number of pixels which is in the sphere
    """
    # For each seed, climb gradient until convergence or max_iter
    # NOTE(review): unimplemented stub — the body is missing, so the
    # function currently returns None instead of the (center, count) pair
    # the docstring promises; mean_shift() below will fail on its results.
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,min_bin_freq=1, cluster_all=True, max_iter=300,
               n_jobs=None):
    """Pipeline of mean shift clustering (incomplete skeleton).

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
    bandwidth: the radius of the sphere
    seeds: starting points for the individual climbs
    bin_seeding: whether to use the bin seed algorithm to generate the initial seeds
    min_bin_freq: for each bin seed, the minimum number of points it must cover
    return:
        cluster_centers <class 'numpy.ndarray'> shape=[n_cluster, n_features], labels <class 'list'>, len = n_samples

    NOTE(review): this function is an unfinished skeleton with several
    defects to resolve before use:
      * get_bin_seeds is called with ``bin_seeding`` (a bool) in the
        ``bin_size`` position, and its result is only printed, never used;
      * ``seeds`` defaults to None, so the generator below raises TypeError
        unless the caller supplies seeds explicitly;
      * ``all_res`` and ``center_intensity_dict`` are never consumed, and
        ``cluster_centers`` / ``labels`` are never assigned, so the return
        statement raises NameError.
    """
    print(get_bin_seeds(X, bin_seeding))
    # find the points within the sphere
    nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
    ##########################################parallel computing############################
    center_intensity_dict = {}
    all_res = Parallel(n_jobs=n_jobs)(
        delayed(_mean_shift_single_seed)
        (seed, X, nbrs, max_iter) for seed in seeds)#
    ##########################################parallel computing############################
    return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Generate the initial seeds so the climbs can be parallelised.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
    bin_size: bin_size = bandwidth.
    min_bin_freq: minimum number of points a bin must cover to be kept
    return:
        bin_seeds: dict {seed (tuple): number of points that fell into that bin}
    """
    # Bin points: snap every sample to its nearest bin-grid coordinate.
    pseudo_labels = np.round(X / bin_size)
    bin_seeds = {}
    for label in pseudo_labels:
        label = tuple(label)
        # Count every point.  The original initialised the first hit of a
        # bin to 0, so every bin's count came out one short.
        bin_seeds[label] = bin_seeds.get(label, 0) + 1
    # Honour the documented min_bin_freq threshold (a no-op at the default
    # of 1, since every recorded bin has at least one point).
    if min_bin_freq > 1:
        bin_seeds = {seed: count for seed, count in bin_seeds.items()
                     if count >= min_bin_freq}
    return bin_seeds
|
996,312 | 0e1818fa38ea961ac4ebcaabc7043da830d13af2 | # Authors:
# Trevor Perrin
# Google - added reqCAs parameter
# Google (adapted by Sam Rushing and Marcelo Fernandez) - NPN support
# Google - FALLBACK_SCSV
# Dimitris Moraitis - Anon ciphersuites
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
# Hubert Kario - complete refactoring of key exchange methods, addition
# of ECDH support
#
# See the LICENSE file for legal information regarding use of this file.
"""
MAIN CLASS FOR TLS LITE (START HERE!).
"""
from __future__ import division
import random
import time
import socket
from itertools import chain
from .utils.compat import formatExceptionTrace
from .tlsrecordlayer import TLSRecordLayer
from .session import Session
from .constants import *
from .utils.cryptomath import derive_secret, getRandomBytes, HKDF_expand_label
from .utils.dns_utils import is_valid_hostname
from .utils.lists import getFirstMatching
from .errors import *
from .messages import *
from .mathtls import *
from .handshakesettings import HandshakeSettings, KNOWN_VERSIONS, CURVE_ALIASES
from .handshakehashes import HandshakeHashes
from .utils.tackwrapper import *
from .utils.deprecations import deprecated_params
from .keyexchange import KeyExchange, RSAKeyExchange, DHE_RSAKeyExchange, \
ECDHE_RSAKeyExchange, SRPKeyExchange, ADHKeyExchange, \
AECDHKeyExchange, FFDHKeyExchange, ECDHKeyExchange
from .handshakehelpers import HandshakeHelpers
from .utils.cipherfactory import createAESCCM, createAESCCM_8, \
createAESGCM, createCHACHA20
class TLSConnection(TLSRecordLayer):
"""
This class wraps a socket and provides TLS handshaking and data transfer.
To use this class, create a new instance, passing a connected
socket into the constructor. Then call some handshake function.
If the handshake completes without raising an exception, then a TLS
connection has been negotiated. You can transfer data over this
connection as if it were a socket.
This class provides both synchronous and asynchronous versions of
its key functions. The synchronous versions should be used when
writing single-or multi-threaded code using blocking sockets. The
asynchronous versions should be used when performing asynchronous,
event-based I/O with non-blocking sockets.
Asynchronous I/O is a complicated subject; typically, you should
not use the asynchronous functions directly, but should use some
framework like asyncore or Twisted which TLS Lite integrates with
(see
:py:class:`~.integration.tlsasyncdispatchermixin.TLSAsyncDispatcherMixIn`).
"""
    def __init__(self, sock):
        """Create a new TLSConnection instance.

        :param sock: The socket data will be transmitted on.  The
            socket should already be connected.  It may be in blocking or
            non-blocking mode.

        :type sock: socket.socket
        """
        TLSRecordLayer.__init__(self, sock)
        # the following are populated during the handshake; they stay at
        # their defaults until negotiation completes
        self.serverSigAlg = None
        self.ecdhCurve = None
        self.dhGroupSize = None
        self.extendedMasterSecret = False
        # client/server hello randoms, used later for PRF-based exporters
        self._clientRandom = bytearray(0)
        self._serverRandom = bytearray(0)
        self.next_proto = None
        # whether the CCS was already sent in the connection (for hello retry)
        self._ccs_sent = False
        # if and how big is the limit on records peer is willing to accept
        # used only for TLS 1.2 and earlier
        self._peer_record_size_limit = None
        # whether the peer advertised post-handshake authentication support
        self._pha_supported = False
    def keyingMaterialExporter(self, label, length=20):
        """Return keying material as described in RFC 5705

        :type label: bytearray
        :param label: label to be provided for the exporter

        :type length: int
        :param length: number of bytes of the keying material to export

        :raises ValueError: if the label collides with one used internally
            by the TLS PRF, or the negotiated version is older than TLS 1.0
        """
        # these labels are consumed by the TLS PRF itself; allowing them
        # would let an application extract internal session secrets
        if label in (b'server finished', b'client finished',
                     b'master secret', b'key expansion'):
            raise ValueError("Forbidden label value")
        if self.version < (3, 1):
            # SSLv3 and earlier are not supported by the exporter
            raise ValueError("Supported only in TLSv1.0 and later")
        elif self.version < (3, 3):
            # TLS 1.0 / 1.1: single MD5+SHA-1 based PRF
            return PRF(self.session.masterSecret, label,
                       self._clientRandom + self._serverRandom,
                       length)
        elif self.version == (3, 3):
            # TLS 1.2: PRF hash depends on the negotiated cipher suite
            if self.session.cipherSuite in CipherSuite.sha384PrfSuites:
                return PRF_1_2_SHA384(self.session.masterSecret, label,
                                      self._clientRandom + self._serverRandom,
                                      length)
            else:
                return PRF_1_2(self.session.masterSecret, label,
                               self._clientRandom + self._serverRandom,
                               length)
        elif self.version == (3, 4):
            # TLS 1.3 exporter (RFC 8446, section 7.5): derive a secret
            # from the exporter master secret, then expand with the hash of
            # an empty context
            prf = 'sha256'
            if self.session.cipherSuite in CipherSuite.sha384PrfSuites:
                prf = 'sha384'
            secret = derive_secret(self.session.exporterMasterSecret, label,
                                   None, prf)
            ctxhash = secureHash(bytearray(b''), prf)
            return HKDF_expand_label(secret, b"exporter", ctxhash, length, prf)
        else:
            raise AssertionError("Unknown protocol version")
#*********************************************************
# Client Handshake Functions
#*********************************************************
    @deprecated_params({"async_": "async"},
                       "'{old_name}' is a keyword in Python 3.7, use"
                       "'{new_name}'")
    def handshakeClientAnonymous(self, session=None, settings=None,
                                 checker=None, serverName=None,
                                 async_=False):
        """Perform an anonymous handshake in the role of client.

        This function performs an SSL or TLS handshake using an
        anonymous Diffie Hellman ciphersuite.

        Like any handshake function, this can be called on a closed
        TLS connection, or on a TLS connection that is already open.
        If called on an open connection it performs a re-handshake.

        If the function completes without raising an exception, the
        TLS connection will be open and available for data transfer.

        If an exception is raised, the connection will have been
        automatically closed (if it was ever open).

        :type session: ~tlslite.session.Session
        :param session: A TLS session to attempt to resume.  If the
            resumption does not succeed, a full handshake will be
            performed.

        :type settings: ~tlslite.handshakesettings.HandshakeSettings
        :param settings: Various settings which can be used to control
            the ciphersuites, certificate types, and SSL/TLS versions
            offered by the client.

        :type checker: ~tlslite.checker.Checker
        :param checker: A Checker instance.  This instance will be
            invoked to examine the other party's authentication
            credentials, if the handshake completes succesfully.

        :type serverName: string
        :param serverName: The ServerNameIndication TLS Extension.

        :type async_: bool
        :param async_: If False, this function will block until the
            handshake is completed.  If True, this function will return a
            generator.  Successive invocations of the generator will
            return 0 if it is waiting to read from the socket, 1 if it is
            waiting to write to the socket, or will raise StopIteration if
            the handshake operation is completed.

        :rtype: None or an iterable
        :returns: If 'async_' is True, a generator object will be
            returned.

        :raises socket.error: If a socket error occurs.
        :raises tlslite.errors.TLSAbruptCloseError: If the socket is closed
            without a preceding alert.
        :raises tlslite.errors.TLSAlert: If a TLS alert is signalled.
        :raises tlslite.errors.TLSAuthenticationError: If the checker
            doesn't like the other party's authentication credentials.
        """
        # NOTE: (True) is just True — the parentheses do not make a tuple
        handshaker = self._handshakeClientAsync(anonParams=(True),
                                                session=session,
                                                settings=settings,
                                                checker=checker,
                                                serverName=serverName)
        # asynchronous mode: hand the generator back for the caller to drive
        if async_:
            return handshaker
        # synchronous mode: drive the handshake generator to completion
        for result in handshaker:
            pass
@deprecated_params({"async_": "async"},
"'{old_name}' is a keyword in Python 3.7, use"
"'{new_name}'")
def handshakeClientSRP(self, username, password, session=None,
settings=None, checker=None,
reqTack=True, serverName=None,
async_=False):
"""Perform an SRP handshake in the role of client.
This function performs a TLS/SRP handshake. SRP mutually
authenticates both parties to each other using only a
username and password. This function may also perform a
combined SRP and server-certificate handshake, if the server
chooses to authenticate itself with a certificate chain in
addition to doing SRP.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
:type username: bytearray
:param username: The SRP username.
:type password: bytearray
:param password: The SRP password.
:type session: ~tlslite.session.Session
:param session: A TLS session to attempt to resume. This
session must be an SRP session performed with the same username
and password as were passed in. If the resumption does not
succeed, a full SRP handshake will be performed.
:type settings: ~tlslite.handshakesettings.HandshakeSettings
:param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
:type checker: ~tlslite.checker.Checker
:param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
credentials, if the handshake completes succesfully.
:type reqTack: bool
:param reqTack: Whether or not to send a "tack" TLS Extension,
requesting the server return a TackExtension if it has one.
:type serverName: string
:param serverName: The ServerNameIndication TLS Extension.
:type async_: bool
:param async_: If False, this function will block until the
handshake is completed. If True, this function will return a
generator. Successive invocations of the generator will
return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or will raise StopIteration if
the handshake operation is completed.
:rtype: None or an iterable
:returns: If 'async_' is True, a generator object will be
returned.
:raises socket.error: If a socket error occurs.
:raises tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
:raises tlslite.errors.TLSAlert: If a TLS alert is signalled.
:raises tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
# TODO add deprecation warning
if isinstance(username, str):
username = bytearray(username, 'utf-8')
if isinstance(password, str):
password = bytearray(password, 'utf-8')
handshaker = self._handshakeClientAsync(srpParams=(username, password),
session=session, settings=settings, checker=checker,
reqTack=reqTack, serverName=serverName)
# The handshaker is a Python Generator which executes the handshake.
# It allows the handshake to be run in a "piecewise", asynchronous
# fashion, returning 1 when it is waiting to able to write, 0 when
# it is waiting to read.
#
# If 'async_' is True, the generator is returned to the caller,
# otherwise it is executed to completion here.
if async_:
return handshaker
for result in handshaker:
pass
@deprecated_params({"async_": "async"},
"'{old_name}' is a keyword in Python 3.7, use"
"'{new_name}'")
def handshakeClientCert(self, certChain=None, privateKey=None,
session=None, settings=None, checker=None,
nextProtos=None, reqTack=True, serverName=None,
async_=False, alpn=None):
"""Perform a certificate-based handshake in the role of client.
This function performs an SSL or TLS handshake. The server
will authenticate itself using an X.509 certificate
chain. If the handshake succeeds, the server's certificate
chain will be stored in the session's serverCertChain attribute.
Unless a checker object is passed in, this function does no
validation or checking of the server's certificate chain.
If the server requests client authentication, the
client will send the passed-in certificate chain, and use the
passed-in private key to authenticate itself. If no
certificate chain and private key were passed in, the client
will attempt to proceed without client authentication. The
server may or may not allow this.
If the function completes without raising an exception, the
TLS connection will be open and available for data transfer.
If an exception is raised, the connection will have been
automatically closed (if it was ever open).
:type certChain: ~tlslite.x509certchain.X509CertChain
:param certChain: The certificate chain to be used if the
server requests client authentication.
:type privateKey: ~tlslite.utils.rsakey.RSAKey
:param privateKey: The private key to be used if the server
requests client authentication.
:type session: ~tlslite.session.Session
:param session: A TLS session to attempt to resume. If the
resumption does not succeed, a full handshake will be
performed.
:type settings: ~tlslite.handshakesettings.HandshakeSettings
:param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
:type checker: ~tlslite.checker.Checker
:param checker: A Checker instance. This instance will be
invoked to examine the other party's authentication
credentials, if the handshake completes succesfully.
:type nextProtos: list of str
:param nextProtos: A list of upper layer protocols ordered by
preference, to use in the Next-Protocol Negotiation Extension.
:type reqTack: bool
:param reqTack: Whether or not to send a "tack" TLS Extension,
requesting the server return a TackExtension if it has one.
:type serverName: string
:param serverName: The ServerNameIndication TLS Extension.
:type async_: bool
:param async_: If False, this function will block until the
handshake is completed. If True, this function will return a
generator. Successive invocations of the generator will
return 0 if it is waiting to read from the socket, 1 if it is
waiting to write to the socket, or will raise StopIteration if
the handshake operation is completed.
:type alpn: list of bytearrays
:param alpn: protocol names to advertise to server as supported by
client in the Application Layer Protocol Negotiation extension.
Example items in the array include b'http/1.1' or b'h2'.
:rtype: None or an iterable
:returns: If 'async_' is True, a generator object will be
returned.
:raises socket.error: If a socket error occurs.
:raises tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
:raises tlslite.errors.TLSAlert: If a TLS alert is signalled.
:raises tlslite.errors.TLSAuthenticationError: If the checker
doesn't like the other party's authentication credentials.
"""
handshaker = \
self._handshakeClientAsync(certParams=(certChain, privateKey),
session=session, settings=settings,
checker=checker,
serverName=serverName,
nextProtos=nextProtos,
reqTack=reqTack,
alpn=alpn)
# The handshaker is a Python Generator which executes the handshake.
# It allows the handshake to be run in a "piecewise", asynchronous
# fashion, returning 1 when it is waiting to able to write, 0 when
# it is waiting to read.
#
# If 'async_' is True, the generator is returned to the caller,
# otherwise it is executed to completion here.
if async_:
return handshaker
for result in handshaker:
pass
def _handshakeClientAsync(self, srpParams=(), certParams=(), anonParams=(),
session=None, settings=None, checker=None,
nextProtos=None, serverName=None, reqTack=True,
alpn=None):
handshaker = self._handshakeClientAsyncHelper(srpParams=srpParams,
certParams=certParams,
anonParams=anonParams,
session=session,
settings=settings,
serverName=serverName,
nextProtos=nextProtos,
reqTack=reqTack,
alpn=alpn)
for result in self._handshakeWrapperAsync(handshaker, checker):
yield result
    def _handshakeClientAsyncHelper(self, srpParams, certParams, anonParams,
                                    session, settings, serverName, nextProtos,
                                    reqTack, alpn):
        """Generator that drives the client side of the handshake.

        Exactly one of *srpParams*, *certParams*, *anonParams* may be
        non-empty.  Yields 0 while waiting to read from the socket and 1
        while waiting to write; returns once the handshake is complete.
        """
        self._handshakeStart(client=True)
        #Unpack parameters
        srpUsername = None  # srpParams[0]
        password = None  # srpParams[1]
        clientCertChain = None  # certParams[0]
        privateKey = None  # certParams[1]
        # Allow only one of (srpParams, certParams, anonParams)
        if srpParams:
            assert(not certParams)
            assert(not anonParams)
            srpUsername, password = srpParams
        if certParams:
            assert(not srpParams)
            assert(not anonParams)
            clientCertChain, privateKey = certParams
        if anonParams:
            assert(not srpParams)
            assert(not certParams)
        #Validate parameters
        if srpUsername and not password:
            raise ValueError("Caller passed a username but no password")
        if password and not srpUsername:
            raise ValueError("Caller passed a password but no username")
        if clientCertChain and not privateKey:
            raise ValueError("Caller passed a cert_chain but no privateKey")
        if privateKey and not clientCertChain:
            raise ValueError("Caller passed a privateKey but no cert_chain")
        # Tack is only offered when tackpy is importable AND the caller
        # explicitly enabled the experimental extension in settings.
        if reqTack:
            if not tackpyLoaded:
                reqTack = False
            if not settings or not settings.useExperimentalTackExtension:
                reqTack = False
        if nextProtos is not None:
            if len(nextProtos) == 0:
                raise ValueError("Caller passed no nextProtos")
        if alpn is not None and not alpn:
            raise ValueError("Caller passed empty alpn list")
        # reject invalid hostnames but accept empty/None ones
        if serverName and not is_valid_hostname(serverName):
            raise ValueError("Caller provided invalid server host name: {0}"
                             .format(serverName))
        # Validates the settings and filters out any unsupported ciphers
        # or crypto libraries that were requested
        if not settings:
            settings = HandshakeSettings()
        settings = settings.validate()
        self.sock.padding_cb = settings.padding_cb
        if clientCertChain:
            if not isinstance(clientCertChain, X509CertChain):
                raise ValueError("Unrecognized certificate type")
            if "x509" not in settings.certificateTypes:
                raise ValueError("Client certificate doesn't match "\
                                 "Handshake Settings")
        if session:
            # session.valid() ensures session is resumable and has
            # non-empty sessionID
            if not session.valid():
                session = None #ignore non-resumable sessions...
            elif session.resumable:
                if session.srpUsername != srpUsername:
                    raise ValueError("Session username doesn't match")
                if session.serverName != serverName:
                    raise ValueError("Session servername doesn't match")
        #Add Faults to parameters
        if srpUsername and self.fault == Fault.badUsername:
            srpUsername += bytearray(b"GARBAGE")
        if password and self.fault == Fault.badPassword:
            password += bytearray(b"GARBAGE")
        # Tentatively set the client's record version.
        # We'll use this for the ClientHello, and if an error occurs
        # parsing the Server Hello, we'll use this version for the response
        # in TLS 1.3 it always needs to be set to TLS 1.0
        self.version = \
            (3, 1) if settings.maxVersion > (3, 3) else settings.maxVersion
        # OK Start sending messages!
        # *****************************
        # Send the ClientHello.
        for result in self._clientSendClientHello(settings, session,
                                                  srpUsername, srpParams,
                                                  certParams, anonParams,
                                                  serverName, nextProtos,
                                                  reqTack, alpn):
            if result in (0,1): yield result
            else: break
        clientHello = result
        #Get the ServerHello.
        for result in self._clientGetServerHello(settings, session,
                                                 clientHello):
            if result in (0,1): yield result
            else: break
        serverHello = result
        cipherSuite = serverHello.cipher_suite
        # Check the serverHello.random if it includes the downgrade protection
        # values as described in RFC8446 section 4.1.3
        # For TLS1.3
        if (settings.maxVersion > (3, 3) and self.version <= (3, 3)) and \
                (serverHello.random[-8:] == TLS_1_2_DOWNGRADE_SENTINEL or
                 serverHello.random[-8:] == TLS_1_1_DOWNGRADE_SENTINEL):
            for result in self._sendError(AlertDescription.illegal_parameter,
                                          "Connection terminated because "
                                          "of downgrade protection."):
                yield result
        # For TLS1.2
        if settings.maxVersion == (3, 3) and self.version < (3, 3) and \
                serverHello.random[-8:] == TLS_1_1_DOWNGRADE_SENTINEL:
            for result in self._sendError(AlertDescription.illegal_parameter,
                                          "Connection terminated because "
                                          "of downgrade protection."):
                yield result
        # if we're doing tls1.3, use the new code as the negotiation is much
        # different
        ext = serverHello.getExtension(ExtensionType.supported_versions)
        if ext and ext.version > (3, 3):
            for result in self._clientTLS13Handshake(settings, session,
                                                     clientHello,
                                                     clientCertChain,
                                                     privateKey,
                                                     serverHello):
                if result in (0, 1):
                    yield result
                else:
                    break
            if result in ["finished", "resumed_and_finished"]:
                self._handshakeDone(resumed=(result == "resumed_and_finished"))
                self._serverRandom = serverHello.random
                self._clientRandom = clientHello.random
                # TLS 1.3 path is fully handled; nothing more to do here.
                return
            else:
                raise Exception("unexpected return")
        # Choose a matching Next Protocol from server list against ours
        # (string or None)
        nextProto = self._clientSelectNextProto(nextProtos, serverHello)
        # Check if server selected encrypt-then-MAC
        if serverHello.getExtension(ExtensionType.encrypt_then_mac):
            self._recordLayer.encryptThenMAC = True
        if serverHello.getExtension(ExtensionType.extended_master_secret):
            self.extendedMasterSecret = True
        #If the server elected to resume the session, it is handled here.
        for result in self._clientResume(session, serverHello,
                                         clientHello.random,
                                         settings.cipherImplementations,
                                         nextProto, settings):
            if result in (0,1): yield result
            else: break
        if result == "resumed_and_finished":
            self._handshakeDone(resumed=True)
            self._serverRandom = serverHello.random
            self._clientRandom = clientHello.random
            # alpn protocol is independent of resumption and renegotiation
            # and needs to be negotiated every time
            alpnExt = serverHello.getExtension(ExtensionType.alpn)
            if alpnExt:
                session.appProto = alpnExt.protocol_names[0]
            return
        #If the server selected an SRP ciphersuite, the client finishes
        #reading the post-ServerHello messages, then derives a
        #premasterSecret and sends a corresponding ClientKeyExchange.
        if cipherSuite in CipherSuite.srpAllSuites:
            keyExchange = SRPKeyExchange(cipherSuite, clientHello,
                                         serverHello, None, None,
                                         srpUsername=srpUsername,
                                         password=password,
                                         settings=settings)
        #If the server selected an anonymous ciphersuite, the client
        #finishes reading the post-ServerHello messages.
        elif cipherSuite in CipherSuite.dhAllSuites:
            keyExchange = DHE_RSAKeyExchange(cipherSuite, clientHello,
                                             serverHello, None)
        elif cipherSuite in CipherSuite.ecdhAllSuites:
            acceptedCurves = self._curveNamesToList(settings)
            keyExchange = ECDHE_RSAKeyExchange(cipherSuite, clientHello,
                                               serverHello, None,
                                               acceptedCurves)
        #If the server selected a certificate-based RSA ciphersuite,
        #the client finishes reading the post-ServerHello messages. If
        #a CertificateRequest message was sent, the client responds with
        #a Certificate message containing its certificate chain (if any),
        #and also produces a CertificateVerify message that signs the
        #ClientKeyExchange.
        else:
            keyExchange = RSAKeyExchange(cipherSuite, clientHello,
                                         serverHello, None)
        # we'll send few messages here, send them in single TCP packet
        self.sock.buffer_writes = True
        for result in self._clientKeyExchange(settings, cipherSuite,
                                              clientCertChain,
                                              privateKey,
                                              serverHello.certificate_type,
                                              serverHello.tackExt,
                                              clientHello.random,
                                              serverHello.random,
                                              keyExchange):
            if result in (0, 1):
                yield result
            else: break
        (premasterSecret, serverCertChain, clientCertChain,
         tackExt) = result
        #After having previously sent a ClientKeyExchange, the client now
        #initiates an exchange of Finished messages.
        # socket buffering is turned off in _clientFinished
        for result in self._clientFinished(premasterSecret,
                                           clientHello.random,
                                           serverHello.random,
                                           cipherSuite,
                                           settings.cipherImplementations,
                                           nextProto, settings):
            if result in (0,1): yield result
            else: break
        masterSecret = result
        # check if an application layer protocol was negotiated
        alpnProto = None
        alpnExt = serverHello.getExtension(ExtensionType.alpn)
        if alpnExt:
            alpnProto = alpnExt.protocol_names[0]
        # Create the session object which is used for resumptions
        self.session = Session()
        self.session.create(masterSecret, serverHello.session_id, cipherSuite,
                            srpUsername, clientCertChain, serverCertChain,
                            tackExt, (serverHello.tackExt is not None),
                            serverName,
                            encryptThenMAC=self._recordLayer.encryptThenMAC,
                            extendedMasterSecret=self.extendedMasterSecret,
                            appProto=alpnProto,
                            # NOTE it must be a reference not a copy
                            tickets=self.tickets)
        self._handshakeDone(resumed=False)
        self._serverRandom = serverHello.random
        self._clientRandom = clientHello.random
@staticmethod
def _get_GREASE_version():
n = random.randint(1, 10)
ns = n * 16 + 10
value = (ns, ns)
return value
@staticmethod
def _get_GREASE():
values = [0x1a1a, 0x2a2a, 0x3a3a, 0x4a4a, 0x5a5a, 0x6a6a, 0x7a7a, 0x8a8a, 0x9a9a, 0xaaaa, 0xbaba]
return random.choice(values)
    def _clientSendClientHello(self, settings, session, srpUsername,
                               srpParams, certParams, anonParams,
                               serverName, nextProtos, reqTack, alpn):
        """Assemble and send the ClientHello; yield socket-wait states.

        Yields 0/1 while waiting on the socket and finally yields the
        ClientHello object that was sent.

        NOTE(review): this method deviates from upstream tlslite-ng -- the
        cipher-suite list, the extension set and their ordering are
        hard-coded (with GREASE values interleaved), which looks like
        deliberate emulation of a browser-style TLS fingerprint; confirm
        before assuming the settings-driven suite selection (left below as
        commented-out code) still applies.
        """
        #Initialize acceptable ciphersuites
        # cipherSuites = [CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
        # if srpParams:
        #     cipherSuites += CipherSuite.getSrpAllSuites(settings)
        # elif certParams:
        #     cipherSuites += CipherSuite.getTLS13Suites(settings)
        #     cipherSuites += CipherSuite.getEcdsaSuites(settings)
        #     cipherSuites += CipherSuite.getEcdheCertSuites(settings)
        #     cipherSuites += CipherSuite.getDheCertSuites(settings)
        #     cipherSuites += CipherSuite.getCertSuites(settings)
        #     cipherSuites += CipherSuite.getDheDsaSuites(settings)
        # elif anonParams:
        #     cipherSuites += CipherSuite.getEcdhAnonSuites(settings)
        #     cipherSuites += CipherSuite.getAnonSuites(settings)
        # else:
        #     assert False
        # Fixed cipher-suite order, led by a random GREASE value.
        cipherSuites = [
            self._get_GREASE(),
            CipherSuite.TLS_AES_128_GCM_SHA256,  # 1301
            CipherSuite.TLS_AES_256_GCM_SHA384,  # 1302
            CipherSuite.TLS_CHACHA20_POLY1305_SHA256,  # 1303
            CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,  # C02B
            CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,  # C02F
            CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,  # C02C
            CipherSuite.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,  # C030
            CipherSuite.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,  # cca9
            CipherSuite.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,  # cca8
            CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,  # c013
            CipherSuite.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,  # c014
            CipherSuite.TLS_RSA_WITH_AES_128_GCM_SHA256,  # 009c
            CipherSuite.TLS_RSA_WITH_AES_256_GCM_SHA384,  # 009d
            CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,  # 002f
            CipherSuite.TLS_RSA_WITH_AES_256_CBC_SHA,  # 0035
        ]
        #Add any SCSVs. These are not real cipher suites, but signaling
        #values which reuse the cipher suite field in the ClientHello.
        wireCipherSuites = list(cipherSuites)
        if settings.sendFallbackSCSV:
            wireCipherSuites.append(CipherSuite.TLS_FALLBACK_SCSV)
        #Initialize acceptable certificate types
        certificateTypes = None  # settings.getCertificateTypes()
        extensions = []
        # Leading GREASE extension with an empty body.
        extensions.append(TLSExtension().\
            create(self._get_GREASE(),
                   bytearray(0)))
        if serverName:
            serverName = bytearray(serverName, "utf-8")
            sni_ext = SNIExtension().create(serverName)
            extensions.append(sni_ext)
        extensions.append(TLSExtension().create(ExtensionType.extended_master_secret, bytearray(0)))
        # renegotiation_info with zero-length payload (single 0x00 byte).
        extensions.append(TLSExtension().create(ExtensionType.renegotiation_info, bytearray(1)))
        # supported_groups: GREASE, x25519 (0x001d), secp256r1 (0x0017),
        # secp384r1 (0x0018).
        groups = [self._get_GREASE(), 0x001d, 0x0017, 0x0018]
        extensions.append(SupportedGroupsExtension().create(groups))
        extensions.append(ECPointFormatsExtension().create([ECPointFormat.uncompressed]))
        extensions.append(TLSExtension().create(ExtensionType.session_ticket, bytearray(0)))
        extensions.append(ALPNExtension().create(alpn))
        extensions.append(StatusRequestExtension().create())
        # In TLS1.2 advertise support for additional signature types
        # sigList = self._sigHashesToList(settings)
        # assert len(sigList) > 0
        # Hard-coded (hash, signature) pairs, e.g. (4, 3) is
        # ecdsa_secp256r1_sha256 and (8, 4) is rsa_pss_rsae_sha256.
        sigList = [
            (4, 3),
            (8, 4),
            (4, 1),
            (5, 3),
            (8, 5),
            (5, 1),
            (8, 6),
            (6, 1),
        ]
        extensions.append(SignatureAlgorithmsExtension().create(sigList))
        extensions.append(TLSExtension().create(ExtensionType.signed_certificate_timestamp, bytearray(0)))
        shares = []
        # The GREASE key share carries a single zero byte as its "key".
        grease_key_share = KeyShareEntry().create(self._get_GREASE(), bytearray(1))
        shares.append(grease_key_share)
        for group_name in ["x25519"]:
            group_id = getattr(GroupName, group_name)
            key_share = self._genKeyShareEntry(group_id, (3, 4))
            shares.append(key_share)
        # if TLS 1.3 is enabled, key_share must always be sent
        # (unless only static PSK is used)
        extensions.append(ClientKeyShareExtension().create(shares))
        # add info on types of PSKs supported (also used for
        # NewSessionTicket so send basically always)
        psk_modes = ["psk_dhe_ke",]
        ext = PskKeyExchangeModesExtension().create([getattr(PskKeyExchangeMode, i) for i in psk_modes])
        extensions.append(ext)
        # Offer a GREASE version first, then TLS 1.3 and TLS 1.2.
        versions = [self._get_GREASE_version(), (3, 4), (3, 3)]
        extensions.append(SupportedVersionsExtension().create(versions))
        algorithms = [
            (0, 2)  # brotli
        ]
        extensions.append(CompressCertificateExtension().create(algorithms))
        # NOTE(review): the caller-supplied `alpn` parameter is clobbered
        # here -- the ApplicationSettings extension always advertises only
        # "h2" regardless of what the caller requested.
        alpn = [bytearray(b"h2")]
        extensions.append(ApplicationSettingsExtension().create(alpn))
        # Trailing GREASE extension (one-byte body).
        GREASE_ID = self._get_GREASE()
        extensions.append(TLSExtension().create(GREASE_ID, bytearray(1)))
        # when TLS 1.3 advertised, add key shares, set fake session_id
        # shares = None
        session_id = getRandomBytes(32)
        # don't send empty list of extensions or extensions in SSLv3
        if not extensions or settings.maxVersion == (3, 0):
            extensions = None
        sent_version = min(settings.maxVersion, (3, 3))
        #Either send ClientHello (with a resumable session)...
        # if session and session.sessionID:
        #     #If it's resumable, then its
        #     #ciphersuite must be one of the acceptable ciphersuites
        #     if session.cipherSuite not in cipherSuites:
        #         raise ValueError("Session's cipher suite not consistent "\
        #                          "with parameters")
        #     else:
        #         clientHello = ClientHello()
        #         clientHello.create(sent_version, getRandomBytes(32),
        #                            session.sessionID, wireCipherSuites,
        #                            certificateTypes,
        #                            session.srpUsername,
        #                            reqTack, nextProtos is not None,
        #                            session.serverName,
        #                            extensions=extensions)
        #
        # #Or send ClientHello (without)
        # else:
        # NOTE(review): session resumption via session_id is disabled above;
        # a fresh random session_id is always sent instead.
        clientHello = ClientHello()
        clientHello.create(sent_version, getRandomBytes(32),
                           session_id, wireCipherSuites,
                           certificateTypes,
                           srpUsername,
                           reqTack, nextProtos is not None,
                           serverName,
                           extensions=extensions)
        # Check if padding extension should be added
        # we want to add extensions even when using just SSLv3
        if settings.usePaddingExtension:
            HandshakeHelpers.alignClientHelloPadding(clientHello)
        # because TLS 1.3 PSK is sent in ClientHello and signs the ClientHello
        # we need to send it as the last extension
        if (settings.pskConfigs or (session and session.tickets)) \
                and settings.maxVersion >= (3, 4):
            ext = PreSharedKeyExtension()
            idens = []
            binders = []
            # if we have a previous session, include it in PSKs too
            if session and session.tickets:
                now = time.time()
                # clean the list from obsolete ones
                # RFC says that the tickets MUST NOT be cached longer than
                # 7 days
                session.tickets[:] = (i for i in session.tickets if
                                      i.time + i.ticket_lifetime > now and
                                      i.time + 7 * 24 * 60 * 60 > now)
                if session.tickets:
                    ticket = session.tickets[0]
                    # ticket.time is in seconds while the obfuscated time
                    # is in ms
                    ticket_time = int(
                        time.time() * 1000 -
                        ticket.time * 1000 +
                        ticket.ticket_age_add) % 2**32
                    idens.append(PskIdentity().create(ticket.ticket,
                                                      ticket_time))
                    binder_len = 48 if session.cipherSuite in \
                        CipherSuite.sha384PrfSuites else 32
                    binders.append(bytearray(binder_len))
            for psk in settings.pskConfigs:
                # skip PSKs with no identities as they're TLS1.3 incompatible
                if not psk[0]:
                    continue
                idens.append(PskIdentity().create(psk[0], 0))
                psk_hash = psk[2] if len(psk) > 2 else 'sha256'
                assert psk_hash in set(['sha256', 'sha384'])
                # create fake binder values to create correct length fields
                binders.append(bytearray(32 if psk_hash == 'sha256' else 48))
            if idens:
                ext.create(idens, binders)
                clientHello.extensions.append(ext)
                # for HRR case we'll need 1st CH and HRR in handshake hashes,
                # so pass them in, truncated CH will be added by the helpers to
                # the copy of the hashes
                HandshakeHelpers.update_binders(clientHello,
                                                self._handshake_hash,
                                                settings.pskConfigs,
                                                session.tickets if session
                                                else None,
                                                session.resumptionMasterSecret
                                                if session else None)
        for result in self._sendMsg(clientHello):
            yield result
        yield clientHello
    def _clientGetServerHello(self, settings, session, clientHello):
        """Receive and validate the ServerHello, handling HelloRetryRequest.

        Yields 0/1 while waiting on the socket and finally yields the
        validated ServerHello object.  Protocol violations are reported
        through self._sendError (which raises after sending the alert).
        """
        # Snapshot of the transcript before the ServerHello; needed for the
        # synthetic "message_hash" construction in the HRR case below.
        client_hello_hash = self._handshake_hash.copy()
        for result in self._getMsg(ContentType.handshake,
                                   HandshakeType.server_hello):
            if result in (0,1): yield result
            else: break
        hello_retry = None
        ext = result.getExtension(ExtensionType.supported_versions)
        # A ServerHello whose random equals the fixed HRR value is a
        # HelloRetryRequest (RFC 8446, section 4.1.3).
        if result.random == TLS_1_3_HRR and ext and ext.version > (3, 3):
            self.version = ext.version
            hello_retry = result
            # create synthetic handshake hash
            prf_name, prf_size = self._getPRFParams(hello_retry.cipher_suite)
            self._handshake_hash = HandshakeHashes()
            writer = Writer()
            writer.add(HandshakeType.message_hash, 1)
            writer.addVarSeq(client_hello_hash.digest(prf_name), 1, 3)
            self._handshake_hash.update(writer.bytes)
            self._handshake_hash.update(hello_retry.write())
            # check if all extensions in the HRR were present in client hello
            ch_ext_types = set(i.extType for i in clientHello.extensions)
            ch_ext_types.add(ExtensionType.cookie)
            bad_ext = next((i for i in hello_retry.extensions
                            if i.extType not in ch_ext_types), None)
            if bad_ext:
                bad_ext = ExtensionType.toStr(bad_ext)
                for result in self._sendError(AlertDescription
                                              .unsupported_extension,
                                              ("Unexpected extension in HRR: "
                                               "{0}").format(bad_ext)):
                    yield result
            # handle cookie extension
            cookie = hello_retry.getExtension(ExtensionType.cookie)
            if cookie:
                clientHello.addExtension(cookie)
            # handle key share extension
            sr_key_share_ext = hello_retry.getExtension(ExtensionType
                                                        .key_share)
            if sr_key_share_ext:
                group_id = sr_key_share_ext.selected_group
                # check if group selected by server is valid
                groups_ext = clientHello.getExtension(ExtensionType
                                                      .supported_groups)
                if group_id not in groups_ext.groups:
                    for result in self._sendError(AlertDescription
                                                  .illegal_parameter,
                                                  "Server selected group we "
                                                  "did not advertise"):
                        yield result
                cl_key_share_ext = clientHello.getExtension(ExtensionType
                                                            .key_share)
                # check if the server didn't ask for a group we already sent
                if next((entry for entry in cl_key_share_ext.client_shares
                         if entry.group == group_id), None):
                    for result in self._sendError(AlertDescription
                                                  .illegal_parameter,
                                                  "Server selected group we "
                                                  "did sent the key share "
                                                  "for"):
                        yield result
                key_share = self._genKeyShareEntry(group_id, (3, 4))
                # old key shares need to be removed
                cl_key_share_ext.client_shares = [key_share]
            if not cookie and not sr_key_share_ext:
                # HRR did not result in change to Client Hello
                for result in self._sendError(AlertDescription.
                                              illegal_parameter,
                                              "Received HRR did not cause "
                                              "update to Client Hello"):
                    yield result
            if clientHello.session_id != hello_retry.session_id:
                for result in self._sendError(
                        AlertDescription.illegal_parameter,
                        "Received HRR session_id does not match the one in "
                        "ClientHello"):
                    yield result
            ext = clientHello.getExtension(ExtensionType.pre_shared_key)
            if ext:
                # move the extension to end (in case extension like cookie was
                # added
                clientHello.extensions.remove(ext)
                clientHello.extensions.append(ext)
                # binders must be recomputed over the updated transcript
                HandshakeHelpers.update_binders(clientHello,
                                                self._handshake_hash,
                                                settings.pskConfigs,
                                                session.tickets if session
                                                else None,
                                                session.resumptionMasterSecret
                                                if session else None)
            # resend the client hello with performed changes
            msgs = []
            if clientHello.session_id:
                ccs = ChangeCipherSpec().create()
                msgs.append(ccs)
            msgs.append(clientHello)
            for result in self._sendMsgs(msgs):
                yield result
            self._ccs_sent = True
            # retry getting server hello
            for result in self._getMsg(ContentType.handshake,
                                       HandshakeType.server_hello):
                if result in (0, 1):
                    yield result
                else:
                    break
        serverHello = result
        #Get the server version. Do this before anything else, so any
        #error alerts will use the server's version
        real_version = serverHello.server_version
        if serverHello.server_version >= (3, 3):
            # supported_versions (when present) overrides the legacy field
            ext = serverHello.getExtension(ExtensionType.supported_versions)
            if ext:
                real_version = ext.version
        self.version = real_version
        #Check ServerHello
        if hello_retry and \
                hello_retry.cipher_suite != serverHello.cipher_suite:
            for result in self._sendError(AlertDescription.illegal_parameter,
                                          "server selected different cipher "
                                          "in HRR and Server Hello"):
                yield result
        if real_version < settings.minVersion:
            for result in self._sendError(
                    AlertDescription.protocol_version,
                    "Too old version: {0} (min: {1})"
                    .format(real_version, settings.minVersion)):
                yield result
        if real_version > settings.maxVersion and \
                real_version not in settings.versions:
            for result in self._sendError(
                    AlertDescription.protocol_version,
                    "Too new version: {0} (max: {1})"
                    .format(real_version, settings.maxVersion)):
                yield result
        if real_version > (3, 3) and \
                serverHello.session_id != clientHello.session_id:
            for result in self._sendError(
                    AlertDescription.illegal_parameter,
                    "Received ServerHello session_id does not match the one "
                    "in ClientHello"):
                yield result
        # the server must pick a suite we offered and that is valid for the
        # negotiated protocol version
        cipherSuites = CipherSuite.filterForVersion(clientHello.cipher_suites,
                                                    minVersion=real_version,
                                                    maxVersion=real_version)
        if serverHello.cipher_suite not in cipherSuites:
            for result in self._sendError(\
                    AlertDescription.illegal_parameter,
                    "Server responded with incorrect ciphersuite"):
                yield result
        if serverHello.certificate_type not in clientHello.certificate_types:
            for result in self._sendError(\
                    AlertDescription.illegal_parameter,
                    "Server responded with incorrect certificate type"):
                yield result
        if serverHello.compression_method != 0:
            for result in self._sendError(\
                    AlertDescription.illegal_parameter,
                    "Server responded with incorrect compression method"):
                yield result
        if serverHello.tackExt:
            if not clientHello.tack:
                for result in self._sendError(\
                        AlertDescription.illegal_parameter,
                        "Server responded with unrequested Tack Extension"):
                    yield result
            if not serverHello.tackExt.verifySignatures():
                for result in self._sendError(\
                        AlertDescription.decrypt_error,
                        "TackExtension contains an invalid signature"):
                    yield result
        if serverHello.next_protos and not clientHello.supports_npn:
            for result in self._sendError(\
                    AlertDescription.illegal_parameter,
                    "Server responded with unrequested NPN Extension"):
                yield result
        if not serverHello.getExtension(ExtensionType.extended_master_secret)\
                and settings.requireExtendedMasterSecret:
            for result in self._sendError(
                    AlertDescription.insufficient_security,
                    "Negotiation of Extended master Secret failed"):
                yield result
        alpnExt = serverHello.getExtension(ExtensionType.alpn)
        if alpnExt:
            # the server must select exactly one protocol, and one we offered
            if not alpnExt.protocol_names or \
                    len(alpnExt.protocol_names) != 1:
                for result in self._sendError(
                        AlertDescription.illegal_parameter,
                        "Server responded with invalid ALPN extension"):
                    yield result
            clntAlpnExt = clientHello.getExtension(ExtensionType.alpn)
            if not clntAlpnExt:
                for result in self._sendError(
                        AlertDescription.unsupported_extension,
                        "Server sent ALPN extension without one in "
                        "client hello"):
                    yield result
            if alpnExt.protocol_names[0] not in clntAlpnExt.protocol_names:
                for result in self._sendError(
                        AlertDescription.illegal_parameter,
                        "Server selected ALPN protocol we did not advertise"):
                    yield result
        heartbeat_ext = serverHello.getExtension(ExtensionType.heartbeat)
        if heartbeat_ext:
            if not settings.use_heartbeat_extension:
                for result in self._sendError(
                        AlertDescription.unsupported_extension,
                        "Server sent Heartbeat extension without one in "
                        "client hello"):
                    yield result
            if heartbeat_ext.mode == HeartbeatMode.PEER_ALLOWED_TO_SEND and \
                    settings.heartbeat_response_callback:
                self.heartbeat_can_send = True
                self.heartbeat_response_callback = settings.\
                    heartbeat_response_callback
            elif heartbeat_ext.mode == HeartbeatMode.\
                    PEER_NOT_ALLOWED_TO_SEND or not settings.\
                    heartbeat_response_callback:
                self.heartbeat_can_send = False
            else:
                for result in self._sendError(
                        AlertDescription.illegal_parameter,
                        "Server responded with invalid Heartbeat extension"):
                    yield result
            self.heartbeat_supported = True
        size_limit_ext = serverHello.getExtension(
            ExtensionType.record_size_limit)
        if size_limit_ext:
            if size_limit_ext.record_size_limit is None:
                for result in self._sendError(
                        AlertDescription.decode_error,
                        "Malformed record_size_limit extension"):
                    yield result
            # if we got the extension in ServerHello it means we're doing
            # TLS 1.2 so the max value for extension is 2^14
            if not 64 <= size_limit_ext.record_size_limit <= 2**14:
                for result in self._sendError(
                        AlertDescription.illegal_parameter,
                        "Server responed with invalid value in "
                        "record_size_limit extension"):
                    yield result
            self._peer_record_size_limit = size_limit_ext.record_size_limit
        yield serverHello
@staticmethod
def _getKEX(group, version):
    """Return a key-exchange object matching the negotiated group.

    Finite-field groups are served by the FFDH implementation,
    every other group is treated as an elliptic curve.
    """
    kex_cls = FFDHKeyExchange if group in GroupName.allFF \
        else ECDHKeyExchange
    return kex_cls(group, version)
@classmethod
def _genKeyShareEntry(cls, group, version):
    """Create a KeyShareEntry carrying a freshly generated key pair."""
    key_exchange = cls._getKEX(group, version)
    priv = key_exchange.get_random_private_key()
    pub = key_exchange.calc_public_value(priv)
    return KeyShareEntry().create(group, pub, priv)
@staticmethod
def _getPRFParams(cipher_suite):
    """Return the PRF hash name and digest size for a cipher suite.

    SHA-384 based suites produce 48 byte outputs; every remaining
    suite uses SHA-256 with 32 byte outputs.
    """
    uses_sha384 = cipher_suite in CipherSuite.sha384PrfSuites
    return ('sha384', 48) if uses_sha384 else ('sha256', 32)
def _clientTLS13Handshake(self, settings, session, clientHello,
                          clientCertChain, privateKey, serverHello):
    """Perform the TLS 1.3 handshake as a client (RFC 8446).

    :param settings: HandshakeSettings controlling negotiation
    :param session: session being resumed; provides the resumption
        master secret and tickets for PSK binder calculation
    :param clientHello: the ClientHello message we already sent
    :param clientCertChain: chain for optional client authentication
    :param privateKey: key for optional client authentication
    :param serverHello: the ServerHello received from the peer

    Yields 0 (wait-to-read) or 1 (wait-to-write) while blocked on the
    socket; the final yielded value is ``"finished"`` for a full
    handshake or ``"resumed_and_finished"`` for a PSK resumption.
    """
    prfName, prf_size = self._getPRFParams(serverHello.cipher_suite)

    # we have client and server hello in TLS 1.3 so we have the necessary
    # key shares to derive the handshake receive key
    sr_kex = serverHello.getExtension(ExtensionType.key_share)
    sr_psk = serverHello.getExtension(ExtensionType.pre_shared_key)
    if not sr_kex and not sr_psk:
        raise TLSIllegalParameterException("Server did not select PSK nor "
                                           "an (EC)DH group")
    if sr_kex:
        sr_kex = sr_kex.server_share
        self.ecdhCurve = sr_kex.group
        cl_key_share_ex = clientHello.getExtension(ExtensionType.key_share)
        # the server's share must match one of the groups we offered
        cl_kex = next((i for i in cl_key_share_ex.client_shares
                       if i.group == sr_kex.group), None)
        if cl_kex is None:
            raise TLSIllegalParameterException("Server selected not "
                                               "advertised group.")
        kex = self._getKEX(sr_kex.group, self.version)

        shared_sec = kex.calc_shared_key(cl_kex.private,
                                         sr_kex.key_exchange)
    else:
        # PSK-only key schedule uses an all-zero (EC)DHE input
        shared_sec = bytearray(prf_size)

    # if server agreed to perform resumption, find the matching secret key
    resuming = False
    if sr_psk:
        clPSK = clientHello.getExtension(ExtensionType.pre_shared_key)
        ident = clPSK.identities[sr_psk.selected]
        psk = [i[1] for i in settings.pskConfigs if i[0] == ident.identity]
        if psk:
            # external (configured) PSK
            psk = psk[0]
        else:
            # not configured, so it must be a resumption ticket PSK
            resuming = True
            psk = HandshakeHelpers.calc_res_binder_psk(
                ident, session.resumptionMasterSecret,
                session.tickets)
    else:
        psk = bytearray(prf_size)

    secret = bytearray(prf_size)
    # Early Secret
    secret = secureHMAC(secret, psk, prfName)

    # Handshake Secret
    secret = derive_secret(secret, bytearray(b'derived'),
                           None, prfName)
    secret = secureHMAC(secret, shared_sec, prfName)

    sr_handshake_traffic_secret = derive_secret(secret,
                                                bytearray(b's hs traffic'),
                                                self._handshake_hash,
                                                prfName)
    cl_handshake_traffic_secret = derive_secret(secret,
                                                bytearray(b'c hs traffic'),
                                                self._handshake_hash,
                                                prfName)

    # prepare for reading encrypted messages
    self._recordLayer.calcTLS1_3PendingState(
        serverHello.cipher_suite,
        cl_handshake_traffic_secret,
        sr_handshake_traffic_secret,
        settings.cipherImplementations)

    self._changeReadState()

    for result in self._getMsg(ContentType.handshake,
                               HandshakeType.encrypted_extensions):
        if result in (0, 1):
            yield result
        else:
            break
    encrypted_extensions = result
    assert isinstance(encrypted_extensions, EncryptedExtensions)

    # server may send record_size_limit only in response to ours
    size_limit_ext = encrypted_extensions.getExtension(
        ExtensionType.record_size_limit)
    if size_limit_ext and not settings.record_size_limit:
        for result in self._sendError(
                AlertDescription.illegal_parameter,
                "Server sent record_size_limit extension despite us not "
                "advertising it"):
            yield result
    if size_limit_ext:
        if size_limit_ext.record_size_limit is None:
            for result in self._sendError(
                    AlertDescription.decode_error,
                    "Malformed record_size_limit extension"):
                yield result
        # in TLS 1.3 the limit includes the content type byte, so the
        # maximum is 2^14+1 (RFC 8449)
        if not 64 <= size_limit_ext.record_size_limit <= 2**14+1:
            for result in self._sendError(
                    AlertDescription.illegal_parameter,
                    "Invalid value in record_size_limit extension"):
                yield result
        # the record layer code expects a limit that excludes content type
        # from the value while extension is defined including it
        self._send_record_limit = size_limit_ext.record_size_limit - 1
        self._recv_record_limit = min(2**14, settings.record_size_limit - 1)

    # if we negotiated PSK then Certificate is not sent
    certificate_request = None
    certificate = None
    if not sr_psk:
        for result in self._getMsg(ContentType.handshake,
                                   (HandshakeType.certificate_request,
                                    HandshakeType.certificate,
                                    HandshakeType.compressed_certificate),
                                   CertificateType.x509):
            if result in (0, 1):
                yield result
            else:
                break

        if isinstance(result, CertificateRequest):
            certificate_request = result

            # we got CertificateRequest so now we'll get Certificate
            for result in self._getMsg(ContentType.handshake,
                                       HandshakeType.certificate,
                                       CertificateType.x509):
                if result in (0, 1):
                    yield result
                else:
                    break

        certificate = result
        assert isinstance(certificate, Certificate)

        # CertificateVerify signs the transcript up to (and including)
        # the Certificate message, so snapshot the hash now
        srv_cert_verify_hh = self._handshake_hash.copy()

        for result in self._getMsg(ContentType.handshake,
                                   HandshakeType.certificate_verify):
            if result in (0, 1):
                yield result
            else:
                break
        certificate_verify = result
        assert isinstance(certificate_verify, CertificateVerify)

        signature_scheme = certificate_verify.signatureAlgorithm

        self.serverSigAlg = signature_scheme

        signature_context = KeyExchange.calcVerifyBytes((3, 4),
                                                        srv_cert_verify_hh,
                                                        signature_scheme,
                                                        None, None, None,
                                                        prfName, b'server')

        for result in self._clientGetKeyFromChain(certificate, settings):
            if result in (0, 1):
                yield result
            else:
                break
        publicKey, serverCertChain, tackExt = result

        # select verification method matching the signature scheme
        if signature_scheme in (SignatureScheme.ed25519,
                                SignatureScheme.ed448):
            pad_type = None
            hash_name = "intrinsic"
            salt_len = None
            method = publicKey.hashAndVerify
        elif signature_scheme[1] == SignatureAlgorithm.ecdsa:
            pad_type = None
            hash_name = HashAlgorithm.toRepr(signature_scheme[0])
            # in TLS 1.3 the ECDSA hash is bound to the key's curve
            matching_hash = self._curve_name_to_hash_name(
                publicKey.curve_name)
            if hash_name != matching_hash:
                raise TLSIllegalParameterException(
                    "server selected signature method invalid for the "
                    "certificate it presented (curve mismatch)")

            salt_len = None
            method = publicKey.verify
        else:
            # RSA-PSS: salt length equals the digest size
            scheme = SignatureScheme.toRepr(signature_scheme)
            pad_type = SignatureScheme.getPadding(scheme)
            hash_name = SignatureScheme.getHash(scheme)
            salt_len = getattr(hashlib, hash_name)().digest_size
            method = publicKey.verify

        if not method(certificate_verify.signature,
                      signature_context,
                      pad_type,
                      hash_name,
                      salt_len):
            raise TLSDecryptionFailed("server Certificate Verify "
                                      "signature "
                                      "verification failed")

    # transcript for verifying the server Finished
    transcript_hash = self._handshake_hash.digest(prfName)

    for result in self._getMsg(ContentType.handshake,
                               HandshakeType.finished,
                               prf_size):
        if result in (0, 1):
            yield result
        else:
            break
    finished = result

    server_finish_hs = self._handshake_hash.copy()
    assert isinstance(finished, Finished)

    finished_key = HKDF_expand_label(sr_handshake_traffic_secret,
                                     b"finished", b'', prf_size, prfName)
    verify_data = secureHMAC(finished_key, transcript_hash, prfName)

    if finished.verify_data != verify_data:
        raise TLSDecryptionFailed("Finished value is not valid")

    # now send client set of messages
    self._changeWriteState()

    # Master secret
    secret = derive_secret(secret, bytearray(b'derived'), None, prfName)
    secret = secureHMAC(secret, bytearray(prf_size), prfName)

    cl_app_traffic = derive_secret(secret, bytearray(b'c ap traffic'),
                                   server_finish_hs, prfName)
    sr_app_traffic = derive_secret(secret, bytearray(b's ap traffic'),
                                   server_finish_hs, prfName)

    if certificate_request:
        client_certificate = Certificate(serverHello.certificate_type,
                                         self.version)
        if clientCertChain:
            # Check to make sure we have the same type of certificates the
            # server requested
            if serverHello.certificate_type == CertificateType.x509 \
                    and not isinstance(clientCertChain, X509CertChain):
                for result in self._sendError(
                        AlertDescription.handshake_failure,
                        "Client certificate is of wrong type"):
                    yield result

            client_certificate.create(clientCertChain)
        # we need to send the message even if we don't have a certificate
        for result in self._sendMsg(client_certificate):
            yield result

        if clientCertChain and privateKey:
            valid_sig_algs = certificate_request.supported_signature_algs
            if not valid_sig_algs:
                for result in self._sendError(
                        AlertDescription.missing_extension,
                        "No Signature Algorithms found"):
                    yield result

            availSigAlgs = self._sigHashesToList(settings, privateKey,
                                                 clientCertChain,
                                                 version=(3, 4))
            signature_scheme = getFirstMatching(availSigAlgs,
                                                valid_sig_algs)
            scheme = SignatureScheme.toRepr(signature_scheme)
            signature_scheme = getattr(SignatureScheme, scheme)

            signature_context = \
                KeyExchange.calcVerifyBytes((3, 4), self._handshake_hash,
                                            signature_scheme, None, None,
                                            None, prfName, b'client')

            # select signing method matching the chosen scheme
            if signature_scheme in (SignatureScheme.ed25519,
                                    SignatureScheme.ed448):
                pad_type = None
                hash_name = "intrinsic"
                salt_len = None
                sig_func = privateKey.hashAndSign
                ver_func = privateKey.hashAndVerify
            elif signature_scheme[1] == SignatureAlgorithm.ecdsa:
                pad_type = None
                hash_name = HashAlgorithm.toRepr(signature_scheme[0])
                salt_len = None
                sig_func = privateKey.sign
                ver_func = privateKey.verify
            else:
                pad_type = SignatureScheme.getPadding(scheme)
                hash_name = SignatureScheme.getHash(scheme)
                salt_len = getattr(hashlib, hash_name)().digest_size
                sig_func = privateKey.sign
                ver_func = privateKey.verify

            signature = sig_func(signature_context,
                                 pad_type,
                                 hash_name,
                                 salt_len)
            # self-check the signature before sending it to the peer
            if not ver_func(signature, signature_context,
                            pad_type,
                            hash_name,
                            salt_len):
                for result in self._sendError(
                        AlertDescription.internal_error,
                        "Certificate Verify signature failed"):
                    yield result

            certificate_verify = CertificateVerify(self.version)
            certificate_verify.create(signature, signature_scheme)

            for result in self._sendMsg(certificate_verify):
                yield result

    # Do after client cert and verify messages has been sent.
    exporter_master_secret = derive_secret(secret,
                                           bytearray(b'exp master'),
                                           self._handshake_hash, prfName)

    self._recordLayer.calcTLS1_3PendingState(
        serverHello.cipher_suite,
        cl_app_traffic,
        sr_app_traffic,
        settings.cipherImplementations)
    # be ready to process alert messages from the server, which
    # MUST be encrypted with ap traffic secret when they are sent after
    # Finished
    self._changeReadState()

    cl_finished_key = HKDF_expand_label(cl_handshake_traffic_secret,
                                        b"finished", b'',
                                        prf_size, prfName)
    cl_verify_data = secureHMAC(
        cl_finished_key,
        self._handshake_hash.digest(prfName),
        prfName)

    cl_finished = Finished(self.version, prf_size)
    cl_finished.create(cl_verify_data)

    # middlebox compatibility: send CCS if we used a session_id and
    # haven't sent one yet
    if not self._ccs_sent and clientHello.session_id:
        ccs = ChangeCipherSpec().create()
        msgs = [ccs, cl_finished]
    else:
        msgs = [cl_finished]

    for result in self._sendMsgs(msgs):
        yield result

    # CCS messages are not allowed in post handshake authentication
    self._middlebox_compat_mode = False

    # fully switch to application data
    self._changeWriteState()

    self._first_handshake_hashes = self._handshake_hash.copy()

    resumption_master_secret = derive_secret(secret,
                                             bytearray(b'res master'),
                                             self._handshake_hash, prfName)

    self.session = Session()
    self.extendedMasterSecret = True

    serverName = None
    if clientHello.server_name:
        serverName = clientHello.server_name.decode("utf-8")

    appProto = None
    alpnExt = encrypted_extensions.getExtension(ExtensionType.alpn)
    if alpnExt:
        appProto = alpnExt.protocol_names[0]

    heartbeat_ext = encrypted_extensions.getExtension(
        ExtensionType.heartbeat)
    if heartbeat_ext:
        if not settings.use_heartbeat_extension:
            for result in self._sendError(
                    AlertDescription.unsupported_extension,
                    "Server sent Heartbeat extension without one in "
                    "client hello"):
                yield result
        if heartbeat_ext.mode == HeartbeatMode.PEER_ALLOWED_TO_SEND and \
                settings.heartbeat_response_callback:
            self.heartbeat_can_send = True
            self.heartbeat_response_callback = settings.\
                heartbeat_response_callback
        elif heartbeat_ext.mode == HeartbeatMode.\
                PEER_NOT_ALLOWED_TO_SEND or not settings.\
                heartbeat_response_callback:
            self.heartbeat_can_send = False
        else:
            for result in self._sendError(
                    AlertDescription.illegal_parameter,
                    "Server responded with invalid Heartbeat extension"):
                yield result
        self.heartbeat_supported = True

    self.session.create(secret,
                        bytearray(b''),  # no session_id in TLS 1.3
                        serverHello.cipher_suite,
                        None,  # no SRP
                        clientCertChain,
                        certificate.cert_chain if certificate else None,
                        None,  # no TACK
                        False,  # no TACK in hello
                        serverName,
                        encryptThenMAC=False,  # all ciphers are AEAD
                        extendedMasterSecret=True,  # all TLS1.3 are EMS
                        appProto=appProto,
                        cl_app_secret=cl_app_traffic,
                        sr_app_secret=sr_app_traffic,
                        exporterMasterSecret=exporter_master_secret,
                        resumptionMasterSecret=resumption_master_secret,
                        # NOTE it must be a reference, not a copy!
                        tickets=self.tickets)

    yield "finished" if not resuming else "resumed_and_finished"
def _clientSelectNextProto(self, nextProtos, serverHello):
# nextProtos is None or non-empty list of strings
# serverHello.next_protos is None or possibly-empty list of strings
#
# !!! We assume the client may have specified nextProtos as a list of
# strings so we convert them to bytearrays (it's awkward to require
# the user to specify a list of bytearrays or "bytes", and in
# Python 2.6 bytes() is just an alias for str() anyways...
if nextProtos is not None and serverHello.next_protos is not None:
for p in nextProtos:
if bytearray(p) in serverHello.next_protos:
return bytearray(p)
else:
# If the client doesn't support any of server's protocols,
# or the server doesn't advertise any (next_protos == [])
# the client SHOULD select the first protocol it supports.
return bytearray(nextProtos[0])
return None
def _clientResume(self, session, serverHello, clientRandom,
                  cipherImplementations, nextProto, settings):
    """Finish an abbreviated (session-resumption) handshake as client.

    Yields 0/1 while blocked on the socket and finally
    "resumed_and_finished" when the server agreed to resume; yields
    nothing at all when the ServerHello does not match the cached
    session (caller then continues with a full handshake).
    """
    # If the server agrees to resume
    if session and session.sessionID and \
            serverHello.session_id == session.sessionID:

        # resumed session must use the exact same cipher suite
        if serverHello.cipher_suite != session.cipherSuite:
            for result in self._sendError(\
                    AlertDescription.illegal_parameter,\
                    "Server's ciphersuite doesn't match session"):
                yield result

        # Calculate pending connection states
        self._calcPendingStates(session.cipherSuite,
                                session.masterSecret,
                                clientRandom, serverHello.random,
                                cipherImplementations)

        # Exchange ChangeCipherSpec and Finished messages
        # (server sends its Finished first in an abbreviated handshake)
        for result in self._getFinished(session.masterSecret,
                                        session.cipherSuite):
            yield result
        # buffer writes so that CCS and Finished go out in one TCP packet
        self.sock.buffer_writes = True
        for result in self._sendFinished(session.masterSecret,
                                         session.cipherSuite,
                                         nextProto,
                                         settings=settings):
            yield result
        self.sock.flush()
        self.sock.buffer_writes = False

        # Set the session for this connection
        self.session = session
        yield "resumed_and_finished"
def _clientKeyExchange(self, settings, cipherSuite,
                       clientCertChain, privateKey,
                       certificateType,
                       tackExt, clientRandom, serverRandom,
                       keyExchange):
    """Perform the client side of key exchange (TLS 1.2 and earlier).

    Reads Certificate, ServerKeyExchange, optional CertificateRequest
    and ServerHelloDone from the server, then sends the (optional)
    client Certificate, ClientKeyExchange and (optional)
    CertificateVerify messages.

    Yields 0/1 while blocked on the socket; the final yielded value is
    the tuple (premasterSecret, serverCertChain, clientCertChain,
    tackExt).
    """
    # if server chose cipher suite with authentication, get the certificate
    if cipherSuite in CipherSuite.certAllSuites or \
            cipherSuite in CipherSuite.ecdheEcdsaSuites or \
            cipherSuite in CipherSuite.dheDsaSuites:
        for result in self._getMsg(ContentType.handshake,
                                   HandshakeType.certificate,
                                   certificateType):
            if result in (0, 1):
                yield result
            else:
                break
        serverCertificate = result
    else:
        serverCertificate = None
    # if server chose RSA key exchange, we need to skip SKE message
    if cipherSuite not in CipherSuite.certSuites:
        for result in self._getMsg(ContentType.handshake,
                                   HandshakeType.server_key_exchange,
                                   cipherSuite):
            if result in (0, 1):
                yield result
            else:
                break
        serverKeyExchange = result
    else:
        serverKeyExchange = None

    for result in self._getMsg(ContentType.handshake,
                               (HandshakeType.certificate_request,
                                HandshakeType.server_hello_done)):
        if result in (0, 1):
            yield result
        else:
            break

    certificateRequest = None
    if isinstance(result, CertificateRequest):
        certificateRequest = result

        # abort if Certificate Request with inappropriate ciphersuite
        # BUGFIX: the dheDsaSuites check previously tested the
        # CipherSuite *class* instead of the negotiated cipherSuite,
        # which made the check a no-op and wrongly rejected
        # CertificateRequest under DHE_DSS suites
        if cipherSuite not in CipherSuite.certAllSuites \
                and cipherSuite not in CipherSuite.ecdheEcdsaSuites \
                and cipherSuite not in CipherSuite.dheDsaSuites \
                or cipherSuite in CipherSuite.srpAllSuites:
            for result in self._sendError(
                    AlertDescription.unexpected_message,
                    "Certificate Request with incompatible cipher suite"):
                yield result

        # we got CertificateRequest so now we'll get ServerHelloDone
        for result in self._getMsg(ContentType.handshake,
                                   HandshakeType.server_hello_done):
            if result in (0, 1):
                yield result
            else:
                break
    serverHelloDone = result

    serverCertChain = None
    publicKey = None
    if cipherSuite in CipherSuite.certAllSuites or \
            cipherSuite in CipherSuite.ecdheEcdsaSuites or \
            cipherSuite in CipherSuite.dheDsaSuites:
        # get the certificate
        for result in self._clientGetKeyFromChain(serverCertificate,
                                                  settings,
                                                  tackExt):
            if result in (0, 1):
                yield result
            else:
                break
        publicKey, serverCertChain, tackExt = result

        # Check the server's signature, if the server chose an
        # authenticated PFS-enabled ciphersuite
        if serverKeyExchange:
            valid_sig_algs = \
                self._sigHashesToList(settings,
                                      certList=serverCertChain)
            try:
                KeyExchange.verifyServerKeyExchange(serverKeyExchange,
                                                    publicKey,
                                                    clientRandom,
                                                    serverRandom,
                                                    valid_sig_algs)
            except TLSIllegalParameterException:
                for result in self._sendError(
                        AlertDescription.illegal_parameter):
                    yield result
            except TLSDecryptionFailed:
                for result in self._sendError(
                        AlertDescription.decrypt_error):
                    yield result

    if serverKeyExchange:
        # store key exchange metadata for user applications
        if self.version >= (3, 3) \
                and (cipherSuite in CipherSuite.certAllSuites or
                     cipherSuite in CipherSuite.ecdheEcdsaSuites) \
                and cipherSuite not in CipherSuite.certSuites:
            self.serverSigAlg = (serverKeyExchange.hashAlg,
                                 serverKeyExchange.signAlg)

        if cipherSuite in CipherSuite.dhAllSuites:
            self.dhGroupSize = numBits(serverKeyExchange.dh_p)
        if cipherSuite in CipherSuite.ecdhAllSuites:
            self.ecdhCurve = serverKeyExchange.named_curve

    # Send Certificate if we were asked for it
    if certificateRequest:

        # if a peer doesn't advertise support for any algorithm in TLSv1.2,
        # support for SHA1+RSA can be assumed
        if self.version == (3, 3) \
                and not [sig for sig in
                         certificateRequest.supported_signature_algs
                         if sig[1] == SignatureAlgorithm.rsa]:
            for result in self._sendError(
                    AlertDescription.handshake_failure,
                    "Server doesn't accept any sigalgs we support: " +
                    str(certificateRequest.supported_signature_algs)):
                yield result
        clientCertificate = Certificate(certificateType)
        if clientCertChain:
            # Check to make sure we have the same type of
            # certificates the server requested
            if certificateType == CertificateType.x509 \
                    and not isinstance(clientCertChain, X509CertChain):
                for result in self._sendError(
                        AlertDescription.handshake_failure,
                        "Client certificate is of wrong type"):
                    yield result

            clientCertificate.create(clientCertChain)
        # we need to send the message even if we don't have a certificate
        for result in self._sendMsg(clientCertificate):
            yield result
    else:
        # Server didn't ask for cer, zeroise so session doesn't store them
        privateKey = None
        clientCertChain = None

    try:
        ske = serverKeyExchange
        premasterSecret = keyExchange.processServerKeyExchange(publicKey,
                                                               ske)
    except TLSInsufficientSecurity as e:
        for result in self._sendError(
                AlertDescription.insufficient_security, e):
            yield result
    except TLSIllegalParameterException as e:
        for result in self._sendError(
                AlertDescription.illegal_parameter, e):
            yield result

    clientKeyExchange = keyExchange.makeClientKeyExchange()

    # Send ClientKeyExchange
    for result in self._sendMsg(clientKeyExchange):
        yield result

    # the Extended Master Secret calculation uses the same handshake
    # hashes as the Certificate Verify calculation so we need to
    # make a copy of it
    self._certificate_verify_handshake_hash = self._handshake_hash.copy()

    # if client auth was requested and we have a private key, send a
    # CertificateVerify
    if certificateRequest and privateKey:
        valid_sig_algs = self._sigHashesToList(settings, privateKey,
                                               clientCertChain)
        try:
            certificateVerify = KeyExchange.makeCertificateVerify(
                self.version,
                self._certificate_verify_handshake_hash,
                valid_sig_algs,
                privateKey,
                certificateRequest,
                premasterSecret,
                clientRandom,
                serverRandom)
        except TLSInternalError as exception:
            for result in self._sendError(
                    AlertDescription.internal_error, exception):
                yield result
        for result in self._sendMsg(certificateVerify):
            yield result

    yield (premasterSecret, serverCertChain, clientCertChain, tackExt)
def _clientFinished(self, premasterSecret, clientRandom, serverRandom,
                    cipherSuite, cipherImplementations, nextProto,
                    settings):
    """Derive the master secret and exchange CCS+Finished as client.

    Yields 0/1 while blocked on the socket; the final yielded value is
    the master secret.
    """
    if self.extendedMasterSecret:
        # Extended Master Secret (RFC 7627) binds the secret to the
        # handshake transcript instead of the two random values
        cvhh = self._certificate_verify_handshake_hash
        # in case of session resumption, or when the handshake doesn't
        # use the certificate authentication, the hashes are the same
        if not cvhh:
            cvhh = self._handshake_hash
        masterSecret = calc_key(self.version, premasterSecret,
                                cipherSuite, b"extended master secret",
                                handshake_hashes=cvhh,
                                output_length=48)
    else:
        masterSecret = calc_key(self.version, premasterSecret,
                                cipherSuite, b"master secret",
                                client_random=clientRandom,
                                server_random=serverRandom,
                                output_length=48)

    self._calcPendingStates(cipherSuite, masterSecret,
                            clientRandom, serverRandom,
                            cipherImplementations)

    # Exchange ChangeCipherSpec and Finished messages
    # (client sends its Finished first in a full handshake)
    for result in self._sendFinished(masterSecret, cipherSuite, nextProto,
                                     settings=settings):
        yield result
    self.sock.flush()
    self.sock.buffer_writes = False
    for result in self._getFinished(masterSecret,
                                    cipherSuite,
                                    nextProto=nextProto):
        yield result
    yield masterSecret
def _check_certchain_with_settings(self, cert_chain, settings):
    """
    Verify that the key parameters match enabled ones.

    Checks if the certificate key size matches the minimum and maximum
    sizes set or that it uses curves enabled in settings.

    Yields 0/1 while an alert is being sent on failure; on success the
    final yielded value is the end-entity public key.
    """
    # Get and check public key from the cert chain
    publicKey = cert_chain.getEndEntityPublicKey()
    cert_type = cert_chain.x509List[0].certAlg
    if cert_type == "ecdsa":
        curve_name = publicKey.curve_name
        # normalize crypto-library curve aliases to the TLS curve name
        for name, aliases in CURVE_ALIASES.items():
            if curve_name in aliases:
                curve_name = name
                break

        if self.version <= (3, 3) and curve_name not in settings.eccCurves:
            for result in self._sendError(
                    AlertDescription.handshake_failure,
                    "Peer sent certificate with curve we did not "
                    "advertise support for: {0}".format(curve_name)):
                yield result
        if self.version >= (3, 4):
            # TLS 1.3 allows only the three NIST curves for ECDSA and
            # binds the hash algorithm to the curve of the key
            if curve_name not in ('secp256r1', 'secp384r1', 'secp521r1'):
                for result in self._sendError(
                        AlertDescription.illegal_parameter,
                        "Peer sent certificate with curve not supported "
                        "in TLS 1.3: {0}".format(curve_name)):
                    yield result
            if curve_name == 'secp256r1':
                sig_alg_for_curve = 'sha256'
            elif curve_name == 'secp384r1':
                sig_alg_for_curve = 'sha384'
            else:
                assert curve_name == 'secp521r1'
                sig_alg_for_curve = 'sha512'
            if sig_alg_for_curve not in settings.ecdsaSigHashes:
                for result in self._sendError(
                        AlertDescription.illegal_parameter,
                        "Peer selected certificate with ECDSA curve we "
                        "did not advertise support for: {0}"
                        .format(curve_name)):
                    yield result
    elif cert_type in ("Ed25519", "Ed448"):
        # EdDSA certificates require TLS 1.2 or later
        if self.version < (3, 3):
            for result in self._sendError(
                    AlertDescription.illegal_parameter,
                    "Peer sent certificate incompatible with negotiated "
                    "TLS version"):
                yield result
        if cert_type not in settings.more_sig_schemes:
            for result in self._sendError(
                    AlertDescription.handshake_failure,
                    "Peer sent certificate we did not advertise support "
                    "for: {0}".format(cert_type)):
                yield result
    else:
        # for RSA and DSA keys
        if len(publicKey) < settings.minKeySize:
            for result in self._sendError(
                    AlertDescription.handshake_failure,
                    "Other party's public key too small: %d" %
                    len(publicKey)):
                yield result
        if len(publicKey) > settings.maxKeySize:
            for result in self._sendError(
                    AlertDescription.handshake_failure,
                    "Other party's public key too large: %d" %
                    len(publicKey)):
                yield result

    yield publicKey
def _clientGetKeyFromChain(self, certificate, settings, tack_ext=None):
    """Extract and validate the public key from a Certificate message.

    Yields 0/1 while alerts are being sent; on success the final
    yielded value is the tuple (public_key, cert_chain, tack_ext).
    """
    # Get and check cert chain from the Certificate message
    cert_chain = certificate.cert_chain
    if not cert_chain or cert_chain.getNumCerts() == 0:
        for result in self._sendError(
                AlertDescription.illegal_parameter,
                "Other party sent a Certificate message without "\
                "certificates"):
            yield result

    # key size / curve checks against the local settings
    for result in self._check_certchain_with_settings(
            cert_chain,
            settings):
        if result in (0, 1):
            yield result
        else: break
    public_key = result

    # If there's no TLS Extension, look for a TACK cert
    if tackpyLoaded:
        if not tack_ext:
            tack_ext = cert_chain.getTackExt()

        # If there's a TACK (whether via TLS or TACK Cert), check that it
        # matches the cert chain
        if tack_ext and tack_ext.tacks:
            for tack in tack_ext.tacks:
                if not cert_chain.checkTack(tack):
                    for result in self._sendError(
                            AlertDescription.illegal_parameter,
                            "Other party's TACK doesn't match their public key"):
                        yield result

    yield public_key, cert_chain, tack_ext
#*********************************************************
# Server Handshake Functions
#*********************************************************
def handshakeServer(self, verifierDB=None,
                    certChain=None, privateKey=None, reqCert=False,
                    sessionCache=None, settings=None, checker=None,
                    reqCAs = None,
                    tacks=None, activationFlags=0,
                    nextProtos=None, anon=False, alpn=None, sni=None):
    """Perform a handshake in the role of server.

    This function performs an SSL or TLS handshake. Depending on
    the arguments and the behavior of the client, this function can
    perform an SRP, or certificate-based handshake. It
    can also perform a combined SRP and server-certificate
    handshake.

    Like any handshake function, this can be called on a closed
    TLS connection, or on a TLS connection that is already open.
    If called on an open connection it performs a re-handshake.
    This function does not send a Hello Request message before
    performing the handshake, so if re-handshaking is required,
    the server must signal the client to begin the re-handshake
    through some other means.

    If the function completes without raising an exception, the
    TLS connection will be open and available for data transfer.

    If an exception is raised, the connection will have been
    automatically closed (if it was ever open).

    :type verifierDB: ~tlslite.verifierdb.VerifierDB
    :param verifierDB: A database of SRP password verifiers
        associated with usernames. If the client performs an SRP
        handshake, the session's srpUsername attribute will be set.

    :type certChain: ~tlslite.x509certchain.X509CertChain
    :param certChain: The certificate chain to be used if the
        client requests server certificate authentication and no virtual
        host defined in HandshakeSettings matches ClientHello.

    :type privateKey: ~tlslite.utils.rsakey.RSAKey
    :param privateKey: The private key to be used if the client
        requests server certificate authentication and no virtual host
        defined in HandshakeSettings matches ClientHello.

    :type reqCert: bool
    :param reqCert: Whether to request client certificate
        authentication. This only applies if the client chooses server
        certificate authentication; if the client chooses SRP
        authentication, this will be ignored. If the client
        performs a client certificate authentication, the sessions's
        clientCertChain attribute will be set.

    :type sessionCache: ~tlslite.sessioncache.SessionCache
    :param sessionCache: An in-memory cache of resumable sessions.
        The client can resume sessions from this cache. Alternatively,
        if the client performs a full handshake, a new session will be
        added to the cache.

    :type settings: ~tlslite.handshakesettings.HandshakeSettings
    :param settings: Various settings which can be used to control
        the ciphersuites and SSL/TLS version chosen by the server.

    :type checker: ~tlslite.checker.Checker
    :param checker: A Checker instance. This instance will be
        invoked to examine the other party's authentication
        credentials, if the handshake completes successfully.

    :type reqCAs: list of bytearray
    :param reqCAs: A collection of DER-encoded DistinguishedNames that
        will be sent along with a certificate request to help client pick
        a certificate. This does not affect verification.

    :type nextProtos: list of str
    :param nextProtos: A list of upper layer protocols to expose to the
        clients through the Next-Protocol Negotiation Extension,
        if they support it. Deprecated, use the `virtual_hosts` in
        HandshakeSettings.

    :type alpn: list of bytearray
    :param alpn: names of application layer protocols supported.
        Note that it will be used instead of NPN if both were advertised by
        client. Deprecated, use the `virtual_hosts` in HandshakeSettings.

    :type sni: bytearray
    :param sni: expected virtual name hostname. Deprecated, use the
        `virtual_hosts` in HandshakeSettings.

    :raises socket.error: If a socket error occurs.
    :raises tlslite.errors.TLSAbruptCloseError: If the socket is closed
        without a preceding alert.
    :raises tlslite.errors.TLSAlert: If a TLS alert is signalled.
    :raises tlslite.errors.TLSAuthenticationError: If the checker
        doesn't like the other party's authentication credentials.
    """
    # drive the async generator to completion (blocking behavior)
    for result in self.handshakeServerAsync(verifierDB,
            certChain, privateKey, reqCert, sessionCache, settings,
            checker, reqCAs,
            tacks=tacks, activationFlags=activationFlags,
            nextProtos=nextProtos, anon=anon, alpn=alpn, sni=sni):
        pass
def handshakeServerAsync(self, verifierDB=None,
                         certChain=None, privateKey=None, reqCert=False,
                         sessionCache=None, settings=None, checker=None,
                         reqCAs=None,
                         tacks=None, activationFlags=0,
                         nextProtos=None, anon=False, alpn=None, sni=None
                         ):
    """Start a server handshake operation on the TLS connection.

    Behaves like handshakeServer() but as a generator: successive
    invocations yield 0 when waiting to read from the socket, 1 when
    waiting to write to it, and raise StopIteration once the
    handshake operation is complete.

    :rtype: iterable
    :returns: A generator; see above for details.
    """
    inner_generator = self._handshakeServerAsyncHelper(
        verifierDB=verifierDB, cert_chain=certChain,
        privateKey=privateKey, reqCert=reqCert,
        sessionCache=sessionCache, settings=settings,
        reqCAs=reqCAs,
        tacks=tacks, activationFlags=activationFlags,
        nextProtos=nextProtos, anon=anon, alpn=alpn, sni=sni)
    for event in self._handshakeWrapperAsync(inner_generator, checker):
        yield event
def _handshakeServerAsyncHelper(self, verifierDB,
cert_chain, privateKey, reqCert, sessionCache,
settings, reqCAs,
tacks, activationFlags,
nextProtos, anon, alpn, sni):
self._handshakeStart(client=False)
if not settings:
settings = HandshakeSettings()
settings = settings.validate()
if (not verifierDB) and (not cert_chain) and not anon and \
not settings.pskConfigs and not settings.virtual_hosts:
raise ValueError("Caller passed no authentication credentials")
if cert_chain and not privateKey:
raise ValueError("Caller passed a cert_chain but no privateKey")
if privateKey and not cert_chain:
raise ValueError("Caller passed a privateKey but no cert_chain")
if reqCAs and not reqCert:
raise ValueError("Caller passed reqCAs but not reqCert")
if cert_chain and not isinstance(cert_chain, X509CertChain):
raise ValueError("Unrecognized certificate type")
if activationFlags and not tacks:
raise ValueError("Nonzero activationFlags requires tacks")
if tacks:
if not tackpyLoaded:
raise ValueError("tackpy is not loaded")
if not settings.useExperimentalTackExtension:
raise ValueError("useExperimentalTackExtension not enabled")
if alpn is not None and not alpn:
raise ValueError("Empty list of ALPN protocols")
self.sock.padding_cb = settings.padding_cb
# OK Start exchanging messages
# ******************************
# Handle ClientHello and resumption
for result in self._serverGetClientHello(settings, privateKey,
cert_chain,
verifierDB, sessionCache,
anon, alpn, sni):
if result in (0,1): yield result
elif result == None:
self._handshakeDone(resumed=True)
return # Handshake was resumed, we're done
else: break
(clientHello, version, cipherSuite, sig_scheme, privateKey,
cert_chain) = result
# in TLS 1.3 the handshake is completely different
# (extensions go into different messages, format of messages is
# different, etc.)
if version > (3, 3):
for result in self._serverTLS13Handshake(settings, clientHello,
cipherSuite,
privateKey, cert_chain,
version, sig_scheme,
alpn, reqCert):
if result in (0, 1):
yield result
else:
break
if result == "finished":
self._handshakeDone(resumed=False)
return
#If not a resumption...
# Create the ServerHello message
if sessionCache:
sessionID = getRandomBytes(32)
else:
sessionID = bytearray(0)
if not clientHello.supports_npn:
nextProtos = None
alpnExt = clientHello.getExtension(ExtensionType.alpn)
if alpnExt and alpn:
# if there's ALPN, don't do NPN
nextProtos = None
# If not doing a certificate-based suite, discard the TACK
if not cipherSuite in CipherSuite.certAllSuites and \
not cipherSuite in CipherSuite.ecdheEcdsaSuites:
tacks = None
# Prepare a TACK Extension if requested
if clientHello.tack:
tackExt = TackExtension.create(tacks, activationFlags)
else:
tackExt = None
extensions = []
# Prepare other extensions if requested
if settings.useEncryptThenMAC and \
clientHello.getExtension(ExtensionType.encrypt_then_mac) and \
cipherSuite not in CipherSuite.streamSuites and \
cipherSuite not in CipherSuite.aeadSuites:
extensions.append(TLSExtension().create(ExtensionType.
encrypt_then_mac,
bytearray(0)))
self._recordLayer.encryptThenMAC = True
if settings.useExtendedMasterSecret:
if clientHello.getExtension(ExtensionType.extended_master_secret):
extensions.append(TLSExtension().create(ExtensionType.
extended_master_secret,
bytearray(0)))
self.extendedMasterSecret = True
elif settings.requireExtendedMasterSecret:
for result in self._sendError(
AlertDescription.insufficient_security,
"Failed to negotiate Extended Master Secret"):
yield result
selectedALPN = None
if alpnExt and alpn:
for protoName in alpnExt.protocol_names:
if protoName in alpn:
selectedALPN = protoName
ext = ALPNExtension().create([protoName])
extensions.append(ext)
break
else:
for result in self._sendError(
AlertDescription.no_application_protocol,
"No mutually supported application layer protocols"):
yield result
# notify client that we understood its renegotiation info extension
# or SCSV
secureRenego = False
renegoExt = clientHello.getExtension(ExtensionType.renegotiation_info)
if renegoExt:
if renegoExt.renegotiated_connection:
for result in self._sendError(
AlertDescription.handshake_failure,
"Non empty renegotiation info extension in "
"initial Client Hello"):
yield result
secureRenego = True
elif CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV in \
clientHello.cipher_suites:
secureRenego = True
if secureRenego:
extensions.append(RenegotiationInfoExtension()
.create(bytearray(0)))
# tell the client what point formats we support
if clientHello.getExtension(ExtensionType.ec_point_formats):
# even though the selected cipher may not use ECC, client may want
# to send a CA certificate with ECDSA...
extensions.append(ECPointFormatsExtension().create(
[ECPointFormat.uncompressed]))
# if client sent Heartbeat extension
if clientHello.getExtension(ExtensionType.heartbeat):
# and we want to accept it
if settings.use_heartbeat_extension:
extensions.append(HeartbeatExtension().create(
HeartbeatMode.PEER_ALLOWED_TO_SEND))
if clientHello.getExtension(ExtensionType.record_size_limit) and \
settings.record_size_limit:
# in TLS 1.2 and earlier we can select at most 2^14B records
extensions.append(RecordSizeLimitExtension().create(
min(2**14, settings.record_size_limit)))
# don't send empty list of extensions
if not extensions:
extensions = None
serverHello = ServerHello()
# RFC 8446, section 4.1.3
random = getRandomBytes(32)
if version == (3, 3) and settings.maxVersion > (3, 3):
random[-8:] = TLS_1_2_DOWNGRADE_SENTINEL
if version < (3, 3) and settings.maxVersion >= (3, 3):
random[-8:] = TLS_1_1_DOWNGRADE_SENTINEL
serverHello.create(self.version, random, sessionID,
cipherSuite, CertificateType.x509, tackExt,
nextProtos, extensions=extensions)
# Perform the SRP key exchange
clientCertChain = None
if cipherSuite in CipherSuite.srpAllSuites:
for result in self._serverSRPKeyExchange(clientHello, serverHello,
verifierDB, cipherSuite,
privateKey, cert_chain,
settings):
if result in (0, 1):
yield result
else: break
premasterSecret, privateKey, cert_chain = result
# Perform a certificate-based key exchange
elif (cipherSuite in CipherSuite.certSuites or
cipherSuite in CipherSuite.dheCertSuites or
cipherSuite in CipherSuite.dheDsaSuites or
cipherSuite in CipherSuite.ecdheCertSuites or
cipherSuite in CipherSuite.ecdheEcdsaSuites):
try:
sig_hash_alg, cert_chain, privateKey = \
self._pickServerKeyExchangeSig(settings,
clientHello,
cert_chain,
privateKey)
except TLSHandshakeFailure as alert:
for result in self._sendError(
AlertDescription.handshake_failure,
str(alert)):
yield result
if cipherSuite in CipherSuite.certSuites:
keyExchange = RSAKeyExchange(cipherSuite,
clientHello,
serverHello,
privateKey)
elif cipherSuite in CipherSuite.dheCertSuites or \
cipherSuite in CipherSuite.dheDsaSuites:
dhGroups = self._groupNamesToList(settings)
keyExchange = DHE_RSAKeyExchange(cipherSuite,
clientHello,
serverHello,
privateKey,
settings.dhParams,
dhGroups)
elif cipherSuite in CipherSuite.ecdheCertSuites or \
cipherSuite in CipherSuite.ecdheEcdsaSuites:
acceptedCurves = self._curveNamesToList(settings)
defaultCurve = getattr(GroupName, settings.defaultCurve)
keyExchange = ECDHE_RSAKeyExchange(cipherSuite,
clientHello,
serverHello,
privateKey,
acceptedCurves,
defaultCurve)
else:
assert(False)
for result in self._serverCertKeyExchange(clientHello, serverHello,
sig_hash_alg, cert_chain, keyExchange,
reqCert, reqCAs, cipherSuite,
settings):
if result in (0,1): yield result
else: break
(premasterSecret, clientCertChain) = result
# Perform anonymous Diffie Hellman key exchange
elif (cipherSuite in CipherSuite.anonSuites or
cipherSuite in CipherSuite.ecdhAnonSuites):
if cipherSuite in CipherSuite.anonSuites:
dhGroups = self._groupNamesToList(settings)
keyExchange = ADHKeyExchange(cipherSuite, clientHello,
serverHello, settings.dhParams,
dhGroups)
else:
acceptedCurves = self._curveNamesToList(settings)
defaultCurve = getattr(GroupName, settings.defaultCurve)
keyExchange = AECDHKeyExchange(cipherSuite, clientHello,
serverHello, acceptedCurves,
defaultCurve)
for result in self._serverAnonKeyExchange(serverHello, keyExchange,
cipherSuite):
if result in (0,1): yield result
else: break
premasterSecret = result
else:
assert(False)
# Exchange Finished messages
for result in self._serverFinished(premasterSecret,
clientHello.random, serverHello.random,
cipherSuite, settings.cipherImplementations,
nextProtos, settings):
if result in (0,1): yield result
else: break
masterSecret = result
#Create the session object
self.session = Session()
if cipherSuite in CipherSuite.certAllSuites or \
cipherSuite in CipherSuite.ecdheEcdsaSuites:
serverCertChain = cert_chain
else:
serverCertChain = None
srpUsername = None
serverName = None
if clientHello.srp_username:
srpUsername = clientHello.srp_username.decode("utf-8")
if clientHello.server_name:
serverName = clientHello.server_name.decode("utf-8")
self.session.create(masterSecret, serverHello.session_id, cipherSuite,
srpUsername, clientCertChain, serverCertChain,
tackExt, (serverHello.tackExt is not None),
serverName,
encryptThenMAC=self._recordLayer.encryptThenMAC,
extendedMasterSecret=self.extendedMasterSecret,
appProto=selectedALPN,
# NOTE it must be a reference, not a copy!
tickets=self.tickets)
#Add the session object to the session cache
if sessionCache and sessionID:
sessionCache[sessionID] = self.session
self._handshakeDone(resumed=False)
self._serverRandom = serverHello.random
self._clientRandom = clientHello.random
def request_post_handshake_auth(self, settings=None):
    """
    Request Post-handshake Authentication from client.

    The PHA process is asynchronous, and client may send some data before
    its certificates are added to Session object. Calling this generator
    will only request for the new identity of client, it will not wait for
    it.
    """
    # PHA exists only in TLS 1.3, only server may request it, and the
    # client must have advertised support for it in its Client Hello
    if self.version != (3, 4):
        raise ValueError("PHA is supported only in TLS 1.3")
    if self._client:
        raise ValueError("PHA can only be requested by server")
    if not self._pha_supported:
        raise ValueError("PHA not supported by client")

    conf = settings if settings else HandshakeSettings()
    conf = conf.validate()

    sig_algs = self._sigHashesToList(conf)
    if not sig_algs:
        raise ValueError("No signature algorithms enabled in "
                         "HandshakeSettings")

    # random context value ties the client's later Certificate message
    # back to this particular request
    request_context = bytes(getRandomBytes(32))

    cert_request = CertificateRequest(self.version)
    cert_request.create(context=request_context, sig_algs=sig_algs)

    # remember the request so the response can be matched to it
    self._cert_requests[request_context] = cert_request

    for status in self._sendMsg(cert_request):
        yield status
@staticmethod
def _derive_key_iv(nonce, user_key, settings):
    """Derive the IV and key for session ticket encryption.

    The per-ticket *nonce* is mixed with the user-configured *user_key*
    through an HKDF-like extract/expand chain; returns a (key, iv) pair.
    """
    # aes128gcm pairs with SHA-256; every other supported ticket
    # cipher uses SHA-384
    if settings.ticketCipher == "aes128gcm":
        prf_name, prf_size = "sha256", 32
    else:
        prf_name, prf_size = "sha384", 48

    # mix the nonce with the key set by user
    state = secureHMAC(bytearray(prf_size), nonce, prf_name)
    state = derive_secret(state, bytearray(b'derived'), None, prf_name)
    state = secureHMAC(state, user_key, prf_name)

    ticket_secret = derive_secret(
        state, bytearray(b'SessionTicket secret'), None, prf_name)

    # key is as long as the user-provided key
    key = HKDF_expand_label(
        ticket_secret, b"key", b"", len(user_key), prf_name)
    # all AEADs use 12 byte long IV
    iv = HKDF_expand_label(ticket_secret, b"iv", b"", 12, prf_name)
    return key, iv
def _serverSendTickets(self, settings):
    """Send session tickets to client."""
    # without ticket encryption keys we cannot issue any tickets
    if not settings.ticketKeys:
        return

    for _ in range(settings.ticket_count):
        # prepare the ticket payload
        payload = SessionTicketPayload()
        payload.create(self.session.resumptionMasterSecret,
                       self.version,
                       self.session.cipherSuite,
                       int(time.time()),
                       getRandomBytes(len(settings.ticketKeys[0])),
                       client_cert_chain=self.session.clientCertChain)

        # derive fresh encryption keys for this ticket; the nonce is
        # later sent in the clear in front of the ciphertext
        nonce = getRandomBytes(32)
        key, iv = self._derive_key_iv(nonce, settings.ticketKeys[0],
                                      settings)

        impl = settings.cipherImplementations
        if settings.ticketCipher in ("aes128gcm", "aes256gcm"):
            cipher = createAESGCM(key, impl)
        elif settings.ticketCipher in ("aes128ccm", "aes256ccm"):
            cipher = createAESCCM(key, impl)
        elif settings.ticketCipher in ("aes128ccm_8", "aes256ccm_8"):
            cipher = createAESCCM_8(key, impl)
        else:
            assert settings.ticketCipher == "chacha20-poly1305"
            cipher = createCHACHA20(key, impl)

        sealed_ticket = cipher.seal(iv, payload.write(), b'')

        # encapsulate the ticket and queue it for the client
        msg = NewSessionTicket()
        msg.create(settings.ticketLifetime,
                   getRandomNumber(1, 8**4),
                   payload.nonce,
                   nonce + sealed_ticket,
                   [])
        self._queue_message(msg)

    # flush the queued tickets to the client
    if settings.ticket_count:
        for result in self._queue_flush():
            yield result
def _tryDecrypt(self, settings, identity):
    """Try to decrypt a session ticket presented by the client.

    Returns a ``((identity, psk, prf), ticket)`` pair on success, or
    ``(None, None)`` when no configured key decrypts the ticket.
    """
    if not settings.ticketKeys:
        return None, None

    # the first 32 bytes carry the key-derivation nonce, so anything
    # shorter cannot hold an encrypted ticket
    if len(identity.identity) < 33:
        # too small for an encrypted ticket
        return None, None

    nonce = identity.identity[:32]
    ciphertext = identity.identity[32:]

    for user_key in settings.ticketKeys:
        key, iv = self._derive_key_iv(nonce, user_key, settings)
        impl = settings.cipherImplementations
        if settings.ticketCipher in ("aes128gcm", "aes256gcm"):
            cipher = createAESGCM(key, impl)
        elif settings.ticketCipher in ("aes128ccm", "aes256ccm"):
            cipher = createAESCCM(key, impl)
        elif settings.ticketCipher in ("aes128ccm_8", "aes256ccm_8"):
            cipher = createAESCCM_8(key, impl)
        else:
            assert settings.ticketCipher == "chacha20-poly1305"
            cipher = createCHACHA20(key, impl)

        plaintext = cipher.open(iv, ciphertext, b'')
        if not plaintext:
            # wrong key or corrupted ticket — try the next key
            continue

        try:
            ticket = SessionTicketPayload().parse(Parser(plaintext))
        except ValueError:
            # decrypted but malformed — try the next key
            continue

        # PRF follows the cipher suite the ticket was issued for
        prf = 'sha384' if ticket.cipher_suite \
            in CipherSuite.sha384PrfSuites else 'sha256'
        new_sess_ticket = NewSessionTicket()
        new_sess_ticket.ticket_nonce = ticket.nonce
        new_sess_ticket.ticket = identity.identity
        psk = HandshakeHelpers.calc_res_binder_psk(identity,
                                                   ticket.master_secret,
                                                   [new_sess_ticket])
        return ((identity.identity, psk, prf), ticket)
    # no keys decrypted the ticket
    return None, None
def _serverTLS13Handshake(self, settings, clientHello, cipherSuite,
                          privateKey, serverCertChain, version, scheme,
                          srv_alpns, reqCert):
    """Perform a TLS 1.3 handshake (server side).

    Generator: yields 0/1 while socket I/O would block and the string
    "finished" once the handshake completed successfully.  Negotiates
    key shares and PSKs, runs the TLS 1.3 key schedule, sends the
    server flight (ServerHello .. Finished), verifies the client's
    second flight, populates ``self.session`` and sends session
    tickets.
    """
    prf_name, prf_size = self._getPRFParams(cipherSuite)

    # Early Secret starts from an all-zero IKM; replaced below if a
    # PSK is negotiated
    secret = bytearray(prf_size)

    # pick the first group we support (in our preference order) that
    # the client sent a key share for
    share = clientHello.getExtension(ExtensionType.key_share)
    if share:
        share_ids = [i.group for i in share.client_shares]
        for group_name in chain(settings.keyShares, settings.eccCurves,
                                settings.dhGroups):
            selected_group = getattr(GroupName, group_name)
            if selected_group in share_ids:
                cl_key_share = next(i for i in share.client_shares
                                    if i.group == selected_group)
                break
        else:
            # after a HelloRetryRequest the client must have sent a
            # share we accept, so reaching here is an internal error
            for result in self._sendError(AlertDescription.internal_error,
                                          "HRR did not work?!"):
                yield result

    # select PSK (external or resumption ticket), if any matches
    psk = None
    selected_psk = None
    resumed_client_cert_chain = None
    psks = clientHello.getExtension(ExtensionType.pre_shared_key)
    psk_types = clientHello.getExtension(
        ExtensionType.psk_key_exchange_modes)
    if psks and (PskKeyExchangeMode.psk_dhe_ke in psk_types.modes or
                 PskKeyExchangeMode.psk_ke in psk_types.modes) and \
            (settings.pskConfigs or settings.ticketKeys):
        for i, ident in enumerate(psks.identities):
            ticket = None
            external = True
            match = [j for j in settings.pskConfigs
                     if j[0] == ident.identity]
            if not match:
                # not an external PSK — try it as an encrypted ticket
                (match, ticket) = self._tryDecrypt(settings, ident)
                external = False
                if not match:
                    continue
                match = [match]

            # check if PSK can be used with selected cipher suite
            psk_hash = match[0][2] if len(match[0]) > 2 else 'sha256'
            if psk_hash != prf_name:
                continue

            psk = match[0][1]
            selected_psk = i
            if ticket:
                resumed_client_cert_chain = ticket.client_cert_chain
            try:
                HandshakeHelpers.verify_binder(
                    clientHello,
                    self._pre_client_hello_handshake_hash,
                    selected_psk,
                    psk,
                    psk_hash,
                    external)
            except TLSIllegalParameterException as e:
                for result in self._sendError(
                        AlertDescription.illegal_parameter,
                        str(e)):
                    yield result
            break

    sh_extensions = []
    # we need to gen key share either when we selected psk_dhe_ke or
    # regular certificate authenticated key exchange (the default)
    if (psk and
            PskKeyExchangeMode.psk_dhe_ke in psk_types.modes and
            "psk_dhe_ke" in settings.psk_modes) or \
            (psk is None and privateKey):
        self.ecdhCurve = selected_group
        kex = self._getKEX(selected_group, version)
        key_share = self._genKeyShareEntry(selected_group, version)

        try:
            shared_sec = kex.calc_shared_key(key_share.private,
                                             cl_key_share.key_exchange)
        except TLSIllegalParameterException as alert:
            for result in self._sendError(
                    AlertDescription.illegal_parameter,
                    str(alert)):
                yield result

        sh_extensions.append(ServerKeyShareExtension().create(key_share))
    elif (psk is not None and
          PskKeyExchangeMode.psk_ke in psk_types.modes and
          "psk_ke" in settings.psk_modes):
        # pure PSK key exchange — no (EC)DH contribution
        shared_sec = bytearray(prf_size)
    else:
        for result in self._sendError(
                AlertDescription.handshake_failure,
                "Could not find acceptable PSK identity nor certificate"):
            yield result

    # no PSK negotiated — key schedule uses an all-zero PSK input
    if psk is None:
        psk = bytearray(prf_size)

    sh_extensions.append(SrvSupportedVersionsExtension().create(version))
    if selected_psk is not None:
        sh_extensions.append(SrvPreSharedKeyExtension()
                             .create(selected_psk))

    serverHello = ServerHello()
    # in TLS1.3 the version selected is sent in extension, (3, 3) is
    # just dummy value to workaround broken middleboxes
    serverHello.create((3, 3), getRandomBytes(32),
                       clientHello.session_id,
                       cipherSuite, extensions=sh_extensions)

    msgs = []
    msgs.append(serverHello)
    # middlebox compatibility: echo a ChangeCipherSpec if the client
    # sent a (compat) session id and we haven't sent a CCS yet
    if not self._ccs_sent and clientHello.session_id:
        ccs = ChangeCipherSpec().create()
        msgs.append(ccs)
    for result in self._sendMsgs(msgs):
        yield result

    # Early secret
    secret = secureHMAC(secret, psk, prf_name)

    # Handshake Secret
    secret = derive_secret(secret, bytearray(b'derived'), None, prf_name)
    secret = secureHMAC(secret, shared_sec, prf_name)

    sr_handshake_traffic_secret = derive_secret(secret,
                                                bytearray(b's hs traffic'),
                                                self._handshake_hash,
                                                prf_name)
    cl_handshake_traffic_secret = derive_secret(secret,
                                                bytearray(b'c hs traffic'),
                                                self._handshake_hash,
                                                prf_name)
    self.version = version
    self._recordLayer.calcTLS1_3PendingState(
        cipherSuite,
        cl_handshake_traffic_secret,
        sr_handshake_traffic_secret,
        settings.cipherImplementations)
    # everything from now on is sent encrypted with handshake keys
    self._changeWriteState()

    ee_extensions = []

    if clientHello.getExtension(ExtensionType.record_size_limit) and \
            settings.record_size_limit:
        # in TLS 1.3 the limit covers the content type byte too,
        # hence 2**14+1
        ee_extensions.append(RecordSizeLimitExtension().create(
            min(2**14+1, settings.record_size_limit)))

    # a bit of a hack to detect if the HRR was sent
    # as that means that original key share didn't match what we wanted
    # send the client updated list of shares we support,
    # preferred ones first
    if clientHello.getExtension(ExtensionType.cookie):
        ext = SupportedGroupsExtension()
        groups = [getattr(GroupName, i) for i in settings.keyShares]
        groups += [getattr(GroupName, i) for i in settings.eccCurves
                   if getattr(GroupName, i) not in groups]
        groups += [getattr(GroupName, i) for i in settings.dhGroups
                   if getattr(GroupName, i) not in groups]
        if groups:
            ext.create(groups)
            ee_extensions.append(ext)

    alpn_ext = clientHello.getExtension(ExtensionType.alpn)
    if alpn_ext:
        # error handling was done when receiving ClientHello
        matched = [i for i in alpn_ext.protocol_names if i in srv_alpns]
        if matched:
            ext = ALPNExtension().create([matched[0]])
            ee_extensions.append(ext)

    if clientHello.getExtension(ExtensionType.heartbeat):
        if settings.use_heartbeat_extension:
            ee_extensions.append(HeartbeatExtension().create(
                HeartbeatMode.PEER_ALLOWED_TO_SEND))

    encryptedExtensions = EncryptedExtensions().create(ee_extensions)
    self._queue_message(encryptedExtensions)

    # certificate-based part of the flight is skipped on resumption
    if selected_psk is None:
        # optionally send the client a certificate request
        if reqCert:

            # the context SHALL be zero length except in post-handshake
            ctx = b''

            # Get list of valid Signing Algorithms
            # we don't support DSA for client certificates yet
            cr_settings = settings.validate()
            cr_settings.dsaSigHashes = []
            valid_sig_algs = self._sigHashesToList(cr_settings)
            assert valid_sig_algs

            certificate_request = CertificateRequest(self.version)
            certificate_request.create(context=ctx,
                                       sig_algs=valid_sig_algs)
            self._queue_message(certificate_request)

        certificate = Certificate(CertificateType.x509, self.version)
        certificate.create(serverCertChain, bytearray())
        self._queue_message(certificate)

        certificate_verify = CertificateVerify(self.version)

        signature_scheme = getattr(SignatureScheme, scheme)

        signature_context = \
            KeyExchange.calcVerifyBytes((3, 4), self._handshake_hash,
                                        signature_scheme, None, None, None,
                                        prf_name, b'server')

        # pick signing/verification callables matching the scheme type
        if signature_scheme in (SignatureScheme.ed25519,
                                SignatureScheme.ed448):
            hashName = "intrinsic"
            padType = None
            saltLen = None
            sig_func = privateKey.hashAndSign
            ver_func = privateKey.hashAndVerify
        elif signature_scheme[1] == SignatureAlgorithm.ecdsa:
            hashName = HashAlgorithm.toRepr(signature_scheme[0])
            padType = None
            saltLen = None
            sig_func = privateKey.sign
            ver_func = privateKey.verify
        else:
            padType = SignatureScheme.getPadding(scheme)
            hashName = SignatureScheme.getHash(scheme)
            saltLen = getattr(hashlib, hashName)().digest_size
            sig_func = privateKey.sign
            ver_func = privateKey.verify

        signature = sig_func(signature_context,
                             padType,
                             hashName,
                             saltLen)
        # self-check the freshly made signature before sending it
        if not ver_func(signature, signature_context,
                        padType,
                        hashName,
                        saltLen):
            for result in self._sendError(
                    AlertDescription.internal_error,
                    "Certificate Verify signature failed"):
                yield result
        certificate_verify.create(signature, signature_scheme)

        self._queue_message(certificate_verify)

    finished_key = HKDF_expand_label(sr_handshake_traffic_secret,
                                     b"finished", b'', prf_size, prf_name)
    verify_data = secureHMAC(finished_key,
                             self._handshake_hash.digest(prf_name),
                             prf_name)

    finished = Finished(self.version, prf_size).create(verify_data)

    self._queue_message(finished)
    for result in self._queue_flush():
        yield result

    self._changeReadState()

    # Master secret
    secret = derive_secret(secret, bytearray(b'derived'), None, prf_name)
    secret = secureHMAC(secret, bytearray(prf_size), prf_name)

    cl_app_traffic = derive_secret(secret, bytearray(b'c ap traffic'),
                                   self._handshake_hash, prf_name)
    sr_app_traffic = derive_secret(secret, bytearray(b's ap traffic'),
                                   self._handshake_hash, prf_name)
    self._recordLayer.calcTLS1_3PendingState(serverHello.cipher_suite,
                                             cl_app_traffic,
                                             sr_app_traffic,
                                             settings
                                             .cipherImplementations)

    # all the messages sent by the server after the Finished message
    # MUST be encrypted with ap traffic secret, even if they regard
    # problems in processing client Certificate, CertificateVerify or
    # Finished messages
    self._changeWriteState()

    client_cert_chain = None
    # Get [Certificate,] (if was requested)
    if reqCert and selected_psk is None:
        for result in self._getMsg(ContentType.handshake,
                                   HandshakeType.certificate,
                                   CertificateType.x509):
            if result in (0, 1):
                yield result
            else:
                break
        client_certificate = result
        assert isinstance(client_certificate, Certificate)
        client_cert_chain = client_certificate.cert_chain

    # Get and check CertificateVerify, if relevant
    # snapshot the transcript hash *before* CertificateVerify is read,
    # as the client signed the transcript up to its Certificate
    cli_cert_verify_hh = self._handshake_hash.copy()
    if client_cert_chain and client_cert_chain.getNumCerts():
        for result in self._getMsg(ContentType.handshake,
                                   HandshakeType.certificate_verify):
            if result in (0, 1):
                yield result
            else:
                break
        certificate_verify = result
        assert isinstance(certificate_verify, CertificateVerify)

        signature_scheme = certificate_verify.signatureAlgorithm

        valid_sig_algs = self._sigHashesToList(settings,
                                               certList=client_cert_chain,
                                               version=(3, 4))
        if signature_scheme not in valid_sig_algs:
            for result in self._sendError(
                    AlertDescription.illegal_parameter,
                    "Invalid signature on Certificate Verify"):
                yield result

        signature_context = \
            KeyExchange.calcVerifyBytes((3, 4), cli_cert_verify_hh,
                                        signature_scheme, None, None, None,
                                        prf_name, b'client')

        public_key = client_cert_chain.getEndEntityPublicKey()

        # pick verification parameters matching the signature scheme
        if signature_scheme in (SignatureScheme.ed25519,
                                SignatureScheme.ed448):
            hash_name = "intrinsic"
            pad_type = None
            salt_len = None
            ver_func = public_key.hashAndVerify
        elif signature_scheme[1] == SignatureAlgorithm.ecdsa:
            hash_name = HashAlgorithm.toRepr(signature_scheme[0])
            pad_type = None
            salt_len = None
            ver_func = public_key.verify
        else:
            scheme = SignatureScheme.toRepr(signature_scheme)
            pad_type = SignatureScheme.getPadding(scheme)
            hash_name = SignatureScheme.getHash(scheme)
            salt_len = getattr(hashlib, hash_name)().digest_size
            ver_func = public_key.verify

        if not ver_func(certificate_verify.signature,
                        signature_context,
                        pad_type,
                        hash_name,
                        salt_len):
            for result in self._sendError(
                    AlertDescription.decrypt_error,
                    "signature verification failed"):
                yield result

    # as both exporter and resumption master secrets include handshake
    # transcript, we need to derive them early
    exporter_master_secret = derive_secret(secret,
                                           bytearray(b'exp master'),
                                           self._handshake_hash,
                                           prf_name)

    # verify Finished of client
    cl_finished_key = HKDF_expand_label(cl_handshake_traffic_secret,
                                        b"finished", b'',
                                        prf_size, prf_name)
    cl_verify_data = secureHMAC(cl_finished_key,
                                self._handshake_hash.digest(prf_name),
                                prf_name)
    for result in self._getMsg(ContentType.handshake,
                               HandshakeType.finished,
                               prf_size):
        if result in (0, 1):
            yield result
        else:
            break
    cl_finished = result
    assert isinstance(cl_finished, Finished)
    if cl_finished.verify_data != cl_verify_data:
        for result in self._sendError(
                AlertDescription.decrypt_error,
                "Finished value is not valid"):
            yield result

    # disallow CCS messages after handshake
    self._middlebox_compat_mode = False

    resumption_master_secret = derive_secret(secret,
                                             bytearray(b'res master'),
                                             self._handshake_hash,
                                             prf_name)

    # keep the completed-handshake transcript (used e.g. for PHA)
    self._first_handshake_hashes = self._handshake_hash.copy()

    self.session = Session()
    self.extendedMasterSecret = True
    server_name = None
    if clientHello.server_name:
        server_name = clientHello.server_name.decode('utf-8')

    app_proto = None
    alpnExt = encryptedExtensions.getExtension(ExtensionType.alpn)
    if alpnExt:
        app_proto = alpnExt.protocol_names[0]

    # on PSK resumption, reuse the client identity from the ticket
    if not client_cert_chain and resumed_client_cert_chain:
        client_cert_chain = resumed_client_cert_chain

    self.session.create(secret,
                        bytearray(b''),  # no session_id
                        serverHello.cipher_suite,
                        bytearray(b''),  # no SRP
                        client_cert_chain,
                        serverCertChain,
                        None,
                        False,
                        server_name,
                        encryptThenMAC=False,
                        extendedMasterSecret=True,
                        appProto=app_proto,
                        cl_app_secret=cl_app_traffic,
                        sr_app_secret=sr_app_traffic,
                        exporterMasterSecret=exporter_master_secret,
                        resumptionMasterSecret=resumption_master_secret,
                        # NOTE it must be a reference, not a copy
                        tickets=self.tickets)

    # switch to application_traffic_secret for client packets
    self._changeReadState()

    for result in self._serverSendTickets(settings):
        yield result

    yield "finished"
def _serverGetClientHello(self, settings, private_key, cert_chain,
verifierDB,
sessionCache, anon, alpn, sni):
# Tentatively set version to most-desirable version, so if an error
# occurs parsing the ClientHello, this will be the version we'll use
# for the error alert
# If TLS 1.3 is enabled, use the "compatible" TLS 1.2 version
self.version = min(settings.maxVersion, (3, 3))
self._pre_client_hello_handshake_hash = self._handshake_hash.copy()
#Get ClientHello
for result in self._getMsg(ContentType.handshake,
HandshakeType.client_hello):
if result in (0,1): yield result
else: break
clientHello = result
# check if the ClientHello and its extensions are well-formed
#If client's version is too low, reject it
real_version = clientHello.client_version
if real_version >= (3, 3):
ext = clientHello.getExtension(ExtensionType.supported_versions)
if ext:
for v in ext.versions:
if v in KNOWN_VERSIONS and v > real_version:
real_version = v
if real_version < settings.minVersion:
self.version = settings.minVersion
for result in self._sendError(\
AlertDescription.protocol_version,
"Too old version: %s" % str(clientHello.client_version)):
yield result
# there MUST be at least one value in both of those
if not clientHello.cipher_suites or \
not clientHello.compression_methods:
for result in self._sendError(
AlertDescription.decode_error,
"Malformed Client Hello message"):
yield result
# client hello MUST advertise uncompressed method
if 0 not in clientHello.compression_methods:
for result in self._sendError(
AlertDescription.illegal_parameter,
"Client Hello missing uncompressed method"):
yield result
# the list of signatures methods is defined as <2..2^16-2>, which
# means it can't be empty, but it's only applicable to TLSv1.2 protocol
ext = clientHello.getExtension(ExtensionType.signature_algorithms)
if clientHello.client_version >= (3, 3) and ext and not ext.sigalgs:
for result in self._sendError(
AlertDescription.decode_error,
"Malformed signature_algorithms extension"):
yield result
# Sanity check the ALPN extension
alpnExt = clientHello.getExtension(ExtensionType.alpn)
if alpnExt:
if not alpnExt.protocol_names:
for result in self._sendError(
AlertDescription.decode_error,
"Client sent empty list of ALPN names"):
yield result
for protocolName in alpnExt.protocol_names:
if not protocolName:
for result in self._sendError(
AlertDescription.decode_error,
"Client sent empty name in ALPN extension"):
yield result
# Sanity check the SNI extension
sniExt = clientHello.getExtension(ExtensionType.server_name)
# check if extension is well formed
if sniExt and (not sniExt.extData or not sniExt.serverNames):
for result in self._sendError(
AlertDescription.decode_error,
"Recevived SNI extension is malformed"):
yield result
if sniExt and sniExt.hostNames:
# RFC 6066 limitation
if len(sniExt.hostNames) > 1:
for result in self._sendError(
AlertDescription.illegal_parameter,
"Client sent multiple host names in SNI extension"):
yield result
if not sniExt.hostNames[0]:
for result in self._sendError(
AlertDescription.decode_error,
"Received SNI extension is malformed"):
yield result
try:
name = sniExt.hostNames[0].decode('ascii', 'strict')
except UnicodeDecodeError:
for result in self._sendError(
AlertDescription.illegal_parameter,
"Host name in SNI is not valid ASCII"):
yield result
if not is_valid_hostname(name):
for result in self._sendError(
AlertDescription.illegal_parameter,
"Host name in SNI is not valid DNS name"):
yield result
# sanity check the EMS extension
emsExt = clientHello.getExtension(ExtensionType.extended_master_secret)
if emsExt and emsExt.extData:
for result in self._sendError(
AlertDescription.decode_error,
"Non empty payload of the Extended "
"Master Secret extension"):
yield result
# sanity check the TLS 1.3 extensions
ver_ext = clientHello.getExtension(ExtensionType.supported_versions)
if ver_ext and (3, 4) in ver_ext.versions:
psk = clientHello.getExtension(ExtensionType.pre_shared_key)
psk_modes = clientHello.getExtension(
ExtensionType.psk_key_exchange_modes)
key_share = clientHello.getExtension(ExtensionType.key_share)
sup_groups = clientHello.getExtension(
ExtensionType.supported_groups)
pha = clientHello.getExtension(ExtensionType.post_handshake_auth)
if pha:
if pha.extData:
for result in self._sendError(
AlertDescription.decode_error,
"Invalid encoding of post_handshake_auth extension"
):
yield result
self._pha_supported = True
key_exchange = None
if psk_modes:
if not psk_modes.modes:
for result in self._sendError(
AlertDescription.decode_error,
"Empty psk_key_exchange_modes extension"):
yield result
# psk_ke
if psk:
if not psk.identities:
for result in self._sendError(
AlertDescription.decode_error,
"No identities in PSK extension"):
yield result
if not psk.binders:
for result in self._sendError(
AlertDescription.decode_error,
"No binders in PSK extension"):
yield result
if len(psk.identities) != len(psk.binders):
for result in self._sendError(
AlertDescription.illegal_parameter,
"Number of identities does not match number of "
"binders in PSK extension"):
yield result
if any(not i.identity for i in psk.identities):
for result in self._sendError(
AlertDescription.decoder_error,
"Empty identity in PSK extension"):
yield result
if any(not i for i in psk.binders):
for result in self._sendError(
AlertDescription.decoder_error,
"Empty binder in PSK extension"):
yield result
if psk is not clientHello.extensions[-1]:
for result in self._sendError(
AlertDescription.illegal_parameter,
"PSK extension not last in client hello"):
yield result
if not psk_modes:
for result in self._sendError(
AlertDescription.missing_extension,
"PSK extension without psk_key_exchange_modes "
"extension"):
yield result
if PskKeyExchangeMode.psk_dhe_ke not in psk_modes.modes:
key_exchange = "psk_ke"
# cert
if not key_exchange:
if not sup_groups:
for result in self._sendError(
AlertDescription.missing_extension,
"Missing supported_groups extension"):
yield result
if not key_share:
for result in self._sendError(
AlertDescription.missing_extension,
"Missing key_share extension"):
yield result
if not sup_groups.groups:
for result in self._sendError(
AlertDescription.decode_error,
"Empty supported_groups extension"):
yield result
if key_share.client_shares is None:
for result in self._sendError(
AlertDescription.decode_error,
"Empty key_share extension"):
yield result
# check supported_groups
if TLS_1_3_FORBIDDEN_GROUPS.intersection(sup_groups.groups):
for result in self._sendError(
AlertDescription.illegal_parameter,
"Client advertised in TLS 1.3 Client Hello a key "
"exchange group forbidden in TLS 1.3"):
yield result
# Check key_share
mismatch = next((i for i in key_share.client_shares
if i.group not in sup_groups.groups), None)
if mismatch:
for result in self._sendError(
AlertDescription.illegal_parameter,
"Client sent key share for "
"group it did not advertise "
"support for: {0}"
.format(GroupName.toStr(mismatch))):
yield result
key_share_ids = [i.group for i in key_share.client_shares]
if len(set(key_share_ids)) != len(key_share_ids):
for result in self._sendError(
AlertDescription.illegal_parameter,
"Client sent multiple key shares for the same "
"group"):
yield result
group_ids = sup_groups.groups
diff = set(group_ids) - set(key_share_ids)
if key_share_ids != [i for i in group_ids if i not in diff]:
for result in self._sendError(
AlertDescription.illegal_parameter,
"Client sent key shares in different order than "
"the advertised groups."):
yield result
sig_algs = clientHello.getExtension(
ExtensionType.signature_algorithms)
if (not psk_modes or not psk) and sig_algs:
key_exchange = "cert"
# psk_dhe_ke
if not key_exchange and psk:
key_exchange = "psk_dhe_ke"
if not key_exchange:
for result in self._sendError(
AlertDescription.missing_extension,
"Missing extension"):
yield result
early_data = clientHello.getExtension(ExtensionType.early_data)
if early_data:
if early_data.extData:
for result in self._sendError(
AlertDescription.decode_error,
"malformed early_data extension"):
yield result
if not psk:
for result in self._sendError(
AlertDescription.illegal_parameter,
"early_data without PSK extension"):
yield result
# if early data comes from version we don't support, client
# MUST (section D.3 draft 28) abort the connection so we
# enable early data tolerance only when versions match
self._recordLayer.max_early_data = settings.max_early_data
self._recordLayer.early_data_ok = True
# negotiate the protocol version for the connection
high_ver = None
if ver_ext:
high_ver = getFirstMatching(settings.versions,
ver_ext.versions)
if not high_ver:
for result in self._sendError(
AlertDescription.protocol_version,
"supported_versions did not include version we "
"support"):
yield result
if high_ver:
# when we selected TLS 1.3, we cannot set the record layer to
# it as well as that also switches it to a mode where the
# content type is encrypted
# use the backwards compatible TLS 1.2 version instead
self.version = min((3, 3), high_ver)
version = high_ver
elif clientHello.client_version > settings.maxVersion:
# in TLS 1.3 the version is negotiatied with extension,
# but the settings use the (3, 4) as the max version
self.version = min(settings.maxVersion, (3, 3))
version = self.version
else:
#Set the version to the client's version
self.version = min(clientHello.client_version, (3, 3))
version = self.version
#Detect if the client performed an inappropriate fallback.
if version < settings.maxVersion and \
CipherSuite.TLS_FALLBACK_SCSV in clientHello.cipher_suites:
for result in self._sendError(
AlertDescription.inappropriate_fallback):
yield result
# TODO when TLS 1.3 is final, check the client hello random for
# downgrade too
# start negotiating the parameters of the connection
sni_ext = clientHello.getExtension(ExtensionType.server_name)
if sni_ext:
name = sni_ext.hostNames[0].decode('ascii', 'strict')
# warn the client if the name didn't match the expected value
if sni and sni != name:
alert = Alert().create(AlertDescription.unrecognized_name,
AlertLevel.warning)
for result in self._sendMsg(alert):
yield result
#Check if there's intersection between supported curves by client and
#server
clientGroups = clientHello.getExtension(ExtensionType.supported_groups)
# in case the client didn't advertise any curves, we can pick any so
# enable ECDHE
ecGroupIntersect = True
# if there is no extension, then enable DHE
ffGroupIntersect = True
if clientGroups is not None:
clientGroups = clientGroups.groups
if not clientGroups:
for result in self._sendError(
AlertDescription.decode_error,
"Received malformed supported_groups extension"):
yield result
serverGroups = self._curveNamesToList(settings)
ecGroupIntersect = getFirstMatching(clientGroups, serverGroups)
# RFC 7919 groups
serverGroups = self._groupNamesToList(settings)
ffGroupIntersect = getFirstMatching(clientGroups, serverGroups)
# if there is no overlap, but there are no FFDHE groups listed,
# allow DHE, prohibit otherwise
if not ffGroupIntersect:
if clientGroups and \
any(i for i in clientGroups if i in range(256, 512)):
ffGroupIntersect = False
else:
ffGroupIntersect = True
# Check and save clients heartbeat extension mode
heartbeat_ext = clientHello.getExtension(ExtensionType.heartbeat)
if heartbeat_ext:
if heartbeat_ext.mode == HeartbeatMode.PEER_ALLOWED_TO_SEND:
if settings.heartbeat_response_callback:
self.heartbeat_can_send = True
self.heartbeat_response_callback = settings.\
heartbeat_response_callback
elif heartbeat_ext.mode == HeartbeatMode.PEER_NOT_ALLOWED_TO_SEND:
self.heartbeat_can_send = False
else:
for result in self._sendError(
AlertDescription.illegal_parameter,
"Received invalid value in Heartbeat extension"):
yield result
self.heartbeat_supported = True
self.heartbeat_can_receive = True
size_limit_ext = clientHello.getExtension(
ExtensionType.record_size_limit)
if size_limit_ext:
if size_limit_ext.record_size_limit is None:
for result in self._sendError(
AlertDescription.decode_error,
"Malformed record_size_limit extension"):
yield result
if not 64 <= size_limit_ext.record_size_limit:
for result in self._sendError(
AlertDescription.illegal_parameter,
"Invalid value in record_size_limit extension"):
yield result
if settings.record_size_limit:
# in TLS 1.3 handshake is encrypted so we need to switch
# to sending smaller messages right away
if version >= (3, 4):
# the client can send bigger values because it may
# know protocol versions or extensions we don't know about
# (but we need to still clamp it to protocol limit)
self._send_record_limit = min(
2**14, size_limit_ext.record_size_limit - 1)
# the record layer excludes content type, extension doesn't
# thus the "-1)
self._recv_record_limit = min(2**14,
settings.record_size_limit - 1)
else:
# but in TLS 1.2 and earlier we need to postpone it till
# handling of Finished
self._peer_record_size_limit = min(
2**14, size_limit_ext.record_size_limit)
#Now that the version is known, limit to only the ciphers available to
#that version and client capabilities.
cipherSuites = []
if verifierDB:
if cert_chain:
cipherSuites += \
CipherSuite.getSrpCertSuites(settings, version)
cipherSuites += CipherSuite.getSrpSuites(settings, version)
elif cert_chain:
if ecGroupIntersect or ffGroupIntersect:
cipherSuites += CipherSuite.getTLS13Suites(settings,
version)
if ecGroupIntersect:
cipherSuites += CipherSuite.getEcdsaSuites(settings, version)
cipherSuites += CipherSuite.getEcdheCertSuites(settings,
version)
if ffGroupIntersect:
cipherSuites += CipherSuite.getDheCertSuites(settings,
version)
cipherSuites += CipherSuite.getDheDsaSuites(settings,
version)
cipherSuites += CipherSuite.getCertSuites(settings, version)
elif anon:
cipherSuites += CipherSuite.getAnonSuites(settings, version)
cipherSuites += CipherSuite.getEcdhAnonSuites(settings,
version)
elif settings.pskConfigs:
cipherSuites += CipherSuite.getTLS13Suites(settings,
version)
else:
assert False
cipherSuites = CipherSuite.filterForVersion(cipherSuites,
minVersion=version,
maxVersion=version)
#If resumption was requested and we have a session cache...
if clientHello.session_id and sessionCache:
session = None
# Check if the session there is good enough and consistent with
# new Client Hello
try:
session = sessionCache[clientHello.session_id]
if not session.resumable:
raise AssertionError()
# Check if we are willing to use that old cipher still
if session.cipherSuite not in cipherSuites:
session = None
raise KeyError()
# Check for consistency with ClientHello
# see RFC 5246 section 7.4.1.2, description of
# cipher_suites
if session.cipherSuite not in clientHello.cipher_suites:
for result in self._sendError(
AlertDescription.illegal_parameter):
yield result
if clientHello.srp_username:
if not session.srpUsername or \
clientHello.srp_username != \
bytearray(session.srpUsername, "utf-8"):
for result in self._sendError(
AlertDescription.handshake_failure):
yield result
if clientHello.server_name:
if not session.serverName or \
clientHello.server_name != \
bytearray(session.serverName, "utf-8"):
for result in self._sendError(
AlertDescription.handshake_failure):
yield result
if session.encryptThenMAC and \
not clientHello.getExtension(
ExtensionType.encrypt_then_mac):
for result in self._sendError(
AlertDescription.illegal_parameter):
yield result
# if old session used EMS, new connection MUST use EMS
if session.extendedMasterSecret and \
not clientHello.getExtension(
ExtensionType.extended_master_secret):
# RFC 7627, section 5.2 explicitly requires
# handshake_failure
for result in self._sendError(
AlertDescription.handshake_failure):
yield result
# if old session didn't use EMS but new connection
# advertises EMS, create a new session
elif not session.extendedMasterSecret and \
clientHello.getExtension(
ExtensionType.extended_master_secret):
session = None
except KeyError:
pass
#If a session is found..
if session:
#Send ServerHello
extensions = []
if session.encryptThenMAC:
self._recordLayer.encryptThenMAC = True
mte = TLSExtension().create(ExtensionType.encrypt_then_mac,
bytearray(0))
extensions.append(mte)
if session.extendedMasterSecret:
ems = TLSExtension().create(ExtensionType.
extended_master_secret,
bytearray(0))
extensions.append(ems)
secureRenego = False
renegoExt = clientHello.\
getExtension(ExtensionType.renegotiation_info)
if renegoExt:
if renegoExt.renegotiated_connection:
for result in self._sendError(
AlertDescription.handshake_failure):
yield result
secureRenego = True
elif CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV in \
clientHello.cipher_suites:
secureRenego = True
if secureRenego:
extensions.append(RenegotiationInfoExtension()
.create(bytearray(0)))
selectedALPN = None
if alpn:
alpnExt = clientHello.getExtension(ExtensionType.alpn)
if alpnExt:
for protocolName in alpnExt.protocol_names:
if protocolName in alpn:
ext = ALPNExtension().create([protocolName])
extensions.append(ext)
selectedALPN = protocolName
break
else:
for result in self._sendError(
AlertDescription.no_application_protocol,
"No commonly supported application layer"
"protocol supported"):
yield result
heartbeat_ext = clientHello.getExtension(
ExtensionType.heartbeat)
if heartbeat_ext:
if heartbeat_ext.mode == HeartbeatMode.PEER_ALLOWED_TO_SEND:
self.heartbeat_can_send = True
elif heartbeat_ext.mode == \
HeartbeatMode.PEER_NOT_ALLOWED_TO_SEND:
self.heartbeat_can_send = False
else:
for result in self._sendError(
AlertDescription.illegal_parameter,
"Client sent invalid Heartbeat extension"):
yield result
heartbeat = HeartbeatExtension().create(
HeartbeatMode.PEER_ALLOWED_TO_SEND)
self.heartbeat_can_receive = True
self.heartbeat_supported = True
extensions.append(heartbeat)
record_limit = clientHello.getExtension(
ExtensionType.record_size_limit)
if record_limit and settings.record_size_limit:
extensions.append(RecordSizeLimitExtension().create(
min(2**14, settings.record_size_limit)))
# don't send empty extensions
if not extensions:
extensions = None
serverHello = ServerHello()
serverHello.create(version, getRandomBytes(32),
session.sessionID, session.cipherSuite,
CertificateType.x509, None, None,
extensions=extensions)
for result in self._sendMsg(serverHello):
yield result
#Calculate pending connection states
self._calcPendingStates(session.cipherSuite,
session.masterSecret,
clientHello.random,
serverHello.random,
settings.cipherImplementations)
#Exchange ChangeCipherSpec and Finished messages
for result in self._sendFinished(session.masterSecret,
session.cipherSuite,
settings=settings):
yield result
for result in self._getFinished(session.masterSecret,
session.cipherSuite):
yield result
#Set the session
self.session = session
self._clientRandom = clientHello.random
self._serverRandom = serverHello.random
self.session.appProto = selectedALPN
yield None # Handshake done!
#Calculate the first cipher suite intersection.
#This is the 'privileged' ciphersuite. We'll use it if we're
#doing a new negotiation. In fact,
#the only time we won't use it is if we're resuming a
#session, in which case we use the ciphersuite from the session.
#
#Given the current ciphersuite ordering, this means we prefer SRP
#over non-SRP.
try:
cipherSuite, sig_scheme, cert_chain, private_key = \
self._server_select_certificate(settings, clientHello,
cipherSuites, cert_chain,
private_key, version)
except TLSHandshakeFailure as err:
for result in self._sendError(
AlertDescription.handshake_failure,
str(err)):
yield result
except TLSInsufficientSecurity as err:
for result in self._sendError(
AlertDescription.insufficient_security,
str(err)):
yield result
except TLSIllegalParameterException as err:
for result in self._sendError(
AlertDescription.illegal_parameter,
str(err)):
yield result
#If an RSA suite is chosen, check for certificate type intersection
if (cipherSuite in CipherSuite.certAllSuites or
cipherSuite in CipherSuite.ecdheEcdsaSuites) \
and CertificateType.x509 \
not in clientHello.certificate_types:
for result in self._sendError(\
AlertDescription.handshake_failure,
"the client doesn't support my certificate type"):
yield result
# when we have selected TLS 1.3, check if we don't have to ask for
# a new client hello
if version > (3, 3):
self.version = version
hrr_ext = []
# check if we have good key share
share = clientHello.getExtension(ExtensionType.key_share)
if share:
share_ids = [i.group for i in share.client_shares]
acceptable_ids = [getattr(GroupName, i) for i in
chain(settings.keyShares, settings.eccCurves,
settings.dhGroups)]
for selected_group in acceptable_ids:
if selected_group in share_ids:
cl_key_share = next(i for i in share.client_shares
if i.group == selected_group)
break
else:
# if no key share is acceptable, pick one of the supported
# groups that we support
supported = clientHello.getExtension(ExtensionType
.supported_groups)
supported_ids = supported.groups
selected_group = next((i for i in acceptable_ids
if i in supported_ids), None)
if not selected_group:
for result in self._sendError(AlertDescription
.handshake_failure,
"No acceptable group "
"advertised by client"):
yield result
hrr_ks = HRRKeyShareExtension().create(selected_group)
hrr_ext.append(hrr_ks)
if hrr_ext:
cookie = TLSExtension(extType=ExtensionType.cookie)
cookie = cookie.create(bytearray(b'\x00\x20') +
getRandomBytes(32))
hrr_ext.append(cookie)
if hrr_ext:
clientHello1 = clientHello
# create synthetic handshake hash of the first Client Hello
prf_name, prf_size = self._getPRFParams(cipherSuite)
client_hello_hash = self._handshake_hash.digest(prf_name)
self._handshake_hash = HandshakeHashes()
writer = Writer()
writer.add(HandshakeType.message_hash, 1)
writer.addVarSeq(client_hello_hash, 1, 3)
self._handshake_hash.update(writer.bytes)
# send the version that was really selected
vers = SrvSupportedVersionsExtension().create(version)
hrr_ext.append(vers)
# send the HRR
hrr = ServerHello()
# version is hardcoded in TLS 1.3, and real version
# is sent as extension
hrr.create((3, 3), TLS_1_3_HRR, clientHello.session_id,
cipherSuite, extensions=hrr_ext)
msgs = [hrr]
if clientHello.session_id:
ccs = ChangeCipherSpec().create()
msgs.append(ccs)
for result in self._sendMsgs(msgs):
yield result
self._ccs_sent = True
# copy for calculating PSK binders
self._pre_client_hello_handshake_hash = \
self._handshake_hash.copy()
for result in self._getMsg(ContentType.handshake,
HandshakeType.client_hello):
if result in (0, 1):
yield result
else:
break
clientHello = result
# verify that the new key share is present
ext = clientHello.getExtension(ExtensionType.key_share)
if not ext:
for result in self._sendError(AlertDescription
.missing_extension,
"Key share missing in "
"Client Hello"):
yield result
# here we're assuming that the HRR was sent because of
# missing key share, that may not always be the case
if len(ext.client_shares) != 1:
for result in self._sendError(AlertDescription
.illegal_parameter,
"Multiple key shares in "
"second Client Hello"):
yield result
if ext.client_shares[0].group != selected_group:
for result in self._sendError(AlertDescription
.illegal_parameter,
"Client key share does not "
"match Hello Retry Request"):
yield result
# here we're assuming no 0-RTT and possibly no session
# resumption
# verify that new client hello is like the old client hello
# with the exception of changes requested in HRR
old_ext = clientHello1.getExtension(ExtensionType.key_share)
new_ext = clientHello.getExtension(ExtensionType.key_share)
old_ext.client_shares = new_ext.client_shares
# TODO when 0-RTT supported, remove early_data from old hello
if cookie:
# insert the extension at the same place in the old hello
# as it is in the new hello so that later binary compare
# works
for i, ext in enumerate(clientHello.extensions):
if ext.extType == ExtensionType.cookie:
if ext.extData != cookie.extData:
eType = AlertDescription.illegal_parameter
eText = "Malformed cookie extension"
for result in self._sendError(eType, eText):
yield result
clientHello1.extensions.insert(i, ext)
break
else:
for result in self._sendError(AlertDescription
.missing_extension,
"Second client hello "
"does not contain "
"cookie extension"):
yield result
# also padding extension may change
old_ext = clientHello1.getExtension(
ExtensionType.client_hello_padding)
new_ext = clientHello.getExtension(
ExtensionType.client_hello_padding)
if old_ext != new_ext:
if old_ext is None and new_ext:
for i, ext in enumerate(clientHello.extensions):
if ext.extType == \
ExtensionType.client_hello_padding:
clientHello1.extensions.insert(i, ext)
break
elif old_ext and new_ext is None:
# extension was removed, so remove it here too
clientHello1.extensions[:] = \
(i for i in clientHello1.extensions
if i.extType !=
ExtensionType.client_hello_padding)
else:
old_ext.paddingData = new_ext.paddingData
# PSKs not compatible with cipher suite MAY
# be removed, but must have updated obfuscated ticket age
# and binders
old_ext = clientHello1.getExtension(
ExtensionType.pre_shared_key)
new_ext = clientHello.getExtension(
ExtensionType.pre_shared_key)
if new_ext and old_ext:
clientHello1.extensions[-1] = new_ext
if clientHello.extensions[-1] is not new_ext:
for result in self._sendError(
AlertDescription.illegal_parameter,
"PSK extension not last in client hello"):
yield result
# early_data extension MUST be dropped
old_ext = clientHello1.getExtension(ExtensionType.early_data)
if old_ext:
clientHello1.extensions.remove(old_ext)
if clientHello1 != clientHello:
for result in self._sendError(AlertDescription
.illegal_parameter,
"Old Client Hello does not "
"match the updated Client "
"Hello"):
yield result
# If resumption was not requested, or
# we have no session cache, or
# the client's session_id was not found in cache:
#pylint: disable = undefined-loop-variable
yield (clientHello, version, cipherSuite, sig_scheme, private_key,
cert_chain)
#pylint: enable = undefined-loop-variable
def _serverSRPKeyExchange(self, clientHello, serverHello, verifierDB,
                          cipherSuite, privateKey, serverCertChain,
                          settings):
    """
    Perform the server side of SRP key exchange.

    Generator: yields 0/1 when the socket would block and, as its
    final value, the tuple ``(premasterSecret, privateKey,
    serverCertChain)``.

    :param clientHello: parsed ClientHello message
    :param serverHello: the ServerHello to send to the client
    :param verifierDB: database of SRP verifiers, keyed by user name
    :param cipherSuite: the negotiated cipher suite ID
    :param privateKey: server signing key; may be swapped for a
        virtual-host key by _pickServerKeyExchangeSig
    :param serverCertChain: server certificate chain; may likewise be
        swapped for a virtual-host chain
    :param settings: HandshakeSettings for this connection
    """
    # Pick the signature hash (and possibly an alternative cert/key
    # pair from a virtual host) used to sign the ServerKeyExchange.
    # NOTE: _sendError() raises after yielding the alert, so control
    # does not continue past these loops on error.
    try:
        sigHash, serverCertChain, privateKey = \
            self._pickServerKeyExchangeSig(settings, clientHello,
                                           serverCertChain,
                                           privateKey)
    except TLSHandshakeFailure as alert:
        for result in self._sendError(
                AlertDescription.handshake_failure,
                str(alert)):
            yield result
    keyExchange = SRPKeyExchange(cipherSuite,
                                 clientHello,
                                 serverHello,
                                 privateKey,
                                 verifierDB)
    #Create ServerKeyExchange, signing it if necessary
    try:
        serverKeyExchange = keyExchange.makeServerKeyExchange(sigHash)
    except TLSUnknownPSKIdentity:
        # the SRP user name sent by the client is not in verifierDB
        for result in self._sendError(
                AlertDescription.unknown_psk_identity):
            yield result
    except TLSInsufficientSecurity:
        for result in self._sendError(
                AlertDescription.insufficient_security):
            yield result
    #Send ServerHello[, Certificate], ServerKeyExchange,
    #ServerHelloDone
    msgs = []
    msgs.append(serverHello)
    if cipherSuite in CipherSuite.srpCertSuites:
        # SRP+cert suites additionally authenticate the server with a
        # certificate
        certificateMsg = Certificate(CertificateType.x509)
        certificateMsg.create(serverCertChain)
        msgs.append(certificateMsg)
    msgs.append(serverKeyExchange)
    msgs.append(ServerHelloDone())
    for result in self._sendMsgs(msgs):
        yield result
    #Get and check ClientKeyExchange
    # the loop exhausts _getMsg; the last value left in ``result`` is
    # the parsed ClientKeyExchange message
    for result in self._getMsg(ContentType.handshake,
                               HandshakeType.client_key_exchange,
                               cipherSuite):
        if result in (0,1): yield result
        else: break
    try:
        premasterSecret = keyExchange.processClientKeyExchange(result)
    except TLSIllegalParameterException:
        # an A value of 0 mod N would let the client bypass
        # authentication entirely
        for result in self._sendError(AlertDescription.illegal_parameter,
                                      "Suspicious A value"):
            yield result
    except TLSDecodeError as alert:
        for result in self._sendError(AlertDescription.decode_error,
                                      str(alert)):
            yield result
    yield premasterSecret, privateKey, serverCertChain
def _server_select_certificate(self, settings, client_hello,
                               cipher_suites, cert_chain,
                               private_key, version):
    """
    This method makes the decision on which certificate/key pair,
    signature algorithm and cipher to use based on the certificate.

    Tries the default (cert_chain, private_key) pair first, then the
    virtual-host pairs from settings. A pair that fails a mandatory
    check is skipped (the exception is re-raised only if it is the
    last pair and nothing usable was found so far). A pair that passes
    the mandatory checks but fails a non-mandatory one is remembered
    as a fallback while the search continues.

    :returns: tuple of (cipher, sig_scheme, cert_chain, private_key)
    :raises TLSHandshakeFailure: no mutual cipher suite or no common
        signature algorithm with any usable certificate
    :raises TLSInsufficientSecurity: only FFDHE-based suites would
        match but the client's FFDHE groups are unacceptable
    """
    # becomes True when inspecting the final candidate pair
    last_cert = False
    # candidates that passed all mandatory checks
    possible_certs = []
    # Get client groups
    client_groups = client_hello. \
        getExtension(ExtensionType.supported_groups)
    if client_groups is not None:
        client_groups = client_groups.groups
    # If client did send signature_algorithms_cert use it,
    # otherwise fallback to signature_algorithms.
    # Client can also decide not to send sigalg extension
    client_sigalgs = \
        client_hello. \
        getExtension(ExtensionType.signature_algorithms_cert)
    if client_sigalgs is not None:
        client_sigalgs = \
            client_hello. \
            getExtension(ExtensionType.signature_algorithms_cert). \
            sigalgs
    else:
        client_sigalgs = \
            client_hello. \
            getExtension(ExtensionType.signature_algorithms)
        if client_sigalgs is not None:
            client_sigalgs = \
                client_hello. \
                getExtension(ExtensionType.signature_algorithms). \
                sigalgs
        else:
            client_sigalgs = []
    # Get all the certificates we can offer: the default pair first,
    # then every virtual-host pair
    alt_certs = ((X509CertChain(i.certificates), i.key) for vh in
                 settings.virtual_hosts for i in vh.keys)
    certs = [(cert, key)
             for cert, key in chain([(cert_chain, private_key)], alt_certs)]
    for cert, key in certs:
        # Check if this is the last (cert, key) pair we have to check
        if (cert, key) == certs[-1]:
            last_cert = True
        # Mandatory checks. If any one of these checks fail, the certificate
        # is not usuable.
        try:
            # Find a suitable ciphersuite based on the certificate
            ciphers = CipherSuite.filter_for_certificate(cipher_suites, cert)
            for cipher in ciphers:
                if cipher in client_hello.cipher_suites:
                    break
            else:
                # no match; 256-511 is the FFDHE group code-point range,
                # so distinguish "client wanted FFDHE we can't provide"
                # from a plain ciphersuite mismatch
                if client_groups and \
                        any(i in range(256, 512) for i in client_groups) and \
                        any(i in CipherSuite.dhAllSuites
                            for i in client_hello.cipher_suites):
                    raise TLSInsufficientSecurity(
                        "FFDHE groups not acceptable and no other common "
                        "ciphers")
                raise TLSHandshakeFailure("No mutual ciphersuite")
            # Find a signature algorithm based on the certificate
            try:
                sig_scheme, _, _ = \
                    self._pickServerKeyExchangeSig(settings,
                                                   client_hello,
                                                   cert,
                                                   key,
                                                   version,
                                                   False)
            except TLSHandshakeFailure:
                raise TLSHandshakeFailure(
                    "No common signature algorithms")
            # If the certificate is ECDSA, we must check curve compatibility
            if cert and cert.x509List[0].certAlg == 'ecdsa' and \
                    client_groups and client_sigalgs:
                public_key = cert.getEndEntityPublicKey()
                curve = public_key.curve_name
                # translate the crypto-library curve name to a
                # GroupName code point
                for name, aliases in CURVE_ALIASES.items():
                    if curve in aliases:
                        curve = getattr(GroupName, name)
                        break
                if version <= (3, 3) and curve not in client_groups:
                    raise TLSHandshakeFailure(
                        "The curve in the public key is not "
                        "supported by the client: {0}" \
                        .format(GroupName.toRepr(curve)))
                if version >= (3, 4):
                    # TLS 1.3 allows only the three NIST curves for
                    # ECDSA certificates
                    if GroupName.toRepr(curve) not in \
                            ('secp256r1', 'secp384r1', 'secp521r1'):
                        raise TLSIllegalParameterException(
                            "Curve in public key is not supported "
                            "in TLS1.3")
            # If all mandatory checks passed add
            # this as possible certificate we can use.
            possible_certs.append((cipher, sig_scheme, cert, key))
        except Exception:
            # broad on purpose: skip the unusable pair and try the
            # next, but propagate when no candidate can possibly work
            if last_cert and not possible_certs:
                raise
            continue
        # Non-mandatory checks, if these fail the certificate is still usable
        # but we should try to find one that passes all the checks
        # Check if every certificate(except the self-signed root CA)
        # in the certificate chain is signed with a signature algorithm
        # supported by the client.
        if cert:
            cert_chain_ok = True
            for i in range(len(cert.x509List)):
                if cert.x509List[i].issuer != cert.x509List[i].subject:
                    if cert.x509List[i].sigalg not in client_sigalgs:
                        cert_chain_ok = False
                        break
            if not cert_chain_ok:
                if not last_cert:
                    continue
                break
        # If all mandatory and non-mandatory checks passed
        # return the (cert, key) pair, cipher and sig_scheme
        return cipher, sig_scheme, cert, key
    # If we can't find cert that passed all the checks, return the first usable one.
    return possible_certs[0]
def _serverCertKeyExchange(self, clientHello, serverHello, sigHashAlg,
                           serverCertChain, keyExchange,
                           reqCert, reqCAs, cipherSuite,
                           settings):
    """
    Perform the server side of a certificate-based key exchange
    (RSA/DHE/ECDHE with server certificate) for TLS 1.2 and earlier.

    Generator: yields 0/1 when the socket would block and, as its
    final value, the tuple ``(premasterSecret, clientCertChain)``
    where clientCertChain is None if no client certificate was
    received.

    :param clientHello: parsed ClientHello message
    :param serverHello: the ServerHello to send
    :param sigHashAlg: hash/signature pair for ServerKeyExchange
    :param serverCertChain: server certificate chain to send
    :param keyExchange: KeyExchange implementation for the suite
    :param reqCert: whether to request a client certificate
    :param reqCAs: acceptable CA names for CertificateRequest
    :param cipherSuite: negotiated cipher suite ID
    :param settings: HandshakeSettings for this connection
    """
    #Send ServerHello, Certificate[, ServerKeyExchange]
    #[, CertificateRequest], ServerHelloDone
    msgs = []
    # If we verify a client cert chain, return it
    clientCertChain = None
    msgs.append(serverHello)
    msgs.append(Certificate(CertificateType.x509).create(serverCertChain))
    try:
        # returns None for key exchanges that don't use the message
        # (static RSA)
        serverKeyExchange = keyExchange.makeServerKeyExchange(sigHashAlg)
    except TLSInternalError as alert:
        for result in self._sendError(
                AlertDescription.internal_error,
                str(alert)):
            yield result
    except TLSInsufficientSecurity as alert:
        for result in self._sendError(
                AlertDescription.insufficient_security,
                str(alert)):
            yield result
    if serverKeyExchange is not None:
        msgs.append(serverKeyExchange)
    if reqCert:
        certificateRequest = CertificateRequest(self.version)
        if not reqCAs:
            reqCAs = []
        cr_settings = settings.validate()
        # we don't support DSA in client certificates yet
        cr_settings.dsaSigHashes = []
        valid_sig_algs = self._sigHashesToList(cr_settings)
        certificateRequest.create([ClientCertificateType.rsa_sign,
                                   ClientCertificateType.ecdsa_sign],
                                  reqCAs,
                                  valid_sig_algs)
        msgs.append(certificateRequest)
    msgs.append(ServerHelloDone())
    for result in self._sendMsgs(msgs):
        yield result
    #Get [Certificate,] (if was requested)
    if reqCert:
        if self.version == (3,0):
            # in SSLv3 a client with no certificate answers with a
            # no_certificate alert instead of an empty Certificate
            for result in self._getMsg((ContentType.handshake,
                                        ContentType.alert),
                                       HandshakeType.certificate,
                                       CertificateType.x509):
                if result in (0,1): yield result
                else: break
            msg = result
            if isinstance(msg, Alert):
                #If it's not a no_certificate alert, re-raise
                alert = msg
                if alert.description != \
                        AlertDescription.no_certificate:
                    self._shutdown(False)
                    raise TLSRemoteAlert(alert)
            elif isinstance(msg, Certificate):
                clientCertificate = msg
                if clientCertificate.cert_chain and \
                        clientCertificate.cert_chain.getNumCerts() != 0:
                    clientCertChain = clientCertificate.cert_chain
            else:
                raise AssertionError()
        elif self.version in ((3,1), (3,2), (3,3)):
            # TLS 1.0-1.2: an empty Certificate message signals "no
            # client certificate"
            for result in self._getMsg(ContentType.handshake,
                                       HandshakeType.certificate,
                                       CertificateType.x509):
                if result in (0,1): yield result
                else: break
            clientCertificate = result
            if clientCertificate.cert_chain and \
                    clientCertificate.cert_chain.getNumCerts() != 0:
                clientCertChain = clientCertificate.cert_chain
        else:
            raise AssertionError()
    #Get ClientKeyExchange
    for result in self._getMsg(ContentType.handshake,
                               HandshakeType.client_key_exchange,
                               cipherSuite):
        if result in (0,1): yield result
        else: break
    clientKeyExchange = result
    #Process ClientKeyExchange
    try:
        premasterSecret = \
            keyExchange.processClientKeyExchange(clientKeyExchange)
    except TLSIllegalParameterException as alert:
        for result in self._sendError(AlertDescription.illegal_parameter,
                                      str(alert)):
            yield result
    except TLSDecodeError as alert:
        for result in self._sendError(AlertDescription.decode_error,
                                      str(alert)):
            yield result
    #Get and check CertificateVerify, if relevant
    # snapshot the transcript hash *before* CertificateVerify arrives;
    # the signature covers the handshake up to this point only
    self._certificate_verify_handshake_hash = self._handshake_hash.copy()
    if clientCertChain:
        for result in self._getMsg(ContentType.handshake,
                                   HandshakeType.certificate_verify):
            if result in (0, 1):
                yield result
            else: break
        certificateVerify = result
        signatureAlgorithm = None
        if self.version == (3, 3):
            # TLS 1.2: the signature algorithm is explicit in the
            # message and must be one we advertised
            valid_sig_algs = \
                self._sigHashesToList(settings,
                                      certList=clientCertChain)
            if certificateVerify.signatureAlgorithm not in valid_sig_algs:
                for result in self._sendError(
                        AlertDescription.illegal_parameter,
                        "Invalid signature algorithm in Certificate "
                        "Verify"):
                    yield result
            signatureAlgorithm = certificateVerify.signatureAlgorithm
        if not signatureAlgorithm and \
                clientCertChain.x509List[0].certAlg == "ecdsa":
            # pre-TLS-1.2 ECDSA implicitly uses SHA-1
            signatureAlgorithm = (HashAlgorithm.sha1,
                                  SignatureAlgorithm.ecdsa)
        cvhh = self._certificate_verify_handshake_hash
        verify_bytes = KeyExchange.calcVerifyBytes(
            self.version,
            cvhh,
            signatureAlgorithm,
            premasterSecret,
            clientHello.random,
            serverHello.random,
            key_type=clientCertChain.x509List[0].certAlg)
        for result in self._check_certchain_with_settings(
                clientCertChain,
                settings):
            if result in (0, 1):
                yield result
            else: break
        public_key = result
        # select the verification method and its parameters based on
        # the signature algorithm family
        if signatureAlgorithm and signatureAlgorithm in (
                SignatureScheme.ed25519, SignatureScheme.ed448):
            # EdDSA hashes internally
            hash_name = "intrinsic"
            salt_len = None
            padding = None
            ver_func = public_key.hashAndVerify
        elif not signatureAlgorithm or \
                signatureAlgorithm[1] != SignatureAlgorithm.ecdsa:
            # RSA: PKCS#1 v1.5 or PSS depending on the scheme
            scheme = SignatureScheme.toRepr(signatureAlgorithm)
            # for pkcs1 signatures hash is used to add PKCS#1 prefix, but
            # that was already done by calcVerifyBytes
            hash_name = None
            salt_len = 0
            if scheme is None:
                padding = 'pkcs1'
            else:
                padding = SignatureScheme.getPadding(scheme)
                if padding == 'pss':
                    hash_name = SignatureScheme.getHash(scheme)
                    salt_len = getattr(hashlib, hash_name)().digest_size
            ver_func = public_key.verify
        else:
            # ECDSA: truncate the digest to the curve order length
            hash_name = HashAlgorithm.toStr(signatureAlgorithm[0])
            verify_bytes = verify_bytes[
                :public_key.public_key.curve.baselen]
            padding = None
            salt_len = None
            ver_func = public_key.verify
        if not ver_func(certificateVerify.signature,
                        verify_bytes,
                        padding,
                        hash_name,
                        salt_len):
            for result in self._sendError(
                    AlertDescription.decrypt_error,
                    "Signature failed to verify"):
                yield result
    yield (premasterSecret, clientCertChain)
def _serverAnonKeyExchange(self, serverHello, keyExchange, cipherSuite):
    """
    Perform the server side of an anonymous (ADH/AECDH) key exchange.

    Generator: yields 0/1 when the socket would block and, as its
    final value, the premaster secret.
    """
    # anonymous suites send an unsigned ServerKeyExchange with the
    # server's ephemeral key share
    kex_message = keyExchange.makeServerKeyExchange()
    # whole server flight: ServerHello, ServerKeyExchange,
    # ServerHelloDone
    outgoing = [serverHello, kex_message, ServerHelloDone()]
    for result in self._sendMsgs(outgoing):
        yield result
    # read the client's ClientKeyExchange; the loop leaves the parsed
    # message in ``result``
    for result in self._getMsg(ContentType.handshake,
                               HandshakeType.client_key_exchange,
                               cipherSuite):
        if result in (0, 1):
            yield result
        else:
            break
    client_kex = result
    try:
        premaster_secret = keyExchange.processClientKeyExchange(client_kex)
    except TLSIllegalParameterException as alert:
        for result in self._sendError(AlertDescription.illegal_parameter,
                                      str(alert)):
            yield result
    except TLSDecodeError as alert:
        for result in self._sendError(AlertDescription.decode_error,
                                      str(alert)):
            yield result
    yield premaster_secret
def _serverFinished(self, premasterSecret, clientRandom, serverRandom,
                    cipherSuite, cipherImplementations, nextProtos,
                    settings):
    """
    Derive the master secret and exchange the final handshake
    messages on the server side.

    Generator: yields 0/1 when the socket would block and, as its
    final value, the master secret.
    """
    if self.extendedMasterSecret:
        # RFC 7627: bind the master secret to the handshake
        # transcript. On resumption or without client certificate
        # authentication the dedicated CertificateVerify hash copy is
        # unset — but then it would equal the running handshake hash
        # anyway, so fall back to it.
        transcript = self._certificate_verify_handshake_hash
        if not transcript:
            transcript = self._handshake_hash
        masterSecret = calc_key(self.version, premasterSecret,
                                cipherSuite, b"extended master secret",
                                handshake_hashes=transcript,
                                output_length=48)
    else:
        # classic derivation from the two hello randoms
        masterSecret = calc_key(self.version, premasterSecret,
                                cipherSuite, b"master secret",
                                client_random=clientRandom,
                                server_random=serverRandom,
                                output_length=48)
    # derive symmetric keys for the pending read/write states
    self._calcPendingStates(cipherSuite, masterSecret,
                            clientRandom, serverRandom,
                            cipherImplementations)
    # the server receives the client's CCS+Finished first, then
    # answers with its own
    expect_npn = nextProtos is not None
    for result in self._getFinished(masterSecret,
                                    cipherSuite,
                                    expect_next_protocol=expect_npn):
        yield result
    for result in self._sendFinished(masterSecret, cipherSuite,
                                     settings=settings):
        yield result
    yield masterSecret
#*********************************************************
# Shared Handshake Functions
#*********************************************************
def _sendFinished(self, masterSecret, cipherSuite=None, nextProto=None,
                  settings=None):
    """
    Send ChangeCipherSpec followed by the Finished message under the
    newly negotiated state.

    Generator: yields 0/1 when the socket would block.
    """
    # buffer the writes so CCS and Finished leave in one TCP packet
    self.sock.buffer_writes = True
    for result in self._sendMsg(ChangeCipherSpec()):
        yield result
    # everything from here on is protected by the new write state
    self._changeWriteState()
    if self._peer_record_size_limit:
        self._send_record_limit = self._peer_record_size_limit
        # this is the TLS 1.2 and earlier mechanism, so the real limit
        # may be lower than what's in the settings
        self._recv_record_limit = min(2**14, settings.record_size_limit)
    if nextProto is not None:
        for result in self._sendMsg(NextProtocol().create(nextProto)):
            yield result
    # the PRF label depends on which side of the connection we are
    label = b"client finished" if self._client else b"server finished"
    verify_data = calc_key(self.version, masterSecret,
                           cipherSuite, label,
                           handshake_hashes=self._handshake_hash,
                           output_length=12)
    if self.fault == Fault.badFinished:
        # deliberate corruption, used by fault-injection tests
        verify_data[0] = (verify_data[0] + 1) % 256
    # send the Finished message under the new state
    for result in self._sendMsg(Finished(self.version).create(verify_data)):
        yield result
    self.sock.flush()
    self.sock.buffer_writes = False
def _getFinished(self, masterSecret, cipherSuite=None,
                 expect_next_protocol=False, nextProto=None):
    """
    Receive and verify the peer's ChangeCipherSpec and Finished.

    Generator: yields 0/1 when the socket would block. The receive
    loops below rely on ``result`` holding the last value produced by
    _getMsg once the loop finishes — that value is the parsed message.

    :param masterSecret: negotiated master secret used to compute the
        expected verify_data
    :param cipherSuite: negotiated cipher suite (selects the PRF)
    :param expect_next_protocol: server side with NPN — a NextProtocol
        message is expected between CCS and Finished
    :param nextProto: client side — the protocol this side selected,
        recorded on the connection
    """
    #Get and check ChangeCipherSpec
    for result in self._getMsg( (ContentType.change_cipher_spec, ContentType.handshake)):
        if result in (0,1):
            yield result
    changeCipherSpec = result
    if changeCipherSpec.type != 1:
        for result in self._sendError(AlertDescription.illegal_parameter,
                                      "ChangeCipherSpec type incorrect"):
            yield result
    #Switch to pending read state
    self._changeReadState()
    #Server Finish - Are we waiting for a next protocol echo?
    if expect_next_protocol:
        for result in self._getMsg(ContentType.handshake, HandshakeType.next_protocol):
            if result in (0,1):
                yield result
        if result is None:
            for result in self._sendError(AlertDescription.unexpected_message,
                                          "Didn't get NextProtocol message"):
                yield result
        self.next_proto = result.next_proto
    else:
        self.next_proto = None
    #Client Finish - Only set the next_protocol selected in the connection
    if nextProto:
        self.next_proto = nextProto
    #Figure out which label to use.
    # we verify the *peer's* Finished, hence the opposite label
    if self._client:
        label = b"server finished"
    else:
        label = b"client finished"
    #Calculate verification data
    verifyData = calc_key(self.version, masterSecret,
                          cipherSuite, label,
                          handshake_hashes=self._handshake_hash,
                          output_length=12)
    #Get and check Finished message under new state
    for result in self._getMsg(ContentType.handshake,
                               HandshakeType.finished):
        if result in (0,1):
            yield result
    finished = result
    if finished.verify_data != verifyData:
        for result in self._sendError(AlertDescription.decrypt_error,
                                      "Finished message is incorrect"):
            yield result
def _handshakeWrapperAsync(self, handshaker, checker):
    """Drive a handshake generator, run the post-handshake checker, and
    translate failures into the connection's fault/shutdown behaviour.

    :param handshaker: generator performing the actual handshake; its
        yielded values are passed through to the caller
    :param checker: optional callable invoked with this connection after a
        successful handshake; a TLSAuthenticationError from it causes a
        fatal close_notify alert to be sent before re-raising
    """
    try:
        for result in handshaker:
            yield result
        if checker:
            try:
                checker(self)
            except TLSAuthenticationError:
                # authentication failed after the handshake completed:
                # notify the peer, then propagate the error
                alert = Alert().create(AlertDescription.close_notify,
                                       AlertLevel.fatal)
                for result in self._sendMsg(alert):
                    yield result
                raise
    except GeneratorExit:
        raise
    except TLSAlert as alert:
        # in fault-injection mode an alert may be the *expected* outcome
        if not self.fault:
            raise
        if alert.description not in Fault.faultAlerts[self.fault]:
            raise TLSFaultError(str(alert))
        else:
            pass
    except:
        # any other failure: tear the connection down, then propagate
        # (bare except is deliberate so even non-Exception errors shut down)
        self._shutdown(False)
        raise
@staticmethod
def _pickServerKeyExchangeSig(settings, clientHello, certList=None,
                              private_key=None,
                              version=(3, 3), check_alt=True):
    """Pick a hash that matches most closely the supported ones.

    Walks the (certificate chain, key) candidates — the primary pair
    first, then any virtual-host alternatives — and returns the first
    (hash_name, cert_chain, key) triple the client also advertises.
    Raises TLSHandshakeFailure when there is no overlap.
    """
    sig_ext = clientHello.getExtension(
        ExtensionType.signature_algorithms)

    if version > (3, 3) and not sig_ext:
        # the error checking was done beforehand; most likely this is a
        # PSK key exchange, which needs no certificate signature
        return None, certList, private_key

    if sig_ext is None or sig_ext.sigalgs is None:
        # RFC 5246: when no hashes are advertised, assume sha1
        return "sha1", certList, private_key

    if check_alt:
        alternatives = ((X509CertChain(i.certificates), i.key)
                        for vh in settings.virtual_hosts
                        for i in vh.keys)
    else:
        alternatives = ()

    for certs, key in chain([(certList, private_key)], alternatives):
        our_schemes = TLSConnection._sigHashesToList(settings,
                                                     certList=certs,
                                                     version=version)
        for scheme_id in our_schemes:
            if scheme_id not in sig_ext.sigalgs:
                continue
            name = SignatureScheme.toRepr(scheme_id)
            if not name and scheme_id[1] in (SignatureAlgorithm.rsa,
                                             SignatureAlgorithm.ecdsa,
                                             SignatureAlgorithm.dsa):
                # legacy (hash, sig) pair without a named scheme
                name = HashAlgorithm.toRepr(scheme_id[0])
            if name:
                return name, certs, key

    # if no match, we must abort per RFC 5246
    raise TLSHandshakeFailure("No common signature algorithms")
@staticmethod
def _sigHashesToList(settings, privateKey=None, certList=None,
                     version=(3, 3)):
    """Convert list of valid signature hashes to array of tuples.

    Builds the signature algorithm list to advertise/accept, filtered by
    the certificate's key type (when a certificate is given) and by the
    protocol version's restrictions.

    :param settings: HandshakeSettings-like object with the configured
        hash/scheme preference lists
    :param privateKey: our private key, used only to exclude
        rsa-pss-sha512 for keys that are too small
    :param certList: certificate chain; its first certificate fixes the
        acceptable key type
    :param version: negotiated protocol version as a (major, minor) tuple
    :return: list of SignatureScheme constants and (hash, sig) tuples,
        in preference order
    """
    certType = None
    publicKey = None
    if certList and certList.x509List:
        certType = certList.x509List[0].certAlg
        publicKey = certList.x509List[0].publicKey

    sigAlgs = []

    # EdDSA schemes (certType "Ed25519"/"Ed448")
    if not certType or certType == "Ed25519" or certType == "Ed448":
        for sig_scheme in settings.more_sig_schemes:
            if version < (3, 3):
                # EdDSA is supported only in TLS 1.2 and 1.3
                continue
            if certType and sig_scheme != certType:
                continue
            sigAlgs.append(getattr(SignatureScheme, sig_scheme.lower()))

    if not certType or certType == "ecdsa":
        for hashName in settings.ecdsaSigHashes:
            # only SHA256, SHA384 and SHA512 are allowed in TLS 1.3
            if version > (3, 3) and hashName in ("sha1", "sha224"):
                continue
            # in TLS 1.3 ECDSA key curve is bound to hash
            if publicKey and version > (3, 3):
                curve = publicKey.curve_name
                matching_hash = TLSConnection._curve_name_to_hash_name(
                    curve)
                if hashName != matching_hash:
                    continue
            sigAlgs.append((getattr(HashAlgorithm, hashName),
                            SignatureAlgorithm.ecdsa))

    if not certType or certType == "dsa":
        for hashName in settings.dsaSigHashes:
            # DSA was removed in TLS 1.3
            if version > (3, 3):
                continue
            sigAlgs.append((getattr(HashAlgorithm, hashName),
                            SignatureAlgorithm.dsa))

    if not certType or certType in ("rsa", "rsa-pss"):
        for schemeName in settings.rsaSchemes:
            # pkcs#1 v1.5 signatures are not allowed in TLS 1.3
            if version > (3, 3) and schemeName == "pkcs1":
                continue
            for hashName in settings.rsaSigHashes:
                # rsa-pss certificates can't be used to make PKCS#1 v1.5
                # signatures
                if certType == "rsa-pss" and schemeName == "pkcs1":
                    continue
                try:
                    # 1024 bit keys are too small to create valid
                    # rsa-pss-SHA512 signatures
                    if schemeName == 'pss' and hashName == 'sha512'\
                            and privateKey and privateKey.n < 2**2047:
                        continue
                    # advertise support for both rsaEncryption and RSA-PSS OID
                    # key type
                    if certType != 'rsa-pss':
                        sigAlgs.append(getattr(SignatureScheme,
                                               "rsa_{0}_rsae_{1}"
                                               .format(schemeName, hashName)))
                    if certType != 'rsa':
                        sigAlgs.append(getattr(SignatureScheme,
                                               "rsa_{0}_pss_{1}"
                                               .format(schemeName, hashName)))
                except AttributeError:
                    # scheme/hash combination has no named constant;
                    # pkcs1 falls back to the legacy (hash, sig) tuple
                    if schemeName == 'pkcs1':
                        sigAlgs.append((getattr(HashAlgorithm, hashName),
                                        SignatureAlgorithm.rsa))
                    continue
    return sigAlgs
@staticmethod
def _curveNamesToList(settings):
    """Map the configured ECC curve names onto TLS GroupName identifiers."""
    return [getattr(GroupName, curve_name)
            for curve_name in settings.eccCurves]
@staticmethod
def _groupNamesToList(settings):
    """Map the configured FFDHE group names onto TLS GroupName identifiers."""
    return [getattr(GroupName, group_name)
            for group_name in settings.dhGroups]
@staticmethod
def _curve_name_to_hash_name(curve_name):
"""Returns the matching hash for a given curve name, for TLS 1.3
expects the python-ecdsa curve names as parameter
"""
if curve_name == "NIST256p":
return "sha256"
if curve_name == "NIST384p":
return "sha384"
if curve_name == "NIST521p":
return "sha512"
raise TLSIllegalParameterException(
"Curve {0} is not supported in TLS 1.3".format(curve_name))
|
996,313 | 8137ddca1c8aaa5bb2653af4251cd06ac6f3c5b4 | from itertools import count
from statistics import mode
from tkinter import N
from django.http.response import HttpResponseBadRequest
from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
from django.utils import timezone
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views import View
from django.core.paginator import Paginator
from django.db.models import Q, Count
from django.conf import settings
from django.core.cache import cache
import uuid
import operator
import json
from itertools import chain
from .forms import DistractionTaskForm, UserForm, UserDemographicForm, DomainKnowledgeForm
from .forms import SelectedArtworkForm, StudyTransitionForm, PostStudyForm, PostStudyGeneralForm
from .models import User, UserDemographic, Artwork, ArtworkVisited
from .models import UserCondition, ArtworkSelected, RecommendedArtwork
from collector.models import Interaction
from recommendations.models import Similarities, DataRepresentation
from .util import get_condition, get_order
def index(request, prolific_id = None):
    """Main entry point: routes the consent -> demographic -> domain -> study flow.

    POST requests (user-study context only) are dispatched to the handler
    matching the submitted form; other requests render whatever stage the
    session says the participant is at.

    :param request: Django HttpRequest
    :param prolific_id: 'prolific' when the participant arrived via Prolific
    """
    if (request.method == 'POST') and (settings.CONTEXT == 'user'):
        if 'information_sheet_form' in request.POST:
            return handle_information_sheet_post(request, prolific_id)
        elif 'demographic_form' in request.POST:
            # check that the user has provided consent in the prior step before
            # progressing with collecting their data
            if User.objects.get(user_id=request.session['user_id']).consent:
                return handle_demographic_post(request)
            else:
                # if they haven't, then reload the page with the consent form
                return render(request, "museum_site/index.html", {
                    'provided_consent': False, 'consent_form': UserForm(),
                    'consent_required_before_demographic': True,
                    'study_context': settings.CONTEXT
                })
        elif 'domain_form' in request.POST:
            # same consent gate as above, for the domain-knowledge form
            if User.objects.get(user_id = request.session['user_id']).consent:
                return handle_domain_knowledge_post(request)
            else:
                return render(request, "museum_site/index.html", {
                    'provided_consent': False, 'consent_form': UserForm(),
                    'consent_required_before_demographic': True,
                    'study_context': settings.CONTEXT
                })
        elif 'distraction_form' in request.POST:
            return handle_distraction_task(request)
    else:
        # non-POST (or non-user context): render the current stage for a
        # participant that already has a session
        if 'user_id' in request.session:
            if 'distraction_task' in request.session:
                # mid-transition between conditions: show the distraction task
                return render(
                    request, 'museum_site/index.html', {
                        'provided_consent': True,
                        'provided_demographics': True,
                        'study_context': settings.CONTEXT,
                        'load_domain': False,
                        'load_distraction': True,
                        'distraction_form': DistractionTaskForm()
                    }
                )
            return handle_render_home_page(request)
    # if the context is the focus group
    if settings.CONTEXT == 'focus':
        # and the user doesn't exist, then we need to create one.
        if not 'user_id' in request.session:
            User.objects.create(
                user_id = 'focus-group-user',
                consent = True,
                email = 'focus@group.com',
                contact_outcome = True,
                user_created = timezone.now()
            )
            request.session['user_id'] = 'focus-group-user'
        return handle_render_home_page(request)
    # fall-through: unknown visitor (or unhandled POST) gets the consent form
    consent_form = UserForm()
    return render(request, "museum_site/index.html", {
        'provided_consent': False, 'consent_form': consent_form,
        'study_context': settings.CONTEXT
    })
def handle_information_sheet_post(request, prolific_id = None):
    """Create the User record from the consent form and show the demographic form.

    On success a fresh user_id is generated, stored in the session, and the
    participant is assigned to a study condition/order. On validation
    failure the consent form is re-rendered with its errors.

    :param request: Django HttpRequest carrying the consent form POST
    :param prolific_id: 'prolific' when the participant arrived via Prolific
    """
    consent_form = UserForm(request.POST)
    if consent_form.is_valid():
        new_user = consent_form.save(commit = False)
        cleaned_data = consent_form.clean()
        # generate the user_id
        new_user.user_id = str(uuid.uuid4())
        if not "user_id" in request.session:
            request.session['user_id'] = new_user.user_id
        # if they want to be contacted about the outcome
        if cleaned_data['contact_outcome']:
            new_user.email = cleaned_data['email']
            new_user.contact_outcome = True
        # if that ID is present
        if prolific_id == 'prolific':
            new_user.prolific = True
        new_user.user_created = timezone.now()
        new_user.save()
        # assign to a condition and order
        condition = get_condition()
        order = get_order()
        UserCondition.objects.create(
            user = new_user,
            condition = condition,
            order = order,
            current_context = 'initial',
            current_step = 1,
            timestamp = timezone.now()
        )
        # now load the demographic survey
        demographic_form = UserDemographicForm()
        return render(request, "museum_site/index.html", {
            'provided_consent': True, 'demographic_form': demographic_form,
            'load_demographic': True, 'study_context': settings.CONTEXT
        })
    else:
        # invalid consent form: re-render it with the validation errors
        return render(request, 'museum_site/index.html', {
            'provided_consent': False, 'consent_form': consent_form,
            'study_context': settings.CONTEXT
        })
def handle_demographic_post(request):
    """Validate and store the demographic survey, then show the domain form.

    BUG FIX: the invalid-form path previously fell off the end of the
    function and returned None, which makes Django raise a ValueError
    ("view didn't return an HttpResponse"). It now re-renders the page
    with the bound form so validation errors are shown, mirroring
    handle_information_sheet_post.

    :param request: Django HttpRequest carrying the demographic form POST
    """
    demographic_form = UserDemographicForm(request.POST)
    if demographic_form.is_valid():
        new_demo = demographic_form.save(commit = False)
        cleaned_data = demographic_form.clean()
        # link the submission to the session's user before saving
        new_demo.user = User.objects.get(user_id = request.session['user_id'])
        new_demo.age = cleaned_data['age']
        new_demo.gender = cleaned_data['gender']
        new_demo.education = cleaned_data['education']
        new_demo.work = cleaned_data['work']
        new_demo.submission_timestamp = timezone.now()
        new_demo.save()
        # move the participant on to the domain-knowledge survey
        domain_form = DomainKnowledgeForm()
        return render(request, "museum_site/index.html", {
            'provided_consent': True, 'provided_demographics': True,
            'domain_form': domain_form, 'load_domain': True,
            'study_context': settings.CONTEXT
        })
    # invalid form: re-render with the bound form so the errors are displayed
    return render(request, "museum_site/index.html", {
        'provided_consent': True, 'demographic_form': demographic_form,
        'load_demographic': True, 'study_context': settings.CONTEXT
    })
def handle_domain_knowledge_post(request):
    """Validate and store the domain-knowledge survey, then render the home page.

    BUG FIX: the invalid-form path previously returned None (Django raises
    on a view that returns no HttpResponse). It now re-renders the page
    with the bound form so validation errors are shown.

    :param request: Django HttpRequest carrying the domain-knowledge POST
    """
    domain_form = DomainKnowledgeForm(request.POST)
    if domain_form.is_valid():
        new_domain = domain_form.save(commit = False)
        # assign the user and submission timestamp
        new_domain.user = User.objects.get(user_id = request.session['user_id'])
        new_domain.submission_timestamp = timezone.now()
        new_domain.save()
        return handle_render_home_page(request)
    # invalid form: re-render with the bound form so the errors are displayed
    return render(request, "museum_site/index.html", {
        'provided_consent': True, 'provided_demographics': True,
        'domain_form': domain_form, 'load_domain': True,
        'study_context': settings.CONTEXT
    })
def artwork(request, artwork_id):
    """Render the detail page for a single artwork.

    Records the visit, converts the JSON-encoded metadata columns for the
    template, and computes the user's current selection state (how many
    artworks are picked this step and whether the selection cap was hit).

    BUG FIX: clearing the 'too_many_selected' session flag used ``==``
    (a no-op comparison) instead of ``=``, so once set the flag could
    never be cleared server-side.

    :param request: Django HttpRequest; requires 'user_id' in the session
    :param artwork_id: primary key (string) of the Artwork to show
    """
    art = Artwork.objects.get(art_id=artwork_id)
    # if the user has previously visited the artwork, then get the rating
    av = ArtworkVisited.objects.filter(user=request.session['user_id'], art=artwork_id)
    if av:
        artwork_rating = av.latest('timestamp').rating
    else:
        artwork_rating = None
    # Convert JSON-encoded columns to lists for the template
    if art.artist:
        if any(e.find("unknown") != -1 for e in json.loads(art.artist)):
            art.artist = [(0, "Unknown artist")]
        else:
            art.artist = [(i, e) for i, e in enumerate(json.loads(art.artist))]
    if art.birth_date:
        art.birth_date = [(i, e) for i, e in enumerate(json.loads(art.birth_date))]
    if art.death_date:
        art.death_date = [(i, e) for i, e in enumerate(json.loads(art.death_date))]
    if art.medium:
        art.medium = json.loads(art.medium)
    if art.linked_topics:
        art.linked_topics = json.loads(art.linked_topics)
    if art.linked_terms:
        art.linked_terms = json.loads(art.linked_terms)
    # build "Name (birth - death)" strings when the three lists line up
    artists = None
    if len(art.artist) > 1 and \
            (art.birth_date is not None and len(art.artist) == len(art.birth_date)) and \
            (art.death_date is not None and len(art.artist) == len(art.death_date)):
        artists = ["{0} ({1} - {2})".format(n[1], db[1], dd[1])
                   for n, db, dd in zip(art.artist, art.birth_date, art.death_date)]
    user = User.objects.get(user_id = request.session['user_id'])
    user_condition = UserCondition.objects.get(user = user)
    # record that the user has seen this artwork
    ArtworkVisited.objects.create(
        user=user,
        art=art,
        timestamp=timezone.now()
    )
    if request.session.get('reset_current_step_count'):
        # NOTE(review): selection_count computed in this if/else is never
        # read below — the context uses selected_artwork.count() instead.
        # Confirm whether the template was meant to receive it.
        selection_count = 0
        request.session['reset_current_step_count'] = False
    else:
        selected_artwork = ArtworkSelected.objects.filter(
            user = user, selection_context = user_condition.current_context
        )
        selection_count = selected_artwork.count()
    # restrict the selection to the current step when one is active
    if request.session.get('current_step'):
        selected_artwork = ArtworkSelected.objects.filter(
            user = user, selection_context = user_condition.current_context,
            step_selected = request.session.get('current_step')
        )
    else:
        selected_artwork = ArtworkSelected.objects.filter(
            user = user, selection_context = user_condition.current_context
        )
    # get the artworks that the user has already selected (to grey out the button)
    already_selected = {sel.selected_artwork.art_id for sel in selected_artwork}
    # the "too many selected" banner stays up until the count drops back
    # below the upper bound
    too_many = False
    if request.session.get('too_many_selected'):
        too_many = True
        if selected_artwork.count() < settings.SELECTION_UPPER_BOUND:
            # was: request.session['too_many_selected'] == False  (no-op)
            request.session['too_many_selected'] = False
            too_many = False
    context = {
        'provided_consent': True, 'page_id': 'art_' + artwork_id,
        'artwork': art,
        'artists': artists,
        'artwork_rating': str(artwork_rating),
        'study_context': settings.CONTEXT,
        'selection_count': selected_artwork.count(),
        'already_selected': already_selected,
        'too_many_selected': too_many,
        'is_artwork_page': True
    }
    # fetch the top 5 most similar artworks to this one, if the context is
    # the focus group
    if settings.CONTEXT == 'focus':
        result_set = Similarities.objects.filter(
            art = art,
            representation = DataRepresentation.objects.get(source = settings.DATA_REP_TYPE)
        )[:5]
        context['similar_artworks'] = result_set
    return render(request, "museum_site/artwork.html", context)
def step_filtering(artworks, user, selection_context):
    """Split out the user's prior selections and filter them from ``artworks``.

    :param artworks: iterable of Artwork objects to filter
    :param user: the User whose selections should be excluded
    :param selection_context: the study context whose selections to look up
    :return: (selected_artworks, pre_filter_length, artworks_filtered)

    NOTE(review): in the 'random' branch the set holds art_id strings, but
    in the other branch it holds ArtworkSelected.selected_artwork objects —
    yet the filter below always compares ``art.art_id`` against the set, so
    for non-'random' contexts nothing would be filtered. Confirm intent
    (the return value may rely on the object set) before changing.
    """
    # get all the artworks that the user has selected for this condition
    if selection_context == 'random':
        selected_artworks = {
            selected_art.selected_artwork.art_id
            for selected_art in ArtworkSelected.objects.filter(
                user = user, selection_context = selection_context
            )
        }
    else:
        selected_artworks = {
            selected_art.selected_artwork
            for selected_art in ArtworkSelected.objects.filter(
                user = user, selection_context = selection_context
            )
        }
    # record how many artworks there are pre-filtering
    pre_filter_length = len(artworks)
    # filter those out that have been previously selected
    artworks_filtered = [
        art for art in artworks
        if art.art_id not in selected_artworks
    ]
    return selected_artworks, pre_filter_length, artworks_filtered
def handle_render_home_page(request):
    """Render the artwork-grid home page for the participant's current stage.

    The study has three contexts stored on UserCondition: 'initial' (fixed
    starting set), 'random' (random recommendations) and 'model'
    (similarity-model recommendations). The set of artworks shown is cached
    in the session (commented-out lines show the earlier cache-based
    implementation) and refreshed whenever the session's step lags behind
    the step recorded in the database.

    :param request: Django HttpRequest; requires 'user_id' in the session
    """
    user = User.objects.get(user_id = request.session['user_id'])
    user_condition = UserCondition.objects.get(user = user)
    if user_condition.current_context == 'initial':
        # get the artworks if they're stored in the cache
        # artworks = cache.get('artworks')
        artworks = request.session.get('artworks')
        print('initial context; artworks', artworks)
        # if there aren't any stored artworks, i.e., the first the user joins
        if artworks is None:
            print('the artworks are none')
            # get the artworks that are in the initial set and randomise
            artworks = Artwork.objects.filter(art_id__in = settings.INITIAL_ARTWORKS).order_by('?')
            # store them in the cache, without a timeout.
            # cache.set('artworks', artworks, timeout = None)
            request.session['artworks'] = [a.art_id for a in artworks]
            # request.session['artworks'] = artworks
            print('request initial artworks', request.session.get('artworks'))
            # also store in the session what the current context is (to check later)
            # cache.set('current_context', user_condition.current_context, timeout = None)
            request.session['current_context'] = user_condition.current_context
        # if show_instructions is not in the session context, then they've not seen it yet.
        if 'show_instructions' not in request.session:
            # set to true to trigger the instructions being shown
            request.session['show_instructions'] = True
        else: # otherwise, it is and they have.
            # set to false to prevent them bein shown again
            request.session['show_instructions'] = False
    elif user_condition.current_context == 'random':
        # we get the artwork query set in a random way - but should remain the same
        # when the page is refreshed
        # get the artworks and current context
        # artworks = cache.get('artworks')
        # cached_current_context = cache.get('current_context')
        artworks = request.session.get('artworks')
        cached_current_context = request.session.get('current_context')
        # if the cached current context is not random, then the user is entering a new condition
        if cached_current_context != 'random':
            # get a random set of 30 artworks
            artworks = Artwork.objects.order_by('?')[:30]
            # save the artworks, with the user, and the condition in the database
            for art_work in artworks:
                RecommendedArtwork.objects.create(
                    user = user,
                    recommended_artwork = art_work,
                    recommendation_context = 'random'
                )
            # update the artwork and current context in the cache
            # cache.set('artworks', artworks, timeout = None)
            # cache.set('current_context', 'random', timeout = None)
            request.session['artworks'] = [a.art_id for a in artworks]
            # request.session['artworks'] = artworks
            request.session['current_context'] = 'random'
            # add the current step into the cache to keep track
            # cache.set('current_step', 1, timeout = None)
            request.session['current_step'] = 1
        print('CURRENT STEP', user_condition.current_step)
        # if these aren't equal, then it means that the user has moved along into another step
        # if cache.get('current_step') != user_condition.current_step:
        if request.session.get('current_step') != user_condition.current_step:
            # get the selected artworks from the 'initial' step
            selected_artworks_initial = [
                s_a.selected_artwork.art_id
                for s_a in ArtworkSelected.objects.filter(
                    user = user, selection_context = 'initial'
                )
            ]
            # get the artworks that the user has selected in this condition
            selected_artworks_condition = [
                s_a.selected_artwork.art_id
                for s_a in ArtworkSelected.objects.filter(
                    user = user, selection_context = 'random'
                )
            ]
            # fetching the new set of artworks
            # 1) exclude those that the user selected in the initial step
            # 2) exclude those that the has selected as part of this condition
            # 3) randomly order and take the top 30
            artworks = Artwork.objects.exclude(
                art_id__in = selected_artworks_initial
            ).exclude(
                art_id__in = selected_artworks_condition
            ).order_by('?')[:30]
            # update the cache
            # cache.set('artworks', artworks, timeout = None)
            # cache.set('current_step', cache.get('current_step') + 1, timeout = None)
            request.session['artworks'] = [a.art_id for a in artworks]
            # request.session['artworks'] = artworks
            request.session['current_step'] = request.session.get('current_step') + 1
    else:
        assert user_condition.current_context == 'model'
        # get the artworks and current context
        # artworks = cache.get('artworks')
        # cached_current_context = cache.get('current_context')
        artworks = request.session.get('artworks')
        cached_current_context = request.session.get('current_context')
        # if the cached current context is not model, then the user is entering this condition
        # for the first time.
        if cached_current_context != 'model':
            # get the selected artworks by the user
            selected_artworks = [
                selected_art.selected_artwork
                for selected_art in ArtworkSelected.objects.filter(
                    user = user, selection_context = 'initial'
                )
            ]
            # get the model condition that the user should see (either meta, image, or concatenated)
            model_condition = DataRepresentation.objects.get(source = user_condition.condition)
            # get the similar artworks based on those selected and the representation, order
            # by the score (descending), and take the top 30
            artworks = Similarities.objects.filter(
                representation = model_condition, art__in = selected_artworks
            ).order_by('-score')[:30]
            # get the artworks themselves
            artworks = [s_a.similar_art for s_a in artworks]
            # save the artworks, with the user and the condition, in the database
            for art_work in artworks:
                RecommendedArtwork.objects.create(
                    user = user,
                    recommended_artwork = art_work,
                    recommendation_context = 'model',
                    recommended_step = 1
                )
            # update the artwork and current context in the cache
            # cache.set('artworks', artworks, timeout = None)
            # cache.set('current_context', 'model', timeout = None)
            request.session['artworks'] = [a.art_id for a in artworks]
            # request.session['artworks'] = artworks
            request.session['current_context'] = 'model'
            # add the current step into the cache to keep track
            # cache.set('current_step', 1, timeout = None)
            request.session['current_step'] = 1
        # if cache.get('current_step') != user_condition.current_step:
        if request.session.get('current_step') != user_condition.current_step:
            # the recommendations should be based on those selected in the previous step
            # get the selected artworks from the previous step
            selected_artworks_previous_step = [
                s_a.selected_artwork
                for s_a in ArtworkSelected.objects.filter(
                    user = user, selection_context = 'model',
                    step_selected = request.session.get('current_step')
                )
            ]
            # get the other artworks that the user has selected in this condition
            # we don't want to include these in the images again
            selected_artworks_all_other_steps = [
                s_a.selected_artwork
                for s_a in ArtworkSelected.objects.filter(
                    user = user, selection_context = 'model'
                ).exclude(
                    step_selected = request.session.get('current_step')
                )
            ]
            # get the model condition that the user should be seeing (meta, image, or concat)
            model_condition = DataRepresentation.objects.get(source = user_condition.condition)
            # the recommendation part
            # 1) get the similar artworks based on those selected in the previous step
            # 2) exluded those that the user has previously selected (in all other steps)
            # 3) order by the score (descending)
            artworks = Similarities.objects.filter(
                representation = model_condition, art__in = selected_artworks_previous_step
            ).exclude(
                similar_art__in = selected_artworks_all_other_steps
            ).exclude(
                similar_art__in = selected_artworks_previous_step
            ).order_by('-score')
            # artworks = [s_a.similar_art for s_a in artworks]
            # deduplicate while preserving the score ordering, keep the top 30
            seen_set = set()
            no_duplicates = []
            for art in artworks:
                if art.similar_art.art_id in seen_set:
                    continue
                else:
                    seen_set.add(art.similar_art.art_id)
                    no_duplicates.append(art.similar_art)
            artworks = no_duplicates[:30]
            # save the artworks, with the user, condition, and step in the database
            for art in artworks:
                RecommendedArtwork.objects.create(
                    user = user,
                    recommended_artwork = art,
                    recommendation_context = 'model',
                    # recommended_step = cache.get('current_step') + 1
                    recommended_step = request.session.get('current_step') + 1
                )
            # update the cache
            # cache.set('artworks', artworks, timeout = None)
            # cache.set('current_step', cache.get('current_step') + 1, timeout = None)
            request.session['artworks'] = [a.art_id for a in artworks]
            # request.session['artworks'] = artworks
            request.session['current_step'] = request.session.get('current_step') + 1
    # using the list of artwork ids, fetch the actual objects
    # NOTE(review): on branches that refresh the set, ``artworks`` holds
    # Artwork objects rather than ids at this point — presumably Django's
    # pk coercion makes the .get() below still work; confirm.
    artworks = [
        Artwork.objects.get(art_id = art_id)
        for art_id in artworks
    ]
    # convert the artist list
    for art in artworks:
        if art.artist:
            try:
                artist_list = json.loads(art.artist)
            except json.decoder.JSONDecodeError:
                artist_list = art.artist.split(',')[0]
            if len(artist_list) > 1:
                art.artist = ', '.join(artist_list)
            else:
                art.artist = artist_list[0]
        else:
            art.artist = 'unknown artist'
    if request.session.get('current_step'):
        print('current step', request.session.get('current_step'))
        selected_artwork = ArtworkSelected.objects.filter(
            user = user, selection_context = user_condition.current_context,
            # step_selected = cache.get('current_step')
            step_selected = request.session.get('current_step')
        )
        print('number of selected artworks', selected_artwork.count())
    else:
        print('not in a step')
        selected_artwork = ArtworkSelected.objects.filter(
            user = user, selection_context = user_condition.current_context
        )
    # get the artworks that the user has already selected (to grey out)
    already_selected = {art.selected_artwork.art_id for art in selected_artwork}
    if settings.CONTEXT == 'focus':
        paginator = Paginator(artworks, 30)
        page_number = request.GET.get('page')
        artworks = paginator.get_page(page_number)
    # if show_reminder is in the session, then set a flag to say they're about to see it
    # if that flag is there, then set the show reminder as false
    if 'show_reminder' in request.session and 'reminder_seen' not in request.session:
        # set a flag for when the page reloads on each artwork selection
        request.session['reminder_seen'] = True
    elif 'show_reminder' in request.session and 'reminder_seen' in request.session:
        # set it to false as they will have seen it if the condition above is met.
        request.session['show_reminder'] = False
    context = {
        'provided_consent': True, 'page_id': 'index',
        'artworks': artworks,
        'study_context': settings.CONTEXT,
        'selection_context': user_condition.current_context,
        'selection_count': selected_artwork.count(),
        'already_selected': already_selected,
        'show_instructions': request.session.get('show_instructions', False),
        'show_reminder': request.session.get('show_reminder', False)
    }
    return render(request, 'museum_site/index.html', context)
def selected_artwork(request):
    """Handle the Select/Deselect button on an artwork page.

    'Select' records an ArtworkSelected row (rejected with a session flag
    when the per-step upper bound is reached); 'Deselect' removes it.
    Both paths log an Interaction event and redirect back to the artwork.

    NOTE(review): non-POST requests and invalid forms return None —
    presumably unreachable via the URL conf; confirm.

    :param request: Django HttpRequest with 'artwork_id' and
        'selection_button' in the POST data
    """
    if request.method == 'POST':
        form = SelectedArtworkForm(request.POST)
        if form.is_valid():
            user = User.objects.get(user_id = request.session['user_id'])
            artwork = Artwork.objects.get(art_id = request.POST['artwork_id'])
            user_condition = UserCondition.objects.get(user = user)
            timestamp = timezone.now()
            if form.cleaned_data['selection_button'] == 'Select':
                # -1 marks selections made outside a numbered step
                # current_step = -1 if not cache.get('current_step') else cache.get('current_step')
                current_step = -1 if not request.session.get('current_step') else request.session.get('current_step')
                # get the number of artworks that the user has already selected
                number_selected = ArtworkSelected.objects.filter(
                    user = user, selection_context = user_condition.current_context,
                    # step_selected = cache.get('current_step')
                    step_selected = request.session.get('current_step')
                ).count()
                # if the number of selected is equal to the upper bound.
                if number_selected >= settings.SELECTION_UPPER_BOUND:
                    request.session['too_many_selected'] = True
                    return redirect('museum_site:artwork', artwork_id = artwork.art_id)
                # current_step = cache.get('current_step')
                current_step = request.session.get('current_step')
                # save that the user has selected the artwork
                ArtworkSelected.objects.create(
                    user = user,
                    selected_artwork = artwork,
                    selection_context = user_condition.current_context,
                    # step_selected = -1 if not cache.get('current_step') else cache.get('current_step'),
                    step_selected = -1 if not request.session.get('current_step') else request.session.get('current_step'),
                    timestamp = timestamp
                )
                # save it as an interaction event
                Interaction.objects.create(
                    user = user,
                    timestamp = timestamp,
                    content_id = artwork.art_id,
                    event = 'artwork-selected',
                    page = 'art_' + artwork.art_id
                )
                return redirect('museum_site:artwork', artwork_id = artwork.art_id)
            else:
                assert form.cleaned_data['selection_button'] == 'Deselect'
                # delete the record from the database
                ArtworkSelected.objects.filter(
                    user = user, selected_artwork = artwork,
                    step_selected = -1 if not request.session.get('current_step') else request.session.get('current_step')
                ).delete()
                # save it as an interaction event
                Interaction.objects.create(
                    user = user,
                    timestamp = timestamp,
                    content_id = artwork.art_id,
                    event = 'artwork-deselected',
                    page = 'art_' + artwork.art_id
                )
                return redirect('museum_site:artwork', artwork_id = artwork.art_id)
def transition_study_stage(request):
    """Advance the participant through the study's state machine.

    Transitions: initial -> (random|model per assigned order), between the
    two recommendation conditions after NUMBER_OF_STEPS steps (inserting
    the distraction task), to the post-study questionnaires at the end, or
    simply to the next step within the current condition.

    NOTE(review): requests that are out of bounds, non-POST, invalid, or
    arrive after 'load_post_study' is set return None — presumably the
    template prevents those submissions; confirm.

    :param request: Django HttpRequest carrying the StudyTransitionForm POST
    """
    # print('transition clicked')
    if request.method == 'POST' and 'load_post_study' not in request.session:
        form = StudyTransitionForm(request.POST)
        if form.is_valid():
            user = User.objects.get(user_id = request.session['user_id'])
            user_condition = UserCondition.objects.get(user = user)
            selection_count = ArtworkSelected.objects.filter(
                user = user, selection_context = user_condition.current_context,
                # step_selected = -1 if not cache.get('current_step') else cache.get('current_step')
                step_selected = -1 if not request.session.get('current_step') else request.session.get('current_step')
            ).count()
            # print('current user condition:', user_condition.current_context)
            # if the number of artworks selected is between the lower and upper bound
            within_bounds = settings.SELECTION_LOWER_BOUND <= selection_count <= settings.SELECTION_UPPER_BOUND
            # if the number of artworks selected is between the lower and upper bound
            if (within_bounds and user_condition.current_context == 'initial'):
                # and if the user is current in the initial context (just starting the study)
                # then we need to set their current context based on the first condition
                # the user should see (either random or model)
                if user_condition.order == 'random':
                    user_condition.current_context = 'random'
                else:
                    user_condition.current_context = 'model'
                user_condition.save() # update the user condition record in the DB
                # redirect to the index; the updated user condition will change the
                # artworks that the user sees.
                return redirect('museum_site:index')
            # otherwise, they're transitioning between part one and two or to part-two -> end
            elif (within_bounds and user_condition.current_step == settings.NUMBER_OF_STEPS):
                if user_condition.current_context == 'random' and user_condition.order == 'random':
                    # we need to update their current context to 'model', reset their
                    # current step to 1, and save it
                    user_condition.current_context = 'model'
                    user_condition.current_step = 1
                    user_condition.save()
                    print('updated context to', user_condition.current_context)
                    request.session['distraction_task'] = True
                    # redirect to the index
                    return redirect('museum_site:index')
                # if their context is model and first condition is model
                elif user_condition.current_context == 'model' and user_condition.order == 'model':
                    # we need to update their current context to 'random', reset their
                    # current step to 1, and save it
                    user_condition.current_context = 'random'
                    user_condition.current_step = 1
                    user_condition.save()
                    request.session['distraction_task'] = True
                    # redirect to the index
                    return redirect('museum_site:index')
                # otherwise, they're at the end of the study and the post-study questionnaires
                # should be rendered
                else: # this is the final part.
                    request.method = 'GET'
                    return redirect('museum_site:post-study', which_form = 'part_one')
            elif within_bounds:
                # state remains the same
                # update the number of steps (+1)
                user_condition.current_step = user_condition.current_step + 1
                user_condition.save()
                return redirect('museum_site:index')
def handle_distraction_task(request):
    """Store the distraction-task answers and return to the study home page.

    Clears the 'distraction_task' session flag and arms the instructions
    reminder for the next condition.

    NOTE(review): an invalid form returns None (Django would raise) —
    presumably the form cannot fail validation in practice; confirm.

    :param request: Django HttpRequest carrying the DistractionTaskForm POST
    """
    distraction_form = DistractionTaskForm(request.POST)
    if distraction_form.is_valid():
        new_submission = distraction_form.save(commit = False)
        # assign the user and the submission timestamp
        new_submission.user = User.objects.get(user_id = request.session['user_id'])
        new_submission.submission_timestamp = timezone.now()
        new_submission.save()
        del request.session['distraction_task']
        # add in the flag to bring up the 'reminder' of the instructions.
        request.session['show_reminder'] = True
        return handle_render_home_page(request)
def post_study(request, which_form):
    """Render and process the post-study questionnaires.

    Flow: part_one -> part_two -> general -> thank-you page.

    NOTE(review): block indentation was reconstructed; the redirect chain
    below is assumed to run after (and regardless of) form validation --
    confirm against the original source.
    """
    if request.method == 'POST':
        study_id = request.POST['study_id']
        if study_id == 'general':
            print('posting general')
            post_study_form = PostStudyGeneralForm(request.POST)
        else:
            print('posting', study_id)
            post_study_form = PostStudyForm(request.POST)
        if post_study_form.is_valid():
            new_submission = post_study_form.save(commit = False)
            # assign the user, submission timestamp, and the part
            new_submission.user = User.objects.get(user_id = request.session['user_id'])
            new_submission.submission_timestamp = timezone.now()
            if study_id != 'general':
                new_submission.part = study_id
            new_submission.save()
            print('saved!')
        if study_id != 'general':
            if study_id == 'part_two':
                # they've done both part one and two, now load the general form
                return redirect('museum_site:post-study', which_form = 'general')
            else:
                # otherwise we want to just load part two.
                return redirect('museum_site:post-study', which_form = 'part_two')
        else:
            # we render the thank you page
            return redirect('museum_site:thank-you')
    else:
        # if it's a request for general form, then return that
        if which_form == 'general':
            print('which form == general')
            return render(request, 'museum_site/post_study.html', {
                'post_study_form': PostStudyGeneralForm(),
                'part': 'general',
                'provided_consent': True
            })
        elif which_form == 'part_two':
            print('which form == part_two')
            return render(request, 'museum_site/post_study.html', {
                'post_study_form': PostStudyForm(),
                'part': 'part_two',
                'provided_consent': True
            })
        else:
            print('which form == part_one')
            return render(request, 'museum_site/post_study.html', {
                'post_study_form': PostStudyForm(),
                'part': 'part_one',
                'provided_consent': True
            })
def thank_you(request):
    """Render the final thank-you page, flagging Prolific participants."""
    user = User.objects.get(user_id=request.session['user_id'])
    is_prolific = bool(user.prolific)
    return render(request, 'museum_site/thankyou.html', context={
        'prolific': is_prolific
    })
@ensure_csrf_cookie
def save_rating(request):
    """Attach a POSTed rating to the user's most recent visit of an artwork.

    Returns 200 'ok' on success, or 400 when the request is not a POST.
    """
    if request.method != 'POST':
        return HttpResponseBadRequest('rating not posted to backend')
    # most recent (user, artwork) visit gets the rating
    visit = ArtworkVisited.objects.filter(
        user=request.session['user_id'],
        art=request.POST['artwork_id'],
    ).latest('timestamp')
    visit.rating = request.POST['rating_number']
    visit.save()
    return HttpResponse('ok')
# if there is a search request
# query = None
# if request.GET.get('search'):
# query = request.GET.get('search').strip()
# art = Artwork.objects.filter(
# Q(title__icontains=query)
# | Q(artist__icontains=query)
# | Q(medium__icontains=query)
# | reduce(operator.or_, (Q(title__icontains = x) for x in query.split(' ')))
# | reduce(operator.or_, (Q(artist__icontains = x) for x in query.split(' ')))
# | reduce(operator.or_, (Q(medium__icontains=x) for x in query.split(' ')))
# )
# # art = Artwork.objects.filter(
# # complex_query(query, "title") |
# # complex_query(query, "artist") |
# # complex_query(query, "medium")
# # )
# else:
# art = Artwork.objects.all()
# Convert to list
# for e in art:
# if e.artist:
# if e.artist.find("unknown") != -1:
# # e.artist = ["Unknown artist"]
# e.artist = "Unknown artist"
# else:
# e.artist = ", ".join(json.loads(e.artist)) |
996,314 | bef43a16de318aa6a52d4134c9e4872a91198045 | import os
import sys
from sqlalchemy import engine_from_config
from pyramid.paster import get_appsettings, setup_logging
from pyramid.scripts.common import parse_vars
from bash.models import dbs, Base, StaffModel
import transaction
def usage(argv):
    """Print the command-line usage string and exit with status 1."""
    cmd = os.path.basename(argv[0])
    print(f'usage: {cmd} <config_uri> [var=value]\n'
          f'(example: "{cmd} development.ini")')
    sys.exit(1)
def main(argv=sys.argv):
    """Create the database schema and seed an initial admin staff account.

    Usage: <config_uri> [var=value ...] (e.g. "development.ini").
    """
    if len(argv) < 2:
        usage(argv)
    config_uri = argv[1]
    options = parse_vars(argv[2:])
    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)
    # Bind the session factory to the engine configured under 'sqlalchemy.*'
    engine = engine_from_config(settings, 'sqlalchemy.')
    dbs.configure(bind=engine)
    Base.metadata.create_all(engine)
    with transaction.manager:
        # NOTE(review): hard-coded admin credentials (pre-hashed password and
        # fixed salt) are committed to source control -- confirm this seed is
        # for development only.
        model = StaffModel(
            id=1, role='admin', name='admin',
            password='993b33d7c0fc53d51125255feae9b9'
            '4ce5213c2269d254b895c423c87035610e',
            salt='666')
        dbs.add(model)
        # session.commit()
|
996,315 | 041e08ac8f5664520f1957ee8bf889017e71be25 | import string
from datetime import timedelta
from core.BaseScraper import BaseScraper
from core.QueueItem import QueueItem
from core.data.SymbolRepository import SymbolRepository
from request.MWSearchRequest import MWSearchRequest
alphabet = string.ascii_lowercase
class RandomMarketWatchSymbols(BaseScraper):
    """Scraper that brute-forces every 1-4 letter symbol against the
    MarketWatch search endpoint and records newly seen instruments."""

    def __init__(self):
        super(RandomMarketWatchSymbols, self).__init__()
        self.symbol_repository = SymbolRepository()

    def get_symbols(self):
        """Yield every lowercase candidate symbol of length 1 through 4."""
        for i1 in alphabet:
            yield i1
        for i1 in alphabet:
            for i2 in alphabet:
                yield i1 + i2
        for i1 in alphabet:
            for i2 in alphabet:
                for i3 in alphabet:
                    yield i1 + i2 + i3
        for i1 in alphabet:
            for i2 in alphabet:
                for i3 in alphabet:
                    for i4 in alphabet:
                        yield i1 + i2 + i3 + i4

    def get_queue_item(self, symbol):
        """Build the crawl-queue entry for one candidate symbol."""
        request = MWSearchRequest(symbol)
        return QueueItem(url=request.get_url(), http_method=request.get_http_method(), metadata={'symbol': symbol})

    def get_time_delta(self):
        """Re-scrape cadence: each symbol is eligible again after three days."""
        return timedelta(days=3)

    def process_data(self, queue_item):
        """Parse a search response and insert new or updated symbols.

        Bug fix: repository entries are not guaranteed to be dicts -- see the
        production log at the bottom of this module ("'str' object has no
        attribute 'keys'") -- so an existing entry is only merged when it is
        actually a mapping.
        """
        data = MWSearchRequest.parse_response(queue_item.get_response().get_data())
        if not data:
            return
        new_symbols = []
        for d in data:
            existing = list(self.symbol_repository.find(
                symbol=d['symbol'], exchange=d['exchange'], instrument_type=d['instrument_type']))
            if not existing:
                new_symbols.append(d)
            else:
                e = existing[0]
                # Only merge when both sides are dicts; previously calling
                # e.keys() crashed on string entries.
                if isinstance(e, dict) and 'charting_symbol' not in e and 'charting_symbol' in d:
                    e['charting_symbol'] = d['charting_symbol']
                    new_symbols.append(e)
        if new_symbols:
            self.symbol_repository.insert(new_symbols)
#
# ERROR
# 2020-01-06 16:01:47,923 | INFO | 3 | 06/01/2020 16:01:46 - Error occurred while processing data for scraper <acquisition.scrapers.RandomMarketWatchSymbols.RandomMarketWatchSymbols object at 0x7f98d8616e10>: 'str' object has no attribute 'keys'
# 2020-01-06 16:01:47,944 | INFO | 3 | 06/01/2020 16:01:46 - RandomMarketWatchSymbols - processing took 0.06552s: {'symbol': 'dekk'} |
def count_stair_numbers(n):
    """Return the number of n-digit "stair numbers" modulo 1_000_000_000.

    A stair number has no leading zero and every pair of adjacent digits
    differs by exactly 1 (BOJ 10844). Dynamic programming over the last
    digit: counts[d] = number of valid prefixes ending in digit d.

    Refactored from a monolithic stdin/stdout script into a pure function
    plus a __main__ guard so it can be imported and unit-tested; the
    command-line behavior (read N, print the answer) is unchanged.
    """
    MOD = 1000000000
    if n == 1:
        # digits 1..9 are all valid single-digit stair numbers
        return 9
    counts = [1] * 10
    counts[0] = 0  # no leading zero
    for _ in range(n - 1):
        nxt = [0] * 10
        nxt[0] = counts[1]
        nxt[9] = counts[8]
        for d in range(1, 9):
            nxt[d] = (counts[d - 1] + counts[d + 1]) % MOD
        counts = nxt
    return sum(counts) % MOD


if __name__ == "__main__":
    print(count_stair_numbers(int(input())))
|
996,317 | 19650c7a4b2468a75634f362582822c214554c4f | import nltk
from sklearn.naive_bayes import MultinomialNB
from Classifiers import classifier_abc
# from .classifier_abc import Classifier
from Classifiers import classifier_abc
class MultinomailNBAlgorithm(classifier_abc.Classifier):
    """Multinomial Naive Bayes classifier wrapper (nltk's sklearn bridge).

    NOTE(review): the class name misspells "Multinomial"; kept as-is because
    callers may import it under this exact name.
    """

    def __init__(self, training_required = False,
                 raw_data_path =None,
                 train_set_no=6000, test_set_no=1000,):
        # Specific Dump file name for algorithm's Trained data
        # NOTE(review): the file name starts with a space
        # (" MultinomialNB.pickle") -- confirm this is intentional.
        self.classifierDumpFile = " MultinomialNB.pickle"
        # Check if algorithm Trained data is not present,
        # or user requests training.
        # if training_required or self.need_training() :
        #     self.train(raw_data_path,
        #                train_set_no, test_set_no, )
    #end __init__

    def classify(self, training_set, classifierDumpFile):
        """
        Train MultinomialNB on the given feature set and persist the model.

        :param training_set: Features to train the classifier
        :param classifierDumpFile: File name to dump -save- the trained data.
        :return: classifier
        """
        # Train the module of the specified algorithm using the created training set.
        self.MNB_classifier = nltk.SklearnClassifier (MultinomialNB ())
        self.MNB_classifier.train (training_set)
        # Save the trained data in pickle for further use.
        self.dump_files(self.MNB_classifier, classifierDumpFile)
        return self.MNB_classifier
    # end classify

    def __str__(self):
        return "MultinomailNBAlgorithm"
    #end str
996,318 | 78175b6f1f3421b4a54dac5e49160e8a2287027a | import re
from nbconvert import MarkdownExporter
import os
from pathlib import Path
from headers import headers
def atoi(text):
    """Return *text* as an int if it is all digits, otherwise unchanged."""
    return int(text) if text.isdigit() else text


def natural_keys(text):
    """Sort key that orders strings with embedded numbers numerically.

    e.g. sorted(["nb10", "nb2"], key=natural_keys) -> ["nb2", "nb10"].
    """
    # Raw string fixes the invalid '\d' escape (DeprecationWarning in
    # modern Python); the pointless temporary variable is also removed.
    return [atoi(chunk) for chunk in re.split(r'(\d+)', text)]
# Directory containing the tutorial notebooks, relative to this script.
dir = Path("../../../../tutorials")
notebooks = [x for x in os.listdir(dir) if x[-6:] == ".ipynb"]
# sort notebooks based on numbers within name of notebook
notebooks = sorted(notebooks, key=lambda x: natural_keys(x))
# Markdown exporter; cell outputs are stripped from the generated pages.
e = MarkdownExporter(exclude_output=True)
for i, nb in enumerate(notebooks):
    body, resources = e.from_filename(dir / nb)
    # Output pages are 1-indexed; prepend the matching front-matter header.
    with open(str(i + 1) + ".md", "w") as f:
        f.write(headers[i + 1] + "\n\n")
        f.write(body)
|
996,319 | 8c74a4aef697136eec9a2cd9a34bcaa6006dd2a0 | Calendar in Python
Python has a built-in module, calendar, for working with date-related tasks.
You will learn to display the calendar of a given date in this example.
Examples:
Input 1 :
yy = 2017
mm = 11
Output : November 2017
Mo Tu We Th Fr Sa Su
1 2 3 4 5
6 7 8 9 10 11 12
13 14 15 16 17 18 19
20 21 22 23 24 25 26
27 28 29 30
Input 2 :
yy = 2017
Output:
2017
January February March
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 1 2 3 4 5 1 2 3 4 5
2 3 4 5 6 7 8 6 7 8 9 10 11 12 6 7 8 9 10 11 12
9 10 11 12 13 14 15 13 14 15 16 17 18 19 13 14 15 16 17 18 19
16 17 18 19 20 21 22 20 21 22 23 24 25 26 20 21 22 23 24 25 26
23 24 25 26 27 28 29 27 28 27 28 29 30 31
30 31
April May June
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 1 2 3 4 5 6 7 1 2 3 4
3 4 5 6 7 8 9 8 9 10 11 12 13 14 5 6 7 8 9 10 11
10 11 12 13 14 15 16 15 16 17 18 19 20 21 12 13 14 15 16 17 18
17 18 19 20 21 22 23 22 23 24 25 26 27 28 19 20 21 22 23 24 25
24 25 26 27 28 29 30 29 30 31 26 27 28 29 30
July August September
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 1 2 3 4 5 6 1 2 3
3 4 5 6 7 8 9 7 8 9 10 11 12 13 4 5 6 7 8 9 10
10 11 12 13 14 15 16 14 15 16 17 18 19 20 11 12 13 14 15 16 17
17 18 19 20 21 22 23 21 22 23 24 25 26 27 18 19 20 21 22 23 24
24 25 26 27 28 29 30 28 29 30 31 25 26 27 28 29 30
31
October November December
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 1 2 3 4 5 1 2 3
2 3 4 5 6 7 8 6 7 8 9 10 11 12 4 5 6 7 8 9 10
9 10 11 12 13 14 15 13 14 15 16 17 18 19 11 12 13 14 15 16 17
16 17 18 19 20 21 22 20 21 22 23 24 25 26 18 19 20 21 22 23 24
23 24 25 26 27 28 29 27 28 29 30 25 26 27 28 29 30 31
30 31
## Recommended: Please try your approach on **__{IDE}__** first, before moving
on to the solution.
Implementation 1 : **Displaying Month**
In the program below, we import the calendar module. The built-in function
month() inside the module takes in the year and the month and displays the
calendar for that month of the year.
__
__
__
__
__
__
__
# Python program to display calendar of
# given month of the year
# import module
import calendar
yy = 2017
mm = 11
# display the calendar
print(calendar.month(yy, mm))
---
__
__
Output:
November 2017
Mo Tu We Th Fr Sa Su
1 2 3 4 5
6 7 8 9 10 11 12
13 14 15 16 17 18 19
20 21 22 23 24 25 26
27 28 29 30
**OR** you can directly run
python -m calendar [YEAR] [MONTH]
from command line (CMD in windows or TERMINAL in Linux ) for displaying a
month of a year
for example :
C:\Users\chatu\Desktop>python -m calendar 2019 7
July 2019
Mo Tu We Th Fr Sa Su
1 2 3 4 5 6 7
8 9 10 11 12 13 14
15 16 17 18 19 20 21
22 23 24 25 26 27 28
29 30 31
Implementation 2 : **Displaying Year**
In the program below, we import the calendar module. The built-in function
calendar() inside the module takes in the year and displays the calendar for
that year.
__
__
__
__
__
__
__
# Python program to display calendar of
# given year
# import module
import calendar
yy = 2017
# display the calendar
print(calendar.calendar(yy))
---
__
__
Output:
2017
January February March
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 1 2 3 4 5 1 2 3 4 5
2 3 4 5 6 7 8 6 7 8 9 10 11 12 6 7 8 9 10 11 12
9 10 11 12 13 14 15 13 14 15 16 17 18 19 13 14 15 16 17 18 19
16 17 18 19 20 21 22 20 21 22 23 24 25 26 20 21 22 23 24 25 26
23 24 25 26 27 28 29 27 28 27 28 29 30 31
30 31
April May June
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 1 2 3 4 5 6 7 1 2 3 4
3 4 5 6 7 8 9 8 9 10 11 12 13 14 5 6 7 8 9 10 11
10 11 12 13 14 15 16 15 16 17 18 19 20 21 12 13 14 15 16 17 18
17 18 19 20 21 22 23 22 23 24 25 26 27 28 19 20 21 22 23 24 25
24 25 26 27 28 29 30 29 30 31 26 27 28 29 30
July August September
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 1 2 3 4 5 6 1 2 3
3 4 5 6 7 8 9 7 8 9 10 11 12 13 4 5 6 7 8 9 10
10 11 12 13 14 15 16 14 15 16 17 18 19 20 11 12 13 14 15 16 17
17 18 19 20 21 22 23 21 22 23 24 25 26 27 18 19 20 21 22 23 24
24 25 26 27 28 29 30 28 29 30 31 25 26 27 28 29 30
31
October November December
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 1 2 3 4 5 1 2 3
2 3 4 5 6 7 8 6 7 8 9 10 11 12 4 5 6 7 8 9 10
9 10 11 12 13 14 15 13 14 15 16 17 18 19 11 12 13 14 15 16 17
16 17 18 19 20 21 22 20 21 22 23 24 25 26 18 19 20 21 22 23 24
23 24 25 26 27 28 29 27 28 29 30 25 26 27 28 29 30 31
30 31
**OR** you can directly run
python -m calendar [YEAR]
from command line (CMD in windows or TERMINAL in Linux ) for displaying a year
This article is contributed by **ajay0007**. If you like GeeksforGeeks and
would like to contribute, you can also write an article using
contribute.geeksforgeeks.org or mail your article to
contribute@geeksforgeeks.org. See your article appearing on the GeeksforGeeks
main page and help other Geeks.
Please write comments if you find anything incorrect, or you want to share
more information about the topic discussed above.
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
996,320 | 729a3135f7e562cf21a95eb5e746aede54864f73 | # TODO rough estimation. Make better code later.
from scipy.spatial import distance
import pickle
import numpy as np
import os
import generate_data
import settings
import matplotlib.pyplot as plt
from scipy import stats
import kernelized_tsne
import logging
import datetime
def generate_cluster_results_filename(parameters=settings.parameters):
    """Build the pickle path for the kernelized-tSNE cluster-attribution results."""
    prefix = '../results/cluster_attr_kernelized_'
    parameter_sets = settings.tsne_parameter_set | settings.x_neighbors_selection_parameter_set
    suffix = generate_data.combine_prefixes(parameter_sets, parameters)
    return prefix + suffix
def main(parameters=settings.parameters,regenerate_parameters_cache=False):
    """Sweep the K parameter of kernelized tSNE over (0, 2] in steps of 0.01
    and record 10-NN accuracy, precision and embedding time for each K.

    Saves an accuracy-vs-K plot to ../figures and pickles all per-K results
    to the path from generate_cluster_results_filename().
    """
    step = 0.01
    choice_K = np.arange(step, 2 + step, step) # Let's try those K.
    logging.info("Started loading.")
    Y_mnist = generate_data.load_y_mnist(parameters=parameters)
    X_mnist = generate_data.load_x_mnist(parameters=parameters)
    picked_neighbors = generate_data.load_picked_neighbors(parameters=parameters)
    picked_neighbor_labels = generate_data.load_picked_neighbors_labels(parameters=parameters)
    accuracy_nn = parameters.get("accuracy_nn", settings.parameters["accuracy_nn"])
    precision_nn = parameters.get("precision_nn", settings.parameters["precision_nn"])
    labels_mnist = generate_data.load_labels_mnist(parameters=parameters)
    baseline_accuracy = generate_data.get_baseline_accuracy(parameters=parameters)
    logging.info("Loaded everything.")
    # Pairwise distances in the embedding, used for the percentile statistic.
    D_Y = distance.squareform(distance.pdist(Y_mnist))
    # Now find distance to closest neighbor
    np.fill_diagonal(D_Y, np.inf) # ... but not to itself
    nearest_neighbors_y_dist = np.min(D_Y, axis=1) # Actually, whatever axis

    def get_nearest_neighbors_in_y(y, Y_mnist, n=10):
        # Indices of the n rows of Y_mnist closest (squared Euclidean) to y.
        y_distances = np.sum((Y_mnist - y) ** 2, axis=1)
        return np.argsort(y_distances)[:n]

    # Implementing carefully. Not the fastest, but the most reliable way.
    kernel_tsne_mapping = kernelized_tsne.generate_kernelized_tsne_mapping_function(
        parameters=parameters,
        regenerate_parameters_cache=regenerate_parameters_cache
    )
    kernelized_detailed_tsne_method_list = ["Kernelized tSNE; K=%.2f" % (k) for k in choice_K]
    kernelized_detailed_tsne_method_results = list()
    kernelized_detailed_tsne_accuracy = np.zeros((len(kernelized_detailed_tsne_method_list),))
    kernelized_detailed_tsne_precision = np.zeros((len(kernelized_detailed_tsne_method_list),))
    kernelized_detailed_tsne_time = np.zeros((len(kernelized_detailed_tsne_method_list),))
    for j in range(len(choice_K)):
        k = choice_K[j]
        logging.info("%f", k)
        # Time only the embedding of the picked neighbors for this K.
        embedder_start_time = datetime.datetime.now()
        kernelized_detailed_tsne_method_results.append(kernel_tsne_mapping(picked_neighbors, k=k))
        embedder_end_time = datetime.datetime.now()
        kernelized_detailed_tsne_time[j] = (embedder_end_time - embedder_start_time).total_seconds()
        logging.info("%f complete", k)
        #kernelized_detailed_tsne_method_results = [kernel_tsne_mapping(picked_neighbors, k=k) for k in choice_K]
        logging.info("%s", kernelized_detailed_tsne_method_list[j])
        per_sample_accuracy = np.zeros((len(picked_neighbors),))
        per_sample_precision = np.zeros((len(picked_neighbors),))
        for i in range(len(picked_neighbors)):
            if i % 200 == 0:
                logging.info("%d", i)
            expected_label = picked_neighbor_labels[i]
            y = kernelized_detailed_tsne_method_results[j][i,:]
            x = picked_neighbors[i, :]
            # Precision: overlap between the sample's NN sets in X and Y space.
            nn_x_indices = get_nearest_neighbors_in_y(x, X_mnist, n=precision_nn)
            nn_y_indices = get_nearest_neighbors_in_y(y, Y_mnist, n=precision_nn)
            matching_indices = len([k for k in nn_x_indices if k in nn_y_indices])
            per_sample_precision[i] = (matching_indices / precision_nn)
            # Accuracy: fraction of the embedding's NNs sharing the true label.
            kernelized_indices = get_nearest_neighbors_in_y(kernelized_detailed_tsne_method_results[j][i,:], Y_mnist,
                                                            n=accuracy_nn)
            obtained_labels = labels_mnist[kernelized_indices]
            per_sample_accuracy[i] = sum(obtained_labels==expected_label) / len(obtained_labels)
        kernelized_detailed_tsne_accuracy[j] = np.mean(per_sample_accuracy)
        kernelized_detailed_tsne_precision[j] = np.mean(per_sample_precision)
        logging.info("%s :\t%f\t%f\t%f s", kernelized_detailed_tsne_method_list[j], kernelized_detailed_tsne_precision[j],
                     kernelized_detailed_tsne_accuracy[j], kernelized_detailed_tsne_time[j])
    # Accuracy-vs-power plot
    legend_list = list()
    f, ax = plt.subplots()
    f.set_size_inches(6, 3)
    x = [k for k in choice_K] # Ensuring order
    y = kernelized_detailed_tsne_accuracy
    # plt.title("IDW - Accuracy vs Power") # We'd better use figure caption
    # ax.legend([h1,h2,h3,h4,h5,h6], ["Closest Training Set Image"]+idw_method_list)
    plt.plot(x, y, c='blue')
    h = plt.axhline(y=baseline_accuracy, c='black', linestyle='--')
    plt.legend([h], ["Baseline Accuracy (%.4f)" % baseline_accuracy])
    plt.xlabel("Kernelized tSNE: K parameter")
    plt.ylabel("10-NN Accuracy")
    plt.ylim([0, 1])
    plt.xlim([0, 2])
    f.tight_layout()
    plt.savefig("../figures/kernelized-tsne-K-vs-accuracy.png")
    # Three representative K values (indices 4, 24, 49 -> K=0.05, 0.25, 0.50)
    # for the distance-percentile report below.
    ind = [4, 24, 49]
    kernelized_tsne_method_list = [
        kernelized_detailed_tsne_method_list[i][:10] + kernelized_detailed_tsne_method_list[i][-8:]
        for i in ind]
    kernelized_tsne_method_results = [kernelized_detailed_tsne_method_results[i] for i in ind]
    kernelized_tsne_nearest_neighbors_percentiles_matrix = np.zeros((len(picked_neighbors), len(kernelized_tsne_method_list)))
    for i in range(len(picked_neighbors)):
        for j in range(len(kernelized_tsne_method_list)):
            y = kernelized_tsne_method_results[j][i,:]
            # Percentile of this sample's NN distance among the training set's
            # own nearest-neighbor distances (outlier-ness of the embedding).
            nn_dist = np.min(np.sqrt(np.sum((Y_mnist-y)**2, axis=1)))
            kernelized_tsne_nearest_neighbors_percentiles_matrix[i,j] = stats.percentileofscore(nearest_neighbors_y_dist, nn_dist)
    kernelized_tsne_distance_percentiles = np.mean(kernelized_tsne_nearest_neighbors_percentiles_matrix, axis=0)
    for j in range(len(kernelized_tsne_method_list)):
        print(kernelized_tsne_method_list[j], kernelized_tsne_distance_percentiles[j])
    output_file = generate_cluster_results_filename(parameters)
    with open(output_file, 'wb') as f:
        pickle.dump((kernelized_detailed_tsne_method_results, kernelized_detailed_tsne_accuracy,
                     kernelized_detailed_tsne_precision, kernelized_detailed_tsne_time, kernelized_detailed_tsne_method_list), f)
if __name__ == "__main__":
    # Default run: INFO logging, reuse the cached kernelized-tSNE parameters.
    logging.basicConfig(level=logging.INFO)
    main(parameters=settings.parameters, regenerate_parameters_cache=False)
|
996,321 | 6eeaed5c9cdaa4c4b0fe19a6675bbfdbb29d0af5 | import requests
import xml.etree.ElementTree as ET
from requests.auth import HTTPDigestAuth
from .soap import SoapRequest
def get_myfritz(username: str, password: str) -> str:
    """Query the FRITZ!Box MyFRITZ TR-064 service and return the DynDNS name."""
    request = SoapRequest(
        url='http://fritz.box:49000/upnp/control/x_myfritz',
        urn='dslforum-org:service:X_AVM-DE_MyFritz:1',
        fn='GetInfo')
    request.auth(username, password)
    request.post()
    # The response's NewDynDNSName element holds the MyFRITZ hostname.
    return request.get_element('NewDynDNSName')
|
996,322 | 0be7640ae32af2c3f0153627d6dad90283762af2 | import json
import os
def json_to_python_object(file):
    """
    Print label, points, occlusion and attributes for every object in a json file.

    :param file: file from jsons folder
    """
    with open(f"objects/{file}") as fh:
        json_objects = json.load(fh)['objects']
    print(f'Number of objects: {len(json_objects)}')
    for idx, obj in enumerate(json_objects, start=1):
        print("================================")  # object separator (visual only)
        print(f'Object number {idx}: ')
        print(f'Label: {extract_label(obj)} \n'
              f'Points: {extract_points(obj)} \n'
              f'Occlusion: {extract_occluded(obj)} \n'
              f'Attributes: {extract_attributes(obj)}')
def extract_points(json_object):
    """
    Get points in [xmin, ymin, xmax, ymin] format.

    :param json_object: json object file
    :return: flat list of coordinates taken from the object's 'data' lists
    """
    return [coord for point_list in json_object['data'] for coord in point_list]
def extract_attributes(json_object):
    """
    Get the attribute mapping of a parsed json object.

    :param json_object: json object file
    :return: the object's 'attribute' entry
    """
    return json_object['attribute']


def extract_occluded(json_object):
    """
    Get the occlusion value of a parsed json object.

    :param json_object: json object file
    :return: the 'occlusion' entry of the object's attributes
    """
    attributes = extract_attributes(json_object)
    return attributes['occlusion']
def extract_label(json_object):
    """
    Get the label of a parsed json object.

    :param json_object: json object file
    :return: the object's 'class_name' entry
    """
    label = json_object['class_name']
    return label
if __name__ == '__main__':
    for filename in os.listdir('objects'):  # Use every file from objects folder
        json_to_python_object(filename)  # Call the main method to get information
        print("////////////////////////////////")  # Simple file separator (Not necessary, only for visualization)
|
996,323 | 273804c119046d2e94445d3f3bf7d8595feeb6f3 | import curses
class ScreenSources():
    """Curses pane listing PulseAudio sources (regular inputs vs. sink
    monitors) with a per-source volume control strip.

    Relies on the module-global ``par`` (imported at the bottom of this
    file) for the current PulseAudio state.
    """

    def __init__(self):
        # 0 = regular, 1 = monitors, 2 = both
        self.active_type = 0
        # hotkeys, positionally matched to typenames below
        self.typechars = "wer"
        self.typenames = [ "Regular", "Monitors", "Both" ]
        self.show_data = True
        self.wsourcelist = None
        self.drawable = False
        return

    def layout(self, win):
        """Carve sub-windows out of *win*; mark the pane undrawable if None."""
        if win is None:
            self.drawable = False
            return
        self.drawable = True
        maxy, maxx = win.getmaxyx()
        # Divider lines are only drawn when the window is tall enough.
        if maxy > 32:
            win.attron(curses.color_pair(2))
            win.hline(32, 0, curses.ACS_HLINE, maxx)
            win.vline(32, 49, curses.ACS_VLINE, maxy)
            win.addch(32, 49, curses.ACS_TTEE)
            win.attroff(curses.color_pair(2))
        # window for the source list
        self.wsourcelist = win.derwin(1, maxx, 0, 0)
        # window for the active source
        self.wactivesource = win.derwin(2, 0)
        self.winfol = win.derwin(15, 45, 33, 2) if maxy > 33 else None
        self.winfor = win.derwin(33, 52) if maxy > 33 else None

    def redraw(self, recurse = False):
        """Repaint the category header line and the per-source controls."""
        if self.drawable is False:
            return
        outputcount = { 0: 0, 1: 0, 2: 0 }
        for output in par.pa_source_outputs.values():
            type = 1 if par.pa_sources[output.source].is_monitor else 0
            # NOTE(review): this sets the flag to 1 rather than incrementing,
            # yet the value is rendered as "[<count>]" below -- confirm
            # whether "outputcount[type] += 1" was intended.
            outputcount[type] = 1
        wsourcelist = self.wsourcelist
        wsourcelist.erase()
        wsourcelist.move(0, 1)
        i = 0
        # print the available sources
        for type in range(0, len(self.typenames)):
            if i > 0:
                wsourcelist.addstr(" | ")
            wsourcelist.addstr(self.typechars[type] + ": ")
            # highlight: color 3 if the category has outputs, bold if active
            wsourcelist.addstr(self.typenames[type], curses.color_pair(3 if outputcount[type] > 0 else 1) | (curses.A_BOLD if i == self.active_type else 0))
            if outputcount[type] > 0:
                wsourcelist.addstr(" [" + str(outputcount[type]) + "]")
            i += 1
        self.wactivesource.erase()
        self.wactivesource.move(0, 0)
        i = 0
        # one 23-column control strip per source matching the active category
        for source in par.pa_sources:
            if self.active_type == 2 or par.pa_sources[source].is_monitor == (self.active_type == 1):
                par.pa_sources[source].draw_control(self.wactivesource.derwin(1, i))
                i += 23
        return

    def key_event(self, event):
        """Handle a keypress; return True when the event was consumed."""
        # source range
        for i in range(0, len(self.typechars)):
            if event == ord(self.typechars[i]):
                if self.active_type == i:
                    return True
                self.active_type = i
                self.redraw(True)
                return True
        return False
        # return par.pa_sources.values()[self.active_source].key_event(event)

    def getActiveVolume(self):
        # Volume focus is not implemented for this screen.
        return None # par.pa_sources.values()[self.active_source].getActiveVolume()

    def draw_help(self, win):
        """Write the key-binding help text for this screen into *win*."""
        win.attron(curses.A_BOLD)
        win.addstr(" Keys - Sources\n")
        win.addstr("-----------------------------------------")
        win.attroff(curses.A_BOLD)
        win.addstr("""
h / Left\t\t: Move Cursor left
l / Right\t: Move Cursor right
enter\t\t: Detailed volume control
k / Up\t\t: Volume Up
j / Down\t\t: Volume Down
K\t\t: Volume Up, ignore soft limit
J\t\t: Volume Down, ignore soft limit
n\t\t: Set selected volume to 1.0
m\t\t: Set selected volume to 0.0 (Mute)
N\t\t: Set all volumes to 1.0
M\t\t: Set all volumes to 0.0
""")
from ..pulse.ParCur import par
|
996,324 | c2e637ac3d6c1e372eaee75e90745610a26c0732 | #Este codigo e uma versao do programa de processamento de imagens que utiliza
#como base os algoritmos de Canny, HoughLines e HoughCircles.
#A forma de captura da imagem e atraves da imagem capturada
import cv2
import serial
import time
import numpy as np
import sys
# Three arguments are passed to this script:
#   1. the image file to process
#   2. the serial port where the results are sent
#   3. the output file name where the results are written
args = sys.argv
fileName = args[1]
serialPort = args[2]
outputFileName = args[3]
# load the captured image
img = cv2.imread(fileName)
# image size and center
img_shape = img.shape
img_center_x = img_shape[1]/2
img_center_y = img_shape[0]/2
# camera field-of-view angle, in degrees
cam_angle = 160
# open the output file
output_file = open(outputFileName, "w")
# convert the image to grayscale
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# find the edges
edges = cv2.Canny(imgGray,50,150,apertureSize = 3)
# detect the lines
lines = cv2.HoughLines(edges,1,np.pi/180,10)
# line processing
if lines is not None:
    for rho,theta in lines[0]:
        # build the line from its polar (rho, theta) form
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        # clip the segment to the image bounds so
        # its midpoint can be computed
        if x1 < 0:
            x1 = 0
            y1 = int(rho/b)
        if y1 < 0:
            y1 = 0
            x1 = int(rho/a)
        if x2 > img_shape[1]:
            x2 = img_shape[1]
            y2 = int((rho - x2*a)/b)
        if y2 > img_shape[0]:
            y2 = img_shape[0]
            x2 = int((rho - y2*b)/a)
        # midpoint of the (clipped) line; the loop leaves the last line's value
        pos_line = ((x1+x2)/2,(y1+y2)/2)
else:
    # nothing found: use an out-of-range sentinel position
    pos_line = (-1,-1)
# detect the circles
circles = cv2.HoughCircles(imgGray,cv2.HOUGH_GRADIENT,1,200, param1=10,param2=30,minRadius=2,maxRadius=100)
# circle processing
if circles is not None:
    circles = np.uint16(np.around(circles))
    # sort by radius to pick a circle
    # NOTE(review): sorted() is ascending and [0] is taken, which selects the
    # smallest-radius batch first -- the original (Portuguese) comment says
    # "largest"; confirm the intended ordering.
    circles_s = sorted(circles, key=lambda tup: tup[:,2])
    # coordinates of the chosen circle
    x_c = circles_s[0][0][0]
    y_c = circles_s[0][0][1]
    r_c = circles_s[0][0][2]
    # circle position
    pos_circle = (x_c, y_c)
    # angle of the circle relative to the camera center
    sun_dif_angle = x_c*cam_angle/img_shape[1]
    sun_dif_angle = sun_dif_angle - cam_angle/2
else:
    # out-of-range sentinel values
    pos_circle = (-1,-1)
    sun_dif_angle = -360
# Send the data over the serial port given in the arguments;
# every value is zero-padded to 8 characters.
porta = serialPort
ser=serial.Serial(porta,9600)
print(ser.portstr)
ser.write(str(pos_line[0]).zfill(8).encode())
ser.write("\n".encode())
ser.write(str(pos_line[1]).zfill(8).encode())
ser.write("\n".encode())
ser.write(str(pos_circle[0]).zfill(8).encode())
ser.write("\n".encode())
ser.write(str(pos_circle[1]).zfill(8).encode())
ser.write("\n".encode())
ser.write(str(round(sun_dif_angle,4)).zfill(8).encode())
ser.write("\n\n\n".encode())
ser.close()
# write everything to the output file (same format as the serial stream)
output_file.write(str(pos_line[0]).zfill(8))
output_file.write("\n")
output_file.write(str(pos_line[1]).zfill(8))
output_file.write("\n")
output_file.write(str(pos_circle[0]).zfill(8))
output_file.write("\n")
output_file.write(str(pos_circle[1]).zfill(8))
output_file.write("\n")
output_file.write(str(round(sun_dif_angle,4)).zfill(8))
output_file.write("\n")
output_file.close()
996,325 | ac864ac72fed239b902a99b7e11a284dcd4d6738 |
class Account:
    """Plain record describing a bank account and its balance."""

    def __init__(self, account_id: int, owner_id: int, account_type: str, amount: int):
        # Straight attribute assignment; no validation is performed here.
        self.account_id = account_id
        self.owner_id = owner_id
        self.account_type = account_type
        self.amount = amount

    def __str__(self):
        # Layout reproduced exactly (note: no comma before "amount").
        return (
            f"id: {self.account_id}, owner: {self.owner_id},"
            f" type: {self.account_type} amount: {self.amount}"
        )

    def as_json_dict(self):
        """Return a JSON-serializable dict of the account's fields."""
        return {
            "account_id": self.account_id,
            "owner_id": self.owner_id,
            "account_type": self.account_type,
            "amount": self.amount,
        }
996,326 | 6e6f6a3101d182684e0a705c24cd24f8b96d0d43 | # coding=utf-8
import sys
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import utils.analisis as analisis
import utils.file as futils
# Existe relación entre unos determinados fenotipos y la poblacion (o superpoblacion).
# Es decir, hay una proporción
# significativa o una distribución no equitativa entre el fenotipo y el origen del individuo.
# Se muestra el snps en forma gráfica.
# Command line: <fenotypeData> <dataDir> <inputFile> <outputFile>
# (Python 2 module -- note the print statement below.)
if len(sys.argv) >= 5:
    fenotypeData = sys.argv[1]
    dataDir = sys.argv[2]
    inputFile = sys.argv[3]
    outputFile = sys.argv[4]
else:
    print "Uso : <fenotypeData> <dataDir> <inputFile> <outputFile>"
    sys.exit(-1)
def analisis_sim(results):
    """Count, per SNP and population, the individuals with the SNP set to 1.

    Returns {snp: {population: count}}; the population is looked up in the
    module-global ``mapping`` (individual -> (pop, super_pop, gender)).
    """
    positives = dict()
    for individual, snp_values in results.items():
        population = mapping[individual][0]
        for snp, value in snp_values:
            per_pop = positives.setdefault(snp, dict())
            per_pop.setdefault(population, 0)
            # The cast to int matters: values arrive as strings.
            if int(value) == 1:
                per_pop[population] += 1
    return positives
def calcular_totales(mapping):
    """Count how many individuals carry each phenotype value.

    Gender, population and super-population values all share one counting
    table, mirroring how they are looked up later.
    """
    total = dict()
    for (pop, super_pop, gender) in mapping.values():
        for value in (pop, super_pop, gender):
            total[value] = total.get(value, 0) + 1
    return total
def calcular_probabilidad(totales, positives):
    """Turn per-population positive counts into percentages of that population.

    *totales* comes from calcular_totales(); returns {snp: {population: pct}}.
    """
    return {
        snp: {
            pop: (float(count) / totales[pop]) * 100.0
            for pop, count in per_pop.items()
        }
        for snp, per_pop in positives.items()
    }
def encontrar_descripcion(fenotypes, rs):
    """Return the description (Mapped_trait) for the SNP *rs*, or None."""
    return next((entry[1] for entry in fenotypes if entry[0] == rs), None)
# Generate a PDF with one bar chart per SNP; this is the most expensive step
# since there may be many SNPs.
def generar_graficas(prob, fenotypeList, filename="analisis_2.pdf"):
    """Render a bar chart (probability per population) for every SNP in *prob*
    and append each figure as a page of the PDF at *filename*."""
    i = 0
    plots = list()  # NOTE(review): unused -- figures are saved immediately below
    pp = PdfPages(filename)
    for rs in prob.keys():
        x = prob[rs]
        # shade each bar by its probability (0-100 mapped onto the BuPu map)
        colors = plt.cm.BuPu([float(val) / 100.0 for val in x.values()])
        # plots.append(plt.figure())
        p = plt.figure()
        plt.bar(x.keys(), x.values(), color=colors)
        plt.ylim((0, 100))
        plt.xticks(rotation=90)
        plt.title(rs + "\n" + encontrar_descripcion(fenotypeList, rs))
        i += 1
        pp.savefig(p)
        plt.close()
    # pp = PdfPages(filename)
    # for plot in plots:
    #     pp.savefig(plot)
    pp.close()
listInd = futils.llegir_order("individuosOrder.txt")
# Leemos el orden del cromosoma Y (hombres)
listIndY = futils.llegir_order("individuosOrder_Y.txt")
# Leemos el mapping individuo -> poblacion -> superpoblacion -> genero
mapping = futils.llegir_basic_fenotype(dataDir + "integrated_call_samples_v3.20130502.ALL.panel")
totales = calcular_totales(mapping)
fenotypeList = futils.llegir_selected_fenotype(open(fenotypeData))
r = analisis.compute(open(inputFile))
# individuos con algún snp activo
positives = analisis_sim(r)
# Convertimos numero de casos en probabilidad con el total de individuos.
prob = calcular_probabilidad(totales, positives)
# Generamos la salida
generar_graficas(prob, fenotypeList, outputFile)
|
996,327 | 66c77ee36aec0050cec1747d05bf95bb1f14dc47 | #!encoding:utf-8
__author__ = 'wangfei'
__date__ = '2017/12/23 0023 21:32'
import xadmin
from xadmin import views
from .models import EmailVerifyRecord,Banner
class BaseSetting(object):
    """xadmin base-view options: enable the theme switcher and the
    online bootswatch theme catalogue."""

    enable_themes = True
    use_bootswatch = True
class GlobalSetting(object):
    """Global chrome for the xadmin site: page title, footer text and
    accordion-style left menu."""

    site_title = "幕学后台管理系统"
    site_footer = "幕学在线网"
    menu_style = "accordion"
class EmailVerifyRecordAdmin(object):
    """Admin options for EmailVerifyRecord: list columns, searchable
    fields and sidebar filters."""

    list_display = ['code', 'email', 'send_type', 'send_time']
    search_fields = ['code', 'email', 'send_type']
    list_filter = ['code', 'email', 'send_type', 'send_time']
class BannerAdmin(object):
    """Admin options for Banner: list columns, searchable fields and
    sidebar filters."""

    list_display = ['title', 'image', 'url', 'index', 'add_time']
    search_fields = ['title', 'image', 'url', 'index']
    list_filter = ['title', 'image', 'url', 'index', 'add_time']
# Register the app models with their xadmin option classes.
xadmin.site.register(EmailVerifyRecord,EmailVerifyRecordAdmin)
xadmin.site.register(Banner,BannerAdmin)
# Attach theme switching and global chrome to xadmin's own base views.
xadmin.site.register(views.BaseAdminView,BaseSetting)
xadmin.site.register(views.CommAdminView,GlobalSetting)
996,328 | d19a116346e95c0b7c0c03cac3cc89e73156e7a2 | '''
Created on Oct 23, 2012
'''
import wx
from com.payleap.serviceproviders.transaction.TransactionProcessCreditCard import TransactionProcessCreditCard
from com.payleap.serviceproviders.merchant.MerchantProcessCreditCard import MerchantProcessCreditCard
class TestApplication:
if __name__ == '__main__':
#***************************************************** Transaction Services ********************************************
#Sample ProcessCreditCard request
#https://uat.payleap.com/transactservices.svc/ProcessCreditCard?UserName=user123_API&Password=Nn2jCtsOuxVlcELE&TransType=Sale&CardNum=4111111111111111&ExpDate=1212&MagData=&NameOnCard=JohnDoe&Amount=1&InvNum=&PNRef=&Zip=98375&Street=&CVNum=&ExtData=
#=======================================================================
# transactionXMLParser = ProcessCreditCardSP.process(
# "Sale", #TransType
# "4111111111111111", #CardNum
# "1215", #ExpDate
# "Name Python", #NameOnCard
# "", #MagData
# "1", #Amount
# "", #InvNum
# "", #PNRef
# "98375", #Zip
# "", #Street
# "", #CVNumber
# "", #ExtData
# "UAT" #ServerMode
# )
# if transactionXMLParser is not None:
# print "Response : ", transactionXMLParser.respMSG , ", Response Code: " , transactionXMLParser.result , ", Response Msg: ", transactionXMLParser.message , ", PNRef: ", transactionXMLParser.PNRef , ", AuthCode: ", transactionXMLParser.authCode , ", Token Number: ", transactionXMLParser.tokenNumber
# else:
# print "Empty response"
#=======================================================================
#Sample ProcessCheck request (Check sale transaction request)
#https://uat.payleap.com/transactservices.svc/ProcessCheck?UserName=user123_API&Password=Ew3Kg6cV9MD9Ja@v&TransType=Sale&CheckNum=1234&TransitNum=262084356&AccountNum=1234567890&InvNum=&Amount=1.00&MICR=&NameOnCheck=John%20Doe&DL=&SS=&DOB=&StateCode=WA&CheckType=Personal&PNRef=&MagData=&ExtData=%3CSecCode%3EWEB%3C/SecCode%3E
#=======================================================================
# transactionXMLParser = ProcessCheckSP.process(
# "Sale", #TransType
# "1234", #CheckNum
# "262084356", #TransitNum
# "1234567890", #AccountNum
# "", #InvNum
# "1.00", #Amount
# "", #MICR
# "Name", #NameOnCheck
# "", #DL
# "", #SS
# "", #DOB
# "WA", #StateCode
# "Personal", #CheckType
# "", #PNRef
# "", #MagData
# "<SecCode>WEB</SecCode>", #ExtData
# "UAT" #ServerMode
# )
# if transactionXMLParser is not None:
# print "Response : ", transactionXMLParser.respMSG , ", Response Code: " , transactionXMLParser.result , ", Response Msg: ", transactionXMLParser.message , ", PNRef: ", transactionXMLParser.PNRef , ", AuthCode: ", transactionXMLParser.authCode , ", Token Number: ", transactionXMLParser.tokenNumber
# else:
# print "Empty response"
#=======================================================================
#Example of a debit card sale transaction request
#https://uat.payleap.com/transactservices.svc/ProcessDebitCard?UserName=user123_API&Password=Ew3Kg6cV9MD9Ja@v&TransType=Sale&CardNum=4111111111111111&ExpDate=1215&MagData=4055011111111111=05121015432112345678&NameOnCard=&Amount=18.1&InvNum=&PNRef=&Pin=1234&SureChargeAmt=&CashBackAmt=&RegisterNum=&ExtData=%3CKeySerialNumber%3E4A003102930003BB%3C%2FKeySerialNumber%3E
#=======================================================================
# transactionXMLParser = ProcessDebitCard.process(
# "Sale", #TransType
# "4111111111111111", #CardNum
# "1215", #ExpDate
# "4055011111111111=05121015432112345678", #MagData
# "", #NameOnCard
# "18.2", #Amount
# "", #InvNum
# "", #PNRef
# "1234", #Pin
# "", #SureChargeAmt
# "", #CashBackAmt
# "", #RegisterNum
# "<KeySerialNumber>4A003102930003BB</KeySerialNumber>", #ExtData
# "UAT" #ServerMode
# )
#
# if transactionXMLParser is not None:
# print "Response : ", transactionXMLParser.respMSG , ", Response Code: " , transactionXMLParser.result , ", Response Msg: ", transactionXMLParser.message , ", PNRef: ", transactionXMLParser.PNRef , ", AuthCode: ", transactionXMLParser.authCode , ", Token Number: ", transactionXMLParser.tokenNumber
# else:
# print "Empty response"
#=======================================================================
#Example of an EBT food stamp sale transaction request
#https://uat.payleap.com/transactservices.svc/ProcessEBTCard?UserName=user123_API&Password=Ew3Kg6cV9MD9Ja@v&TransType=FoodStampSale&CardNum=4111111111111111&ExpDate=1215&MagData=4111111111111111=05121015432112345678&NameOnCard=&Amount=10.13&InvNum=1&PNRef=&Pin=1234&SureChargeAmt=&CashBackAmt=&RegisterNum=&ExtData=%3CKeySerialNumber%3E4A003102930003BB%3C%2FKeySerialNumber%3E
#=======================================================================
# transactionXMLParser = ProcessEBTCard.process(
# "FoodStampSale", #TransType
# "4111111111111111", #CardNum
# "1215", #ExpDate
# "4055011111111111=05121015432112345678", #MagData
# "", #NameOnCard
# "10.14", #Amount
# "", #InvNum
# "", #PNRef
# "1234", #Pin
# "", #SureChargeAmt
# "", #CashBackAmt
# "", #RegisterNum
# "<KeySerialNumber>4A003102930003BB</KeySerialNumber>", #ExtData
# "UAT" #ServerMode
# )
#
# if transactionXMLParser is not None:
# print "Response : ", transactionXMLParser.respMSG , ", Response Code: " , transactionXMLParser.result , ", Response Msg: ", transactionXMLParser.message , ", PNRef: ", transactionXMLParser.PNRef , ", AuthCode: ", transactionXMLParser.authCode , ", Token Number: ", transactionXMLParser.tokenNumber
# else:
# print "Empty response"
#=======================================================================
#Example of gift card redeem transaction request
#https://uat.payleap.com/transactservices.svc/ProcessGiftCard?UserName=user123_API&Password=Ew3Kg6cV9MD9Ja@v&TransType=Redeem&CardNum=4111111111111111&ExpDate=1215&MagData=&Amount=1&InvNum=&PNRef=&ExtData=
#=======================================================================
# transactionXMLParser = ProcessGiftCardSP.process(
# "Redeem", #TransType
# "4111111111111111", #CardNum
# "1215", #ExpDate
# "", #MagData
# "1", #Amount
# "", #InvNum
# "", #PNRef
# "", #ExtData
# "UAT" #ServerMode
# )
#
# if transactionXMLParser is not None:
# print "Response : ", transactionXMLParser.respMSG , ", Response Code: " , transactionXMLParser.result , ", Response Msg: ", transactionXMLParser.message , ", PNRef: ", transactionXMLParser.PNRef , ", AuthCode: ", transactionXMLParser.authCode , ", Token Number: ", transactionXMLParser.tokenNumber
# else:
# print "Empty response"
#=======================================================================
#Example of a card validation request
#https://uat.payleap.com/transactservices.svc/ValidCard?CardNumber=4111111111111111&ExpDate=1215
#=======================================================================
# validationResponse = ValidationSP.processValidCard(
# "4012888888881881", #CardNum
# "1215", #ExpDate
# "UAT" #ServerMode
# )
#
# if validationResponse is not None:
# print "Response : ", validationResponse
# else:
# print "Empty response"
#=======================================================================
#Example of a card length validation request
#https://uat.payleap.com/transactservices.svc/ValidCardLength?CardNumber=4111111111111111
#=======================================================================
# validationResponse = ValidationSP.processValidCardLength(
# "4012888888881881", #CardNum
# "UAT" #ServerMode
# )
#
# if validationResponse is not None:
# print "Response : ", validationResponse
# else:
# print "Empty response"
#=======================================================================
#Example of an expiration date validation request
#https://uat.payleap.com/transactservices.svc/ValidExpDate?ExpDate=1215
#=======================================================================
# validationResponse = ValidationSP.processValidExpDate(
# "1111", #ExpDate
# "UAT" #ServerMode
# )
#
# if validationResponse is not None:
# print "Response : ", validationResponse
# else:
# print "Empty response"
#=======================================================================
#Example of a mod 10 check
#https://uat.payleap.com/transactservices.svc/ValidMod10?CardNumber=4111111111111111
#=======================================================================
# validationResponse = ValidationSP.processValidMod10(
# "401288888888188", #CardNum
# "UAT" #ServerMode
# )
#
# if validationResponse is not None:
# print "Response : ", validationResponse
# else:
# print "Empty response"
#=======================================================================
#***************************************************** Merchant Services ********************************************
#Example of an AddRecurringCreditCard request
#https://uat.payleap.com/MerchantServices.svc/AddRecurringCreditCard?Username=user123_API&Password=Nn2jCtsOuxVlcELE&Vendor=123&CustomerID=12345&CustomerName=ABCompany&FirstName=John&LastName=Doe&Title=&Department=&Street1=&Street2=&Street3=&City=&StateID=&Province=&Zip=&CountryID=&Email=&Mobile=&ContractID=i&ContractName=ABCService&BillAmt=45.00&TaxAmt=&TotalAmt=4500&StartDate=01/01/2012&EndDate=&BillingPeriod=Week&BillingInterval=1&MaxFailures=&FailureInterval=&EmailCustomer=&EmailMerchant=&EmailCustomerFailure=&EmailMerchantFailure=&CcAccountNum=1234567890987654&CcExpdate=1212&CcNameOnCard=&CcStreet=&CcZip=&ExtData=
#=======================================================================
# merchantServiceResponse = AddRecurringCreditCardSP.process(
# "415", #Vendor
# "12345", #CustomerID
# "ABCompany", #CustomerName
# "John", #FirstName
# "Doe", #LastName
# "", #Title
# "", #Department
# "", #Street1
# "", #Street2
# "", #Street3
# "", #City
# "", #StateID
# "", #Province
# "", #Zip
# "", #CountryID
# "", #Email
# "", #Mobile
# "i", #ContractID
# "ABCService", #ContractName
# "1", #BillAmt
# "0", #TaxAmt
# "1", #TotalAmt
# "01/01/2012", #StartDate
# "", #EndDate
# "Week", #BillingPeriod
# "1", #BillingInterval
# "", #MaxFailures
# "", #FailureInterval
# "", #EmailCustomer
# "", #EmailMerchant
# "", #EmailCustomerFailure
# "", #EmailMerchantFailure
# "4012888888881881", #CcAccountNum
# "1212", #CcExpdate
# "", #CcNameOnCard
# "", #CcStreet
# "", #CcZip
# "", #ExtData
# "UAT" #ServerMode
# )
#
# if merchantServiceResponse is not None:
# print "Message : ", merchantServiceResponse.message , ", CCInfoKey: " , merchantServiceResponse.ccInfoKey , ", CheckInfoKey: ", merchantServiceResponse.checkInfoKey , ", PNRef: ", merchantServiceResponse.pnref , ", Result: ", merchantServiceResponse.result , ", Error: ", merchantServiceResponse.error
# else:
# print "Empty response"
#=======================================================================
#Example of an AddRecurringCheck request
#https://uat.payleap.com/MerchantServices.svc/AddRecurringCheck?Username=user123_API&Password=Nn2jCtsOuxVlcELE&Vendor=123&CustomerID=12345&CustomerName=Gravity%20Apples&FirstName=Isaac&LastName=Newton&Title=&Department=&Street1=123%20Main%20St&Street2=&Street3=&City=Lincolnshire&StateID=GA&Province=&Zip=12345&CountryID=&Email=&Mobile=123-456-7890&ContractID=54321&ContractName=Required&BillAmt=1&TaxAmt=&TotalAmt=1&StartDate=01/09/2012&EndDate=&BillingPeriod=Month&BillingInterval=1&MaxFailures=&FailureInterval=&EmailCustomer=&EmailMerchant=&EmailCustomerFailure=&EmailMerchantFailure=&CheckType=Personal&AccountType=Checking&CheckNum=&MICR=&AccountNum=1234567890001&TransitNum=987654321&SS=&DOB=12/25/1642&BranchCity=&DL=&StateCode=&NameOnCheck=&ExtData=
#=======================================================================
# merchantServiceResponse = AddRecurringCheckSP.process(
# "415",# Vendor
# "12345",# CustomerID
# "Gravity Apples",# CustomerName
# "Isaac",# FirstName
# "Newton",# LastName
# "",# Title
# "",# Department
# "123 Main St",# Street1
# "",# Street2
# "",# Street3
# "Lincolnshire",# City
# "GA",# StateID
# "",# Province
# "12345",# Zip
# "",# CountryID
# "",# Email
# "9096414117",# Mobile
# "54321",# ContractID
# "Required",# ContractName
# "1",# BillAmt
# "",# TaxAmt
# "1",# TotalAmt
# "01/09/2012",# StartDate
# "",# EndDate
# "Month",# BillingPeriod
# "1",# BillingInterval
# "",# MaxFailures
# "",# FailureInterval
# "",# EmailCustomer
# "",# EmailMerchant
# "",# EmailCustomerFailure
# "",# EmailMerchantFailure
# "Personal",# CheckType
# "Checking",# AccountType
# "",# CheckNum
# "",# MICR
# "1234567890001",# AccountNum
# "987654321",# TransitNum
# "",# SS
# "",# DOB
# "",# BranchCity
# "",# DL
# "",# StateCode
# "",# NameOnCheck
# "",# ExtData
# "UAT" #ServerMode
# )
#
# if merchantServiceResponse is not None:
# print "Message : ", merchantServiceResponse.message , ", CCInfoKey: " , merchantServiceResponse.ccInfoKey , ", CheckInfoKey: ", merchantServiceResponse.checkInfoKey , ", PNRef: ", merchantServiceResponse.pnref , ", Result: ", merchantServiceResponse.result , ", Error: ", merchantServiceResponse.error
# else:
# print "Empty response"
#=======================================================================
#Example of a ProcessCreditCard - Recurring billing request
#https://uat.payleap.com/MerchantServices.svc/ProcessCreditCard?Username=user123_API&Password=Nn2jCtsOuxVlcELE&Vendor=123&CcInfoKey=1234&Amount=11.00&InvNum=&ExtData=
#=======================================================================
# merchantServiceResponse = ProcessCreditCardRecurringBillingSP.process(
# "415",# Vendor
# "1234",# CcInfoKey
# "1.00",# Amount
# "",# InvNum
# "",# ExtData
# "UAT" #ServerMode
# )
#
# if merchantServiceResponse is not None:
# print "Message : ", merchantServiceResponse.message , ", CCInfoKey: " , merchantServiceResponse.ccInfoKey , ", CheckInfoKey: ", merchantServiceResponse.checkInfoKey , ", PNRef: ", merchantServiceResponse.pnref , ", Result: ", merchantServiceResponse.result , ", Error: ", merchantServiceResponse.error
# else:
# print "Empty response"
#=======================================================================
#Example of ProcessCheck request
#https://uat.payleap.com/MerchantServices.svc/ProcessCheck?Username=user123_API&Password=Nn2jCtsOuxVlcELE&Vendor=123&CheckInfoKey=1234&Amount=11.00&InvNum=&ExtData=
#=======================================================================
# merchantServiceResponse = ProcessCheckRecurringBillingSP.process(
# "415",# Vendor
# "",# checkInfoKey
# "1.00",# Amount
# "",# InvNum
# "",# ExtData
# "UAT" #ServerMode
# )
#
# if merchantServiceResponse is not None:
# print "Message : ", merchantServiceResponse.message , ", CCInfoKey: " , merchantServiceResponse.ccInfoKey , ", CheckInfoKey: ", merchantServiceResponse.checkInfoKey , ", PNRef: ", merchantServiceResponse.pnref , ", Result: ", merchantServiceResponse.result , ", Error: ", merchantServiceResponse.error
# else:
# print "Empty response"
#=======================================================================
#Example of a ManageCheckInfo request
#https://uat.payleap.com/MerchantServices.svc/ManageCheckInfo?Username=user123_API&Password=Nn2jCtsOuxVlcELE&TransType=Add&Vendor=123&CustomerKey=1234&CheckInfoKey=&CheckType=Personal&AccountType=Checking&CheckNum=&MICR=&AccountNum=1234567890001&TransitNum=261072770&SS=&DOB=&BranchCity=&DL=&StateCode=&NameOnCheck=&Email=&DayPhone=&Street1=&Street2=&Street3=&City=&StateID=&Province=&PostalCode=&CountryID=&ExtData=
#=======================================================================
# merchantServiceResponse = ManageCheckInfoSP.process(
# "Add",# TransType
# "415",# Vendor
# "1234",# CustomerKey
# "",# CheckInfoKey
# "Personal",# CheckType
# "Checking",# AccountType
# "",# CheckNum
# "",# MICR
# "1234567890001",# AccountNum
# "261072770",# TransitNum
# "",# SS
# "",# DOB
# "",# BranchCity
# "",# DL
# "",# StateCode
# "",# NameOnCheck
# "",# Email
# "",# DayPhone
# "",# Street1
# "",# Street2
# "",# Street3
# "",# City
# "",# StateID
# "",# Province
# "",# PostalCode
# "",# CountryID
# "",# ExtData
# "UAT" #ServerMode
# )
#
# if merchantServiceResponse is not None:
# print "Message : ", merchantServiceResponse.message , ", CCInfoKey: " , merchantServiceResponse.ccInfoKey , ", CheckInfoKey: ", merchantServiceResponse.checkInfoKey , ", PNRef: ", merchantServiceResponse.pnref , ", Result: ", merchantServiceResponse.result , ", Error: ", merchantServiceResponse.error
# else:
# print "Empty response"
#=======================================================================
#Example of a ManageCreditCardInfo request
#https://uat.payleap.com/MerchantServices.svc/ManageCreditCardInfo?Username=user123_API&Password=Nn2jCtsOuxVlcELE&TransType=Add&Vendor=123&CustomerKey=6022&CardInfoKey=&CcAccountNum=8675309867530900&CcExpDate=1212&CcNameonCard=Tommy%20Tutone&CcStreet=&CcZip=&ExtData=
#=======================================================================
# merchantServiceResponse = ManageCreditCardInfoSP.process(
# "Add", # TransType
# "415", # Vendor
# "6022", # CustomerKey
# "", # CardInfoKey
# "8675309867530900", #CcAccountNum
# "1212", # CcExpDate
# "Tommy Tutone", # CcNameonCard
# "", # CcStreet
# "", # CcZip
# "", # ExtData
# "UAT" #ServerMode
# )
#
# if merchantServiceResponse is not None:
# print "Message : ", merchantServiceResponse.message , ", CCInfoKey: " , merchantServiceResponse.ccInfoKey , ", CheckInfoKey: ", merchantServiceResponse.checkInfoKey , ", PNRef: ", merchantServiceResponse.pnref , ", Result: ", merchantServiceResponse.result , ", Error: ", merchantServiceResponse.error
# else:
# print "Empty response"
#=======================================================================
#Example of a ManageContract request
#https://uat.payleap.com/MerchantServices.svc/ManageContract?Username=user123_API&Password=ENn2jCtsOuxVlcELE&TransType=Add&Vendor=123&CustomerKey=6022&ContractKey=&PaymentInfoKey=5452&PaymentType=CC&CustomerID=13579&CustomerName=Garfield%20&%20Friends&FirstName=Jon&LastName=Arbuckle&Title=&Department=&Street1=&Street2=&Street3=&City=&StateID=&Province=&Zip=&CountryID=&Email=&DayPhone=&NightPhone=&Fax=&Mobile=&ContractID=12346&ContractName=Lasagna&BillAmt=400.00&TaxAmt=3.00&TotalAmt=403.00&StartDate=01/11/2012&EndDate=&NextBillDt=01/11/2012&BillingPeriod=Semimonthly&BillingInterval=0&MaxFailures=&FailureInterval=&EmailCustomer=&EmailMerchant=&EmailCustomerFailure=&EmailMerchantFailure=&Status=&ExtData=
#=======================================================================
# merchantServiceResponse = ManageContractSP.process(
# "Add", #TransType
# "415", #Vendor
# "6102", #CustomerKey
# "", #ContractKey
# "5452", #PaymentInfoKey
# "CC", #PaymentType
# "13579", #CustomerID
# "Garfield", #CustomerName
# "Jon", #FirstName
# "Arbuckle", #LastName
# "", #Title
# "", #Department
# "", #Street1
# "", #Street2
# "", #Street3
# "", #City
# "", #StateID
# "", #Province
# "", #Zip
# "", #CountryID
# "", #Email
# "", #DayPhone
# "", #NightPhone
# "", #Fax
# "", #Mobile
# "12346", #ContractID
# "Lasagna", #ContractName
# "400.00", #BillAmt
# "3.00", #TaxAmt
# "403.00", #TotalAmt
# "01/11/2012", #StartDate
# "", #EndDate
# "01/11/2012", #NextBillDt
# "Semimonthly", #BillingPeriod
# "0", #BillingInterval
# "", #MaxFailures
# "", #FailureInterval
# "", #EmailCustomer
# "", #EmailMerchant
# "", #EmailCustomerFailure
# "", #EmailMerchantFailure
# "", #Status
# "", #ExtData
# "UAT" #ServerMode
# )
#
# if merchantServiceResponse is not None:
# print "Message : ", merchantServiceResponse.message , ", CCInfoKey: " , merchantServiceResponse.ccInfoKey , ", CheckInfoKey: ", merchantServiceResponse.checkInfoKey , ", PNRef: ", merchantServiceResponse.pnref , ", Result: ", merchantServiceResponse.result , ", Error: ", merchantServiceResponse.error
# else:
# print "Empty response"
#=======================================================================
#Example of a ManageCustomer request
#https://uat.payleap.com/MerchantServices.svc/ManageCustomer?Username=user123_API&Password=Nn2jCtsOuxVlcELE&TransType=Add&Vendor=123&CustomerKey=&CustomerID=12345&CustomerName=ComedyCo&FirstName=Jack&LastName=Black&Title=&Department=&Street1=&Street2=&Street3=&City=&StateID=&Province=&Zip=&CountryID=&DayPhone=&NightPhone=&Fax=&Email=&Mobile=&Status=&ExtData=
#=======================================================================
# merchantServiceResponse = ManageCustomerSP.process(
# "Add",# TransType
# "415",# Vendor
# "",# CustomerKey
# "",# CustomerID
# "Name D1",# CustomerName
# "Name",# FirstName
# "D1",# LastName
# "",# Title
# "",# Department
# "",# Street1
# "",# Street2
# "",# Street3
# "",# City
# "",# StateID
# "",# Province
# "",# Zip
# "",# CountryID
# "",# DayPhone
# "",# NightPhone
# "",# Fax
# "",# Email
# "",# Mobile
# "",# Status
# "",# ExtData
# "UAT" #ServerMode
# )
#
# if merchantServiceResponse is not None:
# print "Message : ", merchantServiceResponse.message , ", CCInfoKey: " , merchantServiceResponse.customerKey , ", CheckInfoKey: ", merchantServiceResponse.checkInfoKey , ", PNRef: ", merchantServiceResponse.pnref , ", Result: ", merchantServiceResponse.result , ", Error: ", merchantServiceResponse.error
# else:
# print "Empty response"
#=======================================================================
# ************************************* Reporting Services *********************************
#Example of a GetCardTrx request
#https://uat.payleap.com/reportingservices.svc/GetCardTrx?UserName=user123_API&Password=test&RPNum=123&PNRef=&BeginDt=2000-01-01&EndDt=3000-01-01&PaymentType=&ExcludePaymentType=&TransType=Sale &ExcludeTransType=&ApprovalCode=&Result=&ExcludeResult=&NameOnCard=&CardNum=&CardType=&ExcludeCardType=&ExcludeVoid=TRUE&User=&InvoiceId=&SettleFlag=&SettleMsg=&SettleDt=&TransformType= &Xsl=&ColDelim=&RowDelim=&IncludeHeader=TRUE&ExtData=
#=======================================================================
# cardTrxResponse = GetCardTrxSP.process(
# "415", # RPNum
# "", # PNRef
# "2011-09-29", #BeginDt
# "2012-10-29", #EndDt
# "", # PaymentType
# "", # ExcludePaymentType
# "", # TransType
# "", # ExcludeTransType
# "", # ApprovalCode
# "", # Result
# "", # ExcludeResult
# "", # NameOnCard
# "", # CardNum
# "", # CardType
# "", # ExcludeCardType
# "", # ExcludeVoid
# "", # User
# "", # InvoiceId
# "", # SettleFlag
# "", # SettleMsg
# "", # SettleDt
# "", # TransformType
# "", # Xsl
# "", # ColDelim
# "", # RowDelim
# "", # IncludeHeader
# "", # ExtData
# "UAT" #ServerMode
# )
# if(cardTrxResponse is not None):
# if isinstance(cardTrxResponse, Exception):
# print "Error while processing your request: " , cardTrxResponse
# else:
# if isinstance(cardTrxResponse, list):
# print "length: " , len(cardTrxResponse)
# for item in cardTrxResponse:
# print "TRX_HD_Key: ", item.TRX_HD_Key
# else:
# #Error occured
# print cardTrxResponse
# else:
# print "Error: Blank response found"
#=======================================================================
#Example of a GetCardTrxSummary request
#https://uat.payleap.com/reportingservices.svc/GetCardTrxSummary?UserName=user123_API&Password=test&RPNum=123&BeginDt=2000-01-01&EndDt=3000-01-01&ApprovalCode=&Register=&NameOnCard=&CardNum=&CardType=VISA&ExcludeVoid=FALSE&User=&SettleFlag=&SettleMsg=&SettleDt=&TransformType=&Xsl=&ColDelim=&RowDelim=&IncludeHeader=&ExtData=
#=======================================================================
# cardTrxSummeryResp = GetCardTrxSummarySP.process(
# "415", # RPNum
# "2012-01-01", # BeginDt
# "2012-10-29", # EndDt
# "", # ApprovalCode
# "", # Register
# "", # NameOnCard
# "", # CardNum
# "", # CardType
# "", # ExcludeVoid
# "", # User
# "", # SettleFlag
# "", # SettleMsg
# "", # SettleDt
# "", # TransformType
# "", # Xsl
# "", # ColDelim
# "", # RowDelim
# "", # IncludeHeader
# "", # ExtData
# "UAT" #ServerMode
# )
# if(cardTrxSummeryResp is not None):
# if isinstance(cardTrxSummeryResp, Exception):
# print "Error while processing your request: " , cardTrxSummeryResp
# else:
# if isinstance(cardTrxSummeryResp, list):
# print "length: " , len(cardTrxSummeryResp)
# for item in cardTrxSummeryResp:
# print "Payment_Type_ID: ", item.Payment_Type_ID
# else:
# #Error occured
# print cardTrxSummeryResp
# else:
# print "Error: Blank response found"
#=======================================================================
#Example of a GetCheckTrx request:
#https://uat.payleap.com/reportingservices.svc/GetCheckTrx?UserName=user123_API&Password=test&RPNum=123&PNRef=&BeginDt=2011-09-20T12:00:00&EndDt=2011-09-20T12:30:00&PaymentType=VERIFY&ExcludePaymentType=&TransType=&ExcludeTransType=&ApprovalCode=&Result=0&ExcludeResult=&NameOnCheck=&CheckNum=&AcctNum=&RouteNum=&ExcludeVoid=&User=&InvoiceId=&SettleFlag=&SettleMsg=&SettleDt=&TransformType=&Xsl=&ColDelim=&RowDelim=&IncludeHeader=&ExtData=
#=======================================================================
# checkTrxResponse = GetCheckTrxSP.process(
# "415", #
# "", #
# "2011-09-20T12:00:00", #
# "2012-10-29T12:30:00", #
# "VERIFY", #
# "", #
# "", #
# "", #
# "", #
# "0", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "", #
# "UAT" #ServerMode
# )
# if(checkTrxResponse is not None):
# if isinstance(checkTrxResponse, Exception):
# print "Error while processing your request: " , checkTrxResponse
# else:
# if isinstance(checkTrxResponse, list):
# print "length: " , len(checkTrxResponse)
# for item in checkTrxResponse:
# print "Trx_HD_Key: ", item.Trx_HD_Key
# else:
# #Error occured
# print checkTrxResponse
# else:
# print "Error: Blank response found"
#=======================================================================
#Example of a GetInfo request using the BatchInquiry TransType
#https://uat.payleap.com/reportingservices.svc/GetInfo?UserName=user123_API&Password=test&TransType=BatchInquiry&ExtData=
#=======================================================================
# infoserviceResponse = GetInfoSP.process(
# "Setup", # TransType
# "", # ExtData
# "UAT" #ServerMode
# )
# if(infoserviceResponse is not None):
# if isinstance(infoserviceResponse, Exception):
# print "Error while processing your request: " , infoserviceResponse
# else:
# if isinstance(infoserviceResponse, list):
# print "length: " , len(infoserviceResponse)
# for item in infoserviceResponse:
# print "RespMSG: ", item.RespMSG
# else:
# #Error occured
# print infoserviceResponse
# else:
# print "Error: Blank response found"
#=======================================================================
#Example of a GetOpenBatchSummary request
#https://uat.payleap.com/reportingservices.svc/GetOpenBatchSummary?UserName=user123_API&Password=test&RPNum=123&BeginDt=&EndDt=&ExtData=
#=======================================================================
# openBatchSummaryResponse = GetOpenBatchSummarySP.process(
# "415", # RPNum
# "", # beginDt
# "", # endDt
# "", # extData
# "UAT" #ServerMode
# )
# if(openBatchSummaryResponse is not None):
# if isinstance(openBatchSummaryResponse, Exception):
# print "Error while processing your request: " , openBatchSummaryResponse
# else:
# if isinstance(openBatchSummaryResponse, list):
# print "length: " , len(openBatchSummaryResponse)
# for item in openBatchSummaryResponse:
# print "Sale: ", item.Sale
# else:
# #Error occured
# print openBatchSummaryResponse
# else:
# print "Error: Blank response found"
#=======================================================================
#Custom token sample
trProcessCreditCardResponse = TransactionProcessCreditCard.processCreditCard(
"Sale", #TransType
"4111111111111111", #CardNum
"1215", #ExpDate
"Name Python", #NameOnCard
"", #MagData
"6", #Amount
"", #InvNum
"", #PNRef
"98375", #Zip
"", #Street
"", #CVNumber
"<CustomerTokenization>T</CustomerTokenization>", #ExtData
"UAT" #ServerMode
)
if(trProcessCreditCardResponse is not None):
if isinstance(trProcessCreditCardResponse, Exception):
print "Error while processing your request: " , trProcessCreditCardResponse
else:
if trProcessCreditCardResponse.result == 0:
print "************** Custom Token Transaction Credit Response Started *********************"
print "Result: " , trProcessCreditCardResponse.result
print "Resp Message: " , trProcessCreditCardResponse.respMSG
print "Message 1: " , trProcessCreditCardResponse.message1
print "Message 2: " , trProcessCreditCardResponse.message2
print "Token Number: " , trProcessCreditCardResponse.tokenNumber
print "************** Custom Token Transaction Credit Response Completed *********************"
#===========================================================
# Populating billing form to bill a customer using received Token number from above request
#===========================================================
merchantServiceResponse = MerchantProcessCreditCard.processCreditCard(
"415",# Vendor
trProcessCreditCardResponse.tokenNumber,# CcInfoKey
"2.00",# Amount
"",# InvNum
"",# ExtData
"UAT" #ServerMode
)
if merchantServiceResponse is not None and (merchantServiceResponse.error is None or merchantServiceResponse.error == ""):
print "Message: ", merchantServiceResponse.message + ", AuthCode: " + merchantServiceResponse.authCode + ", PNRef: " + merchantServiceResponse.pnref
else:
print "Request Failed: " + merchantServiceResponse.error
else:
print "Error: Blank response found" |
996,329 | 63f115673ab162274300cc72e2aa0afa9fd0bcd0 | import os
import time
import requests
import shortuuid
import urllib.parse
import xml.etree.ElementTree as ET
from .cluster import AugerClusterApi
from .utils.exception import AugerException
from .project_file import AugerProjectFileApi
# File extensions accepted for local data-source files; http(s) URLs
# bypass this check (see AugerDataSetApi.verify).
SUPPORTED_FORMATS = ['.csv', '.arff']
class AugerDataSetApi(AugerProjectFileApi):
    """Auger DataSet API.

    Wraps Auger "project file" operations for data sets: validating a data
    source (local file or http(s) URL), uploading local files to the Auger
    cloud (single- or multi-tenant), and creating the data-set record.
    """

    def __init__(self, ctx, project_api=None,
                 data_set_name=None, data_set_id=None):
        """Bind this API object to a context, parent project, and data set."""
        super(AugerDataSetApi, self).__init__(
            ctx, project_api, data_set_name, data_set_id)

    def create(self, data_source_file, data_set_name=None):
        """Create a data set from a local file or an http(s) URL.

        Local files are uploaded to the cloud first; the data-set name
        defaults to a uniquified version of the file name. Raises
        AugerException when a data set already exists for the same URL.
        """
        data_source_file, local_data_source = \
            AugerDataSetApi.verify(data_source_file)
        if local_data_source:
            file_url = self._upload_to_cloud(data_source_file)
            file_name = os.path.basename(data_source_file)
            if data_set_name:
                self.object_name = data_set_name
            else:
                self.object_name = self._get_data_set_name(file_name)
        else:
            # Remote source: use it as-is and name the data set after the
            # last path segment of the URL.
            file_url = data_source_file
            url_path = urllib.parse.urlparse(file_url).path
            file_name = os.path.basename(url_path)
            self.object_name = file_name
        try:
            return super().create(file_url, file_name)
        except Exception as exc:
            # The backend signals a duplicate URL with this error token.
            if 'en.errors.project_file.url_not_uniq' in str(exc):
                raise AugerException(
                    'DataSet already exists for %s' % file_url)
            raise exc

    def _get_readable_name(self):
        # patch readable name
        return 'DataSet'

    @staticmethod
    def verify(data_source_file):
        """Validate a data source and return (path_or_url, is_local).

        http(s) URLs are returned untouched with is_local=False. Local
        paths are made absolute and must exist and carry a supported
        extension, otherwise AugerException is raised.
        """
        if urllib.parse.urlparse(data_source_file).scheme in ['http', 'https']:
            return data_source_file, False
        data_source_file = os.path.abspath(
            os.path.join(os.getcwd(), data_source_file))
        filename, file_extension = os.path.splitext(data_source_file)
        # NOTE(review): PEP 8 prefers `file_extension not in SUPPORTED_FORMATS`,
        # and 'fomats' below is a typo in the user-facing message.
        if not file_extension in SUPPORTED_FORMATS:
            raise AugerException(
                'Source file has to be one of the supported fomats: %s' %
                ', '.join(SUPPORTED_FORMATS))
        if not os.path.isfile(data_source_file):
            raise AugerException(
                'Can\'t find file to import: %s' % data_source_file)
        return data_source_file, True

    def _upload_to_cloud(self, file_to_upload):
        """Dispatch the upload based on the cluster's tenancy mode."""
        cluster_mode = self.parent_api.parent_api.get_cluster_mode()
        if cluster_mode == 'single_tenant':
            return self._upload_to_single_tenant(file_to_upload)
        else:
            return self._upload_to_multi_tenant(file_to_upload)

    def _upload_to_single_tenant(self, file_to_upload):
        # get file_uploader_service from the cluster
        # and upload data to that service
        project_properties = self.parent_api.properties()
        cluster_id = project_properties.get('cluster_id')
        cluster_api = AugerClusterApi(
            self.ctx, self.parent_api, cluster_id)
        cluster_properties = cluster_api.properties()
        file_uploader_service = cluster_properties.get('file_uploader_service')
        upload_token = file_uploader_service.get('params').get('auger_token')
        upload_url = '%s?auger_token=%s' % (
            file_uploader_service.get('url'), upload_token)
        file_url = self._upload_file(file_to_upload, upload_url)
        self.ctx.log(
            'Uploaded local file to Auger Cloud file: %s' % file_url)
        return file_url

    def _upload_file(self, file_name, url):
        """POST the file body to *url*; return the cloud-relative file path."""
        with open(file_name, 'rb') as f:
            r = requests.post(url, data=f)
        if r.status_code == 200:
            # The service answers with a query-string style body containing
            # the stored path; normalize it to 'files/...'.
            rp = urllib.parse.parse_qs(r.text)
            return ('files/%s' % rp.get('path')[0].split('files/')[-1])
        else:
            raise AugerException(
                'HTTP error [%s] while uploading file to Auger Cloud...' % r.status_code)

    def _upload_to_multi_tenant(self, file_to_upload):
        """Upload via a pre-signed URL obtained from the REST API; return an s3:// URL."""
        # Randomize the remote name with a short uuid to avoid collisions.
        file_path = 'workspace/projects/%s/files/%s-%s' % \
            (self.parent_api.object_name, shortuuid.uuid(),
             os.path.basename(file_to_upload))
        res = self.rest_api.call('create_project_file_url', {
            'project_id': self.parent_api.object_id,
            'file_path': file_path})
        if res is None:
            raise AugerException(
                'Error while uploading file to Auger Cloud...')
        url = res['url']
        with open(file_to_upload, 'rb') as f:
            files = {'file': (file_path, f)}
            res = requests.post(url, data=res['fields'], files=files)
        if res.status_code == 201 or res.status_code == 200:
            # Bucket name is the first label of the pre-signed URL's host.
            bucket = urllib.parse.urlparse(url).netloc.split('.')[0]
            return 's3://%s/%s' % (bucket, file_path)
        else:
            raise AugerException(
                'HTTP error [%s] while uploading file'
                ' to Auger Cloud...' % res.status_code)

    def _get_data_set_name(self, file_name):
        """Derive a unique data-set name from the base file name."""
        fname, fext = os.path.splitext(file_name)
        return self._get_uniq_object_name(fname, fext)
|
996,330 | f1d2be01a35ea77355603e18b1c7f0dc8b422c6d | import traceback
import logging
logger = logging.getLogger("vaex.events")
class Signal(object):
    """A minimal signal/slot hub: callbacks register once and are invoked
    (with optional pre-bound arguments) every time the signal is emitted."""

    def __init__(self, name=None):
        """Create a signal; *name* defaults to this object's repr."""
        self.name = name or repr(self)
        self.callbacks = []
        self.extra_args = {}

    def connect(self, callback, prepend=False, *args, **kwargs):
        """Register *callback*; extra args/kwargs are appended on every emit.

        Returns the callback so this can be used as a decorator.
        """
        # Record the bound arguments before exposing the callback, so a
        # concurrent emit() cannot see a callback without its entry.
        self.extra_args[callback] = (args, kwargs)
        position = 0 if prepend else len(self.callbacks)
        self.callbacks.insert(position, callback)
        return callback

    def emit(self, *args, **kwargs):
        """Invoke every connected callback and return the list of results.

        Emit-time kwargs override connect-time kwargs; positional args are
        emit-time args followed by connect-time args.
        """
        collected = []
        # Iterate over a snapshot: handlers may (dis)connect while we run.
        for handler in list(self.callbacks):
            bound_args, bound_kwargs = self.extra_args[handler]
            call_args = args + bound_args
            call_kwargs = dict(bound_kwargs)
            call_kwargs.update(kwargs)
            try:
                collected.append(handler(*call_args, **call_kwargs))
            except Exception:
                logger.exception("error in handling callback %r with arguments %r and kwargs %r", handler, call_args, call_kwargs)
                raise
        return collected

    def disconnect(self, callback):
        """Remove *callback* and its stored extra arguments."""
        self.callbacks.remove(callback)
        del self.extra_args[callback]
|
996,331 | 7bff5198c76a1c6f0f7c79065881cc2c05d9b326 | # -*- coding:utf-8 -*-
from django.contrib import admin
from models import *
# Register your models here.
class book_infoAdmin(admin.ModelAdmin):
    """Admin options for the book_info model: change-list layout and edit form."""
    # Columns shown on the change-list page.
    list_display = ('book_category', 'book_name', 'book_auth', 'book_desc', 'book_is_recommend','book_display',)
    # Columns that link through to the edit page.
    list_display_links = ('book_category', 'book_name', 'book_auth',)
    # Columns editable inline on the change list.
    list_editable = ('book_is_recommend', 'book_desc','book_display')
    # Fields shown on the add/edit form.
    fieldsets = (
        (None, {
            'fields': ('book_name', 'book_desc','book_auth', 'book_is_recommend', 'book_category')
        }),
    )
class Chapter_ArticleAdmin(admin.ModelAdmin):
    """Admin options for the Chapter_Article model."""
    # Columns shown on the change-list page.
    list_display = ('Chapter_book', 'chapter_title', 'Chapter_level', 'chapter_is_recommend', 'chapter_display')
    # Columns that link through to the edit page.
    list_display_links = ('Chapter_book', 'chapter_title',)
    # Columns editable inline on the change list.
    list_editable = ('Chapter_level', 'chapter_is_recommend', 'chapter_display')
    # Fields shown on the add/edit form.
    fieldsets = (
        (None, {
            'fields': ('Chapter_book', 'Chapter_level', 'chapter_title', 'chapter_desc', 'chapter_content', 'chapter_is_recommend', 'chapter_display')
        }),
    )

    class Media:
        # KindEditor rich-text editor assets loaded on the admin form.
        js = (
            '/static/js/kindeditor-4.1.10/kindeditor-min.js',
            '/static/js/kindeditor-4.1.10/lang/zh_CN.js',
            '/static/js/kindeditor-4.1.10/config.js',
        )
# Register the models with the admin site.
# FIX: removed the stray trailing commas, which wrapped each register()
# call in a throwaway 1-tuple expression statement.
admin.site.register(book_info, book_infoAdmin)
admin.site.register(Book_Category)
admin.site.register(Chapter_Article, Chapter_ArticleAdmin)
|
996,332 | 5178a068b49776e3eb90717844a3863766c90346 | from __future__ import print_function
from math import radians, sin, cos, sqrt, asin
from operator import add
import json
import sys
import datetime
import boto3
from pyspark.sql import SparkSession
import pyspark.sql.functions as PY
from pyspark.sql.types import DoubleType
from config import sql_username
from config import sql_password
from config import sql_host
def insertSql(df):
    """Append the rows of Spark DataFrame *df* to the MySQL table
    airplanes.close_calls via JDBC, using credentials from config."""
    df.write.format("jdbc")\
        .option("url", "jdbc:mysql://" + sql_host + "/airplanes")\
        .option("dbtable", "close_calls")\
        .option("driver", "com.mysql.cj.jdbc.Driver")\
        .option("user", sql_username)\
        .option("password", sql_password).mode("append").save()
def calcDistance(lat1, lon1, lat2, lon2):
    '''
    Great-circle (Haversine) distance between two lat/lon points.
    Based on https://rosettacode.org/wiki/Haversine_formula#Python
    :param lat1: Latitude of first point (degrees)
    :param lon1: Longitude of first point (degrees)
    :param lat2: Latitude of second point (degrees)
    :param lon2: Longitude of second point (degrees)
    :returns: Float, distance between the two points in km
    '''
    EARTH_RADIUS_KM = 6372.8
    # Angular separations in radians.
    d_phi = radians(lat2 - lat1)
    d_lambda = radians(lon2 - lon1)
    phi1 = radians(lat1)
    phi2 = radians(lat2)
    # Haversine of the central angle.
    half_chord = sin(d_phi / 2.0) ** 2 + cos(phi1) * cos(phi2) * sin(d_lambda / 2.0) ** 2
    central_angle = 2 * asin(sqrt(half_chord))
    return EARTH_RADIUS_KM * central_angle
def explodeCosArr(row):
    '''
    Explodes position, altitude, time array into its own row
    Cos[] is as an array of with the following repeating schema:
    [latitude, longitude, time, altitude, latitude, longitude, time, altitude... ]
    Uses modulo operator to determine element type and create a new row
    :returns: One or more rows for every four elements of Cos array
    '''
    # row layout assumed to match the select() in __main__:
    # 0=Icao, 1=Reg, 2=Call, 3=Cos, 8=Trak, 9=Spd, 10=From, 11=To, 12=Type
    # (TODO confirm against the caller's column order).
    output = []
    latLongArr = row[3]
    lat = lon = time = alt = 0
    for i in range(len(latLongArr)):
        if i % 4 == 0:
            # Start of a new quadruple: flush the previous one first
            # (skipped on the very first element).
            if (i != 0):
                output.append( (row[0], row[1], row[2], alt, lat, lon, time, row[8], row[9], row[10], row[11], row[12] ))
            lat = None
            if latLongArr[i]:
                lat = latLongArr[i]
        if i % 4 == 1:
            lon = None
            if latLongArr[i]:
                lon = latLongArr[i]
        if i % 4 == 2:
            time = None
            if latLongArr[i]:
                #remove milliseconds from timestamp
                time = int(latLongArr[i] / 1000)
        if i % 4 == 3:
            alt = None
            if latLongArr[i]:
                alt = int(latLongArr[i])
    # Flush the final quadruple after the loop.
    # NOTE(review): if len(Cos) is not a multiple of 4, this emits a row
    # built from a partially updated quadruple — verify upstream data.
    output.append( (row[0], row[1], row[2], alt, lat, lon, time, row[8], row[9], row[10], row[11], row[12] ))
    return output
if __name__ == "__main__":
    # Close-call detector: for every minute of the given day, load radar
    # snapshots from S3, explode per-plane position histories, self-join to
    # find aircraft pairs within ~1 km and <1000 ft, drop pairs near an
    # airport, and append the results to MySQL.
    if len(sys.argv) != 2:
        print("Usage: main <YYYY-MM-DD>", file=sys.stderr)
        sys.exit(-1)
    spark = SparkSession\
        .builder\
        .appName("CloseCalls with join math")\
        .getOrCreate()
    #register UDF
    udfCalcDistance = PY.udf(calcDistance, DoubleType())
    try:
        date = datetime.datetime.strptime(sys.argv[1], "%Y-%m-%d")
    except ValueError:
        # BUG FIX: 'date' is unbound when strptime() raises, so the original
        # error message itself crashed with a NameError; report the raw arg.
        print("Error parsing date: " + sys.argv[1])
        sys.exit()
    startDate = date
    endDate = date + datetime.timedelta(days=1)
    minute = datetime.timedelta(minutes=1)
    while startDate < endDate:
        url = "s3a://radar-data/" + startDate.strftime("%Y-%m-%d/%Y-%m-%d-%H%M") + "Z.json"
        print(" !***** loading: " + url)
        # BUG FIX: advance to the next minute *before* attempting the read.
        # The original incremented only after the except-branch 'continue',
        # so a single broken file made the loop spin on the same URL forever.
        startDate += minute
        #read main files
        try:
            df = spark.read.json(url, multiLine=True).select('acList')
        except Exception:
            print(" !~***** broken file:" + url)
            continue
        #load dataframe of airport elevations
        airportAltDf = spark.read.json('s3a://radar-data/airportsLatLon.json')
        # Flatten acList structs into columns, keep only rows with position data.
        # (Also removed a dangling line-continuation backslash after dropna().)
        df = df.select(PY.explode("acList").alias("tmp")).select("tmp.*")\
            .select("Icao", "Reg", "Call", "Cos", "Alt", "Lat", "Long", "PosTime", "Trak", "Spd", "From", "To", "Type") \
            .dropna("any", None, ["Icao", "Reg", "Alt", "Lat", "Long", "PosTime", "Cos"])
        #expand Cos position into rows
        expandedMap = df.rdd.repartition(64).flatMap(explodeCosArr)
        #turn RDD back into DF; remove duplicated timestamps; remove null values
        explodedDf = spark.createDataFrame(expandedMap, df.drop("Cos").schema)\
            .dropDuplicates(["Icao", "PosTime"])\
            .dropna("any", None, ["Icao", "Reg", "Alt", "Lat", "Long", "PosTime"])\
            .filter(PY.col("Alt") > 500) #filter out planes below 500ft
        #self-join to detect planes within 0.01 degrees and 1000 ft
        d1 = explodedDf.alias("d1")
        d2 = explodedDf.toDF("_Icao", "_Reg", "_Call", "_Alt", "_Lat", "_Long", "_PosTime", "_Trak", "_Spd", "_From", "_To", "_Type" )
        joined_df = d1.join(d2,
            ((d1.PosTime == d2._PosTime )\
            & (PY.abs(d1.Lat - d2._Lat) <= .01)\
            & (PY.abs(d1.Long - d2._Long) <= .01)\
            & (PY.abs(d1.Alt - d2._Alt) < 1000)\
            & (d1.Lat < d2._Lat)\
            & (d1.Icao != d2._Icao)), 'inner')
        #filter to get close calls within 1km
        joined_df = joined_df\
            .withColumn("Alt_Diff", (PY.abs(PY.col('Alt') - PY.col('_Alt'))))\
            .withColumn("Distance", udfCalcDistance( PY.col('Lat'), PY.col('Long'), PY.col('_Lat'), PY.col('_Long') ) )\
            .filter(PY.col("Distance") < 1)
        #self-join to detect when close call is .1 deg (~11km) away from airport and near ground
        closeAirportDf = joined_df.join(airportAltDf,
            ((PY.abs(joined_df.Lat - airportAltDf.lat) <= .1)\
            & (PY.abs(joined_df.Long - airportAltDf.lon) <= .1)\
            & (joined_df.Alt - airportAltDf.elevation <= 2000)), 'inner')
        #left anti join to remove planes at airport
        joined_df = joined_df.join(closeAirportDf,
            ((joined_df.Icao == closeAirportDf.Icao)\
            &(joined_df._Icao == closeAirportDf._Icao)\
            &(joined_df.PosTime == closeAirportDf.PosTime)), 'leftanti')
        insertSql(joined_df)
    spark.stop()
|
996,333 | 0ac74c8b583b942f0ba299b44c9e47e28b710da7 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 13:16:31 2020
@author: jakem
"""
from monodisperse_box_xform import Monodisperse2
import numpy as np
import matplotlib.pyplot as plt
# Transform factors swept from 1.0 down toward 0 in steps of 0.05;
# transformsize is the deformation magnitude (1 - factor).
transform_list = np.arange(1,0,-.05)
transformsize = 1-transform_list
transform_size = []
# NOTE(review): appending the whole array produces a one-element list of
# arrays, and transform_size is never used below — confirm intent.
transform_size.append(transformsize)
# af_dev_1 = []
# af_dev_2 = []
# af_dev_3 = []
# af_dev_4 = []
# af_dev_5 = []
# test_kicks = [0.001, 0.01, 0.05, 0.1, 0.2]
# for kick in test_kicks:
# print(kick)
# for xform in np.arange(1,.05,-.1):
# N = 1000
# Bx = 40
# By = 40
# seed = 115
# m = Monodisperse2(N,Bx,By,seed)
# area_frac = 0.5
# swell = m.equiv_swell(area_frac)
# cycle_number = 200
# m.train_xform(xform, 1, area_frac, kick, cycle_number, noise=0)
# area_frac_array = np.array(np.linspace(0,1,100))
# memory = m.detect_memory(0, 1, .01)
# af_deviation = memory - area_frac
# if test_kicks.index(kick) == 0:
# af_dev_1.append(af_deviation)
# elif test_kicks.index(kick) == 1:
# af_dev_1.append(af_deviation)
# elif test_kicks.index(kick) == 2:
# af_dev_1.append(af_deviation)
# elif test_kicks.index(kick) == 3:
# af_dev_1.append(af_deviation)
# elif test_kicks.index(kick) == 4:
# af_dev_5.append(af_deviation)
# else:
# print('index of test_kicks is out of range')
# break
# print(af_dev_1)
# print(af_dev_2)
# print(af_dev_3)
# print(af_dev_4)
# print(af_dev_5)
# Sweep small transform magnitudes, train the particle system at each, and
# report how far the detected memory deviates from the training area fraction.
for xform in np.arange(.14,0,-.02):
    print(xform)
    # NOTE(review): these accumulators are re-created on every iteration and
    # nothing is ever appended to them, so the results are not collected.
    af_dev_1 = []
    af_dev_2 = []
    af_dev_3 = []
    af_dev_4 = []
    af_dev_5 = []
    kick = 0.001 #Vary Kick size
    # System parameters: N particles in a Bx-by-By box with a fixed seed.
    N = 1000
    Bx = 40
    By = 40
    seed = 115
    m = Monodisperse2(N,Bx,By,seed)
    area_frac = 0.5
    swell = m.equiv_swell(area_frac)
    cycle_number = 300
    # Train at this transform, then scan [0, 1] for the remembered fraction.
    m.train_xform(xform, 1, area_frac, kick, cycle_number, noise=0)
    area_frac_array = np.array(np.linspace(0,1,100))
    memory = m.detect_memory(0, 1, .01)
    af_deviation = memory - area_frac
    print(af_deviation)
# Kick = 0.001
Ts1 = [0,.05,.1,.15,.2,.25,.3,.35,.4,.45,.5,.55,.6,.65,.75,.85,.9]
af1 = [-.01,-.06,-.1,-.15,-.18,-.22,-.26,-.29,-.32,-.35,-.38,-.4,-.42,-.44,-.47,-.49,-.43]
# Kick = 0.01
Ts2 = [0,.05,.1,.15,.2,.25,.3,.35,.4,.45,.5,.55,.6,.65,.7,.75,.8]
af2 = [0,-.05,-.1,-.14,-.18,-.22,-.26,-.29,-.32,-.35,-.38,-.4,-.42,-.44,-.46,-.47,-.48]
# Kick = 0.02
Ts3 = [0,.05,.1,.15,.2,.25,.3,.35,.45,.5,.55,.6,.65,.7,.75,.8,.85]
af3 = [0,-.05,-.09,-.14,-.18,-.22,-.25,-.29,-.35,-.38,-.4,-.42,-.44,-.46,-.47,-.48,-.49]
# Kick = 0.05
Ts4 = [0,.05,.1,.15,.2,.25,.35,.45,.5,.55,.6,.65,.7,.75,.8,.85,.9]
af4 = [0,-.04,-.09,-.13,-.17,-.21,-.19,-.24,-.37,-.4,-.42,-.44,-.46,-.47,-.48,-.49,-.49]
# Kick = 0.1
Ts5 = [0,.1,.15,.25,.3,.35,.4,.47,.5,.53,.6,.65,.7,.75,.85,.9]
af5 = [0,-.07,-.12,-.14,-.08,-.11,-.07,-.35,-.13,-.38,-.41,-.43,-.45,-.38,-.46,-.49]
# fig = plt.figure()
# ax1 = fig.add_subplot(111)
# ax1.plot(Ts1,af2)
# ax1.plot(Ts2,af2)
# ax1.plot(Ts3,af3)
# ax1.plot(Ts4,af4)
# ax1.plot(Ts5,af5)
|
996,334 | 27c85ee9f6dfd42c5c3b41f4e9b851bd8c682863 | from setuptools import setup, find_packages
# Package metadata for the ODEmethods distribution.
setup(
    name='ODEmethods',
    version='0.1',
    # Ship every package except the test packages.
    packages=find_packages(exclude=['tests*']),
    license='MIT',
    description='Python package that includes numerical methods for solving ordinary differential equations (initial value problems).',
    # NOTE(review): open() without an explicit encoding and without closing
    # the handle; consider open('README.md', encoding='utf-8').
    long_description=open('README.md').read(),
    install_requires=['numpy'],
    url='https://github.com/KSpenko/ODEmethods',
    author='Krištof Špenko',
    author_email='kristof.spenko.scrm@gmail.com'
)
996,335 | 80edb9365920241ea9748f93077f4ef4ddc66086 | import numpy as np
# Logistic-regression parameters: weight matrix W and bias b, random init.
W = np.random.rand(2, 1)  # 2x1 matrix
b = np.random.rand(1)
print("W = ", W, ",W.shape = ", W.shape, ", b =", b, ",b.shape", b.shape)
# Training data: 9 samples x 2 features, with 9 binary labels.
x_data = np.array([[2,4],[4,11],[6,6],[8,5],[10,7],[12,16],[14,8],[16,3],[18,7]])  # 9x2
t_data = np.array([0,0,0,0,1,1,1,1,1]).reshape(9,1)  # 9x1
def sigmoid(x):
    """Logistic function: maps any real input (scalar or array) into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
def loss_func(x, t):
    """Binary cross-entropy of the global model (W, b) on inputs x, targets t."""
    # Small epsilon keeps log() finite when a prediction hits exactly 0 or 1.
    eps = 1e-7
    prediction = sigmoid(np.dot(x, W) + b)
    return -np.sum(t * np.log(prediction + eps) + (1 - t) * np.log(1 - prediction + eps))
def numerical_derivative(f, x):
    """Central-difference gradient of scalar function *f* at array *x*.

    Each element of x is perturbed in place by ±h and restored afterwards;
    the returned array has the same shape as x.
    """
    h = 1e-4
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        original = x[idx]
        # Forward evaluation: f(x + h) at this coordinate only.
        x[idx] = float(original) + h
        forward = f(x)
        # Backward evaluation: f(x - h).
        x[idx] = original - h
        backward = f(x)
        grad[idx] = (forward - backward) / (2 * h)
        # Restore the coordinate before moving on.
        x[idx] = original
        it.iternext()
    return grad
def error_val(x, t):
    """Binary cross-entropy error of the global model (W, b) on inputs x,
    targets t. Same quantity as loss_func(); kept separate for reporting."""
    delta = 1e-7  # log 무한대 발생 방지 -> prevents log(0) = -inf
    z = np.dot(x, W) + b
    y = sigmoid(z)
    # cross-entropy
    # BUG FIX: the negative-class term was np.log((1 - y) * delta), which
    # MULTIPLIES by the epsilon instead of adding it (compare loss_func),
    # yielding log(~1e-7) even for confident correct predictions.
    return -np.sum(t * np.log(y + delta) + (1 - t) * np.log(1 - y + delta))
def predict(x):
    """Return (probability, hard 0/1 label) for input *x* under the global
    model (W, b); the label uses a 0.5 decision threshold."""
    probability = sigmoid(np.dot(x, W) + b)
    result = 1 if probability > 0.5 else 0
    return probability, result
learning_rate = 1e-2  # if training diverges, retry with 1e-3 ~ 1e-6
f = lambda x: loss_func(x_data, t_data)  # f(x) = loss_func(x_data, t_data)
print("Initial error value = ", error_val(x_data, t_data), "Initial W =", W, "\n", ",b =", b)
# Plain gradient descent on W and b using numerical gradients.
for step in range(10001):
    W -= learning_rate * numerical_derivative(f, W)
    b -= learning_rate * numerical_derivative(f, b)
    # Report progress every 400 steps.
    if (step % 400 == 0):
        print("step = ", step, "error value = ", error_val(x_data, t_data), "W = ", W, ",b = ", b)
# Classify two unseen points.
test_data = np.array([3, 17])
print(predict(test_data))
test_data = np.array([7, 21])
print(predict(test_data))
996,336 | 5c7ac24eff3071fd271147cc68dd5f240eb1607f | #!/usr/bin/env python
#-*- coding: utf-8 -*-
def search(low, high, arr, target):
    """Recursive binary search for *target* in sorted arr[low..high].

    :returns: the index of target, or None if it is not present.
    """
    # BUG FIX: the original recursed into empty ranges (high < low), e.g.
    # search(0, -1, ...) when target is smaller than every element, which
    # caused unbounded recursion. Guard the empty range first.
    # (Also removed the leftover debug print of low/high.)
    if low > high:
        return None
    mid = (low + high) // 2
    if arr[mid] == target:
        return mid
    if arr[mid] > target:
        return search(low, mid - 1, arr, target)
    return search(mid + 1, high, arr, target)
if __name__ == '__main__':
    # Build the sorted test array 0..99999 directly with list(range(...))
    # instead of appending one element at a time in a loop.
    arr = list(range(100000))
    result = search(0, len(arr) - 1, arr, 50)
    print(result)
|
996,337 | 4419dc9ae8db9d97c95724fbe1af0cbdb345356e | import unittest
from unittest import mock
from user import login_user, register_user
from . import mocks
class TestTodoListCreate(unittest.TestCase):
    """Tests for register_user(), with requests.post mocked out."""

    @mock.patch("requests.post", return_value=mocks.MockNoContentResponse)
    def test_register_user(self, mock_res):
        """A no-content response yields the success message."""
        response = register_user("sample@email.com",
                                 "samplepassword",
                                 "Some name",
                                 "Some last name")
        self.assertEqual(response, "User registered.")

    @mock.patch("requests.post", return_value=mocks.MockValidationErrorResponse)
    def test_register_user_invalid_response(self, mock_res):
        """A validation-error response is passed through as a dict."""
        response = register_user("sample@email.com",
                                 "samplepassword",
                                 "Some name",
                                 "Some last name")
        self.assertEqual(response, {'message': 'validation_error'})
|
996,338 | f1b737c0642ff3451d5ff41b81ab673e18aebce2 | from django.urls import path
from .views import *
# URL routes for this app; all view callables come from .views (star import).
urlpatterns=[
    path('create_axes/',create_axes),
    path('create_tache/',create_tache),
    path('mon_stage/',my_stage),
    path('my_progress/',getprogress),
    # Routes taking a primary-key path parameter.
    path('my_progress/<int:id>',validate_tache),
    path('update_tache/<int:id>',update_tache),
    path('delete_tache/<int:id>',delete_tache),
    path('update_axe/<int:id>',update_axe),
    path('delete_axe/<int:id>',delete_axe),
    path('rapport/',get_rapport),
]
996,339 | c04062a81d68d87199a965c6070733e57a23c294 | import re
from stringfuzz.scanner import scan
from stringfuzz.ast import *
from stringfuzz.util import join_terms_with
__all__ = [
'parse',
'parse_file',
'parse_tokens',
'ParsingError',
]
# constants
MAX_ERROR_SIZE = 200  # max characters of already-parsed context shown in errors
UNDERLINE = '-'  # character used to draw the error-position underline
# Template for ParsingError messages (filled in by ParsingError.__init__).
MESSAGE_FORMAT = '''Parsing error on line {number}:
{context}{actual_value}
{underline}^
{filler}expected {expected}, got {actual_type} {actual_value!r}'''
# data structures
class Stream(object):
    """A one-token-lookahead cursor over a token iterable.

    Holds the raw source text so ParsingError can render context.
    """

    def __init__(self, tokens, text):
        self.text = text
        self.current_token = None
        self.stream = iter(tokens)

    def advance(self):
        """Move to the next token; current_token becomes None at the end."""
        self.current_token = next(self.stream, None)

    def accept(self, name):
        """If the current token is named *name*, consume it and return True."""
        token = self.current_token
        if token is None or token.name != name:
            return False
        self.advance()
        return True

    def peek(self):
        """Return the current token without consuming it."""
        return self.current_token

    def expect(self, expected):
        """Consume and return the current token, or raise ParsingError."""
        token = self.current_token
        if not self.accept(expected):
            raise ParsingError(expected, self)
        return token
class ParsingError(IndexError):
    """Raised when the token stream does not match the grammar.

    Builds a human-readable message showing the already-parsed context, an
    underline pointing at the offending column, and what was expected
    versus what was actually found.
    """

    def __init__(self, expected, stream):
        # compute actual value
        actual_token = stream.current_token
        if actual_token is not None:
            actual_type = actual_token.name
            actual_value = actual_token.value
            error_index = actual_token.position
        else:
            # End of input: point at the last character of the source text.
            actual_type = 'nothing'
            actual_value = ''
            error_index = len(stream.text) - 1
        # get error context (truncated to MAX_ERROR_SIZE characters)
        parsed_text = stream.text[0:error_index]
        context = parsed_text[-MAX_ERROR_SIZE:]
        if len(context) < len(parsed_text):
            context = '... ' + context
        # find row and column of error
        try:
            latest_newline_index = parsed_text.rindex('\n')
        except ValueError as e:
            # No newline yet: the error is on the first line.
            latest_newline_index = 0
        error_row = parsed_text.count('\n') + 1
        error_column = error_index - latest_newline_index - 1
        # compose message
        message = MESSAGE_FORMAT.format(
            number = error_row,
            context = context,
            underline = (UNDERLINE * error_column),
            filler = (' ' * error_column),
            expected = expected,
            actual_type = actual_type,
            actual_value = actual_value,
        )
        # pass message to superclass
        super().__init__(message)
# parsers
def accept_arg(s):
    """Try to parse one argument term from *s*; return its AST node or None."""
    token = s.peek()
    # nested expression
    if s.accept('LPAREN'):
        expression = expect_expression(s)
        s.expect('RPAREN')
        return expression
    # literal
    if s.accept('BOOL_LIT'):
        if token.value == 'true':
            return BoolLitNode(True)
        elif token.value == 'false':
            return BoolLitNode(False)
    if s.accept('INT_LIT'):
        return IntLitNode(int(token.value))
    if s.accept('STRING_LIT'):
        return StringLitNode(token.value)
    # others
    if s.accept('RE_ALLCHAR'):
        return ReAllCharNode()
    if s.accept('IDENTIFIER'):
        return IdentifierNode(token.value)
    if s.accept('SETTING'):
        return SettingNode(token.value)
    return None
def accept_meta_arg(s):
    """Try to parse one meta-command argument; return its node or None.

    Literals and identifiers are wrapped as raw MetaDataNode values.
    """
    arg = s.peek()
    if (
        s.accept('BOOL_LIT') or
        s.accept('INT_LIT') or
        s.accept('STRING_LIT') or
        s.accept('IDENTIFIER')
    ):
        return MetaDataNode(arg.value)
    if s.accept('SETTING'):
        return SettingNode(arg.value)
    return None
def expect_identifier(s):
    """Consume an IDENTIFIER token and wrap it in an IdentifierNode."""
    token = s.expect('IDENTIFIER')
    return IdentifierNode(token.value)
def expect_arg(s):
    """Parse one argument; raise ParsingError if none is present."""
    result = accept_arg(s)
    if result is None:
        raise ParsingError('an argument', s)
    return result
def expect_sort(s):
    """Parse one sort; raise ParsingError if none is present."""
    result = accept_sort(s)
    if result is None:
        raise ParsingError('a sort', s)
    return result
def repeat_star(s, getter):
    """Apply *getter* to *s* repeatedly, collecting each result into a list
    until it returns None (zero-or-more repetition, like a grammar '*')."""
    collected = []
    term = getter(s)
    while term is not None:
        collected.append(term)
        term = getter(s)
    return collected
def accept_sort(s):
    """Try to parse a sort; return its node or None on no match.

    A sort is either a single identifier (atomic) or a parenthesized
    identifier followed by one or more sorts (compound).
    """
    # compound sort
    if s.accept('LPAREN'):
        symbol = expect_identifier(s)
        sorts = [expect_sort(s)]
        sorts += repeat_star(s, accept_sort)
        s.expect('RPAREN')
        return CompoundSortNode(symbol, sorts)
    # atomic sort
    token = s.peek()
    if s.accept('IDENTIFIER'):
        return AtomicSortNode(token.value)
    return None
def accept_sorted_var(s):
    """Try to parse a '(name sort)' pair; return SortedVarNode or None."""
    if s.accept('LPAREN'):
        name = expect_identifier(s)
        sort = expect_sort(s)
        s.expect('RPAREN')
        return SortedVarNode(name, sort)
    return None
def expect_expression(s):
    """Parse one SMT-LIB expression body (the part inside parentheses).

    Dispatches on the leading keyword token; known string/regex operators
    get dedicated AST nodes, n-ary operators are re-folded into binary
    trees, and anything unrecognized falls through to a generic node.
    """
    if s.accept('ASSERT'):
        assertion = expect_arg(s)
        return AssertNode(assertion)
    # declarations and definitions
    if s.accept('DECLARE_FUN'):
        name = expect_identifier(s)
        s.expect('LPAREN')
        signature = repeat_star(s, accept_sort)
        s.expect('RPAREN')
        return_sort = expect_sort(s)
        return FunctionDeclarationNode(name, BracketsNode(signature), return_sort)
    if s.accept('DEFINE_FUN'):
        name = expect_identifier(s)
        s.expect('LPAREN')
        signature = repeat_star(s, accept_sorted_var)
        s.expect('RPAREN')
        return_sort = expect_sort(s)
        s.expect('LPAREN')
        body = expect_expression(s)
        s.expect('RPAREN')
        return FunctionDefinitionNode(name, BracketsNode(signature), return_sort, body)
    if s.accept('DECLARE_CONST'):
        name = expect_identifier(s)
        return_sort = expect_sort(s)
        return ConstantDeclarationNode(name, return_sort)
    # special expression cases
    if s.accept('CONCAT'):
        # first two args are mandatory
        a = expect_arg(s)
        b = expect_arg(s)
        # more args are optional
        other_args = repeat_star(s, accept_arg)
        # re-format n-ary concats into binary concats
        concat = join_terms_with([a, b] + other_args, ConcatNode)
        return concat
    if s.accept('CONTAINS'):
        a = expect_arg(s)
        b = expect_arg(s)
        return ContainsNode(a, b)
    if s.accept('AT'):
        a = expect_arg(s)
        b = expect_arg(s)
        return AtNode(a, b)
    if s.accept('LENGTH'):
        a = expect_arg(s)
        return LengthNode(a)
    if s.accept('INDEXOFVAR'):
        # two arguments are expected
        a = expect_arg(s)
        b = expect_arg(s)
        # the third argument may or may not be there
        c = accept_arg(s)
        if c is not None:
            return IndexOf2Node(a, b, c)
        return IndexOfNode(a, b)
    if s.accept('INDEXOF'):
        a = expect_arg(s)
        b = expect_arg(s)
        return IndexOfNode(a, b)
    if s.accept('INDEXOF2'):
        a = expect_arg(s)
        b = expect_arg(s)
        c = expect_arg(s)
        return IndexOf2Node(a, b, c)
    if s.accept('PREFIXOF'):
        a = expect_arg(s)
        b = expect_arg(s)
        return PrefixOfNode(a, b)
    if s.accept('SUFFIXOF'):
        a = expect_arg(s)
        b = expect_arg(s)
        return SuffixOfNode(a, b)
    if s.accept('REPLACE'):
        a = expect_arg(s)
        b = expect_arg(s)
        c = expect_arg(s)
        return StringReplaceNode(a, b, c)
    if s.accept('SUBSTRING'):
        a = expect_arg(s)
        b = expect_arg(s)
        c = expect_arg(s)
        return SubstringNode(a, b, c)
    if s.accept('FROM_INT'):
        a = expect_arg(s)
        return FromIntNode(a)
    if s.accept('TO_INT'):
        a = expect_arg(s)
        return ToIntNode(a)
    if s.accept('IN_RE'):
        a = expect_arg(s)
        b = expect_arg(s)
        return InReNode(a, b)
    if s.accept('STR_TO_RE'):
        a = expect_arg(s)
        return StrToReNode(a)
    if s.accept('RE_CONCAT'):
        # first two args are mandatory
        a = expect_arg(s)
        b = expect_arg(s)
        # more args are optional
        other_args = repeat_star(s, accept_arg)
        # re-format n-ary concats into binary concats
        concat = join_terms_with([a, b] + other_args, ReConcatNode)
        return concat
    if s.accept('RE_STAR'):
        a = expect_arg(s)
        return ReStarNode(a)
    if s.accept('RE_PLUS'):
        a = expect_arg(s)
        return RePlusNode(a)
    if s.accept('RE_RANGE'):
        a = expect_arg(s)
        b = expect_arg(s)
        return ReRangeNode(a, b)
    if s.accept('RE_UNION'):
        # first two args are mandatory
        a = expect_arg(s)
        b = expect_arg(s)
        # more args are optional
        other_args = repeat_star(s, accept_arg)
        # re-format n-ary unions into binary unions
        union = join_terms_with([a, b] + other_args, ReUnionNode)
        return union
    if s.accept('RE_INTER'):
        # first two args are mandatory
        a = expect_arg(s)
        b = expect_arg(s)
        # more args are optional
        other_args = repeat_star(s, accept_arg)
        # re-format n-ary intersections into binary intersections
        inter = join_terms_with([a, b] + other_args, ReInterNode)
        return inter
    # Peek before accept so the meta-command keyword's value is retained.
    token = s.peek()
    if s.accept('META_COMMAND'):
        body = repeat_star(s, accept_meta_arg)
        return MetaCommandNode(token.value, *body)
    # generic expression case
    name = expect_identifier(s)
    body = repeat_star(s, accept_arg)
    return GenericExpressionNode(name, *body)
def get_expressions(s):
    """Parse every top-level '( expression )' group from stream *s*."""
    parsed = []
    # Prime the one-token lookahead.
    s.advance()
    # Keep consuming parenthesized expressions until the stream runs dry.
    while s.peek() is not None:
        s.expect('LPAREN')
        expression = expect_expression(s)
        s.expect('RPAREN')
        parsed.append(expression)
    return parsed
# public API
def parse_file(path, language):
    """Read the file at *path* and parse it as *language*."""
    with open(path, 'r') as file:
        return parse(file.read(), language)

def parse(text, language):
    """Scan *text* with the given language's scanner, then parse the tokens."""
    return parse_tokens(scan(text, language), language, text)

def parse_tokens(tokens, language, text):
    """Parse a token iterable; *text* is kept for error-context rendering."""
    return get_expressions(Stream(tokens, text))
|
996,340 | dd103a2e12054503123045706663314d08d0b956 | #!/usr/bin/python
# Python 2 script (print statement, raw_input, long): reads T test cases;
# for each, reads n and a list of values, and if the XOR of all values is
# nonzero prints "NO", otherwise prints their sum minus the smallest value.
T=input()
I=0
while T>0:
    T-=1
    I+=1
    n=input()
    candy = map(long,raw_input().split())
    candy.sort()
    # NOTE(review): 'sum' shadows the builtin; it accumulates the XOR while
    # 's' accumulates the arithmetic total.
    sum = 0
    s=0
    for i in candy:
        sum^=i
        s+=i
    if sum!=0:
        print 'Case #%d: NO' % I
    else:
        print 'Case #%d: %d' % (I,s-candy[0])
996,341 | 01dc8f0e5c123cd9018adbb5e43cfb313e3f2f9d | input_first = int(input())
# Read input_first integers (one per line) and print their sum.
# Comprehension + built-in sum() replace the two manual accumulation loops.
inputs_after_first = [int(input()) for _ in range(input_first)]
total = sum(inputs_after_first)
print(total)
|
996,342 | cdf4f982d62ee63e3e061eff790b16b55984f08c | a = 5.5
b = 5.5
print(a == b)  # True: numeric equality
print(a is b)  # identity test on the two float objects
# NOTE(review): whether `a is b` prints True is a CPython implementation
# detail (constant folding/caching of literals), not a language guarantee.
s1 = "JENI"
s2 = "JENI"
print(s1 == s2)  # True: string equality
print(s1 is s2)  # identity test on the two string objects
# NOTE(review): identical string literals are usually interned by CPython,
# so this typically prints True, but identity should not be relied on.
996,343 | 442961d2070df1a32b8de898fd5b3caa0166d9c4 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:
        """Return every root-to-leaf path whose node values add up to *sum*."""
        def dfs(root, sum, path, ret):
            # Depth-first walk carrying the current path and remaining target.
            if not root:
                return
            path.append(root.val)
            if not root.left and not root.right:
                # Leaf: record a copy of the path if the remaining target
                # equals this leaf's value.
                if root.val == sum:
                    ret.append(list(path))
                path.pop(-1)  # backtrack before returning
                return
            dfs(root.left, sum - root.val, path, ret)
            dfs(root.right, sum - root.val, path, ret)
            path.pop(-1)  # backtrack after visiting both subtrees
        path, ret = [], []
        dfs(root, sum, path, ret)
        return ret
|
996,344 | 0b4d181a82bdfbad71670290444900070c003af8 |
import argparse
parser = argparse.ArgumentParser(description='Train CatDog Model.')
parser.add_argument('modelpath', metavar='PATH', type=str,
help='path to saved h5 model.')
parser.add_argument('imagepath', metavar='PATH', type=str,
help='path to saved image to test.')
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras import optimizers
from keras.models import Sequential, Model, load_model
from keras.layers import Dropout, Flatten, Dense, Input
import numpy as np
classes = ['cat', 'dog']
def test(modelpath: str, imagepath: str) -> str:
    """Load the saved Keras model and classify one image.

    :param modelpath: path to the saved .h5 model
    :param imagepath: path to the image to classify
    :returns: 'cat' or 'dog'
    """
    # dimensions of our images.
    img_width, img_height = 150, 150
    model: Model = load_model(modelpath)
    image = load_img(imagepath, target_size=(img_height, img_width))
    # Add a leading batch dimension of size 1.
    image = np.expand_dims(img_to_array(image), axis=0)
    # Binary sigmoid output: below 0.5 -> 'cat', otherwise 'dog'.
    return 'cat' if model.predict(image) < 0.5 else 'dog'
if __name__ == '__main__':
    # Parse CLI arguments and report the predicted class for the image.
    args = parser.parse_args()
    prediction = test(args.modelpath, args.imagepath)
    print(f'That\'s a {prediction}!')
|
996,345 | c9094676d71491d916b51175c368a27754374bc2 | preco = input ( "Informe um preço:" )
# FIX: the original was machine-translated into Portuguese pseudo-keywords
# (enquanto/se/mais/imprimir), which is not valid Python. The control-flow
# keywords are restored; the (Portuguese) user-facing strings are kept as-is.
# Scan the price string for the position of the last comma (decimal mark).
posicao = 0
posicao_da_virgula = -1
while posicao < len(preco):
    if preco[posicao] == ',':
        posicao_da_virgula = posicao
    posicao = posicao + 1
if posicao_da_virgula == -1:
    # No comma: the whole value is whole reais.
    print("O valor redes", preco, "reais")
else:
    # Split into reais (before the comma) and centavos (after it).
    print("O valor prestado foi de", preco[:posicao_da_virgula], "reais")
    print("com", preco[posicao_da_virgula + 1:], "centavos")
996,346 | 73f3fde5eefd90c6effb90847be747a0382e47e9 | import unittest
import arff
ARFF = '''%
% DESCRIPTION HERE
%
@RELATION weather
@ATTRIBUTE outlook {sunny, overcast, rainy}
@ATTRIBUTE temperature REAL
@ATTRIBUTE humidity REAL
@ATTRIBUTE windy {TRUE, FALSE}
@ATTRIBUTE play {yes, no}
@DATA
sunny,85.0,85.0,FALSE,no
sunny,80.0,90.0,TRUE,no
overcast,83.0,86.0,FALSE,yes
rainy,70.0,96.0,FALSE,yes
rainy,68.0,80.0,FALSE,yes
rainy,65.0,70.0,TRUE,no
overcast,64.0,65.0,TRUE,yes
sunny,72.0,95.0,FALSE,no
sunny,69.0,70.0,FALSE,yes
rainy,75.0,80.0,FALSE,yes
sunny,75.0,70.0,TRUE,yes
overcast,72.0,90.0,TRUE,yes
overcast,81.0,75.0,FALSE,yes
rainy,71.0,91.0,TRUE,no
'''
OBJ = {
u'description':u'\nDESCRIPTION HERE\n',
u'relation': u'weather',
u'attributes': [
(u'outlook', [u'sunny', u'overcast', u'rainy']),
(u'temperature', 'REAL'),
(u'humidity', 'REAL'),
(u'windy', [u'TRUE', u'FALSE']),
(u'play', [u'yes', u'no'])
],
u'data': [
[u'sunny', 85.0, 85.0, u'FALSE', u'no'],
[u'sunny', 80.0, 90.0, u'TRUE', u'no'],
[u'overcast', 83.0, 86.0, u'FALSE', u'yes'],
[u'rainy', 70.0, 96.0, u'FALSE', u'yes'],
[u'rainy', 68.0, 80.0, u'FALSE', u'yes'],
[u'rainy', 65.0, 70.0, u'TRUE', u'no'],
[u'overcast', 64.0, 65.0, u'TRUE', u'yes'],
[u'sunny', 72.0, 95.0, u'FALSE', u'no'],
[u'sunny', 69.0, 70.0, u'FALSE', u'yes'],
[u'rainy', 75.0, 80.0, u'FALSE', u'yes'],
[u'sunny', 75.0, 70.0, u'TRUE', u'yes'],
[u'overcast', 72.0, 90.0, u'TRUE', u'yes'],
[u'overcast', 81.0, 75.0, u'FALSE', u'yes'],
[u'rainy', 71.0, 91.0, u'TRUE', u'no']
]
}
class TestEncodeComment(unittest.TestCase):
def get_encoder(self):
encoder = arff.ArffEncoder()
return encoder
def test_encode(self):
encoder = self.get_encoder()
result = encoder.encode(OBJ)
expected = ARFF
self.assertEqual(result, expected)
def test_iter_encode(self):
encoder = self.get_encoder()
result = encoder.iter_encode(OBJ)
expected = ARFF.split('\n')
for r, e in zip(result, expected):
self.assertEqual(r, e)
def test_invalid_object(self):
encoder = self.get_encoder()
fixture = {'attributes':[('name','REAL')], 'data':[[1]]}
self.assertRaises(
arff.BadObject,
encoder.encode,
fixture
)
fixture = {'relation':'name', 'data':[[1]]}
self.assertRaises(
arff.BadObject,
encoder.encode,
fixture
)
def test_invalid_object_attribute(self):
encoder = self.get_encoder()
fixture = {
'relation':'name',
'attributes':[4],
'data':[[1]]
}
self.assertRaises(
arff.BadObject,
encoder.encode,
fixture
)
fixture = {
'relation':'name',
'attributes':[(2, 'REAL')],
'data':[[1]]
}
self.assertRaises(
arff.BadObject,
encoder.encode,
fixture
)
fixture = {
'relation':'name',
'attributes':[('NAME', 'REAL', 'MORE')],
'data':[[1]]
}
self.assertRaises(
arff.BadObject,
encoder.encode,
fixture
)
fixture = {
'relation':'name',
'attributes':[('NAME', 3)],
'data':[[1]]
}
self.assertRaises(
arff.BadObject,
encoder.encode,
fixture
)
def test_attribute_invalid_attribute_type(self):
encoder = self.get_encoder()
fixture = {
'relation':'name',
'attributes':[('name','INVALID')],
'data':[[1]]
}
self.assertRaises(
arff.BadObject,
encoder.encode,
fixture
)
def test_encode_duplicate_attribute_name(self):
encoder = self.get_encoder()
fixture = {
'relation': 'name',
'attributes': [('name', 'INTEGER'), ('name', 'INTEGER')],
'data': [[0], [1]],
}
with self.assertRaisesRegexp(arff.BadObject,
'Trying to use attribute name "name" '
'for the second time.'):
encoder.encode(fixture)
def test_encode_string(self):
encoder = self.get_encoder()
fixture = """@RELATION bla
@ATTRIBUTE attr STRING
@DATA
'a,b,c'
'a,b,c '
'a\\\\c'
'a\\"c'
'a\\'c'
'a\\nc'
'a\\tc'
'a\\000c'
'a\\017c'
"""
my_arff = {
"attributes": [["attr", "STRING"]],
"data": [["a,b,c"],
["a,b,c "],
["a\\c"],
["a\"c"],
["a'c"],
["a\nc"],
["a\tc"],
["a\0c"],
["a\017c"],
],
"relation": "bla"}
self.assertEqual(encoder.encode(my_arff), fixture)
    def test_encode_adding_quotes_with_spaces(self):
        """Values containing (unicode) spaces must be quoted and round-trip."""
        # regression tests for https://github.com/renatopp/liac-arff/issues/87
        encoder = self.get_encoder()
        # \u3000 corresponds to an ideographic space. It should be treated as
        # a space.
        fixture = {
            'relation': 'name',
            'attributes': [('A', 'STRING'), ('B', 'STRING')],
            'data': [[u'a', u'b'], [u'b\u3000e', u'a']],
        }
        expected_data = u"""@RELATION name
@ATTRIBUTE A STRING
@ATTRIBUTE B STRING
@DATA
a,b
'b\u3000e',a
"""
        arff_data = encoder.encode(fixture)
        self.assertEqual(arff_data, expected_data)
        # Decoding the encoded text must give back the original data rows.
        decoder = arff.ArffDecoder()
        arff_object = decoder.decode(arff_data)
        self.assertEqual(arff_object['data'], fixture['data'])
|
996,347 | 4d2b8851ff8537fe3d9f8f2002230ce4ba508a7a |
class Input_controller():
    """Maps the player's configuration onto key bindings."""

    def __init__(self, player_conf):
        # Read the configuration and fill the key-binding dictionary.
        # NOTE(review): player_conf is currently unused by the stub below.
        bindings = {}
        self._set_keys(bindings)

    def _set_keys(self, dicc):
        """Populate *dicc* with key bindings (placeholder, not implemented)."""
        pass
996,348 | d7feb0e978069e70c0a2af34a736a7053b630cae | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
import mock
import pytest
from google.api.core import exceptions
from google.api.core import retry
def test_if_exception_type():
    """A single-type predicate accepts that exception type and no other."""
    accepts_value_error = retry.if_exception_type(ValueError)
    assert accepts_value_error(ValueError())
    assert not accepts_value_error(TypeError())
def test_if_exception_type_multiple():
    """A multi-type predicate accepts each listed type and nothing else."""
    accepts = retry.if_exception_type(ValueError, TypeError)
    for matching in (ValueError(), TypeError()):
        assert accepts(matching)
    assert not accepts(RuntimeError())
def test_if_transient_error():
    """Only server-overload style errors (500, 429) count as transient."""
    for transient in (exceptions.InternalServerError(''),
                      exceptions.TooManyRequests('')):
        assert retry.if_transient_error(transient)
    assert not retry.if_transient_error(exceptions.InvalidArgument(''))
def test_exponential_sleep_generator_base_2():
    """Without jitter, delays double each step and cap at the maximum."""
    delays = retry.exponential_sleep_generator(1, 60, 2, jitter=0.0)
    first_eight = list(itertools.islice(delays, 8))
    assert first_eight == [1, 2, 4, 8, 16, 32, 60, 60]
@mock.patch('random.uniform')
def test_exponential_sleep_generator_jitter(uniform):
    """Each delay gains a random jitter term (mocked to 1) before capping."""
    uniform.return_value = 1
    gen = retry.exponential_sleep_generator(
        1, 60, 2, jitter=2.2)
    result = list(itertools.islice(gen, 7))
    # Each delay is previous*2 + 1 (the mocked jitter), capped at 60.
    assert result == [1, 3, 7, 15, 31, 60, 60]
    # Jitter must be drawn uniformly from [0, jitter].
    uniform.assert_called_with(0.0, 2.2)
@mock.patch('time.sleep')
@mock.patch(
    'google.api.core.helpers.datetime_helpers.utcnow',
    return_value=datetime.datetime.min)
def test_retry_target_success(utcnow, sleep):
    """retry_target retries through predicate-matched errors, then returns."""
    predicate = retry.if_exception_type(ValueError)
    call_count = [0]
    def target():
        # Fail twice with a retryable error, succeed on the third call.
        call_count[0] += 1
        if call_count[0] < 3:
            raise ValueError()
        return 42
    result = retry.retry_target(target, predicate, range(10), None)
    assert result == 42
    assert call_count[0] == 3
    # One sleep per retry, delays taken from the sleep generator range(10).
    sleep.assert_has_calls([mock.call(0), mock.call(1)])
@mock.patch('time.sleep')
@mock.patch(
    'google.api.core.helpers.datetime_helpers.utcnow',
    return_value=datetime.datetime.min)
def test_retry_target_non_retryable_error(utcnow, sleep):
    """An error the predicate rejects propagates immediately, with no sleep."""
    predicate = retry.if_exception_type(ValueError)
    exception = TypeError()
    target = mock.Mock(side_effect=exception)
    with pytest.raises(TypeError) as exc_info:
        retry.retry_target(target, predicate, range(10), None)
    # The very same exception object must escape, unwrapped.
    assert exc_info.value == exception
    sleep.assert_not_called()
@mock.patch('time.sleep')
@mock.patch(
    'google.api.core.helpers.datetime_helpers.utcnow')
def test_retry_target_deadline_exceeded(utcnow, sleep):
    """Once the deadline passes, RetryError wraps the last retryable error."""
    predicate = retry.if_exception_type(ValueError)
    exception = ValueError('meep')
    target = mock.Mock(side_effect=exception)
    # Setup the timeline so that the first call takes 5 seconds but the second
    # call takes 6, which puts the retry over the deadline.
    utcnow.side_effect = [
        # The first call to utcnow establishes the start of the timeline.
        datetime.datetime.min,
        datetime.datetime.min + datetime.timedelta(seconds=5),
        datetime.datetime.min + datetime.timedelta(seconds=11)]
    with pytest.raises(exceptions.RetryError) as exc_info:
        retry.retry_target(target, predicate, range(10), deadline=10)
    # The RetryError must carry the last underlying exception as its cause.
    assert exc_info.value.cause == exception
    assert exc_info.match('Deadline of 10.0s exceeded')
    assert exc_info.match('last exception: meep')
    assert target.call_count == 2
def test_retry_target_bad_sleep_generator():
    """An exhausted sleep generator is a usage error and raises ValueError."""
    with pytest.raises(ValueError, match='Sleep generator'):
        retry.retry_target(mock.sentinel.target, mock.sentinel.predicate, [], None)
|
996,349 | ea76693ca7db37e3052003f3c9b2f11099f81beb | from CoSource import *
from CoGameM import *
# Render Work here, for local player only
class CoRender:
    """Draws the local player's view of the game world onto a pygame screen.

    World coordinates are in meters and converted with PPM (pixels per
    meter); the view is centered on ``cur_render_origin``.
    """

    def __init__(self, screen):
        # tolerate range for objects displayed in sight
        self.tolerateWH = TOLERANCE_RANGE
        self.screen = screen
        self.screenWH = (screen.get_width(),
                        screen.get_height())
        # Half-screen plus tolerance, converted to meters — anything within
        # this box of the view center is considered visible.
        self.display_range = (float(self.tolerateWH[0]+self.screenWH[0]/2)/PPM,
                        float(self.tolerateWH[1]+self.screenWH[1]/2)/PPM)
        # choose game objects and display them if insight
        self.cur_render_origin = (0,0)
    # encode_data format (pos, encode_data)
    # obj format (pos, img, (ix, iy, w, h), angle, front_back), pos is the center of the image
    def renderProcessor(self, objmultigroups):
        """Clear the screen and blit every object of every group.

        Each obj is (pos, encoded_render_info); the decoded tuple is
        (entity_kind, frame_index, angle, flip_flag) — see decodeRenderInfo.
        """
        self.screen.fill((0, 0, 0))
        for objgroups in objmultigroups:
            for obj in objgroups:
                # if self.isInSight((obj[0][0]*PPM, obj[0][1]*PPM)):
                # decode and restore the encode_data
                decode_data = decodeRenderInfo(obj[1])
                # Pick one animation frame out of the entity's sprite strip.
                # drange = (self.shift_len*self.image_idx, 0, self.shift_len, IMAGES[self.myimage].get_height())
                drange = (ENTITY_SHIFT[decode_data[0]]*decode_data[1], 0, ENTITY_SHIFT[decode_data[0]], IMAGES[decode_data[0]].get_height())
                tmp_image = IMAGES[decode_data[0]].subsurface(drange)
                if decode_data[3]: # flip the image if marked
                    tmp_image = pygame.transform.flip(tmp_image, True, False)
                # be careful, pygame take ccw as default
                tmp_image = pygame.transform.rotate(tmp_image, 0.0-decode_data[2])
                # World meters -> screen pixels, centered on the render origin
                # (screen y grows downward, hence the sign flip on y).
                self.renderBlit(tmp_image, (self.screenWH[0]/2-
                                            (self.cur_render_origin[0]
                                             -obj[0][0])*PPM-tmp_image.get_width()/2,
                                            self.screenWH[1]/2+(self.cur_render_origin[1]-obj[0][1])*PPM
                                            -tmp_image.get_height()/2))
        # self.screen.blit(tmp, (obj[0][0]*PPM-tmp.get_width()/2, self.screenWH[1]-(obj[0][1]*PPM+tmp.get_height()/2)))
    def renderBlit(self, image, pos):
        """Blit *image* at pixel position *pos*."""
        self.screen.blit(image, pos)
    def renderDisplay(self):
        """Flip the pygame back buffer to the display."""
        pygame.display.flip()
    def updateRenderOrigin(self, view_center): # in meters, the center of viewpos in phyworld
        if view_center:
            self.cur_render_origin = view_center
    def isInSight(self, entity_pos, view_center): # all in meters
        """True if *entity_pos* lies inside the visible box around *view_center*."""
        dx, dy = math.fabs(entity_pos[0]-view_center[0]), math.fabs(entity_pos[1]-view_center[1])
        return dx <= self.display_range[0] and dy <= self.display_range[1]
    # pos should the position of the screen-center
    # def updateRenderOrigin(self, player_pos, mouse_pos):
    #     self.cur_render_origin = (VIEW_SCALA*(mouse_pos[0]
    #                                          -self.screenWH[0]/2)+PPM*player_pos[0],
    #                               VIEW_SCALA*(self.screenWH[1]/2
    #                                          -mouse_pos[1])+PPM*player_pos[1])
    # def isInSight(self, pos): # all in pixels, pos is the center of the image
    #     dx, dy = math.fabs(pos[0]-self.cur_render_origin[0]), math.fabs(pos[1]-self.cur_render_origin[1])
    #     return dx <= self.display_range[0] and dy <= self.display_range[1]
    def quit(self):
        """Hook for render-side cleanup; nothing to release at present."""
        pass
996,350 | d45dbcb7e1b1f5d22acf825615e9f9af77e821e7 | import random
import time
from collections import namedtuple
from threading import Thread, Event
from typing import List
import numpy as np
from pyspark import SparkContext
try:
import CoresetKSeg
import ksegment
from stack import Stack
import utils_seg
except ImportError:
from k_segment_coreset import ksegment
from k_segment_coreset import CoresetKSeg
from k_segment_coreset.stack import Stack
from k_segment_coreset import utils_seg
StackItem = namedtuple("StackItem", "coreset level")
WeightedPointSet = namedtuple("WeightedPointSet", "points weights")
class CoresetStreamer(Thread):
    """Streams points into a merge-tree of k-segment coresets.

    Leaves are coresets of incoming chunks; equal-level coresets on the
    stack are merged pairwise, so the stack holds at most one coreset per
    level (the classic streaming merge-and-reduce tree).
    """

    def __init__(
        self, coreset_alg, sample_size: int, eps: float, k: int, streaming_context
    ):
        super().__init__(name="coreset stream thread")
        self.coreset_alg = coreset_alg       # coreset class used when merging
        self.sample_size = sample_size
        self.last_leaf = []
        self.eps = eps                       # approximation parameter
        self.k = k                           # number of segments
        self.streaming_context = streaming_context
        self._stop_event = Event()
        self.stack = Stack()                 # merge tree stored as a stack
        self.leaf_size = sample_size
    def _add_leaf(self, points, weights):
        """Compress *points* into a leaf coreset and insert it into the tree."""
        points = CoresetKSeg.CoresetKSeg.compute_coreset(
            data=points, k=self.k, eps=self.eps
        )
        if weights is None:
            # weights = np.ones((points.shape[0])).ravel()
            weights = np.ones(len(points)).ravel()
        self._insert_into_tree(WeightedPointSet(points, weights))
    def _merge(self, pset1: WeightedPointSet, pset2: WeightedPointSet):
        """Merge two weighted point sets and re-compress them into one coreset."""
        try:
            if type(pset1.points) is np.ndarray:
                points = np.vstack([pset1.points, pset2.points])
            else:
                points = pset1.points + pset2.points
        except ValueError as e:
            print(e)
            raise e
        weights = np.hstack([pset1.weights, pset2.weights])
        # points = pset1.points + pset2.points
        # weights = pset1.weights + pset2.weights
        cset = self.coreset_alg(k=self.k, eps=self.eps, weights=weights)
        coreset, weights = cset.compute(data_points=points)
        return WeightedPointSet(coreset, weights)
    def _is_correct_level(self, level):
        """True if a coreset of *level* may be pushed without merging first."""
        if self.stack.is_empty():
            return True
        elif self.stack.top().level > level:
            return True
        elif self.stack.top().level == level:
            return False
        else:
            raise Exception("New level should be smaller")
    def _insert_into_tree(self, coreset):
        """Push *coreset*, merging equal-level neighbors (carry propagation)."""
        level = 1
        while not self._is_correct_level(level):
            last = self.stack.pop()
            coreset = self._merge(last.coreset, coreset)
            level += 1
        self.stack.push(StackItem(coreset, level))
    def add_points(self, points):
        """Add a set of points to the stream.
        If the set is larger than leaf_size, it is split
        into several sets and a coreset is constructed on each set.
        """
        # TODO: maybe tay into account leaf_size or maybe external chunk size is enough
        # for split in np.array_split(points, self.leaf_size):
        self._add_leaf(points, None)
    def get_unified_coreset(self):
        """Collapse the whole tree into one coreset; returns (points, weights)."""
        solution = None
        while not self.stack.is_empty():
            coreset = self.stack.pop().coreset
            if solution is None:
                solution = coreset
            else:
                solution = self._merge(solution, coreset)
        return solution.points, solution.weights
    def __str__(self):
        return "{}".format(self.stack)
def batch(iterable_data, batch_size: int = 10, random_size_chunks: bool = False):
    """Yield successive slices of *iterable_data*.

    :param iterable_data: sliceable sequence (list, ndarray, ...)
    :param batch_size: nominal chunk length
    :param random_size_chunks: when True, each chunk length is drawn
        uniformly from [min(20, batch_size), max(50, batch_size)]
    """
    data_len = len(iterable_data)
    # Bug fix: this previously started at `batch_size`, which silently
    # dropped the first `batch_size` items from the stream.
    chunk_start = 0
    min_batch_size = min(20, batch_size)
    max_batch_size = max(50, batch_size)
    current_chunk_size = batch_size
    while chunk_start < data_len:
        if random_size_chunks:
            current_chunk_size = random.randint(min_batch_size, max_batch_size)
        yield iterable_data[
            chunk_start : min(chunk_start + current_chunk_size, data_len)
        ]
        chunk_start += current_chunk_size
def timeit(method):
    """Decorator that reports the wall-clock runtime of *method*.

    If the caller passes a ``log_time`` dict, the elapsed milliseconds are
    stored there under ``log_name`` (default: upper-cased function name);
    otherwise the timing is printed.
    """
    def timed(*args, **kw):
        start = time.time()
        result = method(*args, **kw)
        elapsed_ms = (time.time() - start) * 1000
        if "log_time" in kw:
            name = kw.get("log_name", method.__name__.upper())
            kw["log_time"][name] = int(elapsed_ms)
        else:
            print("%r %2.2f ms" % (method.__name__, elapsed_ms))
        return result
    return timed
@timeit
def get_dividers(points: np.ndarray, col_to_divide_by: int = 0) -> List:
    """Segment one column of *points* into k pieces via a streamed coreset.

    Builds (index, value) pairs, feeds them chunk-wise through the
    streaming coreset tree, then runs k-segmentation on the unified
    coreset. Returns the divider positions.
    """
    # Pair each value with its 1-based position so segmentation is over time.
    points = np.column_stack(
        (np.arange(1, len(points) + 1), points[:, col_to_divide_by])
    )
    k = 4
    eps = 0.3
    stream = CoresetStreamer(
        CoresetKSeg.CoresetKSeg, sample_size=200, eps=eps, k=k, streaming_context=None
    )
    for chunk in batch(points, batch_size=70, random_size_chunks=False):
        # print(len(chunk))
        stream.add_points(chunk)
        # print("#"*60, "\n\t", stream)
    p_cset, w_cset = stream.get_unified_coreset()
    dividers = ksegment.coreset_k_segment(p_cset, k)
    return dividers
@timeit
def get_dividers_spark_no_tree(points: np.ndarray, col_to_divide_by: int = 0) -> List:
    """Flat (no merge-tree) Spark variant: coreset each chunk, join, segment.

    Chunks are mapped to coresets in parallel on a local SparkContext,
    concatenated, compressed once more, visualized, and segmented.
    """
    points = np.column_stack(
        (np.arange(1, len(points) + 1), points[:, col_to_divide_by])
    )
    k = 5
    eps = 0.4
    chunk_size = 200
    aggregated_for_rdd = []
    for i in range(0, len(points), chunk_size):
        aggregated_for_rdd.append(points[i : i + chunk_size])
    sc = SparkContext()
    data = sc.parallelize(aggregated_for_rdd)
    all_coresets = data.map(
        lambda x: CoresetKSeg.CoresetKSeg.compute_coreset(x, k, eps)
    ).collect()
    sc.stop()
    # Flatten the per-chunk coresets into one list before the final pass.
    tmp = []
    for t in all_coresets:
        tmp += t
    coreset_join = CoresetKSeg.CoresetKSeg.compute_coreset(tmp, k, eps, is_coreset=True)
    utils_seg.visualize_2d(points, coreset_join, k, eps, show=True)
    dividers = ksegment.coreset_k_segment(coreset_join, k)
    return dividers
# Manual smoke test against a local dataset (hard-coded developer path).
if __name__ == "__main__":
    file_path = "/home/ge/k-segment/datasets/segmentation/KO_no_date.csv"
    points = utils_seg.load_csv_into_dataframe(file_path).values
    print(get_dividers(points))
    print(get_dividers_spark_no_tree(points))
|
996,351 | ab1ebc1253a1710f3bcbcd9d27caf269f313b48d | KEYWORDS = ['class', 'constructor', 'function', 'method', 'field', 'static', 'var', 'int', 'char', 'boolean', 'void',
'true', 'false', 'null', 'this', 'let', 'do', 'if', 'else', 'while', 'return']
SYMBOLS = ['{', '}', '(', ')', '[', ']', '.', ',', ';', '+', '-', '*', '/', '&', '|', '<', '>', '=', '~']
STRING_CONST_PATTERN = '"[^\\n"]*?"'
IDENTIFIER_PATTERN = '[a-zA-Z_][a-zA-Z0-9_]*'
# comments removing
INLINE_COMMENT_PATTERN = "//.*\n"
MULTILINE_COMMENT_PATTERN = "/\*\*\n(\s\*.*\n)*\s\*/"
API_COMMENT_PATTERN = "/\*\*.*\*/"
INLINE_COMMENT_REPLACEMENT = "\n"
DEFAULT_REPLACEMENT = ""
COMMENTS_REMOVING = {INLINE_COMMENT_PATTERN: INLINE_COMMENT_REPLACEMENT,
MULTILINE_COMMENT_PATTERN: DEFAULT_REPLACEMENT,
API_COMMENT_PATTERN: DEFAULT_REPLACEMENT}
OPERATORS = ['+', '-', '*', '/', '&', '|', '<', '>', '=']
UNARY_OPERATORS = ['-', '~']
KEYWORD_CONSTANTS = ['true', 'false', 'null', 'this']
|
996,352 | 33547c7589ab8ea929f367015a53fabde4293890 | from collections import Container
from os import sep, access, R_OK
from os.path import join, isfile
from uuid import UUID
from tornado.ioloop import IOLoop
from urpc.ast import Protocol
from urpc.storage.json import JsonStorage
try:
from settings import temp_dir
except ImportError:
temp_dir = join(sep, "tmp") # join("tmp") for Windows
class CachedItem:
    """A cache slot pairing a project with its pending eviction timeout."""

    def __init__(self, timeout, project):
        # timeout: IOLoop timeout handle (or None); project: cached Protocol.
        self.project = project
        self.timeout = timeout
class SessionManager(Container):
    """Per-session project store: RAM cache with timed dump-to-disk eviction.

    NOTE(review): `Container` is imported as `from collections import
    Container`, which was removed from `collections` in Python 3.10 — the
    import should come from `collections.abc`; confirm the target runtime.
    """
    # after 3 minutes without access project is removed from RAM cache and is dumped to disk
    _dump_timeout = 3 * 60
    def __init__(self):
        self._storage = JsonStorage()
        self._loop = IOLoop.current()
        self._cache = {}  # uid (UUID) -> CachedItem
    def _path_from_uid(self, uid):
        """Return the on-disk JSON path used to persist session *uid*."""
        file_name = str(uid) + ".json"
        file_path = join(temp_dir, file_name)
        return file_path
    def __getitem__(self, uid):
        """Return the project for *uid*, loading from disk or creating a default.

        Every access re-arms the eviction timer.
        """
        assert isinstance(uid, UUID)
        item = self._cache.setdefault(uid, CachedItem(None, None))
        if item.timeout:
            # Cancel the pending eviction; it is re-scheduled below.
            self._loop.remove_timeout(item.timeout)
        if not item.project:
            path = self._path_from_uid(uid)
            if isfile(path) and access(path, R_OK):
                with open(path, "rb") as f:
                    item.project = self._storage.load(f)
            else:
                # No dump on disk: start the session with an empty project.
                item.project = Protocol(name="Default project", version="0")
        item.timeout = self._loop.call_later(self._dump_timeout, self._dump_cached, uid)
        return item.project
    def __setitem__(self, uid, project):
        """Replace the cached project for *uid* and re-arm its eviction timer."""
        assert isinstance(uid, UUID) and isinstance(project, Protocol)
        item = self._cache.setdefault(uid, CachedItem(None, None))
        if item.timeout:
            self._loop.remove_timeout(item.timeout)
        item.project = project
        item.timeout = self._loop.call_later(self._dump_timeout, self._dump_cached, uid)
    def __contains__(self, uid):
        """True if *uid* is cached in RAM or has a readable dump on disk."""
        assert isinstance(uid, UUID)
        if uid in self._cache:
            return True
        else:
            file_name = str(uid) + ".json"
            file_path = join(temp_dir, file_name)
            return isfile(file_path) and access(file_path, R_OK)
    def _dump_cached(self, uid):
        """Eviction callback: write the project to disk and drop it from RAM."""
        assert isinstance(uid, UUID)
        item = self._cache.pop(uid)
        self._loop.remove_timeout(item.timeout)
        path = self._path_from_uid(uid)
        with open(path, "wb") as f:
            self._storage.save(item.project, f)
|
996,353 | 68c9fc9c5944868e5fd0175584f42f41738325db | """
Compute the difference between the 12 UTC 850 hPa temp and afternoon high
"""
import datetime
from calendar import month_abbr
from pyiem.datatypes import temperature
from pyiem.plot.use_agg import plt
from pyiem.util import get_dbconn
def main():
    """Go Main Go.

    For each 12 UTC 850 hPa sounding temperature near Omaha, find the
    following afternoon's high at OMA and collect the difference per
    month, then box-plot the monthly distributions to test.png.
    """
    ASOS = get_dbconn("asos")
    acursor = ASOS.cursor()
    POSTGIS = get_dbconn("postgis")
    pcursor = POSTGIS.cursor()
    # One list of temperature differences per calendar month.
    data = [0] * 12
    for i in range(12):
        data[i] = []
    pcursor.execute(
        """
    select valid, tmpc from raob_profile p JOIN raob_flights f on
    (p.fid = f.fid) where f.station in ('KOAX', 'KOVN', 'KOMA') and
    p.pressure = 850 and extract(hour from valid at time zone 'UTC') = 12
    and tmpc > -40
    ORDER by valid ASC
    """
    )
    for row in pcursor:
        valid = row[0]
        t850 = temperature(row[1], "C")
        # Afternoon high: max temp in the 12 hours after the sounding.
        acursor.execute(
            f"SELECT max(tmpf) from t{valid.year} "
            "WHERE station = 'OMA' and valid BETWEEN %s and %s",
            (valid, valid + datetime.timedelta(hours=12)),
        )
        row2 = acursor.fetchone()
        if row2[0] is None:
            continue
        high = temperature(row2[0], "F")
        # Store surface-high minus 850 hPa temp, both in Celsius.
        data[valid.month - 1].append(high.value("C") - t850.value("C"))
    (fig, ax) = plt.subplots(1, 1)
    # Reference line at 13 C difference.
    ax.plot([1, 12], [13, 13], "-", lw=1.5, color="green", zorder=1)
    ax.boxplot(data)
    ax.set_title("1960-2013 Omaha Daytime High Temp vs 12 UTC 850 hPa Temp")
    ax.set_ylabel(r"Temperature Difference $^\circ$C")
    ax.set_xticks(range(1, 13))
    ax.set_ylim(-20, 25)
    ax.set_xticklabels(month_abbr[1:])
    ax.grid(axis="y")
    fig.savefig("test.png")
# Script entry point.
if __name__ == "__main__":
    main()
|
996,354 | 7f1264e9f40cb9ce3b7f0ffbf7f9478d285db9e0 | import turtle
t = turtle.Pen()
t.reset()
for x in range(1,6):
t.forward(100)
t.left(108)
|
996,355 | 0adc08a3272ee72077aab646dca6169c4b033a05 | from django.shortcuts import render, redirect
from django.contrib.auth import login
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
def register(request):
    """Sign-up view: blank form on GET, create-and-login on valid POST."""
    if request.method == 'POST':
        # Bound form: validate the submitted credentials.
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            # Log the fresh account in immediately, then go home.
            login(request, new_user)
            return redirect('blog:index')
    else:
        # Unbound form for the initial GET request.
        form = UserCreationForm()
    return render(request, 'registration/register.html', {'form': form})
@login_required
def account(request):
    """Render the profile page for the signed-in user."""
    return render(request, 'registration/account.html', {'user': request.user})
|
996,356 | 09512621dc27741b31dc74ef038021dd92ab6cc5 | import matplotlib as plt
import seaborn as sns
import pandas as pd
import numpy as np
# For each distance model, average the per-row miss/FMR/intrusion rates at
# each threshold and plot the three curves to a PDF.
# The per-threshold accumulators are created dynamically through locals();
# this only works because the code runs at MODULE level, where locals() is
# the (writable) module namespace — inside a function it would silently fail.
#models=['eucli','cos','manhat']
xyz=[]
models=["manhat"]
for model in models:
    #model="manhat"
    # Threshold grid per distance metric.
    if(model=="eucli"):
        threshold=[0.05,0.11,0.16,0.21,0.27,0.33,0.38,0.44,0.49,0.55]
    elif(model=="cos"):
        threshold=[0.00,0.01,0.03,0.04,0.05,0.07,0.09,0.13,0.14,0.15]
    elif(model=="manhat"):
        threshold=[10,17,24,31,38,45,52,59,66,71]
    if(model=="eucli"):
        data=pd.read_csv("/home/dk-tanmay/Desktop/euclidean.csv")
    elif(model=="cos"):
        data=pd.read_csv("/home/dk-tanmay/Desktop/cosine.csv")
    elif(model=="manhat"):
        data=pd.read_csv("/home/dk-tanmay/Desktop/manhatten.csv")
    total_miss_rate=0
    total_fmr=0
    total_intrusion=0
    # Zero one accumulator triple per threshold.
    # NOTE(review): here the key uses round(thre, 3) while the loops below
    # use plain float(thre) — identical for these integer thresholds, but a
    # mismatch risk if fractional thresholds are reinstated.
    for thre in threshold:
        for index,value in data.iterrows():
            thre_name = round(float(thre),3)
            locals()['total_miss_rate_{}'.format(str(thre_name))] = 0
            locals()['total_fmr_{}'.format(str(thre_name))] = 0
            locals()['total_intrusion_{}'.format(str(thre_name))] = 0
    # Sum the rates of every row whose Threshold column matches.
    for thre in threshold:
        count=0
        for index,value in data.iterrows():
            thre_name = float(thre)
            xyz.append(data['Threshold'][index])
            if(data['Threshold'][index]==thre):
                print(model,thre)
                locals()['total_miss_rate_{}'.format(str(thre_name))] += data['Miss_R'][index]
                locals()['total_fmr_{}'.format(str(thre_name))] += data['FMR'][index]
                locals()['total_intrusion_{}'.format(str(thre_name))] += data['Intrusion'][index]
                count+=1
        print(count)
    # Convert sums to means (count is the row count of the LAST threshold).
    for thre in threshold:
        thre_name = float(thre)
        locals()['total_miss_rate_{}'.format(str(thre_name))] = locals()['total_miss_rate_{}'.format(str(thre_name))]/count
        locals()['total_fmr_{}'.format(str(thre_name))] = locals()['total_fmr_{}'.format(str(thre_name))]/count
        locals()['total_intrusion_{}'.format(str(thre_name))] = locals()['total_intrusion_{}'.format(str(thre_name))]/count
    # Collect the averaged rates in threshold order for plotting.
    miss_rates=[]
    fmr_rates=[]
    intrusion_rates=[]
    for thre in threshold:
        thre_name = float(thre)
        miss_rates.append(locals()['total_miss_rate_{}'.format(str(thre_name))])
        fmr_rates.append(locals()['total_fmr_{}'.format(str(thre_name))])
        intrusion_rates.append(locals()['total_intrusion_{}'.format(str(thre_name))])
    df= pd.DataFrame()
    df['miss_rates']=miss_rates
    df['fmr']=fmr_rates
    df['intrusions']=intrusion_rates
    # One PDF per model; matplotlib is imported as `plt` at file top
    # (matplotlib.rcParams is valid despite the unusual alias).
    if(model=="eucli"):
        plt.rcParams['figure.figsize']=(15,10)
        g = sns.lineplot(data=df,marker="o")
        g.set(xticks=np.arange(10),xticklabels=threshold)
        g.set(xlabel='Model_threshold', ylabel='Rates', title= 'euclidean distance')
        g.figure.savefig("euclidean.pdf")
    elif(model=="cos"):
        plt.rcParams['figure.figsize']=(15,10)
        h = sns.lineplot(data=df,marker="o")
        h.set(xticks=np.arange(10),xticklabels=threshold)
        h.set(xlabel='Model_threshold', ylabel='Rates', title= 'cosine distance')
        h.figure.savefig("cosine.pdf")
    elif(model=="manhat"):
        plt.rcParams['figure.figsize']=(15,10)
        j = sns.lineplot(data=df,marker="o")
        j.set(xticks=np.arange(10),xticklabels=threshold)
        j.set(xlabel='Model_threshold', ylabel='Rates', title= 'manhatten distance')
        j.figure.savefig("manhatten.pdf")
|
996,357 | 0920a1c1b655e4b526f6c2c7d898bef25b39094e | #coding=utf-8
"""
Ugly number is a number that only have factors 3, 5 and 7.
Design an algorithm to find the Kth ugly number. The first 5 ugly numbers are 3, 5, 7, 9, 15 ...
Example
If K=4, return 9
Challenge
O(K log K) or O(K) time
"""
class Solution:
    """
    @param k: The number k.
    @return: The kth prime number as description.
    main reference: http://bookshadow.com/weblog/2015/08/19/leetcode-ugly-number-ii/
    Notice that all the ugly number must be the previous ugly number *3 or *5 or *7,
    using this fact, we will divide these numbers into 3 stacks and pop 1 element
    once a time from these 3 stacks, here is the example for the case 2,3,5,same
    principle apply for the 3,5,7:
    The ugly-number sequence can be split into the following 3 sub-lists:
    (1) 1x2, 2x2, 3x2, 4x2, 5x2, ...
    (2) 1x3, 2x3, 3x3, 4x3, 5x3, ...
    (3) 1x5, 2x5, 3x5, 4x5, 5x5, ...
    Every sub-list is the ugly sequence itself (1, 2, 3, 4, 5, ...)
    multiplied by 2, 3, 5 respectively.  Using a merge-sort style merge, we
    take the smallest head of the three sub-lists at each step and advance
    that sub-list's pointer.
    """
    def kthPrimeNumber(self, k):
        # NOTE: Python 2 code (uses xrange).
        # write your code here
        if k<1:
            return 0
        # first way I do, which is not the hint means
        # stack3,stack5,stack7=[3],[5],[7]
        # # res=[]
        # for _ in xrange(k):
        #     min_pop=min(stack3[0],stack5[0],stack7[0])
        #     if min_pop==stack3[0]:
        #         stack3.pop(0)
        #     elif min_pop==stack5[0]:
        #         stack5.pop(0)
        #     else:
        #         stack7.pop(0)
        #     stack3.append(min_pop*3)
        #     stack5.append(min_pop*5)
        #     stack7.append(min_pop*7)
        #     # res.append(min_pop)
        # Three-pointer merge: n3/n5/n7 index the next ugly number whose
        # x3 / x5 / x7 multiple has not been emitted yet.
        n3,n5,n7=0,0,0
        res=[1]
        for _ in xrange(k):
            m3,m5,m7=res[n3]*3,res[n5]*5,res[n7]*7
            min_push=min(m3,m5,m7)
            # Advance EVERY pointer that produced the minimum, so
            # duplicates (e.g. 15 = 3*5 = 5*3) are emitted only once.
            if min_push==m3:
                n3+=1
            if min_push==m5:
                n5+=1
            if min_push==m7:
                n7+=1
            res.append(min_push)
        # res = [1, u1, ..., uk]; the k-th ugly number is the last element.
        return res[-1]
# Manual smoke test (Python 2 `print` statement; expects 105 for k=19).
if __name__=='__main__':
    print Solution().kthPrimeNumber(19)
"""
another problem of ugly number:
Write a program to check whether a given number is an ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5.
For example, 6, 8 are ugly while 14 is not ugly since it includes another prime factor 7.
Note that 1 is typically treated as an ugly number
this problem is much easier,here is one java solution:
我们不断整除2,3,5直到其不能被除尽,然后判断最后结果是否是1.
"""
"""
public boolean isUgly(int num) {
if(num<=0) return false;
if(num==1) return true;
while(num>=2 && num%2==0) num/=2;
while(num>=3 && num%3==0) num/=3;
while(num>=5 && num%5==0) num/=5;
return num==1;
}
""" |
996,358 | 28f62de57caf13af18b7003865686b5a51e5d760 | """
"""
import json
from time import clock
import bisect
# Input datasets (one JSON object per line).
businesses = 'JSON/businesses_many_reviews.json'
reviews = 'JSON/yelp_academic_dataset_review.json'
# Byte offsets allow resuming a previously interrupted run.
offset_b, offset_r = 0, 0
business_list = []  # kept sorted so membership tests can binary-search
# NOTE(review): time.clock() (and py2 print below) — this is Python 2 code;
# clock() was removed in Python 3.8.
t1 = clock()
b_count, r_count = 0, 0
def index(a, x, yes, no):
    """Binary-search sorted *a* for *x*; bump and return the (yes, no) counters.

    *yes* is incremented on a hit, *no* on a miss.
    """
    pos = bisect.bisect_left(a, x)
    found = pos != len(a) and a[pos] == x
    if found:
        yes += 1
    else:
        no += 1
    return yes, no
# Pass 1: load every business id into a sorted list (Python 2 script).
with open(businesses) as b:
    if offset_b:
        b.seek(offset_b, 0) # seek from beginning of the file to offset
    for line in b:
        obj = json.loads(line)
        business = obj['business_id']
        bisect.insort(business_list, str(business))
        b_count += 1
        # if b_count >= 5:
        #     break
t2 = clock()
print "Done putting businesses in a list, time elapsed: " + str(t2 - t1)
# Pass 2: stream every review; append the ones whose business is in the
# list to a per-business JSON file.
with open(reviews) as r:
    if offset_r:
        r.seek(offset_r, 0)
    yes, no = 0, 0
    for line in r:
        try:
            offset_r += len(line)
            obj = json.loads(line)
            business = obj['business_id']
            old_yes = yes
            yes, no = index(business_list, business, yes, no)
            if old_yes + 1 == yes: # write review to file keyed w/ business_id
                business_file = 'JSON/over_1000_reviews/' + str(obj['business_id']) + '.json'
                with open(business_file, "a") as f:
                    f.write(line)
            r_count += 1
            if r_count % 100000 == 0:
                print "processed " + str(r_count) + " of 4153150 (" \
                      + str(round((r_count/float(4153150))*100, 4)) + "%)"
        except KeyboardInterrupt:
            # can use offset_r to start processing at point in file where
            # processing was terminated with f.seek(offset) in another run
            print "\n" + str(offset_r)
            break
    print "yes, no: " + str(yes) + ", " + str(no)
t3 = clock()
print "Done matching business ids, total time elapsed: " + str(t3 - t1)
print r_count, b_count
|
996,359 | e20a231e593b77ba63d689d7e88ce8cc3534ec2e | from NodoLS import nodeSimple
from graphviz import Digraph
class Simple_list:
    """Singly linked list of (x, y, value) matrix cells.

    ``tam`` holds the matrix dimension (set on every insert); it is used to
    know when a row ends while rebuilding the matrix as text.
    """

    def __init__(self):
        self.root = None  # first node, or None when empty
        self.tam = 0      # matrix dimension (n)
    def addSimple(self, x, y, value, n):
        """Append a node for cell (x, y) with *value*; record dimension *n*."""
        first = nodeSimple(x, y, value)
        self.tam = n
        if self.root is None:
            self.root = first
        else:
            # Walk to the tail and link the new node there.
            aux = self.root
            while aux.next is not None:
                aux = aux.next
            aux.next = first
    def view(self):
        """Print every node, then run the Graphviz dump."""
        aux = self.root
        while aux is not None:
            print(' Posicion X: ', str(aux.x) + ' Posicion Y: ', str(aux.y) + ' Valor: ', str(aux.value))
            aux = aux.next
        print('\n')
        Simple_list.Graphviz(self)
    def Graphviz(self):
        """Print the in-range (x, y) positions.

        NOTE(review): despite the name, this does not build a graphviz
        Digraph; it only prints coordinates, and it skips the last node
        because it breaks when ``aux.next is None``.
        """
        aux = self.root
        cont = 0
        while aux is not None:
            if aux.next is None:
                break
            else:
                if int(aux.x) <= int(self.tam) and int(aux.y) <= int(self.tam):
                    print(aux.x, aux.y)
                    cont += 1
                aux = aux.next
        print('\n')
    def binaria(self):
        """Build a binary mask ('1' for non-zero values) and compare it."""
        aux = self.root
        temp = ''
        while aux is not None:
            if aux.value != '0':
                temp = temp + '1'
            else:
                temp = temp + '0'
            # End of a matrix row: start a new text line.
            if aux.y == self.tam:
                temp = temp + '\n'
            aux = aux.next
        Simple_list.comparar(self, temp, self.tam)
    def comparar(self, text, row):
        """Print each original matrix row next to its binary-mask row."""
        aux = self.root
        temp = ''
        while aux is not None:
            if aux.y <= row:
                temp = temp + str(aux.value)
                if aux.y == row:
                    temp = temp + '\n'
            aux = aux.next
        b = temp.split('\n')
        a = text.split('\n')
        cont = 0
        while cont <= (int(row) - 1):
            print(b[cont] + ' ---> ' + a[cont])
            cont += 1
|
996,360 | 15a4aa7863d9049b9eb75ae5ceec2b2e3045457e | #R python
import os
#os.chdir("Desktop/python test/")
#os.getcwd()
import scrublet as scr
#import solo as sl
#import doubletdetection
import numpy as np
import tarfile
import matplotlib.pyplot as plt
import pandas as pd
def load_csv(path):
    """Load a CSV file (first row is the header) into a numpy array.

    :param path: path to the CSV file
    :return: 2-D numpy array with one row per data line
    """
    data_read = pd.read_csv(path)
    # Renamed from `list`, which shadowed the builtin of the same name.
    rows = data_read.values.tolist()
    data = np.array(rows)
    print(data.shape)
    # print(data)
    return data
# Run scrublet doublet detection on a genes x cells count matrix.
path = "counts.csv"
raw_counts = load_csv(path)
# Drop the first column (row labels) and orient as cells x genes.
raw_counts = np.delete(raw_counts, 0, axis=1)
# Fix: `np.float` was a deprecated alias removed in NumPy 1.24; the
# builtin `float` is the documented replacement.
raw_counts = raw_counts.astype(float)
raw_counts = raw_counts.transpose()
scrub = scr.Scrublet(raw_counts)
doublet_scores = scrub.scrub_doublets()
x = scrub.calculate_doublet_scores()
np.savetxt('./scrublet.txt', x)
#try:
#    doublet_scores, predicted_doublets = scrub.scrub_doublets()
#except:
#    np.savetxt('./scrublet.txt',scrub.calculate_doublet_scores())
996,361 | 5be3916248af2d9bc770a5f2fc00e1632949a3a1 | # -*- coding: utf-8 -*-
# @Time : 2020/12/21 14:35
# @Author : Baron!
import json
import logging
import requests
log = logging.getLogger(name='4399-cdn')
default_push_url = 'https://fapi.gz4399.com/api/portal/v1/push'
default_preheat_url = 'https://fapi.gz4399.com/api/portal/v1/preheat'
def _domain_url(domain):
domain = str(domain).lower()
if not (domain.startswith('http://') or domain.startswith('https://')):
domain = 'https://' + domain
if domain.endswith('/'):
domain = domain[:len(domain) - 1]
return domain
def _refresh(domain, paths):
data = {}
urls = []
dirs = []
if isinstance(paths, str):
paths = [paths]
for i in paths:
ri = i.rindex('/')
name = i[ri + 1:]
i = domain + i
if not name:
dirs.append(i)
else:
urls.append(i)
if len(urls) != 0:
data["urls"] = urls
data["urlAction"] = "expire"
if len(dirs) != 0:
data["dirs"] = dirs
data["dirAction"] = "expire"
refresh_data = json.dumps(data)
return refresh_data
def _preheat(domain, paths):
data = {}
urls = []
if isinstance(paths, str):
paths = [paths]
for i in paths:
ri = i.rindex('/')
name = i[ri + 1:]
i = domain + i
if not name:
return False
urls.append(i)
if len(urls) != 0:
data["urls"] = urls
data["urlAction"] = "preheat_urlcomm"
preheat_data = json.dumps(data)
return preheat_data
def cdn_refresh(cfg, domain, paths):
    """POST a cache-purge request to the 4399 CDN portal.

    :param cfg: dict with 'user', 'password' and optional 'push_url'
    :param domain: site domain (scheme added/normalised automatically)
    :param paths: path string or list of paths; trailing '/' means directory
    :return: True on success; a dict (keyed "异常", "exception") with the
        API error code/message on failure; repr(exception) string on a
        local/network error.
    """
    try:
        headers = {
            "X-Auth-User": cfg.get('user'),
            "X-Auth-Password": cfg.get('password')
        }
        data = _refresh(_domain_url(domain), paths)
        post_url = cfg.get('push_url', default_push_url)
        print("4399 cdn push", post_url, data)
        res = requests.post(url=post_url, data=data, headers=headers)
        callback = json.loads(res.content)
        code = callback['code']
        msg = callback['message']
        # code 0 is the portal's success indicator.
        if code == 0:
            return True
        return {"异常": str(code) + ' - ' + msg}
    except Exception as e:
        log.exception(e)
        return repr(e)
def cdn_preheat(cfg, domain, paths):
    """POST a cache-preheat request to the 4399 CDN portal.

    Mirrors :func:`cdn_refresh` but warms URLs instead of purging them.
    Same return convention: True, an error dict keyed "异常" ("exception"),
    or repr(exception) on a local/network failure.
    """
    try:
        headers = {
            "X-Auth-User": cfg.get('user'),
            "X-Auth-Password": cfg.get('password')
        }
        data = _preheat(_domain_url(domain), paths)
        post_url = cfg.get('preheat_url', default_preheat_url)
        print("4399 cdn preheat", post_url, data)
        res = requests.post(url=post_url, data=data, headers=headers)
        callback = json.loads(res.content)
        code = callback['code']
        msg = callback['message']
        if code == 0:
            return True
        return {"异常": str(code) + ' - ' + msg}
    except Exception as e:
        log.exception(e)
        return repr(e)
# Manual smoke test with placeholder ("***") credentials and paths.
if __name__ == '__main__':
    cfg = {
        "user": "***",
        "password": "***",
        "domain": "csjs-cdnres.netfungame.com"
    }
    domain = "http://csjs-cdnres.netfungame.com"
    paths = "***"
    result = cdn_refresh(cfg, domain, paths)
    print("refresh result:", result)
    paths = "/game/ios4399-cod/index.html"
    result = cdn_preheat(cfg, domain, paths)
    print("preheat result:", result)
|
996,362 | ddc30af951b83b317353325631cfdb71c86e0b13 | import matplotlib.pyplot as plt
import numpy as np
fig, ax_f = plt.subplots()
#ax_c = ax_f.twinx()
x = np.linspace(10, 30, 100)
k = 1
ax_f.plot(x, np.cos(k * x))
k2 = 0.5
ax_f.plot(x, np.cos(k2 * x))
ax_f.set_xlim(10, 30)
ax_f.set_title('Задание 4.')
#ax_f.set_ylabel('Fahrenheit')
#ax_c.set_ylabel('Celsius')
plt.show() |
996,363 | 04db6dfa56d09939a6531f8e453619a02b848b82 | from typing import List
import pandas as pd
from solutions.baseline.raifhack_ds.metrics import metrics_stat
from pipeline.transforms import BaseTransform
from pipeline.evaluators import BaseEvaluator
from .base_pipeline import BasePipeline
class FitPreidctPipeline(BasePipeline):
    """Feature pipeline: apply transforms, clean columns, fit and score.

    NOTE(review): the class name misspells "Predict"; renaming would break
    existing importers, so it is documented rather than changed.
    """
    def __init__(self, transforms: List[BaseTransform], evaluator: BaseEvaluator):
        self.transforms = transforms
        self.evaluator = evaluator
    def preprocess(self, df: pd.DataFrame) -> pd.DataFrame:
        """Applies each transform, assigns result of it as new column(s)

        A transform may return one Series (stored under transform.name) or
        a list of Series (stored as name_0, name_1, ...). Mutates *df* in
        place and returns it.
        """
        for transform in self.transforms:
            cols = transform(df)
            if isinstance(cols, pd.Series):
                df.loc[:, transform.name] = cols
            elif isinstance(cols, list):
                for i, col in enumerate(cols):
                    if not isinstance(col, pd.Series):
                        raise ValueError('Invalid Transform return type:', type(cols))
                    df.loc[:, f'{transform.name}_{i}'] = col
            else:
                raise ValueError('Invalid Transform return type:', type(cols))
        return df
    def postprocess(self,
                    df: pd.DataFrame,
                    drop_cols_names: List[str],
                    drop_na_cols: bool = False,
                    drop_na_rows: bool = False) -> pd.DataFrame:
        """Drop the named columns, then optionally all-NaN-bearing rows/cols."""
        df = df.drop(drop_cols_names, axis=1)
        if drop_na_rows:
            # Keep only rows that contain no NaN at all.
            df = df[df.isna().sum(axis=1) == 0]
        if drop_na_cols:
            # Keep only columns that contain no NaN at all.
            not_na_cols = df.isna().sum(axis=0) == 0
            df = df[not_na_cols[not_na_cols].index.tolist()]
        return df
    def fit_predict(self, train_x: pd.DataFrame, train_y: pd.Series, test_x: pd.DataFrame, test_y: pd.Series) -> dict:
        """Fits model, measures metrics, returns scores"""
        self.evaluator.fit(train_x, train_y, test_x, test_y)
        predictions = self.evaluator.predict(test_x)
        print(predictions)
        metrics = metrics_stat(predictions, test_y)
        return metrics
|
996,364 | 865a1d641589d58888ea547fed942c1dec929f6f | import pickle
from enum import Enum
class UVARGPacket(object):
    """Data packet for the UVAR-G routing protocol.

    Carries an opaque payload plus the routing metadata needed to forward
    it from ``source_id`` toward ``destination_id`` via the next road
    intersection.
    """

    def __init__(self, destination_id: int, source_id: int, next_intersection: int, raw_data: bytes, uav_send_me_to_car_in_section: tuple):
        self.destination_id = destination_id
        self.source_id = source_id
        self.next_intersection = next_intersection
        self.raw_data = raw_data
        self.uav_send_me_to_car_in_section = uav_send_me_to_car_in_section

    def serialize(self) -> bytes:
        """Encode this packet as bytes for transmission (pickle)."""
        return pickle.dumps(self)

    @staticmethod
    def deserialize(data: bytes):
        """Rebuild a packet from bytes produced by serialize().

        NOTE: pickle.loads must only ever be fed trusted input.
        """
        return pickle.loads(data)
class UVARMessageTypes(Enum):
    """Packet kinds used by the UVAR-S protocol."""
    DATA = "DATA"  # application payload
    RREQ = "RREQ"  # route request
    RREP = "RREP"  # route reply
class UVARMobileType:
    """Kinds of mobile node in the UVAR simulation (plain string constants)."""
    CAR = "CAR"
    UAV = "UAV"
class UVARSPacket(object):
    """Control/data packet for the UVAR-S routing protocol.

    ``ttl`` and ``uav_id`` are optional and only meaningful for some packet
    types; ``path`` accumulates node ids as the packet is forwarded.
    """

    def __init__(self, packet_type: UVARMessageTypes, destination_id: int, source_id: int, ttl: int = None, uav_id: int = None):
        self.packet_type = packet_type
        self.destination_id = destination_id
        self.source_id = source_id
        self.uav_id = uav_id
        self.ttl = ttl
        # Route accumulator, filled in hop by hop while the packet travels.
        self.path = []

    def serialize(self) -> bytes:
        """Encode the packet as bytes (pickle)."""
        return pickle.dumps(self)

    @staticmethod
    def deserialize(data: bytes):
        """Inverse of serialize(); only use on trusted input (pickle)."""
        return pickle.loads(data)
996,365 | 35d2cc4313f9821a6b820469440149d6c173bb9b | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 4 23:44:10 2021
@author: yrc2
"""
from biorefineries import oilcane as oc
import biosteam as bst
import numpy as np
from biosteam.utils import colors, GG_colors
import matplotlib.pyplot as plt
from biosteam.plots import (
plot_contour_2d,
MetricBar,
plot_vertical_line,
rounded_tickmarks_from_data as tickmarks,
plot_scatter_points,
)
from thermosteam.units_of_measure import format_units
from thermosteam.utils import set_figure_size, set_font
from biorefineries.oilcane._load_data import images_folder
from warnings import filterwarnings
from scipy.ndimage.filters import gaussian_filter
import os
__all__ = (
'plot_relative_sorghum_oil_content_and_cane_oil_content_contours_manuscript',
'plot_recovery_and_oil_content_contours_manuscript',
'plot_recovery_and_oil_content_contours',
'plot_relative_sorghum_oil_content_and_cane_oil_content_contours',
'plot_recovery_and_oil_content_contours_biodiesel_only',
'plot_recovery_and_oil_content_contours_with_oilsorghum_only',
'plot_metrics_across_composition'
)
filterwarnings('ignore', category=bst.exceptions.DesignWarning)
line_colors = [
GG_colors.orange.RGBn,
GG_colors.purple.RGBn,
GG_colors.green.RGBn,
GG_colors.blue.RGBn,
GG_colors.yellow.RGBn,
colors.CABBI_teal.RGBn,
colors.CABBI_grey.RGBn,
colors.CABBI_brown.RGBn,
]
shadecolor = (*colors.neutral.RGBn, 0.20)
linecolor = (*colors.neutral_shade.RGBn, 0.85)
targetcolor = (*colors.red_tint.RGBn, 1)
startcolor = (*colors.red_tint.RGBn, 1)
edgecolor = (*colors.CABBI_black.RGBn, 1)
CABBI_colors = (colors.CABBI_yellow.tint(75).RGBn,
colors.CABBI_yellow.tint(30).RGBn,
colors.CABBI_yellow.RGBn,
colors.CABBI_green.tint(5).RGBn,
colors.CABBI_teal_green.shade(40).RGBn,
colors.CABBI_teal_green.shade(60).RGBn)
CABBI_colors_x = (colors.CABBI_blue_light.tint(90).RGBn,
colors.CABBI_blue_light.tint(40).RGBn,
colors.CABBI_blue_light.RGBn,
# colors.CABBI_teal.RGBn,
# colors.CABBI_teal_green.tint(10).RGBn,
colors.CABBI_teal_green.tint(40).shade(15).RGBn,
colors.CABBI_teal_green.shade(45).RGBn)
diverging_colormaps = [
plt.cm.get_cmap('RdYlGn')
]
colormaps = [
plt.cm.get_cmap('viridis'),
plt.cm.get_cmap('copper_r'),
# LinearSegmentedColormap.from_list('CABBI', CABBI_colors, 25),
# LinearSegmentedColormap.from_list('CABBI', CABBI_colors_x, 25),
plt.cm.get_cmap('inferno_r'),
plt.cm.get_cmap('copper_r'),
plt.cm.get_cmap('bone_r'),
] * 2
light_letter_color = colors.neutral.tint(98).RGBn
letter_color = colors.neutral.tint(80).RGBn
dark_letter_color = colors.neutral.shade(80).RGBn
# %% Plot functions for publication
def _add_letter_labels(axes, xpos, ypos, colors):
    """Stamp panel letters ('A', 'B', ...) onto a grid of matplotlib axes.

    Parameters
    ----------
    axes : 2d array of Axes, same shape as ``colors``.
    xpos, ypos : float
        The label is placed at (sum(xlim) * xpos, sum(ylim) * ypos) in data
        coordinates of each panel.
    colors : 2d array
        Per-panel text color; its shape selects the letter layout.

    NOTE(review): only shapes (2, 2), (3, 2) and (2, 1) are handled; any
    other shape leaves ``letters`` unbound and raises NameError below.
    """
    M, N = shape = colors.shape
    if shape == (2, 2):
        letters=np.array([['A', 'C'], ['B', 'D']])
    elif shape == (3, 2):
        letters=np.array([['A', 'D'], ['B', 'E'], ['C', 'F']])
    elif shape == (2, 1):
        letters=np.array([['A'], ['B']])
    for i in range(M):
        for j in range(N):
            ax = axes[i, j]
            letter = letters[i, j]
            xlb, xub = ax.get_xlim()
            ylb, yub = ax.get_ylim()
            # Draw on the twin axis when present so the label sits on top.
            if hasattr(ax, '_cached_ytwin'):
                ax = ax._cached_ytwin
            ax.text((xlb + xub) * xpos, (yub + ylb) * ypos, letter, color=colors[i, j],
                    horizontalalignment='center',verticalalignment='center',
                    fontsize=12, fontweight='bold', zorder=1e17)
def plot_metrics_across_composition_manuscript(load=True, fs=8, smooth=1):
    """Manuscript-formatted wrapper around plot_metrics_across_composition.

    Sets manuscript font/figure sizes, letters the panels, and saves the
    figure as SVG and PNG.
    """
    set_font(size=fs)
    set_figure_size()
    fig, axes = plot_metrics_across_composition(
        load=load,
        smooth=smooth,
    )
    # Bug fix: the list assigned into this (2, 2) buffer had THREE rows,
    # which raised a broadcast ValueError on every call. Two rows matches
    # the declared shape, the sibling manuscript wrappers, and the
    # letter layouts supported by _add_letter_labels.
    colors = np.zeros([2, 2], object)
    colors[:] = [[light_letter_color, light_letter_color],
                 [light_letter_color, light_letter_color]]
    _add_letter_labels(axes, 1 - 0.68, 0.7, colors)
    plt.subplots_adjust(right=0.92, wspace=0.1 * (fs/8) ** 2, top=0.9, bottom=0.10)
    # NOTE(review): this output name collides with the figure written by
    # plot_recovery_and_oil_content_contours_manuscript -- confirm whether a
    # distinct name (e.g. 'metrics_across_composition') was intended.
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'recovery_and_oil_content_contours.{i}')
        plt.savefig(file, transparent=True)
def plot_recovery_and_oil_content_contours_manuscript(load=True, fs=8, smooth=1):
set_font(size=fs)
set_figure_size()
fig, axes = plot_recovery_and_oil_content_contours(
load=load,
smooth=smooth,
)
colors = np.zeros([2, 2], object)
colors[:] = [[light_letter_color, light_letter_color],
[light_letter_color, light_letter_color]]
_add_letter_labels(axes, 1 - 0.68, 0.7, colors)
plt.subplots_adjust(right=0.92, wspace=0.1 * (fs/8) ** 2, top=0.9, bottom=0.10)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'recovery_and_oil_content_contours.{i}')
plt.savefig(file, transparent=True)
def plot_recovery_and_oil_content_contours_with_oilsorghum_only(fs=10, smooth=1):
set_font(size=fs)
set_figure_size(4, 1.1)
fig, axes = plot_recovery_and_oil_content_contours(
load=True,
smooth=smooth,
with_oilsorghum_only=True
)
colors = np.zeros([2, 1], object)
colors[:] = [[light_letter_color], [light_letter_color]]
# _add_letter_labels(axes, 1 - 0.68, 0.7, colors)
plt.subplots_adjust(left=.2, right=0.92, wspace=0.1 * (fs/8) ** 2, top=0.9, bottom=0.10)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'recovery_and_oil_content_contours.{i}')
plt.savefig(file, transparent=True)
def plot_recovery_and_oil_content_contours_biodiesel_only(load=True, fs=8, metric_indices=None):
set_font(size=fs)
set_figure_size()
if metric_indices is None: metric_indices = (0, 2, 6, 10)
for cmap, i in zip(colormaps, metric_indices):
fig, axes = plot_recovery_and_oil_content_contours(
load=load, configurations=np.array([[7, 8], [5, 6]]),
N_points=20, yticks=[0, 2.5, 5, 7.5, 10, 12.5, 15],
titles=['Batch', 'Fed-Batch'],
metric_index=i, cmap=cmap,
)
load = True
colors = np.zeros([2, 2], object)
colors[:] = [[light_letter_color, light_letter_color],
[light_letter_color, light_letter_color]]
_add_letter_labels(axes, 1 - 0.68, 0.85, colors)
plt.subplots_adjust(right=0.92, wspace=0.1 * (fs/8) ** 2, top=0.9, bottom=0.10)
for j in ('svg', 'png'):
file = os.path.join(images_folder, f'recovery_and_oil_content_contours_biodiesel_only_{i}.{j}')
plt.savefig(file, transparent=True)
def plot_relative_sorghum_oil_content_and_cane_oil_content_contours_manuscript(load=True, fs=8, smooth=0.9):
set_font(size=fs)
set_figure_size()
fig, axes = plot_relative_sorghum_oil_content_and_cane_oil_content_contours(
load=load, smooth=smooth,
)
colors = np.zeros([2, 2], object)
colors[:] = [[light_letter_color, light_letter_color],
[light_letter_color, light_letter_color]]
_add_letter_labels(axes, 1 - 0.82, 0.70, colors)
plt.subplots_adjust(right=0.92, wspace=0.1, top=0.9, bottom=0.12)
for i in ('svg', 'png'):
file = os.path.join(images_folder, f'relative_sorghum_oil_content_and_cane_oil_content_contours.{i}')
plt.savefig(file, transparent=True)
# %% General
def metrics_across_oil_and_fiber_content(configuration, load):
    """Evaluate (or load cached) metrics over a grid of feedstock oil
    content (x), fiber content (y) and moisture content (z) for the given
    biorefinery configuration.

    Returns (X, Y, Z, data): the meshgrid arrays plus the metric values.
    """
    # Generate contour data
    x = np.linspace(0., 0.04, 5)
    y = np.linspace(0.35, 0.75, 5)
    z = np.array([0.6, 0.65, 0.7])
    X, Y, Z = np.meshgrid(x, y, z)
    # Cache file lives next to this module, keyed by configuration only.
    # NOTE(review): the cache is NOT invalidated if the grid above changes.
    folder = os.path.dirname(__file__)
    file = f'{configuration}_composition_analysis.npy'
    file = os.path.join(folder, file)
    if load:
        data = np.load(file, allow_pickle=True)
    else:
        from warnings import filterwarnings
        filterwarnings('ignore')
        # This returns data of all metrics for the given configuration,
        # but we are mainly interested in MFPP and productivity in L biodiesel per MT cane.
        data = oc.evaluate_metrics_across_composition(
            X, Y, Z, configuration,
        )
        np.save(file, data)
    return X, Y, Z, data
def plot_metrics_across_composition(
        configuration=None, load=False, N_decimals=1,
        yticks=None, titles=None,
        cmap=None, smooth=None,
    ):
    """Contour plots of MFPP and biodiesel production across feedstock oil
    content (x axis), fiber content (y axis) and moisture content (columns),
    with measured feedstock compositions overlaid as labelled points.

    Parameters
    ----------
    configuration : str, optional
        Biorefinery configuration key; defaults to 'O2'.
    load : bool
        Load cached simulation results instead of re-running them.
    smooth : float, optional
        Sigma for gaussian smoothing of each metric surface.

    NOTE(review): ``N_decimals``, ``yticks`` and ``cmap`` are accepted for
    signature compatibility but the tick values/colormaps are fixed below.
    """
    if configuration is None: configuration = 'O2'
    metric_indices = [0, 2]
    MFPP = oc.all_metric_mockups[0]  # Maximum feedstock purchase price
    BP = oc.all_metric_mockups[2]  # Biodiesel production
    # Bug fix: the configuration must be forwarded to the data routine;
    # previously ``load`` was passed as the first (configuration) argument,
    # binding the wrong parameter.
    X, Y, Z, data = metrics_across_oil_and_fiber_content(configuration, load)
    data = data[:, :, :, metric_indices]
    xticks = [0, 1, 2, 3, 4]
    yticks = [35, 45, 55, 65, 75]  # overrides any caller-supplied value
    if smooth:  # Smooth out discontinuities from HXN / design decisions
        A, B, C, D = data.shape
        for i in range(C):
            for j in range(D):
                data[:, :, i, j] = gaussian_filter(data[:, :, i, j], smooth)
    data = np.swapaxes(data, 2, 3)
    # Plot contours
    xlabel = 'Oil content [dry wt. %]'
    ylabel = "Fiber content [dry wt. %]"
    if titles is None: titles = np.array(['60% moisture', '65% moisture', '70% moisture'])
    metric_bars = [
        MetricBar(MFPP.name, format_units(MFPP.units), colormaps[0], tickmarks(data[:, :, 0, :], 8, 1, expand=0, p=0.5), 10, 1),
        MetricBar('Biod. prod.', format_units(BP.units), plt.cm.get_cmap('copper'), tickmarks(data[:, :, 1, :], 8, 1, expand=0, p=0.5), 10, 1),
    ]
    fig, axes, CSs, CB = plot_contour_2d(
        100.*X[:, :, 0], 100.*Y[:, :, 0], titles, data, xlabel, ylabel, xticks, yticks, metric_bars,
        styleaxiskw=dict(xtick0=True), label=True,
    )
    def determine_axis_column(moisture_content):
        # Map a measured moisture content to the nearest plotted column.
        for column, axis_moisture in enumerate([60, 65, 70]):
            if abs(moisture_content - axis_moisture) < 2.5:
                return column
        raise RuntimeError('could not determine axis with similar moisture content')
    df = oc.get_composition_data()
    names = df.index
    lines = []
    for name, color in zip(names, line_colors):
        data = df.loc[name]
        lines.append(
            (name,
             data['Stem Oil (dw)']['Mean'] * 100,
             data['Fiber (dw)']['Mean'] * 100,
             data['Water (wt)']['Mean'] * 100,
             color)
        )
    txtbox = dict(boxstyle='round', facecolor=colors.neutral.shade(20).RGBn,
                  edgecolor='None', alpha=0.9)
    for *axes_columns, _ in axes:
        for (name, lipid, fiber, moisture, color) in lines:
            index = determine_axis_column(moisture)
            plt.sca(axes_columns[index]._cached_ytwin)
            plt.text(
                lipid + 0.1, fiber + 1, name, weight='bold', c=color,
                bbox=txtbox,
            )
            plot_scatter_points(
                [lipid], [fiber], marker='o', s=50,
                color=color, edgecolor=edgecolor, clip_on=False, zorder=1e6,
            )
    return fig, axes
def relative_sorghum_oil_content_and_cane_oil_content_data(load, relative):
    """Evaluate (or load cached) metrics across sorghum oil content (x) and
    cane oil content (y) for configurations 1 and 2.

    ``relative`` switches the sorghum axis from absolute oil content to an
    offset relative to the cane's, and selects a different cache file.
    """
    # Generate contour data
    y = np.linspace(0.05, 0.15, 20)
    x = np.linspace(-0.03, 0., 20) if relative else np.linspace(0.02, 0.15, 20)
    X, Y = np.meshgrid(x, y)
    # Cache file lives next to this module.
    folder = os.path.dirname(__file__)
    file = 'oil_content_analysis.npy'
    if relative: file = 'relative_' + file
    file = os.path.join(folder, file)
    configurations = [1, 2]
    if load:
        data = np.load(file)
    else:
        data = oc.evaluate_configurations_across_sorghum_and_cane_oil_content(
            X, Y, configurations, relative,
        )
        np.save(file, data)
    return X, Y, data
def plot_relative_sorghum_oil_content_and_cane_oil_content_contours(
load=False, configuration_index=..., relative=False, smooth=None,
):
# Generate contour data
X, Y, data = relative_sorghum_oil_content_and_cane_oil_content_data(load, relative)
data = data[:, :, configuration_index, [0, 6]]
# Plot contours
xlabel = "Oil-sorghum oil content [dry wt. %]"
if relative: xlabel = ('relative ' + xlabel).capitalize()
ylabel = 'Oilcane oil content\n[dry wt. %]'
yticks = [5, 7.5, 10, 12.5, 15]
xticks = [-3, -2, -1, 0] if relative else [2, 5, 7.5, 10, 12.5, 15]
MFPP = oc.all_metric_mockups[0]
TCI = oc.all_metric_mockups[6]
if configuration_index == 0:
Z = np.array(["Direct Cogeneration"])
data = data[:, :, :, np.newaxis]
elif configuration_index == 1:
Z = np.array(["Integrated Co-Fermentation"])
data = data[:, :, :, np.newaxis]
elif configuration_index == ...:
Z = np.array(["Direct Cogeneration", "Integrated Co-Fermentation"])
data = np.swapaxes(data, 2, 3)
else:
raise ValueError('configuration index must be either 0 or 1')
metric_bars = [
[MetricBar(MFPP.name, format_units(MFPP.units), colormaps[0], tickmarks(data[:, :, 0, 0], 5, 1, expand=0, p=0.5), 10, 1),
MetricBar(MFPP.name, format_units(MFPP.units), colormaps[0], tickmarks(data[:, :, 0, 1], 5, 1, expand=0, p=0.5), 10, 1)],
[MetricBar(TCI.name, format_units(TCI.units), colormaps[1], tickmarks(data[:, :, 1, 0], 5, 5, expand=0, p=5), 10, 1),
MetricBar(TCI.name, format_units(TCI.units), colormaps[1], tickmarks(data[:, :, 1, 1], 5, 5, expand=0, p=5), 10, 1)],
]
if smooth: # Smooth curves due to heat exchanger network and discontinuities in design decisionss
A, B, M, N = data.shape
for m in range(M):
for n in range(N):
metric_data = data[:, :, m, n]
data[:, :, m, n] = gaussian_filter(metric_data, smooth)
# for a in range(A):
# values = metric_data[a, :]
# values.sort()
# p = np.arange(values.size)
# coeff = np.polyfit(p, values, 5)
# values[:] = np.polyval(coeff, p)
# for b in range(B):
# values = metric_data[:, b]
# values.sort()
# p = np.arange(values.size)
# coeff = np.polyfit(p, values, 5)
# values[:] = np.polyval(coeff, p)
fig, axes, CSs, CB = plot_contour_2d(
100.*X, 100.*Y, Z, data, xlabel, ylabel, xticks, yticks, metric_bars,
styleaxiskw=dict(xtick0=True), label=True,
)
# for i in axes.flatten():
# plt.sca(i)
# plot_scatter_points([7], [10], marker='*', s=100, color=startcolor,
# edgecolor=edgecolor, clip_on=False, zorder=3)
return fig, axes
def plot_recovery_and_oil_content_contours(
load=False, metric_index=0, N_decimals=1, configurations=None,
N_points=20, yticks=None, titles=None, cmap=None, smooth=None,
with_oilsorghum_only=False,
):
if yticks is None: yticks = [5, 7.5, 10, 12.5, 15]
if configurations is None:
if with_oilsorghum_only:
configurations = np.array([['O1*'], ['O2*']])
if titles is None: titles = ['Oilcane & Oil-sorghum']
else:
configurations = np.array([['O1', 'O1*'], ['O2', 'O2*']])
if titles is None: titles = ['Oilcane Only', 'Oilcane & Oil-sorghum']
# Generate contour data
x = np.linspace(0.40, 1.0, N_points)
y = np.linspace(yticks[0] / 100, yticks[-1] / 100, N_points)
X, Y = np.meshgrid(x, y)
metric = bst.metric
folder = os.path.dirname(__file__)
file = "oil_extraction_analysis.npy"
file = os.path.join(folder, file)
if load:
data = np.load(file)
else:
data = oc.evaluate_configurations_across_recovery_and_oil_content(
X, Y, configurations,
)
np.save(file, data)
data = data[:, :, :, :, metric_index]
if with_oilsorghum_only:
data = data[:, :, :, -1:]
if smooth: # Smooth curves due to heat exchanger network and discontinuities in design decisionss
A, B, M, N = data.shape
for m in range(M):
for n in range(N):
metric_data = data[:, :, m, n]
data[:, :, m, n] = gaussian_filter(metric_data, smooth)
# Plot contours
xlabel = 'Crushing mill oil recovery [%]'
ylabel = "Oil content [dry wt. %]"
ylabels = [f'Direct Cogeneration\n{ylabel}',
f'Integrated Co-Fermentation\n{ylabel}']
xticks = [40, 50, 60, 70, 80, 90, 100]
metric = oc.all_metric_mockups[metric_index]
units = metric.units if metric.units == '%' else format_units(metric.units)
if cmap is None: cmap = colormaps[metric_index]
mb = lambda x, name=None: MetricBar(
metric.name if name is None else name, units if name is None else "", cmap,
tickmarks(data[:, :, x, :], 5, 0.1, expand=0, p=0.1, f=lambda x: round(x, N_decimals)),
10, N_decimals=N_decimals
)
metric_bars = [mb(0), mb(1, "")]
fig, axes, CSs, CB = plot_contour_2d(
100.*X, 100.*Y, titles, data, xlabel, ylabels, xticks, yticks, metric_bars,
fillcolor=None, styleaxiskw=dict(xtick0=False), label=True,
)
M, N = configurations.shape
for i in range(M):
for j in range(N):
ax = axes[i, j]
plt.sca(ax)
plt.fill_between([60, 90], [yticks[0]], [yticks[-1]],
color=shadecolor,
linewidth=1)
plot_vertical_line(60, ls='-.',
color=linecolor,
linewidth=1.0)
plot_vertical_line(90, ls='-.',
color=linecolor,
linewidth=1.0)
if hasattr(ax, '_cached_ytwin'):
plt.sca(ax._cached_ytwin)
# plot_scatter_points([60], [10], marker='*', s=100, color=startcolor,
# edgecolor=edgecolor, clip_on=False, zorder=3)
# plot_scatter_points([ub], [15], marker='*', s=100, color=targetcolor,
# edgecolor=edgecolor, clip_on=False, zorder=3)
return fig, axes |
996,366 | 928865f8f02e10a9945c7343a819fff6d98a817b | BASE_API_URL = 'https://cloud.veriserve.co.jp/api/v2/' #APIのパス
API_KEY = 'XXXX' #API key (placeholder -- fill in before use)
FOLDER_PATH = 'XXXX' #Path of the folder that holds the target files
TSV_NAME = '1.0' #Test-suite version name
QF_COLUMN_MAX = 21 #Maximum number of columns QF can import (free test-definition fields + the "Priority" column)
TSV_STATUS = 'available' #Test-suite version status value ("available")
COL_TITLE_START = '優先度' #Title of the first header column ("Priority")
TEST_SUITE_DELETE_FLG = 1 #1: delete the test suite, 0: do not delete
996,367 | 55c955403b3b640616d140b3b2b02aa8d856765c | '''https://leetcode.com/problems/minimum-add-to-make-parentheses-valid'''
class Solution:
    """LeetCode 921: Minimum Add to Make Parentheses Valid."""

    def minAddToMakeValid(self, S: str) -> int:
        """Return the minimum number of parentheses to insert so S is valid.

        Uses two counters instead of an explicit stack: ``open_count`` is
        the number of '(' still unmatched, ``insertions`` counts closing
        parentheses that arrived with nothing to match.
        """
        open_count = 0
        insertions = 0
        for ch in S:
            if ch == "(":
                open_count += 1
            elif open_count:
                open_count -= 1
            else:
                insertions += 1
        # Leftover unmatched '(' each need one inserted ')'.
        return insertions + open_count
996,368 | 4daed0bbd367fa47b8c37a8a69d369cf3cef6669 | #! /usr/bin/env python3
import rtmidi
import os
import yaml
import argparse
import sys
def get_args():
    """Parse command-line arguments from sys.argv.

    Returns the argparse namespace with attributes: ``list`` (bool),
    ``keycodes`` (bool), ``port`` (int or None) and ``config`` (path
    string; may contain an unexpanded '~').
    """
    p = argparse.ArgumentParser(description="Map MIDI input to commands")
    p.add_argument("-l", "--list",
                   action="store_true",
                   help="prints a list of available MIDI input devices")
    p.add_argument("-k", "--keycodes",
                   action="store_true",
                   help="continuously prints keycodes from MIDI input")
    p.add_argument("-p", "--port",
                   type=int,
                   help="device port to open")
    p.add_argument("-c", "--config",
                   default="~/.config/midi-device-mapper/config.yaml",
                   help="path to (yaml) config file" +
                        "(default: ~/.config/midi-device-mapper/config.yaml)")
    args = p.parse_args()
    return args
def read_config(file_path):
    """Load the YAML config file and return its parsed content.

    ``file_path`` may contain '~'; it is expanded here (bug fix: the
    default ``~/.config/...`` path was previously passed to open()
    unexpanded and could never be found). Exits the process with status 1
    when the file is missing or unreadable; YAML syntax errors now
    propagate instead of being silently reported as a missing file.
    """
    file_path = os.path.expanduser(file_path)
    try:
        with open(file_path, "r") as file:
            return yaml.safe_load(file)
    except OSError:
        # Narrowed from a bare ``except:`` -- only file-access problems
        # mean the config file is absent.
        print("Config file does not exist")
        sys.exit(1)
def list_ports():
ports = range(midi_in.getPortCount())
if ports:
for i in ports:
print(str(i) + ": " + midi_in.getPortName(i))
else:
print("No available MIDI input ports")
sys.exit(1)
def choose_port():
list_ports()
d = int(input("Choose device: "))
return d
def print_message(midi):
    """Pretty-print one rtmidi message as ``TYPE number value``.

    Bug fix: messages that are neither note-on, note-off nor controller
    previously left ``msg`` unbound and raised UnboundLocalError; they
    are now silently skipped.
    """
    if midi.isNoteOn():
        msg = ["ON", midi.getNoteNumber(), midi.getVelocity()]
    elif midi.isNoteOff():
        msg = ["OFF", midi.getNoteNumber(), midi.getVelocity()]
    elif midi.isController():
        msg = ["CTRL", midi.getControllerNumber(), midi.getControllerValue()]
    else:
        # Unsupported message type (e.g. pitch bend, sysex): ignore.
        return
    print("%-4s %3i %3i" % (msg[0], msg[1], msg[2]))
def print_keycodes(port):
midi_in.openPort(port)
while True:
m = midi_in.getMessage(250)
if m:
print_message(m)
def eval_input(port):
midi_in.openPort(port)
while True:
m = midi_in.getMessage(250)
if m:
note = m.getNoteNumber()
if note in config:
if m.isNoteOn():
cmd = config[note]
os.system(cmd)
elif m.isController():
val = val_pct(m.getControllerValue())
cmd = " ".join((config[note], str(val)))
os.system(cmd)
def val_pct(val):
    """Map a raw MIDI controller value (0-127) onto 0.0-1.0, rounded to
    two decimal places."""
    return round(val / 127.0, 2)
if __name__ == "__main__":
    args = get_args()
    midi_in = rtmidi.RtMidiIn()
    if args.list:
        list_ports()
    else:
        # Bug fix: test ``is not None`` so that port 0 (a valid device
        # index) is honoured; the previous truthiness check fell through
        # to the interactive prompt for port 0.
        if args.port is not None:
            port = args.port
        else:
            port = choose_port()
        if args.keycodes:
            print_keycodes(port)
        else:
            config = read_config(args.config)
            eval_input(port)
|
996,369 | dc58eeadb793cda5d254f0bf69f0301b0597741f | import time
import pickle
def save_message(file_name, msg, suffix=".prototxt", verbose=True):
    """Serialize a protobuf-style message to ``file_name + suffix``.

    Args:
        file_name: Output path without extension.
        msg: Any object exposing ``SerializeToString()``.
        suffix: Extension appended to ``file_name``.
        verbose: When True, report the path written and elapsed time.
    """
    start = time.time()
    # ``with`` guarantees the file is closed even if serialization fails
    # (the original leaked the handle on that path).
    with open(file_name + suffix, "wb") as f:
        f.write(msg.SerializeToString())
    if verbose:
        print("File {} saved.".format(file_name + suffix))
        print("Time to save:", time.time() - start)
def read_message(file_name, msg, suffix=".prototxt", verbose=True):
    """Populate ``msg`` in place from ``file_name + suffix``.

    Args:
        file_name: Input path without extension.
        msg: Any object exposing ``ParseFromString(bytes)``.
        suffix: Extension appended to ``file_name``.
        verbose: When True, report the path read and elapsed time.
    """
    start = time.time()
    # ``with`` guarantees the file is closed even if parsing fails
    # (the original leaked the handle on that path).
    with open(file_name + suffix, "rb") as f:
        msg.ParseFromString(f.read())
    if verbose:
        print("File {} loaded.".format(file_name + suffix))
        print("Time to load:", time.time() - start)
def save_pickle(file_name, obj, suffix=".pkl", verbose=False):
    """Pickle ``obj`` to ``file_name`` (suffix appended unless the name
    already contains it, mirroring read_pickle's naming rule)."""
    start = time.time()
    file_name = file_name + suffix if suffix not in file_name else file_name
    with open(file_name, 'wb') as handle:
        pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
    if verbose:
        print("File {} saved.".format(file_name))
        # Bug fix: message previously read "Time to s ave:".
        print("Time to save:", time.time() - start)
def read_pickle(file_name, suffix=".pkl", verbose=False):
    """Load and return a pickled object from ``file_name``.

    The suffix is appended only when ``file_name`` does not already
    contain it, mirroring save_pickle's naming rule.
    """
    start = time.time()
    if suffix not in file_name:
        file_name = file_name + suffix
    with open(file_name, 'rb') as handle:
        classifier = pickle.load(handle)
    if verbose:
        print("File {} loaded.".format(file_name))
        print("Time to load:", time.time() - start)
    return classifier
|
996,370 | 269d5b18f7cf8bd85ce81772b8e5102edf548e1d | #coding:utf-8
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains #引入ActionChains鼠标操作类
from selenium.webdriver.common.keys import Keys #引入keys类操作
import time
import random
import json
chrome_driver_path = './chromedriver'
home_page = 'http://drugs.dxy.cn'
save_file = 'sub_class_file.txt'
data_per = {}
start_ = 3
end_ = 16
browser = webdriver.Chrome(chrome_driver_path)
browser.get(home_page)
#click more hidden content
check_list = browser.find_element_by_class_name('sidemenu')
cli = check_list.find_element_by_class_name('more')
cli.click()
check_list = check_list.find_element_by_tag_name('ul')
check_list = check_list.find_elements_by_tag_name('li')
for i in check_list:
sup_class = i.text.encode("utf-8").decode("utf-8")
data_per [sup_class] = {}
herf = i.find_element_by_tag_name('a')
herf.click()
ullist = browser.find_element_by_css_selector("[class='ullist clearfix']")
ullist = ullist.find_elements_by_tag_name("li")
for li in ullist:
li = li.find_element_by_tag_name('h3')
li = li.find_element_by_tag_name('a')
sub_class = li.text.encode("utf-8").decode("utf-8")
href = li.get_attribute("href").strip()
data_per[sup_class][sub_class] = href
print(data_per)
with open(save_file, 'w') as fp:
json.dump(data_per, fp)
sleep_sec = random.randint(start_, end_)
time.sleep(sleep_sec)
browser.quit()
|
996,371 | 60ea265a34924407cfe9e988381d0dbbd665b616 | >>> #this is a comment
>>> print("hi")
hi
>>> ##This is a simple comment in Python!
>>> print("With this I will print out some simple text")
|
996,372 | c97c1472b4247aa00da997aa81e38de78701a94c | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
from django.contrib.auth import views as auth_views
"""
URL patterns used by django to load views.
"""
urlpatterns = [
url(r'^$', views.index, name='home'),
url(r'^shelld/$', views.shelld, name='shelld'),
url(r'^updates/$', views.updates, name='updates'),
url(r'^about/', views.about, name='about'),
url(r'^learn/', views.learn, name='learn'),
url(r'^chat/', views.chat, name='chat'),
url(r'^sponsors/', views.sponsors, name='sponsors'),
url(r'^scoreboard/$', views.scoreboard, name='scoreboard'),
url(r'^score/', views.score, name='score'),
url(r'^profile/(?P<teamid>[a-zA-Z0-9]*)$', views.profile),
url(r'^problems/$', views.problems, name='problems'),
url(r'^problems/submit_problem/$', views.submit_problem, name='submit_problem'),
url(r'^account/$', views.account, name='account'),
url(r'^account/change_password/$', views.change_password, name='change_password'),
url(r'^account/create_team/$', views.create_team, name='create_team'),
url(r'^account/join_team/$', views.join_team, name='join_team'),
url(r'^account/login/$', auth_views.login, {'template_name': 'login.html'}, name='login'),
url(r'^account/logout/$', auth_views.logout, {'next_page': 'home'}, name='logout'),
url(r'^account/signup/$', views.signup, name='signup'),
url(r'^resetpassword/$', auth_views.password_reset,
{'post_reset_redirect' : 'passwordsent/',
'template_name': 'registration/password_reset_form.html'}, name='password_reset'),
url(r'^resetpassword/passwordsent/', auth_views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', auth_views.password_reset_confirm, {'post_reset_redirect' : '/reset/done/'}, name='password_reset_confirm'),
url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
url(r'^activate/(?P<key>.+)/$', views.activation, name='activation'),
url(r'^new-activation-link/(?P<user_id>\d+)/$', views.new_activation_link, name='new_link'),
]
|
996,373 | 0ee2693b5129061a4ce39ee3f9bff567e92d91f9 |
import zipfile
import re
import os
from os import listdir
from os.path import isfile, join
def __unzip(year, month, src_root='/home/ubuntu/sec/', dst_root='/home/ubuntu/xbrl/'):
    """Extract every zip archive in ``<src_root>/<year>/<month>/`` into
    ``<dst_root>/<year>/<month>/<archive_name>/``.

    The source/destination roots are new keyword parameters defaulting to
    the original hard-coded paths, so existing calls are unaffected.
    Unreadable or corrupt archives are reported by name and skipped.
    """
    dr = join(src_root, year, month)
    for f in [f for f in listdir(dr) if isfile(join(dr, f))]:
        try:
            # Context managers close the archive even when extraction
            # fails, fixing the file-handle leak on the old error path.
            with zipfile.ZipFile(join(dr, f)) as z:
                drct = join(dst_root, year, month, f)
                os.makedirs(drct, exist_ok=True)
                z.extractall(drct)
        except (OSError, zipfile.BadZipFile):
            # Narrowed from a bare except: report the bad file and go on.
            print(f)
__unzip('2016', '01')
|
996,374 | 752c2a3295e4051b3d01ffdc9d7e37a18ded3a90 | #!/usr/bin/env python
import numpy as np
from structures import Data
def main():
measured_data = Data()
print measured_data
a = 1.0
b = 2.0
c = 3.0
measured_data.append_data(a, b, c)
print measured_data
if __name__ == '__main__':
main()
|
996,375 | 819ebdeae8c47ca1569a8b04367e201bd9b28a90 | import os
import re
root = "/home/yuguess/ProcessFile/15min"
pattern =r"(.*)\.(DCE|CZC|SHF)"
def doFunc(path, fileName):
    # Delete the matched data file. (Python 2 script: print statement syntax.)
    fullFile = path + "/" + fileName
    print "remove ", fullFile
    os.remove(fullFile)
#######################################
regex = re.compile(pattern)
for (path, dirs, files) in os.walk(root):
for fileName in files:
if regex.match(str(fileName)):
doFunc(path, fileName)
|
996,376 | 5774ebe788a0ed0b6d521fa13ada3fa660ed77be | #! /usr/bin/env python3
import numpy as np
import json
import os
import pathlib
import sys
# check that the PLAID dataset already exists
if not (os.path.exists("PLAID/") and os.path.isdir("PLAID/")):
print("PLAID not downloaded yet. Run `plaid_serializer.py`")
sys.exit()
if not (os.path.exists("numpy_arrays/") and os.path.isdir("PLAID/")):
print("numpy arrays not created yet. Run `plaid_serializer.py`")
sys.exit()
# make folders if needed
out_dir = 'barsim_et_al_data/'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# collect metadata
metadata_filenames = [("PLAID/meta1.json", 'dataset1')] # only do dataset1 for now
metadata = {}
input_traces = 0
for infilename,datasetname in metadata_filenames:
with open(infilename, 'r') as infile:
metadata[datasetname] = json.load(infile)
input_traces += len(metadata[datasetname])
# determine output size based on data characteristics
phased_trace_count = 50
frequency = 30000
num_traces = input_traces * phased_trace_count # number of traces * duplicates (with phase offset)
data_len = int((frequency/60 * 2) + 3) # voltage & current, plus name, class and location
output_data = np.zeros((num_traces, data_len), dtype=float)
# lookup arrays for device name, class, and house names
name_map = []
class_map = []
house_map = []
# create maps
for datasetname in sorted(metadata.keys()):
for item in metadata[datasetname]:
# collect various fields
data_id = str(item['id'])
location = str(item['meta']['location'])
device_class = str(item['meta']['type']).replace(' ', '_')
device_state = str(item['meta']['instances']['status']).replace(' ', '_').replace('-', '_')
device_appliance = ''
for app_key in sorted(item['meta']['appliance'].keys()):
if app_key == 'notes':
continue
if device_appliance != '' and item['meta']['appliance'][app_key] != '':
device_appliance += '_'
device_appliance += item['meta']['appliance'][app_key].replace(' ', '_').replace('-', '_').replace('.', '_').replace('(', '').replace(')', '').replace('/', '')
if device_appliance == '':
device_appliance = 'unknown'
device_full_name = datasetname + '_' + location + '_' + device_appliance + '_' + device_class
# map device names
if device_full_name not in name_map:
name_map.append(device_full_name)
# map device classes
if device_class not in class_map:
class_map.append(device_class)
# map device locations
if location not in house_map:
house_map.append(location)
name_map.sort()
class_map.sort()
house_map.sort()
# iterate through data
output_index = 0
for datasetname in sorted(metadata.keys()):
for item in metadata[datasetname]:
# collect various fields
data_id = str(item['id'])
location = str(item['meta']['location'])
device_class = str(item['meta']['type']).replace(' ', '_')
device_state = str(item['meta']['instances']['status']).replace(' ', '_').replace('-', '_')
device_appliance = ''
for app_key in sorted(item['meta']['appliance'].keys()):
if app_key == 'notes':
continue
if device_appliance != '' and item['meta']['appliance'][app_key] != '':
device_appliance += '_'
device_appliance += item['meta']['appliance'][app_key].replace(' ', '_').replace('-', '_').replace('.', '_').replace('(', '').replace(')', '').replace('/', '')
if device_appliance == '':
device_appliance = 'unknown'
device_full_name = datasetname + '_' + location + '_' + device_appliance + '_' + device_class
# read input file
data_filename = 'numpy_arrays/' + data_id + '.npy'
data = np.load(data_filename)
print('Processing ' + data_filename)
#select last N full cycles from data
n_cycles = 2
voltage = data[:,1]
current = data[:,0]
period_len = int(frequency / 60)
zero_crossings = np.where(np.diff(np.signbit(voltage)))[0]
end = data.shape[0]
for z_cross in np.flip(zero_crossings, 0):
if voltage[z_cross - 1] < 0:
end = z_cross
break
two_periods = data[z_cross + 1 - period_len * n_cycles: z_cross + 1]
# create N phase-shifted "traces" out of that
traces = np.zeros((phased_trace_count, data_len-3))
phase_step = int((frequency/60)/phased_trace_count)
for step,trace_row in enumerate(traces):
offset = step*phase_step
trace_row[:] = two_periods[offset: period_len+offset, :].flatten(order='F')
# write traces to output data
for trace_row in traces:
output_data[output_index, :-3] = trace_row
output_data[output_index, -3] = house_map.index(location)
output_data[output_index, -2] = name_map.index(device_full_name)
output_data[output_index, -1] = class_map.index(device_class)
output_index += 1
# save data
np.save(out_dir + 'traces_bundle', output_data)
np.save(out_dir + 'traces_class_map', class_map)
np.save(out_dir + 'traces_name_map', name_map)
np.save(out_dir + 'traces_house_map', house_map)
|
996,377 | 9dadc553bcd52ef888e7eed4e6289f30671bebbd | # encoding: utf-8
import logging
import textwrap
import threading
from datetime import datetime
import npyscreen
from DictObject import DictObject
from npyscreen import Textfield
from npyscreen import wgwidget as widget
from pytg import Telegram
from pytg.exceptions import NoResponse
from pytg.utils import coroutine
from config import TELEGRAM_CLI_PATH, PUBKEY_FILE
TG = Telegram(telegram=TELEGRAM_CLI_PATH,
pubkey_file=PUBKEY_FILE)
logger = logging.getLogger("main")
class ChatBox(npyscreen.BoxTitle):
    # Boxed single-line text field used as the message-entry widget.
    _contained_widget = npyscreen.Textfield
class CustomPager(npyscreen.Pager):
    """Pager that word-wraps chat messages and colorizes message headers.

    Lines beginning with '->' (generated message headers) render in the
    'CONTROL' color; message bodies are tab-indented and wrapped.
    """

    def __init__(self, screen, autowrap=True, center=False, **keywords):
        super().__init__(screen, **keywords)
        self.autowrap = autowrap
        self.center = center
        self._values_cache_for_wrapping = []
        self.widgets_inherit_color = True
        self.color = 'DEFAULT'
        # When True, _wrap_message_lines passes values through unchanged.
        self.lines_placed = False

    def _wrap_message_lines(self, message_lines, line_length):
        """Wrap raw message lines to ``line_length`` columns.

        A message containing a newline+tab separator is split into a
        header (sender name and parenthesised timestamp, padded with dots
        and prefixed with '->') and a wrapped, tab-indented body. Lines
        without the separator are treated as body-only.
        """
        lines = []
        if not self.lines_placed:
            for line in message_lines:
                if line.rstrip() == '':
                    lines.append('')
                else:
                    if line.find('\n\t') != -1:
                        user_info, message_text = line.rsplit("\n\t", 1)
                        # Dot-pad so the timestamp is pushed to the right edge.
                        space = line_length - 1 - len(user_info)
                        name, timestamp = user_info.split('(')
                        message_header = "{}{}({}".format(name.strip(), '.' * space,
                                                          timestamp.strip())
                        lines.append("->{}".format(message_header))
                    else:
                        message_text = line
                    this_line_set = list(map(
                        lambda x: "\t\t\t\t{}".format(x),
                        textwrap.wrap(message_text.rstrip(), line_length - 5)))
                    if this_line_set:
                        lines.extend(this_line_set + [''])
                    else:
                        lines.append('')
        else:
            lines = message_lines
        return lines

    def _set_line_values(self, line, value_indexer):
        # Copy one value into a display line, blanking it when the index is
        # out of range or values is not indexable.
        # NOTE(review): falls through to return None on success while
        # returning False on failure -- confirm callers don't rely on a
        # True success return.
        try:
            _vl = self.values[value_indexer]
        except IndexError:
            self._set_line_blank(line)
            return False
        except TypeError:
            self._set_line_blank(line)
            return False
        line.value = self.display_value(_vl)
        # Header lines ('->...') get the CONTROL color, bodies the default.
        line.color = _vl.startswith('->') and 'CONTROL' or 'DEFAULT'
        line.hidden = False

    def h_scroll_line_down(self, input):
        # Scroll one line; leave the widget downward once the remaining
        # content fits within the visible height.
        self.start_display_at += 1
        if self.scroll_exit and self.height > len(self.values) - self.start_display_at:
            self.editing = False
            self.how_exited = widget.EXITED_DOWN
class HistoryBox(npyscreen.BoxTitle):
    """Boxed pager showing the chat history of the selected dialog."""
    _contained_widget = CustomPager
class CustomRoundCheckBox(npyscreen.RoundCheckBox):
    """Round checkbox used inside the dialog list.

    Overrides drawing so the checkbox glyph is rendered in CONTROL color and
    the label sits right next to it.
    """

    def _create_label_area(self, screen):
        l_a_width = self.width - 3
        if l_a_width < 1:
            raise ValueError("Width of checkbox + label must be at least 6")
        self.label_area = Textfield(screen, rely=self.rely, relx=self.relx + 3,
                                    width=self.width - 3, value=self.name)

    def update(self, clear=True):
        super().update(clear=clear)
        if self.hide: return True
        # FIX: both branches previously assigned False_box, so a selected entry
        # was never drawn checked. Selected entries now show True_box.
        if self.value:
            cb_display = self.__class__.True_box
        else:
            cb_display = self.__class__.False_box
        if self.do_colors():
            self.parent.curses_pad.addstr(self.rely, self.relx, cb_display,
                                          self.parent.theme_manager.findPair(self, 'CONTROL'))
        else:
            self.parent.curses_pad.addstr(self.rely, self.relx, cb_display)
        self._update_label_area()

    def calculate_area_needed(self):
        # Let the containing MultiLine widget manage the space.
        return 0, 0
class CustomSelectOne(npyscreen.SelectOne):
    """Single-choice list whose rows are CustomRoundCheckBox widgets."""
    _contained_widgets = CustomRoundCheckBox
class DialogBox(npyscreen.BoxTitle):
    """Boxed selector listing the user's Telegram dialogs."""
    _contained_widget = CustomSelectOne
class PyGramForm(npyscreen.ActionFormExpanded):
    """Main form: dialog list (left), history pane (right) and input box (bottom).

    Button mapping is repurposed: the OK slot quits, the CANCEL slot sends.
    """
    CANCEL_BUTTON_BR_OFFSET = (5, 12)
    OK_BUTTON_TEXT = "QUIT"
    CANCEL_BUTTON_TEXT = "SEND"
    # NOTE(review): evaluated at class-definition time — telegram-cli must
    # already be reachable when this module is imported.
    FULL_NAME = '{} {}'.format(TG.sender.get_self().first_name, TG.sender.get_self().last_name)

    def __init__(self, *args, **kwargs):
        # Width of the left-hand dialog-list column.
        self.form_width = 30
        super().__init__(*args, **kwargs)
        self.current_peer = None  # dialog currently shown in the history pane

    def on_ok(self):
        """QUIT button: confirm, stop the receiver and exit the app."""
        ans = npyscreen.notify_yes_no('Are you sure, you want to quit?')
        if ans:
            TG.receiver.stop()
            self.parentApp.switchForm(None)

    def on_cancel(self):
        """SEND button: send the typed text to the selected dialog."""
        if self.dialog_list.entry_widget and self.dialog_list.entry_widget.value:
            selected_index = self.dialog_list.entry_widget.value[0]
            dialog_name = self.dialog_list.values[selected_index]
            text = self.chat_box.entry_widget.value.strip()
            if text:
                send_status = TG.sender.send_msg(dialog_name, text)
                if send_status:
                    self.chat_box.entry_widget.value = ""
                    self.load_history()
                    # Re-select the current peer and put focus back on the input box.
                    self.dialog_list.entry_widget.value = self.dialog_list.values.index(self.current_peer.print_name)
                    self.editw = self._widgets__.index(self.chat_box)
        else:
            npyscreen.notify_ok_cancel('Please select receiver first.')

    def create(self):
        """Build the three widgets and start the background receiver thread."""
        self.dialog_list = self.add(DialogBox, name="Dialog List", scroll_exit=True,
                                    editable=True, max_width=self.form_width, max_height=self._max_physical()[0] - 10)
        self.load_dialogs()
        # Ctrl-D reloads the history of the highlighted dialog.
        self.dialog_list.add_handlers({'^D': self.load_history})
        self.chat_history = self.add(HistoryBox, name="", scroll_exit=True,
                                     editable=True, relx=self.form_width + 2, rely=2,
                                     max_height=self._max_physical()[0] - 10)
        self.chat_box = self.add(ChatBox, name='{}'.format(self.FULL_NAME), scroll_exit=True,
                                 editable=True, max_height=5, contained_widget_arguments={'name': ' '})
        self.start_receiver()

    def start_receiver(self):
        """Run the pytg receiver loop in a daemon thread."""
        self.receiver_thread = threading.Thread(target=self.trigger_receiver)
        self.receiver_thread.daemon = True
        self.receiver_thread.start()

    @coroutine
    def message_loop(self):
        """Coroutine fed by pytg; refreshes dialogs/history on incoming messages."""
        try:
            while True:
                msg = (yield)
                if msg.event != "message" or msg.own:
                    continue
                else:
                    self.load_dialogs()
                    if self.dialog_list.entry_widget and self.dialog_list.entry_widget.value:
                        selected_index = self.dialog_list.entry_widget.value[0]
                        printed_name = self.dialog_list.values[selected_index]
                        current_dialog = list(filter(lambda x: x.print_name == printed_name,
                                                     self.parentApp.dialog_list))[0]
                        # Only reload if the message belongs to the open dialog.
                        if ((current_dialog.peer_type == 'user' and current_dialog.peer_id == msg.sender.peer_id) or
                                (current_dialog.peer_type == 'chat' and
                                 current_dialog.peer_id == msg.receiver.peer_id)):
                            self.load_history(trigger_movement=False)
        except (GeneratorExit, KeyboardInterrupt, NoResponse):
            pass

    def trigger_receiver(self, *args, **keywords):
        # Blocking call; runs in the daemon thread started by start_receiver().
        TG.receiver.start()
        TG.receiver.message(self.message_loop())

    def load_dialogs(self, *args, **keywords):
        """Fetch the dialog list from telegram-cli into the left-hand widget."""
        dialog_list = TG.sender.dialog_list(retry_connect=True)
        self.parentApp.dialog_list = dialog_list
        self.dialog_list.values = list(map(lambda x: x.print_name, self.parentApp.dialog_list))

    def load_history(self, *args, **keywords):
        """Load the last 100 messages of the selected dialog into the history pane.

        keywords: trigger_movement (default True) — move focus to the chat box.
        """
        selected_index = self.dialog_list.entry_widget.value[0]
        printed_name = self.dialog_list.values[selected_index]
        selected_dialog = list(filter(
            lambda x: x.print_name == printed_name, self.parentApp.dialog_list))
        if selected_dialog:
            self.current_peer = selected_dialog[0]
            # Force re-wrapping of the newly loaded values.
            self.chat_history.entry_widget.lines_placed = False
            self.chat_history.name = (getattr(selected_dialog[0], 'title', None) or
                                      "{} {}".format(getattr(selected_dialog[0], 'first_name', ''),
                                                     getattr(selected_dialog[0], 'last_name', '')))
            # One entry per message: "First Last (timestamp)\n\ttext-or-media-address".
            self.chat_history.values = list(
                filter(lambda x: x,
                       map(lambda x: (
                           '{} {} ({})\n\t{}'.format(getattr(getattr(x, 'from'), 'first_name', ''),
                                                     getattr(getattr(x, 'from'), 'last_name', ''),
                                                     datetime.fromtimestamp(getattr(x, 'date', '')),
                                                     (getattr(x, 'text', '') or
                                                      getattr(getattr(x, 'media', DictObject()), 'address', '')))),
                           TG.sender.history(printed_name, 100, 0, retry_connect=True))))
            self.parentApp.fill_history()
            self.find_next_editable()
            self.editw -= 1
            self.chat_history.entry_widget.lines_placed = True
            self.chat_history.update()
            # Jump to the newest message.
            self.chat_history.entry_widget.h_show_end(None)
            self.find_next_editable()
            self.editw -= 1
        if keywords.get('trigger_movement', True):
            # Force movement to history box
            self.editw = 0
            for wid in self._widgets__:
                if wid == self.chat_box:
                    wid.how_exited = widget.EXITED_DOWN
                    self.editw = self._widgets__.index(wid)
                    self._widgets__[self.editw].editing = True
                    self._widgets__[self.editw].edit()
                    self._widgets__[self.editw].display()
                    break
                wid.editing = False
                wid.how_exited = widget.EXITED_DOWN
                self.handle_exiting_widgets(wid.how_exited)
        self.load_dialogs()
        self.dialog_list.update()
class PyGramApp(npyscreen.NPSAppManaged):
    """Application object; caches dialog/contact lists fetched from telegram-cli."""
    dialog_list = []
    contacts_list = []

    def onStart(self):
        self.dialog_list = TG.sender.dialog_list(retry_connect=True)
        self.contacts_list = TG.sender.contacts_list()
        self.addForm('MAIN', PyGramForm, name='Welcome PyGram')

    def fill_history(self):
        # Clear npyscreen's stored widget history after the pane is refilled.
        self.resetHistory()
if __name__ == "__main__":
    # One log file per day; the ./log directory must already exist.
    logging.basicConfig(filename="./log/pygram-{}.log".format(datetime.now().date()))
    PyGramApp().run()
|
996,378 | cfad506281de54a2a7123ad66d75171d53d925ac | #!/usr/local/bin/python
import math
from os import listdir
from os.path import isfile, join, realpath
import mmap
import sys
class WriteMapOfNames():
    """Maps darkcurrent CCD image names to ARAMyS sensor names.

    ReadAvailableCCDImages() collects the .ccd image names per side, then
    CheckImagesAgainstSensorsFile() looks each one up in the sensors.db.gen
    files and writes name-map text files.
    """

    def __init__(self, imagepath):
        # Directory holding the .ccd darkcurrent images.
        self.imagepath = imagepath
        self.images_aside_dir = []
        self.images_cside_dir = []
        self.images_aside = []
        self.images_cside = []
        self.images_aside_notfound = []
        self.images_cside_notfound = []
        self.count_found_aside = 0
        self.count_notfound_aside = 0
        self.count_found_cside = 0
        self.count_notfound_cside = 0

    def ReadAvailableCCDImages(self):
        """Collect available .ccd image names, split by A side / C side.

        The 5th character of the filename encodes the side; names are stored
        as "BCAM <stem>_1" to match the sensors-file format.
        """
        onlyfiles = [f for f in listdir(self.imagepath) if isfile(join(self.imagepath, f))]
        for file in onlyfiles:
            if file[4] == 'A':
                self.images_aside_dir.append("BCAM "+file[0:-4]+"_1")
            if file[4] == 'C':
                self.images_cside_dir.append("BCAM "+file[0:-4]+"_1")
        return

    def CheckImagesAgainstSensorsFile(self):
        """Map each image name to its ARAMyS name via the sensors.db.gen files.

        Writes three name-map files (a side, c side, combined). Fixes two
        defects of the original: the combined map's c-side line used
        split()[1][-2] (a single character) instead of the [:-2] slice used
        everywhere else, and none of the five files were ever closed.
        """
        parse_dir = realpath(join(__file__ ,"../..")) #not sure if this works on windows, better options in python 3+
        sensors_aside = open(parse_dir+'/ARAMyS_files/sensors.db.gen_aside', 'r')
        sensors_cside = open(parse_dir+'/ARAMyS_files/sensors.db.gen_cside', 'r')
        name_map_aside = open(parse_dir+'/GlobalPostions/NameMaps/name_map_aside.txt', 'w')
        name_map_cside = open(parse_dir+'/GlobalPostions/NameMaps/name_map_cside.txt', 'w')
        name_map = open(parse_dir+'/GlobalPostions/NameMaps/name_map.txt', 'w')
        try:
            for image in self.images_aside_dir:
                found = False
                # Re-mmap for each image so the scan restarts at the top of the file.
                s_aside = mmap.mmap(sensors_aside.fileno(), 0, access=mmap.ACCESS_READ)
                for line in iter(s_aside.readline, ""):
                    if line.find(image) != -1:
                        found = True
                        self.images_aside.append(line.strip().split()[2])
                        name_map_aside.write(line.strip().split()[1][:-2]+" "+line.strip().split()[2]+"\n")
                        name_map.write(line.strip().split()[1][:-2]+" "+line.strip().split()[2]+"\n")
                        break
                    else:
                        self.images_aside_notfound.append(line.strip().split()[1])
                if found == True:
                    self.count_found_aside = self.count_found_aside + 1
                else:
                    self.count_notfound_aside = self.count_notfound_aside + 1
            for image in self.images_cside_dir:
                found = False
                s_cside = mmap.mmap(sensors_cside.fileno(), 0, access=mmap.ACCESS_READ)
                for line in iter(s_cside.readline, ""):
                    if line.find(image) != -1:
                        found = True
                        self.images_cside.append(line.strip().split()[2])
                        name_map_cside.write(line.strip().split()[1][:-2]+" "+line.strip().split()[2]+"\n")
                        # FIX: was [1][-2] (single character) — now matches the
                        # [:-2] slice used by every other name-map write.
                        name_map.write(line.strip().split()[1][:-2]+" "+line.strip().split()[2]+"\n")
                        break
                    else:
                        self.images_cside_notfound.append(line.strip().split()[1])
                if found == True:
                    self.count_found_cside = self.count_found_cside + 1
                else:
                    self.count_notfound_cside = self.count_notfound_cside + 1
        finally:
            # Close everything even if a lookup raises (original leaked all five).
            for fh in (sensors_aside, sensors_cside, name_map_aside, name_map_cside, name_map):
                fh.close()
        return
class FindGlobalPositions():
    # NOTE(review): Python 2 code (print statements below) — runs under py2 only.

    def ReadPointsFiles(self):
        """Parse ARAMyS alignment points files and write per-name global positions.

        For each 'baz'/'bpl' point name ending in '_p', writes
        "name x y z r2 r3" (r2 = 2D radius, r3 = 3D radius) to the GP_output
        files and echoes the same to stdout.
        """
        parse_dir = realpath(join(__file__ ,"../..")) #not sure if this works on windows, better options in python 3+
        sensors_aside = open(parse_dir+'/ARAMyS_files/sensors.db.gen_aside', 'r')
        points_aside = open(parse_dir+'/ARAMyS_files/aside_points.txt', 'r')
        points_cside = open(parse_dir+'/ARAMyS_files/cside_points.txt', 'r')
        # NOTE(review): these mmaps are created but the loops below iterate the
        # plain file objects instead — confirm the mmaps are needed at all.
        p_aside = mmap.mmap(points_aside.fileno(), 0, access=mmap.ACCESS_READ)
        p_cside = mmap.mmap(points_cside.fileno(), 0, access=mmap.ACCESS_READ)
        baz_aside_pos = open(parse_dir+'/GlobalPostions/GP_output/baz_aside_pos.txt', 'w')
        baz_cside_pos = open(parse_dir+'/GlobalPostions/GP_output/baz_cside_pos.txt', 'w')
        bpl_aside_pos = open(parse_dir+'/GlobalPostions/GP_output/bpl_aside_pos.txt', 'w')
        bpl_cside_pos = open(parse_dir+'/GlobalPostions/GP_output/bpl_cside_pos.txt', 'w')
        space = " "
        # A side: columns 1-3 are x/y/z, column 7 is the point name.
        for line in points_aside:
            line = line.strip().split()
            name = line[7]
            # if name.find('baz') != -1 and name.find('aei') != -1 and name[-4:] == "c2_p":
            if name.find('baz') != -1 and name[-2:] == "_p":
                # if name[-4:] == "c2_p":
                x_pos = float(line[1])
                y_pos = float(line[2])
                z_pos = float(line[3])
                r2 = math.sqrt( (x_pos)*(x_pos) + (y_pos)*(y_pos) )
                r3 = math.sqrt( (x_pos)*(x_pos) + (y_pos)*(y_pos) + (z_pos)*(z_pos) )
                # print line
                print "name = "+name+", x = "+str(x_pos)+", y = "+str(y_pos)+", z = "+str(z_pos)+", r2 = "+str(r2)+", r3 = "+str(r3)
                baz_aside_pos.write(name+space+str(x_pos)+space+str(y_pos)+space+str(z_pos)+space+str(r2)+space+str(r3)+"\n")
            # if name.find('baz') != -1 and name.find('aei') != -1 and name[-4:] == "c2_p":
            if name.find('bpl') != -1 and name[-2:] == "_p":
                # if name[-4:] == "c2_p":
                x_pos = float(line[1])
                y_pos = float(line[2])
                z_pos = float(line[3])
                r2 = math.sqrt( (x_pos)*(x_pos) + (y_pos)*(y_pos) )
                r3 = math.sqrt( (x_pos)*(x_pos) + (y_pos)*(y_pos) + (z_pos)*(z_pos) )
                # print line
                print "name = "+name+", x = "+str(x_pos)+", y = "+str(y_pos)+", z = "+str(z_pos)+", r2 = "+str(r2)+", r3 = "+str(r3)
                bpl_aside_pos.write(name+space+str(x_pos)+space+str(y_pos)+space+str(z_pos)+space+str(r2)+space+str(r3)+"\n")
        # C side: same processing against the c-side points file.
        for line in points_cside:
            line = line.strip().split()
            name = line[7]
            # if name.find('baz') != -1 and name.find('aei') != -1 and name[-4:] == "c2_p":
            if name.find('baz') != -1 and name[-2:] == "_p":
                # if name[-4:] == "c2_p":
                x_pos = float(line[1])
                y_pos = float(line[2])
                z_pos = float(line[3])
                r2 = math.sqrt( (x_pos)*(x_pos) + (y_pos)*(y_pos) )
                r3 = math.sqrt( (x_pos)*(x_pos) + (y_pos)*(y_pos) + (z_pos)*(z_pos) )
                # print line
                print "name = "+name+", x = "+str(x_pos)+", y = "+str(y_pos)+", z = "+str(z_pos)+", r2 = "+str(r2)+", r3 = "+str(r3)
                baz_cside_pos.write(name+space+str(x_pos)+space+str(y_pos)+space+str(z_pos)+space+str(r2)+space+str(r3)+"\n")
            # if name.find('baz') != -1 and name.find('aei') != -1 and name[-4:] == "c2_p":
            if name.find('bpl') != -1 and name[-2:] == "_p":
                # if name[-4:] == "c2_p":
                x_pos = float(line[1])
                y_pos = float(line[2])
                z_pos = float(line[3])
                r2 = math.sqrt( (x_pos)*(x_pos) + (y_pos)*(y_pos) )
                r3 = math.sqrt( (x_pos)*(x_pos) + (y_pos)*(y_pos) + (z_pos)*(z_pos) )
                # print line
                print "name = "+name+", x = "+str(x_pos)+", y = "+str(y_pos)+", z = "+str(z_pos)+", r2 = "+str(r2)+", r3 = "+str(r3)
                bpl_cside_pos.write(name+space+str(x_pos)+space+str(y_pos)+space+str(z_pos)+space+str(r2)+space+str(r3)+"\n")
def main():
    """Entry point: currently only regenerates the global-positions files."""
    #Execute all three below in sequence to recreate name files, should put this in a wrapper
    #w = WriteMapOfNames("/Users/dhaliwal/Dropbox/Work/Alignment/DarkCurrent/darkcurrent/DC_Images/May19")
    #w.ReadAvailableCCDImages()
    #w.CheckImagesAgainstSensorsFile()
    #Execute below for creating global postions file
    r = FindGlobalPositions()
    r.ReadPointsFiles()

if __name__ == "__main__":
    main()
|
996,379 | b53857a665f9a14f6d46c32312b7f8dfbdd0cd51 | # Generated by Django 2.1.2 on 2018-12-10 10:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `is_imported` flag to Aid, marking records imported from third parties."""

    dependencies = [
        ("aids", "0049_auto_20181115_1606"),
    ]

    operations = [
        migrations.AddField(
            model_name="aid",
            name="is_imported",
            field=models.BooleanField(
                default=False, verbose_name="Is imported from a third-party?"
            ),
        ),
    ]
|
996,380 | 736b674c67b2620845910652d085ad1290a41560 | '''i
Plot fss_ensemble scores for RRA domain for different forecast initializations.
'''
import os
import sys
sys.path.insert(0, '..')
from common import util as utcom
import util as ut
import util_plot as utplot
import time
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
def plt_env(fig, ax, opts):
    """Apply shared styling (ylabels, ticks, titles, grid, panel letters) to a 2x2 axes grid.

    opts must carry 'fs', 'fstitle', 'letters' and 'inits'; returns (fig, ax).
    """
    yticks = np.round(np.arange(0,1.2,0.2),1)
    fs = opts['fs']
    letters = opts['letters']
    fstitle = opts['fstitle']
    plt.subplots_adjust(wspace=0, hspace=0.1)
    # Y-axis
    k = 0  # flat panel index over the 2x2 grid (titles/letters/inits)
    for i in range(ax.shape[0]):
        ax[i, 0].set_ylabel('FSS', fontsize=fs)
        ax[i, 0].set_yticklabels(yticks,fontsize=fs)
        for j in range(ax.shape[1]):
            ax[i, j].set_ylim([0 ,1])
            # Subplot title
            ax[i, j].set_title(opts['inits'][k] + ' UTC initialization', fontsize=fstitle, weight='bold')
            ax[i, j].grid(which='both', color='lightgray')
            ax[i, j].text(-0.1, 0.98, letters[k], horizontalalignment='left', verticalalignment='top', fontsize=fstitle)
            k += 1
    return fig, ax
def plt_fss(plt_type, data, alpha, param, fig, ax, opts, used=False):
    """Plot FSS curves (one panel per initialization) onto the given axes grid.

    plt_type: 'boxes' (x = lead times) or 'leads' (x = spatial scales).
    data: nested dict data[init][threshold][scale] -> sequence of FSS values.
    param: matplotlib linestyle shared by all curves of this call.
    used: shift-by-one variant (skip first lead) drawn on a different panel
          when only one initialization is present.
    """
    inits = [*data.keys()]
    thrs = [*data[inits[0]].keys()]
    scales = [*data[inits[0]][thrs[0]].keys()]
    fs = opts['fs']
    lw = opts['lw']
    if plt_type == 'boxes':
        x = opts['leads']
        xticks = x
        xlabel = 'Forecast lead time (hrs)'
    elif plt_type == 'leads':
        x = opts['scales']
        xticks = opts['xticks']
        xlabel = 'Spatial scale (km)'
    # Plot
    j = 0  # panel row
    k = 0  # panel column
    for i, finit in enumerate(inits):
        if len(inits) == 1:
            # Single-init dataset (e.g. GFS): pin it to a fixed panel.
            if not used:
                iax = ax[0, 1]
            else:
                iax = ax[1, 0]
        else:
            # Wrap to the second row once the first row is full.
            if i == ax.shape[1]:
                j = 1
                k = 0
            iax = ax[j, k]
            if j == 1:
                iax.set_xticklabels(xticks, fontsize=fs)
                iax.set_xlabel(xlabel, fontsize=fs)
            k += 1
        for thr, a in zip(thrs, opts['colors']):
            #iax.set_prop_cycle(cycler(color=opts['colors']))#linestyle=opts['linestyles']))
            for scale, b in zip(scales, ['o', 's']): #['8','4']):
                if used and plt_type == 'boxes':
                    # Offset variant: drop the first lead and shift x accordingly.
                    x = opts['leads'][:-1]
                    y = data[finit][thr][scale][1:]
                elif plt_type == 'leads':
                    y = data[finit][thr][scale][::-1]
                else:
                    y = data[finit][thr][scale]
                iax.plot(x, y, lw=lw, color=a, ls=param, alpha=alpha, marker=b)#, markersize=b, color=a, ls=param, alpha=alpha)
    return fig, ax
# Script configuration: models/initializations, score selection and paths.
MODELS = {'wrf': {'init': ['00', '06', '12', '18'] },
          'gfs': {'init': ['06']}
          }
SCORES = ['FSS'] #List of scores to plot
TYPE = 'ens' #Type of data used to compute score: det (ensemble mean), ens
SCALES = [85, 5] #Spatial scales (in pixels) to plot
THRESHOLDS = [1, 25] #Thresholds to plot
ACUM = 6 #Period of accumulation in hours
FCSTLENGTH = 36 #Fcst length in hours
FCSTFREQ = 3600 #Fcst output frequency in seconds
DOMAIN = 'RRA' #Predefined domain to interpolate data to
FIGTITLE = 'paper'
# --------------------------------------------------------- #
basedir = '/home/paula.maldonado/datosalertar1/RRA_VERIF'
basedir = basedir + '/verif_data/RRA_6hr_accumulated'
OUTDIR = basedir.replace("verif_data","figures") + '/' + FIGTITLE + '/' + TYPE + '_' + SCORES[0].lower()
os.makedirs(OUTDIR, exist_ok=True)
# Auxiliary vars
npz = TYPE + '_spatial_scores_initializations'
pngout = FIGTITLE + '_' + npz + '_2x2'
# Lead-time labels, e.g. ['06', '12', '18', '24', '30', '36'] for ACUM=6, 36h forecasts.
fcst_leads = [str(i).zfill(2) for i in np.arange(ACUM, FCSTLENGTH+ACUM, int(FCSTLENGTH/ACUM))]
# Plot vars
figsize = [10, 10]
plot_opts = {'colors':['black', 'gray', 'red', 'lightgrey'],
             'linestyles': ['-', '--', '-.', ':'],
             'lw': 2,
             'fs': 12,
             'fstitle': 14,
             'xticks': np.arange(-200,1000,200),
             'acum': ACUM,
             'inits': ['00', '06', '12', '18'],
             'leads': fcst_leads,
             'letters': utcom.get_alphabet(4, 'bracket2')
             }
start = time.time()
# Load data and reorganize it to plot
print('---------------------------')
print('LOADING DATA')
print('---------------------------')
for model in MODELS.keys():
    print(model)
    inits = MODELS[model]['init']
    # Load data from npz file (we are loading a dict!)
    DATADIR = basedir + '/' + model
    filein = DATADIR + '/' + npz + '.npz'
    data = np.load(filein, mmap_mode='r', allow_pickle=True)['scores'].item()
    scales = np.load(filein, mmap_mode='r')['scales']
    thrs = np.load(filein, mmap_mode='r')['thrs']
    # Indices of the requested scales/thresholds inside the stored arrays.
    idxs_sc = [scales.tolist().index(i) for i in SCALES]
    idxs_th = [thrs.tolist().index(i) for i in THRESHOLDS]
    # Scales in km (pixels * 10), reversed for the 'leads' plot type.
    plot_opts['scales'] = [i*10 for i in scales[::-1]]
    # Initialize variables
    boxes = ut.init_dict(inits, THRESHOLDS, SCALES)
    # Get scores to plot for each initialization
    for finit in inits:
        for ithr, thr in zip(idxs_th, THRESHOLDS):
            for iscale, scale in zip(idxs_sc, SCALES):
                boxes[finit][thr][scale] = [data[finit][flead][iscale,ithr] for flead in fcst_leads]
    MODELS[model]['boxes'] = boxes
# Begin plot
print('---------------------------')
print('PLOTTING SCORES')
print('---------------------------')
# Create figure with varying BOX SIZE
fig, ax = plt.subplots(2, 2, figsize=figsize, sharey='row', sharex='col')
fig, ax = plt_env(fig, ax, plot_opts)
# WRF solid; GFS dashed (plain and shifted-by-one variants on separate panels).
fig, ax = plt_fss('boxes', MODELS['wrf']['boxes'], 1, '-',fig, ax, plot_opts)
fig, ax = plt_fss('boxes', MODELS['gfs']['boxes'], 1, '--', fig, ax, plot_opts)
fig, ax = plt_fss('boxes', MODELS['gfs']['boxes'], 1, '--', fig, ax, plot_opts, used=True)
# Legend: invisible proxy lines drawn only to populate the legend handles.
# Extra lines for exps
for ls in plot_opts['linestyles'][:len(THRESHOLDS)]:
    ax[0, 0].plot([], [], color=plot_opts['colors'][3], linewidth=plot_opts['lw'], linestyle=ls)
# Extra lines for exps
for ms in ['o','s']: #[8,4]:#plot_opts['linestyles'][:len(THRESHOLDS)]:
    ax[0, 0].plot([], [], color=plot_opts['colors'][3], linewidth=plot_opts['lw'], linestyle='-', marker=ms)#'o', markersize=ms)
# Extra lines for thresholds
for color in plot_opts['colors'][:len(THRESHOLDS)]:
    ax[0, 0].plot([], [], color=color, linewidth=plot_opts['lw'], linestyle='-')
lines = ax[0, 0].get_lines()
labels = ['RRAF', 'NoRegDA'] + [str(i) + '0km box-size' for i in SCALES] + ['1mm thr', '25mm thr']# ' 5mm thr', '10mm thr', '25mm thr']
# Use only the six proxy lines appended above.
idx = np.arange(-6,0).tolist()
ax[1, 1].legend([lines[i] for i in idx], labels,loc=4, handlelength=1.8, prop={'size': plot_opts['fs']}, edgecolor='k', ncol=3)#, bbox_to_anchor=(1.55, 0.6))
# Save figure
fileout = OUTDIR + '/' + pngout + '_boxes'
print('Saving file: ', fileout)
plt.savefig(fileout + '.png', bbox_inches='tight')
plt.savefig(fileout + '.eps', bbox_inches='tight')
plt.close()
|
996,381 | cba61a3c7331342ee69cce11d34d76afb76bab94 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
import rospy
import numpy as np
import tf
from matplotlib import pyplot as plt
import math
import cv2
import time
from geometry_msgs.msg import Twist, Vector3, Pose
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Imu
import transformations
import smach
import smach_ros
import biblio as bib
import cormodule
bridge = CvBridge()
cv_image = None
# Variables that let roda_todo_frame share data with the state machine
contador = 0
aprendeu = 0
start = 0
delay = 2e9
frame = None
## Flags ####
fuga = False       # True while the colored "enemy" blob is too close
media_cor = []     # centroid of the detected color blob
centro = []
area = 0.0
# Maximum allowed delay between the image leaving the Turtlebot3 and arriving at the laptop
atraso = 0.5E9
check_delay = False # Only enable if the Raspberry and desktop ROS clocks are synchronized
#------------ SIFT configuration ----------
MIN_MATCH_COUNT = 40
img1 = cv2.imread('alac25.jpg',0)# Template image to search for (replaced by the learned object later)
sift = cv2.xfeatures2d.SIFT_create()
def new_sift_image(img1):
    """Compute SIFT keypoints and descriptors for the given template image."""
    keypoints, descriptors = sift.detectAndCompute(img1, None)
    return keypoints, descriptors
# Initial template features (recomputed once the object is learned).
kp1,des1 = new_sift_image(img1)
ok = True
#------------ Tracker configuration -------------
# Selects the tracking algorithm type
def create_tracker():
    """Create a fresh OpenCV tracker instance.

    The algorithm is fixed by index 4 of the list below (MEDIANFLOW).
    Returns (tracker, tracker_type).
    """
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    factory_names = {
        'BOOSTING': 'TrackerBoosting_create',
        'MIL': 'TrackerMIL_create',
        'KCF': 'TrackerKCF_create',
        'TLD': 'TrackerTLD_create',
        'MEDIANFLOW': 'TrackerMedianFlow_create',
        'GOTURN': 'TrackerGOTURN_create',
    }
    tracker_type = tracker_types[4]
    # Only the selected factory is looked up on cv2, as in the original if-chain.
    tracker = getattr(cv2, factory_names[tracker_type])()
    return tracker, tracker_type
tracker,tracker_type = create_tracker()
# Initial bounding-box coordinates (manual): ((top-left), width, height)
bbox = (0, 0, 0, 0)
aprendendo = True   # learning phase flag: True until the object model is built
contadois = 0       # frame counter for the learning phase
obj = bib.objetoo()
def vai(frame, contador):
    """Per-frame vision step.

    Learning phase (aprendendo True): feed frames to `obj` to build the object
    model; every 60th frame extracts the object and switches to tracking.
    Tracking phase: when contador == 0 re-detect the object via SIFT+homography
    and (re)initialize the MedianFlow tracker; otherwise just update the tracker.
    Results are published through the module globals bbox/ok.
    """
    global aprendendo
    global contadois, des1,kp1, img1
    contadois += 1
    # print("ENTRO")
    # print(contadois)
    # print(aprendendo)
    # contadois += 1
    if aprendendo == False:
        global ok,tracker,tracker_type,bbox, des1,kp1, img1
        if(contador == 0): # Every time the counter gets reset, try to re-find the object
            # Copy the image to leave the colored one to be used as output
            frame_gray = frame.copy()
            # Convert the frame to grayscale
            frame_gray = cv2.cvtColor(frame_gray, cv2.COLOR_BGR2GRAY)
            # Actual SIFT run
            kp2, des2 = sift.detectAndCompute(frame_gray,None)
            FLANN_INDEX_KDTREE = 0
            index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
            search_params = dict(checks = 50)
            # Configure the feature-matching algorithm
            flann = cv2.FlannBasedMatcher(index_params, search_params)
            # Find best matches between template and frame descriptors
            matches = flann.knnMatch(des1,des2,k=2)
            # store all the good matches as per Lowe's ratio test.
            good = []
            for m,n in matches:
                if m.distance < 0.7*n.distance:
                    good.append(m)
            if len(good)>MIN_MATCH_COUNT:
                font = cv2.FONT_HERSHEY_SIMPLEX
                src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
                dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
                # Homography (rotation+translation+scale) mapping the template into the frame
                M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
                # Template corners as spatial points.
                # NOTE(review): 3-value unpack assumes img1 is color; by now img1
                # is obj.objeto (learned object) rather than the grayscale file — confirm.
                h,w,_ = img1.shape
                pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
                # Project the template corners into the destination image
                dst = np.int32(cv2.perspectiveTransform(pts,M))
                # Draw the outline
                #cv2.polylines(frame,dst,True,(0,0,255),3, cv2.LINE_AA)
                # Draw the polygon center
                #top_left = dst[0][0]
                #top_right = dst[3][0]
                #bot_right = dst[2][0]
                #bot_left = dst[1][0]
                all_x = dst[:,0,0]
                all_y = dst[:,0,1]
                maxY = np.max(all_y)
                minY = np.min(all_y)
                maxX = np.max(all_x)
                minX = np.min(all_x)
                cv2.circle(frame, (minX,minY), 15, (0, 255, 0), 6)
                cv2.circle(frame,(maxX,maxY) , 15, (255, 0, 255), 6)
                # Re-seed the tracker only with a plausibly sized box.
                if((maxX-minX)> 15 and (maxY-minY)>15):
                    bbox = (minX,minY,(maxX-minX),(maxY-minY))
                    tracker, tracker_type = create_tracker()
                    ok = tracker.init(frame,bbox)
                else:
                    pass
                # print("IIhh rapah")
                # print(bbox)
                cv2.imshow("Tracking", frame)
                #print(ok, "Qualqure")
                #cv2.rectangle(frame, (minX,maxY), (maxX,minY), (255,0,0), 2, 1)
                # NOTE(review): pol_x mixes y/x indices of dst — values unused, likely leftover.
                pol_y = np.int32((dst[1][0][1] - dst[0][0][1])/2 + dst[0][0][1])
                pol_x = np.int32((dst[3][0][1] - dst[0][0][0])/2 + dst[0][0][1])
        else:
            # Tracking-only frames: advance the tracker and draw the result.
            if ok:
                ok, bbox = tracker.update(frame)
            # Draw bounding box
            if ok:
                # Tracking success
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                cv2.rectangle(frame, p1, p2, (0,255,0), 3, 3)
                # Mark the box center
                coordx = int(p2[0]+((p1[0]-p2[0])/2))
                coordy = int(p2[1]+(p1[1]-p2[1])/2)
                cv2.circle(frame,(coordx,coordy),2,(255,0,0),1)
            else :
                # Tracking failure
                cv2.putText(frame, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
                bbox = (0,0,0,0)
            # Display result
            cv2.imshow("Tracking", frame)
            k = cv2.waitKey(1) & 0xff
            if k == 27 :
                # NOTE(review): `cap` is not defined in this module — ESC would raise. Confirm.
                cap.release()
                cv2.destroyAllWindows()
    else: # Learning phase: accumulate background/object statistics
        fram2 = frame.copy()
        if contadois != 0:
            # print("Entroooooooooooooooooooooooooooooooooooo")
            if contadois%1 == 0:
                obj.learnbackground(fram2)
                # print("AGHAGHAGHGAHGHAGHGAHHAAHGHAGHAHAA")
            if contadois%60 == 0:
                # Extract the learned object and switch to detection/tracking.
                obj.learnobject(fram2)
                kp1, des1 = sift.detectAndCompute(obj.objeto,None)
                img1 = obj.objeto
                cv2.imshow("objetp",obj.objeto)
                aprendendo = False
        cv2.imshow("Tracking", frame)
        k = cv2.waitKey(1) & 0xff
        if k == 27 :
            cap.release()
            cv2.destroyAllWindows()
def roda_todo_frame(imagem):
    """Camera callback: decode the frame, update color-flee flags and run vai().

    Updates the module globals frame, media_cor, area, fuga and contador.
    """
    global objeto,kp1,des1
    global bbox,contador,frame
    global media_cor,centro,area,fuga
    # print("New Frame")
    now = rospy.get_rostime()
    imgtime = imagem.header.stamp
    lag = now-imgtime
    delay = lag.nsecs
    if delay > atraso and check_delay==True:
        return # Drop stale frames
    try:
        frame = bridge.compressed_imgmsg_to_cv2(imagem, "bgr8")
        frame_cor = frame.copy()
        media_cor, area = cormodule.identifica_cor(frame_cor)
        # print(area)
        # A large colored blob means the "enemy" is close -> flee.
        if area > 14000:
            fuga = True
        else:
            fuga = False
        vai(frame,contador)
        # contador cycles 0,1,2 so SIFT re-detection runs every third frame.
        contador += 1
        if contador > 2:
            contador = 0
    except CvBridgeError as e:
        print('ex', e)
# --------------------------------Survival---------------------------------------------------
mini = [10, 0]   # [closest laser range, its beam index]
angulo = 0       # impact angle estimated from IMU accelerations (degrees)
bateu = False    # collision detected flag
desvia = False   # obstacle-avoidance needed flag
minimo = 0.3     # distance threshold (m) that triggers avoidance
tmp = 0
crash = []       # recent linear_acceleration.x samples (collision window)
media = 0
diff = 0
i = 0            # write index into the crash window
def scaneou(dado):
    """LaserScan callback: track the closest valid reading and set the avoid flag.

    Updates the global `mini` = [closest range, beam index] and sets `desvia`
    when the closest valid reading is under `minimo`.
    """
    global mini
    global desvia
    global minimo
    print(desvia)
    mini = [dado.range_max, 0]
    lelescan=np.array(dado.ranges).round(decimals=2)
    for i in range(len(lelescan)):
        # FIX: the original `if ... >= range_min: pass` was a no-op, so readings
        # below range_min (invalid returns, often 0.0) could win the minimum and
        # falsely trigger avoidance. The validity check now gates the update.
        if lelescan[i] >= dado.range_min:
            if mini[0] > lelescan[i]:
                mini = [lelescan[i],i]
    if mini[0]<minimo:
        desvia=True
def leu_imu(dado):
    """IMU callback: detect collisions from jumps in linear acceleration (x axis).

    Keeps a small circular window of recent accelerations; a sample deviating
    >= 3.5 from the window mean sets `bateu` and records the impact `angulo`.
    """
    global angulo
    global crash
    global bateu
    global media
    global diff
    global i
    # print(bateu)
    quat = dado.orientation
    lista = [quat.x, quat.y, quat.z, quat.w]
    angulos = np.degrees(transformations.euler_from_quaternion(lista))
    if len(crash) < 5 :
        crash.append(dado.linear_acceleration.x)
    else:
        crash[i] = dado.linear_acceleration.x
        #print(crash)
        # NOTE(review): index wraps at 4, so slot 4 of the 5-element window is
        # written only once — likely an off-by-one; confirm intent.
        i += 1
        if i == 4:
            i = 0
    angulo =math.degrees(math.atan2(dado.linear_acceleration.x , dado.linear_acceleration.y))
    media = np.mean(crash)
    diff = abs(crash[-1] - media)
    if diff >= 3.5:
        bateu = True
def tempo_de_batida(t = None):
    """Collision-timer helper.

    With t given: record the current time in `tmp`, sleep t seconds, return True.
    With t None: return False when the recorded time is not in the future.
    NOTE(review): with t None and tmp ahead of now, the function implicitly
    returns None — callers treat that as falsy; confirm this is intended.
    """
    global tmp
    if t == None:
        if float(tmp - rospy.get_rostime().secs )<= 0:
            # print ("1")
            return False
    else:
        tmp = rospy.get_rostime().secs
        rospy.sleep(t)
        return True
def Bateu(angulo, diff):
    """Collision reaction: back up while spinning, chosen by the impact angle.

    angulo >= 100: spin left, wait 1.5 s; 80 < angulo < 100: spin left, wait 2 s;
    angulo <= 80: spin right, wait 1.5 s. (diff is accepted but unused here.)
    """
    global bateu
    # print("Bateu")
    if angulo >= 100:
        giro, espera = 2, 1.5
    elif angulo > 80:
        giro, espera = 2, 2
    else:
        giro, espera = -2, 1.5
    velocidade = Twist(Vector3(-2, 0, 0), Vector3(0, 0, giro))
    velocidade_saida.publish(velocidade)
    tempo_de_batida(espera)
def Dont(dire):
    """Obstacle-avoidance maneuver.

    dire == 0: back up then turn in place (obstacle dead ahead/behind).
    dire == +/-1: short reverse, turn toward `dire`, then nudge forward.
    """
    if (dire==0):
        velocidade = Twist(Vector3(-0.2, 0, 0), Vector3(0, 0, 0))
        velocidade_saida.publish(velocidade)
        rospy.sleep(0.7)
        velocidade = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0.5))
        velocidade_saida.publish(velocidade)
        rospy.sleep(0.5)
    else:
        velocidade = Twist(Vector3(-0.2, 0, 0), Vector3(0, 0, 0))
        velocidade_saida.publish(velocidade)
        rospy.sleep(0.2)
        velocidade = Twist(Vector3(0, 0, 0), Vector3(0, 0, dire*2))
        velocidade_saida.publish(velocidade)
        rospy.sleep(0.3)
        velocidade = Twist(Vector3(0.2, 0, 0), Vector3(0, 0, 0))
        velocidade_saida.publish(velocidade)
        rospy.sleep(0.3)
def desviando(mini):
    """Survival step: handle a pending collision, then dodge by beam sector.

    mini = [closest range, beam index]. Returns the smach outcome:
    'sobreviva' while still avoiding, 'ufa' once clear.
    """
    global bateu
    global desvia
    if bateu:
        Bateu(angulo,diff)
        bateu = False
    if desvia:
        # Obstacle roughly straight ahead (beams near 0/360): back-and-turn.
        if (mini[1] <= 360 and mini[1] > 320) or (mini[1] < 40 and mini[1] >= 0):
            Dont(0)
            desvia=True
        # Left sector -> dodge one way; right sector -> the other; else clear.
        if (mini[1] <= 360 and mini[1] > 288):
            Dont(1)
            desvia=True
        elif (mini[1] < 72 and mini[1] >= 0):
            Dont(-1)
            desvia=True
        else:
            desvia=False
    if desvia:
        return "sobreviva"
    else:
        return "ufa"
#----------------------------------------------------------------------------------------------------
## Classes - estados
class aprendizado(smach.State):
    """State: keep the robot still while the vision pipeline learns the object."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['aprendendo', 'aprendi'])

    def execute(self, userdata):
        global velocidade_saida,bbox
        rospy.sleep(0.5)
        if aprendendo == True:
            # Hold position until the object model is ready.
            vel = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))
            velocidade_saida.publish(vel)
            return 'aprendendo'
        else:
            return 'aprendi'
class Procura(smach.State):
    """State: rotate in place until the tracked object appears (bbox non-empty)."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['achou', 'girando','aprendendo','fugindo','sobreviva'])

    def execute(self, userdata):
        global velocidade_saida,bbox,centro,area,bateu
        # Collision handling takes priority over searching.
        if bateu:
            Bateu(angulo,diff)
            bateu = False
        rospy.sleep(0.01)
        if aprendendo == False:
            if desvia:
                return 'sobreviva'
            if(fuga == True):
                return 'fugindo'
            if bbox == (0,0,0,0):
                # Nothing tracked yet: spin slowly and keep searching.
                vel = Twist(Vector3(0, 0, 0), Vector3(0, 0, -0.3))
                velocidade_saida.publish(vel)
                return 'girando'
            else:
                vel = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))
                velocidade_saida.publish(vel)
                return 'achou'
        else:
            return 'aprendendo'
class Fugindo(smach.State):
    """State: back away from the colored blob while keeping it centered."""
    def __init__(self):
        smach.State.__init__(self,outcomes=['fugi','fugindo','aprendendo','sobreviva'])

    def execute(self, userdata):
        global media_cor,area,fuga,velocidade_saida,bateu
        if aprendendo == False:
            if desvia:
                return 'sobreviva'
            if bateu:
                Bateu(angulo,diff)
                bateu = False
            # Blob centroid in image coordinates.
            x = media_cor[0]
            y = media_cor[1]
            rospy.sleep(0.01)
            if area < 10000:
                # Blob far enough away: stop fleeing.
                fuga = False
                return 'fugi'
            else:
                #print("caaarefulling!")
                # Reverse while steering proportionally to the blob's x offset.
                vel = Twist(Vector3(-0.5,0,0),Vector3(0,0,-(x-320)/300))
                if(x < 280):
                    vel = Twist(Vector3(-0.1,0,0),Vector3(0,0,(280-x)/200))
                elif(x > 380):
                    vel = Twist(Vector3(-0.1,0,0), Vector3(0,0,-(x-380)/200))
                velocidade_saida.publish(vel)
                return 'fugindo'
        else:
            return 'aprendendo'
class Seguindo(smach.State):
    """State: drive toward the tracked object, steering to keep it centered."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['fugindo','seguindo','aprendendo', 'cheguei', 'perdi','sobreviva'])

    def execute(self, userdata):
        global velocidade_saida,bbox,bateu
        if aprendendo:
            return 'aprendendo'
        if bateu:
            Bateu(angulo,diff)
            bateu = False
        if desvia:
            return 'sobreviva'
        if(fuga == True):
            return 'fugindo'
        rospy.sleep(0.01)
        # print bbox
        if bbox == (0,0,0,0):
            return 'perdi'
        else:
            # Bounding-box center (x, y) in image coordinates.
            centro = ((bbox[0] + bbox[2]/2),(bbox[1]+ bbox[-1]/2))
            if(bbox[-1] > 300):
                # Box tall enough -> object reached, stop.
                vel = Twist(Vector3(0,0,0),Vector3(0,0,0))
                velocidade_saida.publish(vel)
                return 'cheguei'
            else:
                # Move forward, steering proportionally to the x offset from 320.
                vel = Twist(Vector3(0.5,0,0),Vector3(0,0,-(centro[0]-320)/300))
                if(centro[0] < 280):
                    vel = Twist(Vector3(0.1,0,0),Vector3(0,0,(280-centro[0])/200))
                elif(centro[0] > 380):
                    vel = Twist(Vector3(0.1,0,0), Vector3(0,0,-(centro[0]-380)/200))
                velocidade_saida.publish(vel)
                return 'seguindo'
class Survival(smach.State):
    """SMACH state: delegate obstacle avoidance to desviando() until clear."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['sobreviva', 'ufa', 'aprendendo'])

    def execute(self, userdata):
        rospy.sleep(0.01)
        if aprendendo:
            return 'aprendendo'
        # desviando() (defined earlier in the file) returns 'sobreviva' while
        # still dodging and 'ufa' once the path is clear.
        return desviando(mini)
# main
def main():
    """Wire up the ROS publishers/subscribers and run the SMACH machine."""
    global velocidade_saida
    global buffer
    rospy.init_node('cf_estados')
    # To use a webcam instead of the Raspberry Pi camera:
    #recebedor = rospy.Subscriber("/cv_camera/image_raw/compressed", CompressedImage, roda_todo_frame, queue_size=1, buff_size = 2**24)
    start = rospy.get_rostime()
    recebedor = rospy.Subscriber("/raspicam_node/image/compressed", CompressedImage, roda_todo_frame, queue_size=10, buff_size = 2**24)
    velocidade_saida = rospy.Publisher("/cmd_vel", Twist, queue_size = 1)
    recebe_scan = rospy.Subscriber("/scan", LaserScan, scaneou)
    recebe_scan2 = rospy.Subscriber("/imu", Imu, leu_imu, queue_size =1)
    # Create a SMACH state machine
    sm = smach.StateMachine(outcomes=['terminei'])
    # Open the container and declare every state transition.
    with sm:
        smach.StateMachine.add('APRENDENDO', aprendizado(),
                               transitions={'aprendendo': 'APRENDENDO',
                                            'aprendi': 'PROCURANDO'})
        smach.StateMachine.add('PROCURANDO', Procura(),
                               transitions={'aprendendo': 'APRENDENDO', 'girando': 'PROCURANDO',
                                            'achou': 'SEGUINDO', 'fugindo': 'FUGINDO', 'sobreviva': 'SOBREVIVA'})
        smach.StateMachine.add('SEGUINDO', Seguindo(),
                               transitions={'aprendendo': 'APRENDENDO', 'perdi': 'PROCURANDO',
                                            'cheguei': 'SEGUINDO', 'seguindo': 'SEGUINDO', 'fugindo': 'FUGINDO', 'sobreviva': 'SOBREVIVA'})
        smach.StateMachine.add('FUGINDO', Fugindo(),
                               transitions={'aprendendo': 'APRENDENDO', 'fugindo': 'FUGINDO',
                                            'fugi': 'PROCURANDO', 'sobreviva': 'SOBREVIVA'})
        smach.StateMachine.add('SOBREVIVA', Survival(),
                               transitions={'aprendendo': 'APRENDENDO', 'sobreviva': 'SOBREVIVA', 'ufa': 'PROCURANDO'})
    # Execute SMACH plan
    outcome = sm.execute()
    #rospy.spin()

if __name__ == '__main__':
    main()
|
996,382 | bfd11a7cdb4579e43f980e4e08f2df84a1d4e850 | import base64
import hmac
from functools import wraps
from hashlib import sha256
from os import getenv
from urllib.parse import urljoin
import httpx
from sanic import Sanic
from sanic.response import json
from sanic_prometheus import monitor
from sentry_sdk import init as sentry_init
from sentry_sdk.integrations.sanic import SanicIntegration
SENTRY_DSN = getenv("SENTRY_DSN")
if SENTRY_DSN:
sentry_init(dsn=SENTRY_DSN, integrations=[SanicIntegration()])
ID_NUMBER_PROMPT = (
"MomConnect thanks you. Please reply with your ID number. This reply message is "
"free & won't cost you anything. We need your ID number to make sure it's you"
)
THANKS_REPLY = (
"Thank you for your time! If you qualify for the benefit, you'll receive a message "
"from The Solidarity Fund. Please don't share your banking details with anyone"
)
app = Sanic(__name__)
http_client = httpx.Client()
def validate_hmac(f):
    """Decorator: reject requests with a missing or invalid Turn webhook signature.

    The expected signature is the base64-encoded HMAC-SHA256 of the raw request
    body, keyed with TURN_HMAC_SECRET. Responds 401 when the header is absent
    and 403 when it does not match.
    """
    @wraps(f)
    async def decorated_function(request, *args, **kwargs):
        try:
            signature = request.headers["X-Turn-Hook-Signature"]
            assert signature
        except (KeyError, AssertionError):
            return json({"authorization": "X-Turn-Hook-Signature header required"}, 401)
        h = hmac.new(getenv("TURN_HMAC_SECRET").encode(), request.body, sha256)
        # compare_digest is a constant-time comparison (timing-attack safe).
        if not hmac.compare_digest(base64.b64encode(h.digest()), signature.encode()):
            return json(
                {"authorization": "Invalid value for X-Turn-Hook-Signature"}, 403
            )
        return await f(request, *args, **kwargs)
    return decorated_function
@app.route("/")
async def health(request):
return json({"status": "ok"})
async def send_sms(to_addr, claim_id, body):
    """POST a text message to the Turn v1/messages API.

    'x-turn-fallback-channel: 1' presumably enables the SMS fallback channel
    (the function name suggests so) -- confirm against the Turn API docs.
    'X-Turn-Claim-Extend' extends the conversation claim passed by the caller.
    """
    headers = {
        "Authorization": "Bearer {}".format(getenv("TURN_TOKEN")),
        "Content-Type": "application/json",
        "x-turn-fallback-channel": "1",
        "X-Turn-Claim-Extend": claim_id,
    }
    data = {
        "preview_url": False,
        "recipient_type": "individual",
        "to": to_addr,
        "type": "text",
        "text": {"body": body},
    }
    return await http_client.post(
        urljoin(getenv("TURN_URL"), "v1/messages"), headers=headers, json=data
    )
@app.route("/", methods=["POST"])
@validate_hmac
async def message(request):
claim_id = request.headers.get("X-Turn-Claim")
for message in request.json.get("messages", []):
text_body = message.get("text", {}).get("body", "").lower().strip()
wa_id = message.get("from")
if text_body == "fund":
await send_sms(to_addr=wa_id, claim_id=claim_id, body=ID_NUMBER_PROMPT)
return json({})
if __name__ == "__main__":
monitor(app).expose_endpoint()
app.run()
|
996,383 | f09bbff2db06b3bfcbda8b8052f6f0995f1b7c2e | # _*_coding:utf-8 _*_
# @Time :2018/12/31 16:39
# @Author :sunny
# @Email :602992114@qq.com
# function :数据库操作封装类
"""
1:连接数据库
2:辨析额一个SQL
3:建立游标
4:执行
"""
import pymysql
from api_test_sunny.common.config import ConfigLoader
class MySqlUtil:
    """Thin wrapper around a pymysql connection configured from ConfigLoader.

    Rows come back as dicts because the connection uses DictCursor.
    """

    def __init__(self):
        # Read connection settings from the [mysql] section of the config.
        config = ConfigLoader()
        host = config.get("mysql", "host")
        port = config.getint("mysql", "port")
        user = config.get("mysql", "usr")
        password = config.get("mysql", "pwd")
        self.mysql = pymysql.connect(host=host, user=user, password=password,
                                     port=port, cursorclass=pymysql.cursors.DictCursor)

    def fetch_one(self, sql):
        """Run *sql* and return the first row as a dict, or None if empty.

        The cursor is now closed via its context manager -- the original
        leaked a cursor per call.
        """
        with self.mysql.cursor() as cursor:
            cursor.execute(sql)
            return cursor.fetchone()

    def fetch_all(self, sql):
        """Run *sql* and return all rows (tuple of dicts); the original
        comment wrongly said it fetched a single row."""
        with self.mysql.cursor() as cursor:
            cursor.execute(sql)
            return cursor.fetchall()

    def close(self):
        """Close the underlying database connection."""
        self.mysql.close()
if __name__ == '__main__':
    # Smoke test: fetch the highest non-empty MobilePhone from future.member.
    sql = "select MobilePhone from future.member where MobilePhone != '' ORDER BY MobilePhone DESC LIMIT 1"
    print(sql)
    mysql = MySqlUtil()
    data = mysql.fetch_one(sql)
    print(type(data), data)
    # max_mobilephone=int(data[0])+1
    # print(max_mobilephone)
    # DictCursor: rows are dicts, so index by column name.
    print(data["MobilePhone"])
|
996,384 | e5fbaeb3a0bf256800ee52f127d01415762df9f8 | """
Signal-Noise_GTC_comparisons.py
Author: Benjamin Floyd
Creates corner plots comparing chains, mostly within a trial but some between.
"""
import re
import emcee
import h5py
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import AutoMinorLocator
import pygtc
def keyfunct(n):
    """Return the theta value embedded in a chain name (e.g. '..._0.153_...')."""
    match = re.search(r'_(\d+\.\d+)_', n)
    return match.group(1)
def get_theta_list(name_list):
    """Parse the embedded theta value of every chain name into a float."""
    thetas = []
    for name in name_list:
        thetas.append(float(keyfunct(name)))
    return thetas
def get_flat_chains(samplers):
    """Flatten each emcee sampler's chain after discarding burn-in and thinning.

    Burn-in is ~3x the mean autocorrelation time and thinning ~tau/2. When the
    chain is too short for a reliable tau estimate (AutocorrError), fall back
    to discarding the first third of the steps with no thinning.
    """
    flat_chains = []
    for sampler in samplers:
        # Get the chain from the sampler
        samples = sampler.get_chain()
        # To get the number of iterations ran, number of walkers used, and the number of parameters measured
        nsteps, nwalkers, ndim = samples.shape
        try:
            # Calculate the autocorrelation time
            tau_est = sampler.get_autocorr_time()
            tau = np.mean(tau_est)
            # Remove the burn-in. We'll use ~3x the autocorrelation time
            burnin = int(3 * tau)
            # We will also thin by roughly half our autocorrelation time
            thinning = int(tau // 2)
        except emcee.autocorr.AutocorrError:
            # quiet=True returns the (unreliable) estimate instead of raising.
            tau_est = sampler.get_autocorr_time(quiet=True)
            tau = np.mean(tau_est)
            burnin = int(nsteps // 3)
            thinning = 1
        flat_samples = sampler.get_chain(discard=burnin, thin=thinning, flat=True)
        flat_chains.append(flat_samples)
    return flat_chains
# True parameter values
theta_true = None  # Amplitude -- unset, so no truth line is drawn for theta.
eta_true = 1.2  # Redshift slope
beta_true = 0.5  # Radial slope
zeta_true = -1.0  # Mass slope
C_true = 0.371  # Background AGN surface density

labels = [r'$\theta$', r'$\eta$', r'$\zeta$', r'$\beta$', r'$C$']
truths = [theta_true, eta_true, zeta_true, beta_true, C_true]

filename = 'Data/MCMC/Mock_Catalog/Chains/signal-noise_tests/' \
           'emcee_run_w30_s1000000_mock_tvariable_e1.2_z-1.0_b0.5_C0.371_full_spt_snr_tests.h5'

# Get the chain names
with h5py.File(filename, 'r') as f:
    chain_names = list(f.keys())

#%% Trial 2 Trends
# Load in the trial 2 chains
trial2_names = [chain_name for chain_name in chain_names if 'trial2' in chain_name]
trial2_samplers = [emcee.backends.HDFBackend(filename, name=chain_name) for chain_name in trial2_names]

# Get the flat chains
trial2_flat_chains = get_flat_chains(trial2_samplers)

# Keep only the eta and zeta portions (columns 1:3 of the parameter vector)
trial2_eta_zeta_chains = [chain[:, 1:3] for chain in trial2_flat_chains]

# Make plot, one chain per theta value
chain_labels = [r'$\theta = {theta}$'.format(theta=theta) for theta in get_theta_list(trial2_names)]
trial2_fig = pygtc.plotGTC(trial2_eta_zeta_chains, paramNames=labels[1:3], chainLabels=chain_labels,
                           truths=truths[1:3], figureSize=8)
trial2_fig.suptitle('Trial 2')
trial2_fig.show()

#%% Trial 3 Trends
# Load in the trial 3 chains (the theta = 2.000 chain is excluded)
trial3_names = [chain_name for chain_name in chain_names if 'trial3' in chain_name and '2.000' not in chain_name]
trial3_samplers = [emcee.backends.HDFBackend(filename, name=chain_name) for chain_name in trial3_names]

# Get the flat chains
trial3_flat_chains = get_flat_chains(trial3_samplers)

# Keep only the eta and zeta portions
trial3_eta_zeta_chains = [chain[:, 1:3] for chain in trial3_flat_chains]

# Make plot
chain_labels = [r'$\theta = {theta}$'.format(theta=theta) for theta in get_theta_list(trial3_names)]
trial3_fig = pygtc.plotGTC(trial3_eta_zeta_chains, paramNames=labels[1:3], chainLabels=chain_labels,
                           truths=truths[1:3], figureSize=8)
trial3_fig.suptitle('Trial 3')
trial3_fig.show()

#%% Trial 4 Trends
# Load in the trial 4 chains
trial4_names = [chain_name for chain_name in chain_names if 'trial4' in chain_name]
trial4_samplers = [emcee.backends.HDFBackend(filename, name=chain_name) for chain_name in trial4_names]

# Get the flat chains
trial4_flat_chains = get_flat_chains(trial4_samplers)

# Match the trial 3 and 4 chains for theta = 0.153
# NOTE(review): hard-coded indices [3] and [1] assume a fixed chain ordering
# in the HDF5 file -- verify if chains are added or renamed.
trial3_4_t0153_chains = [trial3_flat_chains[3], trial4_flat_chains[1]]

# Set theta_true
truths[0] = 0.153

# Make plot
chain_labels = [r'Trial 3: $P_0$ = A Priori', r'Trial 4: $P_0$ = Prior Space']
trial4_fig = pygtc.plotGTC(trial3_4_t0153_chains, paramNames=labels, chainLabels=chain_labels,
                           truths=truths, figureSize=8)
trial4_fig.suptitle(r'$\theta = 0.153$ Trials 3 and 4')
trial4_fig.show()

#%% Trial 5 Trends
# Load in the trial 5 chains
trial5_names = [chain_name for chain_name in chain_names if 'trial5' in chain_name]
trial5_samplers = [emcee.backends.HDFBackend(filename, name=chain_name) for chain_name in trial5_names]

# Get the flat chains
trial5_flat_chains = get_flat_chains(trial5_samplers)

# Set theta_true
truths[0] = 0.153

# Make plot, labelling each chain by its input core radius r_c
chain_labels = [r'$r_c = {rc}$'.format(rc=re.search(r'rc_input(\d.\d+)_', chain_name).group(1)) for chain_name in trial5_names]
trial5_fig = pygtc.plotGTC(trial5_flat_chains, paramNames=labels, chainLabels=chain_labels,
                           truths=truths, figureSize=8)
trial5_fig.suptitle(r'$\theta = 0.153$ True $r_c = 0.1$')
trial5_fig.show()
|
996,385 | b8415fda041c842d0f5cd0eed8095abec7c91d7f | #Lab_4 Task 3
#AMM-2 Bilan Halyna
from math import*
# Demonstrate the while loop: compute a square root with Heron's iterative
# formula, then compare against math.sqrt. (User-facing strings are Ukrainian.)
print('Вивчаємо механізм використання циклу while')
print('Обчислюємо квадратний корінь за ітераційною формулою Герона')
print("Задаємо число, чий корінь квадратний маємо відшукати ")
a = float(input('a='))
print("Задаємо початкове значення ")
x = float(input('x='))
eps = 0.0001  # convergence tolerance on |x^2 - a|
s = 0  # iteration counter
while abs((x ** 2) - a) > eps:
    # Heron/Newton step: x_{k+1} = (x_k + a/x_k) / 2
    x = (1/2)*(x + a/x)
    s += 1
print('result= ', float(x), 'n=', s)
print('Давайте перевіремо за звичайною формулою:')
k = sqrt(a)
print('result=', float(k))
|
996,386 | 7da6cbc32b24b023e51452e0cf131e888fec33cb | #coding: utf-8
import re
from collections import Counter
def readlinesFile(filename):
    """Read *filename* and return its lines (trailing newlines preserved).

    Uses a context manager so the handle is closed even if reading raises;
    the original left the file open on error.
    """
    with open(filename) as f:
        return f.readlines()
def getSentenseMorphMapList(filename, get_morph_keys):
    """Parse MeCab output into a list of sentences, each a list of morph dicts.

    Non-EOS lines look like 'surface<TAB>pos,pos1,...'; each is split on tab
    and comma, zipped with morph_keys, then filtered down to get_morph_keys.
    Sentences are delimited by 'EOS' lines; empty sentences are dropped.
    """
    lines = readlinesFile(filename)
    sentense_morph_map_list = list()
    morph_map_list = list()
    # Field names in MeCab's default output order.
    morph_keys = [
        "surface",
        "pos",
        "pos1",
        "pos2",
        "pos3",
        "ctype",
        "cform",
        "base",
        "kana",
        "yomi"
    ]
    for line in lines:
        line = line.strip("\n")
        if line == "EOS":
            # End of sentence: flush the accumulated morphemes, if any.
            if morph_map_list != []:
                sentense_morph_map_list.append(morph_map_list)
                morph_map_list = list()
        else:
            morph_values = re.split("\t|,", line)
            morph_map = dict(zip(morph_keys, morph_values))
            # Keep only the requested fields.
            get_morph_map = dict((k, v) for k, v in morph_map.items() if k in get_morph_keys)
            morph_map_list.append(get_morph_map)
    return sentense_morph_map_list
if __name__ == "__main__":
filename = "neko.txt.mecab"
get_morph_keys = [
"base"
]
sentense_morph_map_list = getSentenseMorphMapList(filename, get_morph_keys)
base_list = [flatten["base"] for inner in sentense_morph_map_list for flatten in inner]
counter = Counter(base_list)
for word, cnt in counter.most_common():
print word, cnt
|
996,387 | 44e6975c078a4a4f4926a01912d5a521ba82f076 | """Samplers manage the client-side trace sampling
Any `sampled = False` trace won't be written, and can be ignored by the instrumentation.
"""
import abc
import json
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from typing import Tuple
from typing import Union
import six
from .constants import AUTO_KEEP
from .constants import AUTO_REJECT
from .constants import ENV_KEY
from .constants import SAMPLING_AGENT_DECISION
from .constants import SAMPLING_LIMIT_DECISION
from .constants import SAMPLING_RULE_DECISION
from .constants import USER_KEEP
from .constants import USER_REJECT
from .internal.compat import iteritems
from .internal.compat import pattern_type
from .internal.constants import DEFAULT_SAMPLING_RATE_LIMIT
from .internal.constants import MAX_UINT_64BITS as _MAX_UINT_64BITS
from .internal.logger import get_logger
from .internal.rate_limiter import RateLimiter
from .internal.sampling import SamplingMechanism
from .internal.sampling import update_sampling_decision
from .internal.utils.cache import cachedmethod
from .settings import _config as ddconfig
try:
    from json.decoder import JSONDecodeError
except ImportError:
    # handling python 2.X import error (json raised plain ValueError there)
    JSONDecodeError = ValueError  # type: ignore

if TYPE_CHECKING:  # pragma: no cover
    from .span import Span

log = get_logger(__name__)

# All references to MAX_TRACE_ID were replaced with _MAX_UINT_64BITS.
# Now that ddtrace supports generating 128bit trace_ids,
# the max trace id should be 2**128 - 1 (not 2**64 -1)
# MAX_TRACE_ID is no longer used and should be removed.
MAX_TRACE_ID = _MAX_UINT_64BITS

# Has to be the same factor and key as the Agent to allow chained sampling
KNUTH_FACTOR = 1111111111111111111
class SamplingError(Exception):
    """Raised when a sampling decision cannot be made."""
    pass
class BaseSampler(six.with_metaclass(abc.ABCMeta)):
    """Abstract sampler interface: decide whether a span is kept."""

    __slots__ = ()

    @abc.abstractmethod
    def sample(self, span):
        pass
class BasePrioritySampler(BaseSampler):
    """Sampler that can additionally ingest agent-provided per-service rates."""

    __slots__ = ()

    @abc.abstractmethod
    def update_rate_by_service_sample_rates(self, sample_rates):
        pass
class AllSampler(BaseSampler):
    """Sampler sampling all the traces"""

    def sample(self, span):
        # type: (Span) -> bool
        # Unconditionally keep every span.
        return True
class RateSampler(BaseSampler):
    """Sampler based on a rate

    Keep (100 * `sample_rate`)% of the traces.
    It samples randomly, its main purpose is to reduce the instrumentation footprint.
    """

    def __init__(self, sample_rate=1.0):
        # type: (float) -> None
        if sample_rate < 0.0:
            raise ValueError("sample_rate of {} is negative".format(sample_rate))
        elif sample_rate > 1.0:
            # Rates above 1.0 are clamped rather than rejected.
            sample_rate = 1.0
        self.set_sample_rate(sample_rate)
        log.debug("initialized RateSampler, sample %s%% of traces", 100 * sample_rate)

    def set_sample_rate(self, sample_rate):
        # type: (float) -> None
        self.sample_rate = float(sample_rate)
        # Precompute the keep/drop threshold in 64-bit trace-id space.
        self.sampling_id_threshold = self.sample_rate * _MAX_UINT_64BITS

    def sample(self, span):
        # type: (Span) -> bool
        # Deterministic decision: hash the 64-bit trace id with the Knuth
        # factor (same constant as the agent, enabling chained sampling) and
        # keep the span when the result falls under the threshold.
        return ((span._trace_id_64bits * KNUTH_FACTOR) % _MAX_UINT_64BITS) <= self.sampling_id_threshold
class RateByServiceSampler(BasePrioritySampler):
    """Sampler based on a rate, by service

    Keep (100 * `sample_rate`)% of the traces.
    The sample rate is kept independently for each service/env tuple.
    """

    __slots__ = ("sample_rate", "_by_service_samplers", "_default_sampler")

    _default_key = "service:,env:"

    @staticmethod
    def _key(
        service=None,  # type: Optional[str]
        env=None,  # type: Optional[str]
    ):
        # type: (...) -> str
        """Compute a key with the same format used by the Datadog agent API."""
        service = service or ""
        env = env or ""
        return "service:" + service + ",env:" + env

    def __init__(self, sample_rate=1.0):
        # type: (float) -> None
        self.sample_rate = sample_rate
        # Fallback used for services the agent has not reported a rate for yet.
        self._default_sampler = RateSampler(self.sample_rate)
        self._by_service_samplers = {}  # type: Dict[str, RateSampler]

    def set_sample_rate(
        self,
        sample_rate,  # type: float
        service="",  # type: str
        env="",  # type: str
    ):
        # type: (...) -> None
        self._by_service_samplers[self._key(service, env)] = RateSampler(sample_rate)

    def _set_priority(self, span, priority):
        # type: (Span, int) -> None
        span.context.sampling_priority = priority
        span.sampled = priority > 0  # Positive priorities mean it was kept

    def _set_sampler_decision(self, span, sampler, sampled):
        # type: (Span, RateSampler, bool) -> None
        priority = AUTO_KEEP if sampled else AUTO_REJECT
        self._set_priority(span, priority)
        span.set_metric(SAMPLING_AGENT_DECISION, sampler.sample_rate)
        # Record whether the default or an agent-provided rate decided.
        sampling_mechanism = (
            SamplingMechanism.DEFAULT if sampler == self._default_sampler else SamplingMechanism.AGENT_RATE
        )
        update_sampling_decision(span.context, sampling_mechanism, sampled)

    def sample(self, span):
        # type: (Span) -> bool
        env = span.get_tag(ENV_KEY)
        key = self._key(span.service, env)
        sampler = self._by_service_samplers.get(key) or self._default_sampler
        sampled = sampler.sample(span)
        self._set_sampler_decision(span, sampler, sampled)
        return sampled

    def update_rate_by_service_sample_rates(self, rate_by_service):
        # type: (Dict[str, float]) -> None
        # Build a fresh mapping and rebind, rather than mutating in place.
        samplers = {}
        for key, sample_rate in iteritems(rate_by_service):
            samplers[key] = RateSampler(sample_rate)
        self._by_service_samplers = samplers
class DatadogSampler(RateByServiceSampler):
    """
    Default sampler used by Tracer for determining if a trace should be kept or dropped.

    By default, this sampler will rely on dynamic sample rates provided by the trace agent
    to determine which traces are kept or dropped.

    You can also configure a static sample rate via ``default_sample_rate`` to use for sampling.
    When a ``default_sample_rate`` is configured, that is the only sample rate used, the agent
    provided rates are ignored.

    You may also supply a list of ``SamplingRule`` to determine sample rates for specific
    services or operation names.

    Example rules::

        DatadogSampler(rules=[
            SamplingRule(sample_rate=1.0, service="my-svc"),
            SamplingRule(sample_rate=0.0, service="less-important"),
        ])

    Rules are evaluated in the order they are provided, and the first rule that matches is used.
    If no rule matches, then the agent sample rates are used.

    Lastly, this sampler can be configured with a rate limit. This will ensure the max number of
    sampled traces per second does not exceed the supplied limit. The default is 100 traces kept
    per second. This rate limiter is only used when ``default_sample_rate`` or ``rules`` are
    provided. It is not used when the agent supplied sample rates are used.
    """

    __slots__ = ("limiter", "rules")

    NO_RATE_LIMIT = -1
    # deprecate and remove the DEFAULT_RATE_LIMIT field from DatadogSampler
    DEFAULT_RATE_LIMIT = DEFAULT_SAMPLING_RATE_LIMIT

    def __init__(
        self,
        rules=None,  # type: Optional[List[SamplingRule]]
        default_sample_rate=None,  # type: Optional[float]
        rate_limit=None,  # type: Optional[int]
    ):
        # type: (...) -> None
        """
        Constructor for DatadogSampler sampler

        :param rules: List of :class:`SamplingRule` rules to apply to the root span of every trace, default no rules
        :type rules: :obj:`list` of :class:`SamplingRule`
        :param default_sample_rate: The default sample rate to apply if no rules matched (default: ``None`` /
            Use :class:`RateByServiceSampler` only)
        :type default_sample_rate: float 0 <= X <= 1.0
        :param rate_limit: Global rate limit (traces per second) to apply to all traces regardless of the rules
            applied to them, (default: ``100``)
        :type rate_limit: :obj:`int`
        """
        # Use default sample rate of 1.0
        super(DatadogSampler, self).__init__()
        if default_sample_rate is None:
            # Fall back to the globally configured trace sample rate, if set.
            sample_rate = ddconfig._trace_sample_rate
            if sample_rate is not None:
                default_sample_rate = float(sample_rate)
        if rate_limit is None:
            rate_limit = int(ddconfig._trace_rate_limit)
        if rules is None:
            # No explicit rules: parse any rules configured via environment.
            env_sampling_rules = ddconfig._trace_sampling_rules
            if env_sampling_rules:
                rules = self._parse_rules_from_env_variable(env_sampling_rules)
            else:
                rules = []
            self.rules = rules
        else:
            self.rules = []
            # Validate that rules is a list of SampleRules
            for rule in rules:
                if not isinstance(rule, SamplingRule):
                    raise TypeError("Rule {!r} must be a sub-class of type ddtrace.sampler.SamplingRules".format(rule))
                self.rules.append(rule)
        # DEV: Default sampling rule must come last
        if default_sample_rate is not None:
            self.rules.append(SamplingRule(sample_rate=default_sample_rate))
        # Configure rate limiter
        self.limiter = RateLimiter(rate_limit)
        log.debug("initialized %r", self)

    def __str__(self):
        rates = {key: sampler.sample_rate for key, sampler in self._by_service_samplers.items()}
        return "{}(agent_rates={!r}, limiter={!r}, rules={!r})".format(
            self.__class__.__name__, rates, self.limiter, self.rules
        )

    __repr__ = __str__

    def _parse_rules_from_env_variable(self, rules):
        # type: (str) -> List[SamplingRule]
        """Parse a JSON list of rule objects (sample_rate/service/name)."""
        sampling_rules = []
        try:
            json_rules = json.loads(rules)
        except JSONDecodeError:
            raise ValueError("Unable to parse DD_TRACE_SAMPLING_RULES={}".format(rules))
        for rule in json_rules:
            if "sample_rate" not in rule:
                raise KeyError("No sample_rate provided for sampling rule: {}".format(json.dumps(rule)))
            sample_rate = float(rule["sample_rate"])
            service = rule.get("service", SamplingRule.NO_RULE)
            name = rule.get("name", SamplingRule.NO_RULE)
            try:
                sampling_rule = SamplingRule(sample_rate=sample_rate, service=service, name=name)
            except ValueError as e:
                raise ValueError("Error creating sampling rule {}: {}".format(json.dumps(rule), e))
            sampling_rules.append(sampling_rule)
        return sampling_rules

    def _set_priority(self, span, priority):
        # type: (Span, int) -> None
        span.context.sampling_priority = priority
        span.sampled = priority > 0  # Positive priorities mean it was kept

    def _set_sampler_decision(self, span, sampler, sampled):
        # type: (Span, Union[RateSampler, SamplingRule, RateLimiter], bool) -> None
        if isinstance(sampler, RateSampler):
            # When agent based sampling is used
            return super(DatadogSampler, self)._set_sampler_decision(span, sampler, sampled)
        if isinstance(sampler, SamplingRule):
            span.set_metric(SAMPLING_RULE_DECISION, sampler.sample_rate)
        elif isinstance(sampler, RateLimiter) and not sampled:
            # We only need to set the rate limit metric if the limiter is rejecting the span
            # DEV: Setting this allows us to properly compute metrics and debug the
            #      various sample rates that are getting applied to this span
            span.set_metric(SAMPLING_LIMIT_DECISION, sampler.effective_rate)
        if not sampled:
            self._set_priority(span, USER_REJECT)
        else:
            self._set_priority(span, USER_KEEP)
        update_sampling_decision(span.context, SamplingMechanism.TRACE_SAMPLING_RULE, sampled)

    def sample(self, span):
        # type: (Span) -> bool
        """
        Decide whether the provided span should be sampled or not

        The span provided should be the root span in the trace.

        :param span: The root span of a trace
        :type span: :class:`ddtrace.span.Span`
        :returns: Whether the span was sampled or not
        :rtype: :obj:`bool`
        """
        # Go through all rules and grab the first one that matched
        # DEV: This means rules should be ordered by the user from most specific to least specific
        for rule in self.rules:
            if rule.matches(span):
                sampler = rule
                break
        else:
            # No rules match so use agent based sampling
            return super(DatadogSampler, self).sample(span)
        sampled = sampler.sample(span)
        self._set_sampler_decision(span, sampler, sampled)
        if sampled:
            # Ensure all allowed traces adhere to the global rate limit
            allowed = self.limiter.is_allowed(span.start_ns)
            if not allowed:
                self._set_sampler_decision(span, self.limiter, allowed)
                return False
        return sampled
class SamplingRule(BaseSampler):
    """
    Definition of a sampling rule used by :class:`DatadogSampler` for applying a sample rate on a span
    """

    # Sentinel meaning "no constraint on this property" (distinct from None,
    # which is a legitimate value to match against).
    NO_RULE = object()

    def __init__(
        self,
        sample_rate,  # type: float
        service=NO_RULE,  # type: Any
        name=NO_RULE,  # type: Any
    ):
        # type: (...) -> None
        """
        Configure a new :class:`SamplingRule`

        .. code:: python

            DatadogSampler([
                # Sample 100% of any trace
                SamplingRule(sample_rate=1.0),

                # Sample no healthcheck traces
                SamplingRule(sample_rate=0, name='flask.request'),

                # Sample all services ending in `-db` based on a regular expression
                SamplingRule(sample_rate=0.5, service=re.compile('-db$')),

                # Sample based on service name using custom function
                SamplingRule(sample_rate=0.75, service=lambda service: 'my-app' in service),
            ])

        :param sample_rate: The sample rate to apply to any matching spans
        :type sample_rate: :obj:`float` greater than or equal to 0.0 and less than or equal to 1.0
        :param service: Rule to match the `span.service` on, default no rule defined
        :type service: :obj:`object` to directly compare, :obj:`function` to evaluate, or :class:`re.Pattern` to match
        :param name: Rule to match the `span.name` on, default no rule defined
        :type name: :obj:`object` to directly compare, :obj:`function` to evaluate, or :class:`re.Pattern` to match
        """
        # Enforce sample rate constraints
        if not 0.0 <= sample_rate <= 1.0:
            raise ValueError(
                (
                    "SamplingRule(sample_rate={}) must be greater than or equal to 0.0 and less than or equal to 1.0"
                ).format(sample_rate)
            )
        self.sample_rate = sample_rate
        self.service = service
        self.name = name

    @property
    def sample_rate(self):
        # type: () -> float
        return self._sample_rate

    @sample_rate.setter
    def sample_rate(self, sample_rate):
        # type: (float) -> None
        self._sample_rate = sample_rate
        # Precompute the keep/drop threshold in 64-bit trace-id space.
        self._sampling_id_threshold = sample_rate * _MAX_UINT_64BITS

    def _pattern_matches(self, prop, pattern):
        # If the rule is not set, then assume it matches
        # DEV: Having no rule and being `None` are different things
        #      e.g. ignoring `span.service` vs `span.service == None`
        if pattern is self.NO_RULE:
            return True

        # If the pattern is callable (e.g. a function) then call it passing the prop
        # The expected return value is a boolean so cast the response in case it isn't
        if callable(pattern):
            try:
                return bool(pattern(prop))
            except Exception:
                log.warning("%r pattern %r failed with %r", self, pattern, prop, exc_info=True)
                # Their function failed to validate, assume it is a False
                return False

        # The pattern is a regular expression and the prop is a string
        if isinstance(pattern, pattern_type):
            try:
                return bool(pattern.match(str(prop)))
            except (ValueError, TypeError):
                # This is to guard us against the casting to a string (shouldn't happen, but still)
                log.warning("%r pattern %r failed with %r", self, pattern, prop, exc_info=True)
                return False

        # Exact match on the values
        return prop == pattern

    @cachedmethod()
    def _matches(self, key):
        # type: (Tuple[Optional[str], str]) -> bool
        service, name = key
        for prop, pattern in [(service, self.service), (name, self.name)]:
            if not self._pattern_matches(prop, pattern):
                return False
        else:
            return True

    def matches(self, span):
        # type: (Span) -> bool
        """
        Return if this span matches this rule

        :param span: The span to match against
        :type span: :class:`ddtrace.span.Span`
        :returns: Whether this span matches or not
        :rtype: :obj:`bool`
        """
        # Our LFU cache expects a single key, convert the
        # provided Span into a hashable tuple for the cache
        return self._matches((span.service, span.name))

    def sample(self, span):
        # type: (Span) -> bool
        """
        Return if this rule chooses to sample the span

        :param span: The span to sample against
        :type span: :class:`ddtrace.span.Span`
        :returns: Whether this span was sampled
        :rtype: :obj:`bool`
        """
        # Short-circuit the always/never cases to skip the hashing work.
        if self.sample_rate == 1:
            return True
        elif self.sample_rate == 0:
            return False

        return ((span._trace_id_64bits * KNUTH_FACTOR) % _MAX_UINT_64BITS) <= self._sampling_id_threshold

    def _no_rule_or_self(self, val):
        return "NO_RULE" if val is self.NO_RULE else val

    def __repr__(self):
        return "{}(sample_rate={!r}, service={!r}, name={!r})".format(
            self.__class__.__name__,
            self.sample_rate,
            self._no_rule_or_self(self.service),
            self._no_rule_or_self(self.name),
        )

    __str__ = __repr__

    def __eq__(self, other):
        # type: (Any) -> bool
        if not isinstance(other, SamplingRule):
            raise TypeError("Cannot compare SamplingRule to {}".format(type(other)))
        return self.sample_rate == other.sample_rate and self.service == other.service and self.name == other.name
|
996,388 | 24ea9e87ab26e28c0fdf89bc1702a27f941a3c42 | from rest_framework import serializers
from . import models
class Category(serializers.HyperlinkedModelSerializer):
    """Serializer for Category, exposing tree links (parent/children) and adverts."""
    class Meta:
        model = models.Category
        fields = ('pk', 'name', 'parent', 'children', 'hidden', 'foo', 'adverts')
class Advert(serializers.HyperlinkedModelSerializer):
    """Serializer for Advert with its pictures and owning category."""
    class Meta:
        model = models.Advert
        fields = ('pk', 'name', 'pictures', 'category')
class AdvertPicture(serializers.HyperlinkedModelSerializer):
    """Serializer for AdvertPicture; the 'file' field is intentionally disabled."""
    class Meta:
        model = models.AdvertPicture
        # fields = ('pk', 'file', 'advert')
        fields = ('pk', 'advert')
996,389 | 028fcb53228d89d5efe7d19689a831f5739dc267 | # Script to build mysql table
import config
import MySQLdb
from sql_table import mysql_table
'''
Create_table.py looks for MySQL Config in config.py
Creates a connection to the database using the supplied config
Creates a TABLE named WEB_URL with the specified rows.
Needs to RUN once when setting up the application on local or
web server.
You need to have a database already defined (e.g. SHORTY is
already present).
'''
# Pull connection settings from config.py, then create the table defined in
# sql_table.mysql_table.
host = config.host
user = config.user
passwrd = config.passwrd
db = config.db
create_table = mysql_table
# Positional args map to MySQLdb.connect(host, user, passwd, db).
conn = MySQLdb.connect(host , user , passwrd, db)
cursor = conn.cursor()
cursor.execute(create_table)
conn.close()
996,390 | 01485caf0f2e14904f14d937e3111dd4f89179a7 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.conf import settings
from django.views import View
from django.http import JsonResponse
from general.models import ExecuteModel
from .models import VisitCountModel
import os
import shutil
import logging
from datetime import date
today = date.today()
# Root-logger setup: INFO level, log file named '<YYYY-MM-DD>_log.txt'.
logging.basicConfig(level=logging.INFO,format='[%(levelname)s] %(asctime)s : %(message)s',datefmt='%Y-%m-%d %H:%M:%S',filename= str(today) +'_log.txt')
@login_required
def index(request):
    """Render the home page, bumping the site-wide visit counter (row id=1).

    NOTE(review): the read-modify-write on `count` is racy under concurrent
    requests; an atomic F('count') + 1 update would be safer -- confirm
    expected traffic before changing.
    """
    count_model = VisitCountModel.objects.filter(id=1)
    if count_model:
        count_model = count_model[0]
        count_model.count += 1
    else:
        # First visit ever: the counter row does not exist yet.
        count_model = VisitCountModel()
        count_model.count = 1
    count_model.save()
    request_dict = {}
    request_dict['count'] = count_model.count
    return render(request, 'home.html', request_dict)
class MaintainView(View):
    """Maintenance page: purge all ExecuteModel rows, then render the page."""

    def get(self, request, *arg, **kwargs):
        for objects in ExecuteModel.objects.all():
            # Lazy %-formatting fixes the original TypeError:
            # 'maintain delect: ' + objects concatenated str with a model instance.
            logging.info('maintain delect: %s', objects)
            objects.delete()
        return render(request, 'maintain/maintain_page.html')
class InitializeView(View):
    """Wipe the requesting user's upload/output/temp directories and job row."""

    @method_decorator(login_required)
    def get(self, request, *arg, **kwargs):
        username = request.user.get_username()
        if not username:
            raise Exception('username empty')
        finish = False
        # Remove <root>/<method>/<username>/* under both the upload and
        # output trees (paths are built by string concatenation).
        for path in [settings.UPLOAD_ROOT, settings.OUTPUT_ROOT]:
            for method in os.listdir(path):
                temp_path = path+method+'/'+username+'/'
                if os.path.exists(temp_path):
                    for directory_path in os.listdir(temp_path):
                        shutil.rmtree(temp_path+directory_path)
        # Same cleanup for the DPView temp area.
        path = settings.DPVIEW_TEMP_ROOT+username+'/'
        if os.path.exists(path):
            for directory_path in os.listdir(path):
                shutil.rmtree(path+directory_path)
        if ExecuteModel.objects.filter(user_name=username).exists():
            file = ExecuteModel.objects.get(user_name=username)
            logging.info(username + 'initialize ExecuteModel')
            file.delete()
            # NOTE(review): finish appears to report whether a stale
            # ExecuteModel row was found and deleted -- confirm intent.
            finish = True
        return JsonResponse(finish, safe=False)
996,391 | e442193b8cba96bcef010e8aaa9d6fa212b149bd | import random
def magic_slow(array):
    """Linearly scan *array* for a magic index, i.e. a position ``i``
    with ``array[i] == i``.

    Args:
        array (list[int]): sorted array of integers.

    Returns:
        int: the first magic index found, or -1 if none exists.
    """
    for position, value in enumerate(array):
        if value == position:
            return position
    return -1
def magic_fast(array, start=0, end=None):
    """Binary-search for a magic index (``array[i] == i``) in a sorted
    array of distinct integers.

    The bounds are now optional (backward compatible): callers may simply
    write ``magic_fast(array)`` instead of ``magic_fast(array, 0, len(array)-1)``.

    Args:
        array (list[int]): sorted array of distinct integers.
        start (int): lower bound of the search range, inclusive.
        end (int | None): upper bound, inclusive; defaults to the last index.

    Returns:
        int: a magic index within the range, or -1 if none exists.

    Note:
        Correct only when values are distinct; with duplicates both halves
        of the array would need to be searched.
    """
    if end is None:
        end = len(array) - 1
    if end < start:
        return -1
    mid = (start + end) // 2
    if array[mid] == mid:
        return mid
    elif array[mid] > mid:
        # Values grow at least as fast as indices, so a magic index can
        # only be to the left when array[mid] overshoots mid.
        return magic_fast(array, start, mid - 1)
    else:
        return magic_fast(array, mid + 1, end)
if __name__ == '__main__':
    # Demo run comparing the linear and binary-search implementations.
    sample = [-14, -12, 0, 1, 2, 5, 9, 10, 23, 25, 30]
    slow_result = magic_slow(sample)
    fast_result = magic_fast(sample, 0, len(sample) - 1)
    print(f'{slow_result} is the magic index in {sample} (slow ver.)')
    print(f'{fast_result} is the magic index in {sample} (fast ver.)')
|
996,392 | 68efdfccc722109a932768b70a06837dad56d4a6 | #
# PySNMP MIB module NETFINITYMANAGER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NETFINITYMANAGER-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:18:46 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
dmiMibs, = mibBuilder.importSymbols("NETFINITYSERVICES-MIB", "dmiMibs")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, Integer32, Unsigned32, TimeTicks, Bits, MibIdentifier, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, NotificationType, ModuleIdentity, ObjectIdentity, IpAddress, Counter32, enterprises = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Integer32", "Unsigned32", "TimeTicks", "Bits", "MibIdentifier", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "NotificationType", "ModuleIdentity", "ObjectIdentity", "IpAddress", "Counter32", "enterprises")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DmiInteger(Integer32):
    """DMI INTEGER attribute type; a plain SNMP Integer32."""
    pass
class DmiOctetstring(OctetString):
    """DMI OCTET STRING attribute type; a plain SNMP OctetString."""
    pass
class DmiDisplaystring(DisplayString):
    """DMI DisplayString attribute type."""
    pass
class DmiDate(OctetString):
    """DMI date value: a fixed-length 28-octet string."""
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(28, 28)
    fixedLength = 28
class DmiComponentIndex(Integer32):
    """Index type identifying one DMI component instance."""
    pass
# ---------------------------------------------------------------------------
# Auto-generated MIB object definitions (pysmi output).  The OID tuples and
# metadata below are data, not hand-written logic — do not edit by hand;
# regenerate from the ASN.1 source instead.  `mibBuilder` is supplied by the
# PySNMP MIB loader that executes this module.
# ---------------------------------------------------------------------------
netFinityManagerMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3))
dmtfGroups2 = MibIdentifier((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1))
# --- ComponentId table: attributes common to all components (a1*) ---------
tComponentid2 = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1), )
if mibBuilder.loadTexts: tComponentid2.setStatus('mandatory')
if mibBuilder.loadTexts: tComponentid2.setDescription('This group defines attributes common to all components.This group is required.')
eComponentid2 = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"))
if mibBuilder.loadTexts: eComponentid2.setStatus('mandatory')
if mibBuilder.loadTexts: eComponentid2.setDescription('')
a1Manufacturer = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 1), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1Manufacturer.setStatus('mandatory')
if mibBuilder.loadTexts: a1Manufacturer.setDescription('The name of the manufacturer that produces this component.')
a1Product = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 2), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1Product.setStatus('mandatory')
if mibBuilder.loadTexts: a1Product.setDescription('The name of the component.')
a1Version = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 3), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1Version.setStatus('mandatory')
if mibBuilder.loadTexts: a1Version.setDescription('The version for the component.')
a1SerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 4), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1SerialNumber.setStatus('mandatory')
if mibBuilder.loadTexts: a1SerialNumber.setDescription('The serial number for this instance of this component.')
# --- Remote systems table: systems managed by Remote System Manager (a11*)
tRemoteSystems = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11), )
if mibBuilder.loadTexts: tRemoteSystems.setStatus('mandatory')
if mibBuilder.loadTexts: tRemoteSystems.setDescription('Table of remote systems defined for managing under the Remote System Manager service.')
eRemoteSystems = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"), (0, "NETFINITYMANAGER-MIB", "a11SystemTag"))
if mibBuilder.loadTexts: eRemoteSystems.setStatus('mandatory')
if mibBuilder.loadTexts: eRemoteSystems.setDescription('')
a11SystemTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 1), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemTag.setStatus('mandatory')
if mibBuilder.loadTexts: a11SystemTag.setDescription('Unique ID number of this remote system in Remote System Manager.')
a11SystemName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 2), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemName.setStatus('mandatory')
if mibBuilder.loadTexts: a11SystemName.setDescription('Displayable name of the given system.')
a11ProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 3), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11ProtocolName.setStatus('mandatory')
if mibBuilder.loadTexts: a11ProtocolName.setDescription('This is the type of network protocol used for communication with the given system.')
a11NetworkAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 4), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11NetworkAddress.setStatus('mandatory')
if mibBuilder.loadTexts: a11NetworkAddress.setDescription('This is the network address of the given system.')
a11SystemState = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("vOff-line", 0), ("vOn-line", 1), ("vOff-lineWithErrorCondition", 2), ("vOn-lineWithErrorCondition", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemState.setStatus('mandatory')
if mibBuilder.loadTexts: a11SystemState.setDescription('This reflects the on-line/off-line state of the given system.')
a11Server = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("vFalse", 0), ("vTrue", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11Server.setStatus('mandatory')
if mibBuilder.loadTexts: a11Server.setDescription('This indicates if the system is some sort of server system.')
a11Manager = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("vFalse", 0), ("vTrue", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11Manager.setStatus('mandatory')
if mibBuilder.loadTexts: a11Manager.setDescription('This indicates if the system is a NetFinity Manager system.')
a11OperatingSystemType = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11))).clone(namedValues=NamedValues(("vUnknown", 0), ("vIbmOs2", 1), ("vMicrosoftWindows", 2), ("vNovellNetware", 3), ("vMicrosoftWindowsNt", 4), ("vIbmAix", 5), ("vBanyanVines", 6), ("vIbmPc-dos", 7), ("vScoXenix", 8), ("vUnixSystemV", 9), ("vMicrosoftWindows95", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11OperatingSystemType.setStatus('mandatory')
if mibBuilder.loadTexts: a11OperatingSystemType.setDescription('The primary operating system running on the given system.')
a11OsMajorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 9), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11OsMajorVersion.setStatus('mandatory')
if mibBuilder.loadTexts: a11OsMajorVersion.setDescription('The major version of primary operating system running on the given system.')
a11OsMinorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 10), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11OsMinorVersion.setStatus('mandatory')
if mibBuilder.loadTexts: a11OsMinorVersion.setDescription('The minor version of primary operating system running on the given system.')
a11SystemModelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 11), DmiOctetstring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemModelId.setStatus('mandatory')
if mibBuilder.loadTexts: a11SystemModelId.setDescription('These are the model identifier values for the given system. Systems which are the same non-zero model identifier are the same type of system.')
a11SystemModelName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 12), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemModelName.setStatus('mandatory')
if mibBuilder.loadTexts: a11SystemModelName.setDescription('This is the model name for the given system, if known.')
# Columns 13/14 carry '-' in their MIB labels, hence setLabel() overrides.
a11SystemOn_lineNotify = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a11SystemOn-lineNotify").setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemOn_lineNotify.setStatus('mandatory')
if mibBuilder.loadTexts: a11SystemOn_lineNotify.setDescription('Severity of alert generated when system becomes on-line, if enabled.')
a11SystemOff_lineNotify = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a11SystemOff-lineNotify").setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemOff_lineNotify.setStatus('mandatory')
if mibBuilder.loadTexts: a11SystemOff_lineNotify.setDescription('Severity of alert generated when system becomes off-line, if enabled.')
a11PresenceCheckInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 15), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11PresenceCheckInterval.setStatus('mandatory')
if mibBuilder.loadTexts: a11PresenceCheckInterval.setDescription('This is the time between attempts by Remote System Manager to verify the presence of the remote system, in seconds.')
a11MacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 16), DmiOctetstring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11MacAddress.setStatus('mandatory')
if mibBuilder.loadTexts: a11MacAddress.setDescription('Media Access Control (MAC) address of remote system, if known.')
# --- Remote system groups table (a12*) ------------------------------------
tRemoteSystemGroups = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12), )
if mibBuilder.loadTexts: tRemoteSystemGroups.setStatus('mandatory')
if mibBuilder.loadTexts: tRemoteSystemGroups.setDescription('Table of remote system groups defined for managing under the Remote System Manager service.')
eRemoteSystemGroups = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"), (0, "NETFINITYMANAGER-MIB", "a12GroupTag"))
if mibBuilder.loadTexts: eRemoteSystemGroups.setStatus('mandatory')
if mibBuilder.loadTexts: eRemoteSystemGroups.setDescription('')
a12GroupTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 1), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12GroupTag.setStatus('mandatory')
if mibBuilder.loadTexts: a12GroupTag.setDescription('Unique ID number of this system group in Remote System Manager.')
a12GroupName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 2), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12GroupName.setStatus('mandatory')
if mibBuilder.loadTexts: a12GroupName.setDescription('Displayable name of the given group.')
a12RequiredKeywordsCombination = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("vAllKeywordsMustMatch", 0), ("vAnyOfTheKeywordsMayMatch", 1), ("vExactlyOneOfTheKeywordsMustMatch", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12RequiredKeywordsCombination.setStatus('mandatory')
if mibBuilder.loadTexts: a12RequiredKeywordsCombination.setDescription('This reflects whether all, any, or one of the keywords for the group must be matched during a discovery request in order for a remote system to be discovered.')
a12Keywords = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 4), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12Keywords.setStatus('mandatory')
if mibBuilder.loadTexts: a12Keywords.setDescription('List of keywords which are used for finding remote systems during a discovery request.')
a12SystemOn_lineNotifyDefault = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a12SystemOn-lineNotifyDefault").setMaxAccess("readonly")
if mibBuilder.loadTexts: a12SystemOn_lineNotifyDefault.setStatus('mandatory')
if mibBuilder.loadTexts: a12SystemOn_lineNotifyDefault.setDescription('On-line notification default for systems in this group, if enabled.')
a12SystemOff_lineNotifyDefault = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a12SystemOff-lineNotifyDefault").setMaxAccess("readonly")
if mibBuilder.loadTexts: a12SystemOff_lineNotifyDefault.setStatus('mandatory')
if mibBuilder.loadTexts: a12SystemOff_lineNotifyDefault.setDescription('Off-line notification default for systems in this group, if enabled.')
a12DefaultPresenceCheckInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 7), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12DefaultPresenceCheckInterval.setStatus('mandatory')
if mibBuilder.loadTexts: a12DefaultPresenceCheckInterval.setDescription('This is the default time between attempts by Remote System Manager to verify the presence of the system in this group, in seconds. 0 = No group-specific default.')
# The only writable column: writing any value triggers a discovery request.
a12DiscoveryStartFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 8), DmiInteger()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a12DiscoveryStartFlag.setStatus('mandatory')
if mibBuilder.loadTexts: a12DiscoveryStartFlag.setDescription('The value of this flag is not significant. To start a discovery request on the given System Group, write any value to this field.')
# --- System-to-group membership map (a13*) --------------------------------
tRemoteSystemGroupMap = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13), )
if mibBuilder.loadTexts: tRemoteSystemGroupMap.setStatus('mandatory')
if mibBuilder.loadTexts: tRemoteSystemGroupMap.setDescription('Table mapping the membership of remote system in groups in the Remote System Manager service.')
eRemoteSystemGroupMap = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"), (0, "NETFINITYMANAGER-MIB", "a13SystemTag"), (0, "NETFINITYMANAGER-MIB", "a13GroupTag"))
if mibBuilder.loadTexts: eRemoteSystemGroupMap.setStatus('mandatory')
if mibBuilder.loadTexts: eRemoteSystemGroupMap.setDescription('')
a13SystemTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13, 1, 1), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a13SystemTag.setStatus('mandatory')
if mibBuilder.loadTexts: a13SystemTag.setDescription('Unique ID number of remote system.')
a13GroupTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13, 1, 2), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a13GroupTag.setStatus('mandatory')
if mibBuilder.loadTexts: a13GroupTag.setDescription('Unique ID number of group containing the remote system.')
# Publish every symbol so other MIB modules can import them by name.
mibBuilder.exportSymbols("NETFINITYMANAGER-MIB", a11Manager=a11Manager, eComponentid2=eComponentid2, dmtfGroups2=dmtfGroups2, eRemoteSystemGroups=eRemoteSystemGroups, DmiInteger=DmiInteger, a11SystemName=a11SystemName, a11MacAddress=a11MacAddress, a12GroupTag=a12GroupTag, a11OsMajorVersion=a11OsMajorVersion, a1SerialNumber=a1SerialNumber, a1Manufacturer=a1Manufacturer, eRemoteSystemGroupMap=eRemoteSystemGroupMap, DmiDate=DmiDate, a11Server=a11Server, tRemoteSystemGroups=tRemoteSystemGroups, a11ProtocolName=a11ProtocolName, tRemoteSystemGroupMap=tRemoteSystemGroupMap, a11SystemModelName=a11SystemModelName, a1Product=a1Product, a11PresenceCheckInterval=a11PresenceCheckInterval, a11NetworkAddress=a11NetworkAddress, DmiDisplaystring=DmiDisplaystring, a12GroupName=a12GroupName, a11SystemTag=a11SystemTag, eRemoteSystems=eRemoteSystems, a11OperatingSystemType=a11OperatingSystemType, a11SystemState=a11SystemState, DmiComponentIndex=DmiComponentIndex, a13SystemTag=a13SystemTag, a13GroupTag=a13GroupTag, a1Version=a1Version, a12RequiredKeywordsCombination=a12RequiredKeywordsCombination, tRemoteSystems=tRemoteSystems, a12DiscoveryStartFlag=a12DiscoveryStartFlag, netFinityManagerMIB=netFinityManagerMIB, a11SystemOn_lineNotify=a11SystemOn_lineNotify, DmiOctetstring=DmiOctetstring, a12SystemOn_lineNotifyDefault=a12SystemOn_lineNotifyDefault, a12DefaultPresenceCheckInterval=a12DefaultPresenceCheckInterval, tComponentid2=tComponentid2, a11SystemModelId=a11SystemModelId, a12SystemOff_lineNotifyDefault=a12SystemOff_lineNotifyDefault, a12Keywords=a12Keywords, a11OsMinorVersion=a11OsMinorVersion, a11SystemOff_lineNotify=a11SystemOff_lineNotify)
|
996,393 | 96bce50c991ea80034c34254af4790fa4e8dfcb7 | # -*- encoding: utf-8 -*-
from datetime import date
from datetime import datetime
from datetime import time
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.conf import settings
from django.db.models import Q
from math import floor
from log.models import TraceableModel
from illness.models import Illness
from cal.models import Appointment
from consulting.models import Task, Conclusion, Medicine
from private_messages.models import Message
from survey.models import Block
class Profile(TraceableModel):
SEX = (
(1, _(u'Mujer')),
(2, _(u'Hombre')),
)
STATUS = (
(settings.MARRIED, _(u'Casado/a')),
(settings.STABLE_PARTNER, _(u'Pareja Estable')),
(settings.DIVORCED, _(u'Divorciado/a')),
(settings.WIDOW_ER, _(u'Viudo/a')),
(settings.SINGLE, _(u'Soltero/a')),
(settings.OTHER, _(u'Otro')),
)
ROLE = (
(settings.DOCTOR, _(u'Médico')),
(settings.ADMINISTRATIVE, _(u'Administrativo')),
(settings.PATIENT, _(u'Paciente')),
)
EDUCATION = (
(1, _(u'Analfabeto por problemas físicos o psíquicos')),
(2, _(u'Analfabeto por otras razones')),
(3, _(u'Sin estudios')),
(4, _(u'Estudios primarios o equivalentes')),
(5, _(u'Enseñanza general secundaria, 1er ciclo')),
(6, _(u'Enseñanza Profesional de 2º grado, 2º ciclo')),
(7, _(u'Enseñanza general secundaria, 2º ciclo')),
(8, _(u'Enseñanzas profesionales superiores')),
(9, _(u'Estudios universitarios o equivalentes'))
)
#username is the nick with you login in app
user = models.ForeignKey(User, unique=True, related_name='profiles',
help_text='Usuario asociado del sistema')
doctor = models.ForeignKey(User, blank=True, null=True,
related_name='doctor',
limit_choices_to={
'profiles__role': settings.DOCTOR
})
#patients = models.ManyToManyField(User, related_name='patients_profiles',
# blank=True, null=True)
medical_number = models.CharField(_(u'Historia médica'),
max_length=9,
unique=True,
null=True,
blank=True)
illnesses = models.ManyToManyField(
Illness,
related_name='illnesses_profiles',
blank=True,
null=True,
limit_choices_to={'cie_code__isnull': True, 'parent__isnull': False}
)
name = models.CharField(_(u'Nombre'), max_length=150, blank=True)
first_surname = models.CharField(_(u'Primer Apellido'), max_length=150,
blank=True)
second_surname = models.CharField(_(u'Segundo Apellido'), max_length=150,
blank=True, default='')
nif = models.CharField(
_(u'DNI/NIF'),
max_length=9,
null=True,
unique=True,
help_text=_(u"Requerido para pacientes mayores de 14 años")
)
def unique_error_message(self, model_class, unique_check):
if unique_check == ("nif",):
return _(u'Ya existe un Paciente con este DNI/NIF')
else:
return super(Profile, self).unique_error_message(model_class,
unique_check)
sex = models.IntegerField(_(u'Sexo'), choices=SEX,
blank=True, null=True)
address = models.CharField(_(u'Dirección'), max_length=150, blank=True)
town = models.CharField(_(u'Municipio'), max_length=150, blank=True)
postcode = models.IntegerField(_(u'Código Postal'), blank=True, null=True)
dob = models.DateField(_(u'Fecha de Nacimiento'), blank=True, null=True)
status = models.IntegerField(_(u'Estado Civil'), choices=STATUS,
default=STATUS[0][0], blank=True, null=True)
phone1 = models.CharField(_(u'Teléfono 1'), max_length=9, blank=True)
phone2 = models.CharField(_(u'Teléfono 2'), max_length=9, blank=True)
emergency_phone = models.CharField(
_(u'En caso de emergencia avisar a'),
max_length=500,
blank=True
)
email = models.EmailField(_(u'Correo Electrónico'), max_length=150,
null=True, unique=True, blank=True)
education = models.IntegerField(_(u'Nivel de estudios'), choices=EDUCATION,
blank=True, null=True)
profession = models.CharField(_(u'Profesión'), max_length=150, blank=True)
source = models.CharField(_(u'Fuente de derivación'), max_length=255,
blank=True)
role = models.IntegerField(_(u'Rol'), choices=ROLE, blank=True, null=True)
updated_password_at = models.DateTimeField(
_(u'Última vez que actualizó la contraseña'),
auto_now_add=True
)
def save(self, *args, **kw):
if self.email == '':
self.email = None
if self.nif == '':
self.nif = None
if not self.sex:
self.sex = None
if self.medical_number == '':
self.medical_number = None
super(Profile, self).save(*args, **kw)
if not self.user.is_active:
for app in Appointment.objects.filter(
Q(patient=self.user),
Q(date__gt=date.today()) |
Q(date=date.today(),
start_time__gte=datetime.time(datetime.now()))
).exclude(
status__in=[settings.CANCELED_BY_PATIENT,
settings.CANCELED_BY_DOCTOR]).order_by('date'):
app.status = settings.CANCELED_BY_DOCTOR
app.save()
if not self.medical_number and self.role == settings.PATIENT:
self.medical_number = "%s%05d" % (date.today().year, self.pk)
super(Profile, self).save(*args, **kw)
def get_full_name(self, title=False):
if title:
pre = ''
if self.role == settings.DOCTOR:
if self.sex == settings.WOMAN:
pre = u'Dra.'
elif self.sex == settings.MAN:
pre = u'Dr.'
else:
if self.sex == settings.MAN:
pre = u'D.'
elif self.sex == settings.WOMAN:
pre = u'D.ª'
return u"%s %s %s %s" % (pre, self.name,
self.first_surname,
self.second_surname)
return u"%s %s %s" % (self.name,
self.first_surname,
self.second_surname)
def is_doctor(self):
return self.role == settings.DOCTOR
def is_administrative(self):
return self.role == settings.ADMINISTRATIVE
def is_patient(self):
return self.role == settings.PATIENT
def __unicode__(self):
return u'id: %s profile: %s %s %s' \
% (self.id, self.name, self.first_surname, self.second_surname)
def age_at(self, at_date):
yo = ''
if not self.dob is None:
try:
delta = datetime.combine(
at_date, time()) - datetime.combine(self.dob, time())
yo = int(floor(delta.days / 365.25))
except:
yo = self.get_age()
return yo
def get_age(self):
return self.age_at(date.today())
def get_sex(self):
if self.sex:
return self.SEX[self.sex - 1][1]
return ''
def get_education(self):
if self.education:
return self.EDUCATION[self.education - 1][1]
return ''
def get_status(self):
if self.status == settings.MARRIED:
if self.sex == settings.WOMAN:
status = _(u'Casada')
else:
status = _(u'Casado')
elif self.status == settings.STABLE_PARTNER:
status = _(u'Pareja Estable')
elif self.status == settings.DIVORCED:
if self.sex == settings.WOMAN:
status = _(u'Divorciada')
else:
status = _(u'Divorciado')
elif self.status == settings.WIDOW_ER:
if self.sex == settings.WOMAN:
status = _(u'Viuda')
else:
status = _(u'Viudo')
elif self.status == settings.SINGLE:
if self.sex == settings.WOMAN:
status = _(u'Soltera')
else:
status = _(u'Soltero')
else:
status = _(u'Otro')
return status
def get_lastAppointment(self):
appointments = Appointment.objects.filter(
patient=self.user,
date__lt=date.today()).order_by('-date')
if appointments.count() > 0:
lastAppointment = appointments[0]
else:
lastAppointment = ''
return lastAppointment
def get_next_real_appointment(self):
appointments = Appointment.objects.filter(
Q(patient=self.user, notify=True, status=settings.CONFIRMED),
Q(date__gt=date.today()) |
Q(date=date.today(),
start_time__gte=datetime.time(datetime.now()))
).order_by('date')
for app in appointments:
if not app.has_activity():
nextAppointment = app
break
else:
nextAppointment = ''
return nextAppointment
def get_nextAppointment(self):
appointments = Appointment.objects.filter(
Q(patient=self.user),
Q(date__gt=date.today()) |
Q(date=date.today(),
start_time__gte=datetime.time(datetime.now()))
).exclude(status__in=[settings.CANCELED_BY_PATIENT,
settings.CANCELED_BY_DOCTOR]).order_by('date')
for app in appointments:
if not app.has_activity():
nextAppointment = app
break
else:
nextAppointment = ''
return nextAppointment
def get_conclusions(self):
return Conclusion.objects.filter(
appointment__patient=self.user).latest('date')
def get_treatment(self, at_date=None):
if at_date:
return Medicine.objects.filter(Q(patient=self.user,
created_at__lte=at_date,
is_previous=False),
Q(date__isnull=True) |
Q(date__gte=at_date)).order_by('id')
else:
return Medicine.objects.filter(
patient=self.user,
date__isnull=True,
is_previous=False).order_by('component')
def get_pending_tasks(self):
next_app = self.get_nextAppointment()
tasks = []
if next_app and datetime.combine(next_app.date, next_app.start_time) >= datetime.now():
ddays = (next_app.date - date.today()).days
tasks = Task.objects.filter(
patient=self.user,
self_administered=True,
completed=False,
assess=True,
previous_days__gte=ddays,
previous_days__gt=0).order_by('-creation_date')
return tasks
def get_assigned_tasks(self):
tasks = Task.objects.filter(
patient=self.user,
self_administered=True,
completed=False,
assess=True,
previous_days__gt=0).order_by('-creation_date')
return tasks
def get_anxiety_status(self, at_date=None, index=False, html=False):
filter_option = Q(patient=self.user,
survey__code__in=(
settings.INITIAL_ASSESSMENT,
settings.ANXIETY_DEPRESSION_SURVEY
),
completed=True,
assess=False)
if at_date:
filter_option = filter_option & Q(end_date__lte=at_date)
try:
for task in Task.objects.filter(filter_option).order_by('-end_date'):
status = task.get_anxiety_status(index)
if status != '':
break
if html:
if status[1] != 'success':
return '<span style="min-width:100px" class="label \
label-%s" >%s</span>' % (status[1], status[0])
return ''
else:
return status
except:
return ''
def get_depression_status(self, at_date=None, index=False, html=False):
filter_option = Q(patient=self.user,
survey__code__in=(
settings.INITIAL_ASSESSMENT,
settings.ANXIETY_DEPRESSION_SURVEY
),
completed=True,
assess=False)
if at_date:
filter_option = filter_option & Q(end_date__lte=at_date)
try:
for task in Task.objects.filter(filter_option).order_by('-end_date'):
status = task.get_depression_status(index)
if status != '':
break
if html:
if status[1] != 'success':
return '<span style="min-width:100px" class="label \
label-%s" >%s</span>' % (status[1], status[0])
return ''
else:
return status
except:
return ''
def get_unhope_status(self, at_date=None, index=False, html=False):
filter_option = Q(patient=self.user,
survey__code=settings.UNHOPE_SURVEY,
completed=True,
assess=False)
if at_date:
filter_option = filter_option & Q(end_date__lte=at_date)
try:
for task in Task.objects.filter(filter_option).order_by('-end_date'):
status = task.get_unhope_status(index)
if status != '':
break
if html:
if status[1] != 'success':
return '<span style="min-width:100px" class="label \
label-%s" >%s</span>' % (status[1], status[0])
return ''
else:
return status
except:
return ''
def get_ybocs_status(self, at_date=None, index=False, html=False):
filter_option = Q(patient=self.user,
survey__code=settings.YBOCS_SURVEY,
completed=True,
assess=False)
if at_date:
filter_option = filter_option & Q(end_date__lte=at_date)
try:
for task in Task.objects.filter(filter_option).order_by('-end_date'):
status = task.get_ybocs_status(index)
if status != '':
break
if html:
if status[1] != 'success':
return '<span style="min-width:100px" class="label \
label-%s" >%s</span>' % (status[1], status[0])
return ''
else:
return status
except:
return ''
def get_suicide_status(self, at_date=None, index=False, html=False):
filter_option = Q(patient=self.user,
survey__code=settings.UNHOPE_SURVEY,
completed=True,
assess=False)
if at_date:
filter_option = filter_option & Q(end_date__lte=at_date)
try:
for task in Task.objects.filter(filter_option).order_by('-end_date'):
status = task.get_suicide_status(index)
if status != '':
break
if html:
if status[1] != 'success':
return '<span style="min-width:100px" class="label \
label-%s" >%s</span>' % (status[1], status[0])
return ''
else:
return status
except:
return ''
def get_medical_status(self, at_date=None, index=False, html=False):
statuses = [self.get_anxiety_status(at_date, index, html),
self.get_depression_status(at_date, index, html),
self.get_unhope_status(at_date, index, html),
self.get_suicide_status(at_date, index, html),
self.get_ybocs_status(at_date, index, html)]
return filter(None, statuses)
def get_unread_messages(self):
return Message.objects.get_pending_for_user(self.user)
def get_mobile_phone(self):
if self.phone1 and int(self.phone1) / int(10e7) in (6, 7):
return self.phone1
elif self.phone2 and int(self.phone2) / int(10e7) in (6, 7):
return self.phone2
else:
return None
def get_illness_set(self):
illnesses = set()
for i in self.illnesses.all().order_by('code'):
parent = i
while parent:
illnesses.add(parent)
parent = parent.parent
return illnesses
def is_banned(self):
return self.user.banned_user.filter(Q(end_time__isnull=True) |
Q(end_time__gte=datetime.now()))
def get_scored_blocks(self, statistic=False):
if self.is_doctor():
if statistic:
return Block.objects.filter(
is_scored=True).values('code', 'name').distinct()
else:
return Block.objects.filter(
locks_tasks__patient__profiles__doctor=self.user,
is_scored=True).values('code', 'name').distinct()
else:
return Block.objects.filter(
blocks_tasks__patient=self.user,
is_scored=True).values('code', 'name').distinct()
    class Meta:
        # Human-readable names are Spanish: "Perfil" = profile.
        verbose_name = "Perfil"
        verbose_name_plural = "Perfiles"
        # Default ordering: surnames first, then given name; id breaks ties.
        ordering = ['first_surname', 'second_surname', 'name', 'id']
|
996,394 | dfda624c4e1f2fa5b38d06e635cc9947103d097e | from fst import EPSILON
simulation_number = 1
# Hyper-parameters for one genetic-algorithm simulation of German final
# devoicing.  Integer values under the mutation keys are relative mutation
# weights (0 disables that mutation); booleans and floats are used as-is.
configurations_dict = \
    {
        # Top-level mutation weights and evolution switches.
        "MUTATE_RULE_SET": 1,
        "MUTATE_HMM": 1,
        "EVOLVE_RULES": True,
        "EVOLVE_HMM": True,
        # HMM mutation weights.
        "COMBINE_EMISSIONS": 0,
        "MERGE_EMISSIONS": 0,
        "ADVANCE_EMISSION": 1,
        "CLONE_STATE": 0,
        "CLONE_EMISSION": 1,
        "SPLIT_EMISSION": 0,
        'MOVE_EMISSION': 1,
        "ADD_STATE": 1,
        "REMOVE_STATE": 1,
        "MERGE_STATES": 1,
        "SPLIT_STATES": 0,
        "ADD_TRANSITION": 1,
        "REMOVE_TRANSITION": 1,
        "ADD_SEGMENT_TO_EMISSION": 1,
        "REMOVE_SEGMENT_FROM_EMISSION": 1,
        "CHANGE_SEGMENT_IN_EMISSION": 1,
        "ADD_EMISSION_TO_STATE": 1,
        "REMOVE_EMISSION_FROM_STATE": 1,
        "ADD_SEGMENT_BY_FEATURE_BUNDLE": 0,
        # MDL encoding-length multipliers (data vs grammar trade-off).
        "DATA_ENCODING_LENGTH_MULTIPLIER": 10,
        "HMM_ENCODING_LENGTH_MULTIPLIER": 1,
        "RULES_SET_ENCODING_LENGTH_MULTIPLIER": 1,
        # Rule-set mutation weights.
        "ADD_RULE": 1,
        "REMOVE_RULE": 1,
        "DEMOTE_RULE": 1,
        "CHANGE_RULE": 1,
        "MUTATE_TARGET": 1,
        "MUTATE_CHANGE": 1,
        "MUTATE_LEFT_CONTEXT": 1,
        "MUTATE_RIGHT_CONTEXT": 1,
        "MUTATE_OBLIGATORY": 1,
        "SWITCH_TARGET_CHANGE": 0,
        "ADD_FEATURE_BUNDLE": 1,
        "REMOVE_FEATURE_BUNDLE": 1,
        "CHANGE_EXISTING_FEATURE_BUNDLE": 1,
        "ADD_FEATURE": 1,
        "REMOVE_FEATURE": 1,
        "CHANGE_FEATURE_VALUE": 1,
        "CHANGE_KLEENE_VALUE": 0,
        # Structural limits on rules and HMM size.
        "MAX_FEATURE_BUNDLE_IN_CONTEXT": 2,
        "MAX_NUM_OF_INNER_STATES": 4,
        "MIN_NUM_OF_INNER_STATES": 1,
        "MAX_NUMBER_OF_RULES": 3,
        "MIN_NUMBER_OF_RULES": 0,
        # Phonology feature flags.
        "MORPHEME_BOUNDARY_FLAG": False,
        "LENGTHENING_FLAG": False,
        "UNDERSPECIFICATION_FLAG": False,
        "WORD_BOUNDARY_FLAG": True,
        "RESTRICTIONS_ON_ALPHABET": False,
        # Genetic algorithm params
        "CROSSOVER_RATE": 0.2,
        "MUTATION_RATE": 0.8,
        "CROSSOVER_COOLING_RATE": 1.0,
        "MUTATION_COOLING_RATE": 1.0,
        "VAR_AND": False,
        "TOTAL_GENERATIONS": 30000,
        "REPRODUCTION_LAMBDA": 0.8,
        "SELECTION_METHOD": "rank",  # ["tournament", "rank"]
        "RANK_SELECTION_PRESSURE": 1.7,
        "TOURNAMENT_SIZE": 2,
        # Island model params
        "ISLAND_POPULATION": 300,
        "MIGRATION_INTERVAL": 30,
        "MIGRATION_RATIO": 0.2,
        "ISLAND_ELITE_RATIO": 0.1,
        "MIGRATION_SCHEME": "round_robin",  # ["fixed", "round_robin"]
        # HMM
        "HMM_CROSSOVER_METHOD": "emissions",  # ['emissions', 'matrix', 'subgraph', 'connected_component']"
        "LIMIT_CROSSOVER_RESULT_HMM_NUM_OF_STATES": True,
        "HMM_MAX_CROSSOVERS": 1,
        "RANDOM_HMM_MAX_EMISSION_LENGTH": 3,
        "RANDOM_HMM_MAX_EMISSIONS_PER_STATE": 15,
        "RANDOM_HMM_METHOD": 'simple',  # ['simple', 'matrix']
        "HMM_RANDOM_EMISSIONS_BY_DATA": True,  # HMM random emissions will be substrings of data words
        "DEFAULT_HMM_BY_RANDOM_PROBAB": 0.0,
        "EXPLICIT_HMM_BY_RANDOM_PROBAB": 0.0,
        "TRANSITION_MATRIX_TRANSITION_PROBABILITY": 0.1,
        # Rule set
        "RULE_SET_CROSSOVER_METHOD": "switch_pairs",  # ['unilateral', 'switch_pairs', 'pivot'],
        # Transducers
        "MINIMIZE_TRANSDUCER": False,
        "TRANSDUCER_STATES_LIMIT": 5000,
        "DFAS_STATES_LIMIT": 1000,
    }
# Output-file naming for this simulation.
log_file_template = "{}_german_final_devoicing_{}.txt"
segment_table_file_name = "german_final_devoicing.txt"
# Training corpus: German word forms exhibiting final devoicing
# (voiced stem-final obstruents surface voiceless word-finally).
data = ['aktsende', 'aktsenden', 'aktsendesten', 'aktsendken', 'aktsent', 'aktive', 'aktiven', 'aktivesten', 'aktivken', 'aktif', 'atvokade', 'atvokaden', 'atvokadesten', 'atvokadken', 'atvokat', 'betsuk', 'betsuge', 'betsugen', 'betsugesten', 'betsugken', 'bank', 'bange', 'bangen', 'bangesten', 'bangken', 'begabe', 'begaben', 'begabesten', 'begabken', 'begap', 'deftik', 'deftige', 'deftigen', 'deftigesten', 'deftigken', 'devode', 'devoden', 'devodesten', 'devodken', 'devot', 'gants', 'gantze', 'gantzen', 'gantzesten', 'gantzken', 'hets', 'hetze', 'hetzen', 'hetzesten', 'hetzken', 'hunde', 'hunden', 'hundesten', 'hundken', 'hunt', 'kots', 'kotze', 'kotzen', 'kotzesten', 'kotzken', 'kats', 'katze', 'katzen', 'katzesten', 'katzken', 'kan', 'kane', 'kanen', 'kanesten', 'kanken', 'knosbe', 'knosben', 'knosbesten', 'knosbken', 'knosp', 'kebabe', 'kebaben', 'kebabesten', 'kebabken', 'kebap', 'tak', 'tage', 'tagen', 'tagesten', 'tagken', 'tabe', 'taben', 'tabesten', 'tabken', 'tap', 'tsank', 'tsange', 'tsangen', 'tsangesten', 'tsangken', 'umtsuk', 'umtsuge', 'umtsugen', 'umtsugesten', 'umtsugken']
# data noise
# Replace the devoiced surface forms with their (ungrammatical) voiced
# counterparts to inject noise into the corpus.
data.remove('kebap')
data.remove('hunt')
data += ['kebab', 'hund'] # 2% noise - total data length is 100
# Gold-standard HMM: q1 emits the stems, q2 the suffixes (incl. EPSILON).
target_hmm = {'q0': ['q1'],
              'q1': (['q2'], ['kebab', 'hund', 'tab', 'tag', 'betsug',
                              'kan', 'tsang', 'aktsend', 'katz', 'umtsug',
                              'hetz', 'kotz', 'gantz', 'bang', 'knosb',
                              'begab', 'atvokad', 'aktiv',
                              'devod', 'deftig',
                              ]),
              'q2': (['qf'], [EPSILON, 'e', 'ken', 'en', 'esten']),
              }
# Rule: non-nasal consonants ([-nasal, +cons]) become voiceless ([-voice])
# before a word boundary; the final True marks the rule as obligatory.
devoicing_rule = [[{"nasal": "-", "cons": "+"}], [{"voice": "-"}], [], [{"WB": True}], True]
rule_set = [devoicing_rule]
target_tuple = (target_hmm, rule_set)
996,395 | b1edd144800da58447071b43433c80ada6003384 | # Code to transform simulation results into a format that can be read by gnuplot
# As well as to do calculations for average f-score
# Run for instance as:
# python read_simulation_results.py --project=data.example_project --category=B-speculation --xspace=0.05 --xaxisstep=200 --maxy=0.8
import argparse
import os
import glob
import math
from matplotlib.pyplot import plot, show, bar, grid, axis, savefig, clf
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import numpy as np
import active_learning_preannotation
OUTPUT_DIR = "evaluation_simulate_active_learning"
ACTIVE = "active"
RANDOM = "random"
def extract_data(result_dict, files):
    """Accumulate f-scores from CoNLL result files into *result_dict*.

    Each filename encodes the training-sample size as the integer between
    the last underscore and ``conll_res`` (e.g. ``run_300conll_res.txt`` ->
    key ``300``).  Every line containing ``f1_score`` contributes one float
    (the second tab-separated field) to the list under that key.

    :param result_dict: dict mapping sample size -> list of f-scores,
        mutated in place.
    :param files: iterable of result-file paths.
    """
    for path in files:
        number = int(path.split("conll_res")[0].split("_")[-1])
        # setdefault replaces the original's membership test + assignment.
        scores = result_dict.setdefault(number, [])
        # ``with`` guarantees the handle is closed even if parsing raises;
        # the original leaked the file handle on error.
        with open(path) as handle:
            for line in handle:
                if "f1_score" in line:
                    scores.append(float(line.strip().split("\t")[1]))
def write_dict(name, result_dict, output_file, color, marker, markersize, x_value_extra):
    """Write mean/min/max f-scores for one configuration and plot its curve.

    Writes one ``key<TAB>mean<TAB>min<TAB>max`` line per sample size to
    *output_file* (gnuplot format), draws the curve with error bars on the
    current matplotlib axes, and returns the plot handle for the legend.

    :param name: section header written before the data (e.g. ``#random...``).
    :param result_dict: dict mapping sample size -> list of f-scores.
    :param output_file: open writable file object.
    :param color, marker, markersize: matplotlib style for this curve.
    :param x_value_extra: horizontal offset so overlapping curves stay legible.
    :returns: the matplotlib line handle from ``plt.plot``.
    """
    x_values = []
    y_values = []
    error_max = []
    error_min = []
    output_file.write(name + "\n")
    output_file.write("# sample size\tf_score\n")
    for key in sorted(result_dict.keys()):
        scores = sorted(result_dict[key])
        # Renamed from ``min``/``max`` — the original shadowed the builtins.
        low = scores[0]
        high = scores[-1]
        mean = sum(result_dict[key]) / len(result_dict[key])
        output_file.write("%s\t%s\t%s\t%s\n" % (key, mean, low, high))
        x_values.append(key + x_value_extra)
        y_values.append(mean)
        error_max.append(high - mean)
        error_min.append(mean - low)
    plt.errorbar(x_values, y_values, yerr=[error_min, error_max], color=color,
                 marker=marker, linewidth=1, markersize=markersize)
    plot_handle, = plt.plot(x_values, y_values, color=color, marker=marker,
                            linewidth=1, markersize=markersize)
    output_file.write("\n\n")
    return plot_handle
def read_results(result_path, category, extra_xspace, category_index, sub_plot, x_axis_step, max_y):
    """Collect the four (active/random x word2vec on/off) result sets for
    one category, write them to a gnuplot .dat file, and plot them into the
    current subplot.

    :returns: list of (plot_handle, legend_label) tuples for the figure legend.
    """
    random_word2vecfalse = {}
    random_word2vectrue = {}
    active_word2vecfalse = {}
    active_word2vectrue = {}
    handles_labels = []
    # Each simulation run lives in its own subdir with active/ and random/
    # result folders; word2vec on/off is encoded as True/False in filenames.
    for subdir in [el for el in os.listdir(result_path) if not el.startswith(".")]:
        full_subdir = os.path.join(result_path, subdir)
        random_dir = os.path.join(full_subdir, RANDOM)
        active_dir = os.path.join(full_subdir, ACTIVE)
        extract_data(random_word2vecfalse, glob.glob(os.path.join(random_dir, "*False*conll_res.txt")))
        extract_data(random_word2vectrue, glob.glob(os.path.join(random_dir, "*True*conll_res.txt")))
        extract_data(active_word2vecfalse, glob.glob(os.path.join(active_dir, "*False*conll_res.txt")))
        extract_data(active_word2vectrue, glob.glob(os.path.join(active_dir, "*True*conll_res.txt")))
    title = category.replace("B-", "")
    print(title)
    # If the three standard ner categories are used
    if title == "org":
        title = "Organisation"
    if title == "per":
        title = "Person"
    if title == "loc":
        title = "Location"
    title = title[0].upper() + title[1:]
    plt.title(title)
    if category_index % 2 != 0: # Only write label at every other subplot
        plt.xlabel('Training data size')
    if category_index == 0: # Only need to write this once
        plt.ylabel('F-score')
    output_file = open(os.path.join(result_path, "conll_media_fscore.dat"), "w")
    handles_labels.append((write_dict("#random_word2vecfalse", random_word2vecfalse, output_file, "red", 's', 4, 0),\
        "Random"))
    handles_labels.append((write_dict("#active_word2vecfalse", active_word2vecfalse, output_file, "green", 'd', 4, 1*extra_xspace), \
        "Active"))
    handles_labels.append((write_dict("#random_word2vectrue", random_word2vectrue, output_file, "blue", '*', 5, 2*extra_xspace), \
        "Random, \nWord2Vec"))
    handles_labels.append((write_dict("#active_word2vectrue", active_word2vectrue, output_file, "black", 'o', 4, 3*extra_xspace), \
        "Active, \nWord2Vec"))
    min_x = sorted(list(random_word2vecfalse.keys()) + list(active_word2vecfalse.keys()) +\
        list(random_word2vectrue.keys()) + list(active_word2vectrue.keys()))[0]
    max_x = sorted(list(random_word2vecfalse.keys()) + list(active_word2vecfalse.keys()) +\
        list(random_word2vectrue.keys()) + list(active_word2vectrue.keys()))[-1]
    print(max_y, "max_y")
    # NOTE(review): the x-range is hard-coded to (200, 1000) even though
    # min_x/max_x are computed above — confirm this is intentional.
    plt.xlim(200, 1000)
    plt.ylim(0, max_y)
    plt.xticks(np.arange(min_x, max_x, step=int(x_axis_step)))
    # Only need to show axes once
    if category_index != 0:
        plt.gca().axes.get_yaxis().set_visible(False)
    output_file.close()
    return handles_labels
if __name__ == "__main__":
    # Command line: transform simulation results into gnuplot data files and
    # one PDF figure with a subplot per category.
    parser = argparse.ArgumentParser()
    parser.add_argument('--category', action='store', dest='category', help='The category that was evaluated, with its B-prefix, e.g., B-speculation')
    parser.add_argument('--xspace', action='store', dest='xspace', help='An extra space on the x-axes to improve visability of the results')
    parser.add_argument('--xaxisstep', action='store', dest='xaxisstep', help='The step with which the vales on the x-axis are to be printed')
    parser.add_argument('--maxy', action='store', dest='maxy', help='Max y-value')
    # NOTE(review): load_properties presumably registers/consumes the
    # --project option before parse_args is called — confirm against
    # active_learning_preannotation.
    properties, path_slash_format, path_dot_format = active_learning_preannotation.load_properties(parser)
    args = parser.parse_args()
    categories = []
    # If no categories are given on the comand line, plot results for all categories in the current project (the one that is given with the option --project
    # and that starts with a B
    if not args.category:
        default_path = os.path.join(path_slash_format, OUTPUT_DIR)
        print("No category given, will use all in the folder '" + OUTPUT_DIR + "' that starts with 'B'")
        for dir in os.listdir(default_path):
            if os.path.isdir(os.path.join(path_slash_format, OUTPUT_DIR, dir)):
                if not dir.startswith(".") and dir.startswith("B"):
                    categories.append(dir)
    else:
        categories.append(args.category)
    # Optional arguments fall back to defaults when absent.
    if not args.xspace:
        xspace = 0
    else:
        xspace = float(args.xspace)
    if not args.xaxisstep:
        x_axis_step = 20
    else:
        x_axis_step = int(args.xaxisstep)
    if not args.maxy:
        max_y = 1.0
    else:
        max_y = float(args.maxy)
    categories.sort()
    fig = plt.figure()
    # One subplot per category, side by side.
    for index, category in enumerate(categories):
        sub_plot = fig.add_subplot(1, len(categories), index+1)
        print("Plots results for ", category)
        result_path = os.path.join(path_slash_format, OUTPUT_DIR, category)
        print("Reads results from ", result_path)
        handles_labels = read_results(result_path, category, xspace, index, sub_plot, x_axis_step, max_y)
    # Reverse so the legend order matches the visual stacking of the curves.
    fig.legend(handles = [handle for (handle, label) in handles_labels][::-1],\
        labels = [label for (handle, label) in handles_labels][::-1])
    plt.subplots_adjust(right = 0.6, wspace = 0.05)
    figure_output_path = os.path.join(path_slash_format, OUTPUT_DIR, "_".join(categories) + ".pdf")
    print("Will save pdf of figure at '" + figure_output_path + "'.")
    fig.savefig(figure_output_path)
|
996,396 | fb9e734f06818bb69b4c00ef35f49d0d0779f794 | from django.views.generic import ListView, DetailView
from django.shortcuts import render, get_object_or_404
from django.views.generic.edit import CreateView, UpdateView
from django.urls import reverse, reverse_lazy
from .models import Product
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import user_passes_test
class ProductListView(ListView):
    """List all products; restricted to authenticated users."""
    @method_decorator(user_passes_test(lambda u: u.is_authenticated)) # and u.is_staff==False))
    def dispatch(self, *args, **kwargs):
        # Gate every HTTP method behind the authentication test above.
        return super().dispatch(*args, **kwargs)
    queryset = Product.objects.all()
    template_name = "products/list.html"
class AddProduct(CreateView):
    """Create a Product (fields: product, type); authenticated users only."""
    @method_decorator(user_passes_test(lambda u: u.is_authenticated)) # and u.is_staff==False))
    def dispatch(self, *args, **kwargs):
        # Gate every HTTP method behind the authentication test above.
        return super().dispatch(*args, **kwargs)
    model = Product
    template_name = 'products/product_form.html'
    fields = ["product","type"]
    # Redirect back to the product list after a successful create.
    success_url = reverse_lazy('products:list')
class UpdateProduct(UpdateView):
    """Edit an existing Product; mirrors AddProduct's form and access rules."""
    @method_decorator(user_passes_test(lambda u: u.is_authenticated)) # and u.is_staff==False))
    def dispatch(self, *args, **kwargs):
        # Gate every HTTP method behind the authentication test above.
        return super().dispatch(*args, **kwargs)
    model = Product
    template_name = 'products/product_form.html'
    fields = ["product","type"]
    # Redirect back to the product list after a successful update.
    success_url = reverse_lazy('products:list')
|
996,397 | ad7d8a9fe5a76e161917aa439b0a11a4e3252497 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from google import auth
from google.api_core import client_options
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.osconfig_v1.services.os_config_service import (
OsConfigServiceAsyncClient,
)
from google.cloud.osconfig_v1.services.os_config_service import OsConfigServiceClient
from google.cloud.osconfig_v1.services.os_config_service import pagers
from google.cloud.osconfig_v1.services.os_config_service import transports
from google.cloud.osconfig_v1.types import patch_deployments
from google.cloud.osconfig_v1.types import patch_jobs
from google.cloud.osconfig_v1.types import patch_jobs as gco_patch_jobs
from google.oauth2 import service_account
from google.protobuf import duration_pb2 as duration # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
from google.type import datetime_pb2 as datetime # type: ignore
from google.type import dayofweek_pb2 as dayofweek # type: ignore
from google.type import timeofday_pb2 as timeofday # type: ignore
def client_cert_source_callback():
    """Dummy mTLS callback returning a static (cert, key) byte pair."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mTLS
    variant and leaves everything else (including None) untouched."""
    expectations = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in expectations:
        assert OsConfigServiceClient._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize(
    "client_class", [OsConfigServiceClient, OsConfigServiceAsyncClient]
)
def test_os_config_service_client_from_service_account_file(client_class):
    """Both file-based constructors attach the loaded credentials and use
    the default API host."""
    creds = credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client._transport._credentials == creds
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client._transport._credentials == creds
        assert client._transport._host == "osconfig.googleapis.com:443"
def test_os_config_service_client_get_transport_class():
    """Both the default and the explicit "grpc" lookup resolve to the
    synchronous gRPC transport."""
    default_transport = OsConfigServiceClient.get_transport_class()
    assert default_transport == transports.OsConfigServiceGrpcTransport
    named_transport = OsConfigServiceClient.get_transport_class("grpc")
    assert named_transport == transports.OsConfigServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (OsConfigServiceClient, transports.OsConfigServiceGrpcTransport, "grpc"),
        (
            OsConfigServiceAsyncClient,
            transports.OsConfigServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_os_config_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Client construction honours transport instances/names, an explicit
    api_endpoint, and every GOOGLE_API_USE_MTLS mode.

    NOTE(review): os.environ is mutated and only deleted at the very end;
    an assertion failure mid-test leaks GOOGLE_API_USE_MTLS into later
    tests — consider mock.patch.dict.  Left unchanged (generated code).
    """
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(OsConfigServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(OsConfigServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            api_mtls_endpoint="squid.clam.whelk",
            client_cert_source=None,
            credentials=None,
            host="squid.clam.whelk",
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
    # "never".
    os.environ["GOOGLE_API_USE_MTLS"] = "never"
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class()
        patched.assert_called_once_with(
            api_mtls_endpoint=client.DEFAULT_ENDPOINT,
            client_cert_source=None,
            credentials=None,
            host=client.DEFAULT_ENDPOINT,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
    # "always".
    os.environ["GOOGLE_API_USE_MTLS"] = "always"
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class()
        patched.assert_called_once_with(
            api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
            client_cert_source=None,
            credentials=None,
            host=client.DEFAULT_MTLS_ENDPOINT,
        )
    # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
    # "auto", and client_cert_source is provided.
    os.environ["GOOGLE_API_USE_MTLS"] = "auto"
    options = client_options.ClientOptions(
        client_cert_source=client_cert_source_callback
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
            client_cert_source=client_cert_source_callback,
            credentials=None,
            host=client.DEFAULT_MTLS_ENDPOINT,
        )
    # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
    # "auto", and default_client_cert_source is provided.
    os.environ["GOOGLE_API_USE_MTLS"] = "auto"
    with mock.patch.object(transport_class, "__init__") as patched:
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
                client_cert_source=None,
                credentials=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
            )
    # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
    # "auto", but client_cert_source and default_client_cert_source are None.
    os.environ["GOOGLE_API_USE_MTLS"] = "auto"
    with mock.patch.object(transport_class, "__init__") as patched:
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                api_mtls_endpoint=client.DEFAULT_ENDPOINT,
                client_cert_source=None,
                credentials=None,
                host=client.DEFAULT_ENDPOINT,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has
    # unsupported value.
    os.environ["GOOGLE_API_USE_MTLS"] = "Unsupported"
    with pytest.raises(MutualTLSChannelError):
        client = client_class()
    del os.environ["GOOGLE_API_USE_MTLS"]
def test_os_config_service_client_client_options_from_dict():
    """A plain dict passed as client_options is accepted and forwarded to
    the transport like a ClientOptions instance."""
    with mock.patch(
        "google.cloud.osconfig_v1.services.os_config_service.transports.OsConfigServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = OsConfigServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            api_mtls_endpoint="squid.clam.whelk",
            client_cert_source=None,
            credentials=None,
            host="squid.clam.whelk",
        )
def test_execute_patch_job(transport: str = "grpc"):
    """execute_patch_job forwards the request to the stub and unpacks the
    PatchJob response fields."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = patch_jobs.ExecutePatchJobRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.execute_patch_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = patch_jobs.PatchJob(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
            state=patch_jobs.PatchJob.State.STARTED,
            dry_run=True,
            error_message="error_message_value",
            percent_complete=0.1705,
            patch_deployment="patch_deployment_value",
        )
        response = client.execute_patch_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, patch_jobs.PatchJob)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.state == patch_jobs.PatchJob.State.STARTED
    assert response.dry_run is True
    assert response.error_message == "error_message_value"
    assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
    assert response.patch_deployment == "patch_deployment_value"
@pytest.mark.asyncio
async def test_execute_patch_job_async(transport: str = "grpc_asyncio"):
    """Async variant of test_execute_patch_job (awaitable stub response)."""
    client = OsConfigServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = patch_jobs.ExecutePatchJobRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.execute_patch_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_jobs.PatchJob(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
                state=patch_jobs.PatchJob.State.STARTED,
                dry_run=True,
                error_message="error_message_value",
                percent_complete=0.1705,
                patch_deployment="patch_deployment_value",
            )
        )
        response = await client.execute_patch_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, patch_jobs.PatchJob)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.state == patch_jobs.PatchJob.State.STARTED
    assert response.dry_run is True
    assert response.error_message == "error_message_value"
    assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
    assert response.patch_deployment == "patch_deployment_value"
def test_execute_patch_job_field_headers():
    """Routing metadata derived from request.parent is attached as the
    x-goog-request-params header."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = patch_jobs.ExecutePatchJobRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.execute_patch_job), "__call__"
    ) as call:
        call.return_value = patch_jobs.PatchJob()
        client.execute_patch_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_execute_patch_job_field_headers_async():
    """Async variant of test_execute_patch_job_field_headers."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = patch_jobs.ExecutePatchJobRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.execute_patch_job), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(patch_jobs.PatchJob())
        await client.execute_patch_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
def test_get_patch_job(transport: str = "grpc"):
    """get_patch_job forwards the request to the stub and unpacks the
    PatchJob response fields."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = patch_jobs.GetPatchJobRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.get_patch_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = patch_jobs.PatchJob(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
            state=patch_jobs.PatchJob.State.STARTED,
            dry_run=True,
            error_message="error_message_value",
            percent_complete=0.1705,
            patch_deployment="patch_deployment_value",
        )
        response = client.get_patch_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, patch_jobs.PatchJob)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.state == patch_jobs.PatchJob.State.STARTED
    assert response.dry_run is True
    assert response.error_message == "error_message_value"
    assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
    assert response.patch_deployment == "patch_deployment_value"
@pytest.mark.asyncio
async def test_get_patch_job_async(transport: str = "grpc_asyncio"):
    """Async variant of test_get_patch_job (awaitable stub response)."""
    client = OsConfigServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = patch_jobs.GetPatchJobRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.get_patch_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_jobs.PatchJob(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
                state=patch_jobs.PatchJob.State.STARTED,
                dry_run=True,
                error_message="error_message_value",
                percent_complete=0.1705,
                patch_deployment="patch_deployment_value",
            )
        )
        response = await client.get_patch_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, patch_jobs.PatchJob)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.state == patch_jobs.PatchJob.State.STARTED
    assert response.dry_run is True
    assert response.error_message == "error_message_value"
    assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
    assert response.patch_deployment == "patch_deployment_value"
def test_get_patch_job_field_headers():
    """Routing metadata derived from request.name is attached as the
    x-goog-request-params header."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = patch_jobs.GetPatchJobRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.get_patch_job), "__call__") as call:
        call.return_value = patch_jobs.PatchJob()
        client.get_patch_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_get_patch_job_field_headers_async():
    """Async variant of test_get_patch_job_field_headers."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = patch_jobs.GetPatchJobRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.get_patch_job), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(patch_jobs.PatchJob())
        await client.get_patch_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
def test_get_patch_job_flattened():
    """Flattened (keyword-argument) calls populate the request object."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.get_patch_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = patch_jobs.PatchJob()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_patch_job(name="name_value")
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_patch_job_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_patch_job(patch_jobs.GetPatchJobRequest(), name="name_value")
@pytest.mark.asyncio
async def test_get_patch_job_flattened_async():
    """Flattened (keyword-argument) calls populate the request object for
    the async client, mirroring test_get_patch_job_flattened."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.get_patch_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  The original
        # first assigned a plain PatchJob and immediately overwrote it with
        # the awaitable fake; that dead assignment is removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(patch_jobs.PatchJob())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_patch_job(name="name_value")
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_patch_job_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mixing both calling conventions at once is rejected up front.
    request = patch_jobs.GetPatchJobRequest()
    with pytest.raises(ValueError):
        await client.get_patch_job(request, name="name_value")
def test_cancel_patch_job(transport: str = "grpc"):
    """Cancelling a patch job returns the stub's PatchJob fields unchanged."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request is sufficient.
    request = patch_jobs.CancelPatchJobRequest()
    # Replace the gRPC stub so the call never leaves the process.
    with mock.patch.object(
        type(client._transport.cancel_patch_job), "__call__"
    ) as rpc:
        # Hand back a canned PatchJob from the fake stub.
        rpc.return_value = patch_jobs.PatchJob(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
            state=patch_jobs.PatchJob.State.STARTED,
            dry_run=True,
            error_message="error_message_value",
            percent_complete=0.1705,
            patch_deployment="patch_deployment_value",
        )
        response = client.cancel_patch_job(request)
        # Exactly one stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == request
    # Every canned field must round-trip through the client unchanged.
    assert isinstance(response, patch_jobs.PatchJob)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.state == patch_jobs.PatchJob.State.STARTED
    assert response.dry_run is True
    assert response.error_message == "error_message_value"
    assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
    assert response.patch_deployment == "patch_deployment_value"
@pytest.mark.asyncio
async def test_cancel_patch_job_async(transport: str = "grpc_asyncio"):
    """Async cancel returns the stub's PatchJob fields unchanged."""
    client = OsConfigServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # An empty request suffices: proto3 fields are optional, API is mocked.
    request = patch_jobs.CancelPatchJobRequest()
    # Replace the async gRPC stub so the call never leaves the process.
    with mock.patch.object(
        type(client._client._transport.cancel_patch_job), "__call__"
    ) as rpc:
        # The async surface awaits the stub result, so wrap the canned
        # PatchJob in an awaitable fake call.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_jobs.PatchJob(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
                state=patch_jobs.PatchJob.State.STARTED,
                dry_run=True,
                error_message="error_message_value",
                percent_complete=0.1705,
                patch_deployment="patch_deployment_value",
            )
        )
        response = await client.cancel_patch_job(request)
        # The stub saw exactly our request.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == request
    # Every canned field must round-trip through the client unchanged.
    assert isinstance(response, patch_jobs.PatchJob)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
    assert response.state == patch_jobs.PatchJob.State.STARTED
    assert response.dry_run is True
    assert response.error_message == "error_message_value"
    assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
    assert response.patch_deployment == "patch_deployment_value"
def test_cancel_patch_job_field_headers():
    """URI-bound fields must be mirrored into x-goog-request-params."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # `name` is part of the HTTP/1.1 URI, so give it a non-empty value.
    request = patch_jobs.CancelPatchJobRequest()
    request.name = "name/value"
    # Stub the transport so the call never leaves the process.
    with mock.patch.object(
        type(client._transport.cancel_patch_job), "__call__"
    ) as rpc:
        rpc.return_value = patch_jobs.PatchJob()
        client.cancel_patch_job(request)
        # One invocation, with our request and the routing metadata attached.
        assert len(rpc.mock_calls) == 1
        _, sent_args, sent_kwargs = rpc.mock_calls[0]
        assert sent_args[0] == request
        assert ("x-goog-request-params", "name=name/value") in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_cancel_patch_job_field_headers_async():
    """Async cancel must mirror URI-bound fields into x-goog-request-params."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # `name` is part of the HTTP/1.1 URI, so give it a non-empty value.
    request = patch_jobs.CancelPatchJobRequest()
    request.name = "name/value"
    # Stub the async transport so the call never leaves the process.
    with mock.patch.object(
        type(client._client._transport.cancel_patch_job), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(patch_jobs.PatchJob())
        await client.cancel_patch_job(request)
        # The stub saw our request, with the routing metadata attached.
        assert len(rpc.mock_calls)
        _, sent_args, sent_kwargs = rpc.mock_calls[0]
        assert sent_args[0] == request
        assert ("x-goog-request-params", "name=name/value") in sent_kwargs["metadata"]
def test_list_patch_jobs(transport: str = "grpc"):
    """Listing patch jobs returns a pager exposing the page token."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # An empty request suffices: proto3 fields are optional, API is mocked.
    request = patch_jobs.ListPatchJobsRequest()
    # Replace the gRPC stub with a mock returning a canned first page.
    with mock.patch.object(type(client._transport.list_patch_jobs), "__call__") as rpc:
        rpc.return_value = patch_jobs.ListPatchJobsResponse(
            next_page_token="next_page_token_value"
        )
        response = client.list_patch_jobs(request)
        # The stub saw exactly our request.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == request
    # The client wraps the raw response in a pager.
    assert isinstance(response, pagers.ListPatchJobsPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_patch_jobs_async(transport: str = "grpc_asyncio"):
    """Async listing returns an async pager exposing the page token."""
    client = OsConfigServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # An empty request suffices: proto3 fields are optional, API is mocked.
    request = patch_jobs.ListPatchJobsRequest()
    # Replace the async gRPC stub with a mock returning a canned first page.
    with mock.patch.object(
        type(client._client._transport.list_patch_jobs), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_jobs.ListPatchJobsResponse(next_page_token="next_page_token_value")
        )
        response = await client.list_patch_jobs(request)
        # The stub saw exactly our request.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == request
    # The client wraps the raw response in an async pager.
    assert isinstance(response, pagers.ListPatchJobsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_patch_jobs_field_headers():
    """URI-bound `parent` must be mirrored into x-goog-request-params."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # `parent` is part of the HTTP/1.1 URI, so give it a non-empty value.
    request = patch_jobs.ListPatchJobsRequest()
    request.parent = "parent/value"
    # Stub the transport so the call never leaves the process.
    with mock.patch.object(type(client._transport.list_patch_jobs), "__call__") as rpc:
        rpc.return_value = patch_jobs.ListPatchJobsResponse()
        client.list_patch_jobs(request)
        # One invocation, with our request and the routing metadata attached.
        assert len(rpc.mock_calls) == 1
        _, sent_args, sent_kwargs = rpc.mock_calls[0]
        assert sent_args[0] == request
        assert ("x-goog-request-params", "parent=parent/value") in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_patch_jobs_field_headers_async():
    """Async listing must mirror `parent` into x-goog-request-params."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # `parent` is part of the HTTP/1.1 URI, so give it a non-empty value.
    request = patch_jobs.ListPatchJobsRequest()
    request.parent = "parent/value"
    # Stub the async transport so the call never leaves the process.
    with mock.patch.object(
        type(client._client._transport.list_patch_jobs), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_jobs.ListPatchJobsResponse()
        )
        await client.list_patch_jobs(request)
        # The stub saw our request, with the routing metadata attached.
        assert len(rpc.mock_calls)
        _, sent_args, sent_kwargs = rpc.mock_calls[0]
        assert sent_args[0] == request
        assert ("x-goog-request-params", "parent=parent/value") in sent_kwargs["metadata"]
def test_list_patch_jobs_flattened():
    """The flattened `parent` keyword is folded into the outgoing request."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Stub out the transport method so no real RPC happens.
    with mock.patch.object(type(client._transport.list_patch_jobs), "__call__") as rpc:
        rpc.return_value = patch_jobs.ListPatchJobsResponse()
        # Invoke with the flattened keyword form rather than a request object.
        client.list_patch_jobs(parent="parent_value")
        # The client must have built a request carrying that field value.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0].parent == "parent_value"
def test_list_patch_jobs_flattened_error():
    """Passing a request object together with flattened fields must raise."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # The client treats an explicit request plus keyword fields as ambiguous.
    request = patch_jobs.ListPatchJobsRequest()
    with pytest.raises(ValueError):
        client.list_patch_jobs(request, parent="parent_value")
@pytest.mark.asyncio
async def test_list_patch_jobs_flattened_async():
    """Async: the flattened `parent` keyword is folded into the request."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_jobs), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (Fix: the original first assigned a bare response here and then
        # immediately overwrote it — that dead assignment is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_jobs.ListPatchJobsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_patch_jobs(parent="parent_value")
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_patch_jobs_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mixing both calling conventions at once is rejected up front.
    request = patch_jobs.ListPatchJobsRequest()
    with pytest.raises(ValueError):
        await client.list_patch_jobs(request, parent="parent_value")
def test_list_patch_jobs_pager():
    """The synchronous pager transparently walks items across all pages."""
    # Fix: instantiate AnonymousCredentials — the original passed the class
    # object itself, unlike every other test in this file.
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.list_patch_jobs), "__call__") as call:
        # Four pages (3 + 0 + 1 + 2 items); the trailing RuntimeError guards
        # against the pager requesting a page past the last one.
        call.side_effect = (
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[
                    patch_jobs.PatchJob(),
                    patch_jobs.PatchJob(),
                    patch_jobs.PatchJob(),
                ],
                next_page_token="abc",
            ),
            patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def"),
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[patch_jobs.PatchJob()], next_page_token="ghi"
            ),
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob()]
            ),
            RuntimeError,
        )
        results = list(client.list_patch_jobs(request={}))
        # All six items across the four pages are surfaced.
        assert len(results) == 6
        assert all(isinstance(i, patch_jobs.PatchJob) for i in results)
def test_list_patch_jobs_pages():
    """The pager's `pages` view yields raw pages with their tokens."""
    # Fix: instantiate AnonymousCredentials — the original passed the class
    # object itself, unlike every other test in this file.
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.list_patch_jobs), "__call__") as call:
        # Four pages; the trailing RuntimeError guards against over-paging.
        call.side_effect = (
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[
                    patch_jobs.PatchJob(),
                    patch_jobs.PatchJob(),
                    patch_jobs.PatchJob(),
                ],
                next_page_token="abc",
            ),
            patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def"),
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[patch_jobs.PatchJob()], next_page_token="ghi"
            ),
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob()]
            ),
            RuntimeError,
        )
        pages = list(client.list_patch_jobs(request={}).pages)
        # Each raw page carries the token of the page that follows it
        # (empty string on the final page).
        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_patch_jobs_async_pager():
    """The async pager transparently walks items across all pages."""
    # Fix: instantiate AnonymousCredentials — the original passed the class
    # object itself, unlike every other test in this file.
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_jobs),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Four pages (3 + 0 + 1 + 2 items); the trailing RuntimeError guards
        # against the pager requesting a page past the last one.
        call.side_effect = (
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[
                    patch_jobs.PatchJob(),
                    patch_jobs.PatchJob(),
                    patch_jobs.PatchJob(),
                ],
                next_page_token="abc",
            ),
            patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def"),
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[patch_jobs.PatchJob()], next_page_token="ghi"
            ),
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob()]
            ),
            RuntimeError,
        )
        async_pager = await client.list_patch_jobs(request={})
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # All six items across the four pages are surfaced.
        assert len(responses) == 6
        assert all(isinstance(i, patch_jobs.PatchJob) for i in responses)
@pytest.mark.asyncio
async def test_list_patch_jobs_async_pages():
    """The async pager's `pages` view yields raw pages with their tokens."""
    # Fix: instantiate AnonymousCredentials — the original passed the class
    # object itself, unlike every other test in this file.
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_jobs),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Four pages; the trailing RuntimeError guards against over-paging.
        call.side_effect = (
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[
                    patch_jobs.PatchJob(),
                    patch_jobs.PatchJob(),
                    patch_jobs.PatchJob(),
                ],
                next_page_token="abc",
            ),
            patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def"),
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[patch_jobs.PatchJob()], next_page_token="ghi"
            ),
            patch_jobs.ListPatchJobsResponse(
                patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob()]
            ),
            RuntimeError,
        )
        pages = []
        async for page in (await client.list_patch_jobs(request={})).pages:
            pages.append(page)
        # Each raw page carries the token of the page that follows it
        # (empty string on the final page).
        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == token
def test_list_patch_job_instance_details(transport: str = "grpc"):
    """Listing instance details returns a pager exposing the page token."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # An empty request suffices: proto3 fields are optional, API is mocked.
    request = patch_jobs.ListPatchJobInstanceDetailsRequest()
    # Replace the gRPC stub with a mock returning a canned first page.
    with mock.patch.object(
        type(client._transport.list_patch_job_instance_details), "__call__"
    ) as rpc:
        rpc.return_value = patch_jobs.ListPatchJobInstanceDetailsResponse(
            next_page_token="next_page_token_value"
        )
        response = client.list_patch_job_instance_details(request)
        # The stub saw exactly our request.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == request
    # The client wraps the raw response in a pager.
    assert isinstance(response, pagers.ListPatchJobInstanceDetailsPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_async(transport: str = "grpc_asyncio"):
    """Async listing of instance details returns an async pager."""
    client = OsConfigServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # An empty request suffices: proto3 fields are optional, API is mocked.
    request = patch_jobs.ListPatchJobInstanceDetailsRequest()
    # Replace the async gRPC stub with a mock returning a canned first page.
    with mock.patch.object(
        type(client._client._transport.list_patch_job_instance_details), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                next_page_token="next_page_token_value"
            )
        )
        response = await client.list_patch_job_instance_details(request)
        # The stub saw exactly our request.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == request
    # The client wraps the raw response in an async pager.
    assert isinstance(response, pagers.ListPatchJobInstanceDetailsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_patch_job_instance_details_field_headers():
    """URI-bound `parent` must be mirrored into x-goog-request-params."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # `parent` is part of the HTTP/1.1 URI, so give it a non-empty value.
    request = patch_jobs.ListPatchJobInstanceDetailsRequest()
    request.parent = "parent/value"
    # Stub the transport so the call never leaves the process.
    with mock.patch.object(
        type(client._transport.list_patch_job_instance_details), "__call__"
    ) as rpc:
        rpc.return_value = patch_jobs.ListPatchJobInstanceDetailsResponse()
        client.list_patch_job_instance_details(request)
        # One invocation, with our request and the routing metadata attached.
        assert len(rpc.mock_calls) == 1
        _, sent_args, sent_kwargs = rpc.mock_calls[0]
        assert sent_args[0] == request
        assert ("x-goog-request-params", "parent=parent/value") in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_field_headers_async():
    """Async: `parent` must be mirrored into x-goog-request-params."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # `parent` is part of the HTTP/1.1 URI, so give it a non-empty value.
    request = patch_jobs.ListPatchJobInstanceDetailsRequest()
    request.parent = "parent/value"
    # Stub the async transport so the call never leaves the process.
    with mock.patch.object(
        type(client._client._transport.list_patch_job_instance_details), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_jobs.ListPatchJobInstanceDetailsResponse()
        )
        await client.list_patch_job_instance_details(request)
        # The stub saw our request, with the routing metadata attached.
        assert len(rpc.mock_calls)
        _, sent_args, sent_kwargs = rpc.mock_calls[0]
        assert sent_args[0] == request
        assert ("x-goog-request-params", "parent=parent/value") in sent_kwargs["metadata"]
def test_list_patch_job_instance_details_flattened():
    """The flattened `parent` keyword is folded into the outgoing request."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Stub out the transport method so no real RPC happens.
    with mock.patch.object(
        type(client._transport.list_patch_job_instance_details), "__call__"
    ) as rpc:
        rpc.return_value = patch_jobs.ListPatchJobInstanceDetailsResponse()
        # Invoke with the flattened keyword form rather than a request object.
        client.list_patch_job_instance_details(parent="parent_value")
        # The client must have built a request carrying that field value.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0].parent == "parent_value"
def test_list_patch_job_instance_details_flattened_error():
    """Passing a request object together with flattened fields must raise."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # The client treats an explicit request plus keyword fields as ambiguous.
    request = patch_jobs.ListPatchJobInstanceDetailsRequest()
    with pytest.raises(ValueError):
        client.list_patch_job_instance_details(request, parent="parent_value")
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_flattened_async():
    """Async: the flattened `parent` keyword is folded into the request."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_job_instance_details), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (Fix: the original first assigned a bare response here and then
        # immediately overwrote it — that dead assignment is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_jobs.ListPatchJobInstanceDetailsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_patch_job_instance_details(parent="parent_value")
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mixing both calling conventions at once is rejected up front.
    request = patch_jobs.ListPatchJobInstanceDetailsRequest()
    with pytest.raises(ValueError):
        await client.list_patch_job_instance_details(request, parent="parent_value")
def test_list_patch_job_instance_details_pager():
    """The synchronous pager transparently walks items across all pages."""
    # Fix: instantiate AnonymousCredentials — the original passed the class
    # object itself, unlike every other test in this file.
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_patch_job_instance_details), "__call__"
    ) as call:
        # Four pages (3 + 0 + 1 + 2 items); the trailing RuntimeError guards
        # against the pager requesting a page past the last one.
        call.side_effect = (
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                ],
                next_page_token="abc",
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[], next_page_token="def"
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails()],
                next_page_token="ghi",
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                ]
            ),
            RuntimeError,
        )
        results = list(client.list_patch_job_instance_details(request={}))
        # All six items across the four pages are surfaced.
        assert len(results) == 6
        assert all(isinstance(i, patch_jobs.PatchJobInstanceDetails) for i in results)
def test_list_patch_job_instance_details_pages():
    """The pager's `pages` view yields raw pages with their tokens."""
    # Fix: instantiate AnonymousCredentials — the original passed the class
    # object itself, unlike every other test in this file.
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_patch_job_instance_details), "__call__"
    ) as call:
        # Four pages; the trailing RuntimeError guards against over-paging.
        call.side_effect = (
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                ],
                next_page_token="abc",
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[], next_page_token="def"
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails()],
                next_page_token="ghi",
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                ]
            ),
            RuntimeError,
        )
        pages = list(client.list_patch_job_instance_details(request={}).pages)
        # Each raw page carries the token of the page that follows it
        # (empty string on the final page).
        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_async_pager():
    """The async pager transparently walks items across all pages."""
    # Fix: instantiate AnonymousCredentials — the original passed the class
    # object itself, unlike every other test in this file.
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_job_instance_details),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Four pages (3 + 0 + 1 + 2 items); the trailing RuntimeError guards
        # against the pager requesting a page past the last one.
        call.side_effect = (
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                ],
                next_page_token="abc",
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[], next_page_token="def"
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails()],
                next_page_token="ghi",
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                ]
            ),
            RuntimeError,
        )
        async_pager = await client.list_patch_job_instance_details(request={})
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # All six items across the four pages are surfaced.
        assert len(responses) == 6
        assert all(isinstance(i, patch_jobs.PatchJobInstanceDetails) for i in responses)
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_async_pages():
    """The async pager's `pages` view yields raw pages with their tokens."""
    # Fix: instantiate AnonymousCredentials — the original passed the class
    # object itself, unlike every other test in this file.
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_job_instance_details),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Four pages; the trailing RuntimeError guards against over-paging.
        call.side_effect = (
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                ],
                next_page_token="abc",
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[], next_page_token="def"
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails()],
                next_page_token="ghi",
            ),
            patch_jobs.ListPatchJobInstanceDetailsResponse(
                patch_job_instance_details=[
                    patch_jobs.PatchJobInstanceDetails(),
                    patch_jobs.PatchJobInstanceDetails(),
                ]
            ),
            RuntimeError,
        )
        pages = []
        async for page in (
            await client.list_patch_job_instance_details(request={})
        ).pages:
            pages.append(page)
        # Each raw page carries the token of the page that follows it
        # (empty string on the final page).
        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == token
def test_create_patch_deployment(transport: str = "grpc"):
    """Creating a patch deployment returns the stub's deployment unchanged."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # An empty request suffices: proto3 fields are optional, API is mocked.
    request = patch_deployments.CreatePatchDeploymentRequest()
    # Replace the gRPC stub so the call never leaves the process.
    with mock.patch.object(
        type(client._transport.create_patch_deployment), "__call__"
    ) as rpc:
        rpc.return_value = patch_deployments.PatchDeployment(
            name="name_value", description="description_value"
        )
        response = client.create_patch_deployment(request)
        # Exactly one stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == request
    # The canned fields must round-trip through the client unchanged.
    assert isinstance(response, patch_deployments.PatchDeployment)
    assert response.name == "name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_patch_deployment_async(transport: str = "grpc_asyncio"):
    """Async create returns the stub's deployment unchanged."""
    client = OsConfigServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # An empty request suffices: proto3 fields are optional, API is mocked.
    request = patch_deployments.CreatePatchDeploymentRequest()
    # Replace the async gRPC stub so the call never leaves the process.
    with mock.patch.object(
        type(client._client._transport.create_patch_deployment), "__call__"
    ) as rpc:
        # Wrap the canned deployment in an awaitable fake call.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_deployments.PatchDeployment(
                name="name_value", description="description_value"
            )
        )
        response = await client.create_patch_deployment(request)
        # The stub saw exactly our request.
        assert len(rpc.mock_calls)
        _, sent_args, _ = rpc.mock_calls[0]
        assert sent_args[0] == request
    # The canned fields must round-trip through the client unchanged.
    assert isinstance(response, patch_deployments.PatchDeployment)
    assert response.name == "name_value"
    assert response.description == "description_value"
def test_create_patch_deployment_field_headers():
    """URI-bound `parent` must be mirrored into x-goog-request-params."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # `parent` is part of the HTTP/1.1 URI, so give it a non-empty value.
    request = patch_deployments.CreatePatchDeploymentRequest()
    request.parent = "parent/value"
    # Stub the transport so the call never leaves the process.
    with mock.patch.object(
        type(client._transport.create_patch_deployment), "__call__"
    ) as rpc:
        rpc.return_value = patch_deployments.PatchDeployment()
        client.create_patch_deployment(request)
        # One invocation, with our request and the routing metadata attached.
        assert len(rpc.mock_calls) == 1
        _, sent_args, sent_kwargs = rpc.mock_calls[0]
        assert sent_args[0] == request
        assert ("x-goog-request-params", "parent=parent/value") in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_patch_deployment_field_headers_async():
    """Async create must mirror `parent` into x-goog-request-params."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # `parent` is part of the HTTP/1.1 URI, so give it a non-empty value.
    request = patch_deployments.CreatePatchDeploymentRequest()
    request.parent = "parent/value"
    # Stub the async transport so the call never leaves the process.
    with mock.patch.object(
        type(client._client._transport.create_patch_deployment), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_deployments.PatchDeployment()
        )
        await client.create_patch_deployment(request)
        # The stub saw our request, with the routing metadata attached.
        assert len(rpc.mock_calls)
        _, sent_args, sent_kwargs = rpc.mock_calls[0]
        assert sent_args[0] == request
        assert ("x-goog-request-params", "parent=parent/value") in sent_kwargs["metadata"]
def test_create_patch_deployment_flattened():
    """Verify flattened keyword args are packed into the request object."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.create_patch_deployment), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = patch_deployments.PatchDeployment()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_patch_deployment(
            parent="parent_value",
            patch_deployment=patch_deployments.PatchDeployment(name="name_value"),
            patch_deployment_id="patch_deployment_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].patch_deployment == patch_deployments.PatchDeployment(
            name="name_value"
        )
        assert args[0].patch_deployment_id == "patch_deployment_id_value"
def test_create_patch_deployment_flattened_error():
    """Verify mixing a request object with flattened args raises ValueError."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_patch_deployment(
            patch_deployments.CreatePatchDeploymentRequest(),
            parent="parent_value",
            patch_deployment=patch_deployments.PatchDeployment(name="name_value"),
            patch_deployment_id="patch_deployment_id_value",
        )
@pytest.mark.asyncio
async def test_create_patch_deployment_flattened_async():
    """Async variant: verify flattened keyword args are packed into the request object."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.create_patch_deployment), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (A bare PatchDeployment was previously assigned here and immediately
        # overwritten; only the awaitable fake call is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_deployments.PatchDeployment()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_patch_deployment(
            parent="parent_value",
            patch_deployment=patch_deployments.PatchDeployment(name="name_value"),
            patch_deployment_id="patch_deployment_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].patch_deployment == patch_deployments.PatchDeployment(
            name="name_value"
        )
        assert args[0].patch_deployment_id == "patch_deployment_id_value"
@pytest.mark.asyncio
async def test_create_patch_deployment_flattened_error_async():
    """Async variant: verify mixing a request object with flattened args raises ValueError."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_patch_deployment(
            patch_deployments.CreatePatchDeploymentRequest(),
            parent="parent_value",
            patch_deployment=patch_deployments.PatchDeployment(name="name_value"),
            patch_deployment_id="patch_deployment_id_value",
        )
def test_get_patch_deployment(transport: str = "grpc"):
    """Verify get_patch_deployment sends the request and returns a PatchDeployment."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = patch_deployments.GetPatchDeploymentRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.get_patch_deployment), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = patch_deployments.PatchDeployment(
            name="name_value", description="description_value"
        )
        response = client.get_patch_deployment(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, patch_deployments.PatchDeployment)
    assert response.name == "name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_patch_deployment_async(transport: str = "grpc_asyncio"):
    """Async variant: verify get_patch_deployment returns a PatchDeployment."""
    client = OsConfigServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = patch_deployments.GetPatchDeploymentRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.get_patch_deployment), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_deployments.PatchDeployment(
                name="name_value", description="description_value"
            )
        )
        response = await client.get_patch_deployment(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, patch_deployments.PatchDeployment)
    assert response.name == "name_value"
    assert response.description == "description_value"
def test_get_patch_deployment_field_headers():
    """Verify get_patch_deployment forwards ``name`` as an x-goog-request-params header."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = patch_deployments.GetPatchDeploymentRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.get_patch_deployment), "__call__"
    ) as call:
        call.return_value = patch_deployments.PatchDeployment()
        client.get_patch_deployment(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_get_patch_deployment_field_headers_async():
    """Async variant: verify get_patch_deployment forwards ``name`` as a routing header."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = patch_deployments.GetPatchDeploymentRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.get_patch_deployment), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_deployments.PatchDeployment()
        )
        await client.get_patch_deployment(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
def test_get_patch_deployment_flattened():
    """Verify the flattened ``name`` kwarg is packed into the request object."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.get_patch_deployment), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = patch_deployments.PatchDeployment()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_patch_deployment(name="name_value")
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_patch_deployment_flattened_error():
    """Verify mixing a request object with flattened args raises ValueError."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_patch_deployment(
            patch_deployments.GetPatchDeploymentRequest(), name="name_value"
        )
@pytest.mark.asyncio
async def test_get_patch_deployment_flattened_async():
    """Async variant: verify the flattened ``name`` kwarg is packed into the request object."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.get_patch_deployment), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (A bare PatchDeployment was previously assigned here and immediately
        # overwritten; only the awaitable fake call is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_deployments.PatchDeployment()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_patch_deployment(name="name_value")
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_patch_deployment_flattened_error_async():
    """Async variant: verify mixing a request object with flattened args raises ValueError."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_patch_deployment(
            patch_deployments.GetPatchDeploymentRequest(), name="name_value"
        )
def test_list_patch_deployments(transport: str = "grpc"):
    """Verify list_patch_deployments sends the request and returns a pager."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = patch_deployments.ListPatchDeploymentsRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_patch_deployments), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = patch_deployments.ListPatchDeploymentsResponse(
            next_page_token="next_page_token_value"
        )
        response = client.list_patch_deployments(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPatchDeploymentsPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_patch_deployments_async(transport: str = "grpc_asyncio"):
    """Async variant: verify list_patch_deployments returns an async pager."""
    client = OsConfigServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = patch_deployments.ListPatchDeploymentsRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_deployments), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_deployments.ListPatchDeploymentsResponse(
                next_page_token="next_page_token_value"
            )
        )
        response = await client.list_patch_deployments(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPatchDeploymentsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_patch_deployments_field_headers():
    """Verify list_patch_deployments forwards ``parent`` as an x-goog-request-params header."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = patch_deployments.ListPatchDeploymentsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_patch_deployments), "__call__"
    ) as call:
        call.return_value = patch_deployments.ListPatchDeploymentsResponse()
        client.list_patch_deployments(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_list_patch_deployments_field_headers_async():
    """Async variant: verify list_patch_deployments forwards ``parent`` as a routing header."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = patch_deployments.ListPatchDeploymentsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_deployments), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_deployments.ListPatchDeploymentsResponse()
        )
        await client.list_patch_deployments(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
def test_list_patch_deployments_flattened():
    """Verify the flattened ``parent`` kwarg is packed into the request object."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_patch_deployments), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = patch_deployments.ListPatchDeploymentsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_patch_deployments(parent="parent_value")
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
def test_list_patch_deployments_flattened_error():
    """Verify mixing a request object with flattened args raises ValueError."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_patch_deployments(
            patch_deployments.ListPatchDeploymentsRequest(), parent="parent_value"
        )
@pytest.mark.asyncio
async def test_list_patch_deployments_flattened_async():
    """Async variant: verify the flattened ``parent`` kwarg is packed into the request object."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_deployments), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (A bare ListPatchDeploymentsResponse was previously assigned here and
        # immediately overwritten; only the awaitable fake call is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            patch_deployments.ListPatchDeploymentsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_patch_deployments(parent="parent_value")
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_patch_deployments_flattened_error_async():
    """Async variant: verify mixing a request object with flattened args raises ValueError."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_patch_deployments(
            patch_deployments.ListPatchDeploymentsRequest(), parent="parent_value"
        )
def test_list_patch_deployments_pager():
    """Verify the sync pager iterates items across multiple response pages."""
    # BUG FIX: ``AnonymousCredentials`` was passed as a class, not an instance;
    # instantiate it, consistent with every other test in this file.
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_patch_deployments), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                ],
                next_page_token="abc",
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[], next_page_token="def"
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[patch_deployments.PatchDeployment()],
                next_page_token="ghi",
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                ]
            ),
            RuntimeError,
        )
        # The pager is iterable; materialize all items across all pages.
        results = list(client.list_patch_deployments(request={}))
        assert len(results) == 6
        assert all(isinstance(i, patch_deployments.PatchDeployment) for i in results)
def test_list_patch_deployments_pages():
    """Verify the sync pager exposes raw pages with the expected page tokens."""
    # BUG FIX: ``AnonymousCredentials`` was passed as a class, not an instance;
    # instantiate it, consistent with every other test in this file.
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_patch_deployments), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                ],
                next_page_token="abc",
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[], next_page_token="def"
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[patch_deployments.PatchDeployment()],
                next_page_token="ghi",
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                ]
            ),
            RuntimeError,
        )
        pages = list(client.list_patch_deployments(request={}).pages)
        # The final page has no next_page_token (empty string).
        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_patch_deployments_async_pager():
    """Verify the async pager iterates items across multiple response pages."""
    # BUG FIX: ``AnonymousCredentials`` was passed as a class, not an instance;
    # instantiate it, consistent with every other test in this file.
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_deployments),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                ],
                next_page_token="abc",
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[], next_page_token="def"
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[patch_deployments.PatchDeployment()],
                next_page_token="ghi",
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                ]
            ),
            RuntimeError,
        )
        async_pager = await client.list_patch_deployments(request={})
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, patch_deployments.PatchDeployment) for i in responses)
@pytest.mark.asyncio
async def test_list_patch_deployments_async_pages():
    """Verify the async pager exposes raw pages with the expected page tokens."""
    # BUG FIX: ``AnonymousCredentials`` was passed as a class, not an instance;
    # instantiate it, consistent with every other test in this file.
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_patch_deployments),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                ],
                next_page_token="abc",
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[], next_page_token="def"
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[patch_deployments.PatchDeployment()],
                next_page_token="ghi",
            ),
            patch_deployments.ListPatchDeploymentsResponse(
                patch_deployments=[
                    patch_deployments.PatchDeployment(),
                    patch_deployments.PatchDeployment(),
                ]
            ),
            RuntimeError,
        )
        pages = []
        async for page in (await client.list_patch_deployments(request={})).pages:
            pages.append(page)
        # The final page has no next_page_token (empty string).
        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == token
def test_delete_patch_deployment(transport: str = "grpc"):
    """Verify delete_patch_deployment sends the request and returns None."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = patch_deployments.DeletePatchDeploymentRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.delete_patch_deployment), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_patch_deployment(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_patch_deployment_async(transport: str = "grpc_asyncio"):
    """Async variant: verify delete_patch_deployment returns None."""
    client = OsConfigServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = patch_deployments.DeletePatchDeploymentRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.delete_patch_deployment), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_patch_deployment(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_patch_deployment_field_headers():
    """Verify delete_patch_deployment forwards ``name`` as an x-goog-request-params header."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = patch_deployments.DeletePatchDeploymentRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.delete_patch_deployment), "__call__"
    ) as call:
        call.return_value = None
        client.delete_patch_deployment(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_patch_deployment_field_headers_async():
    """Async variant: verify delete_patch_deployment forwards ``name`` as a routing header."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = patch_deployments.DeletePatchDeploymentRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.delete_patch_deployment), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_patch_deployment(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
def test_delete_patch_deployment_flattened():
    """Verify the flattened ``name`` kwarg is packed into the request object."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.delete_patch_deployment), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_patch_deployment(name="name_value")
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_delete_patch_deployment_flattened_error():
    """Verify mixing a request object with flattened args raises ValueError."""
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_patch_deployment(
            patch_deployments.DeletePatchDeploymentRequest(), name="name_value"
        )
@pytest.mark.asyncio
async def test_delete_patch_deployment_flattened_async():
    """Async variant: verify the flattened ``name`` kwarg is packed into the request object."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.delete_patch_deployment), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (``None`` was previously assigned here and immediately overwritten;
        # only the awaitable fake call is needed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_patch_deployment(name="name_value")
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_patch_deployment_flattened_error_async():
    """Async variant: verify mixing a request object with flattened args raises ValueError."""
    client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_patch_deployment(
            patch_deployments.DeletePatchDeploymentRequest(), name="name_value"
        )
def test_credentials_transport_error():
    """Verify supplying both credentials and a transport instance raises ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.OsConfigServiceGrpcTransport(
        credentials=credentials.AnonymousCredentials()
    )
    with pytest.raises(ValueError):
        client = OsConfigServiceClient(
            credentials=credentials.AnonymousCredentials(), transport=transport
        )
def test_transport_instance():
    """Verify the client adopts a user-supplied transport instance."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.OsConfigServiceGrpcTransport(
        credentials=credentials.AnonymousCredentials()
    )
    client = OsConfigServiceClient(transport=transport)
    assert client._transport is transport
def test_transport_get_channel():
    """Verify both sync and asyncio gRPC transports expose a usable channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.OsConfigServiceGrpcTransport(
        credentials=credentials.AnonymousCredentials()
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.OsConfigServiceGrpcAsyncIOTransport(
        credentials=credentials.AnonymousCredentials()
    )
    channel = transport.grpc_channel
    assert channel
def test_transport_grpc_default():
    """Verify the default transport is the synchronous gRPC transport."""
    # A client should use the gRPC transport by default.
    client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
    assert isinstance(client._transport, transports.OsConfigServiceGrpcTransport)
def test_os_config_service_base_transport():
    """Verify every RPC method on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    transport = transports.OsConfigServiceTransport(
        credentials=credentials.AnonymousCredentials()
    )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "execute_patch_job",
        "get_patch_job",
        "cancel_patch_job",
        "list_patch_jobs",
        "list_patch_job_instance_details",
        "create_patch_deployment",
        "get_patch_deployment",
        "list_patch_deployments",
        "delete_patch_deployment",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
def test_os_config_service_auth_adc():
    """Verify the client falls back to ADC with the cloud-platform scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        OsConfigServiceClient()
        adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",)
        )
def test_os_config_service_transport_auth_adc():
    """Verify the transport falls back to ADC with the cloud-platform scope."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transports.OsConfigServiceGrpcTransport(host="squid.clam.whelk")
        adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",)
        )
def test_os_config_service_host_no_port():
    """Verify an endpoint without a port defaults to :443."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="osconfig.googleapis.com"
        ),
    )
    assert client._transport._host == "osconfig.googleapis.com:443"
def test_os_config_service_host_with_port():
    """Verify an endpoint with an explicit port is preserved."""
    client = OsConfigServiceClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="osconfig.googleapis.com:8000"
        ),
    )
    assert client._transport._host == "osconfig.googleapis.com:8000"
def test_os_config_service_grpc_transport_channel():
    """Verify a user-supplied channel wins over mTLS endpoint/cert-source options."""
    channel = grpc.insecure_channel("http://localhost/")
    # Check that if channel is provided, mtls endpoint and client_cert_source
    # won't be used.
    callback = mock.MagicMock()
    transport = transports.OsConfigServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=callback,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert not callback.called
def test_os_config_service_grpc_asyncio_transport_channel():
    """Asyncio variant: verify a user-supplied channel wins over mTLS options."""
    channel = aio.insecure_channel("http://localhost/")
    # Check that if channel is provided, mtls endpoint and client_cert_source
    # won't be used.
    callback = mock.MagicMock()
    transport = transports.OsConfigServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=callback,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert not callback.called
@mock.patch("grpc.ssl_channel_credentials", autospec=True)
@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
def test_os_config_service_grpc_transport_channel_mtls_with_client_cert_source(
    grpc_create_channel, grpc_ssl_channel_cred
):
    """Verify an mTLS channel is built from the client_cert_source callback."""
    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
    # are provided, then a mTLS channel will be created.
    mock_cred = mock.Mock()
    mock_ssl_cred = mock.Mock()
    grpc_ssl_channel_cred.return_value = mock_ssl_cred
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    transport = transports.OsConfigServiceGrpcTransport(
        host="squid.clam.whelk",
        credentials=mock_cred,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=client_cert_source_callback,
    )
    grpc_ssl_channel_cred.assert_called_once_with(
        certificate_chain=b"cert bytes", private_key=b"key bytes"
    )
    grpc_create_channel.assert_called_once_with(
        "mtls.squid.clam.whelk:443",
        credentials=mock_cred,
        ssl_credentials=mock_ssl_cred,
        scopes=("https://www.googleapis.com/auth/cloud-platform",),
    )
    assert transport.grpc_channel == mock_grpc_channel
@mock.patch("grpc.ssl_channel_credentials", autospec=True)
@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
def test_os_config_service_grpc_asyncio_transport_channel_mtls_with_client_cert_source(
    grpc_create_channel, grpc_ssl_channel_cred
):
    """Async variant: endpoint + cert source (no channel) yields an mTLS channel."""
    fake_credentials = mock.Mock()
    fake_ssl_credentials = mock.Mock()
    fake_channel = mock.Mock()
    grpc_ssl_channel_cred.return_value = fake_ssl_credentials
    grpc_create_channel.return_value = fake_channel

    transport = transports.OsConfigServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        credentials=fake_credentials,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=client_cert_source_callback,
    )

    # The callback's cert/key pair must flow into ssl_channel_credentials,
    # and the resulting SSL credentials into the async channel factory.
    grpc_ssl_channel_cred.assert_called_once_with(
        certificate_chain=b"cert bytes", private_key=b"key bytes"
    )
    grpc_create_channel.assert_called_once_with(
        "mtls.squid.clam.whelk:443",
        credentials=fake_credentials,
        ssl_credentials=fake_ssl_credentials,
        scopes=("https://www.googleapis.com/auth/cloud-platform",),
    )
    assert transport.grpc_channel == fake_channel
@pytest.mark.parametrize(
    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
)
@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
def test_os_config_service_grpc_transport_channel_mtls_with_adc(
    grpc_create_channel, api_mtls_endpoint
):
    """Endpoint only (no channel, no cert source): fall back to SSL ADC."""
    fake_channel = mock.Mock()
    grpc_create_channel.return_value = fake_channel

    # Stub out google.auth.transport.grpc.SslCredentials so its
    # ssl_credentials property hands back our sentinel.
    fake_ssl_credentials = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=fake_ssl_credentials),
    ):
        fake_credentials = mock.Mock()
        transport = transports.OsConfigServiceGrpcTransport(
            host="squid.clam.whelk",
            credentials=fake_credentials,
            api_mtls_endpoint=api_mtls_endpoint,
            client_cert_source=None,
        )

        # Whether or not the endpoint carried a port, the channel must
        # be created against the canonical host:443 with the ADC creds.
        grpc_create_channel.assert_called_once_with(
            "mtls.squid.clam.whelk:443",
            credentials=fake_credentials,
            ssl_credentials=fake_ssl_credentials,
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
        )
        assert transport.grpc_channel == fake_channel
@pytest.mark.parametrize(
    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
)
@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
def test_os_config_service_grpc_asyncio_transport_channel_mtls_with_adc(
    grpc_create_channel, api_mtls_endpoint
):
    """Async variant: endpoint without cert source uses SSL ADC credentials."""
    fake_channel = mock.Mock()
    grpc_create_channel.return_value = fake_channel

    # Stub out google.auth.transport.grpc.SslCredentials so its
    # ssl_credentials property hands back our sentinel.
    fake_ssl_credentials = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=fake_ssl_credentials),
    ):
        fake_credentials = mock.Mock()
        transport = transports.OsConfigServiceGrpcAsyncIOTransport(
            host="squid.clam.whelk",
            credentials=fake_credentials,
            api_mtls_endpoint=api_mtls_endpoint,
            client_cert_source=None,
        )

        # Whether or not the endpoint carried a port, the async channel
        # must be created against host:443 with the ADC credentials.
        grpc_create_channel.assert_called_once_with(
            "mtls.squid.clam.whelk:443",
            credentials=fake_credentials,
            ssl_credentials=fake_ssl_credentials,
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
        )
        assert transport.grpc_channel == fake_channel
def test_patch_deployment_path():
    """patch_deployment_path assembles the canonical resource name."""
    sample_project = "squid"
    sample_deployment = "clam"

    want = "projects/{}/patchDeployments/{}".format(
        sample_project, sample_deployment
    )
    got = OsConfigServiceClient.patch_deployment_path(
        sample_project, sample_deployment
    )
    assert want == got
def test_parse_patch_deployment_path():
    """parse_patch_deployment_path is the inverse of patch_deployment_path."""
    fields = {"project": "whelk", "patch_deployment": "octopus"}
    built = OsConfigServiceClient.patch_deployment_path(**fields)

    # Round-trip: parsing the built path must recover the original fields.
    assert OsConfigServiceClient.parse_patch_deployment_path(built) == fields
|
996,398 | 0c42bc1778e541e18bbce319ebc90dbdcf7e2004 |
from .._core import Automaton, NoTransition
from unittest import TestCase
class CoreTests(TestCase):
    """
    Tests for Automat's (currently private, implementation detail) core.
    """

    def test_NoTransition(self):
        """
        A L{NoTransition} exception describes the state and input symbol
        that caused it.
        """
        # Constructing NoTransition without its two arguments must fail.
        with self.assertRaises(TypeError):
            NoTransition()

        badState = "current-state"
        badSymbol = "transitionless-symbol"
        exc = NoTransition(state=badState, symbol=badSymbol)

        self.assertIs(exc.symbol, badSymbol)
        # The message should mention both the state and the symbol.
        self.assertIn(badState, str(exc))
        self.assertIn(badSymbol, str(exc))

    def test_noOutputForInput(self):
        """
        L{Automaton.outputForInput} raises L{NoTransition} if no
        transition for that input is defined.
        """
        machine = Automaton()
        with self.assertRaises(NoTransition):
            machine.outputForInput("no-state", "no-symbol")

    def test_oneTransition(self):
        """
        L{Automaton.addTransition} adds its input symbol to
        L{Automaton.inputAlphabet}, all its outputs to
        L{Automaton.outputAlphabet}, and causes L{Automaton.outputForInput} to
        start returning the new state and output symbols.
        """
        machine = Automaton()
        machine.addTransition("beginning", "begin", "ending", ["end"])

        self.assertEqual(machine.inputAlphabet(), {"begin"})
        self.assertEqual(machine.outputAlphabet(), {"end"})
        self.assertEqual(machine.outputForInput("beginning", "begin"),
                         ("ending", ["end"]))
        self.assertEqual(machine.states(), {"beginning", "ending"})

    def test_oneTransition_nonIterableOutputs(self):
        """
        L{Automaton.addTransition} raises a TypeError when given outputs
        that aren't iterable and doesn't add any transitions.
        """
        machine = Automaton()
        nonIterableOutputs = 1
        with self.assertRaises(TypeError):
            machine.addTransition(
                "fromState", "viaSymbol", "toState", nonIterableOutputs)

        # The failed call must not have recorded anything.
        self.assertFalse(machine.inputAlphabet())
        self.assertFalse(machine.outputAlphabet())
        self.assertFalse(machine.states())
        self.assertFalse(machine.allTransitions())

    def test_initialState(self):
        """
        L{Automaton.initialState} is a descriptor that sets the initial
        state if it's not yet set, and raises L{ValueError} if it is.
        """
        machine = Automaton()
        machine.initialState = "a state"
        self.assertEqual(machine.initialState, "a state")

        # A second assignment must be rejected.
        with self.assertRaises(ValueError):
            machine.initialState = "another state"

    # FIXME: addTransition for transition that's been added before
|
import pygame
import sys

# Initialize all pygame modules.
pygame.init()

size = width, height = 600, 400
# Create the main window surface at the requested size.
screen = pygame.display.set_mode(size)
# Set the window title.
pygame.display.set_caption("冯小雨")

bg = (0, 0, 0)                       # background colour (black)
font = pygame.font.Font(None, 20)    # default font, 20 px
line_height = font.get_linesize()    # vertical step per printed event line
position = 0                         # y offset of the next line to draw
screen.fill(bg)

while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # Shut pygame down cleanly before terminating the process;
            # calling sys.exit() alone can leave the window hung on
            # some platforms.
            pygame.quit()
            sys.exit()

        # Render the event's repr in green and stack lines downward.
        screen.blit(font.render(str(event), True, (0, 255, 0)), (0, position))
        position += line_height
        # Wrap to the top and clear the screen once the window is full.
        if position > height:
            position = 0
            screen.fill(bg)

    pygame.display.flip()  # present the updated frame
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.